xref: /petsc/src/vec/is/utils/isltog.c (revision 73f4d3771d9e6ab3f04055eab794d7609818b9d3)
1 /*$Id: isltog.c,v 1.65 2001/05/21 14:16:29 bsmith Exp $*/
2 
3 #include "petscsys.h"   /*I "petscsys.h" I*/
4 #include "src/vec/is/isimpl.h"    /*I "petscis.h"  I*/
5 
6 #undef __FUNCT__
7 #define __FUNCT__ "ISLocalToGlobalMappingGetSize"
8 /*@C
9     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
10 
11     Not Collective
12 
13     Input Parameter:
14 .   ltog - local to global mapping
15 
16     Output Parameter:
17 .   n - the number of entries in the local mapping
18 
19     Level: advanced
20 
21     Concepts: mapping^local to global
22 
23 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
24 @*/
25 int ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,int *n)
26 {
27   PetscFunctionBegin;
28   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE);
29   *n = mapping->n;
30   PetscFunctionReturn(0);
31 }
32 
33 #undef __FUNCT__
34 #define __FUNCT__ "ISLocalToGlobalMappingView"
35 /*@C
36     ISLocalToGlobalMappingView - View a local to global mapping
37 
38     Not Collective
39 
40     Input Parameters:
41 +   ltog - local to global mapping
42 -   viewer - viewer
43 
44     Level: advanced
45 
46     Concepts: mapping^local to global
47 
48 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
49 @*/
50 int ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
51 {
52   int        i,ierr,rank;
53   PetscTruth isascii;
54 
55   PetscFunctionBegin;
56   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE);
57   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);
58   PetscValidHeaderSpecific(viewer,PETSC_VIEWER_COOKIE);
59 
60   ierr = MPI_Comm_rank(mapping->comm,&rank);CHKERRQ(ierr);
61   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
62   if (isascii) {
63     for (i=0; i<mapping->n; i++) {
64       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
65     }
66     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
67   } else {
68     SETERRQ1(1,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
69   }
70 
71   PetscFunctionReturn(0);
72 }
73 
74 #undef __FUNCT__
75 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
76 /*@C
77     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
78     ordering and a global parallel ordering.
79 
80     Not collective
81 
82     Input Parameter:
83 .   is - index set containing the global numbers for each local
84 
85     Output Parameter:
86 .   mapping - new mapping data structure
87 
88     Level: advanced
89 
90     Concepts: mapping^local to global
91 
92 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
93 @*/
94 int ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
95 {
96   int      n,*indices,ierr;
97   MPI_Comm comm;
98 
99   PetscFunctionBegin;
100   PetscValidHeaderSpecific(is,IS_COOKIE);
101 
102   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
103   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
104   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
105   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
106   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
107 
108   PetscFunctionReturn(0);
109 }
110 
#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingCreate"
/*@C
    ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
    ordering and a global parallel ordering.

    Not Collective, but communicator may have more than one process

    Input Parameters:
+   comm - MPI communicator
.   n - the number of local elements
-   indices - the global index for each local element

    Output Parameter:
.   mapping - new mapping data structure

    Notes:
    The indices are copied into the mapping; the caller retains ownership
    of the indices array.

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
@*/
int ISLocalToGlobalMappingCreate(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
{
  int ierr;

  PetscFunctionBegin;
  PetscValidIntPointer(indices);
  PetscValidPointer(mapping);

  /* create the reference-counted PETSc object header; registers the
     destroy and view routines used by the generic object machinery */
  PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
                    cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
  PetscLogObjectCreate(*mapping);
  PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(int));

  /* copy the caller's indices; n+1 avoids a zero-length allocation when n == 0 */
  (*mapping)->n       = n;
  ierr = PetscMalloc((n+1)*sizeof(int),&(*mapping)->indices);CHKERRQ(ierr);
  ierr = PetscMemcpy((*mapping)->indices,indices,n*sizeof(int));CHKERRQ(ierr);

  /*
      Do not create the global to local mapping. This is only created if
     ISGlobalToLocalMapping() is called
  */
  (*mapping)->globals = 0;
  PetscFunctionReturn(0);
}
157 
158 #undef __FUNCT__
159 #define __FUNCT__ "ISLocalToGlobalMappingBlock"
160 /*@C
161     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
162        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
163        and VecSetLocalToGlobalMappingBlock().
164 
165     Not Collective, but communicator may have more than one process
166 
167     Input Parameters:
168 +    inmap - original point-wise mapping
169 -    bs - block size
170 
171     Output Parameter:
172 .   outmap - block based mapping
173 
174     Level: advanced
175 
176     Concepts: mapping^local to global
177 
178 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
179 @*/
180 int ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,int bs,ISLocalToGlobalMapping *outmap)
181 {
182   int ierr,*ii,i,n;
183 
184   PetscFunctionBegin;
185 
186   if (bs > 1) {
187     n    = inmap->n/bs;
188     ierr = PetscMalloc(n*sizeof(int),&ii);CHKERRQ(ierr);
189     for (i=0; i<n; i++) {
190       ii[i] = inmap->indices[bs*i]/bs;
191     }
192     ierr = ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);CHKERRQ(ierr);
193     ierr = PetscFree(ii);CHKERRQ(ierr);
194   } else {
195     *outmap = inmap;
196     ierr    = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr);
197   }
198   PetscFunctionReturn(0);
199 }
200 
#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingDestroy"
/*@
   ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
   ordering and a global parallel ordering.

   Not Collective

   Input Parameters:
.  mapping - mapping data structure

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate()
@*/
int ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
{
  int ierr;
  PetscFunctionBegin;
  PetscValidPointer(mapping);
  /* reference counted: only free the object when the last reference is released */
  if (--mapping->refct > 0) PetscFunctionReturn(0);
  if (mapping->refct < 0) {
    SETERRQ(1,"Mapping already destroyed");
  }

  ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
  /* the global-to-local table is created lazily by
     ISGlobalToLocalMappingSetUp_Private(), so it may not exist */
  if (mapping->globals) {ierr = PetscFree(mapping->globals);CHKERRQ(ierr);}
  PetscLogObjectDestroy(mapping);
  PetscHeaderDestroy(mapping);
  PetscFunctionReturn(0);
}
232 
233 #undef __FUNCT__
234 #define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
235 /*@
236     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
237     a new index set using the global numbering defined in an ISLocalToGlobalMapping
238     context.
239 
240     Not collective
241 
242     Input Parameters:
243 +   mapping - mapping between local and global numbering
244 -   is - index set in local numbering
245 
246     Output Parameters:
247 .   newis - index set in global numbering
248 
249     Level: advanced
250 
251     Concepts: mapping^local to global
252 
253 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
254           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
255 @*/
256 int ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
257 {
258   int ierr,n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;
259 
260   PetscFunctionBegin;
261   PetscValidPointer(mapping);
262   PetscValidHeaderSpecific(is,IS_COOKIE);
263   PetscValidPointer(newis);
264 
265   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
266   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
267   idxmap = mapping->indices;
268 
269   ierr = PetscMalloc((n+1)*sizeof(int),&idxout);CHKERRQ(ierr);
270   for (i=0; i<n; i++) {
271     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax,i);
272     idxout[i] = idxmap[idxin[i]];
273   }
274   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
275   ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
276   ierr = PetscFree(idxout);CHKERRQ(ierr);
277   PetscFunctionReturn(0);
278 }
279 
280 /*MC
281    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
282    and converts them to the global numbering.
283 
284    Not collective
285 
286    Input Parameters:
287 +  mapping - the local to global mapping context
288 .  N - number of integers
289 -  in - input indices in local numbering
290 
291    Output Parameter:
292 .  out - indices in global numbering
293 
294    Synopsis:
295    int ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])
296 
297    Notes:
298    The in and out array parameters may be identical.
299 
300    Level: advanced
301 
302 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
303           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
304           AOPetscToApplication(), ISGlobalToLocalMappingApply()
305 
306     Concepts: mapping^local to global
307 
308 M*/
309 
310 /* -----------------------------------------------------------------------------------------*/
311 
312 #undef __FUNCT__
313 #define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
314 /*
315     Creates the global fields in the ISLocalToGlobalMapping structure
316 */
317 static int ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
318 {
319   int ierr,i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
320 
321   PetscFunctionBegin;
322   end   = 0;
323   start = 100000000;
324 
325   for (i=0; i<n; i++) {
326     if (idx[i] < 0) continue;
327     if (idx[i] < start) start = idx[i];
328     if (idx[i] > end)   end   = idx[i];
329   }
330   if (start > end) {start = 0; end = -1;}
331   mapping->globalstart = start;
332   mapping->globalend   = end;
333 
334   ierr             = PetscMalloc((end-start+2)*sizeof(int),&globals);CHKERRQ(ierr);
335   mapping->globals = globals;
336   for (i=0; i<end-start+1; i++) {
337     globals[i] = -1;
338   }
339   for (i=0; i<n; i++) {
340     if (idx[i] < 0) continue;
341     globals[idx[i] - start] = i;
342   }
343 
344   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(int));
345   PetscFunctionReturn(0);
346 }
347 
348 #undef __FUNCT__
349 #define __FUNCT__ "ISGlobalToLocalMappingApply"
350 /*@
351     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
352     specified with a global numbering.
353 
354     Not collective
355 
356     Input Parameters:
357 +   mapping - mapping between local and global numbering
358 .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
359            IS_GTOLM_DROP - drops the indices with no local value from the output list
360 .   n - number of global indices to map
361 -   idx - global indices to map
362 
363     Output Parameters:
364 +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
365 -   idxout - local index of each global index, one must pass in an array long enough
366              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
367              idxout == PETSC_NULL to determine the required length (returned in nout)
368              and then allocate the required space and call ISGlobalToLocalMappingApply()
369              a second time to set the values.
370 
371     Notes:
372     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
373 
374     This is not scalable in memory usage. Each processor requires O(Nglobal) size
375     array to compute these.
376 
377     Level: advanced
378 
379     Concepts: mapping^global to local
380 
381 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
382           ISLocalToGlobalMappingDestroy()
383 @*/
384 int ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
385                                   int n,const int idx[],int *nout,int idxout[])
386 {
387   int i,ierr,*globals,nf = 0,tmp,start,end;
388 
389   PetscFunctionBegin;
390   if (!mapping->globals) {
391     ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
392   }
393   globals = mapping->globals;
394   start   = mapping->globalstart;
395   end     = mapping->globalend;
396 
397   if (type == IS_GTOLM_MASK) {
398     if (idxout) {
399       for (i=0; i<n; i++) {
400         if (idx[i] < 0) idxout[i] = idx[i];
401         else if (idx[i] < start) idxout[i] = -1;
402         else if (idx[i] > end)   idxout[i] = -1;
403         else                     idxout[i] = globals[idx[i] - start];
404       }
405     }
406     if (nout) *nout = n;
407   } else {
408     if (idxout) {
409       for (i=0; i<n; i++) {
410         if (idx[i] < 0) continue;
411         if (idx[i] < start) continue;
412         if (idx[i] > end) continue;
413         tmp = globals[idx[i] - start];
414         if (tmp < 0) continue;
415         idxout[nf++] = tmp;
416       }
417     } else {
418       for (i=0; i<n; i++) {
419         if (idx[i] < 0) continue;
420         if (idx[i] < start) continue;
421         if (idx[i] > end) continue;
422         tmp = globals[idx[i] - start];
423         if (tmp < 0) continue;
424         nf++;
425       }
426     }
427     if (nout) *nout = nf;
428   }
429 
430   PetscFunctionReturn(0);
431 }
432 
#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
/*@C
    ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
     each index shared by more than one processor

    Collective on ISLocalToGlobalMapping

    Input Parameters:
.   mapping - the mapping from local to global indexing

    Output Parameter:
+   nproc - number of processors that are connected to this one
.   proc - neighboring processors
.   numproc - number of indices for each subdomain (processor)
-   indices - indices of local nodes shared with neighbor (sorted by global numbering)

    Notes:
    The output arrays must be freed with ISLocalToGlobalMappingRestoreInfo().

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingRestoreInfo()
@*/
int ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
{
  int         i,n = mapping->n,ierr,Ng,ng,max = 0,*lindices = mapping->indices;
  int         size,rank,*nprocs,*owner,nsends,*sends,j,*starts,*work,nmax,nrecvs,*recvs,proc;
  int         tag1,tag2,tag3,cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
  int         node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
  int         first_procs,first_numprocs,*first_indices;
  MPI_Request *recv_waits,*send_waits;
  MPI_Status  recv_status,*send_status,*recv_statuses;
  MPI_Comm    comm = mapping->comm;
  PetscTruth  debug = PETSC_FALSE;

  PetscFunctionBegin;
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (size == 1) {
    /* uniprocessor case: no neighbors; still allocate one-entry arrays so
       ISLocalToGlobalMappingRestoreInfo() can free them uniformly */
    *nproc         = 0;
    *procs         = PETSC_NULL;
    ierr           = PetscMalloc(sizeof(int),numprocs);CHKERRQ(ierr);
    (*numprocs)[0] = 0;
    ierr           = PetscMalloc(sizeof(int*),indices);CHKERRQ(ierr);
    (*indices)[0]  = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);CHKERRQ(ierr);

  /*
    Notes on ISLocalToGlobalMappingGetInfo

    globally owned node - the nodes that have been assigned to this processor in global
           numbering, just for this routine.

    nontrivial globally owned node - node assigned to this processor that is on a subdomain
           boundary (i.e. is has more than one local owner)

    locally owned node - node that exists on this processors subdomain

    nontrivial locally owned node - node that is not in the interior (i.e. has more than one
           local subdomain
  */
  /* three distinct tags keep the three message rounds from interfering */
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);

  /* Ng = global number of indices (one past the largest index anywhere) */
  for (i=0; i<n; i++) {
    if (lindices[i] > max) max = lindices[i];
  }
  ierr   = MPI_Allreduce(&max,&Ng,1,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
  Ng++;
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  /* block-partition the global index range: each rank globally owns
     indices [rstart, rstart+ng) just for the purposes of this routine */
  scale  = Ng/size + 1;
  ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
  rstart = scale*rank;

  /* determine ownership ranges of global indices */
  ierr = PetscMalloc((2*size+1)*sizeof(int),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);

  /* determine owners of each local node  */
  ierr = PetscMalloc((n+1)*sizeof(int),&owner);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    proc              = lindices[i]/scale; /* processor that globally owns this index */
    nprocs[size+proc] = 1;                 /* processor globally owns at least one of ours */
    owner[i]          = proc;
    nprocs[proc]++;                        /* count of how many that processor globally owns of ours */
  }
  nsends = 0; for (i=0; i<size; i++) nsends += nprocs[size + i];
  PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);

  /* inform other processors of number of messages and max length*/
  ierr = PetscMalloc(2*size*sizeof(int),&work);CHKERRQ(ierr);
  ierr   = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
  nmax   = work[rank];      /* longest message (in pairs) any sender will send me */
  nrecvs = work[size+rank]; /* number of messages I will receive */
  ierr   = PetscFree(work);CHKERRQ(ierr);
  PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);

  /* post receives for owned rows (posted before the sends to avoid unexpected-message buildup) */
  ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(int),&recvs);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPI_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* pack messages containing lists of local nodes to owners;
     each entry is a (global index, local index) pair */
  ierr       = PetscMalloc((2*n+1)*sizeof(int),&sends);CHKERRQ(ierr);
  ierr       = PetscMalloc((size+1)*sizeof(int),&starts);CHKERRQ(ierr);
  starts[0]  = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}
  for (i=0; i<n; i++) {
    sends[starts[owner[i]]++] = lindices[i];
    sends[starts[owner[i]]++] = i;
  }
  ierr = PetscFree(owner);CHKERRQ(ierr);
  /* the packing loop advanced starts[]; rebuild it for the sends below */
  starts[0]  = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}

  /* send the messages */
  ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends+1)*sizeof(int),&dest);CHKERRQ(ierr);
  cnt = 0;
  for (i=0; i<size; i++) {
    if (nprocs[i]) {
      ierr      = MPI_Isend(sends+starts[i],2*nprocs[i],MPI_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
      dest[cnt] = i;
      cnt++;
    }
  }
  ierr = PetscFree(starts);CHKERRQ(ierr);

  /* wait on receives */
  ierr = PetscMalloc((2*nrecvs+1)*sizeof(int),&source);CHKERRQ(ierr);
  len  = source + nrecvs; /* second half of the allocation holds the message lengths */
  cnt  = nrecvs;
  ierr = PetscMalloc((ng+1)*sizeof(int),&nownedsenders);CHKERRQ(ierr);
  ierr = PetscMemzero(nownedsenders,ng*sizeof(int));CHKERRQ(ierr);
  while (cnt) {
    ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
    /* unpack receives into our local space */
    ierr           = MPI_Get_count(&recv_status,MPI_INT,&len[imdex]);CHKERRQ(ierr);
    source[imdex]  = recv_status.MPI_SOURCE;
    len[imdex]     = len[imdex]/2; /* convert int count to (global,local) pair count */
    /* count how many local owners for each of my global owned indices */
    for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
    cnt--;
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);

  /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
  nowned  = 0;
  nownedm = 0;
  for (i=0; i<ng; i++) {
    if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
  }

  /* create single array to contain rank of all local owners of each globally owned index */
  ierr      = PetscMalloc((nownedm+1)*sizeof(int),&ownedsenders);CHKERRQ(ierr);
  ierr      = PetscMalloc((ng+1)*sizeof(int),&starts);CHKERRQ(ierr);
  starts[0] = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }

  /* for each nontrival globally owned node list all arriving processors */
  for (i=0; i<nrecvs; i++) {
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        ownedsenders[starts[node]++] = source[i];
      }
    }
  }

  if (debug) { /* -----------------------------------  */
    /* the fill loop above advanced starts[]; rebuild it before printing */
    starts[0]    = 0;
    for (i=1; i<ng; i++) {
      if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
      else starts[i] = starts[i-1];
    }
    for (i=0; i<ng; i++) {
      if (nownedsenders[i] > 1) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
        for (j=0; j<nownedsenders[i]; j++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
        }
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  }/* -----------------------------------  */

  /* wait on original sends */
  if (nsends) {
    ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(send_waits);CHKERRQ(ierr);
  ierr = PetscFree(sends);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  /* pack messages to send back to local owners */
  starts[0]    = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }
  nsends2 = nrecvs;
  ierr    = PetscMalloc((nsends2+1)*sizeof(int),&nprocs);CHKERRQ(ierr); /* length of each message */
  for (i=0; i<nrecvs; i++) {
    nprocs[i] = 1;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        nprocs[i] += 2 + nownedsenders[node];
      }
    }
  }
  nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
  ierr = PetscMalloc((nt+1)*sizeof(int),&sends2);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends2+1)*sizeof(int),&starts2);CHKERRQ(ierr);
  starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
  /*
     Each message is 1 + nprocs[i] long, and consists of
       (0) the number of nodes being sent back
       (1) the local node number,
       (2) the number of processors sharing it,
       (3) the processors sharing it
  */
  for (i=0; i<nsends2; i++) {
    cnt = 1;
    sends2[starts2[i]] = 0;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        sends2[starts2[i]]++;
        sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
        sends2[starts2[i]+cnt++] = nownedsenders[node];
        ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(int));CHKERRQ(ierr);
        cnt += nownedsenders[node];
      }
    }
  }

  /* send the message lengths */
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Send(&nprocs[i],1,MPI_INT,source[i],tag2,comm);CHKERRQ(ierr);
  }

  /* receive the message lengths */
  nrecvs2 = nsends;
  ierr = PetscMalloc((nrecvs2+1)*sizeof(int),&lens2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs2+1)*sizeof(int),&starts3);CHKERRQ(ierr);
  nt      = 0;
  for (i=0; i<nrecvs2; i++) {
    ierr =  MPI_Recv(&lens2[i],1,MPI_INT,dest[i],tag2,comm,&recv_status);CHKERRQ(ierr);
    nt   += lens2[i];
  }
  starts3[0] = 0;
  for (i=0; i<nrecvs2-1; i++) {
    starts3[i+1] = starts3[i] + lens2[i];
  }
  ierr = PetscMalloc((nt+1)*sizeof(int),&recvs2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs2; i++) {
    ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPI_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* send the messages */
  ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPI_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
  }

  /* wait on receives */
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  if (debug) { /* -----------------------------------  */
    cnt = 0;
    for (i=0; i<nrecvs2; i++) {
      nt = recvs2[cnt++];
      for (j=0; j<nt; j++) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
        for (k=0; k<recvs2[cnt+1]; k++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
        }
        cnt += 2 + recvs2[cnt+1];
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* count number subdomains for each local node */
  ierr = PetscMalloc(size*sizeof(int),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,size*sizeof(int));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        nprocs[recvs2[cnt+2+k]]++;
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
  *nproc    = nt;
  ierr = PetscMalloc((nt+1)*sizeof(int),procs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(int),numprocs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(int*),indices);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int),&bprocs);CHKERRQ(ierr);
  cnt       = 0;
  for (i=0; i<size; i++) {
    if (nprocs[i] > 0) {
      bprocs[i]        = cnt; /* bprocs maps a rank to its slot in the compressed output arrays */
      (*procs)[cnt]    = i;
      (*numprocs)[cnt] = nprocs[i];
      ierr             = PetscMalloc(nprocs[i]*sizeof(int),&(*indices)[cnt]);CHKERRQ(ierr);
      cnt++;
    }
  }

  /* make the list of subdomains for each nontrivial local node */
  ierr = PetscMemzero(*numprocs,nt*sizeof(int));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  ierr = PetscFree(bprocs);CHKERRQ(ierr);
  ierr = PetscFree(recvs2);CHKERRQ(ierr);

  /* sort the node indexing by their global numbers */
  nt = *nproc;
  for (i=0; i<nt; i++) {
    ierr = PetscMalloc(((*numprocs)[i])*sizeof(int),&tmp);CHKERRQ(ierr);
    for (j=0; j<(*numprocs)[i]; j++) {
      tmp[j] = lindices[(*indices)[i][j]];
    }
    ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
    ierr = PetscFree(tmp);CHKERRQ(ierr);
  }

  if (debug) { /* -----------------------------------  */
    nt = *nproc;
    for (i=0; i<nt; i++) {
      ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
      for (j=0; j<(*numprocs)[i]; j++) {
        ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
      }
      ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* wait on sends */
  if (nsends2) {
    ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }

  ierr = PetscFree(starts3);CHKERRQ(ierr);
  ierr = PetscFree(dest);CHKERRQ(ierr);
  ierr = PetscFree(send_waits);CHKERRQ(ierr);

  ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(starts);CHKERRQ(ierr);
  ierr = PetscFree(starts2);CHKERRQ(ierr);
  ierr = PetscFree(lens2);CHKERRQ(ierr);

  ierr = PetscFree(source);CHKERRQ(ierr);
  ierr = PetscFree(recvs);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);
  ierr = PetscFree(sends2);CHKERRQ(ierr);

  /* put the information about myself as the first entry in the list */
  first_procs    = (*procs)[0];
  first_numprocs = (*numprocs)[0];
  first_indices  = (*indices)[0];
  for (i=0; i<*nproc; i++) {
    if ((*procs)[i] == rank) {
      (*procs)[0]    = (*procs)[i];
      (*numprocs)[0] = (*numprocs)[i];
      (*indices)[0]  = (*indices)[i];
      (*procs)[i]    = first_procs;
      (*numprocs)[i] = first_numprocs;
      (*indices)[i]  = first_indices;
      break;
    }
  }

  PetscFunctionReturn(0);
}
845 
846 #undef __FUNCT__
847 #define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
848 /*@C
849     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
850 
851     Collective on ISLocalToGlobalMapping
852 
853     Input Parameters:
854 .   mapping - the mapping from local to global indexing
855 
856     Output Parameter:
857 +   nproc - number of processors that are connected to this one
858 .   proc - neighboring processors
859 .   numproc - number of indices for each processor
860 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
861 
862     Level: advanced
863 
864 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
865           ISLocalToGlobalMappingGetInfo()
866 @*/
867 int ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
868 {
869   int ierr,i;
870 
871   PetscFunctionBegin;
872   if (*procs) {ierr = PetscFree(*procs);CHKERRQ(ierr);}
873   if (*numprocs) {ierr = PetscFree(*numprocs);CHKERRQ(ierr);}
874   if (*indices) {
875     if ((*indices)[0]) {ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);}
876     for (i=1; i<*nproc; i++) {
877       if ((*indices)[i]) {ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);}
878     }
879     ierr = PetscFree(*indices);CHKERRQ(ierr);
880   }
881   PetscFunctionReturn(0);
882 }
883 
884 
885 
886 
887 
888 
889 
890 
891 
892 
893 
894 
895 
896 
897 
898 
899 
900 
901