xref: /petsc/src/vec/is/utils/isltog.c (revision 29bbc08cd5461c366aba6645924e8ff42acd1de0)
1 /*$Id: isltog.c,v 1.57 2000/08/08 21:51:55 bsmith Exp bsmith $*/
2 
3 #include "petscsys.h"   /*I "petscsys.h" I*/
4 #include "src/vec/is/isimpl.h"    /*I "petscis.h"  I*/
5 
6 #undef __FUNC__
7 #define __FUNC__ /*<a name="ISLocalToGlobalMappingGetSize"></a>*/"ISLocalToGlobalMappingGetSize"
8 /*@C
9     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
10 
11     Not Collective
12 
13     Input Parameter:
14 .   ltog - local to global mapping
15 
16     Output Parameter:
17 .   n - the number of entries in the local mapping
18 
19     Level: advanced
20 
21 .keywords: IS, local-to-global mapping, create
22 
23 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
24 @*/
25 int ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,int *n)
26 {
27   PetscFunctionBegin;
28   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE);
29   *n = mapping->n;
30   PetscFunctionReturn(0);
31 }
32 
33 #undef __FUNC__
34 #define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingView"
35 /*@C
36     ISLocalToGlobalMappingView - View a local to global mapping
37 
38     Not Collective
39 
40     Input Parameters:
41 +   ltog - local to global mapping
42 -   viewer - viewer
43 
44     Level: advanced
45 
46 .keywords: IS, local-to-global mapping, create
47 
48 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
49 @*/
50 int ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,Viewer viewer)
51 {
52   int        i,ierr,rank;
53   PetscTruth isascii;
54 
55   PetscFunctionBegin;
56   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE);
57   if (!viewer) viewer = VIEWER_STDOUT_(mapping->comm);
58   PetscValidHeaderSpecific(viewer,VIEWER_COOKIE);
59   PetscCheckSameComm(mapping,viewer);
60 
61   ierr = MPI_Comm_rank(mapping->comm,&rank);CHKERRQ(ierr);
62   ierr = PetscTypeCompare((PetscObject)viewer,ASCII_VIEWER,&isascii);CHKERRQ(ierr);
63   if (isascii) {
64     for (i=0; i<mapping->n; i++) {
65       ierr = ViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
66     }
67     ierr = ViewerFlush(viewer);CHKERRQ(ierr);
68   } else {
69     SETERRQ1(1,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
70   }
71 
72   PetscFunctionReturn(0);
73 }
74 
75 #undef __FUNC__
76 #define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingCreateIS"
77 /*@C
78     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
79     ordering and a global parallel ordering.
80 
81     Not collective
82 
83     Input Parameter:
84 .   is - index set containing the global numbers for each local
85 
86     Output Parameter:
87 .   mapping - new mapping data structure
88 
89     Level: advanced
90 
91 .keywords: IS, local-to-global mapping, create
92 
93 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
94 @*/
95 int ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
96 {
97   int      n,*indices,ierr;
98   MPI_Comm comm;
99 
100   PetscFunctionBegin;
101   PetscValidHeaderSpecific(is,IS_COOKIE);
102 
103   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
104   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
105   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
106   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
107   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
108 
109   PetscFunctionReturn(0);
110 }
111 
112 #undef __FUNC__
113 #define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingCreate"
114 /*@C
115     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
116     ordering and a global parallel ordering.
117 
118     Not Collective, but communicator may have more than one process
119 
120     Input Parameters:
121 +   comm - MPI communicator
122 .   n - the number of local elements
123 -   indices - the global index for each local element
124 
125     Output Parameter:
126 .   mapping - new mapping data structure
127 
128     Level: advanced
129 
130 .keywords: IS, local-to-global mapping, create
131 
132 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
133 @*/
134 int ISLocalToGlobalMappingCreate(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
135 {
136   int ierr;
137 
138   PetscFunctionBegin;
139   PetscValidIntPointer(indices);
140   PetscValidPointer(mapping);
141 
142   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
143                     cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
144   PLogObjectCreate(*mapping);
145   PLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(int));
146 
147   (*mapping)->n       = n;
148   (*mapping)->indices = (int*)PetscMalloc((n+1)*sizeof(int));CHKPTRQ((*mapping)->indices);
149   ierr = PetscMemcpy((*mapping)->indices,indices,n*sizeof(int));CHKERRQ(ierr);
150 
151   /*
152       Do not create the global to local mapping. This is only created if
153      ISGlobalToLocalMapping() is called
154   */
155   (*mapping)->globals = 0;
156   PetscFunctionReturn(0);
157 }
158 
#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingDestroy"
/*@
   ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
   ordering and a global parallel ordering.

   Not Collective

   Input Parameters:
.  mapping - mapping data structure

   Level: advanced

.keywords: IS, local-to-global mapping, destroy

.seealso: ISLocalToGlobalMappingCreate()
@*/
int ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
{
  int ierr;
  PetscFunctionBegin;
  PetscValidPointer(mapping);
  /* reference counted: only the final release actually frees the object */
  if (--mapping->refct > 0) PetscFunctionReturn(0);
  if (mapping->refct < 0) {
    SETERRQ(1,"Mapping already destroyed");
  }

  ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
  /* the global-to-local table is built lazily, so it may never have been allocated */
  if (mapping->globals) {ierr = PetscFree(mapping->globals);CHKERRQ(ierr);}
  PLogObjectDestroy(mapping);
  PetscHeaderDestroy(mapping);
  PetscFunctionReturn(0);
}
192 
193 #undef __FUNC__
194 #define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingApplyIS"
195 /*@
196     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
197     a new index set using the global numbering defined in an ISLocalToGlobalMapping
198     context.
199 
200     Not collective
201 
202     Input Parameters:
203 +   mapping - mapping between local and global numbering
204 -   is - index set in local numbering
205 
206     Output Parameters:
207 .   newis - index set in global numbering
208 
209     Level: advanced
210 
211 .keywords: IS, local-to-global mapping, apply
212 
213 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
214           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
215 @*/
216 int ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
217 {
218   int ierr,n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;
219 
220   PetscFunctionBegin;
221   PetscValidPointer(mapping);
222   PetscValidHeaderSpecific(is,IS_COOKIE);
223   PetscValidPointer(newis);
224 
225   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
226   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
227   idxmap = mapping->indices;
228 
229   idxout = (int*)PetscMalloc((n+1)*sizeof(int));CHKPTRQ(idxout);
230   for (i=0; i<n; i++) {
231     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax,i);
232     idxout[i] = idxmap[idxin[i]];
233   }
234   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
235   ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
236   ierr = PetscFree(idxout);CHKERRQ(ierr);
237   PetscFunctionReturn(0);
238 }
239 
240 /*MC
241    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
242    and converts them to the global numbering.
243 
244    Not collective
245 
246    Input Parameters:
247 +  mapping - the local to global mapping context
248 .  N - number of integers
249 -  in - input indices in local numbering
250 
251    Output Parameter:
252 .  out - indices in global numbering
253 
254    Synopsis:
255    ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])
256 
257    Notes:
258    The in and out array parameters may be identical.
259 
260    Level: advanced
261 
262 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
263           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
264           AOPetscToApplication(), ISGlobalToLocalMappingApply()
265 
266 .keywords: local-to-global, mapping, apply
267 
268 M*/
269 
270 /* -----------------------------------------------------------------------------------------*/
271 
272 #undef __FUNC__
273 #define __FUNC__ /*<a name=""></a>*/"ISGlobalToLocalMappingSetUp_Private"
274 /*
275     Creates the global fields in the ISLocalToGlobalMapping structure
276 */
277 static int ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
278 {
279   int i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
280 
281   PetscFunctionBegin;
282   end   = 0;
283   start = 100000000;
284 
285   for (i=0; i<n; i++) {
286     if (idx[i] < 0) continue;
287     if (idx[i] < start) start = idx[i];
288     if (idx[i] > end)   end   = idx[i];
289   }
290   if (start > end) {start = 0; end = -1;}
291   mapping->globalstart = start;
292   mapping->globalend   = end;
293 
294   globals = mapping->globals = (int*)PetscMalloc((end-start+2)*sizeof(int));CHKPTRQ(mapping->globals);
295   for (i=0; i<end-start+1; i++) {
296     globals[i] = -1;
297   }
298   for (i=0; i<n; i++) {
299     if (idx[i] < 0) continue;
300     globals[idx[i] - start] = i;
301   }
302 
303   PLogObjectMemory(mapping,(end-start+1)*sizeof(int));
304   PetscFunctionReturn(0);
305 }
306 
307 #undef __FUNC__
308 #define __FUNC__ /*<a name=""></a>*/"ISGlobalToLocalMappingApply"
309 /*@
310     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
311     specified with a global numbering.
312 
313     Not collective
314 
315     Input Parameters:
316 +   mapping - mapping between local and global numbering
317 .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
318            IS_GTOLM_DROP - drops the indices with no local value from the output list
319 .   n - number of global indices to map
320 -   idx - global indices to map
321 
322     Output Parameters:
323 +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
324 -   idxout - local index of each global index, one must pass in an array long enough
325              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
326              idxout == PETSC_NULL to determine the required length (returned in nout)
327              and then allocate the required space and call ISGlobalToLocalMappingApply()
328              a second time to set the values.
329 
330     Notes:
331     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
332 
333     This is not scalable in memory usage. Each processor requires O(Nglobal) size
334     array to compute these.
335 
336     Level: advanced
337 
338 .keywords: IS, global-to-local mapping, apply
339 
340 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
341           ISLocalToGlobalMappingDestroy()
342 @*/
343 int ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
344                                   int n,const int idx[],int *nout,int idxout[])
345 {
346   int i,ierr,*globals,nf = 0,tmp,start,end;
347 
348   PetscFunctionBegin;
349   if (!mapping->globals) {
350     ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
351   }
352   globals = mapping->globals;
353   start   = mapping->globalstart;
354   end     = mapping->globalend;
355 
356   if (type == IS_GTOLM_MASK) {
357     if (idxout) {
358       for (i=0; i<n; i++) {
359         if (idx[i] < 0) idxout[i] = idx[i];
360         else if (idx[i] < start) idxout[i] = -1;
361         else if (idx[i] > end)   idxout[i] = -1;
362         else                     idxout[i] = globals[idx[i] - start];
363       }
364     }
365     if (nout) *nout = n;
366   } else {
367     if (idxout) {
368       for (i=0; i<n; i++) {
369         if (idx[i] < 0) continue;
370         if (idx[i] < start) continue;
371         if (idx[i] > end) continue;
372         tmp = globals[idx[i] - start];
373         if (tmp < 0) continue;
374         idxout[nf++] = tmp;
375       }
376     } else {
377       for (i=0; i<n; i++) {
378         if (idx[i] < 0) continue;
379         if (idx[i] < start) continue;
380         if (idx[i] > end) continue;
381         tmp = globals[idx[i] - start];
382         if (tmp < 0) continue;
383         nf++;
384       }
385     }
386     if (nout) *nout = nf;
387   }
388 
389   PetscFunctionReturn(0);
390 }
391 
392 #undef __FUNC__
393 #define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingGetInfo"
394 /*@C
395     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
396      each index shared by more than one processor
397 
398     Collective on ISLocalToGlobalMapping
399 
400     Input Parameters:
401 .   mapping - the mapping from local to global indexing
402 
403     Output Parameter:
404 +   nproc - number of processors that are connected to this one
405 .   proc - neighboring processors
406 .   numproc - number of indices for each subdomain (processor)
407 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
408 
409     Level: advanced
410 
411 .keywords: IS, local-to-global mapping, neighbors
412 
413 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
414           ISLocalToGlobalMappingRestoreInfo()
415 @*/
416 int ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
417 {
418   int         i,n = mapping->n,ierr,Ng,ng = PETSC_DECIDE,max = 0,*lindices = mapping->indices;
419   int         size,rank,*nprocs,*owner,nsends,*sends,j,*starts,*work,nmax,nrecvs,*recvs,proc;
420   int         tag1,tag2,tag3,cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
421   int         node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
422   int         first_procs,first_numprocs,*first_indices;
423   MPI_Request *recv_waits,*send_waits;
424   MPI_Status  recv_status,*send_status,*recv_statuses;
425   MPI_Comm    comm = mapping->comm;
426   PetscTruth  debug = PETSC_FALSE;
427 
428   PetscFunctionBegin;
429   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
430   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
431   if (size == 1) {
432     *nproc    = 0;
433     *procs    = PETSC_NULL;
434     *numprocs = (int*)PetscMalloc(sizeof(int));CHKPTRQ(*numprocs);
435     (*numprocs)[0] = 0;
436     *indices  = (int**)PetscMalloc(sizeof(int*));CHKPTRQ(*indices);
437     (*indices)[0]  = PETSC_NULL;
438     PetscFunctionReturn(0);
439   }
440 
441   ierr = OptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);CHKERRQ(ierr);
442 
443   /*
444     Notes on ISLocalToGlobalMappingGetInfo
445 
446     globally owned node - the nodes that have been assigned to this processor in global
447            numbering, just for this routine.
448 
449     nontrivial globally owned node - node assigned to this processor that is on a subdomain
450            boundary (i.e. is has more than one local owner)
451 
452     locally owned node - node that exists on this processors subdomain
453 
454     nontrivial locally owned node - node that is not in the interior (i.e. has more than one
455            local subdomain
456   */
457   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
458   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
459   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);
460 
461   for (i=0; i<n; i++) {
462     if (lindices[i] > max) max = lindices[i];
463   }
464   ierr   = MPI_Allreduce(&max,&Ng,1,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
465   Ng++;
466   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
467   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
468   scale  = Ng/size + 1;
469   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
470   rstart = scale*rank;
471 
472   /* determine ownership ranges of global indices */
473   nprocs    = (int*)PetscMalloc((2*size+1)*sizeof(int));CHKPTRQ(nprocs);
474   ierr      = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);
475 
476   /* determine owners of each local node  */
477   owner    = (int*)PetscMalloc((n+1)*sizeof(int));CHKPTRQ(owner);
478   for (i=0; i<n; i++) {
479     proc              = lindices[i]/scale; /* processor that globally owns this index */
480     nprocs[size+proc] = 1;                 /* processor globally owns at least one of ours */
481     owner[i]          = proc;
482     nprocs[proc]++;                        /* count of how many that processor globally owns of ours */
483   }
484   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[size + i];
485   PLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);
486 
487   /* inform other processors of number of messages and max length*/
488   work   = (int*)PetscMalloc(2*size*sizeof(int));CHKPTRQ(work);
489   ierr   = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
490   nmax   = work[rank];
491   nrecvs = work[size+rank];
492   ierr   = PetscFree(work);CHKERRQ(ierr);
493   PLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);
494 
495   /* post receives for owned rows */
496   recvs      = (int*)PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(int));CHKPTRQ(recvs);
497   recv_waits = (MPI_Request*)PetscMalloc((nrecvs+1)*sizeof(MPI_Request));CHKPTRQ(recv_waits);
498   for (i=0; i<nrecvs; i++) {
499     ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPI_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
500   }
501 
502   /* pack messages containing lists of local nodes to owners */
503   sends    = (int*)PetscMalloc((2*n+1)*sizeof(int));CHKPTRQ(sends);
504   starts   = (int*)PetscMalloc((size+1)*sizeof(int));CHKPTRQ(starts);
505   starts[0]  = 0;
506   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}
507   for (i=0; i<n; i++) {
508     sends[starts[owner[i]]++] = lindices[i];
509     sends[starts[owner[i]]++] = i;
510   }
511   ierr = PetscFree(owner);CHKERRQ(ierr);
512   starts[0]  = 0;
513   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}
514 
515   /* send the messages */
516   send_waits = (MPI_Request*)PetscMalloc((nsends+1)*sizeof(MPI_Request));CHKPTRQ(send_waits);
517   dest       = (int*)PetscMalloc((nsends+1)*sizeof(int));CHKPTRQ(dest);
518   cnt = 0;
519   for (i=0; i<size; i++) {
520     if (nprocs[i]) {
521       ierr      = MPI_Isend(sends+starts[i],2*nprocs[i],MPI_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
522       dest[cnt] = i;
523       cnt++;
524     }
525   }
526   ierr = PetscFree(starts);CHKERRQ(ierr);
527 
528   /* wait on receives */
529   source = (int*)PetscMalloc((2*nrecvs+1)*sizeof(int));CHKPTRQ(source);
530   len    = source + nrecvs;
531   cnt    = nrecvs;
532   nownedsenders = (int*)PetscMalloc((ng+1)*sizeof(int));CHKPTRQ(nownedsenders);
533   ierr          = PetscMemzero(nownedsenders,ng*sizeof(int));CHKERRQ(ierr);
534   while (cnt) {
535     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
536     /* unpack receives into our local space */
537     ierr           = MPI_Get_count(&recv_status,MPI_INT,&len[imdex]);CHKERRQ(ierr);
538     source[imdex]  = recv_status.MPI_SOURCE;
539     len[imdex]     = len[imdex]/2;
540     /* count how many local owners for each of my global owned indices */
541     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
542     cnt--;
543   }
544   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
545 
546   /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
547   nowned  = 0;
548   nownedm = 0;
549   for (i=0; i<ng; i++) {
550     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
551   }
552 
553   /* create single array to contain rank of all local owners of each globally owned index */
554   ownedsenders = (int*)PetscMalloc((nownedm+1)*sizeof(int));CHKERRQ(ierr);
555   starts       = (int*)PetscMalloc((ng+1)*sizeof(int));CHKPTRQ(starts);
556   starts[0]    = 0;
557   for (i=1; i<ng; i++) {
558     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
559     else starts[i] = starts[i-1];
560   }
561 
562   /* for each nontrival globally owned node list all arriving processors */
563   for (i=0; i<nrecvs; i++) {
564     for (j=0; j<len[i]; j++) {
565       node = recvs[2*i*nmax+2*j]-rstart;
566       if (nownedsenders[node] > 1) {
567         ownedsenders[starts[node]++] = source[i];
568       }
569     }
570   }
571 
572   if (debug) { /* -----------------------------------  */
573     starts[0]    = 0;
574     for (i=1; i<ng; i++) {
575       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
576       else starts[i] = starts[i-1];
577     }
578     for (i=0; i<ng; i++) {
579       if (nownedsenders[i] > 1) {
580         ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
581         for (j=0; j<nownedsenders[i]; j++) {
582           ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
583         }
584         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
585       }
586     }
587     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
588   }/* -----------------------------------  */
589 
590   /* wait on original sends */
591   if (nsends) {
592     send_status = (MPI_Status*)PetscMalloc(nsends*sizeof(MPI_Status));CHKPTRQ(send_status);
593     ierr        = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
594     ierr        = PetscFree(send_status);CHKERRQ(ierr);
595   }
596   ierr = PetscFree(send_waits);CHKERRQ(ierr);
597   ierr = PetscFree(sends);CHKERRQ(ierr);
598   ierr = PetscFree(nprocs);CHKERRQ(ierr);
599 
600   /* pack messages to send back to local owners */
601   starts[0]    = 0;
602   for (i=1; i<ng; i++) {
603     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
604     else starts[i] = starts[i-1];
605   }
606   nsends2 = nrecvs;
607   nprocs  = (int*)PetscMalloc((nsends2+1)*sizeof(int));CHKPTRQ(nprocs); /* length of each message */
608   cnt    = 0;
609   for (i=0; i<nrecvs; i++) {
610     nprocs[i] = 1;
611     for (j=0; j<len[i]; j++) {
612       node = recvs[2*i*nmax+2*j]-rstart;
613       if (nownedsenders[node] > 1) {
614         nprocs[i] += 2 + nownedsenders[node];
615       }
616     }
617   }
618   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
619   sends2     = (int*)PetscMalloc((nt+1)*sizeof(int));CHKPTRQ(sends2);
620   starts2    = (int*)PetscMalloc((nsends2+1)*sizeof(int));CHKPTRQ(starts2);
621   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
622   /*
623      Each message is 1 + nprocs[i] long, and consists of
624        (0) the number of nodes being sent back
625        (1) the local node number,
626        (2) the number of processors sharing it,
627        (3) the processors sharing it
628   */
629   for (i=0; i<nsends2; i++) {
630     cnt = 1;
631     sends2[starts2[i]] = 0;
632     for (j=0; j<len[i]; j++) {
633       node = recvs[2*i*nmax+2*j]-rstart;
634       if (nownedsenders[node] > 1) {
635         sends2[starts2[i]]++;
636         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
637         sends2[starts2[i]+cnt++] = nownedsenders[node];
638         ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(int));CHKERRQ(ierr);
639         cnt += nownedsenders[node];
640       }
641     }
642   }
643 
644   /* send the message lengths */
645   for (i=0; i<nsends2; i++) {
646     ierr = MPI_Send(&nprocs[i],1,MPI_INT,source[i],tag2,comm);CHKERRQ(ierr);
647   }
648 
649   /* receive the message lengths */
650   nrecvs2 = nsends;
651   lens2   = (int*)PetscMalloc((nrecvs2+1)*sizeof(int));CHKPTRQ(lens2);
652   starts3 = (int*)PetscMalloc((nrecvs2+1)*sizeof(int));CHKPTRQ(starts3);
653   nt      = 0;
654   for (i=0; i<nrecvs2; i++) {
655     ierr =  MPI_Recv(&lens2[i],1,MPI_INT,dest[i],tag2,comm,&recv_status);CHKERRQ(ierr);
656     nt   += lens2[i];
657   }
658   starts3[0] = 0;
659   for (i=0; i<nrecvs2-1; i++) {
660     starts3[i+1] = starts3[i] + lens2[i];
661   }
662   recvs2     = (int*)PetscMalloc((nt+1)*sizeof(int));CHKPTRQ(recvs2);
663   recv_waits = (MPI_Request*)PetscMalloc((nrecvs2+1)*sizeof(MPI_Request));CHKPTRQ(recv_waits);
664   for (i=0; i<nrecvs2; i++) {
665     ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPI_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
666   }
667 
668   /* send the messages */
669   send_waits = (MPI_Request*)PetscMalloc((nsends2+1)*sizeof(MPI_Request));CHKPTRQ(send_waits);
670   for (i=0; i<nsends2; i++) {
671     ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPI_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
672   }
673 
674   /* wait on receives */
675   recv_statuses = (MPI_Status*)PetscMalloc((nrecvs2+1)*sizeof(MPI_Status));CHKPTRQ(recv_statuses);
676   ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
677   ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
678   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
679   ierr = PetscFree(nprocs);CHKERRQ(ierr);
680 
681   if (debug) { /* -----------------------------------  */
682     cnt = 0;
683     for (i=0; i<nrecvs2; i++) {
684       nt = recvs2[cnt++];
685       for (j=0; j<nt; j++) {
686         ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
687         for (k=0; k<recvs2[cnt+1]; k++) {
688           ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
689         }
690         cnt += 2 + recvs2[cnt+1];
691         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
692       }
693     }
694     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
695   } /* -----------------------------------  */
696 
697   /* count number subdomains for each local node */
698   nprocs = (int*)PetscMalloc(size*sizeof(int));CHKPTRQ(nprocs);
699   ierr   = PetscMemzero(nprocs,size*sizeof(int));CHKERRQ(ierr);
700   cnt    = 0;
701   for (i=0; i<nrecvs2; i++) {
702     nt = recvs2[cnt++];
703     for (j=0; j<nt; j++) {
704       for (k=0; k<recvs2[cnt+1]; k++) {
705         nprocs[recvs2[cnt+2+k]]++;
706       }
707       cnt += 2 + recvs2[cnt+1];
708     }
709   }
710   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
711   *nproc    = nt;
712   *procs    = (int*)PetscMalloc((nt+1)*sizeof(int));CHKPTRQ(procs);
713   *numprocs = (int*)PetscMalloc((nt+1)*sizeof(int));CHKPTRQ(numprocs);
714   *indices  = (int**)PetscMalloc((nt+1)*sizeof(int*));CHKPTRQ(procs);
715   bprocs    = (int*)PetscMalloc(size*sizeof(int));CHKERRQ(ierr);
716   cnt       = 0;
717   for (i=0; i<size; i++) {
718     if (nprocs[i] > 0) {
719       bprocs[i]        = cnt;
720       (*procs)[cnt]    = i;
721       (*numprocs)[cnt] = nprocs[i];
722       (*indices)[cnt]  = (int*)PetscMalloc(nprocs[i]*sizeof(int));CHKPTRQ((*indices)[cnt]);
723       cnt++;
724     }
725   }
726 
727   /* make the list of subdomains for each nontrivial local node */
728   ierr = PetscMemzero(*numprocs,nt*sizeof(int));CHKERRQ(ierr);
729   cnt  = 0;
730   for (i=0; i<nrecvs2; i++) {
731     nt = recvs2[cnt++];
732     for (j=0; j<nt; j++) {
733       for (k=0; k<recvs2[cnt+1]; k++) {
734         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
735       }
736       cnt += 2 + recvs2[cnt+1];
737     }
738   }
739   ierr = PetscFree(bprocs);CHKERRQ(ierr);
740   ierr = PetscFree(recvs2);CHKERRQ(ierr);
741 
742   /* sort the node indexing by their global numbers */
743   nt = *nproc;
744   for (i=0; i<nt; i++) {
745     tmp = (int*)PetscMalloc(((*numprocs)[i])*sizeof(int));CHKPTRQ(tmp);
746     for (j=0; j<(*numprocs)[i]; j++) {
747       tmp[j] = lindices[(*indices)[i][j]];
748     }
749     ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
750     ierr = PetscFree(tmp);CHKERRQ(ierr);
751   }
752 
753   if (debug) { /* -----------------------------------  */
754     nt = *nproc;
755     for (i=0; i<nt; i++) {
756       ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
757       for (j=0; j<(*numprocs)[i]; j++) {
758         ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
759       }
760       ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
761     }
762     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
763   } /* -----------------------------------  */
764 
765   /* wait on sends */
766   if (nsends2) {
767     send_status = (MPI_Status*)PetscMalloc(nsends2*sizeof(MPI_Status));CHKPTRQ(send_status);
768     ierr        = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
769     ierr        = PetscFree(send_status);CHKERRQ(ierr);
770   }
771 
772   ierr = PetscFree(starts3);CHKERRQ(ierr);
773   ierr = PetscFree(dest);CHKERRQ(ierr);
774   ierr = PetscFree(send_waits);CHKERRQ(ierr);
775 
776   ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
777   ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
778   ierr = PetscFree(starts);CHKERRQ(ierr);
779   ierr = PetscFree(starts2);CHKERRQ(ierr);
780   ierr = PetscFree(lens2);CHKERRQ(ierr);
781 
782   ierr = PetscFree(source);CHKERRQ(ierr);
783   ierr = PetscFree(recvs);CHKERRQ(ierr);
784   ierr = PetscFree(nprocs);CHKERRQ(ierr);
785   ierr = PetscFree(sends2);CHKERRQ(ierr);
786 
787   /* put the information about myself as the first entry in the list */
788   first_procs    = (*procs)[0];
789   first_numprocs = (*numprocs)[0];
790   first_indices  = (*indices)[0];
791   for (i=0; i<*nproc; i++) {
792     if ((*procs)[i] == rank) {
793       (*procs)[0]    = (*procs)[i];
794       (*numprocs)[0] = (*numprocs)[i];
795       (*indices)[0]  = (*indices)[i];
796       (*procs)[i]    = first_procs;
797       (*numprocs)[i] = first_numprocs;
798       (*indices)[i]  = first_indices;
799       break;
800     }
801   }
802 
803   PetscFunctionReturn(0);
804 }
805 
806 #undef __FUNC__
807 #define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingRestoreInfo"
808 /*@C
809     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
810 
811     Collective on ISLocalToGlobalMapping
812 
813     Input Parameters:
814 .   mapping - the mapping from local to global indexing
815 
816     Output Parameter:
817 +   nproc - number of processors that are connected to this one
818 .   proc - neighboring processors
819 .   numproc - number of indices for each processor
820 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
821 
822     Level: advanced
823 
824 .keywords: IS, local-to-global mapping, neighbors
825 
826 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
827           ISLocalToGlobalMappingGetInfo()
828 @*/
829 int ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
830 {
831   int ierr,i;
832 
833   PetscFunctionBegin;
834   if (*procs) {ierr = PetscFree(*procs);CHKERRQ(ierr);}
835   if (*numprocs) {ierr = PetscFree(*numprocs);CHKERRQ(ierr);}
836   if (*indices) {
837     if ((*indices)[0]) {ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);}
838     for (i=1; i<*nproc; i++) {
839       if ((*indices)[i]) {ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);}
840     }
841     ierr = PetscFree(*indices);CHKERRQ(ierr);
842   }
843   PetscFunctionReturn(0);
844 }
845 
846 
847 
848 
849 
850 
851 
852 
853 
854 
855 
856 
857 
858 
859 
860 
861 
862 
863