xref: /petsc/src/vec/is/utils/isltog.c (revision 8bc8193efbc389280f83b3d41dffa9e2d23e2ace)
1 
2 #include "petscsys.h"   /*I "petscsys.h" I*/
3 #include "src/vec/is/isimpl.h"    /*I "petscis.h"  I*/
4 
5 EXTERN PetscErrorCode VecInitializePackage(char *);
6 PetscCookie IS_LTOGM_COOKIE = -1;
7 
8 #undef __FUNCT__
9 #define __FUNCT__ "ISLocalToGlobalMappingGetSize"
10 /*@C
11     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
12 
13     Not Collective
14 
15     Input Parameter:
16 .   ltog - local to global mapping
17 
18     Output Parameter:
19 .   n - the number of entries in the local mapping
20 
21     Level: advanced
22 
23     Concepts: mapping^local to global
24 
25 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
26 @*/
27 PetscErrorCode ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
28 {
29   PetscFunctionBegin;
30   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
31   PetscValidIntPointer(n,2);
32   *n = mapping->n;
33   PetscFunctionReturn(0);
34 }
35 
36 #undef __FUNCT__
37 #define __FUNCT__ "ISLocalToGlobalMappingView"
38 /*@C
39     ISLocalToGlobalMappingView - View a local to global mapping
40 
41     Not Collective
42 
43     Input Parameters:
44 +   ltog - local to global mapping
45 -   viewer - viewer
46 
47     Level: advanced
48 
49     Concepts: mapping^local to global
50 
51 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
52 @*/
53 PetscErrorCode ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
54 {
55   PetscInt        i;
56   PetscMPIInt     rank;
57   PetscTruth      iascii;
58   PetscErrorCode  ierr;
59 
60   PetscFunctionBegin;
61   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
62   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);
63   PetscValidHeaderSpecific(viewer,PETSC_VIEWER_COOKIE,2);
64 
65   ierr = MPI_Comm_rank(mapping->comm,&rank);CHKERRQ(ierr);
66   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
67   if (iascii) {
68     for (i=0; i<mapping->n; i++) {
69       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
70     }
71     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
72   } else {
73     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
74   }
75 
76   PetscFunctionReturn(0);
77 }
78 
79 #undef __FUNCT__
80 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
81 /*@C
82     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
83     ordering and a global parallel ordering.
84 
85     Not collective
86 
87     Input Parameter:
88 .   is - index set containing the global numbers for each local
89 
90     Output Parameter:
91 .   mapping - new mapping data structure
92 
93     Level: advanced
94 
95     Concepts: mapping^local to global
96 
97 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
98 @*/
99 PetscErrorCode ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
100 {
101   PetscErrorCode ierr;
102   PetscInt      n,*indices;
103   MPI_Comm comm;
104 
105   PetscFunctionBegin;
106   PetscValidHeaderSpecific(is,IS_COOKIE,1);
107   PetscValidPointer(mapping,2);
108 
109   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
110   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
111   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
112   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
113   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
114 
115   PetscFunctionReturn(0);
116 }
117 
118 
119 #undef __FUNCT__
120 #define __FUNCT__ "ISLocalToGlobalMappingCreate"
121 /*@C
122     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
123     ordering and a global parallel ordering.
124 
125     Not Collective, but communicator may have more than one process
126 
127     Input Parameters:
128 +   comm - MPI communicator
129 .   n - the number of local elements
130 -   indices - the global index for each local element
131 
132     Output Parameter:
133 .   mapping - new mapping data structure
134 
135     Level: advanced
136 
137     Concepts: mapping^local to global
138 
139 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
140 @*/
141 PetscErrorCode ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
142 {
143   PetscErrorCode ierr;
144   PetscInt       *in;
145 
146   PetscFunctionBegin;
147   PetscValidIntPointer(indices,3);
148   PetscValidPointer(mapping,4);
149   ierr = PetscMalloc(n*sizeof(PetscInt),&in);CHKERRQ(ierr);
150   ierr = PetscMemcpy(in,indices,n*sizeof(PetscInt));CHKERRQ(ierr);
151   ierr = ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);CHKERRQ(ierr);
152   PetscFunctionReturn(0);
153 }
154 
#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingCreateNC"
/*@C
    ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
    ordering and a global parallel ordering.

    Not Collective, but communicator may have more than one process

    Input Parameters:
+   comm - MPI communicator
.   n - the number of local elements
-   indices - the global index for each local element

    Output Parameter:
.   mapping - new mapping data structure

    Level: developer

    Notes: Does not copy the indices, just keeps the pointer to the indices. The ISLocalToGlobalMappingDestroy()
    will free the space so it must be obtained with PetscMalloc() and it must not be freed elsewhere.

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingCreateNC(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidIntPointer(indices,3);
  PetscValidPointer(mapping,4);
  /* clear the output so a failure below leaves *mapping in a defined state */
  *mapping = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  /* static builds must initialize the Vec package here; dynamic builds do it at library load */
  ierr = VecInitializePackage(PETSC_NULL);CHKERRQ(ierr);
#endif
  if (IS_LTOGM_COOKIE == -1) {
    /* first mapping ever created in this run: register the class for logging */
    ierr = PetscLogClassRegister(&IS_LTOGM_COOKIE,"IS Local to global mapping");CHKERRQ(ierr);
  }

  PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
                    cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
  PetscLogObjectCreate(*mapping);
  PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(PetscInt));

  /* take ownership of the caller's index array (not copied; freed in Destroy) */
  (*mapping)->n       = n;
  (*mapping)->indices = (PetscInt*)indices;

  /*
      Do not create the global to local mapping. This is only created if
     ISGlobalToLocalMapping() is called
  */
  (*mapping)->globals = 0;
  PetscFunctionReturn(0);
}
210 
211 #undef __FUNCT__
212 #define __FUNCT__ "ISLocalToGlobalMappingBlock"
213 /*@C
214     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
215        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
216        and VecSetLocalToGlobalMappingBlock().
217 
218     Not Collective, but communicator may have more than one process
219 
220     Input Parameters:
221 +    inmap - original point-wise mapping
222 -    bs - block size
223 
224     Output Parameter:
225 .   outmap - block based mapping
226 
227     Level: advanced
228 
229     Concepts: mapping^local to global
230 
231 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
232 @*/
233 PetscErrorCode ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
234 {
235   PetscErrorCode ierr;
236   PetscInt       *ii,i,n;
237 
238   PetscFunctionBegin;
239   if (bs > 1) {
240     n    = inmap->n/bs;
241     if (n*bs != inmap->n) SETERRQ(PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
242     ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr);
243     for (i=0; i<n; i++) {
244       ii[i] = inmap->indices[bs*i];
245     }
246     ierr = ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);CHKERRQ(ierr);
247     ierr = PetscFree(ii);CHKERRQ(ierr);
248   } else {
249     *outmap = inmap;
250     ierr    = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr);
251   }
252   PetscFunctionReturn(0);
253 }
254 
255 #undef __FUNCT__
256 #define __FUNCT__ "ISLocalToGlobalMappingDestroy"
257 /*@
258    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
259    ordering and a global parallel ordering.
260 
261    Note Collective
262 
263    Input Parameters:
264 .  mapping - mapping data structure
265 
266    Level: advanced
267 
268 .seealso: ISLocalToGlobalMappingCreate()
269 @*/
270 PetscErrorCode ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
271 {
272   PetscErrorCode ierr;
273   PetscFunctionBegin;
274   PetscValidPointer(mapping,1);
275   if (--mapping->refct > 0) PetscFunctionReturn(0);
276   if (mapping->refct < 0) {
277     SETERRQ(PETSC_ERR_PLIB,"Mapping already destroyed");
278   }
279 
280   ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
281   if (mapping->globals) {ierr = PetscFree(mapping->globals);CHKERRQ(ierr);}
282   PetscLogObjectDestroy(mapping);
283   PetscHeaderDestroy(mapping);
284   PetscFunctionReturn(0);
285 }
286 
287 #undef __FUNCT__
288 #define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
289 /*@
290     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
291     a new index set using the global numbering defined in an ISLocalToGlobalMapping
292     context.
293 
294     Not collective
295 
296     Input Parameters:
297 +   mapping - mapping between local and global numbering
298 -   is - index set in local numbering
299 
300     Output Parameters:
301 .   newis - index set in global numbering
302 
303     Level: advanced
304 
305     Concepts: mapping^local to global
306 
307 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
308           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
309 @*/
310 PetscErrorCode ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
311 {
312   PetscErrorCode ierr;
313   PetscInt            n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;
314 
315   PetscFunctionBegin;
316   PetscValidPointer(mapping,1);
317   PetscValidHeaderSpecific(is,IS_COOKIE,2);
318   PetscValidPointer(newis,3);
319 
320   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
321   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
322   idxmap = mapping->indices;
323 
324   ierr = PetscMalloc(n*sizeof(PetscInt),&idxout);CHKERRQ(ierr);
325   for (i=0; i<n; i++) {
326     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
327     idxout[i] = idxmap[idxin[i]];
328   }
329   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
330   ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
331   ierr = PetscFree(idxout);CHKERRQ(ierr);
332   PetscFunctionReturn(0);
333 }
334 
335 /*MC
336    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
337    and converts them to the global numbering.
338 
339    Not collective
340 
341    Input Parameters:
342 +  mapping - the local to global mapping context
343 .  N - number of integers
344 -  in - input indices in local numbering
345 
346    Output Parameter:
347 .  out - indices in global numbering
348 
349    Synopsis:
   PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,PetscInt N,PetscInt in[],PetscInt out[])
351 
352    Notes:
353    The in and out array parameters may be identical.
354 
355    Level: advanced
356 
357 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
358           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
359           AOPetscToApplication(), ISGlobalToLocalMappingApply()
360 
361     Concepts: mapping^local to global
362 
363 M*/
364 
365 /* -----------------------------------------------------------------------------------------*/
366 
367 #undef __FUNCT__
368 #define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
369 /*
370     Creates the global fields in the ISLocalToGlobalMapping structure
371 */
372 static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
373 {
374   PetscErrorCode ierr;
375   PetscInt            i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
376 
377   PetscFunctionBegin;
378   end   = 0;
379   start = 100000000;
380 
381   for (i=0; i<n; i++) {
382     if (idx[i] < 0) continue;
383     if (idx[i] < start) start = idx[i];
384     if (idx[i] > end)   end   = idx[i];
385   }
386   if (start > end) {start = 0; end = -1;}
387   mapping->globalstart = start;
388   mapping->globalend   = end;
389 
390   ierr             = PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);CHKERRQ(ierr);
391   mapping->globals = globals;
392   for (i=0; i<end-start+1; i++) {
393     globals[i] = -1;
394   }
395   for (i=0; i<n; i++) {
396     if (idx[i] < 0) continue;
397     globals[idx[i] - start] = i;
398   }
399 
400   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));
401   PetscFunctionReturn(0);
402 }
403 
404 #undef __FUNCT__
405 #define __FUNCT__ "ISGlobalToLocalMappingApply"
406 /*@
407     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
408     specified with a global numbering.
409 
410     Not collective
411 
412     Input Parameters:
413 +   mapping - mapping between local and global numbering
414 .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
415            IS_GTOLM_DROP - drops the indices with no local value from the output list
416 .   n - number of global indices to map
417 -   idx - global indices to map
418 
419     Output Parameters:
420 +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
421 -   idxout - local index of each global index, one must pass in an array long enough
422              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
423              idxout == PETSC_NULL to determine the required length (returned in nout)
424              and then allocate the required space and call ISGlobalToLocalMappingApply()
425              a second time to set the values.
426 
427     Notes:
428     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
429 
430     This is not scalable in memory usage. Each processor requires O(Nglobal) size
431     array to compute these.
432 
433     Level: advanced
434 
435     Concepts: mapping^global to local
436 
437 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
438           ISLocalToGlobalMappingDestroy()
439 @*/
440 PetscErrorCode ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
441                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
442 {
443   PetscInt i,*globals,nf = 0,tmp,start,end;
444   PetscErrorCode ierr;
445 
446   PetscFunctionBegin;
447   if (!mapping->globals) {
448     ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
449   }
450   globals = mapping->globals;
451   start   = mapping->globalstart;
452   end     = mapping->globalend;
453 
454   if (type == IS_GTOLM_MASK) {
455     if (idxout) {
456       for (i=0; i<n; i++) {
457         if (idx[i] < 0) idxout[i] = idx[i];
458         else if (idx[i] < start) idxout[i] = -1;
459         else if (idx[i] > end)   idxout[i] = -1;
460         else                     idxout[i] = globals[idx[i] - start];
461       }
462     }
463     if (nout) *nout = n;
464   } else {
465     if (idxout) {
466       for (i=0; i<n; i++) {
467         if (idx[i] < 0) continue;
468         if (idx[i] < start) continue;
469         if (idx[i] > end) continue;
470         tmp = globals[idx[i] - start];
471         if (tmp < 0) continue;
472         idxout[nf++] = tmp;
473       }
474     } else {
475       for (i=0; i<n; i++) {
476         if (idx[i] < 0) continue;
477         if (idx[i] < start) continue;
478         if (idx[i] > end) continue;
479         tmp = globals[idx[i] - start];
480         if (tmp < 0) continue;
481         nf++;
482       }
483     }
484     if (nout) *nout = nf;
485   }
486 
487   PetscFunctionReturn(0);
488 }
489 
490 #undef __FUNCT__
491 #define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
492 /*@C
493     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
494      each index shared by more than one processor
495 
496     Collective on ISLocalToGlobalMapping
497 
498     Input Parameters:
499 .   mapping - the mapping from local to global indexing
500 
501     Output Parameter:
502 +   nproc - number of processors that are connected to this one
503 .   proc - neighboring processors
504 .   numproc - number of indices for each subdomain (processor)
505 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
506 
507     Level: advanced
508 
509     Concepts: mapping^local to global
510 
511 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
512           ISLocalToGlobalMappingRestoreInfo()
513 @*/
514 PetscErrorCode ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
515 {
516   PetscErrorCode ierr;
517   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
518   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
519   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
520   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
521   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
522   PetscInt       first_procs,first_numprocs,*first_indices;
523   MPI_Request    *recv_waits,*send_waits;
524   MPI_Status     recv_status,*send_status,*recv_statuses;
525   MPI_Comm       comm = mapping->comm;
526   PetscTruth     debug = PETSC_FALSE;
527 
528   PetscFunctionBegin;
529   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
530   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
531   if (size == 1) {
532     *nproc         = 0;
533     *procs         = PETSC_NULL;
534     ierr           = PetscMalloc(sizeof(PetscInt),numprocs);CHKERRQ(ierr);
535     (*numprocs)[0] = 0;
536     ierr           = PetscMalloc(sizeof(PetscInt*),indices);CHKERRQ(ierr);
537     (*indices)[0]  = PETSC_NULL;
538     PetscFunctionReturn(0);
539   }
540 
541   ierr = PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);CHKERRQ(ierr);
542 
543   /*
544     Notes on ISLocalToGlobalMappingGetInfo
545 
546     globally owned node - the nodes that have been assigned to this processor in global
547            numbering, just for this routine.
548 
549     nontrivial globally owned node - node assigned to this processor that is on a subdomain
550            boundary (i.e. is has more than one local owner)
551 
552     locally owned node - node that exists on this processors subdomain
553 
554     nontrivial locally owned node - node that is not in the interior (i.e. has more than one
555            local subdomain
556   */
557   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
558   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
559   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);
560 
561   for (i=0; i<n; i++) {
562     if (lindices[i] > max) max = lindices[i];
563   }
564   ierr   = MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
565   Ng++;
566   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
567   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
568   scale  = Ng/size + 1;
569   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
570   rstart = scale*rank;
571 
572   /* determine ownership ranges of global indices */
573   ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
574   ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
575 
576   /* determine owners of each local node  */
577   ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr);
578   for (i=0; i<n; i++) {
579     proc             = lindices[i]/scale; /* processor that globally owns this index */
580     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
581     owner[i]         = proc;
582     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
583   }
584   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
585   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);
586 
587   /* inform other processors of number of messages and max length*/
588   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
589   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);
590 
591   /* post receives for owned rows */
592   ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);CHKERRQ(ierr);
593   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
594   for (i=0; i<nrecvs; i++) {
595     ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
596   }
597 
598   /* pack messages containing lists of local nodes to owners */
599   ierr       = PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);CHKERRQ(ierr);
600   ierr       = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
601   starts[0]  = 0;
602   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
603   for (i=0; i<n; i++) {
604     sends[starts[owner[i]]++] = lindices[i];
605     sends[starts[owner[i]]++] = i;
606   }
607   ierr = PetscFree(owner);CHKERRQ(ierr);
608   starts[0]  = 0;
609   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
610 
611   /* send the messages */
612   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
613   ierr = PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);CHKERRQ(ierr);
614   cnt = 0;
615   for (i=0; i<size; i++) {
616     if (nprocs[2*i]) {
617       ierr      = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
618       dest[cnt] = i;
619       cnt++;
620     }
621   }
622   ierr = PetscFree(starts);CHKERRQ(ierr);
623 
624   /* wait on receives */
625   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);CHKERRQ(ierr);
626   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);CHKERRQ(ierr);
627   cnt  = nrecvs;
628   ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);CHKERRQ(ierr);
629   ierr = PetscMemzero(nownedsenders,ng*sizeof(PetscInt));CHKERRQ(ierr);
630   while (cnt) {
631     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
632     /* unpack receives into our local space */
633     ierr           = MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);CHKERRQ(ierr);
634     source[imdex]  = recv_status.MPI_SOURCE;
635     len[imdex]     = len[imdex]/2;
636     /* count how many local owners for each of my global owned indices */
637     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
638     cnt--;
639   }
640   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
641 
642   /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
643   nowned  = 0;
644   nownedm = 0;
645   for (i=0; i<ng; i++) {
646     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
647   }
648 
649   /* create single array to contain rank of all local owners of each globally owned index */
650   ierr      = PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);CHKERRQ(ierr);
651   ierr      = PetscMalloc((ng+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
652   starts[0] = 0;
653   for (i=1; i<ng; i++) {
654     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
655     else starts[i] = starts[i-1];
656   }
657 
658   /* for each nontrival globally owned node list all arriving processors */
659   for (i=0; i<nrecvs; i++) {
660     for (j=0; j<len[i]; j++) {
661       node = recvs[2*i*nmax+2*j]-rstart;
662       if (nownedsenders[node] > 1) {
663         ownedsenders[starts[node]++] = source[i];
664       }
665     }
666   }
667 
668   if (debug) { /* -----------------------------------  */
669     starts[0]    = 0;
670     for (i=1; i<ng; i++) {
671       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
672       else starts[i] = starts[i-1];
673     }
674     for (i=0; i<ng; i++) {
675       if (nownedsenders[i] > 1) {
676         ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
677         for (j=0; j<nownedsenders[i]; j++) {
678           ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
679         }
680         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
681       }
682     }
683     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
684   }/* -----------------------------------  */
685 
686   /* wait on original sends */
687   if (nsends) {
688     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
689     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
690     ierr = PetscFree(send_status);CHKERRQ(ierr);
691   }
692   ierr = PetscFree(send_waits);CHKERRQ(ierr);
693   ierr = PetscFree(sends);CHKERRQ(ierr);
694   ierr = PetscFree(nprocs);CHKERRQ(ierr);
695 
696   /* pack messages to send back to local owners */
697   starts[0]    = 0;
698   for (i=1; i<ng; i++) {
699     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
700     else starts[i] = starts[i-1];
701   }
702   nsends2 = nrecvs;
703   ierr    = PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); /* length of each message */
704   for (i=0; i<nrecvs; i++) {
705     nprocs[i] = 1;
706     for (j=0; j<len[i]; j++) {
707       node = recvs[2*i*nmax+2*j]-rstart;
708       if (nownedsenders[node] > 1) {
709         nprocs[i] += 2 + nownedsenders[node];
710       }
711     }
712   }
713   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
714   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);CHKERRQ(ierr);
715   ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);CHKERRQ(ierr);
716   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
717   /*
718      Each message is 1 + nprocs[i] long, and consists of
719        (0) the number of nodes being sent back
720        (1) the local node number,
721        (2) the number of processors sharing it,
722        (3) the processors sharing it
723   */
724   for (i=0; i<nsends2; i++) {
725     cnt = 1;
726     sends2[starts2[i]] = 0;
727     for (j=0; j<len[i]; j++) {
728       node = recvs[2*i*nmax+2*j]-rstart;
729       if (nownedsenders[node] > 1) {
730         sends2[starts2[i]]++;
731         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
732         sends2[starts2[i]+cnt++] = nownedsenders[node];
733         ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));CHKERRQ(ierr);
734         cnt += nownedsenders[node];
735       }
736     }
737   }
738 
739   /* receive the message lengths */
740   nrecvs2 = nsends;
741   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);CHKERRQ(ierr);
742   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);CHKERRQ(ierr);
743   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
744   for (i=0; i<nrecvs2; i++) {
745     ierr = MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);CHKERRQ(ierr);
746   }
747 
748   /* send the message lengths */
749   for (i=0; i<nsends2; i++) {
750     ierr = MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);CHKERRQ(ierr);
751   }
752 
753   /* wait on receives of lens */
754   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
755   ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
756   ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
757   ierr = PetscFree(recv_waits);
758 
759   starts3[0] = 0;
760   nt         = 0;
761   for (i=0; i<nrecvs2-1; i++) {
762     starts3[i+1] = starts3[i] + lens2[i];
763     nt          += lens2[i];
764   }
765   nt += lens2[nrecvs2-1];
766 
767   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);CHKERRQ(ierr);
768   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
769   for (i=0; i<nrecvs2; i++) {
770     ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
771   }
772 
773   /* send the messages */
774   ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
775   for (i=0; i<nsends2; i++) {
776     ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
777   }
778 
779   /* wait on receives */
780   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
781   ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
782   ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
783   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
784   ierr = PetscFree(nprocs);CHKERRQ(ierr);
785 
786   if (debug) { /* -----------------------------------  */
787     cnt = 0;
788     for (i=0; i<nrecvs2; i++) {
789       nt = recvs2[cnt++];
790       for (j=0; j<nt; j++) {
791         ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
792         for (k=0; k<recvs2[cnt+1]; k++) {
793           ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
794         }
795         cnt += 2 + recvs2[cnt+1];
796         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
797       }
798     }
799     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
800   } /* -----------------------------------  */
801 
802   /* count number subdomains for each local node */
803   ierr = PetscMalloc(size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
804   ierr = PetscMemzero(nprocs,size*sizeof(PetscInt));CHKERRQ(ierr);
805   cnt  = 0;
806   for (i=0; i<nrecvs2; i++) {
807     nt = recvs2[cnt++];
808     for (j=0; j<nt; j++) {
809       for (k=0; k<recvs2[cnt+1]; k++) {
810         nprocs[recvs2[cnt+2+k]]++;
811       }
812       cnt += 2 + recvs2[cnt+1];
813     }
814   }
815   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
816   *nproc    = nt;
817   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),procs);CHKERRQ(ierr);
818   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);CHKERRQ(ierr);
819   ierr = PetscMalloc((nt+1)*sizeof(PetscInt*),indices);CHKERRQ(ierr);
820   ierr = PetscMalloc(size*sizeof(PetscInt),&bprocs);CHKERRQ(ierr);
821   cnt       = 0;
822   for (i=0; i<size; i++) {
823     if (nprocs[i] > 0) {
824       bprocs[i]        = cnt;
825       (*procs)[cnt]    = i;
826       (*numprocs)[cnt] = nprocs[i];
827       ierr             = PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);CHKERRQ(ierr);
828       cnt++;
829     }
830   }
831 
832   /* make the list of subdomains for each nontrivial local node */
833   ierr = PetscMemzero(*numprocs,nt*sizeof(PetscInt));CHKERRQ(ierr);
834   cnt  = 0;
835   for (i=0; i<nrecvs2; i++) {
836     nt = recvs2[cnt++];
837     for (j=0; j<nt; j++) {
838       for (k=0; k<recvs2[cnt+1]; k++) {
839         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
840       }
841       cnt += 2 + recvs2[cnt+1];
842     }
843   }
844   ierr = PetscFree(bprocs);CHKERRQ(ierr);
845   ierr = PetscFree(recvs2);CHKERRQ(ierr);
846 
847   /* sort the node indexing by their global numbers */
848   nt = *nproc;
849   for (i=0; i<nt; i++) {
850     ierr = PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
851     for (j=0; j<(*numprocs)[i]; j++) {
852       tmp[j] = lindices[(*indices)[i][j]];
853     }
854     ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
855     ierr = PetscFree(tmp);CHKERRQ(ierr);
856   }
857 
858   if (debug) { /* -----------------------------------  */
859     nt = *nproc;
860     for (i=0; i<nt; i++) {
861       ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
862       for (j=0; j<(*numprocs)[i]; j++) {
863         ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
864       }
865       ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
866     }
867     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
868   } /* -----------------------------------  */
869 
870   /* wait on sends */
871   if (nsends2) {
872     ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
873     ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
874     ierr = PetscFree(send_status);CHKERRQ(ierr);
875   }
876 
877   ierr = PetscFree(starts3);CHKERRQ(ierr);
878   ierr = PetscFree(dest);CHKERRQ(ierr);
879   ierr = PetscFree(send_waits);CHKERRQ(ierr);
880 
881   ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
882   ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
883   ierr = PetscFree(starts);CHKERRQ(ierr);
884   ierr = PetscFree(starts2);CHKERRQ(ierr);
885   ierr = PetscFree(lens2);CHKERRQ(ierr);
886 
887   ierr = PetscFree(source);CHKERRQ(ierr);
888   ierr = PetscFree(len);CHKERRQ(ierr);
889   ierr = PetscFree(recvs);CHKERRQ(ierr);
890   ierr = PetscFree(nprocs);CHKERRQ(ierr);
891   ierr = PetscFree(sends2);CHKERRQ(ierr);
892 
893   /* put the information about myself as the first entry in the list */
894   first_procs    = (*procs)[0];
895   first_numprocs = (*numprocs)[0];
896   first_indices  = (*indices)[0];
897   for (i=0; i<*nproc; i++) {
898     if ((*procs)[i] == rank) {
899       (*procs)[0]    = (*procs)[i];
900       (*numprocs)[0] = (*numprocs)[i];
901       (*indices)[0]  = (*indices)[i];
902       (*procs)[i]    = first_procs;
903       (*numprocs)[i] = first_numprocs;
904       (*indices)[i]  = first_indices;
905       break;
906     }
907   }
908   PetscFunctionReturn(0);
909 }
910 
911 #undef __FUNCT__
912 #define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
913 /*@C
914     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
915 
916     Collective on ISLocalToGlobalMapping
917 
918     Input Parameters:
919 .   mapping - the mapping from local to global indexing
920 
921     Output Parameter:
922 +   nproc - number of processors that are connected to this one
923 .   proc - neighboring processors
924 .   numproc - number of indices for each processor
925 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
926 
927     Level: advanced
928 
929 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
930           ISLocalToGlobalMappingGetInfo()
931 @*/
932 PetscErrorCode ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
933 {
934   PetscErrorCode ierr;
935   PetscInt i;
936 
937   PetscFunctionBegin;
938   if (*procs) {ierr = PetscFree(*procs);CHKERRQ(ierr);}
939   if (*numprocs) {ierr = PetscFree(*numprocs);CHKERRQ(ierr);}
940   if (*indices) {
941     if ((*indices)[0]) {ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);}
942     for (i=1; i<*nproc; i++) {
943       if ((*indices)[i]) {ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);}
944     }
945     ierr = PetscFree(*indices);CHKERRQ(ierr);
946   }
947   PetscFunctionReturn(0);
948 }
949 
950 
951 
952 
953 
954 
955 
956 
957 
958 
959 
960 
961 
962 
963 
964 
965 
966 
967