xref: /petsc/src/vec/is/utils/isltog.c (revision 97c4aaa0041b17e0788ef5380763993e79c96a23)
1 #define PETSCVEC_DLL
2 
3 #include "petscvec.h"   /*I "petscvec.h" I*/
4 #include "private/isimpl.h"    /*I "petscis.h"  I*/
5 
6 PetscClassId PETSCVEC_DLLEXPORT IS_LTOGM_CLASSID;
7 
#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingGetSize"
/*@C
    ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

    Not Collective

    Input Parameter:
.   ltog - local to global mapping

    Output Parameter:
.   n - the number of entries in the local mapping

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  PetscValidIntPointer(n,2);
  *n = mapping->n;  /* number of local indices stored in the mapping (set at creation) */
  PetscFunctionReturn(0);
}
35 
36 #undef __FUNCT__
37 #define __FUNCT__ "ISLocalToGlobalMappingView"
38 /*@C
39     ISLocalToGlobalMappingView - View a local to global mapping
40 
41     Not Collective
42 
43     Input Parameters:
44 +   ltog - local to global mapping
45 -   viewer - viewer
46 
47     Level: advanced
48 
49     Concepts: mapping^local to global
50 
51 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
52 @*/
53 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
54 {
55   PetscInt        i;
56   PetscMPIInt     rank;
57   PetscBool       iascii;
58   PetscErrorCode  ierr;
59 
60   PetscFunctionBegin;
61   PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
62   if (!viewer) {
63     ierr = PetscViewerASCIIGetStdout(((PetscObject)mapping)->comm,&viewer);CHKERRQ(ierr);
64   }
65   PetscValidHeaderSpecific(viewer,PETSC_VIEWER_CLASSID,2);
66 
67   ierr = MPI_Comm_rank(((PetscObject)mapping)->comm,&rank);CHKERRQ(ierr);
68   ierr = PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
69   if (iascii) {
70     for (i=0; i<mapping->n; i++) {
71       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
72     }
73     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
74   } else {
75     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
76   }
77 
78   PetscFunctionReturn(0);
79 }
80 
81 #undef __FUNCT__
82 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
83 /*@
84     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
85     ordering and a global parallel ordering.
86 
87     Not collective
88 
89     Input Parameter:
90 .   is - index set containing the global numbers for each local
91 
92     Output Parameter:
93 .   mapping - new mapping data structure
94 
95     Level: advanced
96 
97     Concepts: mapping^local to global
98 
99 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
100 @*/
101 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
102 {
103   PetscErrorCode ierr;
104   PetscInt       n;
105   const PetscInt *indices;
106   MPI_Comm       comm;
107 
108   PetscFunctionBegin;
109   PetscValidHeaderSpecific(is,IS_CLASSID,1);
110   PetscValidPointer(mapping,2);
111 
112   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
113   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
114   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
115   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,PETSC_COPY_VALUES,mapping);CHKERRQ(ierr);
116   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
117   PetscFunctionReturn(0);
118 }
119 
120 
121 #undef __FUNCT__
122 #define __FUNCT__ "ISLocalToGlobalMappingCreate"
123 /*@
124     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
125     ordering and a global parallel ordering.
126 
127     Not Collective, but communicator may have more than one process
128 
129     Input Parameters:
130 +   comm - MPI communicator
131 .   n - the number of local elements
132 .   indices - the global index for each local element
133 -   mode - see PetscCopyMode
134 
135     Output Parameter:
136 .   mapping - new mapping data structure
137 
138     Level: advanced
139 
140     Concepts: mapping^local to global
141 
142 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
143 @*/
144 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],PetscCopyMode mode,ISLocalToGlobalMapping *mapping)
145 {
146   PetscErrorCode ierr;
147   PetscInt       *in;
148 
149   PetscFunctionBegin;
150   if (n) PetscValidIntPointer(indices,3);
151   PetscValidPointer(mapping,4);
152 
153   *mapping = PETSC_NULL;
154 #ifndef PETSC_USE_DYNAMIC_LIBRARIES
155   ierr = ISInitializePackage(PETSC_NULL);CHKERRQ(ierr);
156 #endif
157 
158   ierr = PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_CLASSID,0,"ISLocalToGlobalMapping",
159 			   cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);CHKERRQ(ierr);
160   (*mapping)->n       = n;
161   /*
162     Do not create the global to local mapping. This is only created if
163     ISGlobalToLocalMapping() is called
164   */
165   (*mapping)->globals = 0;
166   if (mode == PETSC_COPY_VALUES) {
167     ierr = PetscMalloc(n*sizeof(PetscInt),&in);CHKERRQ(ierr);
168     ierr = PetscMemcpy(in,indices,n*sizeof(PetscInt));CHKERRQ(ierr);
169     ierr = PetscLogObjectMemory(*mapping,n*sizeof(PetscInt));CHKERRQ(ierr);
170     (*mapping)->indices = in;
171   } else if (mode == PETSC_OWN_POINTER) {
172     (*mapping)->indices = (PetscInt*)indices;
173   } else SETERRQ(cm,PETSC_ERR_SUP,"Cannot currently use PETSC_USE_POINTER");
174   PetscFunctionReturn(0);
175 }
176 
177 #undef __FUNCT__
178 #define __FUNCT__ "ISLocalToGlobalMappingBlock"
179 /*@
180     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
181        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
182        and VecSetLocalToGlobalMappingBlock().
183 
184     Not Collective, but communicator may have more than one process
185 
186     Input Parameters:
187 +    inmap - original point-wise mapping
188 -    bs - block size
189 
190     Output Parameter:
191 .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
192 
193     Level: advanced
194 
195     Concepts: mapping^local to global
196 
197 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
198 @*/
199 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
200 {
201   PetscErrorCode ierr;
202   PetscInt       *ii,i,n;
203 
204   PetscFunctionBegin;
205   PetscValidHeaderSpecific(inmap,IS_LTOGM_CLASSID,1);
206   PetscValidPointer(outmap,1);
207   if (bs > 1) {
208     n    = inmap->n/bs;
209     if (n*bs != inmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
210     ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr);
211     for (i=0; i<n; i++) {
212       ii[i] = inmap->indices[bs*i]/bs;
213     }
214     ierr = ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,PETSC_OWN_POINTER,outmap);CHKERRQ(ierr);
215   } else {
216     ierr    = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr);
217     *outmap = inmap;
218   }
219   PetscFunctionReturn(0);
220 }
221 
#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingDestroy"
/*@
   ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
   ordering and a global parallel ordering.

   Not Collective

   Input Parameter:
.  mapping - mapping data structure

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  /* other references may still exist; only free storage when the last one goes away */
  if (--((PetscObject)mapping)->refct > 0) PetscFunctionReturn(0);
  ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
  /* globals is PETSC_NULL unless the global-to-local table was ever built */
  ierr = PetscFree(mapping->globals);CHKERRQ(ierr);
  ierr = PetscHeaderDestroy(mapping);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
248 
249 #undef __FUNCT__
250 #define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
251 /*@
252     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
253     a new index set using the global numbering defined in an ISLocalToGlobalMapping
254     context.
255 
256     Not collective
257 
258     Input Parameters:
259 +   mapping - mapping between local and global numbering
260 -   is - index set in local numbering
261 
262     Output Parameters:
263 .   newis - index set in global numbering
264 
265     Level: advanced
266 
267     Concepts: mapping^local to global
268 
269 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
270           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
271 @*/
272 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
273 {
274   PetscErrorCode ierr;
275   PetscInt       n,i,*idxmap,*idxout,Nmax = mapping->n;
276   const PetscInt *idxin;
277 
278   PetscFunctionBegin;
279   PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
280   PetscValidHeaderSpecific(is,IS_CLASSID,2);
281   PetscValidPointer(newis,3);
282 
283   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
284   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
285   idxmap = mapping->indices;
286 
287   ierr = PetscMalloc(n*sizeof(PetscInt),&idxout);CHKERRQ(ierr);
288   for (i=0; i<n; i++) {
289     if (idxin[i] >= Nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
290     idxout[i] = idxmap[idxin[i]];
291   }
292   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
293   ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,PETSC_OWN_POINTER,newis);CHKERRQ(ierr);
294   PetscFunctionReturn(0);
295 }
296 
297 /*MC
298    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
299    and converts them to the global numbering.
300 
301    Synopsis:
   PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,PetscInt N,const PetscInt in[],PetscInt out[])
303 
304    Not collective
305 
306    Input Parameters:
307 +  mapping - the local to global mapping context
308 .  N - number of integers
309 -  in - input indices in local numbering
310 
311    Output Parameter:
312 .  out - indices in global numbering
313 
314    Notes:
315    The in and out array parameters may be identical.
316 
317    Level: advanced
318 
319 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
320           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
321           AOPetscToApplication(), ISGlobalToLocalMappingApply()
322 
323     Concepts: mapping^local to global
324 
325 M*/
326 
327 /* -----------------------------------------------------------------------------------------*/
328 
329 #undef __FUNCT__
330 #define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
331 /*
332     Creates the global fields in the ISLocalToGlobalMapping structure
333 */
334 static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
335 {
336   PetscErrorCode ierr;
337   PetscInt       i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
338 
339   PetscFunctionBegin;
340   end   = 0;
341   start = 100000000;
342 
343   for (i=0; i<n; i++) {
344     if (idx[i] < 0) continue;
345     if (idx[i] < start) start = idx[i];
346     if (idx[i] > end)   end   = idx[i];
347   }
348   if (start > end) {start = 0; end = -1;}
349   mapping->globalstart = start;
350   mapping->globalend   = end;
351 
352   ierr             = PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);CHKERRQ(ierr);
353   mapping->globals = globals;
354   for (i=0; i<end-start+1; i++) {
355     globals[i] = -1;
356   }
357   for (i=0; i<n; i++) {
358     if (idx[i] < 0) continue;
359     globals[idx[i] - start] = i;
360   }
361 
362   ierr = PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));CHKERRQ(ierr);
363   PetscFunctionReturn(0);
364 }
365 
#undef __FUNCT__
#define __FUNCT__ "ISGlobalToLocalMappingApply"
/*@
    ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
    specified with a global numbering.

    Not collective

    Input Parameters:
+   mapping - mapping between local and global numbering
.   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
           IS_GTOLM_DROP - drops the indices with no local value from the output list
.   n - number of global indices to map
-   idx - global indices to map

    Output Parameters:
+   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
-   idxout - local index of each global index, one must pass in an array long enough
             to hold all the indices. You can call ISGlobalToLocalMappingApply() with
             idxout == PETSC_NULL to determine the required length (returned in nout)
             and then allocate the required space and call ISGlobalToLocalMappingApply()
             a second time to set the values.

    Notes:
    Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

    This is not scalable in memory usage. Each processor requires O(Nglobal) size
    array to compute these.

    Level: advanced

    Concepts: mapping^global to local

.seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingDestroy()
@*/
PetscErrorCode PETSCVEC_DLLEXPORT ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
                                  PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
{
  PetscInt       i,*globals,nf = 0,tmp,start,end;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  /* the global-to-local lookup table is built lazily on first use */
  if (!mapping->globals) {
    ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
  }
  globals = mapping->globals;
  start   = mapping->globalstart;
  end     = mapping->globalend;

  if (type == IS_GTOLM_MASK) {
    /* MASK: output has the same length as the input; entries with no local
       value become -1, negative inputs are passed through unchanged */
    if (idxout) {
      for (i=0; i<n; i++) {
        if (idx[i] < 0) idxout[i] = idx[i];
        else if (idx[i] < start) idxout[i] = -1;
        else if (idx[i] > end)   idxout[i] = -1;
        else                     idxout[i] = globals[idx[i] - start];
      }
    }
    if (nout) *nout = n;
  } else {
    /* DROP: entries with no local value are omitted from the output */
    if (idxout) {
      for (i=0; i<n; i++) {
        if (idx[i] < 0) continue;
        if (idx[i] < start) continue;
        if (idx[i] > end) continue;
        tmp = globals[idx[i] - start];
        if (tmp < 0) continue; /* in range but not present in the mapping */
        idxout[nf++] = tmp;
      }
    } else {
      /* idxout == PETSC_NULL: first pass that only counts the required length */
      for (i=0; i<n; i++) {
        if (idx[i] < 0) continue;
        if (idx[i] < start) continue;
        if (idx[i] > end) continue;
        tmp = globals[idx[i] - start];
        if (tmp < 0) continue;
        nf++;
      }
    }
    if (nout) *nout = nf;
  }

  PetscFunctionReturn(0);
}
452 
453 #undef __FUNCT__
454 #define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
455 /*@C
456     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
457      each index shared by more than one processor
458 
459     Collective on ISLocalToGlobalMapping
460 
461     Input Parameters:
462 .   mapping - the mapping from local to global indexing
463 
464     Output Parameter:
465 +   nproc - number of processors that are connected to this one
466 .   proc - neighboring processors
467 .   numproc - number of indices for each subdomain (processor)
468 -   indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)
469 
470     Level: advanced
471 
472     Concepts: mapping^local to global
473 
474     Fortran Usage:
475 $        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by
476 $        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
477           PetscInt indices[nproc][numprocmax],ierr)
478         There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and
479         indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.
480 
481 
482 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
483           ISLocalToGlobalMappingRestoreInfo()
484 @*/
485 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
486 {
487   PetscErrorCode ierr;
488   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
489   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
490   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
491   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
492   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
493   PetscInt       first_procs,first_numprocs,*first_indices;
494   MPI_Request    *recv_waits,*send_waits;
495   MPI_Status     recv_status,*send_status,*recv_statuses;
496   MPI_Comm       comm = ((PetscObject)mapping)->comm;
497   PetscBool      debug = PETSC_FALSE;
498 
499   PetscFunctionBegin;
500   PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
501   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
502   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
503   if (size == 1) {
504     *nproc         = 0;
505     *procs         = PETSC_NULL;
506     ierr           = PetscMalloc(sizeof(PetscInt),numprocs);CHKERRQ(ierr);
507     (*numprocs)[0] = 0;
508     ierr           = PetscMalloc(sizeof(PetscInt*),indices);CHKERRQ(ierr);
509     (*indices)[0]  = PETSC_NULL;
510     PetscFunctionReturn(0);
511   }
512 
513   ierr = PetscOptionsGetTruth(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,PETSC_NULL);CHKERRQ(ierr);
514 
515   /*
516     Notes on ISLocalToGlobalMappingGetInfo
517 
518     globally owned node - the nodes that have been assigned to this processor in global
519            numbering, just for this routine.
520 
521     nontrivial globally owned node - node assigned to this processor that is on a subdomain
           boundary (i.e. it has more than one local owner)
523 
    locally owned node - node that exists on this processor's subdomain
525 
526     nontrivial locally owned node - node that is not in the interior (i.e. has more than one
           local subdomain)
528   */
529   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
530   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
531   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);
532 
533   for (i=0; i<n; i++) {
534     if (lindices[i] > max) max = lindices[i];
535   }
536   ierr   = MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
537   Ng++;
538   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
539   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
540   scale  = Ng/size + 1;
541   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
542   rstart = scale*rank;
543 
544   /* determine ownership ranges of global indices */
545   ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
546   ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
547 
548   /* determine owners of each local node  */
549   ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr);
550   for (i=0; i<n; i++) {
551     proc             = lindices[i]/scale; /* processor that globally owns this index */
552     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
553     owner[i]         = proc;
554     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
555   }
556   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
557   ierr = PetscInfo1(mapping,"Number of global owners for my local data %d\n",nsends);CHKERRQ(ierr);
558 
559   /* inform other processors of number of messages and max length*/
560   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
561   ierr = PetscInfo1(mapping,"Number of local owners for my global data %d\n",nrecvs);CHKERRQ(ierr);
562 
563   /* post receives for owned rows */
564   ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);CHKERRQ(ierr);
565   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
566   for (i=0; i<nrecvs; i++) {
567     ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
568   }
569 
570   /* pack messages containing lists of local nodes to owners */
571   ierr       = PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);CHKERRQ(ierr);
572   ierr       = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
573   starts[0]  = 0;
574   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
575   for (i=0; i<n; i++) {
576     sends[starts[owner[i]]++] = lindices[i];
577     sends[starts[owner[i]]++] = i;
578   }
579   ierr = PetscFree(owner);CHKERRQ(ierr);
580   starts[0]  = 0;
581   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
582 
583   /* send the messages */
584   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
585   ierr = PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);CHKERRQ(ierr);
586   cnt = 0;
587   for (i=0; i<size; i++) {
588     if (nprocs[2*i]) {
589       ierr      = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
590       dest[cnt] = i;
591       cnt++;
592     }
593   }
594   ierr = PetscFree(starts);CHKERRQ(ierr);
595 
596   /* wait on receives */
597   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);CHKERRQ(ierr);
598   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);CHKERRQ(ierr);
599   cnt  = nrecvs;
600   ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);CHKERRQ(ierr);
601   ierr = PetscMemzero(nownedsenders,ng*sizeof(PetscInt));CHKERRQ(ierr);
602   while (cnt) {
603     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
604     /* unpack receives into our local space */
605     ierr           = MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);CHKERRQ(ierr);
606     source[imdex]  = recv_status.MPI_SOURCE;
607     len[imdex]     = len[imdex]/2;
608     /* count how many local owners for each of my global owned indices */
609     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
610     cnt--;
611   }
612   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
613 
614   /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
615   nowned  = 0;
616   nownedm = 0;
617   for (i=0; i<ng; i++) {
618     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
619   }
620 
621   /* create single array to contain rank of all local owners of each globally owned index */
622   ierr      = PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);CHKERRQ(ierr);
623   ierr      = PetscMalloc((ng+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
624   starts[0] = 0;
625   for (i=1; i<ng; i++) {
626     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
627     else starts[i] = starts[i-1];
628   }
629 
  /* for each nontrivial globally owned node list all arriving processors */
631   for (i=0; i<nrecvs; i++) {
632     for (j=0; j<len[i]; j++) {
633       node = recvs[2*i*nmax+2*j]-rstart;
634       if (nownedsenders[node] > 1) {
635         ownedsenders[starts[node]++] = source[i];
636       }
637     }
638   }
639 
640   if (debug) { /* -----------------------------------  */
641     starts[0]    = 0;
642     for (i=1; i<ng; i++) {
643       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
644       else starts[i] = starts[i-1];
645     }
646     for (i=0; i<ng; i++) {
647       if (nownedsenders[i] > 1) {
648         ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
649         for (j=0; j<nownedsenders[i]; j++) {
650           ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
651         }
652         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
653       }
654     }
655     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
656   }/* -----------------------------------  */
657 
658   /* wait on original sends */
659   if (nsends) {
660     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
661     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
662     ierr = PetscFree(send_status);CHKERRQ(ierr);
663   }
664   ierr = PetscFree(send_waits);CHKERRQ(ierr);
665   ierr = PetscFree(sends);CHKERRQ(ierr);
666   ierr = PetscFree(nprocs);CHKERRQ(ierr);
667 
668   /* pack messages to send back to local owners */
669   starts[0]    = 0;
670   for (i=1; i<ng; i++) {
671     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
672     else starts[i] = starts[i-1];
673   }
674   nsends2 = nrecvs;
675   ierr    = PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); /* length of each message */
676   for (i=0; i<nrecvs; i++) {
677     nprocs[i] = 1;
678     for (j=0; j<len[i]; j++) {
679       node = recvs[2*i*nmax+2*j]-rstart;
680       if (nownedsenders[node] > 1) {
681         nprocs[i] += 2 + nownedsenders[node];
682       }
683     }
684   }
685   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
686   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);CHKERRQ(ierr);
687   ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);CHKERRQ(ierr);
688   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
689   /*
690      Each message is 1 + nprocs[i] long, and consists of
691        (0) the number of nodes being sent back
692        (1) the local node number,
693        (2) the number of processors sharing it,
694        (3) the processors sharing it
695   */
696   for (i=0; i<nsends2; i++) {
697     cnt = 1;
698     sends2[starts2[i]] = 0;
699     for (j=0; j<len[i]; j++) {
700       node = recvs[2*i*nmax+2*j]-rstart;
701       if (nownedsenders[node] > 1) {
702         sends2[starts2[i]]++;
703         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
704         sends2[starts2[i]+cnt++] = nownedsenders[node];
705         ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));CHKERRQ(ierr);
706         cnt += nownedsenders[node];
707       }
708     }
709   }
710 
711   /* receive the message lengths */
712   nrecvs2 = nsends;
713   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);CHKERRQ(ierr);
714   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);CHKERRQ(ierr);
715   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
716   for (i=0; i<nrecvs2; i++) {
717     ierr = MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);CHKERRQ(ierr);
718   }
719 
720   /* send the message lengths */
721   for (i=0; i<nsends2; i++) {
722     ierr = MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);CHKERRQ(ierr);
723   }
724 
725   /* wait on receives of lens */
726   if (nrecvs2) {
727     ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
728     ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
729     ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
730   }
731   ierr = PetscFree(recv_waits);
732 
733   starts3[0] = 0;
734   nt         = 0;
735   for (i=0; i<nrecvs2-1; i++) {
736     starts3[i+1] = starts3[i] + lens2[i];
737     nt          += lens2[i];
738   }
739   nt += lens2[nrecvs2-1];
740 
741   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);CHKERRQ(ierr);
742   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
743   for (i=0; i<nrecvs2; i++) {
744     ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
745   }
746 
747   /* send the messages */
748   ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
749   for (i=0; i<nsends2; i++) {
750     ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
751   }
752 
753   /* wait on receives */
754   if (nrecvs2) {
755     ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
756     ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
757     ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
758   }
759   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
760   ierr = PetscFree(nprocs);CHKERRQ(ierr);
761 
762   if (debug) { /* -----------------------------------  */
763     cnt = 0;
764     for (i=0; i<nrecvs2; i++) {
765       nt = recvs2[cnt++];
766       for (j=0; j<nt; j++) {
767         ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
768         for (k=0; k<recvs2[cnt+1]; k++) {
769           ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
770         }
771         cnt += 2 + recvs2[cnt+1];
772         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
773       }
774     }
775     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
776   } /* -----------------------------------  */
777 
778   /* count number subdomains for each local node */
779   ierr = PetscMalloc(size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
780   ierr = PetscMemzero(nprocs,size*sizeof(PetscInt));CHKERRQ(ierr);
781   cnt  = 0;
782   for (i=0; i<nrecvs2; i++) {
783     nt = recvs2[cnt++];
784     for (j=0; j<nt; j++) {
785       for (k=0; k<recvs2[cnt+1]; k++) {
786         nprocs[recvs2[cnt+2+k]]++;
787       }
788       cnt += 2 + recvs2[cnt+1];
789     }
790   }
791   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
792   *nproc    = nt;
793   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),procs);CHKERRQ(ierr);
794   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);CHKERRQ(ierr);
795   ierr = PetscMalloc((nt+1)*sizeof(PetscInt*),indices);CHKERRQ(ierr);
796   ierr = PetscMalloc(size*sizeof(PetscInt),&bprocs);CHKERRQ(ierr);
797   cnt       = 0;
798   for (i=0; i<size; i++) {
799     if (nprocs[i] > 0) {
800       bprocs[i]        = cnt;
801       (*procs)[cnt]    = i;
802       (*numprocs)[cnt] = nprocs[i];
803       ierr             = PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);CHKERRQ(ierr);
804       cnt++;
805     }
806   }
807 
808   /* make the list of subdomains for each nontrivial local node */
809   ierr = PetscMemzero(*numprocs,nt*sizeof(PetscInt));CHKERRQ(ierr);
810   cnt  = 0;
811   for (i=0; i<nrecvs2; i++) {
812     nt = recvs2[cnt++];
813     for (j=0; j<nt; j++) {
814       for (k=0; k<recvs2[cnt+1]; k++) {
815         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
816       }
817       cnt += 2 + recvs2[cnt+1];
818     }
819   }
820   ierr = PetscFree(bprocs);CHKERRQ(ierr);
821   ierr = PetscFree(recvs2);CHKERRQ(ierr);
822 
823   /* sort the node indexing by their global numbers */
824   nt = *nproc;
825   for (i=0; i<nt; i++) {
826     ierr = PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
827     for (j=0; j<(*numprocs)[i]; j++) {
828       tmp[j] = lindices[(*indices)[i][j]];
829     }
830     ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
831     ierr = PetscFree(tmp);CHKERRQ(ierr);
832   }
833 
834   if (debug) { /* -----------------------------------  */
835     nt = *nproc;
836     for (i=0; i<nt; i++) {
837       ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
838       for (j=0; j<(*numprocs)[i]; j++) {
839         ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
840       }
841       ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
842     }
843     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
844   } /* -----------------------------------  */
845 
846   /* wait on sends */
847   if (nsends2) {
848     ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
849     ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
850     ierr = PetscFree(send_status);CHKERRQ(ierr);
851   }
852 
853   ierr = PetscFree(starts3);CHKERRQ(ierr);
854   ierr = PetscFree(dest);CHKERRQ(ierr);
855   ierr = PetscFree(send_waits);CHKERRQ(ierr);
856 
857   ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
858   ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
859   ierr = PetscFree(starts);CHKERRQ(ierr);
860   ierr = PetscFree(starts2);CHKERRQ(ierr);
861   ierr = PetscFree(lens2);CHKERRQ(ierr);
862 
863   ierr = PetscFree(source);CHKERRQ(ierr);
864   ierr = PetscFree(len);CHKERRQ(ierr);
865   ierr = PetscFree(recvs);CHKERRQ(ierr);
866   ierr = PetscFree(nprocs);CHKERRQ(ierr);
867   ierr = PetscFree(sends2);CHKERRQ(ierr);
868 
869   /* put the information about myself as the first entry in the list */
870   first_procs    = (*procs)[0];
871   first_numprocs = (*numprocs)[0];
872   first_indices  = (*indices)[0];
873   for (i=0; i<*nproc; i++) {
874     if ((*procs)[i] == rank) {
875       (*procs)[0]    = (*procs)[i];
876       (*numprocs)[0] = (*numprocs)[i];
877       (*indices)[0]  = (*indices)[i];
878       (*procs)[i]    = first_procs;
879       (*numprocs)[i] = first_numprocs;
880       (*indices)[i]  = first_indices;
881       break;
882     }
883   }
884   PetscFunctionReturn(0);
885 }
886 
887 #undef __FUNCT__
888 #define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
889 /*@C
890     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
891 
892     Collective on ISLocalToGlobalMapping
893 
894     Input Parameters:
895 .   mapping - the mapping from local to global indexing
896 
897     Output Parameter:
898 +   nproc - number of processors that are connected to this one
899 .   proc - neighboring processors
900 .   numproc - number of indices for each processor
901 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
902 
903     Level: advanced
904 
905 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
906           ISLocalToGlobalMappingGetInfo()
907 @*/
908 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
909 {
910   PetscErrorCode ierr;
911   PetscInt       i;
912 
913   PetscFunctionBegin;
914   ierr = PetscFree(*procs);CHKERRQ(ierr);
915   ierr = PetscFree(*numprocs);CHKERRQ(ierr);
916   if (*indices) {
917     ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);
918     for (i=1; i<*nproc; i++) {
919       ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);
920     }
921     ierr = PetscFree(*indices);CHKERRQ(ierr);
922   }
923   PetscFunctionReturn(0);
924 }
925