xref: /petsc/src/vec/is/utils/isltog.c (revision 0700a8246d308f50502909ba325e6169d3ee27eb)
1 #define PETSCVEC_DLL
2 
3 #include "petscvec.h"   /*I "petscvec.h" I*/
4 #include "private/isimpl.h"    /*I "petscis.h"  I*/
5 
6 PetscClassId PETSCVEC_DLLEXPORT IS_LTOGM_CLASSID;
7 
8 #undef __FUNCT__
9 #define __FUNCT__ "ISLocalToGlobalMappingGetSize"
10 /*@C
11     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
12 
13     Not Collective
14 
15     Input Parameter:
16 .   ltog - local to global mapping
17 
18     Output Parameter:
19 .   n - the number of entries in the local mapping
20 
21     Level: advanced
22 
23     Concepts: mapping^local to global
24 
25 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
26 @*/
27 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
28 {
29   PetscFunctionBegin;
30   PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
31   PetscValidIntPointer(n,2);
32   *n = mapping->n;
33   PetscFunctionReturn(0);
34 }
35 
36 #undef __FUNCT__
37 #define __FUNCT__ "ISLocalToGlobalMappingView"
38 /*@C
39     ISLocalToGlobalMappingView - View a local to global mapping
40 
41     Not Collective
42 
43     Input Parameters:
44 +   ltog - local to global mapping
45 -   viewer - viewer used to display the mapping; if PETSC_NULL the standard output viewer on the mapping's communicator is used
46 
47     Level: advanced
48 
49     Concepts: mapping^local to global
50 
51 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
52 @*/
53 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
54 {
55   PetscInt        i;
56   PetscMPIInt     rank;
57   PetscTruth      iascii;
58   PetscErrorCode  ierr;
59 
60   PetscFunctionBegin;
61   PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
62   if (!viewer) {
63     ierr = PetscViewerASCIIGetStdout(((PetscObject)mapping)->comm,&viewer);CHKERRQ(ierr);
64   }
65   PetscValidHeaderSpecific(viewer,PETSC_VIEWER_CLASSID,2);
66 
67   ierr = MPI_Comm_rank(((PetscObject)mapping)->comm,&rank);CHKERRQ(ierr);
68   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
69   if (iascii) {
70     for (i=0; i<mapping->n; i++) {
71       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
72     }
73     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
74   } else {
75     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
76   }
77 
78   PetscFunctionReturn(0);
79 }
80 
81 #undef __FUNCT__
82 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
83 /*@
84     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
85     ordering and a global parallel ordering.
86 
87     Not collective
88 
89     Input Parameter:
90 .   is - index set containing the global numbers for each local
91 
92     Output Parameter:
93 .   mapping - new mapping data structure
94 
95     Level: advanced
96 
97     Concepts: mapping^local to global
98 
99 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
100 @*/
101 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
102 {
103   PetscErrorCode ierr;
104   PetscInt       n;
105   const PetscInt *indices;
106   MPI_Comm       comm;
107 
108   PetscFunctionBegin;
109   PetscValidHeaderSpecific(is,IS_CLASSID,1);
110   PetscValidPointer(mapping,2);
111 
112   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
113   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
114   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
115   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
116   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
117 
118   PetscFunctionReturn(0);
119 }
120 
121 
122 #undef __FUNCT__
123 #define __FUNCT__ "ISLocalToGlobalMappingCreate"
124 /*@
125     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
126     ordering and a global parallel ordering.
127 
128     Not Collective, but communicator may have more than one process
129 
130     Input Parameters:
131 +   comm - MPI communicator
132 .   n - the number of local elements
133 -   indices - the global index for each local element
134 
135     Output Parameter:
136 .   mapping - new mapping data structure
137 
138     Level: advanced
139 
140     Concepts: mapping^local to global
141 
142 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
143 @*/
144 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
145 {
146   PetscErrorCode ierr;
147   PetscInt       *in;
148 
149   PetscFunctionBegin;
150   if (n) PetscValidIntPointer(indices,3);
151   PetscValidPointer(mapping,4);
152   ierr = PetscMalloc(n*sizeof(PetscInt),&in);CHKERRQ(ierr);
153   ierr = PetscMemcpy(in,indices,n*sizeof(PetscInt));CHKERRQ(ierr);
154   ierr = ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);CHKERRQ(ierr);
155   PetscFunctionReturn(0);
156 }
157 
158 #undef __FUNCT__
159 #define __FUNCT__ "ISLocalToGlobalMappingCreateNC"
160 /*@C
161     ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
162     ordering and a global parallel ordering.
163 
164     Not Collective, but communicator may have more than one process
165 
166     Input Parameters:
167 +   comm - MPI communicator
168 .   n - the number of local elements
169 -   indices - the global index for each local element
170 
171     Output Parameter:
172 .   mapping - new mapping data structure
173 
174     Level: developer
175 
176     Notes: Does not copy the indices, just keeps the pointer to the indices. The ISLocalToGlobalMappingDestroy()
177     will free the space so it must be obtained with PetscMalloc() and it must not be freed elsewhere.
178 
179     Concepts: mapping^local to global
180 
181 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
182 @*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreateNC(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (n) PetscValidIntPointer(indices,3);
  PetscValidPointer(mapping,4);
  *mapping = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  /* with static libraries the IS package may not be registered yet; make sure it is */
  ierr = ISInitializePackage(PETSC_NULL);CHKERRQ(ierr);
#endif

  ierr = PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_CLASSID,0,"ISLocalToGlobalMapping",
			   cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);CHKERRQ(ierr);

  (*mapping)->n       = n;
  /* "NC" = no copy: the mapping takes ownership of indices (must come from
     PetscMalloc(), see the Notes above); the const qualifier is cast away
     because ISLocalToGlobalMappingDestroy() frees this pointer */
  (*mapping)->indices = (PetscInt*)indices;
  ierr = PetscLogObjectMemory(*mapping,n*sizeof(PetscInt));CHKERRQ(ierr);

  /*
    Do not create the global to local mapping. This is only created if
    ISGlobalToLocalMapping() is called
  */
  (*mapping)->globals = 0;
  PetscFunctionReturn(0);
}
209 
210 #undef __FUNCT__
211 #define __FUNCT__ "ISLocalToGlobalMappingBlock"
212 /*@
213     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
214        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
215        and VecSetLocalToGlobalMappingBlock().
216 
217     Not Collective, but communicator may have more than one process
218 
219     Input Parameters:
220 +    inmap - original point-wise mapping
221 -    bs - block size
222 
223     Output Parameter:
224 .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
225 
226     Level: advanced
227 
228     Concepts: mapping^local to global
229 
230 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
231 @*/
232 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
233 {
234   PetscErrorCode ierr;
235   PetscInt       *ii,i,n;
236 
237   PetscFunctionBegin;
238   PetscValidHeaderSpecific(inmap,IS_LTOGM_CLASSID,1);
239   PetscValidPointer(outmap,1);
240   if (bs > 1) {
241     n    = inmap->n/bs;
242     if (n*bs != inmap->n) SETERRQ(PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
243     ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr);
244     for (i=0; i<n; i++) {
245       ii[i] = inmap->indices[bs*i]/bs;
246     }
247     ierr = ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,outmap);CHKERRQ(ierr);
248     ierr = PetscFree(ii);CHKERRQ(ierr);
249   } else {
250     ierr    = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr);
251     *outmap = inmap;
252   }
253   PetscFunctionReturn(0);
254 }
255 
256 #undef __FUNCT__
257 #define __FUNCT__ "ISLocalToGlobalMappingDestroy"
258 /*@
259    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
260    ordering and a global parallel ordering.
261 
262    Not Collective
263 
264    Input Parameters:
265 .  mapping - mapping data structure
266 
267    Level: advanced
268 
269 .seealso: ISLocalToGlobalMappingCreate()
270 @*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  /* reference counted: only the final destroy releases the storage */
  if (--((PetscObject)mapping)->refct > 0) PetscFunctionReturn(0);
  /* indices is owned by the mapping (handed over in ISLocalToGlobalMappingCreateNC()) */
  ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
  /* globals is the lazily built global->local table; it is 0 until
     ISGlobalToLocalMappingSetUp_Private() runs — freed unconditionally here,
     matching the rest of the file's PetscFree usage */
  ierr = PetscFree(mapping->globals);CHKERRQ(ierr);
  ierr = PetscHeaderDestroy(mapping);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
282 
283 #undef __FUNCT__
284 #define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
285 /*@
286     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
287     a new index set using the global numbering defined in an ISLocalToGlobalMapping
288     context.
289 
290     Not collective
291 
292     Input Parameters:
293 +   mapping - mapping between local and global numbering
294 -   is - index set in local numbering
295 
296     Output Parameters:
297 .   newis - index set in global numbering
298 
299     Level: advanced
300 
301     Concepts: mapping^local to global
302 
303 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
304           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
305 @*/
306 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
307 {
308   PetscErrorCode ierr;
309   PetscInt       n,i,*idxmap,*idxout,Nmax = mapping->n;
310   const PetscInt *idxin;
311 
312   PetscFunctionBegin;
313   PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
314   PetscValidHeaderSpecific(is,IS_CLASSID,2);
315   PetscValidPointer(newis,3);
316 
317   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
318   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
319   idxmap = mapping->indices;
320 
321   ierr = PetscMalloc(n*sizeof(PetscInt),&idxout);CHKERRQ(ierr);
322   for (i=0; i<n; i++) {
323     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
324     idxout[i] = idxmap[idxin[i]];
325   }
326   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
327   ierr = ISCreateGeneralNC(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
328   PetscFunctionReturn(0);
329 }
330 
331 /*MC
332    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
333    and converts them to the global numbering.
334 
335    Not collective
336 
337    Input Parameters:
338 +  mapping - the local to global mapping context
339 .  N - number of integers
340 -  in - input indices in local numbering
341 
342    Output Parameter:
343 .  out - indices in global numbering
344 
345    Synopsis:
346    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])
347 
348    Notes:
349    The in and out array parameters may be identical.
350 
351    Level: advanced
352 
353 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
354           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
355           AOPetscToApplication(), ISGlobalToLocalMappingApply()
356 
357     Concepts: mapping^local to global
358 
359 M*/
360 
361 /* -----------------------------------------------------------------------------------------*/
362 
363 #undef __FUNCT__
364 #define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
365 /*
366     Creates the global fields in the ISLocalToGlobalMapping structure
367 */
368 static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
369 {
370   PetscErrorCode ierr;
371   PetscInt       i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
372 
373   PetscFunctionBegin;
374   end   = 0;
375   start = 100000000;
376 
377   for (i=0; i<n; i++) {
378     if (idx[i] < 0) continue;
379     if (idx[i] < start) start = idx[i];
380     if (idx[i] > end)   end   = idx[i];
381   }
382   if (start > end) {start = 0; end = -1;}
383   mapping->globalstart = start;
384   mapping->globalend   = end;
385 
386   ierr             = PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);CHKERRQ(ierr);
387   mapping->globals = globals;
388   for (i=0; i<end-start+1; i++) {
389     globals[i] = -1;
390   }
391   for (i=0; i<n; i++) {
392     if (idx[i] < 0) continue;
393     globals[idx[i] - start] = i;
394   }
395 
396   ierr = PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));CHKERRQ(ierr);
397   PetscFunctionReturn(0);
398 }
399 
400 #undef __FUNCT__
401 #define __FUNCT__ "ISGlobalToLocalMappingApply"
402 /*@
403     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
404     specified with a global numbering.
405 
406     Not collective
407 
408     Input Parameters:
409 +   mapping - mapping between local and global numbering
410 .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
411            IS_GTOLM_DROP - drops the indices with no local value from the output list
412 .   n - number of global indices to map
413 -   idx - global indices to map
414 
415     Output Parameters:
416 +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
417 -   idxout - local index of each global index, one must pass in an array long enough
418              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
419              idxout == PETSC_NULL to determine the required length (returned in nout)
420              and then allocate the required space and call ISGlobalToLocalMappingApply()
421              a second time to set the values.
422 
423     Notes:
424     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
425 
426     This is not scalable in memory usage. Each processor requires O(Nglobal) size
427     array to compute these.
428 
429     Level: advanced
430 
431     Concepts: mapping^global to local
432 
433 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
434           ISLocalToGlobalMappingDestroy()
435 @*/
436 PetscErrorCode PETSCVEC_DLLEXPORT ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
437                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
438 {
439   PetscInt       i,*globals,nf = 0,tmp,start,end;
440   PetscErrorCode ierr;
441 
442   PetscFunctionBegin;
443   PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
444   if (!mapping->globals) {
445     ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
446   }
447   globals = mapping->globals;
448   start   = mapping->globalstart;
449   end     = mapping->globalend;
450 
451   if (type == IS_GTOLM_MASK) {
452     if (idxout) {
453       for (i=0; i<n; i++) {
454         if (idx[i] < 0) idxout[i] = idx[i];
455         else if (idx[i] < start) idxout[i] = -1;
456         else if (idx[i] > end)   idxout[i] = -1;
457         else                     idxout[i] = globals[idx[i] - start];
458       }
459     }
460     if (nout) *nout = n;
461   } else {
462     if (idxout) {
463       for (i=0; i<n; i++) {
464         if (idx[i] < 0) continue;
465         if (idx[i] < start) continue;
466         if (idx[i] > end) continue;
467         tmp = globals[idx[i] - start];
468         if (tmp < 0) continue;
469         idxout[nf++] = tmp;
470       }
471     } else {
472       for (i=0; i<n; i++) {
473         if (idx[i] < 0) continue;
474         if (idx[i] < start) continue;
475         if (idx[i] > end) continue;
476         tmp = globals[idx[i] - start];
477         if (tmp < 0) continue;
478         nf++;
479       }
480     }
481     if (nout) *nout = nf;
482   }
483 
484   PetscFunctionReturn(0);
485 }
486 
487 #undef __FUNCT__
488 #define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
489 /*@C
490     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
491      each index shared by more than one processor
492 
493     Collective on ISLocalToGlobalMapping
494 
495     Input Parameters:
496 .   mapping - the mapping from local to global indexing
497 
498     Output Parameter:
499 +   nproc - number of processors that are connected to this one
500 .   proc - neighboring processors
501 .   numproc - number of indices for each subdomain (processor)
502 -   indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)
503 
504     Level: advanced
505 
506     Concepts: mapping^local to global
507 
508     Fortran Usage:
509 $        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by
510 $        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
511           PetscInt indices[nproc][numprocmax],ierr)
512         There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and
513         indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.
514 
515 
516 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
517           ISLocalToGlobalMappingRestoreInfo()
518 @*/
519 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
520 {
521   PetscErrorCode ierr;
522   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
523   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
524   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
525   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
526   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
527   PetscInt       first_procs,first_numprocs,*first_indices;
528   MPI_Request    *recv_waits,*send_waits;
529   MPI_Status     recv_status,*send_status,*recv_statuses;
530   MPI_Comm       comm = ((PetscObject)mapping)->comm;
531   PetscTruth     debug = PETSC_FALSE;
532 
533   PetscFunctionBegin;
534   PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
535   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
536   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
537   if (size == 1) {
538     *nproc         = 0;
539     *procs         = PETSC_NULL;
540     ierr           = PetscMalloc(sizeof(PetscInt),numprocs);CHKERRQ(ierr);
541     (*numprocs)[0] = 0;
542     ierr           = PetscMalloc(sizeof(PetscInt*),indices);CHKERRQ(ierr);
543     (*indices)[0]  = PETSC_NULL;
544     PetscFunctionReturn(0);
545   }
546 
547   ierr = PetscOptionsGetTruth(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,PETSC_NULL);CHKERRQ(ierr);
548 
549   /*
550     Notes on ISLocalToGlobalMappingGetInfo
551 
552     globally owned node - the nodes that have been assigned to this processor in global
553            numbering, just for this routine.
554 
555     nontrivial globally owned node - node assigned to this processor that is on a subdomain
556            boundary (i.e. is has more than one local owner)
557 
558     locally owned node - node that exists on this processors subdomain
559 
560     nontrivial locally owned node - node that is not in the interior (i.e. has more than one
561            local subdomain
562   */
563   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
564   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
565   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);
566 
567   for (i=0; i<n; i++) {
568     if (lindices[i] > max) max = lindices[i];
569   }
570   ierr   = MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
571   Ng++;
572   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
573   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
574   scale  = Ng/size + 1;
575   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
576   rstart = scale*rank;
577 
578   /* determine ownership ranges of global indices */
579   ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
580   ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
581 
582   /* determine owners of each local node  */
583   ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr);
584   for (i=0; i<n; i++) {
585     proc             = lindices[i]/scale; /* processor that globally owns this index */
586     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
587     owner[i]         = proc;
588     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
589   }
590   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
591   ierr = PetscInfo1(mapping,"Number of global owners for my local data %d\n",nsends);CHKERRQ(ierr);
592 
593   /* inform other processors of number of messages and max length*/
594   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
595   ierr = PetscInfo1(mapping,"Number of local owners for my global data %d\n",nrecvs);CHKERRQ(ierr);
596 
597   /* post receives for owned rows */
598   ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);CHKERRQ(ierr);
599   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
600   for (i=0; i<nrecvs; i++) {
601     ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
602   }
603 
604   /* pack messages containing lists of local nodes to owners */
605   ierr       = PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);CHKERRQ(ierr);
606   ierr       = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
607   starts[0]  = 0;
608   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
609   for (i=0; i<n; i++) {
610     sends[starts[owner[i]]++] = lindices[i];
611     sends[starts[owner[i]]++] = i;
612   }
613   ierr = PetscFree(owner);CHKERRQ(ierr);
614   starts[0]  = 0;
615   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
616 
617   /* send the messages */
618   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
619   ierr = PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);CHKERRQ(ierr);
620   cnt = 0;
621   for (i=0; i<size; i++) {
622     if (nprocs[2*i]) {
623       ierr      = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
624       dest[cnt] = i;
625       cnt++;
626     }
627   }
628   ierr = PetscFree(starts);CHKERRQ(ierr);
629 
630   /* wait on receives */
631   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);CHKERRQ(ierr);
632   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);CHKERRQ(ierr);
633   cnt  = nrecvs;
634   ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);CHKERRQ(ierr);
635   ierr = PetscMemzero(nownedsenders,ng*sizeof(PetscInt));CHKERRQ(ierr);
636   while (cnt) {
637     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
638     /* unpack receives into our local space */
639     ierr           = MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);CHKERRQ(ierr);
640     source[imdex]  = recv_status.MPI_SOURCE;
641     len[imdex]     = len[imdex]/2;
642     /* count how many local owners for each of my global owned indices */
643     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
644     cnt--;
645   }
646   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
647 
648   /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
649   nowned  = 0;
650   nownedm = 0;
651   for (i=0; i<ng; i++) {
652     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
653   }
654 
655   /* create single array to contain rank of all local owners of each globally owned index */
656   ierr      = PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);CHKERRQ(ierr);
657   ierr      = PetscMalloc((ng+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
658   starts[0] = 0;
659   for (i=1; i<ng; i++) {
660     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
661     else starts[i] = starts[i-1];
662   }
663 
664   /* for each nontrival globally owned node list all arriving processors */
665   for (i=0; i<nrecvs; i++) {
666     for (j=0; j<len[i]; j++) {
667       node = recvs[2*i*nmax+2*j]-rstart;
668       if (nownedsenders[node] > 1) {
669         ownedsenders[starts[node]++] = source[i];
670       }
671     }
672   }
673 
674   if (debug) { /* -----------------------------------  */
675     starts[0]    = 0;
676     for (i=1; i<ng; i++) {
677       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
678       else starts[i] = starts[i-1];
679     }
680     for (i=0; i<ng; i++) {
681       if (nownedsenders[i] > 1) {
682         ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
683         for (j=0; j<nownedsenders[i]; j++) {
684           ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
685         }
686         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
687       }
688     }
689     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
690   }/* -----------------------------------  */
691 
692   /* wait on original sends */
693   if (nsends) {
694     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
695     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
696     ierr = PetscFree(send_status);CHKERRQ(ierr);
697   }
698   ierr = PetscFree(send_waits);CHKERRQ(ierr);
699   ierr = PetscFree(sends);CHKERRQ(ierr);
700   ierr = PetscFree(nprocs);CHKERRQ(ierr);
701 
702   /* pack messages to send back to local owners */
703   starts[0]    = 0;
704   for (i=1; i<ng; i++) {
705     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
706     else starts[i] = starts[i-1];
707   }
708   nsends2 = nrecvs;
709   ierr    = PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); /* length of each message */
710   for (i=0; i<nrecvs; i++) {
711     nprocs[i] = 1;
712     for (j=0; j<len[i]; j++) {
713       node = recvs[2*i*nmax+2*j]-rstart;
714       if (nownedsenders[node] > 1) {
715         nprocs[i] += 2 + nownedsenders[node];
716       }
717     }
718   }
719   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
720   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);CHKERRQ(ierr);
721   ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);CHKERRQ(ierr);
722   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
723   /*
724      Each message is 1 + nprocs[i] long, and consists of
725        (0) the number of nodes being sent back
726        (1) the local node number,
727        (2) the number of processors sharing it,
728        (3) the processors sharing it
729   */
730   for (i=0; i<nsends2; i++) {
731     cnt = 1;
732     sends2[starts2[i]] = 0;
733     for (j=0; j<len[i]; j++) {
734       node = recvs[2*i*nmax+2*j]-rstart;
735       if (nownedsenders[node] > 1) {
736         sends2[starts2[i]]++;
737         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
738         sends2[starts2[i]+cnt++] = nownedsenders[node];
739         ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));CHKERRQ(ierr);
740         cnt += nownedsenders[node];
741       }
742     }
743   }
744 
745   /* receive the message lengths */
746   nrecvs2 = nsends;
747   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);CHKERRQ(ierr);
748   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);CHKERRQ(ierr);
749   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
750   for (i=0; i<nrecvs2; i++) {
751     ierr = MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);CHKERRQ(ierr);
752   }
753 
754   /* send the message lengths */
755   for (i=0; i<nsends2; i++) {
756     ierr = MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);CHKERRQ(ierr);
757   }
758 
759   /* wait on receives of lens */
760   if (nrecvs2) {
761     ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
762     ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
763     ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
764   }
765   ierr = PetscFree(recv_waits);
766 
767   starts3[0] = 0;
768   nt         = 0;
769   for (i=0; i<nrecvs2-1; i++) {
770     starts3[i+1] = starts3[i] + lens2[i];
771     nt          += lens2[i];
772   }
773   nt += lens2[nrecvs2-1];
774 
775   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);CHKERRQ(ierr);
776   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
777   for (i=0; i<nrecvs2; i++) {
778     ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
779   }
780 
781   /* send the messages */
782   ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
783   for (i=0; i<nsends2; i++) {
784     ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
785   }
786 
787   /* wait on receives */
788   if (nrecvs2) {
789     ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
790     ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
791     ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
792   }
793   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
794   ierr = PetscFree(nprocs);CHKERRQ(ierr);
795 
796   if (debug) { /* -----------------------------------  */
797     cnt = 0;
798     for (i=0; i<nrecvs2; i++) {
799       nt = recvs2[cnt++];
800       for (j=0; j<nt; j++) {
801         ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
802         for (k=0; k<recvs2[cnt+1]; k++) {
803           ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
804         }
805         cnt += 2 + recvs2[cnt+1];
806         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
807       }
808     }
809     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
810   } /* -----------------------------------  */
811 
812   /* count number subdomains for each local node */
813   ierr = PetscMalloc(size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
814   ierr = PetscMemzero(nprocs,size*sizeof(PetscInt));CHKERRQ(ierr);
815   cnt  = 0;
816   for (i=0; i<nrecvs2; i++) {
817     nt = recvs2[cnt++];
818     for (j=0; j<nt; j++) {
819       for (k=0; k<recvs2[cnt+1]; k++) {
820         nprocs[recvs2[cnt+2+k]]++;
821       }
822       cnt += 2 + recvs2[cnt+1];
823     }
824   }
825   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
826   *nproc    = nt;
827   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),procs);CHKERRQ(ierr);
828   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);CHKERRQ(ierr);
829   ierr = PetscMalloc((nt+1)*sizeof(PetscInt*),indices);CHKERRQ(ierr);
830   ierr = PetscMalloc(size*sizeof(PetscInt),&bprocs);CHKERRQ(ierr);
831   cnt       = 0;
832   for (i=0; i<size; i++) {
833     if (nprocs[i] > 0) {
834       bprocs[i]        = cnt;
835       (*procs)[cnt]    = i;
836       (*numprocs)[cnt] = nprocs[i];
837       ierr             = PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);CHKERRQ(ierr);
838       cnt++;
839     }
840   }
841 
842   /* make the list of subdomains for each nontrivial local node */
843   ierr = PetscMemzero(*numprocs,nt*sizeof(PetscInt));CHKERRQ(ierr);
844   cnt  = 0;
845   for (i=0; i<nrecvs2; i++) {
846     nt = recvs2[cnt++];
847     for (j=0; j<nt; j++) {
848       for (k=0; k<recvs2[cnt+1]; k++) {
849         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
850       }
851       cnt += 2 + recvs2[cnt+1];
852     }
853   }
854   ierr = PetscFree(bprocs);CHKERRQ(ierr);
855   ierr = PetscFree(recvs2);CHKERRQ(ierr);
856 
857   /* sort the node indexing by their global numbers */
858   nt = *nproc;
859   for (i=0; i<nt; i++) {
860     ierr = PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
861     for (j=0; j<(*numprocs)[i]; j++) {
862       tmp[j] = lindices[(*indices)[i][j]];
863     }
864     ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
865     ierr = PetscFree(tmp);CHKERRQ(ierr);
866   }
867 
868   if (debug) { /* -----------------------------------  */
869     nt = *nproc;
870     for (i=0; i<nt; i++) {
871       ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
872       for (j=0; j<(*numprocs)[i]; j++) {
873         ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
874       }
875       ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
876     }
877     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
878   } /* -----------------------------------  */
879 
880   /* wait on sends */
881   if (nsends2) {
882     ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
883     ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
884     ierr = PetscFree(send_status);CHKERRQ(ierr);
885   }
886 
887   ierr = PetscFree(starts3);CHKERRQ(ierr);
888   ierr = PetscFree(dest);CHKERRQ(ierr);
889   ierr = PetscFree(send_waits);CHKERRQ(ierr);
890 
891   ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
892   ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
893   ierr = PetscFree(starts);CHKERRQ(ierr);
894   ierr = PetscFree(starts2);CHKERRQ(ierr);
895   ierr = PetscFree(lens2);CHKERRQ(ierr);
896 
897   ierr = PetscFree(source);CHKERRQ(ierr);
898   ierr = PetscFree(len);CHKERRQ(ierr);
899   ierr = PetscFree(recvs);CHKERRQ(ierr);
900   ierr = PetscFree(nprocs);CHKERRQ(ierr);
901   ierr = PetscFree(sends2);CHKERRQ(ierr);
902 
903   /* put the information about myself as the first entry in the list */
904   first_procs    = (*procs)[0];
905   first_numprocs = (*numprocs)[0];
906   first_indices  = (*indices)[0];
907   for (i=0; i<*nproc; i++) {
908     if ((*procs)[i] == rank) {
909       (*procs)[0]    = (*procs)[i];
910       (*numprocs)[0] = (*numprocs)[i];
911       (*indices)[0]  = (*indices)[i];
912       (*procs)[i]    = first_procs;
913       (*numprocs)[i] = first_numprocs;
914       (*indices)[i]  = first_indices;
915       break;
916     }
917   }
918   PetscFunctionReturn(0);
919 }
920 
921 #undef __FUNCT__
922 #define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
923 /*@C
924     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
925 
926     Collective on ISLocalToGlobalMapping
927 
    Input Parameters:
+   mapping - the mapping from local to global indexing
.   nproc - number of processors that are connected to this one
.   procs - neighboring processors
.   numprocs - number of indices for each processor
-   indices - indices of local nodes shared with neighbor (sorted by global numbering)
936 
937     Level: advanced
938 
939 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
940           ISLocalToGlobalMappingGetInfo()
941 @*/
942 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
943 {
944   PetscErrorCode ierr;
945   PetscInt       i;
946 
947   PetscFunctionBegin;
948   ierr = PetscFree(*procs);CHKERRQ(ierr);
949   ierr = PetscFree(*numprocs);CHKERRQ(ierr);
950   if (*indices) {
951     ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);
952     for (i=1; i<*nproc; i++) {
953       ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);
954     }
955     ierr = PetscFree(*indices);CHKERRQ(ierr);
956   }
957   PetscFunctionReturn(0);
958 }
959