xref: /petsc/src/vec/is/utils/isltog.c (revision 9be4fee84348b1eaee2eca38a590c7003da9a71b)
1 #define PETSCVEC_DLL
2 
3 #include "petscvec.h"   /*I "petscsys.h" I*/
4 #include "src/vec/is/isimpl.h"    /*I "petscis.h"  I*/
5 
6 PetscCookie PETSCVEC_DLLEXPORT IS_LTOGM_COOKIE = -1;
7 
8 #undef __FUNCT__
9 #define __FUNCT__ "ISLocalToGlobalMappingGetSize"
10 /*@C
11     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
12 
13     Not Collective
14 
15     Input Parameter:
16 .   ltog - local to global mapping
17 
18     Output Parameter:
19 .   n - the number of entries in the local mapping
20 
21     Level: advanced
22 
23     Concepts: mapping^local to global
24 
25 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
26 @*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
  PetscValidIntPointer(n,2);
  *n = mapping->n;   /* the number of local entries stored in the mapping */
  PetscFunctionReturn(0);
}
35 
36 #undef __FUNCT__
37 #define __FUNCT__ "ISLocalToGlobalMappingView"
38 /*@C
39     ISLocalToGlobalMappingView - View a local to global mapping
40 
41     Not Collective
42 
43     Input Parameters:
44 +   ltog - local to global mapping
45 -   viewer - viewer
46 
47     Level: advanced
48 
49     Concepts: mapping^local to global
50 
51 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
52 @*/
53 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
54 {
55   PetscInt        i;
56   PetscMPIInt     rank;
57   PetscTruth      iascii;
58   PetscErrorCode  ierr;
59 
60   PetscFunctionBegin;
61   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
62   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);
63   PetscValidHeaderSpecific(viewer,PETSC_VIEWER_COOKIE,2);
64 
65   ierr = MPI_Comm_rank(mapping->comm,&rank);CHKERRQ(ierr);
66   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
67   if (iascii) {
68     for (i=0; i<mapping->n; i++) {
69       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
70     }
71     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
72   } else {
73     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
74   }
75 
76   PetscFunctionReturn(0);
77 }
78 
79 #undef __FUNCT__
80 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
81 /*@C
82     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
83     ordering and a global parallel ordering.
84 
85     Not collective
86 
87     Input Parameter:
88 .   is - index set containing the global numbers for each local
89 
90     Output Parameter:
91 .   mapping - new mapping data structure
92 
93     Level: advanced
94 
95     Concepts: mapping^local to global
96 
97 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
98 @*/
99 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
100 {
101   PetscErrorCode ierr;
102   PetscInt      n,*indices;
103   MPI_Comm comm;
104 
105   PetscFunctionBegin;
106   PetscValidHeaderSpecific(is,IS_COOKIE,1);
107   PetscValidPointer(mapping,2);
108 
109   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
110   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
111   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
112   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
113   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
114 
115   PetscFunctionReturn(0);
116 }
117 
118 
119 #undef __FUNCT__
120 #define __FUNCT__ "ISLocalToGlobalMappingCreate"
121 /*@
122     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
123     ordering and a global parallel ordering.
124 
125     Not Collective, but communicator may have more than one process
126 
127     Input Parameters:
128 +   comm - MPI communicator
129 .   n - the number of local elements
130 -   indices - the global index for each local element
131 
132     Output Parameter:
133 .   mapping - new mapping data structure
134 
135     Level: advanced
136 
137     Concepts: mapping^local to global
138 
139 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
140 @*/
141 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
142 {
143   PetscErrorCode ierr;
144   PetscInt       *in;
145 
146   PetscFunctionBegin;
147   PetscValidIntPointer(indices,3);
148   PetscValidPointer(mapping,4);
149   ierr = PetscMalloc(n*sizeof(PetscInt),&in);CHKERRQ(ierr);
150   ierr = PetscMemcpy(in,indices,n*sizeof(PetscInt));CHKERRQ(ierr);
151   ierr = ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);CHKERRQ(ierr);
152   PetscFunctionReturn(0);
153 }
154 
155 #undef __FUNCT__
156 #define __FUNCT__ "ISLocalToGlobalMappingCreateNC"
157 /*@C
158     ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
159     ordering and a global parallel ordering.
160 
161     Not Collective, but communicator may have more than one process
162 
163     Input Parameters:
164 +   comm - MPI communicator
165 .   n - the number of local elements
166 -   indices - the global index for each local element
167 
168     Output Parameter:
169 .   mapping - new mapping data structure
170 
171     Level: developer
172 
173     Notes: Does not copy the indices, just keeps the pointer to the indices. The ISLocalToGlobalMappingDestroy()
174     will free the space so it must be obtained with PetscMalloc() and it must not be freed elsewhere.
175 
176     Concepts: mapping^local to global
177 
178 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
179 @*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreateNC(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* indices may be NULL when the mapping is empty (n == 0) */
  if (n) {
    PetscValidIntPointer(indices,3);
  }
  PetscValidPointer(mapping,4);
  *mapping = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  ierr = VecInitializePackage(PETSC_NULL);CHKERRQ(ierr);
#endif
  /* lazily register the logging cookie the first time any mapping is created */
  if (IS_LTOGM_COOKIE == -1) {
    ierr = PetscLogClassRegister(&IS_LTOGM_COOKIE,"IS Local to global mapping");CHKERRQ(ierr);
  }

  ierr = PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
                    cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(PetscInt));CHKERRQ(ierr);

  (*mapping)->n       = n;
  /* takes ownership of the caller's PetscMalloc'd array (no copy, see the
     Notes in the manual comment); freed by ISLocalToGlobalMappingDestroy() */
  (*mapping)->indices = (PetscInt*)indices;

  /*
      Do not create the global to local mapping. This is only created if
     ISGlobalToLocalMapping() is called
  */
  (*mapping)->globals = 0;
  PetscFunctionReturn(0);
}
211 
212 #undef __FUNCT__
213 #define __FUNCT__ "ISLocalToGlobalMappingBlock"
214 /*@
215     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
216        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
217        and VecSetLocalToGlobalMappingBlock().
218 
219     Not Collective, but communicator may have more than one process
220 
221     Input Parameters:
222 +    inmap - original point-wise mapping
223 -    bs - block size
224 
225     Output Parameter:
226 .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
227 
228     Level: advanced
229 
230     Concepts: mapping^local to global
231 
232 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
233 @*/
234 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
235 {
236   PetscErrorCode ierr;
237   PetscInt       *ii,i,n;
238 
239   PetscFunctionBegin;
240   if (bs > 1) {
241     n    = inmap->n/bs;
242     if (n*bs != inmap->n) SETERRQ(PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
243     ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr);
244     for (i=0; i<n; i++) {
245       ii[i] = inmap->indices[bs*i]/bs;
246     }
247     ierr = ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);CHKERRQ(ierr);
248     ierr = PetscFree(ii);CHKERRQ(ierr);
249   } else {
250     *outmap = inmap;
251     ierr    = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr);
252   }
253   PetscFunctionReturn(0);
254 }
255 
256 #undef __FUNCT__
257 #define __FUNCT__ "ISLocalToGlobalMappingDestroy"
258 /*@
259    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
260    ordering and a global parallel ordering.
261 
262    Not Collective
263 
264    Input Parameters:
265 .  mapping - mapping data structure
266 
267    Level: advanced
268 
269 .seealso: ISLocalToGlobalMappingCreate()
270 @*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  PetscValidPointer(mapping,1);
  /* reference counted: only the call that drops the count to zero frees storage */
  if (--mapping->refct > 0) PetscFunctionReturn(0);
  if (mapping->refct < 0) {
    SETERRQ(PETSC_ERR_PLIB,"Mapping already destroyed");
  }

  /* indices was either copied at creation (Create) or ownership was
     transferred to the mapping (CreateNC); either way it is freed here */
  ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
  /* globals only exists if the global-to-local table was ever built */
  if (mapping->globals) {ierr = PetscFree(mapping->globals);CHKERRQ(ierr);}
  ierr = PetscHeaderDestroy(mapping);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
286 
287 #undef __FUNCT__
288 #define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
289 /*@
290     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
291     a new index set using the global numbering defined in an ISLocalToGlobalMapping
292     context.
293 
294     Not collective
295 
296     Input Parameters:
297 +   mapping - mapping between local and global numbering
298 -   is - index set in local numbering
299 
300     Output Parameters:
301 .   newis - index set in global numbering
302 
303     Level: advanced
304 
305     Concepts: mapping^local to global
306 
307 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
308           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
309 @*/
310 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
311 {
312   PetscErrorCode ierr;
313   PetscInt            n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;
314 
315   PetscFunctionBegin;
316   PetscValidPointer(mapping,1);
317   PetscValidHeaderSpecific(is,IS_COOKIE,2);
318   PetscValidPointer(newis,3);
319 
320   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
321   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
322   idxmap = mapping->indices;
323 
324   ierr = PetscMalloc(n*sizeof(PetscInt),&idxout);CHKERRQ(ierr);
325   for (i=0; i<n; i++) {
326     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
327     idxout[i] = idxmap[idxin[i]];
328   }
329   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
330   ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
331   ierr = PetscFree(idxout);CHKERRQ(ierr);
332   PetscFunctionReturn(0);
333 }
334 
335 /*MC
336    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
337    and converts them to the global numbering.
338 
339    Not collective
340 
341    Input Parameters:
342 +  mapping - the local to global mapping context
343 .  N - number of integers
344 -  in - input indices in local numbering
345 
346    Output Parameter:
347 .  out - indices in global numbering
348 
349    Synopsis:
350    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])
351 
352    Notes:
353    The in and out array parameters may be identical.
354 
355    Level: advanced
356 
357 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
358           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
359           AOPetscToApplication(), ISGlobalToLocalMappingApply()
360 
361     Concepts: mapping^local to global
362 
363 M*/
364 
365 /* -----------------------------------------------------------------------------------------*/
366 
367 #undef __FUNCT__
368 #define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
369 /*
370     Creates the global fields in the ISLocalToGlobalMapping structure
371 */
372 static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
373 {
374   PetscErrorCode ierr;
375   PetscInt            i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
376 
377   PetscFunctionBegin;
378   end   = 0;
379   start = 100000000;
380 
381   for (i=0; i<n; i++) {
382     if (idx[i] < 0) continue;
383     if (idx[i] < start) start = idx[i];
384     if (idx[i] > end)   end   = idx[i];
385   }
386   if (start > end) {start = 0; end = -1;}
387   mapping->globalstart = start;
388   mapping->globalend   = end;
389 
390   ierr             = PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);CHKERRQ(ierr);
391   mapping->globals = globals;
392   for (i=0; i<end-start+1; i++) {
393     globals[i] = -1;
394   }
395   for (i=0; i<n; i++) {
396     if (idx[i] < 0) continue;
397     globals[idx[i] - start] = i;
398   }
399 
400   ierr = PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));CHKERRQ(ierr);
401   PetscFunctionReturn(0);
402 }
403 
404 #undef __FUNCT__
405 #define __FUNCT__ "ISGlobalToLocalMappingApply"
406 /*@
407     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
408     specified with a global numbering.
409 
410     Not collective
411 
412     Input Parameters:
413 +   mapping - mapping between local and global numbering
414 .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
415            IS_GTOLM_DROP - drops the indices with no local value from the output list
416 .   n - number of global indices to map
417 -   idx - global indices to map
418 
419     Output Parameters:
420 +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
421 -   idxout - local index of each global index, one must pass in an array long enough
422              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
423              idxout == PETSC_NULL to determine the required length (returned in nout)
424              and then allocate the required space and call ISGlobalToLocalMappingApply()
425              a second time to set the values.
426 
427     Notes:
428     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
429 
430     This is not scalable in memory usage. Each processor requires O(Nglobal) size
431     array to compute these.
432 
433     Level: advanced
434 
435     Concepts: mapping^global to local
436 
437 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
438           ISLocalToGlobalMappingDestroy()
439 @*/
440 PetscErrorCode PETSCVEC_DLLEXPORT ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
441                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
442 {
443   PetscInt i,*globals,nf = 0,tmp,start,end;
444   PetscErrorCode ierr;
445 
446   PetscFunctionBegin;
447   if (!mapping->globals) {
448     ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
449   }
450   globals = mapping->globals;
451   start   = mapping->globalstart;
452   end     = mapping->globalend;
453 
454   if (type == IS_GTOLM_MASK) {
455     if (idxout) {
456       for (i=0; i<n; i++) {
457         if (idx[i] < 0) idxout[i] = idx[i];
458         else if (idx[i] < start) idxout[i] = -1;
459         else if (idx[i] > end)   idxout[i] = -1;
460         else                     idxout[i] = globals[idx[i] - start];
461       }
462     }
463     if (nout) *nout = n;
464   } else {
465     if (idxout) {
466       for (i=0; i<n; i++) {
467         if (idx[i] < 0) continue;
468         if (idx[i] < start) continue;
469         if (idx[i] > end) continue;
470         tmp = globals[idx[i] - start];
471         if (tmp < 0) continue;
472         idxout[nf++] = tmp;
473       }
474     } else {
475       for (i=0; i<n; i++) {
476         if (idx[i] < 0) continue;
477         if (idx[i] < start) continue;
478         if (idx[i] > end) continue;
479         tmp = globals[idx[i] - start];
480         if (tmp < 0) continue;
481         nf++;
482       }
483     }
484     if (nout) *nout = nf;
485   }
486 
487   PetscFunctionReturn(0);
488 }
489 
490 #undef __FUNCT__
491 #define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
492 /*@C
493     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
494      each index shared by more than one processor
495 
496     Collective on ISLocalToGlobalMapping
497 
498     Input Parameters:
499 .   mapping - the mapping from local to global indexing
500 
501     Output Parameter:
502 +   nproc - number of processors that are connected to this one
503 .   proc - neighboring processors
504 .   numproc - number of indices for each subdomain (processor)
505 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
506 
507     Level: advanced
508 
509     Concepts: mapping^local to global
510 
511 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
512           ISLocalToGlobalMappingRestoreInfo()
513 @*/
514 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
515 {
516   PetscErrorCode ierr;
517   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
518   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
519   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
520   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
521   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
522   PetscInt       first_procs,first_numprocs,*first_indices;
523   MPI_Request    *recv_waits,*send_waits;
524   MPI_Status     recv_status,*send_status,*recv_statuses;
525   MPI_Comm       comm = mapping->comm;
526   PetscTruth     debug = PETSC_FALSE;
527 
528   PetscFunctionBegin;
529   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
530   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
531   if (size == 1) {
532     *nproc         = 0;
533     *procs         = PETSC_NULL;
534     ierr           = PetscMalloc(sizeof(PetscInt),numprocs);CHKERRQ(ierr);
535     (*numprocs)[0] = 0;
536     ierr           = PetscMalloc(sizeof(PetscInt*),indices);CHKERRQ(ierr);
537     (*indices)[0]  = PETSC_NULL;
538     PetscFunctionReturn(0);
539   }
540 
541   ierr = PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);CHKERRQ(ierr);
542 
543   /*
544     Notes on ISLocalToGlobalMappingGetInfo
545 
546     globally owned node - the nodes that have been assigned to this processor in global
547            numbering, just for this routine.
548 
549     nontrivial globally owned node - node assigned to this processor that is on a subdomain
550            boundary (i.e. is has more than one local owner)
551 
552     locally owned node - node that exists on this processors subdomain
553 
554     nontrivial locally owned node - node that is not in the interior (i.e. has more than one
554            local subdomain)
556   */
557   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
558   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
559   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);
560 
561   for (i=0; i<n; i++) {
562     if (lindices[i] > max) max = lindices[i];
563   }
564   ierr   = MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
565   Ng++;
566   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
567   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
568   scale  = Ng/size + 1;
569   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
570   rstart = scale*rank;
571 
572   /* determine ownership ranges of global indices */
573   ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
574   ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
575 
576   /* determine owners of each local node  */
577   ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr);
578   for (i=0; i<n; i++) {
579     proc             = lindices[i]/scale; /* processor that globally owns this index */
580     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
581     owner[i]         = proc;
582     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
583   }
584   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
585   ierr = PetscVerboseInfo((0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends));CHKERRQ(ierr);
586 
587   /* inform other processors of number of messages and max length*/
588   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
589   ierr = PetscVerboseInfo((0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs));CHKERRQ(ierr);
590 
591   /* post receives for owned rows */
592   ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);CHKERRQ(ierr);
593   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
594   for (i=0; i<nrecvs; i++) {
595     ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
596   }
597 
598   /* pack messages containing lists of local nodes to owners */
599   ierr       = PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);CHKERRQ(ierr);
600   ierr       = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
601   starts[0]  = 0;
602   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
603   for (i=0; i<n; i++) {
604     sends[starts[owner[i]]++] = lindices[i];
605     sends[starts[owner[i]]++] = i;
606   }
607   ierr = PetscFree(owner);CHKERRQ(ierr);
608   starts[0]  = 0;
609   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
610 
611   /* send the messages */
612   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
613   ierr = PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);CHKERRQ(ierr);
614   cnt = 0;
615   for (i=0; i<size; i++) {
616     if (nprocs[2*i]) {
617       ierr      = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
618       dest[cnt] = i;
619       cnt++;
620     }
621   }
622   ierr = PetscFree(starts);CHKERRQ(ierr);
623 
624   /* wait on receives */
625   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);CHKERRQ(ierr);
626   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);CHKERRQ(ierr);
627   cnt  = nrecvs;
628   ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);CHKERRQ(ierr);
629   ierr = PetscMemzero(nownedsenders,ng*sizeof(PetscInt));CHKERRQ(ierr);
630   while (cnt) {
631     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
632     /* unpack receives into our local space */
633     ierr           = MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);CHKERRQ(ierr);
634     source[imdex]  = recv_status.MPI_SOURCE;
635     len[imdex]     = len[imdex]/2;
636     /* count how many local owners for each of my global owned indices */
637     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
638     cnt--;
639   }
640   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
641 
642   /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
643   nowned  = 0;
644   nownedm = 0;
645   for (i=0; i<ng; i++) {
646     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
647   }
648 
649   /* create single array to contain rank of all local owners of each globally owned index */
650   ierr      = PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);CHKERRQ(ierr);
651   ierr      = PetscMalloc((ng+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
652   starts[0] = 0;
653   for (i=1; i<ng; i++) {
654     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
655     else starts[i] = starts[i-1];
656   }
657 
658   /* for each nontrival globally owned node list all arriving processors */
659   for (i=0; i<nrecvs; i++) {
660     for (j=0; j<len[i]; j++) {
661       node = recvs[2*i*nmax+2*j]-rstart;
662       if (nownedsenders[node] > 1) {
663         ownedsenders[starts[node]++] = source[i];
664       }
665     }
666   }
667 
668   if (debug) { /* -----------------------------------  */
669     starts[0]    = 0;
670     for (i=1; i<ng; i++) {
671       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
672       else starts[i] = starts[i-1];
673     }
674     for (i=0; i<ng; i++) {
675       if (nownedsenders[i] > 1) {
676         ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
677         for (j=0; j<nownedsenders[i]; j++) {
678           ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
679         }
680         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
681       }
682     }
683     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
684   }/* -----------------------------------  */
685 
686   /* wait on original sends */
687   if (nsends) {
688     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
689     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
690     ierr = PetscFree(send_status);CHKERRQ(ierr);
691   }
692   ierr = PetscFree(send_waits);CHKERRQ(ierr);
693   ierr = PetscFree(sends);CHKERRQ(ierr);
694   ierr = PetscFree(nprocs);CHKERRQ(ierr);
695 
696   /* pack messages to send back to local owners */
697   starts[0]    = 0;
698   for (i=1; i<ng; i++) {
699     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
700     else starts[i] = starts[i-1];
701   }
702   nsends2 = nrecvs;
703   ierr    = PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); /* length of each message */
704   for (i=0; i<nrecvs; i++) {
705     nprocs[i] = 1;
706     for (j=0; j<len[i]; j++) {
707       node = recvs[2*i*nmax+2*j]-rstart;
708       if (nownedsenders[node] > 1) {
709         nprocs[i] += 2 + nownedsenders[node];
710       }
711     }
712   }
713   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
714   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);CHKERRQ(ierr);
715   ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);CHKERRQ(ierr);
716   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
717   /*
718      Each message is 1 + nprocs[i] long, and consists of
719        (0) the number of nodes being sent back
720        (1) the local node number,
721        (2) the number of processors sharing it,
722        (3) the processors sharing it
723   */
724   for (i=0; i<nsends2; i++) {
725     cnt = 1;
726     sends2[starts2[i]] = 0;
727     for (j=0; j<len[i]; j++) {
728       node = recvs[2*i*nmax+2*j]-rstart;
729       if (nownedsenders[node] > 1) {
730         sends2[starts2[i]]++;
731         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
732         sends2[starts2[i]+cnt++] = nownedsenders[node];
733         ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));CHKERRQ(ierr);
734         cnt += nownedsenders[node];
735       }
736     }
737   }
738 
739   /* receive the message lengths */
740   nrecvs2 = nsends;
741   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);CHKERRQ(ierr);
742   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);CHKERRQ(ierr);
743   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
744   for (i=0; i<nrecvs2; i++) {
745     ierr = MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);CHKERRQ(ierr);
746   }
747 
748   /* send the message lengths */
749   for (i=0; i<nsends2; i++) {
750     ierr = MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);CHKERRQ(ierr);
751   }
752 
753   /* wait on receives of lens */
754   if (nrecvs2) {
755     ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
756     ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
757     ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
758   }
759   ierr = PetscFree(recv_waits);
760 
761   starts3[0] = 0;
762   nt         = 0;
763   for (i=0; i<nrecvs2-1; i++) {
764     starts3[i+1] = starts3[i] + lens2[i];
765     nt          += lens2[i];
766   }
767   nt += lens2[nrecvs2-1];
768 
769   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);CHKERRQ(ierr);
770   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
771   for (i=0; i<nrecvs2; i++) {
772     ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
773   }
774 
775   /* send the messages */
776   ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
777   for (i=0; i<nsends2; i++) {
778     ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
779   }
780 
781   /* wait on receives */
782   if (nrecvs2) {
783     ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
784     ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
785     ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
786   }
787   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
788   ierr = PetscFree(nprocs);CHKERRQ(ierr);
789 
790   if (debug) { /* -----------------------------------  */
791     cnt = 0;
792     for (i=0; i<nrecvs2; i++) {
793       nt = recvs2[cnt++];
794       for (j=0; j<nt; j++) {
795         ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
796         for (k=0; k<recvs2[cnt+1]; k++) {
797           ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
798         }
799         cnt += 2 + recvs2[cnt+1];
800         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
801       }
802     }
803     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
804   } /* -----------------------------------  */
805 
806   /* count number subdomains for each local node */
807   ierr = PetscMalloc(size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
808   ierr = PetscMemzero(nprocs,size*sizeof(PetscInt));CHKERRQ(ierr);
809   cnt  = 0;
810   for (i=0; i<nrecvs2; i++) {
811     nt = recvs2[cnt++];
812     for (j=0; j<nt; j++) {
813       for (k=0; k<recvs2[cnt+1]; k++) {
814         nprocs[recvs2[cnt+2+k]]++;
815       }
816       cnt += 2 + recvs2[cnt+1];
817     }
818   }
819   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
820   *nproc    = nt;
821   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),procs);CHKERRQ(ierr);
822   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);CHKERRQ(ierr);
823   ierr = PetscMalloc((nt+1)*sizeof(PetscInt*),indices);CHKERRQ(ierr);
824   ierr = PetscMalloc(size*sizeof(PetscInt),&bprocs);CHKERRQ(ierr);
825   cnt       = 0;
826   for (i=0; i<size; i++) {
827     if (nprocs[i] > 0) {
828       bprocs[i]        = cnt;
829       (*procs)[cnt]    = i;
830       (*numprocs)[cnt] = nprocs[i];
831       ierr             = PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);CHKERRQ(ierr);
832       cnt++;
833     }
834   }
835 
836   /* make the list of subdomains for each nontrivial local node */
837   ierr = PetscMemzero(*numprocs,nt*sizeof(PetscInt));CHKERRQ(ierr);
838   cnt  = 0;
839   for (i=0; i<nrecvs2; i++) {
840     nt = recvs2[cnt++];
841     for (j=0; j<nt; j++) {
842       for (k=0; k<recvs2[cnt+1]; k++) {
843         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
844       }
845       cnt += 2 + recvs2[cnt+1];
846     }
847   }
848   ierr = PetscFree(bprocs);CHKERRQ(ierr);
849   ierr = PetscFree(recvs2);CHKERRQ(ierr);
850 
851   /* sort the node indexing by their global numbers */
852   nt = *nproc;
853   for (i=0; i<nt; i++) {
854     ierr = PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
855     for (j=0; j<(*numprocs)[i]; j++) {
856       tmp[j] = lindices[(*indices)[i][j]];
857     }
858     ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
859     ierr = PetscFree(tmp);CHKERRQ(ierr);
860   }
861 
862   if (debug) { /* -----------------------------------  */
863     nt = *nproc;
864     for (i=0; i<nt; i++) {
865       ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
866       for (j=0; j<(*numprocs)[i]; j++) {
867         ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
868       }
869       ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
870     }
871     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
872   } /* -----------------------------------  */
873 
874   /* wait on sends */
875   if (nsends2) {
876     ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
877     ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
878     ierr = PetscFree(send_status);CHKERRQ(ierr);
879   }
880 
881   ierr = PetscFree(starts3);CHKERRQ(ierr);
882   ierr = PetscFree(dest);CHKERRQ(ierr);
883   ierr = PetscFree(send_waits);CHKERRQ(ierr);
884 
885   ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
886   ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
887   ierr = PetscFree(starts);CHKERRQ(ierr);
888   ierr = PetscFree(starts2);CHKERRQ(ierr);
889   ierr = PetscFree(lens2);CHKERRQ(ierr);
890 
891   ierr = PetscFree(source);CHKERRQ(ierr);
892   ierr = PetscFree(len);CHKERRQ(ierr);
893   ierr = PetscFree(recvs);CHKERRQ(ierr);
894   ierr = PetscFree(nprocs);CHKERRQ(ierr);
895   ierr = PetscFree(sends2);CHKERRQ(ierr);
896 
897   /* put the information about myself as the first entry in the list */
898   first_procs    = (*procs)[0];
899   first_numprocs = (*numprocs)[0];
900   first_indices  = (*indices)[0];
901   for (i=0; i<*nproc; i++) {
902     if ((*procs)[i] == rank) {
903       (*procs)[0]    = (*procs)[i];
904       (*numprocs)[0] = (*numprocs)[i];
905       (*indices)[0]  = (*indices)[i];
906       (*procs)[i]    = first_procs;
907       (*numprocs)[i] = first_numprocs;
908       (*indices)[i]  = first_indices;
909       break;
910     }
911   }
912   PetscFunctionReturn(0);
913 }
914 
915 #undef __FUNCT__
916 #define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
917 /*@C
918     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
919 
920     Collective on ISLocalToGlobalMapping
921 
922     Input Parameters:
923 .   mapping - the mapping from local to global indexing
924 
925     Output Parameter:
926 +   nproc - number of processors that are connected to this one
927 .   proc - neighboring processors
928 .   numproc - number of indices for each processor
929 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
930 
931     Level: advanced
932 
933 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
934           ISLocalToGlobalMappingGetInfo()
935 @*/
936 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
937 {
938   PetscErrorCode ierr;
939   PetscInt i;
940 
941   PetscFunctionBegin;
942   if (*procs) {ierr = PetscFree(*procs);CHKERRQ(ierr);}
943   if (*numprocs) {ierr = PetscFree(*numprocs);CHKERRQ(ierr);}
944   if (*indices) {
945     if ((*indices)[0]) {ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);}
946     for (i=1; i<*nproc; i++) {
947       if ((*indices)[i]) {ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);}
948     }
949     ierr = PetscFree(*indices);CHKERRQ(ierr);
950   }
951   PetscFunctionReturn(0);
952 }
953 
954 
955 
956 
957 
958 
959 
960 
961 
962 
963 
964 
965 
966 
967 
968 
969 
970 
971