xref: /petsc/src/vec/is/utils/isltog.c (revision e1fa1e0fac03c920596a196dc72e351475aeadf9)
1 #define PETSCVEC_DLL
2 
3 #include "petscvec.h"   /*I "petscsys.h" I*/
4 #include "src/vec/is/isimpl.h"    /*I "petscis.h"  I*/
5 
6 PetscCookie PETSCVEC_DLLEXPORT IS_LTOGM_COOKIE = -1;
7 
8 #undef __FUNCT__
9 #define __FUNCT__ "ISLocalToGlobalMappingGetSize"
10 /*@C
11     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
12 
13     Not Collective
14 
15     Input Parameter:
16 .   ltog - local to global mapping
17 
18     Output Parameter:
19 .   n - the number of entries in the local mapping
20 
21     Level: advanced
22 
23     Concepts: mapping^local to global
24 
25 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
26 @*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
{
  PetscFunctionBegin;
  /* validate arguments before touching them */
  PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
  PetscValidIntPointer(n,2);
  /* return the number of local entries stored in the mapping */
  *n = mapping->n;
  PetscFunctionReturn(0);
}
35 
36 #undef __FUNCT__
37 #define __FUNCT__ "ISLocalToGlobalMappingView"
38 /*@C
39     ISLocalToGlobalMappingView - View a local to global mapping
40 
41     Not Collective
42 
43     Input Parameters:
44 +   ltog - local to global mapping
-   viewer - viewer used to display the mapping; if PETSC_NULL, defaults to the standard output viewer on the mapping's communicator
46 
47     Level: advanced
48 
49     Concepts: mapping^local to global
50 
51 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
52 @*/
53 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
54 {
55   PetscInt        i;
56   PetscMPIInt     rank;
57   PetscTruth      iascii;
58   PetscErrorCode  ierr;
59 
60   PetscFunctionBegin;
61   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
62   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);
63   PetscValidHeaderSpecific(viewer,PETSC_VIEWER_COOKIE,2);
64 
65   ierr = MPI_Comm_rank(mapping->comm,&rank);CHKERRQ(ierr);
66   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
67   if (iascii) {
68     for (i=0; i<mapping->n; i++) {
69       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
70     }
71     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
72   } else {
73     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
74   }
75 
76   PetscFunctionReturn(0);
77 }
78 
79 #undef __FUNCT__
80 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
81 /*@C
82     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
83     ordering and a global parallel ordering.
84 
85     Not collective
86 
87     Input Parameter:
88 .   is - index set containing the global numbers for each local
89 
90     Output Parameter:
91 .   mapping - new mapping data structure
92 
93     Level: advanced
94 
95     Concepts: mapping^local to global
96 
97 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
98 @*/
99 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
100 {
101   PetscErrorCode ierr;
102   PetscInt      n,*indices;
103   MPI_Comm comm;
104 
105   PetscFunctionBegin;
106   PetscValidHeaderSpecific(is,IS_COOKIE,1);
107   PetscValidPointer(mapping,2);
108 
109   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
110   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
111   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
112   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
113   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
114 
115   PetscFunctionReturn(0);
116 }
117 
118 
119 #undef __FUNCT__
120 #define __FUNCT__ "ISLocalToGlobalMappingCreate"
121 /*@
122     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
123     ordering and a global parallel ordering.
124 
125     Not Collective, but communicator may have more than one process
126 
127     Input Parameters:
128 +   comm - MPI communicator
129 .   n - the number of local elements
130 -   indices - the global index for each local element
131 
132     Output Parameter:
133 .   mapping - new mapping data structure
134 
135     Level: advanced
136 
137     Concepts: mapping^local to global
138 
139 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
140 @*/
141 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
142 {
143   PetscErrorCode ierr;
144   PetscInt       *in;
145 
146   PetscFunctionBegin;
147   PetscValidIntPointer(indices,3);
148   PetscValidPointer(mapping,4);
149   ierr = PetscMalloc(n*sizeof(PetscInt),&in);CHKERRQ(ierr);
150   ierr = PetscMemcpy(in,indices,n*sizeof(PetscInt));CHKERRQ(ierr);
151   ierr = ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);CHKERRQ(ierr);
152   PetscFunctionReturn(0);
153 }
154 
155 #undef __FUNCT__
156 #define __FUNCT__ "ISLocalToGlobalMappingCreateNC"
157 /*@C
158     ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
159     ordering and a global parallel ordering.
160 
161     Not Collective, but communicator may have more than one process
162 
163     Input Parameters:
164 +   comm - MPI communicator
165 .   n - the number of local elements
166 -   indices - the global index for each local element
167 
168     Output Parameter:
169 .   mapping - new mapping data structure
170 
171     Level: developer
172 
173     Notes: Does not copy the indices, just keeps the pointer to the indices. The ISLocalToGlobalMappingDestroy()
174     will free the space so it must be obtained with PetscMalloc() and it must not be freed elsewhere.
175 
176     Concepts: mapping^local to global
177 
178 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
179 @*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreateNC(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidIntPointer(indices,3);
  PetscValidPointer(mapping,4);
  *mapping = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  ierr = VecInitializePackage(PETSC_NULL);CHKERRQ(ierr);
#endif
  /* lazily register the class cookie on first use (-1 is the "unregistered" sentinel
     set at file scope); NOTE(review): this check-then-register is not thread safe */
  if (IS_LTOGM_COOKIE == -1) {
    ierr = PetscLogClassRegister(&IS_LTOGM_COOKIE,"IS Local to global mapping");CHKERRQ(ierr);
  }

  /* allocate and initialize the PETSc object header for the new mapping */
  ierr = PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
                    cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(PetscInt));CHKERRQ(ierr);

  /* no copy is made: the mapping takes ownership of the caller's indices array,
     which is freed in ISLocalToGlobalMappingDestroy() (see Notes in the manual page) */
  (*mapping)->n       = n;
  (*mapping)->indices = (PetscInt*)indices;

  /*
      Do not create the global to local mapping. This is only created if
     ISGlobalToLocalMapping() is called
  */
  (*mapping)->globals = 0;
  PetscFunctionReturn(0);
}
209 
210 #undef __FUNCT__
211 #define __FUNCT__ "ISLocalToGlobalMappingBlock"
212 /*@
213     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
214        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
215        and VecSetLocalToGlobalMappingBlock().
216 
217     Not Collective, but communicator may have more than one process
218 
219     Input Parameters:
220 +    inmap - original point-wise mapping
221 -    bs - block size
222 
223     Output Parameter:
224 .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
225 
226     Level: advanced
227 
228     Concepts: mapping^local to global
229 
230 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
231 @*/
232 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
233 {
234   PetscErrorCode ierr;
235   PetscInt       *ii,i,n;
236 
237   PetscFunctionBegin;
238   if (bs > 1) {
239     n    = inmap->n/bs;
240     if (n*bs != inmap->n) SETERRQ(PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
241     ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr);
242     for (i=0; i<n; i++) {
243       ii[i] = inmap->indices[bs*i]/bs;
244     }
245     ierr = ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);CHKERRQ(ierr);
246     ierr = PetscFree(ii);CHKERRQ(ierr);
247   } else {
248     *outmap = inmap;
249     ierr    = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr);
250   }
251   PetscFunctionReturn(0);
252 }
253 
254 #undef __FUNCT__
255 #define __FUNCT__ "ISLocalToGlobalMappingDestroy"
256 /*@
257    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
258    ordering and a global parallel ordering.
259 
   Not Collective
261 
262    Input Parameters:
263 .  mapping - mapping data structure
264 
265    Level: advanced
266 
267 .seealso: ISLocalToGlobalMappingCreate()
268 @*/
269 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
270 {
271   PetscErrorCode ierr;
272   PetscFunctionBegin;
273   PetscValidPointer(mapping,1);
274   if (--mapping->refct > 0) PetscFunctionReturn(0);
275   if (mapping->refct < 0) {
276     SETERRQ(PETSC_ERR_PLIB,"Mapping already destroyed");
277   }
278 
279   ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
280   if (mapping->globals) {ierr = PetscFree(mapping->globals);CHKERRQ(ierr);}
281   ierr = PetscHeaderDestroy(mapping);CHKERRQ(ierr);
282   PetscFunctionReturn(0);
283 }
284 
285 #undef __FUNCT__
286 #define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
287 /*@
288     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
289     a new index set using the global numbering defined in an ISLocalToGlobalMapping
290     context.
291 
292     Not collective
293 
294     Input Parameters:
295 +   mapping - mapping between local and global numbering
296 -   is - index set in local numbering
297 
298     Output Parameters:
299 .   newis - index set in global numbering
300 
301     Level: advanced
302 
303     Concepts: mapping^local to global
304 
305 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
306           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
307 @*/
308 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
309 {
310   PetscErrorCode ierr;
311   PetscInt            n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;
312 
313   PetscFunctionBegin;
314   PetscValidPointer(mapping,1);
315   PetscValidHeaderSpecific(is,IS_COOKIE,2);
316   PetscValidPointer(newis,3);
317 
318   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
319   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
320   idxmap = mapping->indices;
321 
322   ierr = PetscMalloc(n*sizeof(PetscInt),&idxout);CHKERRQ(ierr);
323   for (i=0; i<n; i++) {
324     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
325     idxout[i] = idxmap[idxin[i]];
326   }
327   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
328   ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
329   ierr = PetscFree(idxout);CHKERRQ(ierr);
330   PetscFunctionReturn(0);
331 }
332 
333 /*MC
334    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
335    and converts them to the global numbering.
336 
337    Not collective
338 
339    Input Parameters:
340 +  mapping - the local to global mapping context
341 .  N - number of integers
342 -  in - input indices in local numbering
343 
344    Output Parameter:
345 .  out - indices in global numbering
346 
347    Synopsis:
348    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])
349 
350    Notes:
351    The in and out array parameters may be identical.
352 
353    Level: advanced
354 
355 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
356           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
357           AOPetscToApplication(), ISGlobalToLocalMappingApply()
358 
359     Concepts: mapping^local to global
360 
361 M*/
362 
363 /* -----------------------------------------------------------------------------------------*/
364 
365 #undef __FUNCT__
366 #define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
367 /*
368     Creates the global fields in the ISLocalToGlobalMapping structure
369 */
370 static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
371 {
372   PetscErrorCode ierr;
373   PetscInt            i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
374 
375   PetscFunctionBegin;
376   end   = 0;
377   start = 100000000;
378 
379   for (i=0; i<n; i++) {
380     if (idx[i] < 0) continue;
381     if (idx[i] < start) start = idx[i];
382     if (idx[i] > end)   end   = idx[i];
383   }
384   if (start > end) {start = 0; end = -1;}
385   mapping->globalstart = start;
386   mapping->globalend   = end;
387 
388   ierr             = PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);CHKERRQ(ierr);
389   mapping->globals = globals;
390   for (i=0; i<end-start+1; i++) {
391     globals[i] = -1;
392   }
393   for (i=0; i<n; i++) {
394     if (idx[i] < 0) continue;
395     globals[idx[i] - start] = i;
396   }
397 
398   ierr = PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));CHKERRQ(ierr);
399   PetscFunctionReturn(0);
400 }
401 
402 #undef __FUNCT__
403 #define __FUNCT__ "ISGlobalToLocalMappingApply"
404 /*@
405     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
406     specified with a global numbering.
407 
408     Not collective
409 
410     Input Parameters:
411 +   mapping - mapping between local and global numbering
412 .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
413            IS_GTOLM_DROP - drops the indices with no local value from the output list
414 .   n - number of global indices to map
415 -   idx - global indices to map
416 
417     Output Parameters:
418 +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
419 -   idxout - local index of each global index, one must pass in an array long enough
420              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
421              idxout == PETSC_NULL to determine the required length (returned in nout)
422              and then allocate the required space and call ISGlobalToLocalMappingApply()
423              a second time to set the values.
424 
425     Notes:
426     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
427 
428     This is not scalable in memory usage. Each processor requires O(Nglobal) size
429     array to compute these.
430 
431     Level: advanced
432 
433     Concepts: mapping^global to local
434 
435 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
436           ISLocalToGlobalMappingDestroy()
437 @*/
438 PetscErrorCode PETSCVEC_DLLEXPORT ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
439                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
440 {
441   PetscInt i,*globals,nf = 0,tmp,start,end;
442   PetscErrorCode ierr;
443 
444   PetscFunctionBegin;
445   if (!mapping->globals) {
446     ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
447   }
448   globals = mapping->globals;
449   start   = mapping->globalstart;
450   end     = mapping->globalend;
451 
452   if (type == IS_GTOLM_MASK) {
453     if (idxout) {
454       for (i=0; i<n; i++) {
455         if (idx[i] < 0) idxout[i] = idx[i];
456         else if (idx[i] < start) idxout[i] = -1;
457         else if (idx[i] > end)   idxout[i] = -1;
458         else                     idxout[i] = globals[idx[i] - start];
459       }
460     }
461     if (nout) *nout = n;
462   } else {
463     if (idxout) {
464       for (i=0; i<n; i++) {
465         if (idx[i] < 0) continue;
466         if (idx[i] < start) continue;
467         if (idx[i] > end) continue;
468         tmp = globals[idx[i] - start];
469         if (tmp < 0) continue;
470         idxout[nf++] = tmp;
471       }
472     } else {
473       for (i=0; i<n; i++) {
474         if (idx[i] < 0) continue;
475         if (idx[i] < start) continue;
476         if (idx[i] > end) continue;
477         tmp = globals[idx[i] - start];
478         if (tmp < 0) continue;
479         nf++;
480       }
481     }
482     if (nout) *nout = nf;
483   }
484 
485   PetscFunctionReturn(0);
486 }
487 
488 #undef __FUNCT__
489 #define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
490 /*@C
491     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
492      each index shared by more than one processor
493 
494     Collective on ISLocalToGlobalMapping
495 
496     Input Parameters:
497 .   mapping - the mapping from local to global indexing
498 
499     Output Parameter:
500 +   nproc - number of processors that are connected to this one
501 .   proc - neighboring processors
502 .   numproc - number of indices for each subdomain (processor)
503 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
504 
505     Level: advanced
506 
507     Concepts: mapping^local to global
508 
509 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
510           ISLocalToGlobalMappingRestoreInfo()
511 @*/
512 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
513 {
514   PetscErrorCode ierr;
515   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
516   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
517   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
518   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
519   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
520   PetscInt       first_procs,first_numprocs,*first_indices;
521   MPI_Request    *recv_waits,*send_waits;
522   MPI_Status     recv_status,*send_status,*recv_statuses;
523   MPI_Comm       comm = mapping->comm;
524   PetscTruth     debug = PETSC_FALSE;
525 
526   PetscFunctionBegin;
527   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
528   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
529   if (size == 1) {
530     *nproc         = 0;
531     *procs         = PETSC_NULL;
532     ierr           = PetscMalloc(sizeof(PetscInt),numprocs);CHKERRQ(ierr);
533     (*numprocs)[0] = 0;
534     ierr           = PetscMalloc(sizeof(PetscInt*),indices);CHKERRQ(ierr);
535     (*indices)[0]  = PETSC_NULL;
536     PetscFunctionReturn(0);
537   }
538 
539   ierr = PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);CHKERRQ(ierr);
540 
541   /*
542     Notes on ISLocalToGlobalMappingGetInfo
543 
544     globally owned node - the nodes that have been assigned to this processor in global
545            numbering, just for this routine.
546 
547     nontrivial globally owned node - node assigned to this processor that is on a subdomain
548            boundary (i.e. is has more than one local owner)
549 
550     locally owned node - node that exists on this processors subdomain
551 
552     nontrivial locally owned node - node that is not in the interior (i.e. has more than one
553            local subdomain
554   */
555   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
556   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
557   ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);
558 
559   for (i=0; i<n; i++) {
560     if (lindices[i] > max) max = lindices[i];
561   }
562   ierr   = MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
563   Ng++;
564   ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
565   ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
566   scale  = Ng/size + 1;
567   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
568   rstart = scale*rank;
569 
570   /* determine ownership ranges of global indices */
571   ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
572   ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
573 
574   /* determine owners of each local node  */
575   ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr);
576   for (i=0; i<n; i++) {
577     proc             = lindices[i]/scale; /* processor that globally owns this index */
578     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
579     owner[i]         = proc;
580     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
581   }
582   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
583   ierr = PetscLogInfo((0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends));CHKERRQ(ierr);
584 
585   /* inform other processors of number of messages and max length*/
586   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
587   ierr = PetscLogInfo((0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs));CHKERRQ(ierr);
588 
589   /* post receives for owned rows */
590   ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);CHKERRQ(ierr);
591   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
592   for (i=0; i<nrecvs; i++) {
593     ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
594   }
595 
596   /* pack messages containing lists of local nodes to owners */
597   ierr       = PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);CHKERRQ(ierr);
598   ierr       = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
599   starts[0]  = 0;
600   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
601   for (i=0; i<n; i++) {
602     sends[starts[owner[i]]++] = lindices[i];
603     sends[starts[owner[i]]++] = i;
604   }
605   ierr = PetscFree(owner);CHKERRQ(ierr);
606   starts[0]  = 0;
607   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
608 
609   /* send the messages */
610   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
611   ierr = PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);CHKERRQ(ierr);
612   cnt = 0;
613   for (i=0; i<size; i++) {
614     if (nprocs[2*i]) {
615       ierr      = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
616       dest[cnt] = i;
617       cnt++;
618     }
619   }
620   ierr = PetscFree(starts);CHKERRQ(ierr);
621 
622   /* wait on receives */
623   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);CHKERRQ(ierr);
624   ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);CHKERRQ(ierr);
625   cnt  = nrecvs;
626   ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);CHKERRQ(ierr);
627   ierr = PetscMemzero(nownedsenders,ng*sizeof(PetscInt));CHKERRQ(ierr);
628   while (cnt) {
629     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
630     /* unpack receives into our local space */
631     ierr           = MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);CHKERRQ(ierr);
632     source[imdex]  = recv_status.MPI_SOURCE;
633     len[imdex]     = len[imdex]/2;
634     /* count how many local owners for each of my global owned indices */
635     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
636     cnt--;
637   }
638   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
639 
640   /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
641   nowned  = 0;
642   nownedm = 0;
643   for (i=0; i<ng; i++) {
644     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
645   }
646 
647   /* create single array to contain rank of all local owners of each globally owned index */
648   ierr      = PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);CHKERRQ(ierr);
649   ierr      = PetscMalloc((ng+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
650   starts[0] = 0;
651   for (i=1; i<ng; i++) {
652     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
653     else starts[i] = starts[i-1];
654   }
655 
656   /* for each nontrival globally owned node list all arriving processors */
657   for (i=0; i<nrecvs; i++) {
658     for (j=0; j<len[i]; j++) {
659       node = recvs[2*i*nmax+2*j]-rstart;
660       if (nownedsenders[node] > 1) {
661         ownedsenders[starts[node]++] = source[i];
662       }
663     }
664   }
665 
666   if (debug) { /* -----------------------------------  */
667     starts[0]    = 0;
668     for (i=1; i<ng; i++) {
669       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
670       else starts[i] = starts[i-1];
671     }
672     for (i=0; i<ng; i++) {
673       if (nownedsenders[i] > 1) {
674         ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
675         for (j=0; j<nownedsenders[i]; j++) {
676           ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
677         }
678         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
679       }
680     }
681     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
682   }/* -----------------------------------  */
683 
684   /* wait on original sends */
685   if (nsends) {
686     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
687     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
688     ierr = PetscFree(send_status);CHKERRQ(ierr);
689   }
690   ierr = PetscFree(send_waits);CHKERRQ(ierr);
691   ierr = PetscFree(sends);CHKERRQ(ierr);
692   ierr = PetscFree(nprocs);CHKERRQ(ierr);
693 
694   /* pack messages to send back to local owners */
695   starts[0]    = 0;
696   for (i=1; i<ng; i++) {
697     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
698     else starts[i] = starts[i-1];
699   }
700   nsends2 = nrecvs;
701   ierr    = PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); /* length of each message */
702   for (i=0; i<nrecvs; i++) {
703     nprocs[i] = 1;
704     for (j=0; j<len[i]; j++) {
705       node = recvs[2*i*nmax+2*j]-rstart;
706       if (nownedsenders[node] > 1) {
707         nprocs[i] += 2 + nownedsenders[node];
708       }
709     }
710   }
711   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
712   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);CHKERRQ(ierr);
713   ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);CHKERRQ(ierr);
714   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
715   /*
716      Each message is 1 + nprocs[i] long, and consists of
717        (0) the number of nodes being sent back
718        (1) the local node number,
719        (2) the number of processors sharing it,
720        (3) the processors sharing it
721   */
722   for (i=0; i<nsends2; i++) {
723     cnt = 1;
724     sends2[starts2[i]] = 0;
725     for (j=0; j<len[i]; j++) {
726       node = recvs[2*i*nmax+2*j]-rstart;
727       if (nownedsenders[node] > 1) {
728         sends2[starts2[i]]++;
729         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
730         sends2[starts2[i]+cnt++] = nownedsenders[node];
731         ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));CHKERRQ(ierr);
732         cnt += nownedsenders[node];
733       }
734     }
735   }
736 
737   /* receive the message lengths */
738   nrecvs2 = nsends;
739   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);CHKERRQ(ierr);
740   ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);CHKERRQ(ierr);
741   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
742   for (i=0; i<nrecvs2; i++) {
743     ierr = MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);CHKERRQ(ierr);
744   }
745 
746   /* send the message lengths */
747   for (i=0; i<nsends2; i++) {
748     ierr = MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);CHKERRQ(ierr);
749   }
750 
751   /* wait on receives of lens */
752   if (nrecvs2) {
753     ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
754     ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
755     ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
756   }
757   ierr = PetscFree(recv_waits);
758 
759   starts3[0] = 0;
760   nt         = 0;
761   for (i=0; i<nrecvs2-1; i++) {
762     starts3[i+1] = starts3[i] + lens2[i];
763     nt          += lens2[i];
764   }
765   nt += lens2[nrecvs2-1];
766 
767   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);CHKERRQ(ierr);
768   ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
769   for (i=0; i<nrecvs2; i++) {
770     ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
771   }
772 
773   /* send the messages */
774   ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
775   for (i=0; i<nsends2; i++) {
776     ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
777   }
778 
779   /* wait on receives */
780   if (nrecvs2) {
781     ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
782     ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
783     ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
784   }
785   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
786   ierr = PetscFree(nprocs);CHKERRQ(ierr);
787 
788   if (debug) { /* -----------------------------------  */
789     cnt = 0;
790     for (i=0; i<nrecvs2; i++) {
791       nt = recvs2[cnt++];
792       for (j=0; j<nt; j++) {
793         ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
794         for (k=0; k<recvs2[cnt+1]; k++) {
795           ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
796         }
797         cnt += 2 + recvs2[cnt+1];
798         ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
799       }
800     }
801     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
802   } /* -----------------------------------  */
803 
804   /* count number subdomains for each local node */
805   ierr = PetscMalloc(size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
806   ierr = PetscMemzero(nprocs,size*sizeof(PetscInt));CHKERRQ(ierr);
807   cnt  = 0;
808   for (i=0; i<nrecvs2; i++) {
809     nt = recvs2[cnt++];
810     for (j=0; j<nt; j++) {
811       for (k=0; k<recvs2[cnt+1]; k++) {
812         nprocs[recvs2[cnt+2+k]]++;
813       }
814       cnt += 2 + recvs2[cnt+1];
815     }
816   }
817   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
818   *nproc    = nt;
819   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),procs);CHKERRQ(ierr);
820   ierr = PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);CHKERRQ(ierr);
821   ierr = PetscMalloc((nt+1)*sizeof(PetscInt*),indices);CHKERRQ(ierr);
822   ierr = PetscMalloc(size*sizeof(PetscInt),&bprocs);CHKERRQ(ierr);
823   cnt       = 0;
824   for (i=0; i<size; i++) {
825     if (nprocs[i] > 0) {
826       bprocs[i]        = cnt;
827       (*procs)[cnt]    = i;
828       (*numprocs)[cnt] = nprocs[i];
829       ierr             = PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);CHKERRQ(ierr);
830       cnt++;
831     }
832   }
833 
834   /* make the list of subdomains for each nontrivial local node */
835   ierr = PetscMemzero(*numprocs,nt*sizeof(PetscInt));CHKERRQ(ierr);
836   cnt  = 0;
837   for (i=0; i<nrecvs2; i++) {
838     nt = recvs2[cnt++];
839     for (j=0; j<nt; j++) {
840       for (k=0; k<recvs2[cnt+1]; k++) {
841         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
842       }
843       cnt += 2 + recvs2[cnt+1];
844     }
845   }
846   ierr = PetscFree(bprocs);CHKERRQ(ierr);
847   ierr = PetscFree(recvs2);CHKERRQ(ierr);
848 
849   /* sort the node indexing by their global numbers */
850   nt = *nproc;
851   for (i=0; i<nt; i++) {
852     ierr = PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
853     for (j=0; j<(*numprocs)[i]; j++) {
854       tmp[j] = lindices[(*indices)[i][j]];
855     }
856     ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
857     ierr = PetscFree(tmp);CHKERRQ(ierr);
858   }
859 
860   if (debug) { /* -----------------------------------  */
861     nt = *nproc;
862     for (i=0; i<nt; i++) {
863       ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
864       for (j=0; j<(*numprocs)[i]; j++) {
865         ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
866       }
867       ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
868     }
869     ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
870   } /* -----------------------------------  */
871 
872   /* wait on sends */
873   if (nsends2) {
874     ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
875     ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
876     ierr = PetscFree(send_status);CHKERRQ(ierr);
877   }
878 
879   ierr = PetscFree(starts3);CHKERRQ(ierr);
880   ierr = PetscFree(dest);CHKERRQ(ierr);
881   ierr = PetscFree(send_waits);CHKERRQ(ierr);
882 
883   ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
884   ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
885   ierr = PetscFree(starts);CHKERRQ(ierr);
886   ierr = PetscFree(starts2);CHKERRQ(ierr);
887   ierr = PetscFree(lens2);CHKERRQ(ierr);
888 
889   ierr = PetscFree(source);CHKERRQ(ierr);
890   ierr = PetscFree(len);CHKERRQ(ierr);
891   ierr = PetscFree(recvs);CHKERRQ(ierr);
892   ierr = PetscFree(nprocs);CHKERRQ(ierr);
893   ierr = PetscFree(sends2);CHKERRQ(ierr);
894 
895   /* put the information about myself as the first entry in the list */
896   first_procs    = (*procs)[0];
897   first_numprocs = (*numprocs)[0];
898   first_indices  = (*indices)[0];
899   for (i=0; i<*nproc; i++) {
900     if ((*procs)[i] == rank) {
901       (*procs)[0]    = (*procs)[i];
902       (*numprocs)[0] = (*numprocs)[i];
903       (*indices)[0]  = (*indices)[i];
904       (*procs)[i]    = first_procs;
905       (*numprocs)[i] = first_numprocs;
906       (*indices)[i]  = first_indices;
907       break;
908     }
909   }
910   PetscFunctionReturn(0);
911 }
912 
913 #undef __FUNCT__
914 #define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
915 /*@C
916     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
917 
918     Collective on ISLocalToGlobalMapping
919 
920     Input Parameters:
921 .   mapping - the mapping from local to global indexing
922 
923     Output Parameter:
924 +   nproc - number of processors that are connected to this one
925 .   proc - neighboring processors
926 .   numproc - number of indices for each processor
927 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
928 
929     Level: advanced
930 
931 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
932           ISLocalToGlobalMappingGetInfo()
933 @*/
934 PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
935 {
936   PetscErrorCode ierr;
937   PetscInt i;
938 
939   PetscFunctionBegin;
940   if (*procs) {ierr = PetscFree(*procs);CHKERRQ(ierr);}
941   if (*numprocs) {ierr = PetscFree(*numprocs);CHKERRQ(ierr);}
942   if (*indices) {
943     if ((*indices)[0]) {ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);}
944     for (i=1; i<*nproc; i++) {
945       if ((*indices)[i]) {ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);}
946     }
947     ierr = PetscFree(*indices);CHKERRQ(ierr);
948   }
949   PetscFunctionReturn(0);
950 }
951 
952 
953 
954 
955 
956 
957 
958 
959 
960 
961 
962 
963 
964 
965 
966 
967 
968 
969