xref: /petsc/src/vec/is/utils/isltog.c (revision 6849ba73f22fecb8f92ef896a42e4e8bd4cd6965)
1 
2 #include "petscsys.h"   /*I "petscsys.h" I*/
3 #include "src/vec/is/isimpl.h"    /*I "petscis.h"  I*/
4 
5 EXTERN PetscErrorCode VecInitializePackage(char *);
6 PetscCookie IS_LTOGM_COOKIE = -1;
7 
8 #undef __FUNCT__
9 #define __FUNCT__ "ISLocalToGlobalMappingGetSize"
10 /*@C
11     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
12 
13     Not Collective
14 
15     Input Parameter:
16 .   ltog - local to global mapping
17 
18     Output Parameter:
19 .   n - the number of entries in the local mapping
20 
21     Level: advanced
22 
23     Concepts: mapping^local to global
24 
25 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
26 @*/
27 PetscErrorCode ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,int *n)
28 {
29   PetscFunctionBegin;
30   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
31   PetscValidIntPointer(n,2);
32   *n = mapping->n;
33   PetscFunctionReturn(0);
34 }
35 
36 #undef __FUNCT__
37 #define __FUNCT__ "ISLocalToGlobalMappingView"
38 /*@C
39     ISLocalToGlobalMappingView - View a local to global mapping
40 
41     Not Collective
42 
43     Input Parameters:
44 +   ltog - local to global mapping
45 -   viewer - viewer
46 
47     Level: advanced
48 
49     Concepts: mapping^local to global
50 
51 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
52 @*/
53 PetscErrorCode ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
54 {
55   int        i,rank;
56   PetscTruth iascii;
57   PetscErrorCode ierr;
58 
59   PetscFunctionBegin;
60   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
61   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);
62   PetscValidHeaderSpecific(viewer,PETSC_VIEWER_COOKIE,2);
63 
64   ierr = MPI_Comm_rank(mapping->comm,&rank);CHKERRQ(ierr);
65   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
66   if (iascii) {
67     for (i=0; i<mapping->n; i++) {
68       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
69     }
70     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
71   } else {
72     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
73   }
74 
75   PetscFunctionReturn(0);
76 }
77 
78 #undef __FUNCT__
79 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
80 /*@C
81     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
82     ordering and a global parallel ordering.
83 
84     Not collective
85 
86     Input Parameter:
87 .   is - index set containing the global numbers for each local
88 
89     Output Parameter:
90 .   mapping - new mapping data structure
91 
92     Level: advanced
93 
94     Concepts: mapping^local to global
95 
96 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
97 @*/
98 PetscErrorCode ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
99 {
100   PetscErrorCode ierr;
101   int      n,*indices;
102   MPI_Comm comm;
103 
104   PetscFunctionBegin;
105   PetscValidHeaderSpecific(is,IS_COOKIE,1);
106   PetscValidPointer(mapping,2);
107 
108   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
109   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
110   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
111   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
112   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
113 
114   PetscFunctionReturn(0);
115 }
116 
117 
118 #undef __FUNCT__
119 #define __FUNCT__ "ISLocalToGlobalMappingCreate"
120 /*@C
121     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
122     ordering and a global parallel ordering.
123 
124     Not Collective, but communicator may have more than one process
125 
126     Input Parameters:
127 +   comm - MPI communicator
128 .   n - the number of local elements
129 -   indices - the global index for each local element
130 
131     Output Parameter:
132 .   mapping - new mapping data structure
133 
134     Level: advanced
135 
136     Concepts: mapping^local to global
137 
138 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
139 @*/
140 PetscErrorCode ISLocalToGlobalMappingCreate(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
141 {
142   PetscErrorCode ierr;
143   int *in;
144 
145   PetscFunctionBegin;
146   PetscValidIntPointer(indices,3);
147   PetscValidPointer(mapping,4);
148   ierr = PetscMalloc((n+1)*sizeof(int),&in);CHKERRQ(ierr);
149   ierr = PetscMemcpy(in,indices,n*sizeof(int));CHKERRQ(ierr);
150   ierr = ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);CHKERRQ(ierr);
151   PetscFunctionReturn(0);
152 }
153 
#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingCreateNC"
/*@C
    ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
    ordering and a global parallel ordering.

    Not Collective, but communicator may have more than one process

    Input Parameters:
+   comm - MPI communicator
.   n - the number of local elements
-   indices - the global index for each local element

    Output Parameter:
.   mapping - new mapping data structure

    Level: developer

    Notes: Does not copy the indices, just keeps the pointer to the indices. The ISLocalToGlobalMappingDestroy()
    will free the space so it must be obtained with PetscMalloc() and it must not be freed elsewhere.

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingCreateNC(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidIntPointer(indices,3);
  PetscValidPointer(mapping,4);
  *mapping = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  /* with static libraries the Vec package (which registers needed classes) may
     not have been initialized yet; make sure it is before creating the header */
  ierr = VecInitializePackage(PETSC_NULL);CHKERRQ(ierr);
#endif
  /* lazily register the logging class the first time any mapping is created */
  if (IS_LTOGM_COOKIE == -1) {
    ierr = PetscLogClassRegister(&IS_LTOGM_COOKIE,"IS Local to global mapping");CHKERRQ(ierr);
  }

  /* allocate the PETSc object header and install destroy/view methods */
  PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
                    cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
  PetscLogObjectCreate(*mapping);
  PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(int));

  /* take ownership of the caller's indices array (freed in Destroy) */
  (*mapping)->n       = n;
  (*mapping)->indices = (int*)indices;

  /*
      Do not create the global to local mapping. This is only created if
     ISGlobalToLocalMapping() is called
  */
  (*mapping)->globals = 0;
  PetscFunctionReturn(0);
}
209 
210 #undef __FUNCT__
211 #define __FUNCT__ "ISLocalToGlobalMappingBlock"
212 /*@C
213     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
214        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
215        and VecSetLocalToGlobalMappingBlock().
216 
217     Not Collective, but communicator may have more than one process
218 
219     Input Parameters:
220 +    inmap - original point-wise mapping
221 -    bs - block size
222 
223     Output Parameter:
224 .   outmap - block based mapping
225 
226     Level: advanced
227 
228     Concepts: mapping^local to global
229 
230 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
231 @*/
232 PetscErrorCode ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,int bs,ISLocalToGlobalMapping *outmap)
233 {
234   PetscErrorCode ierr;
235   int            *ii,i,n;
236 
237   PetscFunctionBegin;
238   if (bs > 1) {
239     n    = inmap->n/bs;
240     ierr = PetscMalloc(n*sizeof(int),&ii);CHKERRQ(ierr);
241     for (i=0; i<n; i++) {
242       ii[i] = inmap->indices[bs*i]/bs;
243     }
244     ierr = ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);CHKERRQ(ierr);
245     ierr = PetscFree(ii);CHKERRQ(ierr);
246   } else {
247     *outmap = inmap;
248     ierr    = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr);
249   }
250   PetscFunctionReturn(0);
251 }
252 
#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingDestroy"
/*@
   ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
   ordering and a global parallel ordering.

   Not Collective

   Input Parameters:
.  mapping - mapping data structure

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  PetscValidPointer(mapping,1);
  /* drop one reference; only the last reference actually frees the object */
  if (--mapping->refct > 0) PetscFunctionReturn(0);
  if (mapping->refct < 0) {
    SETERRQ(PETSC_ERR_PLIB,"Mapping already destroyed");
  }

  /* indices ownership was taken in ISLocalToGlobalMappingCreateNC() */
  ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
  /* globals exists only if ISGlobalToLocalMappingApply() was ever called */
  if (mapping->globals) {ierr = PetscFree(mapping->globals);CHKERRQ(ierr);}
  PetscLogObjectDestroy(mapping);
  PetscHeaderDestroy(mapping);
  PetscFunctionReturn(0);
}
284 
285 #undef __FUNCT__
286 #define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
287 /*@
288     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
289     a new index set using the global numbering defined in an ISLocalToGlobalMapping
290     context.
291 
292     Not collective
293 
294     Input Parameters:
295 +   mapping - mapping between local and global numbering
296 -   is - index set in local numbering
297 
298     Output Parameters:
299 .   newis - index set in global numbering
300 
301     Level: advanced
302 
303     Concepts: mapping^local to global
304 
305 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
306           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
307 @*/
308 PetscErrorCode ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
309 {
310   PetscErrorCode ierr;
311   int            n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;
312 
313   PetscFunctionBegin;
314   PetscValidPointer(mapping,1);
315   PetscValidHeaderSpecific(is,IS_COOKIE,2);
316   PetscValidPointer(newis,3);
317 
318   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
319   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
320   idxmap = mapping->indices;
321 
322   ierr = PetscMalloc((n+1)*sizeof(int),&idxout);CHKERRQ(ierr);
323   for (i=0; i<n; i++) {
324     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
325     idxout[i] = idxmap[idxin[i]];
326   }
327   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
328   ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
329   ierr = PetscFree(idxout);CHKERRQ(ierr);
330   PetscFunctionReturn(0);
331 }
332 
333 /*MC
334    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
335    and converts them to the global numbering.
336 
337    Not collective
338 
339    Input Parameters:
340 +  mapping - the local to global mapping context
341 .  N - number of integers
342 -  in - input indices in local numbering
343 
344    Output Parameter:
345 .  out - indices in global numbering
346 
347    Synopsis:
 348    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])
349 
350    Notes:
351    The in and out array parameters may be identical.
352 
353    Level: advanced
354 
355 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
356           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
357           AOPetscToApplication(), ISGlobalToLocalMappingApply()
358 
359     Concepts: mapping^local to global
360 
361 M*/
362 
363 /* -----------------------------------------------------------------------------------------*/
364 
365 #undef __FUNCT__
366 #define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
367 /*
368     Creates the global fields in the ISLocalToGlobalMapping structure
369 */
370 static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
371 {
372   PetscErrorCode ierr;
373   int            i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
374 
375   PetscFunctionBegin;
376   end   = 0;
377   start = 100000000;
378 
379   for (i=0; i<n; i++) {
380     if (idx[i] < 0) continue;
381     if (idx[i] < start) start = idx[i];
382     if (idx[i] > end)   end   = idx[i];
383   }
384   if (start > end) {start = 0; end = -1;}
385   mapping->globalstart = start;
386   mapping->globalend   = end;
387 
388   ierr             = PetscMalloc((end-start+2)*sizeof(int),&globals);CHKERRQ(ierr);
389   mapping->globals = globals;
390   for (i=0; i<end-start+1; i++) {
391     globals[i] = -1;
392   }
393   for (i=0; i<n; i++) {
394     if (idx[i] < 0) continue;
395     globals[idx[i] - start] = i;
396   }
397 
398   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(int));
399   PetscFunctionReturn(0);
400 }
401 
402 #undef __FUNCT__
403 #define __FUNCT__ "ISGlobalToLocalMappingApply"
404 /*@
405     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
406     specified with a global numbering.
407 
408     Not collective
409 
410     Input Parameters:
411 +   mapping - mapping between local and global numbering
412 .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
413            IS_GTOLM_DROP - drops the indices with no local value from the output list
414 .   n - number of global indices to map
415 -   idx - global indices to map
416 
417     Output Parameters:
418 +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
419 -   idxout - local index of each global index, one must pass in an array long enough
420              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
421              idxout == PETSC_NULL to determine the required length (returned in nout)
422              and then allocate the required space and call ISGlobalToLocalMappingApply()
423              a second time to set the values.
424 
425     Notes:
426     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
427 
428     This is not scalable in memory usage. Each processor requires O(Nglobal) size
429     array to compute these.
430 
431     Level: advanced
432 
433     Concepts: mapping^global to local
434 
435 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
436           ISLocalToGlobalMappingDestroy()
437 @*/
438 PetscErrorCode ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
439                                   int n,const int idx[],int *nout,int idxout[])
440 {
441   int i,*globals,nf = 0,tmp,start,end;
442   PetscErrorCode ierr;
443 
444   PetscFunctionBegin;
445   if (!mapping->globals) {
446     ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
447   }
448   globals = mapping->globals;
449   start   = mapping->globalstart;
450   end     = mapping->globalend;
451 
452   if (type == IS_GTOLM_MASK) {
453     if (idxout) {
454       for (i=0; i<n; i++) {
455         if (idx[i] < 0) idxout[i] = idx[i];
456         else if (idx[i] < start) idxout[i] = -1;
457         else if (idx[i] > end)   idxout[i] = -1;
458         else                     idxout[i] = globals[idx[i] - start];
459       }
460     }
461     if (nout) *nout = n;
462   } else {
463     if (idxout) {
464       for (i=0; i<n; i++) {
465         if (idx[i] < 0) continue;
466         if (idx[i] < start) continue;
467         if (idx[i] > end) continue;
468         tmp = globals[idx[i] - start];
469         if (tmp < 0) continue;
470         idxout[nf++] = tmp;
471       }
472     } else {
473       for (i=0; i<n; i++) {
474         if (idx[i] < 0) continue;
475         if (idx[i] < start) continue;
476         if (idx[i] > end) continue;
477         tmp = globals[idx[i] - start];
478         if (tmp < 0) continue;
479         nf++;
480       }
481     }
482     if (nout) *nout = nf;
483   }
484 
485   PetscFunctionReturn(0);
486 }
487 
#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
/*@C
    ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
     each index shared by more than one processor

    Collective on ISLocalToGlobalMapping

    Input Parameters:
.   mapping - the mapping from local to global indexing

    Output Parameter:
+   nproc - number of processors that are connected to this one
.   proc - neighboring processors
.   numproc - number of indices for each subdomain (processor)
-   indices - indices of local nodes shared with neighbor (sorted by global numbering)

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingRestoreInfo()
@*/
PetscErrorCode ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,int *nproc,int *procs[],int *numprocs[],int **indices[])
{
  PetscErrorCode ierr;
  int         i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
  int         size,rank,*nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
  int         tag1,tag2,tag3,cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
  int         node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
  int         first_procs,first_numprocs,*first_indices;
  MPI_Request *recv_waits,*send_waits;
  MPI_Status  recv_status,*send_status,*recv_statuses;
  MPI_Comm    comm = mapping->comm;
  PetscTruth  debug = PETSC_FALSE;

  PetscFunctionBegin;
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  /* uniprocessor case: no neighbors; return empty (but freeable) arrays so
     ISLocalToGlobalMappingRestoreInfo() works uniformly */
  if (size == 1) {
    *nproc         = 0;
    *procs         = PETSC_NULL;
    ierr           = PetscMalloc(sizeof(int),numprocs);CHKERRQ(ierr);
    (*numprocs)[0] = 0;
    ierr           = PetscMalloc(sizeof(int*),indices);CHKERRQ(ierr);
    (*indices)[0]  = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);CHKERRQ(ierr);

  /*
    Notes on ISLocalToGlobalMappingGetInfo

    globally owned node - the nodes that have been assigned to this processor in global
           numbering, just for this routine.

    nontrivial globally owned node - node assigned to this processor that is on a subdomain
           boundary (i.e. is has more than one local owner)

    locally owned node - node that exists on this processors subdomain

    nontrivial locally owned node - node that is not in the interior (i.e. has more than one
           local subdomain
  */
  /* three distinct tags, one per communication phase below */
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);

  /* Ng = global number of indices = one more than the largest global index in use */
  for (i=0; i<n; i++) {
    if (lindices[i] > max) max = lindices[i];
  }
  ierr   = MPI_Allreduce(&max,&Ng,1,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
  Ng++;
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  /* partition the global index space into contiguous chunks of length 'scale';
     this process globally owns [rstart,rstart+ng) */
  scale  = Ng/size + 1;
  ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
  rstart = scale*rank;

  /* determine ownership ranges of global indices */
  /* nprocs[2*p] = number of our indices globally owned by p; nprocs[2*p+1] = 1 if p owns any */
  ierr = PetscMalloc((2*size+1)*sizeof(int),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);

  /* determine owners of each local node  */
  ierr = PetscMalloc((n+1)*sizeof(int),&owner);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    proc             = lindices[i]/scale; /* processor that globally owns this index */
    nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
    owner[i]         = proc;
    nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
  }
  nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
  PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);

  /* inform other processors of number of messages and max length*/
  ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
  PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);

  /* post receives for owned rows */
  ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(int),&recvs);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPI_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* pack messages containing lists of local nodes to owners */
  /* each message is a list of (global index, local index) pairs */
  ierr       = PetscMalloc((2*n+1)*sizeof(int),&sends);CHKERRQ(ierr);
  ierr       = PetscMalloc((size+1)*sizeof(int),&starts);CHKERRQ(ierr);
  starts[0]  = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
  for (i=0; i<n; i++) {
    sends[starts[owner[i]]++] = lindices[i];
    sends[starts[owner[i]]++] = i;
  }
  ierr = PetscFree(owner);CHKERRQ(ierr);
  /* packing advanced the starts[] cursors; rebuild them before sending */
  starts[0]  = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}

  /* send the messages */
  ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends+1)*sizeof(int),&dest);CHKERRQ(ierr);
  cnt = 0;
  for (i=0; i<size; i++) {
    if (nprocs[2*i]) {
      ierr      = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPI_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
      dest[cnt] = i;
      cnt++;
    }
  }
  ierr = PetscFree(starts);CHKERRQ(ierr);

  /* wait on receives */
  /* source[i] = rank of i-th arriving message; len[i] = number of pairs in it */
  ierr = PetscMalloc((2*nrecvs+1)*sizeof(int),&source);CHKERRQ(ierr);
  len  = source + nrecvs;
  cnt  = nrecvs;
  ierr = PetscMalloc((ng+1)*sizeof(int),&nownedsenders);CHKERRQ(ierr);
  ierr = PetscMemzero(nownedsenders,ng*sizeof(int));CHKERRQ(ierr);
  while (cnt) {
    ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
    /* unpack receives into our local space */
    ierr           = MPI_Get_count(&recv_status,MPI_INT,&len[imdex]);CHKERRQ(ierr);
    source[imdex]  = recv_status.MPI_SOURCE;
    len[imdex]     = len[imdex]/2;
    /* count how many local owners for each of my global owned indices */
    for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
    cnt--;
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);

  /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
  nowned  = 0;
  nownedm = 0;
  for (i=0; i<ng; i++) {
    if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
  }

  /* create single array to contain rank of all local owners of each globally owned index */
  ierr      = PetscMalloc((nownedm+1)*sizeof(int),&ownedsenders);CHKERRQ(ierr);
  ierr      = PetscMalloc((ng+1)*sizeof(int),&starts);CHKERRQ(ierr);
  starts[0] = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }

  /* for each nontrival globally owned node list all arriving processors */
  for (i=0; i<nrecvs; i++) {
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        ownedsenders[starts[node]++] = source[i];
      }
    }
  }

  if (debug) { /* -----------------------------------  */
    starts[0]    = 0;
    for (i=1; i<ng; i++) {
      if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
      else starts[i] = starts[i-1];
    }
    for (i=0; i<ng; i++) {
      if (nownedsenders[i] > 1) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
        for (j=0; j<nownedsenders[i]; j++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
        }
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  }/* -----------------------------------  */

  /* wait on original sends */
  if (nsends) {
    ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(send_waits);CHKERRQ(ierr);
  ierr = PetscFree(sends);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  /* pack messages to send back to local owners */
  /* starts[] cursors were advanced above; rebuild them once more */
  starts[0]    = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }
  nsends2 = nrecvs;
  ierr    = PetscMalloc((nsends2+1)*sizeof(int),&nprocs);CHKERRQ(ierr); /* length of each message */
  for (i=0; i<nrecvs; i++) {
    nprocs[i] = 1;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        nprocs[i] += 2 + nownedsenders[node];
      }
    }
  }
  nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
  ierr = PetscMalloc((nt+1)*sizeof(int),&sends2);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends2+1)*sizeof(int),&starts2);CHKERRQ(ierr);
  starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
  /*
     Each message is 1 + nprocs[i] long, and consists of
       (0) the number of nodes being sent back
       (1) the local node number,
       (2) the number of processors sharing it,
       (3) the processors sharing it
  */
  for (i=0; i<nsends2; i++) {
    cnt = 1;
    sends2[starts2[i]] = 0;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        sends2[starts2[i]]++;
        sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
        sends2[starts2[i]+cnt++] = nownedsenders[node];
        ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(int));CHKERRQ(ierr);
        cnt += nownedsenders[node];
      }
    }
  }

  /* send the message lengths */
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Send(&nprocs[i],1,MPI_INT,source[i],tag2,comm);CHKERRQ(ierr);
  }

  /* receive the message lengths */
  nrecvs2 = nsends;
  ierr = PetscMalloc((nrecvs2+1)*sizeof(int),&lens2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs2+1)*sizeof(int),&starts3);CHKERRQ(ierr);
  nt      = 0;
  for (i=0; i<nrecvs2; i++) {
    ierr =  MPI_Recv(&lens2[i],1,MPI_INT,dest[i],tag2,comm,&recv_status);CHKERRQ(ierr);
    nt   += lens2[i];
  }
  starts3[0] = 0;
  for (i=0; i<nrecvs2-1; i++) {
    starts3[i+1] = starts3[i] + lens2[i];
  }
  ierr = PetscMalloc((nt+1)*sizeof(int),&recvs2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs2; i++) {
    ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPI_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* send the messages */
  ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPI_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
  }

  /* wait on receives */
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  if (debug) { /* -----------------------------------  */
    cnt = 0;
    for (i=0; i<nrecvs2; i++) {
      nt = recvs2[cnt++];
      for (j=0; j<nt; j++) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
        for (k=0; k<recvs2[cnt+1]; k++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
        }
        cnt += 2 + recvs2[cnt+1];
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* count number subdomains for each local node */
  ierr = PetscMalloc(size*sizeof(int),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,size*sizeof(int));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        nprocs[recvs2[cnt+2+k]]++;
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
  *nproc    = nt;
  /* allocate the output arrays; bprocs maps a rank to its slot in the output */
  ierr = PetscMalloc((nt+1)*sizeof(int),procs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(int),numprocs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(int*),indices);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int),&bprocs);CHKERRQ(ierr);
  cnt       = 0;
  for (i=0; i<size; i++) {
    if (nprocs[i] > 0) {
      bprocs[i]        = cnt;
      (*procs)[cnt]    = i;
      (*numprocs)[cnt] = nprocs[i];
      ierr             = PetscMalloc(nprocs[i]*sizeof(int),&(*indices)[cnt]);CHKERRQ(ierr);
      cnt++;
    }
  }

  /* make the list of subdomains for each nontrivial local node */
  ierr = PetscMemzero(*numprocs,nt*sizeof(int));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  ierr = PetscFree(bprocs);CHKERRQ(ierr);
  ierr = PetscFree(recvs2);CHKERRQ(ierr);

  /* sort the node indexing by their global numbers */
  nt = *nproc;
  for (i=0; i<nt; i++) {
    ierr = PetscMalloc(((*numprocs)[i])*sizeof(int),&tmp);CHKERRQ(ierr);
    for (j=0; j<(*numprocs)[i]; j++) {
      tmp[j] = lindices[(*indices)[i][j]];
    }
    ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
    ierr = PetscFree(tmp);CHKERRQ(ierr);
  }

  if (debug) { /* -----------------------------------  */
    nt = *nproc;
    for (i=0; i<nt; i++) {
      ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
      for (j=0; j<(*numprocs)[i]; j++) {
        ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
      }
      ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* wait on sends */
  if (nsends2) {
    ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }

  ierr = PetscFree(starts3);CHKERRQ(ierr);
  ierr = PetscFree(dest);CHKERRQ(ierr);
  ierr = PetscFree(send_waits);CHKERRQ(ierr);

  ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(starts);CHKERRQ(ierr);
  ierr = PetscFree(starts2);CHKERRQ(ierr);
  ierr = PetscFree(lens2);CHKERRQ(ierr);

  /* freeing source also frees len (len aliases source+nrecvs, see above) */
  ierr = PetscFree(source);CHKERRQ(ierr);
  ierr = PetscFree(recvs);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);
  ierr = PetscFree(sends2);CHKERRQ(ierr);

  /* put the information about myself as the first entry in the list */
  first_procs    = (*procs)[0];
  first_numprocs = (*numprocs)[0];
  first_indices  = (*indices)[0];
  for (i=0; i<*nproc; i++) {
    if ((*procs)[i] == rank) {
      (*procs)[0]    = (*procs)[i];
      (*numprocs)[0] = (*numprocs)[i];
      (*indices)[0]  = (*indices)[i];
      (*procs)[i]    = first_procs;
      (*numprocs)[i] = first_numprocs;
      (*indices)[i]  = first_indices;
      break;
    }
  }

  PetscFunctionReturn(0);
}
897 
898 #undef __FUNCT__
899 #define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
900 /*@C
901     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
902 
903     Collective on ISLocalToGlobalMapping
904 
905     Input Parameters:
906 .   mapping - the mapping from local to global indexing
907 
908     Output Parameter:
909 +   nproc - number of processors that are connected to this one
910 .   proc - neighboring processors
911 .   numproc - number of indices for each processor
912 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
913 
914     Level: advanced
915 
916 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
917           ISLocalToGlobalMappingGetInfo()
918 @*/
919 PetscErrorCode ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,int *nproc,int *procs[],int *numprocs[],int **indices[])
920 {
921   PetscErrorCode ierr;
922   int i;
923 
924   PetscFunctionBegin;
925   if (*procs) {ierr = PetscFree(*procs);CHKERRQ(ierr);}
926   if (*numprocs) {ierr = PetscFree(*numprocs);CHKERRQ(ierr);}
927   if (*indices) {
928     if ((*indices)[0]) {ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);}
929     for (i=1; i<*nproc; i++) {
930       if ((*indices)[i]) {ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);}
931     }
932     ierr = PetscFree(*indices);CHKERRQ(ierr);
933   }
934   PetscFunctionReturn(0);
935 }
936 
937 
938 
939 
940 
941 
942 
943 
944 
945 
946 
947 
948 
949 
950 
951 
952 
953 
954