xref: /petsc/src/vec/is/ao/impls/memscalable/aomemscalable.c (revision 503c0ea9b45bcfbcebbb1ea5341243bbc69f0bea)
1 
2 /*
3     The memory scalable AO application ordering routines. These store the
4   orderings on each processor for that processor's range of values
5 */
6 
7 #include <../src/vec/is/ao/aoimpl.h>          /*I  "petscao.h"   I*/
8 
/*
   Per-process data for the memory-scalable AO: each process stores only the
   portion of the two orderings that falls in its range of aomems->map.
*/
typedef struct {
  PetscInt    *app_loc;    /* app_loc[i] is the partner for the ith local PETSc slot */
  PetscInt    *petsc_loc;  /* petsc_loc[j] is the partner for the jth local app slot */
  PetscLayout map;         /* determines the local sizes of ao */
} AO_MemoryScalable;
14 
15 /*
16        All processors ship the data to process 0 to be printed; note that this is not scalable because
       process 0 allocates space for all the ordering entries across all the processes
18 */
/*
   AOView_MemoryScalable - prints the full ordering through an ASCII viewer.

   All processes ship their local slice of app_loc/petsc_loc to process 0,
   which allocates room for the entire ordering (size map->N) and prints it;
   this view operation itself is therefore NOT memory scalable.
*/
PetscErrorCode AOView_MemoryScalable(AO ao,PetscViewer viewer)
{
  PetscMPIInt       rank,size;
  AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
  PetscBool         iascii;
  PetscMPIInt       tag_app,tag_petsc;
  PetscLayout       map = aomems->map;
  PetscInt          *app,*app_loc,*petsc,*petsc_loc,len,i,j;
  MPI_Status        status;

  PetscFunctionBegin;
  /* only ASCII viewers are supported for this AO type */
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii));
  PetscCheck(iascii,PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"Viewer type %s not supported for AO MemoryScalable",((PetscObject)viewer)->type_name);

  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)ao),&rank));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)ao),&size));

  /* fresh tags so these point-to-point messages cannot collide with other traffic on the AO's communicator */
  PetscCall(PetscObjectGetNewTag((PetscObject)ao,&tag_app));
  PetscCall(PetscObjectGetNewTag((PetscObject)ao,&tag_petsc));

  if (rank == 0) {
    PetscCall(PetscViewerASCIIPrintf(viewer,"Number of elements in ordering %" PetscInt_FMT "\n",ao->N));
    PetscCall(PetscViewerASCIIPrintf(viewer,  "PETSc->App  App->PETSc\n"));

    /* room for the ENTIRE ordering on rank 0 -- this is the non-scalable part */
    PetscCall(PetscMalloc2(map->N,&app,map->N,&petsc));
    len  = map->n;
    /* print local AO */
    PetscCall(PetscViewerASCIIPrintf(viewer,"Process [%d]\n",rank));
    for (i=0; i<len; i++) {
      PetscCall(PetscViewerASCIIPrintf(viewer,"%3" PetscInt_FMT "  %3" PetscInt_FMT "    %3" PetscInt_FMT "  %3" PetscInt_FMT "\n",i,aomems->app_loc[i],i,aomems->petsc_loc[i]));
    }

    /* recv and print off-processor's AO */
    for (i=1; i<size; i++) {
      len       = map->range[i+1] - map->range[i]; /* number of entries owned by process i */
      app_loc   = app  + map->range[i];
      petsc_loc = petsc+ map->range[i];
      PetscCallMPI(MPI_Recv(app_loc,(PetscMPIInt)len,MPIU_INT,i,tag_app,PetscObjectComm((PetscObject)ao),&status));
      PetscCallMPI(MPI_Recv(petsc_loc,(PetscMPIInt)len,MPIU_INT,i,tag_petsc,PetscObjectComm((PetscObject)ao),&status));
      PetscCall(PetscViewerASCIIPrintf(viewer,"Process [%" PetscInt_FMT "]\n",i));
      for (j=0; j<len; j++) {
        /* global index of the jth entry on process i is map->range[i]+j */
        PetscCall(PetscViewerASCIIPrintf(viewer,"%3" PetscInt_FMT "  %3" PetscInt_FMT "    %3" PetscInt_FMT "  %3" PetscInt_FMT "\n",map->range[i]+j,app_loc[j],map->range[i]+j,petsc_loc[j]));
      }
    }
    PetscCall(PetscFree2(app,petsc));

  } else {
    /* send values */
    PetscCallMPI(MPI_Send((void*)aomems->app_loc,map->n,MPIU_INT,0,tag_app,PetscObjectComm((PetscObject)ao)));
    PetscCallMPI(MPI_Send((void*)aomems->petsc_loc,map->n,MPIU_INT,0,tag_petsc,PetscObjectComm((PetscObject)ao)));
  }
  PetscCall(PetscViewerFlush(viewer));
  PetscFunctionReturn(0);
}
73 
74 PetscErrorCode AODestroy_MemoryScalable(AO ao)
75 {
76   AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
77 
78   PetscFunctionBegin;
79   PetscCall(PetscFree2(aomems->app_loc,aomems->petsc_loc));
80   PetscCall(PetscLayoutDestroy(&aomems->map));
81   PetscCall(PetscFree(aomems));
82   PetscFunctionReturn(0);
83 }
84 
85 /*
86    Input Parameters:
87 +   ao - the application ordering context
88 .   n  - the number of integers in ia[]
89 .   ia - the integers; these are replaced with their mapped value
90 -   maploc - app_loc or petsc_loc in struct "AO_MemoryScalable"
91 
92    Output Parameter:
.   ia - the mapped integers
94  */
95 PetscErrorCode AOMap_MemoryScalable_private(AO ao,PetscInt n,PetscInt *ia,const PetscInt *maploc)
96 {
97   AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
98   MPI_Comm          comm;
99   PetscMPIInt       rank,size,tag1,tag2;
100   PetscInt          *owner,*start,*sizes,nsends,nreceives;
101   PetscInt          nmax,count,*sindices,*rindices,i,j,idx,lastidx,*sindices2,*rindices2;
102   const PetscInt    *owners = aomems->map->range;
103   MPI_Request       *send_waits,*recv_waits,*send_waits2,*recv_waits2;
104   MPI_Status        recv_status;
105   PetscMPIInt       nindices,source,widx;
106   PetscInt          *rbuf,*sbuf;
107   MPI_Status        *send_status,*send_status2;
108 
109   PetscFunctionBegin;
110   PetscCall(PetscObjectGetComm((PetscObject)ao,&comm));
111   PetscCallMPI(MPI_Comm_rank(comm,&rank));
112   PetscCallMPI(MPI_Comm_size(comm,&size));
113 
114   /*  first count number of contributors to each processor */
115   PetscCall(PetscMalloc1(size,&start));
116   PetscCall(PetscCalloc2(2*size,&sizes,n,&owner));
117 
118   j       = 0;
119   lastidx = -1;
120   for (i=0; i<n; i++) {
121     if (ia[i] < 0) owner[i] = -1; /* mark negative entries (which are not to be mapped) with a special negative value */
122     if (ia[i] >= ao->N) owner[i] = -2; /* mark out of range entries with special negative value */
123     else {
124       /* if indices are NOT locally sorted, need to start search at the beginning */
125       if (lastidx > (idx = ia[i])) j = 0;
126       lastidx = idx;
127       for (; j<size; j++) {
128         if (idx >= owners[j] && idx < owners[j+1]) {
129           sizes[2*j]++;     /* num of indices to be sent */
130           sizes[2*j+1] = 1; /* send to proc[j] */
131           owner[i]     = j;
132           break;
133         }
134       }
135     }
136   }
137   sizes[2*rank]=sizes[2*rank+1]=0; /* do not receive from self! */
138   nsends        = 0;
139   for (i=0; i<size; i++) nsends += sizes[2*i+1];
140 
141   /* inform other processors of number of messages and max length*/
142   PetscCall(PetscMaxSum(comm,sizes,&nmax,&nreceives));
143 
144   /* allocate arrays */
145   PetscCall(PetscObjectGetNewTag((PetscObject)ao,&tag1));
146   PetscCall(PetscObjectGetNewTag((PetscObject)ao,&tag2));
147 
148   PetscCall(PetscMalloc2(nreceives*nmax,&rindices,nreceives,&recv_waits));
149   PetscCall(PetscMalloc2(nsends*nmax,&rindices2,nsends,&recv_waits2));
150 
151   PetscCall(PetscMalloc3(n,&sindices,nsends,&send_waits,nsends,&send_status));
152   PetscCall(PetscMalloc3(n,&sindices2,nreceives,&send_waits2,nreceives,&send_status2));
153 
154   /* post 1st receives: receive others requests
155      since we don't know how long each individual message is we
156      allocate the largest needed buffer for each receive. Potentially
157      this is a lot of wasted space.
158   */
159   for (i=0,count=0; i<nreceives; i++) {
160     PetscCallMPI(MPI_Irecv(rindices+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+count++));
161   }
162 
163   /* do 1st sends:
164       1) starts[i] gives the starting index in svalues for stuff going to
165          the ith processor
166   */
167   start[0] = 0;
168   for (i=1; i<size; i++) start[i] = start[i-1] + sizes[2*i-2];
169   for (i=0; i<n; i++) {
170     j = owner[i];
171     if (j == -1) continue; /* do not remap negative entries in ia[] */
172     else if (j == -2) { /* out of range entries get mapped to -1 */
173       ia[i] = -1;
174       continue;
175     } else if (j != rank) {
176       sindices[start[j]++]  = ia[i];
177     } else { /* compute my own map */
178       ia[i] = maploc[ia[i]-owners[rank]];
179     }
180   }
181 
182   start[0] = 0;
183   for (i=1; i<size; i++) start[i] = start[i-1] + sizes[2*i-2];
184   for (i=0,count=0; i<size; i++) {
185     if (sizes[2*i+1]) {
186       /* send my request to others */
187       PetscCallMPI(MPI_Isend(sindices+start[i],sizes[2*i],MPIU_INT,i,tag1,comm,send_waits+count));
188       /* post receive for the answer of my request */
189       PetscCallMPI(MPI_Irecv(sindices2+start[i],sizes[2*i],MPIU_INT,i,tag2,comm,recv_waits2+count));
190       count++;
191     }
192   }
193   PetscCheckFalse(nsends != count,comm,PETSC_ERR_SUP,"nsends %" PetscInt_FMT " != count %" PetscInt_FMT,nsends,count);
194 
195   /* wait on 1st sends */
196   if (nsends) {
197     PetscCallMPI(MPI_Waitall(nsends,send_waits,send_status));
198   }
199 
200   /* 1st recvs: other's requests */
201   for (j=0; j< nreceives; j++) {
202     PetscCallMPI(MPI_Waitany(nreceives,recv_waits,&widx,&recv_status)); /* idx: index of handle for operation that completed */
203     PetscCallMPI(MPI_Get_count(&recv_status,MPIU_INT,&nindices));
204     rbuf   = rindices+nmax*widx; /* global index */
205     source = recv_status.MPI_SOURCE;
206 
207     /* compute mapping */
208     sbuf = rbuf;
209     for (i=0; i<nindices; i++) sbuf[i] = maploc[rbuf[i]-owners[rank]];
210 
211     /* send mapping back to the sender */
212     PetscCallMPI(MPI_Isend(sbuf,nindices,MPIU_INT,source,tag2,comm,send_waits2+widx));
213   }
214 
215   /* wait on 2nd sends */
216   if (nreceives) {
217     PetscCallMPI(MPI_Waitall(nreceives,send_waits2,send_status2));
218   }
219 
220   /* 2nd recvs: for the answer of my request */
221   for (j=0; j< nsends; j++) {
222     PetscCallMPI(MPI_Waitany(nsends,recv_waits2,&widx,&recv_status));
223     PetscCallMPI(MPI_Get_count(&recv_status,MPIU_INT,&nindices));
224     source = recv_status.MPI_SOURCE;
225     /* pack output ia[] */
226     rbuf  = sindices2+start[source];
227     count = 0;
228     for (i=0; i<n; i++) {
229       if (source == owner[i]) ia[i] = rbuf[count++];
230     }
231   }
232 
233   /* free arrays */
234   PetscCall(PetscFree(start));
235   PetscCall(PetscFree2(sizes,owner));
236   PetscCall(PetscFree2(rindices,recv_waits));
237   PetscCall(PetscFree2(rindices2,recv_waits2));
238   PetscCall(PetscFree3(sindices,send_waits,send_status));
239   PetscCall(PetscFree3(sindices2,send_waits2,send_status2));
240   PetscFunctionReturn(0);
241 }
242 
243 PetscErrorCode AOPetscToApplication_MemoryScalable(AO ao,PetscInt n,PetscInt *ia)
244 {
245   AO_MemoryScalable *aomems  = (AO_MemoryScalable*)ao->data;
246   PetscInt          *app_loc = aomems->app_loc;
247 
248   PetscFunctionBegin;
249   PetscCall(AOMap_MemoryScalable_private(ao,n,ia,app_loc));
250   PetscFunctionReturn(0);
251 }
252 
253 PetscErrorCode AOApplicationToPetsc_MemoryScalable(AO ao,PetscInt n,PetscInt *ia)
254 {
255   AO_MemoryScalable *aomems    = (AO_MemoryScalable*)ao->data;
256   PetscInt          *petsc_loc = aomems->petsc_loc;
257 
258   PetscFunctionBegin;
259   PetscCall(AOMap_MemoryScalable_private(ao,n,ia,petsc_loc));
260   PetscFunctionReturn(0);
261 }
262 
/* Function table installed on the AO object by AOCreate_MemoryScalable() */
static struct _AOOps AOOps_MemoryScalable = {
  PetscDesignatedInitializer(view,AOView_MemoryScalable),
  PetscDesignatedInitializer(destroy,AODestroy_MemoryScalable),
  PetscDesignatedInitializer(petsctoapplication,AOPetscToApplication_MemoryScalable),
  PetscDesignatedInitializer(applicationtopetsc,AOApplicationToPetsc_MemoryScalable),
};
269 
/*
   Fills in the local lookup table aomap_loc for one direction of the ordering:
   for each pair (from_array[i], to_array[i]) this process owns (per aomems->map)
   it sets aomap_loc[from_array[i]-owners[rank]] = to_array[i]; pairs owned by
   other processes are shipped to their owners as (ip,ia) index pairs.
   Called by AOCreate_MemoryScalable() once per direction (app_loc and petsc_loc).
*/
PetscErrorCode  AOCreateMemoryScalable_private(MPI_Comm comm,PetscInt napp,const PetscInt from_array[],const PetscInt to_array[],AO ao, PetscInt *aomap_loc)
{
  AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
  PetscLayout       map     = aomems->map;
  PetscInt          n_local = map->n,i,j;
  PetscMPIInt       rank,size,tag;
  PetscInt          *owner,*start,*sizes,nsends,nreceives;
  PetscInt          nmax,count,*sindices,*rindices,idx,lastidx;
  PetscInt          *owners = aomems->map->range;
  MPI_Request       *send_waits,*recv_waits;
  MPI_Status        recv_status;
  PetscMPIInt       nindices,widx;
  PetscInt          *rbuf;
  PetscInt          n=napp,ip,ia;
  MPI_Status        *send_status;

  PetscFunctionBegin;
  PetscCall(PetscArrayzero(aomap_loc,n_local));

  PetscCallMPI(MPI_Comm_rank(comm,&rank));
  PetscCallMPI(MPI_Comm_size(comm,&size));

  /*  first count number of contributors (of from_array[]) to each processor */
  PetscCall(PetscCalloc1(2*size,&sizes));
  PetscCall(PetscMalloc1(n,&owner));

  j       = 0;
  lastidx = -1;
  for (i=0; i<n; i++) {
    /* if indices are NOT locally sorted, need to start search at the beginning */
    if (lastidx > (idx = from_array[i])) j = 0;
    lastidx = idx;
    for (; j<size; j++) {
      if (idx >= owners[j] && idx < owners[j+1]) {
        sizes[2*j]  += 2; /* num of indices to be sent - in pairs (ip,ia) */
        sizes[2*j+1] = 1; /* send to proc[j] */
        owner[i]     = j;
        break;
      }
    }
  }
  sizes[2*rank]=sizes[2*rank+1]=0; /* do not receive from self! */
  nsends        = 0;
  for (i=0; i<size; i++) nsends += sizes[2*i+1];

  /* inform other processors of number of messages and max length*/
  PetscCall(PetscMaxSum(comm,sizes,&nmax,&nreceives));

  /* allocate arrays */
  PetscCall(PetscObjectGetNewTag((PetscObject)ao,&tag));
  PetscCall(PetscMalloc2(nreceives*nmax,&rindices,nreceives,&recv_waits));
  PetscCall(PetscMalloc3(2*n,&sindices,nsends,&send_waits,nsends,&send_status));
  PetscCall(PetscMalloc1(size,&start));

  /* post receives: each receive gets the worst-case buffer of nmax entries */
  for (i=0; i<nreceives; i++) {
    PetscCallMPI(MPI_Irecv(rindices+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i));
  }

  /* do sends:
      1) starts[i] gives the starting index in svalues for stuff going to
         the ith processor
  */
  start[0] = 0;
  for (i=1; i<size; i++) start[i] = start[i-1] + sizes[2*i-2];
  for (i=0; i<n; i++) {
    j = owner[i];
    if (j != rank) {
      /* pack the (global from, to) pair for its owning process */
      ip                   = from_array[i];
      ia                   = to_array[i];
      sindices[start[j]++] = ip;
      sindices[start[j]++] = ia;
    } else { /* compute my own map */
      ip            = from_array[i] - owners[rank]; /* local index on this process */
      ia            = to_array[i];
      aomap_loc[ip] = ia;
    }
  }

  /* rebuild start[] (it was advanced while packing) before posting the sends */
  start[0] = 0;
  for (i=1; i<size; i++) start[i] = start[i-1] + sizes[2*i-2];
  for (i=0,count=0; i<size; i++) {
    if (sizes[2*i+1]) {
      PetscCallMPI(MPI_Isend(sindices+start[i],sizes[2*i],MPIU_INT,i,tag,comm,send_waits+count));
      count++;
    }
  }
  PetscCheckFalse(nsends != count,comm,PETSC_ERR_SUP,"nsends %" PetscInt_FMT " != count %" PetscInt_FMT,nsends,count);

  /* wait on sends */
  if (nsends) {
    PetscCallMPI(MPI_Waitall(nsends,send_waits,send_status));
  }

  /* recvs: unpack the incoming (ip,ia) pairs into the local table */
  count=0;
  for (j= nreceives; j>0; j--) {
    PetscCallMPI(MPI_Waitany(nreceives,recv_waits,&widx,&recv_status));
    PetscCallMPI(MPI_Get_count(&recv_status,MPIU_INT,&nindices));
    rbuf = rindices+nmax*widx; /* global index */

    /* compute local mapping */
    for (i=0; i<nindices; i+=2) { /* pack aomap_loc */
      ip            = rbuf[i] - owners[rank]; /* local index */
      ia            = rbuf[i+1];
      aomap_loc[ip] = ia;
    }
    count++; /* NOTE(review): count is tallied here but never read afterwards */
  }

  PetscCall(PetscFree(start));
  PetscCall(PetscFree3(sindices,send_waits,send_status));
  PetscCall(PetscFree2(rindices,recv_waits));
  PetscCall(PetscFree(owner));
  PetscCall(PetscFree(sizes));
  PetscFunctionReturn(0);
}
387 
/*
   AOCreate_MemoryScalable - type constructor invoked by AOSetType(ao,AOMEMORYSCALABLE).

   Requires ao->isapp to have been set with AOSetIS(); ao->ispetsc may be NULL,
   in which case the natural ordering 0,1,2,... is used for the PETSc side.
   Builds the distributed lookup tables app_loc and petsc_loc via two calls to
   AOCreateMemoryScalable_private(), one per mapping direction.
*/
PETSC_EXTERN PetscErrorCode AOCreate_MemoryScalable(AO ao)
{
  IS                isapp=ao->isapp,ispetsc=ao->ispetsc;
  const PetscInt    *mypetsc,*myapp;
  PetscInt          napp,n_local,N,i,start,*petsc,*lens,*disp;
  MPI_Comm          comm;
  AO_MemoryScalable *aomems;
  PetscLayout       map;
  PetscMPIInt       size,rank;

  PetscFunctionBegin;
  PetscCheck(isapp,PetscObjectComm((PetscObject)ao),PETSC_ERR_ARG_WRONGSTATE,"AOSetIS() must be called before AOSetType()");
  /* create special struct aomems */
  PetscCall(PetscNewLog(ao,&aomems));
  ao->data = (void*) aomems;
  PetscCall(PetscMemcpy(ao->ops,&AOOps_MemoryScalable,sizeof(struct _AOOps)));
  PetscCall(PetscObjectChangeTypeName((PetscObject)ao,AOMEMORYSCALABLE));

  /* transmit all local lengths of isapp to all processors */
  PetscCall(PetscObjectGetComm((PetscObject)isapp,&comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(PetscMalloc2(size,&lens,size,&disp));
  PetscCall(ISGetLocalSize(isapp,&napp));
  PetscCallMPI(MPI_Allgather(&napp, 1, MPIU_INT, lens, 1, MPIU_INT, comm));

  /* prefix-sum the lengths: disp[i] is the global offset of process i, N the global total */
  N = 0;
  for (i = 0; i < size; i++) {
    disp[i] = N;
    N      += lens[i];
  }

  /* If ispetsc is 0 then use "natural" numbering */
  if (napp) {
    if (!ispetsc) {
      start = disp[rank];
      PetscCall(PetscMalloc1(napp+1, &petsc));
      for (i=0; i<napp; i++) petsc[i] = start + i;
    } else {
      /* petsc aliases the (const) IS indices here; it is only read below */
      PetscCall(ISGetIndices(ispetsc,&mypetsc));
      petsc = (PetscInt*)mypetsc;
    }
  } else {
    petsc = NULL;
  }

  /* create a map with global size N - used to determine the local sizes of ao - shall we use local napp instead of N? */
  PetscCall(PetscLayoutCreate(comm,&map));
  map->bs = 1;
  map->N  = N;
  PetscCall(PetscLayoutSetUp(map));

  ao->N       = N;
  ao->n       = map->n;
  aomems->map = map;

  /* create distributed indices app_loc: petsc->app and petsc_loc: app->petsc */
  n_local = map->n;
  PetscCall(PetscCalloc2(n_local, &aomems->app_loc,n_local,&aomems->petsc_loc));
  PetscCall(PetscLogObjectMemory((PetscObject)ao,2*n_local*sizeof(PetscInt)));
  PetscCall(ISGetIndices(isapp,&myapp));

  PetscCall(AOCreateMemoryScalable_private(comm,napp,petsc,myapp,ao,aomems->app_loc));
  PetscCall(AOCreateMemoryScalable_private(comm,napp,myapp,petsc,ao,aomems->petsc_loc));

  PetscCall(ISRestoreIndices(isapp,&myapp));
  /* petsc was either borrowed from ispetsc or allocated above; release accordingly */
  if (napp) {
    if (ispetsc) {
      PetscCall(ISRestoreIndices(ispetsc,&mypetsc));
    } else {
      PetscCall(PetscFree(petsc));
    }
  }
  PetscCall(PetscFree2(lens,disp));
  PetscFunctionReturn(0);
}
464 
465 /*@C
466    AOCreateMemoryScalable - Creates a memory scalable application ordering using two integer arrays.
467 
468    Collective
469 
470    Input Parameters:
471 +  comm - MPI communicator that is to share AO
472 .  napp - size of integer arrays
473 .  myapp - integer array that defines an ordering
474 -  mypetsc - integer array that defines another ordering (may be NULL to
475              indicate the natural ordering, that is 0,1,2,3,...)
476 
477    Output Parameter:
478 .  aoout - the new application ordering
479 
480    Level: beginner
481 
482     Notes:
    The arrays myapp and mypetsc must contain all the integers 0 to napp-1 with no duplicates; that is there cannot be any "holes"
484            in the indices. Use AOCreateMapping() or AOCreateMappingIS() if you wish to have "holes" in the indices.
485            Comparing with AOCreateBasic(), this routine trades memory with message communication.
486 
487 .seealso: AOCreateMemoryScalableIS(), AODestroy(), AOPetscToApplication(), AOApplicationToPetsc()
488 @*/
489 PetscErrorCode AOCreateMemoryScalable(MPI_Comm comm,PetscInt napp,const PetscInt myapp[],const PetscInt mypetsc[],AO *aoout)
490 {
491   IS             isapp,ispetsc;
492   const PetscInt *app=myapp,*petsc=mypetsc;
493 
494   PetscFunctionBegin;
495   PetscCall(ISCreateGeneral(comm,napp,app,PETSC_USE_POINTER,&isapp));
496   if (mypetsc) {
497     PetscCall(ISCreateGeneral(comm,napp,petsc,PETSC_USE_POINTER,&ispetsc));
498   } else {
499     ispetsc = NULL;
500   }
501   PetscCall(AOCreateMemoryScalableIS(isapp,ispetsc,aoout));
502   PetscCall(ISDestroy(&isapp));
503   if (mypetsc) {
504     PetscCall(ISDestroy(&ispetsc));
505   }
506   PetscFunctionReturn(0);
507 }
508 
509 /*@C
510    AOCreateMemoryScalableIS - Creates a memory scalable application ordering using two index sets.
511 
512    Collective on IS
513 
514    Input Parameters:
515 +  isapp - index set that defines an ordering
516 -  ispetsc - index set that defines another ordering (may be NULL to use the
517              natural ordering)
518 
519    Output Parameter:
520 .  aoout - the new application ordering
521 
522    Level: beginner
523 
524     Notes:
    The index sets isapp and ispetsc must contain all the integers 0 to napp-1 (where napp is the length of the index sets) with no duplicates;
526            that is there cannot be any "holes".
527            Comparing with AOCreateBasicIS(), this routine trades memory with message communication.
528 .seealso: AOCreateMemoryScalable(),  AODestroy()
529 @*/
530 PetscErrorCode  AOCreateMemoryScalableIS(IS isapp,IS ispetsc,AO *aoout)
531 {
532   MPI_Comm       comm;
533   AO             ao;
534 
535   PetscFunctionBegin;
536   PetscCall(PetscObjectGetComm((PetscObject)isapp,&comm));
537   PetscCall(AOCreate(comm,&ao));
538   PetscCall(AOSetIS(ao,isapp,ispetsc));
539   PetscCall(AOSetType(ao,AOMEMORYSCALABLE));
540   PetscCall(AOViewFromOptions(ao,NULL,"-ao_view"));
541   *aoout = ao;
542   PetscFunctionReturn(0);
543 }
544