/*
    The memory scalable AO application ordering routines. These store the
  orderings on each processor for that processor's range of values
*/

#include <../src/vec/is/ao/aoimpl.h> /*I  "petscao.h"   I*/

typedef struct {
  PetscInt   *app_loc;   /* app_loc[i] is the partner for the ith local PETSc slot */
  PetscInt   *petsc_loc; /* petsc_loc[j] is the partner for the jth local app slot */
  PetscLayout map;       /* determines the local sizes of ao */
} AO_MemoryScalable;
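
/*
   For example (illustration only): with N = 6 ordering entries on two MPI ranks and the default
   PetscLayout split of three entries per rank, rank 0 owns PETSc indices 0..2 and rank 1 owns 3..5.
   On rank 1, app_loc[i] holds the application index paired with PETSc index map->range[1] + i, and
   petsc_loc[i] holds the PETSc index paired with application index map->range[1] + i.
*/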

/*
       All processes ship their data to process 0 to be printed; note that this is not scalable because
       process 0 allocates space for all the ordering entries across all the processes
*/
static PetscErrorCode AOView_MemoryScalable(AO ao, PetscViewer viewer)
{
  PetscMPIInt        rank, size;
  AO_MemoryScalable *aomems = (AO_MemoryScalable *)ao->data;
  PetscBool          iascii;
  PetscMPIInt        tag_app, tag_petsc;
  PetscLayout        map = aomems->map;
  PetscInt          *app, *app_loc, *petsc, *petsc_loc, len, i, j;
  MPI_Status         status;

  PetscFunctionBegin;
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
  PetscCheck(iascii, PetscObjectComm((PetscObject)viewer), PETSC_ERR_SUP, "Viewer type %s not supported for AO MemoryScalable", ((PetscObject)viewer)->type_name);

  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)ao), &rank));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)ao), &size));

  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag_app));
  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag_petsc));

  if (rank == 0) {
    PetscCall(PetscViewerASCIIPrintf(viewer, "Number of elements in ordering %" PetscInt_FMT "\n", ao->N));
    PetscCall(PetscViewerASCIIPrintf(viewer, "PETSc->App  App->PETSc\n"));

    PetscCall(PetscMalloc2(map->N, &app, map->N, &petsc));
    len = map->n;
    /* print local AO */
    PetscCall(PetscViewerASCIIPrintf(viewer, "Process [%d]\n", rank));
    for (i = 0; i < len; i++) PetscCall(PetscViewerASCIIPrintf(viewer, "%3" PetscInt_FMT "  %3" PetscInt_FMT "    %3" PetscInt_FMT "  %3" PetscInt_FMT "\n", i, aomems->app_loc[i], i, aomems->petsc_loc[i]));

    /* recv and print off-processor's AO */
    for (i = 1; i < size; i++) {
      len       = map->range[i + 1] - map->range[i];
      app_loc   = app + map->range[i];
      petsc_loc = petsc + map->range[i];
      PetscCallMPI(MPI_Recv(app_loc, (PetscMPIInt)len, MPIU_INT, i, tag_app, PetscObjectComm((PetscObject)ao), &status));
      PetscCallMPI(MPI_Recv(petsc_loc, (PetscMPIInt)len, MPIU_INT, i, tag_petsc, PetscObjectComm((PetscObject)ao), &status));
      PetscCall(PetscViewerASCIIPrintf(viewer, "Process [%" PetscInt_FMT "]\n", i));
      for (j = 0; j < len; j++) PetscCall(PetscViewerASCIIPrintf(viewer, "%3" PetscInt_FMT "  %3" PetscInt_FMT "    %3" PetscInt_FMT "  %3" PetscInt_FMT "\n", map->range[i] + j, app_loc[j], map->range[i] + j, petsc_loc[j]));
    }
    PetscCall(PetscFree2(app, petsc));

  } else {
    /* send values */
    PetscCallMPI(MPI_Send((void *)aomems->app_loc, map->n, MPIU_INT, 0, tag_app, PetscObjectComm((PetscObject)ao)));
    PetscCallMPI(MPI_Send((void *)aomems->petsc_loc, map->n, MPIU_INT, 0, tag_petsc, PetscObjectComm((PetscObject)ao)));
  }
  PetscCall(PetscViewerFlush(viewer));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode AODestroy_MemoryScalable(AO ao)
{
  AO_MemoryScalable *aomems = (AO_MemoryScalable *)ao->data;

  PetscFunctionBegin;
  PetscCall(PetscFree2(aomems->app_loc, aomems->petsc_loc));
  PetscCall(PetscLayoutDestroy(&aomems->map));
  PetscCall(PetscFree(aomems));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   Input Parameters:
+   ao - the application ordering context
.   n  - the number of integers in ia[]
.   ia - the integers; these are replaced with their mapped values
-   maploc - app_loc or petsc_loc in struct "AO_MemoryScalable"

   Output Parameter:
.   ia - the mapped integers
 */
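/*
   Communication pattern: each process bins the entries of ia[] by owning process (using
   aomems->map->range), sends each owner the indices it needs mapped (tag1), the owner looks
   them up in its local maploc[] and sends the mapped values back (tag2), and the answers are
   then scattered into ia[] in the original order. Locally owned entries are mapped in place
   without any messages.
*/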
static PetscErrorCode AOMap_MemoryScalable_private(AO ao, PetscInt n, PetscInt *ia, const PetscInt *maploc)
{
  AO_MemoryScalable *aomems = (AO_MemoryScalable *)ao->data;
  MPI_Comm           comm;
  PetscMPIInt        rank, size, tag1, tag2;
  PetscInt          *owner, *start, *sizes, nsends, nreceives;
  PetscInt           nmax, count, *sindices, *rindices, i, j, idx, lastidx, *sindices2, *rindices2;
  const PetscInt    *owners = aomems->map->range;
  MPI_Request       *send_waits, *recv_waits, *send_waits2, *recv_waits2;
  MPI_Status         recv_status;
  PetscMPIInt        nindices, source, widx;
  PetscInt          *rbuf, *sbuf;
  MPI_Status        *send_status, *send_status2;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)ao, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /*  first count number of contributors to each processor */
  PetscCall(PetscMalloc1(size, &start));
  PetscCall(PetscCalloc2(2 * size, &sizes, n, &owner));

  j       = 0;
  lastidx = -1;
  for (i = 0; i < n; i++) {
    if (ia[i] < 0) owner[i] = -1;           /* mark negative entries (which are not to be mapped) with a special negative value */
    else if (ia[i] >= ao->N) owner[i] = -2; /* mark out of range entries with a special negative value */
    else {
      /* if indices are NOT locally sorted, need to start search at the beginning */
      if (lastidx > (idx = ia[i])) j = 0;
      lastidx = idx;
      for (; j < size; j++) {
        if (idx >= owners[j] && idx < owners[j + 1]) {
          sizes[2 * j]++;       /* num of indices to be sent */
          sizes[2 * j + 1] = 1; /* send to proc[j] */
          owner[i]         = j;
          break;
        }
      }
    }
  }
  sizes[2 * rank] = sizes[2 * rank + 1] = 0; /* do not receive from self! */
  nsends                                = 0;
  for (i = 0; i < size; i++) nsends += sizes[2 * i + 1];

  /* inform other processors of number of messages and max length */
  PetscCall(PetscMaxSum(comm, sizes, &nmax, &nreceives));

  /* allocate arrays */
  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag1));
  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag2));

  PetscCall(PetscMalloc2(nreceives * nmax, &rindices, nreceives, &recv_waits));
  PetscCall(PetscMalloc2(nsends * nmax, &rindices2, nsends, &recv_waits2));

  PetscCall(PetscMalloc3(n, &sindices, nsends, &send_waits, nsends, &send_status));
  PetscCall(PetscMalloc3(n, &sindices2, nreceives, &send_waits2, nreceives, &send_status2));

  /* post 1st receives: receive others' requests
     since we don't know how long each individual message is we
     allocate the largest needed buffer for each receive. Potentially
     this is a lot of wasted space.
  */
  for (i = 0, count = 0; i < nreceives; i++) PetscCallMPI(MPI_Irecv(rindices + nmax * i, nmax, MPIU_INT, MPI_ANY_SOURCE, tag1, comm, recv_waits + count++));

  /* do 1st sends:
      1) start[i] gives the starting index in sindices[] for stuff going to
         the ith processor
  */
  start[0] = 0;
  for (i = 1; i < size; i++) start[i] = start[i - 1] + sizes[2 * i - 2];
  for (i = 0; i < n; i++) {
    j = owner[i];
    if (j == -1) continue; /* do not remap negative entries in ia[] */
    else if (j == -2) {    /* out of range entries get mapped to -1 */
      ia[i] = -1;
      continue;
    } else if (j != rank) {
      sindices[start[j]++] = ia[i];
    } else { /* compute my own map */
      ia[i] = maploc[ia[i] - owners[rank]];
    }
  }

  start[0] = 0;
  for (i = 1; i < size; i++) start[i] = start[i - 1] + sizes[2 * i - 2];
  for (i = 0, count = 0; i < size; i++) {
    if (sizes[2 * i + 1]) {
      /* send my request to others */
      PetscCallMPI(MPI_Isend(sindices + start[i], sizes[2 * i], MPIU_INT, i, tag1, comm, send_waits + count));
      /* post receive for the answer to my request */
      PetscCallMPI(MPI_Irecv(sindices2 + start[i], sizes[2 * i], MPIU_INT, i, tag2, comm, recv_waits2 + count));
      count++;
    }
  }
  PetscCheck(nsends == count, comm, PETSC_ERR_SUP, "nsends %" PetscInt_FMT " != count %" PetscInt_FMT, nsends, count);

  /* wait on 1st sends */
  if (nsends) PetscCallMPI(MPI_Waitall(nsends, send_waits, send_status));

  /* 1st recvs: others' requests */
  for (j = 0; j < nreceives; j++) {
    PetscCallMPI(MPI_Waitany(nreceives, recv_waits, &widx, &recv_status)); /* widx: index of the receive request that completed */
    PetscCallMPI(MPI_Get_count(&recv_status, MPIU_INT, &nindices));
    rbuf   = rindices + nmax * widx; /* global index */
    source = recv_status.MPI_SOURCE;

    /* compute mapping */
    sbuf = rbuf;
    for (i = 0; i < nindices; i++) sbuf[i] = maploc[rbuf[i] - owners[rank]];

    /* send mapping back to the sender */
    PetscCallMPI(MPI_Isend(sbuf, nindices, MPIU_INT, source, tag2, comm, send_waits2 + widx));
  }

  /* wait on 2nd sends */
  if (nreceives) PetscCallMPI(MPI_Waitall(nreceives, send_waits2, send_status2));

  /* 2nd recvs: the answers to my requests */
  for (j = 0; j < nsends; j++) {
    PetscCallMPI(MPI_Waitany(nsends, recv_waits2, &widx, &recv_status));
    PetscCallMPI(MPI_Get_count(&recv_status, MPIU_INT, &nindices));
    source = recv_status.MPI_SOURCE;
    /* pack output ia[] */
    rbuf  = sindices2 + start[source];
    count = 0;
    for (i = 0; i < n; i++) {
      if (source == owner[i]) ia[i] = rbuf[count++];
    }
  }

  /* free arrays */
  PetscCall(PetscFree(start));
  PetscCall(PetscFree2(sizes, owner));
  PetscCall(PetscFree2(rindices, recv_waits));
  PetscCall(PetscFree2(rindices2, recv_waits2));
  PetscCall(PetscFree3(sindices, send_waits, send_status));
  PetscCall(PetscFree3(sindices2, send_waits2, send_status2));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode AOPetscToApplication_MemoryScalable(AO ao, PetscInt n, PetscInt *ia)
{
  AO_MemoryScalable *aomems  = (AO_MemoryScalable *)ao->data;
  PetscInt          *app_loc = aomems->app_loc;

  PetscFunctionBegin;
  PetscCall(AOMap_MemoryScalable_private(ao, n, ia, app_loc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode AOApplicationToPetsc_MemoryScalable(AO ao, PetscInt n, PetscInt *ia)
{
  AO_MemoryScalable *aomems    = (AO_MemoryScalable *)ao->data;
  PetscInt          *petsc_loc = aomems->petsc_loc;

  PetscFunctionBegin;
  PetscCall(AOMap_MemoryScalable_private(ao, n, ia, petsc_loc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static const struct _AOOps AOOps_MemoryScalable = {
  PetscDesignatedInitializer(view, AOView_MemoryScalable),
  PetscDesignatedInitializer(destroy, AODestroy_MemoryScalable),
  PetscDesignatedInitializer(petsctoapplication, AOPetscToApplication_MemoryScalable),
  PetscDesignatedInitializer(applicationtopetsc, AOApplicationToPetsc_MemoryScalable),
};

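/*
   Builds the local piece of one direction of the mapping: each pair (from_array[i], to_array[i])
   is routed to the process that owns from_array[i] in aomems->map, and that owner stores
   aomap_loc[from_array[i] - owners[rank]] = to_array[i]. AOCreate_MemoryScalable() calls this
   twice, once for each direction of the ordering.
*/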
static PetscErrorCode AOCreateMemoryScalable_private(MPI_Comm comm, PetscInt napp, const PetscInt from_array[], const PetscInt to_array[], AO ao, PetscInt *aomap_loc)
{
  AO_MemoryScalable *aomems  = (AO_MemoryScalable *)ao->data;
  PetscLayout        map     = aomems->map;
  PetscInt           n_local = map->n, i, j;
  PetscMPIInt        rank, size, tag;
  PetscInt          *owner, *start, *sizes, nsends, nreceives;
  PetscInt           nmax, count, *sindices, *rindices, idx, lastidx;
  PetscInt          *owners = aomems->map->range;
  MPI_Request       *send_waits, *recv_waits;
  MPI_Status         recv_status;
  PetscMPIInt        nindices, widx;
  PetscInt          *rbuf;
  PetscInt           n = napp, ip, ia;
  MPI_Status        *send_status;

  PetscFunctionBegin;
  PetscCall(PetscArrayzero(aomap_loc, n_local));

  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /*  first count number of contributors (of from_array[]) to each processor */
  PetscCall(PetscCalloc1(2 * size, &sizes));
  PetscCall(PetscMalloc1(n, &owner));

  j       = 0;
  lastidx = -1;
  for (i = 0; i < n; i++) {
    /* if indices are NOT locally sorted, need to start search at the beginning */
    if (lastidx > (idx = from_array[i])) j = 0;
    lastidx = idx;
    for (; j < size; j++) {
      if (idx >= owners[j] && idx < owners[j + 1]) {
        sizes[2 * j] += 2;    /* num of indices to be sent - in pairs (ip,ia) */
        sizes[2 * j + 1] = 1; /* send to proc[j] */
        owner[i]         = j;
        break;
      }
    }
  }
  sizes[2 * rank] = sizes[2 * rank + 1] = 0; /* do not receive from self! */
  nsends                                = 0;
  for (i = 0; i < size; i++) nsends += sizes[2 * i + 1];

  /* inform other processors of number of messages and max length */
  PetscCall(PetscMaxSum(comm, sizes, &nmax, &nreceives));

  /* allocate arrays */
  PetscCall(PetscObjectGetNewTag((PetscObject)ao, &tag));
  PetscCall(PetscMalloc2(nreceives * nmax, &rindices, nreceives, &recv_waits));
  PetscCall(PetscMalloc3(2 * n, &sindices, nsends, &send_waits, nsends, &send_status));
  PetscCall(PetscMalloc1(size, &start));

  /* post receives: */
  for (i = 0; i < nreceives; i++) PetscCallMPI(MPI_Irecv(rindices + nmax * i, nmax, MPIU_INT, MPI_ANY_SOURCE, tag, comm, recv_waits + i));

  /* do sends:
      1) start[i] gives the starting index in sindices[] for stuff going to
         the ith processor
  */
  start[0] = 0;
  for (i = 1; i < size; i++) start[i] = start[i - 1] + sizes[2 * i - 2];
  for (i = 0; i < n; i++) {
    j = owner[i];
    if (j != rank) {
      ip                   = from_array[i];
      ia                   = to_array[i];
      sindices[start[j]++] = ip;
      sindices[start[j]++] = ia;
    } else { /* compute my own map */
      ip            = from_array[i] - owners[rank];
      ia            = to_array[i];
      aomap_loc[ip] = ia;
    }
  }

  start[0] = 0;
  for (i = 1; i < size; i++) start[i] = start[i - 1] + sizes[2 * i - 2];
  for (i = 0, count = 0; i < size; i++) {
    if (sizes[2 * i + 1]) {
      PetscCallMPI(MPI_Isend(sindices + start[i], sizes[2 * i], MPIU_INT, i, tag, comm, send_waits + count));
      count++;
    }
  }
  PetscCheck(nsends == count, comm, PETSC_ERR_SUP, "nsends %" PetscInt_FMT " != count %" PetscInt_FMT, nsends, count);

  /* wait on sends */
  if (nsends) PetscCallMPI(MPI_Waitall(nsends, send_waits, send_status));

  /* recvs */
  count = 0;
  for (j = nreceives; j > 0; j--) {
    PetscCallMPI(MPI_Waitany(nreceives, recv_waits, &widx, &recv_status));
    PetscCallMPI(MPI_Get_count(&recv_status, MPIU_INT, &nindices));
    rbuf = rindices + nmax * widx; /* global index */

    /* compute local mapping */
    for (i = 0; i < nindices; i += 2) {       /* pack aomap_loc */
      ip            = rbuf[i] - owners[rank]; /* local index */
      ia            = rbuf[i + 1];
      aomap_loc[ip] = ia;
    }
    count++;
  }

  PetscCall(PetscFree(start));
  PetscCall(PetscFree3(sindices, send_waits, send_status));
  PetscCall(PetscFree2(rindices, recv_waits));
  PetscCall(PetscFree(owner));
  PetscCall(PetscFree(sizes));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode AOCreate_MemoryScalable(AO ao)
{
  IS                 isapp = ao->isapp, ispetsc = ao->ispetsc;
  const PetscInt    *mypetsc, *myapp;
  PetscInt           napp, n_local, N, i, start, *petsc, *lens, *disp;
  MPI_Comm           comm;
  AO_MemoryScalable *aomems;
  PetscLayout        map;
  PetscMPIInt        size, rank;

  PetscFunctionBegin;
  PetscCheck(isapp, PetscObjectComm((PetscObject)ao), PETSC_ERR_ARG_WRONGSTATE, "AOSetIS() must be called before AOSetType()");
  /* create special struct aomems */
  PetscCall(PetscNew(&aomems));
  ao->data   = (void *)aomems;
  ao->ops[0] = AOOps_MemoryScalable;
  PetscCall(PetscObjectChangeTypeName((PetscObject)ao, AOMEMORYSCALABLE));

  /* transmit all local lengths of isapp to all processors */
  PetscCall(PetscObjectGetComm((PetscObject)isapp, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(PetscMalloc2(size, &lens, size, &disp));
  PetscCall(ISGetLocalSize(isapp, &napp));
  PetscCallMPI(MPI_Allgather(&napp, 1, MPIU_INT, lens, 1, MPIU_INT, comm));

  N = 0;
  for (i = 0; i < size; i++) {
    disp[i] = N;
    N += lens[i];
  }

  /* If ispetsc is NULL then use the "natural" numbering */
  if (napp) {
    if (!ispetsc) {
      start = disp[rank];
      PetscCall(PetscMalloc1(napp + 1, &petsc));
      for (i = 0; i < napp; i++) petsc[i] = start + i;
    } else {
      PetscCall(ISGetIndices(ispetsc, &mypetsc));
      petsc = (PetscInt *)mypetsc;
    }
  } else {
    petsc = NULL;
  }

  /* create a map with global size N - used to determine the local sizes of ao - shall we use local napp instead of N? */
  PetscCall(PetscLayoutCreate(comm, &map));
  map->bs = 1;
  map->N  = N;
  PetscCall(PetscLayoutSetUp(map));

  ao->N       = N;
  ao->n       = map->n;
  aomems->map = map;

  /* create distributed indices app_loc: petsc->app and petsc_loc: app->petsc */
  n_local = map->n;
  PetscCall(PetscCalloc2(n_local, &aomems->app_loc, n_local, &aomems->petsc_loc));
  PetscCall(ISGetIndices(isapp, &myapp));

  PetscCall(AOCreateMemoryScalable_private(comm, napp, petsc, myapp, ao, aomems->app_loc));
  PetscCall(AOCreateMemoryScalable_private(comm, napp, myapp, petsc, ao, aomems->petsc_loc));

  PetscCall(ISRestoreIndices(isapp, &myapp));
  if (napp) {
    if (ispetsc) {
      PetscCall(ISRestoreIndices(ispetsc, &mypetsc));
    } else {
      PetscCall(PetscFree(petsc));
    }
  }
  PetscCall(PetscFree2(lens, disp));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  AOCreateMemoryScalable - Creates a memory scalable application ordering using two integer arrays.

  Collective

  Input Parameters:
+ comm    - MPI communicator that is to share the `AO`
. napp    - size of integer arrays
. myapp   - integer array that defines an ordering
- mypetsc - integer array that defines another ordering (may be `NULL` to indicate the natural ordering, that is 0,1,2,3,...)

  Output Parameter:
. aoout - the new application ordering

  Level: beginner

  Note:
  The arrays `myapp` and `mypetsc` must contain all the integers 0 to `napp`-1 with no duplicates; that is, there cannot be any "holes"
  in the indices. Use `AOCreateMapping()` or `AOCreateMappingIS()` if you wish to have "holes" in the indices.
  Compared with `AOCreateBasic()`, this routine trades memory for message communication.

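  Example Usage:
  A minimal sketch for illustration only (not taken from the PETSc tests); it assumes a run on a
  single MPI rank so that the made-up array `app` by itself contains all of 0,...,napp-1:
.vb
  PetscInt app[] = {2, 0, 1}; /* application ordering of the three entries owned by this rank */
  AO       ao;

  PetscCall(AOCreateMemoryScalable(PETSC_COMM_WORLD, 3, app, NULL, &ao)); /* NULL -> natural PETSc ordering 0,1,2 */
  PetscCall(AOApplicationToPetsc(ao, 3, app));                            /* app[] is overwritten with PETSc indices */
  PetscCall(AODestroy(&ao));
.ve
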
.seealso: [](sec_ao), [](sec_scatter), `AO`, `AOCreateMemoryScalableIS()`, `AODestroy()`, `AOPetscToApplication()`, `AOApplicationToPetsc()`
@*/
PetscErrorCode AOCreateMemoryScalable(MPI_Comm comm, PetscInt napp, const PetscInt myapp[], const PetscInt mypetsc[], AO *aoout)
{
  IS              isapp, ispetsc;
  const PetscInt *app = myapp, *petsc = mypetsc;

  PetscFunctionBegin;
  PetscCall(ISCreateGeneral(comm, napp, app, PETSC_USE_POINTER, &isapp));
  if (mypetsc) {
    PetscCall(ISCreateGeneral(comm, napp, petsc, PETSC_USE_POINTER, &ispetsc));
  } else {
    ispetsc = NULL;
  }
  PetscCall(AOCreateMemoryScalableIS(isapp, ispetsc, aoout));
  PetscCall(ISDestroy(&isapp));
  if (mypetsc) PetscCall(ISDestroy(&ispetsc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  AOCreateMemoryScalableIS - Creates a memory scalable application ordering using two index sets.

  Collective

  Input Parameters:
+ isapp   - index set that defines an ordering
- ispetsc - index set that defines another ordering (may be `NULL` to use the natural ordering)

  Output Parameter:
. aoout - the new application ordering

  Level: beginner

  Notes:
  The index sets `isapp` and `ispetsc` must contain all the integers 0 to napp-1 (where napp is the length of the index sets) with no duplicates;
  that is, there cannot be any "holes".

  Compared with `AOCreateBasicIS()`, this routine trades memory for message communication.

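  Example Usage:
  A minimal sketch for illustration only (not taken from the PETSc tests); the index values are
  made up and a single-rank run is assumed so that the indices 0,...,2 appear exactly once:
.vb
  PetscInt app[] = {2, 0, 1};
  IS       isapp;
  AO       ao;

  PetscCall(ISCreateGeneral(PETSC_COMM_WORLD, 3, app, PETSC_COPY_VALUES, &isapp));
  PetscCall(AOCreateMemoryScalableIS(isapp, NULL, &ao)); /* NULL -> natural PETSc ordering */
  PetscCall(ISDestroy(&isapp));
  PetscCall(AODestroy(&ao));
.ve
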
.seealso: [](sec_ao), [](sec_scatter), `AO`, `AOCreateBasicIS()`, `AOCreateMemoryScalable()`, `AODestroy()`
@*/
PetscErrorCode AOCreateMemoryScalableIS(IS isapp, IS ispetsc, AO *aoout)
{
  MPI_Comm comm;
  AO       ao;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)isapp, &comm));
  PetscCall(AOCreate(comm, &ao));
  PetscCall(AOSetIS(ao, isapp, ispetsc));
  PetscCall(AOSetType(ao, AOMEMORYSCALABLE));
  PetscCall(AOViewFromOptions(ao, NULL, "-ao_view"));
  *aoout = ao;
  PetscFunctionReturn(PETSC_SUCCESS);
}
525