/* xref: /petsc/src/vec/is/utils/psort.c (revision 2da392cc7c10228af19ad9843ce5155178acb644) */

#include <petsc/private/petscimpl.h>
#include <petscis.h> /*I "petscis.h" I*/

/* This is the bitonic merge that works on non-power-of-2 sizes, based on the algorithm found at http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/oddn.htm */
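/* Illustrative example (not from the original source): with rankStart = 0, rankEnd = 6, and
 * forward = PETSC_TRUE, the largest power of two smaller than the range size 6 is split = 4, so
 * mid = 4: ranks 0 and 1 exchange with ranks 4 and 5, while ranks 2 and 3 have no partner in this
 * step.  Each pair does an element-wise compare-exchange of its n keys, and the merge then
 * recurses independently on ranks [0, 4) and [4, 6). */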
static PetscErrorCode PetscParallelSortInt_Bitonic_Merge(MPI_Comm comm, PetscMPIInt tag, PetscMPIInt rankStart, PetscMPIInt rankEnd, PetscMPIInt rank, PetscMPIInt n, PetscInt keys[], PetscInt buffer[], PetscBool forward)
{
  PetscInt       diff;
  PetscInt       split, mid, partner;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  diff = rankEnd - rankStart;
  if (diff <= 0) PetscFunctionReturn(0);
  if (diff == 1) {
    if (forward) {
      ierr = PetscSortInt((PetscInt) n, keys);CHKERRQ(ierr);
    } else {
      ierr = PetscSortReverseInt((PetscInt) n, keys);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }
  split = 1;
  while (2 * split < diff) split *= 2;
  mid = rankStart + split;
  if (rank < mid) {
    partner = rank + split;
  } else {
    partner = rank - split;
  }
  if (partner < rankEnd) {
    PetscMPIInt i;

    ierr = MPI_Sendrecv(keys, n, MPIU_INT, partner, tag, buffer, n, MPIU_INT, partner, tag, comm, MPI_STATUS_IGNORE);CHKERRQ(ierr);
    if ((rank < partner) == (forward == PETSC_TRUE)) {
      for (i = 0; i < n; i++) {
        keys[i] = (keys[i] <= buffer[i]) ? keys[i] : buffer[i];
      }
    } else {
      for (i = 0; i < n; i++) {
        keys[i] = (keys[i] > buffer[i]) ? keys[i] : buffer[i];
      }
    }
  }
  /* divide and conquer */
  if (rank < mid) {
    ierr = PetscParallelSortInt_Bitonic_Merge(comm, tag, rankStart, mid, rank, n, keys, buffer, forward);CHKERRQ(ierr);
  } else {
    ierr = PetscParallelSortInt_Bitonic_Merge(comm, tag, mid, rankEnd, rank, n, keys, buffer, forward);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* This is the bitonic sort that works on non-power-of-2 sizes, based on the algorithm found at http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/oddn.htm */
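/* Illustrative example (not from the original source): sorting forward on ranks [0, 4), the
 * recursion sorts ranks [0, 2) in reverse and ranks [2, 4) forward, so the concatenation of the
 * local arrays forms a bitonic (descending-then-ascending) sequence, which the merge above then
 * turns into a fully ascending sequence. */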
static PetscErrorCode PetscParallelSortInt_Bitonic_Recursive(MPI_Comm comm, PetscMPIInt tag, PetscMPIInt rankStart, PetscMPIInt rankEnd, PetscMPIInt rank, PetscMPIInt n, PetscInt keys[], PetscInt buffer[], PetscBool forward)
{
  PetscInt       diff;
  PetscInt       mid;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  diff = rankEnd - rankStart;
  if (diff <= 0) PetscFunctionReturn(0);
  if (diff == 1) {
    if (forward) {
      ierr = PetscSortInt(n, keys);CHKERRQ(ierr);
    } else {
      ierr = PetscSortReverseInt(n, keys);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }
  mid = rankStart + diff / 2;
  /* divide and conquer */
  if (rank < mid) {
    ierr = PetscParallelSortInt_Bitonic_Recursive(comm, tag, rankStart, mid, rank, n, keys, buffer, (PetscBool) !forward);CHKERRQ(ierr);
  } else {
    ierr = PetscParallelSortInt_Bitonic_Recursive(comm, tag, mid, rankEnd, rank, n, keys, buffer, forward);CHKERRQ(ierr);
  }
  /* bitonic merge */
  ierr = PetscParallelSortInt_Bitonic_Merge(comm, tag, rankStart, rankEnd, rank, n, keys, buffer, forward);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscParallelSortInt_Bitonic(MPI_Comm comm, PetscInt n, PetscInt keys[])
{
  PetscMPIInt    size, rank, tag, mpin;
  PetscInt       *buffer;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidIntPointer(keys, 3);
  ierr = PetscCommGetNewTag(comm, &tag);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = PetscMPIIntCast(n, &mpin);CHKERRQ(ierr);
  ierr = PetscMalloc1(n, &buffer);CHKERRQ(ierr);
  ierr = PetscParallelSortInt_Bitonic_Recursive(comm, tag, 0, size, rank, mpin, keys, buffer, PETSC_TRUE);CHKERRQ(ierr);
  ierr = PetscFree(buffer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

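/* Select P - 1 global pivots for the samplesort: each process proposes P - 1 pivots drawn from
 * its locally sorted keys (or PETSC_MAX_INT placeholders if it holds no keys), the proposals are
 * sorted in parallel with the bitonic sort above, and an evenly spaced subset of P - 1 of them is
 * allgathered to every process in *outpivots. */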
static PetscErrorCode PetscParallelSampleSelect(PetscLayout mapin, PetscLayout mapout, PetscInt keysin[], PetscInt *outpivots[])
{
  PetscMPIInt    size, rank;
  PetscInt       *pivots, *finalpivots, i;
  PetscInt       non_empty, my_first, count;
  PetscMPIInt    *keys_per, max_keys_per;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(mapin->comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(mapin->comm, &rank);CHKERRQ(ierr);

  /* Choose P - 1 pivots that would be ideal for the distribution on this process */
  ierr = PetscMalloc1(size - 1, &pivots);CHKERRQ(ierr);
  for (i = 0; i < size - 1; i++) {
    PetscInt index;

    if (!mapin->n) {
      /* if this rank is empty, put "infinity" in the list.  each process knows how many empty
       * processes are in the layout, so those values will be ignored from the end of the sorted
       * pivots */
      pivots[i] = PETSC_MAX_INT;
    } else {
      /* match the distribution to the desired output described by mapout by getting the index
       * that is approximately the appropriate fraction through the list */
      index = ((PetscReal) mapout->range[i + 1]) * ((PetscReal) mapin->n) / ((PetscReal) mapout->N);
      index = PetscMin(index, (mapin->n - 1));
      index = PetscMax(index, 0);
      pivots[i] = keysin[index];
    }
  }
  /* sort the pivots in parallel */
  ierr = PetscParallelSortInt_Bitonic(mapin->comm, size - 1, pivots);CHKERRQ(ierr);
  if (PetscDefined(USE_DEBUG)) {
    PetscBool sorted;

    ierr = PetscParallelSortedInt(mapin->comm, size - 1, pivots, &sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(mapin->comm, PETSC_ERR_PLIB, "bitonic sort failed");
  }

  /* if there are Z nonempty processes, we have (P - 1) * Z real pivots, and we want to select
   * at indices Z - 1, 2*Z - 1, ... (P - 1) * Z - 1 */
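  /* Illustrative example (not from the original source): with P = 4 and Z = 3, the parallel sort
   * leaves 3 * 3 = 9 finite pivots followed by 3 copies of PETSC_MAX_INT, and the pivots kept are
   * those at global indices 2, 5, and 8. */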
  non_empty = size;
  for (i = 0; i < size; i++) if (mapout->range[i] == mapout->range[i+1]) non_empty--;
  ierr = PetscCalloc1(size + 1, &keys_per);CHKERRQ(ierr);
  my_first = -1;
  if (non_empty) {
    for (i = 0; i < size - 1; i++) {
      size_t sample = (i + 1) * non_empty - 1;
      size_t sample_rank = sample / (size - 1);

      keys_per[sample_rank]++;
      if (my_first < 0 && (PetscMPIInt) sample_rank == rank) {
        my_first = (PetscInt) (sample - sample_rank * (size - 1));
      }
    }
  }
  for (i = 0, max_keys_per = 0; i < size; i++) max_keys_per = PetscMax(keys_per[i], max_keys_per);
  ierr = PetscMalloc1(size * max_keys_per, &finalpivots);CHKERRQ(ierr);
  /* now that we know how many pivots each process will provide, gather the selected pivots at the start of the array
   * and allgather them */
  for (i = 0; i < keys_per[rank]; i++) pivots[i] = pivots[my_first + i * non_empty];
  for (i = keys_per[rank]; i < max_keys_per; i++) pivots[i] = PETSC_MAX_INT;
  ierr = MPI_Allgather(pivots, max_keys_per, MPIU_INT, finalpivots, max_keys_per, MPIU_INT, mapin->comm);CHKERRQ(ierr);
  for (i = 0, count = 0; i < size; i++) {
    PetscInt j;

    for (j = 0; j < max_keys_per; j++) {
      if (j < keys_per[i]) {
        finalpivots[count++] = finalpivots[i * max_keys_per + j];
      }
    }
  }
  *outpivots = finalpivots;
  ierr = PetscFree(keys_per);CHKERRQ(ierr);
  ierr = PetscFree(pivots);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

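/* Redistribute a globally sorted array into the layout described by map: arrayin holds this
 * process's n consecutive entries of the sorted sequence (starting at the offset computed by the
 * MPI_Scan below), and arrayout receives the entries owned by this process under map.  The rank
 * whose slice contains the start of a receiver's range sends a "first" message; ranks whose
 * slices fall later inside that range send "second" messages, including empty ones from ranks
 * that hold no keys, so the receiver's sequential receive loop does not stall. */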
static PetscErrorCode PetscParallelRedistribute(PetscLayout map, PetscInt n, PetscInt arrayin[], PetscInt arrayout[])
{
  PetscMPIInt    size, rank;
  PetscInt       myOffset, nextOffset;
  PetscInt       i;
  PetscMPIInt    total, filled;
  PetscMPIInt    sender, nfirst, nsecond;
  PetscMPIInt    firsttag, secondtag;
  MPI_Request    firstreqrcv;
  MPI_Request    *firstreqs;
  MPI_Request    *secondreqs;
  MPI_Status     firststatus;

  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(map->comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(map->comm, &rank);CHKERRQ(ierr);
  ierr = PetscCommGetNewTag(map->comm, &firsttag);CHKERRQ(ierr);
  ierr = PetscCommGetNewTag(map->comm, &secondtag);CHKERRQ(ierr);
  myOffset = 0;
  ierr = PetscMalloc2(size, &firstreqs, size, &secondreqs);CHKERRQ(ierr);
  ierr = MPI_Scan(&n, &nextOffset, 1, MPIU_INT, MPI_SUM, map->comm);CHKERRQ(ierr);
  myOffset = nextOffset - n;
  total = map->range[rank + 1] - map->range[rank];
  if (total > 0) {
    ierr = MPI_Irecv(arrayout, total, MPIU_INT, MPI_ANY_SOURCE, firsttag, map->comm, &firstreqrcv);CHKERRQ(ierr);
  }
  for (i = 0, nsecond = 0, nfirst = 0; i < size; i++) {
    PetscInt itotal;
    PetscInt overlap, oStart, oEnd;

    itotal = map->range[i + 1] - map->range[i];
    if (itotal <= 0) continue;
    oStart = PetscMax(myOffset, map->range[i]);
    oEnd   = PetscMin(nextOffset, map->range[i + 1]);
    overlap = oEnd - oStart;
    if (map->range[i] >= myOffset && map->range[i] < nextOffset) {
      /* send first message */
      ierr = MPI_Isend(&arrayin[map->range[i] - myOffset], overlap, MPIU_INT, i, firsttag, map->comm, &(firstreqs[nfirst++]));CHKERRQ(ierr);
    } else if (overlap > 0) {
      /* send second message */
      ierr = MPI_Isend(&arrayin[oStart - myOffset], overlap, MPIU_INT, i, secondtag, map->comm, &(secondreqs[nsecond++]));CHKERRQ(ierr);
    } else if (overlap == 0 && myOffset > map->range[i] && myOffset < map->range[i + 1]) {
      /* send empty second message */
      ierr = MPI_Isend(&arrayin[oStart - myOffset], 0, MPIU_INT, i, secondtag, map->comm, &(secondreqs[nsecond++]));CHKERRQ(ierr);
    }
  }
  filled = 0;
  sender = -1;
  if (total > 0) {
    ierr = MPI_Wait(&firstreqrcv, &firststatus);CHKERRQ(ierr);
    sender = firststatus.MPI_SOURCE;
    ierr = MPI_Get_count(&firststatus, MPIU_INT, &filled);CHKERRQ(ierr);
  }
  while (filled < total) {
    PetscMPIInt mfilled;
    MPI_Status stat;

    sender++;
    ierr = MPI_Recv(&arrayout[filled], total - filled, MPIU_INT, sender, secondtag, map->comm, &stat);CHKERRQ(ierr);
    ierr = MPI_Get_count(&stat, MPIU_INT, &mfilled);CHKERRQ(ierr);
    filled += mfilled;
  }
  ierr = MPI_Waitall(nfirst, firstreqs, MPI_STATUSES_IGNORE);CHKERRQ(ierr);
  ierr = MPI_Waitall(nsecond, secondreqs, MPI_STATUSES_IGNORE);CHKERRQ(ierr);
  ierr = PetscFree2(firstreqs, secondreqs);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

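/* Samplesort driver: sort the local keys, pick P - 1 global pivots with PetscParallelSampleSelect,
 * bucket the local keys by pivot, exchange the buckets with MPI_Alltoallv, sort what was received,
 * and finally redistribute to the mapout layout with PetscParallelRedistribute. */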
static PetscErrorCode PetscParallelSortInt_Samplesort(PetscLayout mapin, PetscLayout mapout, PetscInt keysin[], PetscInt keysout[])
{
  PetscMPIInt    size, rank;
  PetscInt       *pivots = NULL, *buffer;
  PetscInt       i, j;
  PetscMPIInt    *keys_per_snd, *keys_per_rcv, *offsets_snd, *offsets_rcv, nrecv;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(mapin->comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(mapin->comm, &rank);CHKERRQ(ierr);
  ierr = PetscMalloc4(size, &keys_per_snd, size, &keys_per_rcv, size + 1, &offsets_snd, size + 1, &offsets_rcv);CHKERRQ(ierr);
  /* sort locally */
  ierr = PetscSortInt(mapin->n, keysin);CHKERRQ(ierr);
  /* get P - 1 pivots */
  ierr = PetscParallelSampleSelect(mapin, mapout, keysin, &pivots);CHKERRQ(ierr);
  /* determine which entries in the sorted array go to which other processes based on the pivots */
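  /* Illustrative example (not from the original source): keys strictly less than pivots[0] are
   * sent to rank 0, keys in [pivots[i-1], pivots[i]) are sent to rank i, and keys greater than or
   * equal to pivots[size - 2] are sent to rank size - 1. */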
  for (i = 0, j = 0; i < size - 1; i++) {
    PetscInt prev = j;

    while ((j < mapin->n) && (keysin[j] < pivots[i])) j++;
    offsets_snd[i] = prev;
    keys_per_snd[i] = j - prev;
  }
  offsets_snd[size - 1] = j;
  keys_per_snd[size - 1] = mapin->n - j;
  offsets_snd[size] = mapin->n;
  /* get the incoming sizes */
  ierr = MPI_Alltoall(keys_per_snd, 1, MPI_INT, keys_per_rcv, 1, MPI_INT, mapin->comm);CHKERRQ(ierr);
  offsets_rcv[0] = 0;
  for (i = 0; i < size; i++) {
    offsets_rcv[i+1] = offsets_rcv[i] + keys_per_rcv[i];
  }
  nrecv = offsets_rcv[size];
  /* all to all exchange */
  ierr = PetscMalloc1(nrecv, &buffer);CHKERRQ(ierr);
  ierr = MPI_Alltoallv(keysin, keys_per_snd, offsets_snd, MPIU_INT, buffer, keys_per_rcv, offsets_rcv, MPIU_INT, mapin->comm);CHKERRQ(ierr);
  ierr = PetscFree(pivots);CHKERRQ(ierr);
  ierr = PetscFree4(keys_per_snd, keys_per_rcv, offsets_snd, offsets_rcv);CHKERRQ(ierr);

  /* local sort */
  ierr = PetscSortInt(nrecv, buffer);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
  {
    PetscBool sorted;

    ierr = PetscParallelSortedInt(mapin->comm, nrecv, buffer, &sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(mapin->comm, PETSC_ERR_PLIB, "samplesort (pre-redistribute) sort failed");
  }
#endif

  /* redistribute to the desired order */
  ierr = PetscParallelRedistribute(mapout, nrecv, buffer, keysout);CHKERRQ(ierr);
  ierr = PetscFree(buffer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  PetscParallelSortInt - Globally sort a distributed array of integers

  Collective

  Input Parameters:
+ mapin - PetscLayout describing the distribution of the input keys
. mapout - PetscLayout describing the distribution of the output keys
- keysin - the input array of integers (it does not need to be pre-sorted)

  Output Parameter:
. keysout - the array in which the sorted integers will be stored.  If mapin == mapout, then keysin may be equal to keysout.

  Level: developer

  Notes: This implements a distributed samplesort, which, with local array sizes n_in and n_out, global size N, and global number of processes P, does the following:

  - sorts locally
  - chooses pivots by sorting (in parallel, with a bitonic sort) the (P-1) pivot suggestions from each process and allgathering a subset of (P-1) of them
  - uses the pivots to repartition the keys by an all-to-all exchange
  - sorts the repartitioned keys locally (the array is now globally sorted, but does not match the mapout layout)
  - redistributes the keys to match the mapout layout

  If keysin != keysout, then keysin will not be changed during PetscParallelSortInt.

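  A minimal usage sketch (not taken from the PETSc examples; error checking omitted, and comm, n, and keys are placeholders provided by the caller):
.vb
    PetscLayout map;
    PetscInt    *keys; /* the n local keys, allocated and filled by the caller */

    PetscLayoutCreate(comm, &map);
    PetscLayoutSetLocalSize(map, n);
    PetscLayoutSetUp(map);
    PetscParallelSortInt(map, map, keys, keys); /* sort in place, keeping the same layout */
    PetscLayoutDestroy(&map);
.ve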
.seealso: PetscParallelSortedInt()
@*/
PetscErrorCode PetscParallelSortInt(PetscLayout mapin, PetscLayout mapout, PetscInt keysin[], PetscInt keysout[])
{
  PetscMPIInt    size;
  PetscMPIInt    result;
  PetscInt       *keysincopy = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidPointer(mapin, 1);
  PetscValidPointer(mapout, 2);
  ierr = MPI_Comm_compare(mapin->comm, mapout->comm, &result);CHKERRQ(ierr);
  if (result != MPI_IDENT && result != MPI_CONGRUENT) SETERRQ(mapin->comm, PETSC_ERR_ARG_NOTSAMECOMM, "layouts are not on the same communicator");
  ierr = PetscLayoutSetUp(mapin);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(mapout);CHKERRQ(ierr);
  if (mapin->n) PetscValidIntPointer(keysin, 3);
  if (mapout->n) PetscValidIntPointer(keysout, 4);
  if (mapin->N != mapout->N) SETERRQ2(mapin->comm, PETSC_ERR_ARG_SIZ, "Input and output layouts have different global sizes (%D != %D)", mapin->N, mapout->N);
  ierr = MPI_Comm_size(mapin->comm, &size);CHKERRQ(ierr);
  if (size == 1) {
    if (keysout != keysin) {
      ierr = PetscMemcpy(keysout, keysin, mapin->n * sizeof(PetscInt));CHKERRQ(ierr);
    }
    ierr = PetscSortInt(mapout->n, keysout);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  if (keysout != keysin) {
    ierr = PetscMalloc1(mapin->n, &keysincopy);CHKERRQ(ierr);
    ierr = PetscMemcpy(keysincopy, keysin, mapin->n * sizeof(PetscInt));CHKERRQ(ierr);
    keysin = keysincopy;
  }
  ierr = PetscParallelSortInt_Samplesort(mapin, mapout, keysin, keysout);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
  {
    PetscBool sorted;

    ierr = PetscParallelSortedInt(mapout->comm, mapout->n, keysout, &sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(mapout->comm, PETSC_ERR_PLIB, "samplesort sort failed");
  }
#endif
  ierr = PetscFree(keysincopy);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
377