xref: /petsc/src/mat/impls/sbaij/mpi/sbaijov.c (revision f13dfd9ea68e0ddeee984e65c377a1819eab8a8a)
/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that are shared across processors.
*/
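/*
   Editorial note: these routines sit behind MatIncreaseOverlap() for MATMPISBAIJ matrices,
   for example when PCASM enlarges its subdomains. A minimal sketch of how they are reached,
   assuming C is an assembled MATMPISBAIJ matrix and is[] holds the n index sets to be
   enlarged in place by ov levels of overlap:

     PetscCall(MatIncreaseOverlap(C, n, is, ov));

   which dispatches to MatIncreaseOverlap_MPISBAIJ() below.
*/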
#include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
#include <petscbt.h>

static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat, PetscInt, IS *);
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat, PetscInt *, PetscInt, PetscInt *, PetscBT *);

PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C, PetscInt is_max, IS is[], PetscInt ov)
{
  PetscInt        i, N = C->cmap->N, bs = C->rmap->bs, M = C->rmap->N, Mbs = M / bs, *nidx, isz, iov;
  IS             *is_new, *is_row;
  Mat            *submats;
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ *)C->data;
  Mat_SeqSBAIJ   *asub_i;
  PetscBT         table;
  PetscInt       *ai, brow, nz, nis, l, nmax, nstages_local, nstages, max_no, pos;
  const PetscInt *idx;
  PetscBool       flg;

  PetscFunctionBegin;
  PetscCall(PetscMalloc1(is_max, &is_new));
  /* Convert the indices into block format */
  PetscCall(ISCompressIndicesGeneral(N, C->rmap->n, bs, is_max, is, is_new));
  PetscCheck(ov >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Negative overlap specified");

  /*  previous non-scalable implementation  */
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsHasName(NULL, NULL, "-IncreaseOverlap_old", &flg));
  if (flg) { /* previous non-scalable implementation */
    printf("use previous non-scalable implementation...\n");
    for (i = 0; i < ov; ++i) PetscCall(MatIncreaseOverlap_MPISBAIJ_Once(C, is_max, is_new));
  } else { /* implementation using modified BAIJ routines */

    PetscCall(PetscMalloc1(Mbs + 1, &nidx));
    PetscCall(PetscBTCreate(Mbs, &table)); /* for column search */

    /* Create is_row */
    PetscCall(PetscMalloc1(is_max, &is_row));
    PetscCall(ISCreateStride(PETSC_COMM_SELF, Mbs, 0, 1, &is_row[0]));

    for (i = 1; i < is_max; i++) { is_row[i] = is_row[0]; /* reuse is_row[0] */ }

    /* Allocate memory to hold all the submatrices - Modified from MatCreateSubMatrices_MPIBAIJ() */
    PetscCall(PetscMalloc1(is_max + 1, &submats));

    /* Determine the number of stages in which the submatrices are created */
    nmax = 20 * 1000000 / (c->Nbs * sizeof(PetscInt));
    if (!nmax) nmax = 1;
    nstages_local = is_max / nmax + ((is_max % nmax) ? 1 : 0);

    /* Make sure every processor loops through the same number of stages */
    PetscCall(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));
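    /* Illustrative arithmetic (assumed values, not from this file): with c->Nbs = 1,000,000
       block columns and 8-byte PetscInt, nmax = 20,000,000 / (1,000,000 * 8) = 2, so
       is_max = 5 index sets would be processed in nstages_local = 5/2 + 1 = 3 stages; the
       reduction above makes every rank execute the same (maximum) number of stages so the
       collective calls inside the stage loop stay matched. */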

    {
      const PetscObject obj = (PetscObject)c->A;
      size_t            new_len, cur_len, max_len;

      PetscCall(PetscStrlen(MATSEQBAIJ, &new_len));
      PetscCall(PetscStrlen(MATSEQSBAIJ, &cur_len));
      max_len = PetscMax(cur_len, new_len) + 1;
      PetscCall(PetscRealloc(max_len * sizeof(*obj->type_name), &obj->type_name));
      /* The resulting submatrices should be BAIJ, not SBAIJ, hence we change this value to
         trigger that */
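      /* Editorial summary of the loop below: each overlap level combines two passes. The row
         search (MatIncreaseOverlap_MPIBAIJ_Once) adds the columns of rows already in is_new[],
         which for SBAIJ only sees the stored upper-triangular part; the column search over the
         gathered submatrices then adds every block row with an entry in one of the is_new[]
         columns, picking up the symmetric couplings that are not stored explicitly. */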
      for (iov = 0; iov < ov; ++iov) {
        /* 1) Get submats for column search */
        PetscCall(PetscStrncpy(obj->type_name, MATSEQBAIJ, max_len));
        for (i = 0, pos = 0; i < nstages; i++) {
          if (pos + nmax <= is_max) max_no = nmax;
          else if (pos == is_max) max_no = 0;
          else max_no = is_max - pos;
          c->ijonly = PETSC_TRUE; /* only matrix data structures are requested */

          PetscCall(MatCreateSubMatrices_MPIBAIJ_local(C, max_no, is_row + pos, is_new + pos, MAT_INITIAL_MATRIX, submats + pos, PETSC_TRUE));
          pos += max_no;
        }
        PetscCall(PetscStrncpy(obj->type_name, MATSEQSBAIJ, max_len));

        /* 2) Row search */
        PetscCall(MatIncreaseOverlap_MPIBAIJ_Once(C, is_max, is_new));

        /* 3) Column search */
        for (i = 0; i < is_max; i++) {
          asub_i = (Mat_SeqSBAIJ *)submats[i]->data;
          ai     = asub_i->i;

          /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ_Once() into the table */
          PetscCall(PetscBTMemzero(Mbs, table));

          PetscCall(ISGetIndices(is_new[i], &idx));
          PetscCall(ISGetLocalSize(is_new[i], &nis));
          for (l = 0; l < nis; l++) {
            PetscCall(PetscBTSet(table, idx[l]));
            nidx[l] = idx[l];
          }
          isz = nis;

          /* add column entries to table */
          for (brow = 0; brow < Mbs; brow++) {
            nz = ai[brow + 1] - ai[brow];
            if (nz) {
              if (!PetscBTLookupSet(table, brow)) nidx[isz++] = brow;
            }
          }
          PetscCall(ISRestoreIndices(is_new[i], &idx));
          PetscCall(ISDestroy(&is_new[i]));

          /* create updated is_new */
          PetscCall(ISCreateGeneral(PETSC_COMM_SELF, isz, nidx, PETSC_COPY_VALUES, is_new + i));
        }

        /* Free tmp spaces */
        for (i = 0; i < is_max; i++) PetscCall(MatDestroy(&submats[i]));
      }

      PetscCall(PetscBTDestroy(&table));
      PetscCall(PetscFree(submats));
      PetscCall(ISDestroy(&is_row[0]));
      PetscCall(PetscFree(is_row));
      PetscCall(PetscFree(nidx));
    }
  }
  for (i = 0; i < is_max; i++) {
    PetscCall(ISDestroy(&is[i]));
    PetscCall(ISGetLocalSize(is_new[i], &nis));
    PetscCall(ISGetIndices(is_new[i], &idx));
    PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)is_new[i]), bs, nis, idx, PETSC_COPY_VALUES, &is[i]));
    PetscCall(ISDestroy(&is_new[i]));
  }
  PetscCall(PetscFree(is_new));
  PetscFunctionReturn(PETSC_SUCCESS);
}

typedef enum {
  MINE,
  OTHER
} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, number of is[]
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
        ...
   data2 is packed in the format (for creating output is[]):
       data[0]          = is_max, number of is[]
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
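/* Worked example (illustrative values): is_max = 2, is[0] = {3,7}, is[1] = {5}.
   data1/odata1/odata2 layout: {2, 2, 1, 3, 7, 5}, i.e. data[0] = 2 index sets, data[1] = 2 and
   data[2] = 1 are their sizes, followed by the concatenated indices.
   data2 reserves a fixed Mbs-sized slot per index set: data[0] = 2, data[1..2] = sizes,
   is[0] starts at data[3] and is[1] starts at data[3 + Mbs]. */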
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C, PetscInt is_max, IS is[])
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ *)C->data;
  PetscMPIInt     size, rank, tag1, tag2, *len_s, nrqr, nrqs, *id_r1, *len_r1, flag, len, *iwork;
  const PetscInt *idx_i;
  PetscInt        idx, isz, col, *n, *data1, **data1_start, *data2, *data2_i, *data, *data_i;
  PetscInt        Mbs, i, j, k, *odata1, *odata2;
  PetscInt        proc_id, **odata2_ptr, *ctable = NULL, *btable, len_max, len_est;
  PetscInt        proc_end = 0, len_unused, nodata2;
  PetscInt        ois_max; /* max no. of is[] over all processors */
  char           *t_p;
  MPI_Comm        comm;
  MPI_Request    *s_waits1, *s_waits2, r_req;
  MPI_Status     *s_status, r_status;
  PetscBT        *table; /* mark indices of this processor's is[] */
  PetscBT         table_i;
  PetscBT         otable; /* mark indices of other processors' is[] */
  PetscInt        bs = C->rmap->bs, Bn = c->B->cmap->n, Bnbs = Bn / bs, *Bowners;
  IS              garray_local, garray_gl;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));
  PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag2));

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len = PetscMax(is_max, size);
  PetscCall(PetscMalloc2(len, &table, (Mbs / PETSC_BITS_PER_BYTE + 1) * len, &t_p));
  for (i = 0; i < len; i++) table[i] = t_p + (Mbs / PETSC_BITS_PER_BYTE + 1) * i;
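  /* Sizing note: each bit table needs one bit per block row, i.e. Mbs/PETSC_BITS_PER_BYTE + 1
     bytes, so the single t_p allocation above packs `len` such bitmaps back to back and
     table[i] simply points at the i-th slice. */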

  PetscCall(MPIU_Allreduce(&is_max, &ois_max, 1, MPIU_INT, MPI_MAX, comm));

  /* 1. Send this processor's is[] to other processors */
  /* allocate spaces */
  PetscCall(PetscMalloc1(is_max, &n));
  len = 0;
  for (i = 0; i < is_max; i++) {
    PetscCall(ISGetLocalSize(is[i], &n[i]));
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  PetscCall(PetscMalloc1(size * len + 1, &data1));
  PetscCall(PetscMalloc1(size, &data1_start));
  for (i = 0; i < size; i++) data1_start[i] = data1 + i * len;

  PetscCall(PetscMalloc4(size, &len_s, size, &btable, size, &iwork, size + 1, &Bowners));

  /* gather c->garray from all processors */
  PetscCall(ISCreateGeneral(comm, Bnbs, c->garray, PETSC_COPY_VALUES, &garray_local));
  PetscCall(ISAllGather(garray_local, &garray_gl));
  PetscCall(ISDestroy(&garray_local));
  PetscCallMPI(MPI_Allgather(&Bnbs, 1, MPIU_INT, Bowners + 1, 1, MPIU_INT, comm));

  Bowners[0] = 0;
  for (i = 0; i < size; i++) Bowners[i + 1] += Bowners[i];
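  /* Bowners now holds the prefix sums of the per-rank B-column counts, i.e. the offset of each
     rank's garray inside garray_gl. Illustrative values: Bnbs per rank {3, 0, 2} gives
     Bowners = {0, 3, 3, 5}. */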

  if (is_max) {
    /* hash table ctable which maps c->row to proc_id */
    PetscCall(PetscMalloc1(Mbs, &ctable));
    for (proc_id = 0, j = 0; proc_id < size; proc_id++) {
      for (; j < C->rmap->range[proc_id + 1] / bs; j++) ctable[j] = proc_id;
    }

    /* hash tables marking c->garray */
    PetscCall(ISGetIndices(garray_gl, &idx_i));
    for (i = 0; i < size; i++) {
      table_i = table[i];
      PetscCall(PetscBTMemzero(Mbs, table_i));
      for (j = Bowners[i]; j < Bowners[i + 1]; j++) { /* go through B cols of proc[i] */
        PetscCall(PetscBTSet(table_i, idx_i[j]));
      }
    }
    PetscCall(ISRestoreIndices(garray_gl, &idx_i));
  } /* if (is_max) */
  PetscCall(ISDestroy(&garray_gl));

  /* evaluate communication - messages to whom, lengths, and buffer space */
  for (i = 0; i < size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id = 0; proc_id < size; proc_id++) {
    iwork[proc_id]        = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j = 0; j < is_max; j++) {
      if (proc_id == rank) {
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i = 0; i < is_max; i++) {
    PetscCall(ISGetIndices(is[i], &idx_i));
    for (j = 0; j < n[i]; j++) {
      idx                = idx_i[j];
      *data1_start[rank] = idx;
      data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
      for (proc_id = 0; proc_id <= proc_end; proc_id++) {                        /* for others to process */
        if (proc_id == rank) continue;                                           /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id], idx)) continue; /* no need for sending idx to [proc_id] */
        *data1_start[proc_id] = idx;
        data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id = 0; proc_id < size; proc_id++) {
      if (proc_id == rank) continue;
      *(data1 + proc_id * len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id]                   = len_s[proc_id];
    }
    PetscCall(ISRestoreIndices(is[i], &idx_i));
  }
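  /* Destination rule used above (editorial note): a block index idx is always packed for its
     owning process ctable[idx], and additionally for any lower-ranked process whose
     off-diagonal part references idx (tested via table[proc_id], built from the gathered
     garray). Processes ranked above the owner need not be checked because the SBAIJ
     off-diagonal block only holds columns to the right of the diagonal block. */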

  nrqs = 0;
  nrqr = 0;
  for (i = 0; i < size; i++) {
    data1_start[i] = data1 + i * len;
    if (len_s[i]) {
      nrqs++;
      len_s[i] += 1 + is_max; /* add the header length */
    }
  }

  for (i = 0; i < is_max; i++) PetscCall(ISDestroy(&is[i]));
  PetscCall(PetscFree(n));
  PetscCall(PetscFree(ctable));

  /* Determine the number of messages to expect, their lengths, and from-ids */
  PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &nrqr));
  PetscCall(PetscGatherMessageLengths(comm, nrqs, nrqr, len_s, &id_r1, &len_r1));
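  /* nrqr is the number of messages this process will receive; len_r1[] and id_r1[] give their
     lengths and source ranks, so the receive loop below can size odata1 and probe for exactly
     nrqr incoming messages. */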

  /* Now post the sends */
  PetscCall(PetscMalloc2(size, &s_waits1, size, &s_waits2));
  k = 0;
  for (proc_id = 0; proc_id < size; proc_id++) { /* send data1 to processor [proc_id] */
    if (len_s[proc_id]) {
      PetscCallMPI(MPI_Isend(data1_start[proc_id], len_s[proc_id], MPIU_INT, proc_id, tag1, comm, s_waits1 + k));
      k++;
    }
  }

  /* 2. Receive others' is[] and process them, then send the results back */
  len = 0;
  for (i = 0; i < nrqr; i++) {
    if (len_r1[i] > len) len = len_r1[i];
  }
  PetscCall(PetscFree(len_r1));
  PetscCall(PetscFree(id_r1));

  for (proc_id = 0; proc_id < size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

  PetscCall(PetscMalloc1(len + 1, &odata1));
  PetscCall(PetscMalloc1(size, &odata2_ptr));
  PetscCall(PetscBTCreate(Mbs, &otable));

  len_max = ois_max * (Mbs + 1); /* max space needed to store all is[] from one received message */
  len_est = 2 * len_max;         /* estimated space for storing the is[] of all received messages */
  PetscCall(PetscMalloc1(len_est + 1, &odata2));
  nodata2 = 0; /* nodata2+1 = number of chunks allocated for odata2 so far */

  odata2_ptr[nodata2] = odata2;

  len_unused = len_est; /* unused space in the current chunk odata2_ptr[nodata2]; must stay >= len_max */
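  /* Buffer strategy (editorial note): replies are carved out of large odata2 chunks of size
     len_est (room for two full-size replies). Whenever the space left in the current chunk
     drops below len_max, a fresh chunk is allocated and recorded in odata2_ptr[]. The chunks
     must outlive the nonblocking sends that reference them, so they are only freed after the
     final MPI_Waitall and after step 5 has copied the results into new index sets. */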

  k = 0;
  while (k < nrqr) {
    /* Receive messages */
    PetscCallMPI(MPI_Iprobe(MPI_ANY_SOURCE, tag1, comm, &flag, &r_status));
    if (flag) {
      PetscCallMPI(MPI_Get_count(&r_status, MPIU_INT, &len));
      proc_id = r_status.MPI_SOURCE;
      PetscCallMPI(MPI_Irecv(odata1, len, MPIU_INT, proc_id, r_status.MPI_TAG, comm, &r_req));
      PetscCallMPI(MPI_Wait(&r_req, &r_status));

      /* Process messages */
      /* make sure there is enough unused space in the odata2 array */
      if (len_unused < len_max) { /* allocate more space for odata2 */
        PetscCall(PetscMalloc1(len_est + 1, &odata2));

        odata2_ptr[++nodata2] = odata2;

        len_unused = len_est;
      }

      PetscCall(MatIncreaseOverlap_MPISBAIJ_Local(C, odata1, OTHER, odata2, &otable));
      len = 1 + odata2[0];
      for (i = 0; i < odata2[0]; i++) len += odata2[1 + i];

      /* Send messages back */
      PetscCallMPI(MPI_Isend(odata2, len, MPIU_INT, proc_id, tag2, comm, s_waits2 + k));
      k++;
      odata2 += len;
      len_unused -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  PetscCall(PetscFree(odata1));
  PetscCall(PetscBTDestroy(&otable));

  /* 3. Do local work on this processor's is[] */
  /* make sure there is enough unused space in the odata2 (= data) array */
  len_max = is_max * (Mbs + 1); /* max space storing all is[] for this processor */
  if (len_unused < len_max) {   /* allocate more space for odata2 */
    PetscCall(PetscMalloc1(len_est + 1, &odata2));

    odata2_ptr[++nodata2] = odata2;
  }

  data = odata2;
  PetscCall(MatIncreaseOverlap_MPISBAIJ_Local(C, data1_start[rank], MINE, data, table));
  PetscCall(PetscFree(data1_start));

  /* 4. Receive work done on other processors, then merge */
  /* get the max length of the reply messages this processor expects to receive */
  PetscCall(MPIU_Allreduce(len_s, iwork, size, MPI_INT, MPI_MAX, comm));
  PetscCall(PetscMalloc1(iwork[rank] + 1, &data2));
  PetscCall(PetscFree4(len_s, btable, iwork, Bowners));

  k = 0;
  while (k < nrqs) {
    /* Receive messages */
    PetscCallMPI(MPI_Iprobe(MPI_ANY_SOURCE, tag2, comm, &flag, &r_status));
    if (flag) {
      PetscCallMPI(MPI_Get_count(&r_status, MPIU_INT, &len));

      proc_id = r_status.MPI_SOURCE;

      PetscCallMPI(MPI_Irecv(data2, len, MPIU_INT, proc_id, r_status.MPI_TAG, comm, &r_req));
      PetscCallMPI(MPI_Wait(&r_req, &r_status));
      if (len > 1 + is_max) { /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i = 0; i < is_max; i++) {
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs * i;
          isz     = data[1 + i];
          for (j = 0; j < data2[1 + i]; j++) {
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i, col)) data_i[isz++] = col;
          }
          data[1 + i] = isz;
          if (i < is_max - 1) data2_i += data2[1 + i];
        }
      }
      k++;
    }
  }
  PetscCall(PetscFree(data2));
  PetscCall(PetscFree2(table, t_p));

  /* wait for the phase 1 sends to complete */
  PetscCall(PetscMalloc1(size, &s_status));
  if (nrqs) PetscCallMPI(MPI_Waitall(nrqs, s_waits1, s_status));
  PetscCall(PetscFree(data1));

  /* wait for the phase 2 sends to complete */
  if (nrqr) PetscCallMPI(MPI_Waitall(nrqr, s_waits2, s_status));
  PetscCall(PetscFree2(s_waits1, s_waits2));
  PetscCall(PetscFree(s_status));

  /* 5. Create new is[] */
  for (i = 0; i < is_max; i++) {
    data_i = data + 1 + is_max + Mbs * i;
    PetscCall(ISCreateGeneral(PETSC_COMM_SELF, data[1 + i], data_i, PETSC_COPY_VALUES, is + i));
  }
  for (k = 0; k <= nodata2; k++) PetscCall(PetscFree(odata2_ptr[k]));
  PetscCall(PetscFree(odata2_ptr));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
       the work on the local processor.

     Inputs:
      C      - MATMPISBAIJ matrix;
      data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
      whose  - whose is[] to be processed,
               MINE:  this processor's is[]
               OTHER: other processor's is[]
     Output:
       nidx  - whose = MINE:
                     holds input and newly found indices in the same format as data
               whose = OTHER:
                     only holds the newly found indices
       table - table[i]: mark the indices of is[i], i=0,...,is_max-1. Used only in the case 'whose=MINE'.
*/
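/* Illustrative layout (assumed small sizes): with is_max = 2 and Mbs = 10, a whose=MINE call
   writes nidx[0] = 2, nidx[1..2] = the new sizes, the enlarged is[0] starting at nidx[3] and
   the enlarged is[1] starting at nidx[13]; a whose=OTHER call instead packs only the newly
   found indices contiguously, is[1] beginning right after the end of is[0]. */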
/* Would computation be reduced by swapping the loop 'for each is' and 'for each row'? */
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C, PetscInt *data, PetscInt whose, PetscInt *nidx, PetscBT *table)
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ *)C->data;
  Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ *)c->A->data;
  Mat_SeqBAIJ  *b = (Mat_SeqBAIJ *)c->B->data;
  PetscInt      row, mbs, Mbs, *nidx_i, col, col_max, isz, isz0, *ai, *aj, *bi, *bj, *garray, rstart, l;
  PetscInt      a_start, a_end, b_start, b_end, i, j, k, is_max, *idx_i, n;
  PetscBT       table0;  /* mark the indices of input is[] for lookup */
  PetscBT       table_i; /* points to i-th table. When whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs    = c->Mbs;
  mbs    = a->mbs;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;
  rstart = c->rstartbs;
  is_max = data[0];

  PetscCall(PetscBTCreate(Mbs, &table0));

  nidx[0] = is_max;
  idx_i   = data + is_max + 1;   /* ptr to input is[0] array */
  nidx_i  = nidx + is_max + 1;   /* ptr to output is[0] array */
  for (i = 0; i < is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1 + i]; /* size of input is[i] */

    /* initialize and set table_i (mark idx and nidx) and table0 (only mark idx) */
    if (whose == MINE) { /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs * i;
    } else { /* process other processor's is[] - only use one temp table */
      table_i = table[0];
    }
    PetscCall(PetscBTMemzero(Mbs, table_i));
    PetscCall(PetscBTMemzero(Mbs, table0));
    if (n == 0) {
      nidx[1 + i] = 0; /* size of new is[i] */
      continue;
    }

    isz0    = 0;
    col_max = 0;
    for (j = 0; j < n; j++) {
      col = idx_i[j];
      PetscCheck(col < Mbs, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "index col %" PetscInt_FMT " >= Mbs %" PetscInt_FMT, col, Mbs);
      if (!PetscBTLookupSet(table_i, col)) {
        PetscCall(PetscBTSet(table0, col));
        if (whose == MINE) nidx_i[isz0] = col;
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) isz = isz0;
    k = 0; /* no. of indices from input is[i] that have been examined */
    for (row = 0; row < mbs; row++) {
      a_start = ai[row];
      a_end   = ai[row + 1];
      b_start = bi[row];
      b_end   = bi[row + 1];
      if (PetscBTLookup(table0, row + rstart)) { /* row is on input is[i]:
                                                    do row search: collect all col in this row */
        for (l = a_start; l < a_end; l++) {      /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i, col)) nidx_i[isz++] = col;
        }
        for (l = b_start; l < b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i, col)) nidx_i[isz++] = col;
        }
        k++;
        if (k >= isz0) break;               /* for (row=0; row<mbs; row++) */
      } else {                              /* row is not on input is[i]:
                                               do col search: add row onto nidx_i if one of its cols is in the input is[i] */
        for (l = a_start; l < a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0, col)) {
            if (!PetscBTLookupSet(table_i, row + rstart)) nidx_i[isz++] = row + rstart;
            break; /* for (l = a_start; l < a_end; l++) */
          }
        }
        for (l = b_start; l < b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0, col)) {
            if (!PetscBTLookupSet(table_i, row + rstart)) nidx_i[isz++] = row + rstart;
            break; /* for (l = b_start; l < b_end; l++) */
          }
        }
      }
    }

    if (i < is_max - 1) {
      idx_i += n;    /* ptr to input is[i+1] array */
      nidx_i += isz; /* ptr to output is[i+1] array */
    }
    nidx[1 + i] = isz; /* size of new is[i] */
  } /* for each is */
  PetscCall(PetscBTDestroy(&table0));
  PetscFunctionReturn(PETSC_SUCCESS);
}
569