/* xref: /petsc/src/mat/impls/sbaij/mpi/sbaijov.c (revision bebe2cf65d55febe21a5af8db2bd2e168caaa2e7) */

/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that are shared across processors.
*/
#include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
#include <petscbt.h>

static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ"
PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i,N=C->cmap->N, bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
  IS             *is_new,*is_row;
  Mat            *submats;
  Mat_MPISBAIJ   *c=(Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *asub_i;
  PetscBT        table;
  PetscInt       *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
  const PetscInt *idx;
  PetscBool      flg,*allcolumns,*allrows;

  PetscFunctionBegin;
  ierr = PetscMalloc1(is_max,&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
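  /* e.g., with bs=2, scalar indices {4,5,8,9} compress to block indices {2,4} */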
  ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new);CHKERRQ(ierr);
  if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");

  /* ----- previous non-scalable implementation ----- */
  flg  = PETSC_FALSE;
  ierr = PetscOptionsHasName(NULL, "-IncreaseOverlap_old", &flg);CHKERRQ(ierr);
  if (flg) { /* previous non-scalable implementation */
    ierr = PetscPrintf(PETSC_COMM_WORLD,"use previous non-scalable implementation...\n");CHKERRQ(ierr);
    for (i=0; i<ov; ++i) {
      ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
    }
  } else { /* implementation using modified BAIJ routines */

    ierr = PetscMalloc1(Mbs+1,&nidx);CHKERRQ(ierr);
    ierr = PetscBTCreate(Mbs,&table);CHKERRQ(ierr); /* for column search */
    ierr = PetscMalloc2(is_max+1,&allcolumns,is_max+1,&allrows);CHKERRQ(ierr);

    /* Create is_row */
    ierr = PetscMalloc1(is_max,&is_row);CHKERRQ(ierr);
    ierr = ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]);CHKERRQ(ierr);

    allrows[0] = PETSC_TRUE;
    for (i=1; i<is_max; i++) {
      is_row[i]  = is_row[0]; /* reuse is_row[0] */
      allrows[i] = PETSC_TRUE;
    }

    /* Allocate memory to hold all the submatrices - modified from MatGetSubMatrices_MPIBAIJ() */
    ierr = PetscMalloc1(is_max+1,&submats);CHKERRQ(ierr);

    /* Check for special case: each processor gets entire matrix columns */
    for (i=0; i<is_max; i++) {
      ierr = ISIdentity(is_new[i],&flg);CHKERRQ(ierr);
      ierr = ISGetLocalSize(is_new[i],&isz);CHKERRQ(ierr);
      if (flg && isz == Mbs) {
        allcolumns[i] = PETSC_TRUE;
      } else {
        allcolumns[i] = PETSC_FALSE;
      }
    }

    /* Determine the number of stages through which the submatrices are computed */
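    /* nmax = number of index sets per stage, chosen so that roughly
       nmax*Nbs PetscInts (about 20 MB) of workspace are in use at once (heuristic) */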
    nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
    if (!nmax) nmax = 1;
    nstages_local = is_max/nmax + ((is_max % nmax) ? 1 : 0);

    /* Make sure every processor loops through the same number of stages */
    ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr);

    for (iov=0; iov<ov; ++iov) {
      /* 1) Get submats for column search */
      for (i=0,pos=0; i<nstages; i++) {
        if (pos+nmax <= is_max) max_no = nmax;
        else if (pos == is_max) max_no = 0;
        else                    max_no = is_max-pos;
        c->ijonly = PETSC_TRUE;
        ierr      = MatGetSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,allrows+pos,allcolumns+pos,submats+pos);CHKERRQ(ierr);
        pos      += max_no;
      }

      /* 2) Row search */
      ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);

      /* 3) Column search */
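      /* submats[i] holds the sparsity of C(all local block rows, is_new[i]), so any
         block row with a nonzero there couples to is_new[i] and is added below */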
      for (i=0; i<is_max; i++) {
        asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
        ai     = asub_i->i;

        /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ() into the table */
        ierr = PetscBTMemzero(Mbs,table);CHKERRQ(ierr);

        ierr = ISGetIndices(is_new[i],&idx);CHKERRQ(ierr);
        ierr = ISGetLocalSize(is_new[i],&nis);CHKERRQ(ierr);
        for (l=0; l<nis; l++) {
          ierr    = PetscBTSet(table,idx[l]);CHKERRQ(ierr);
          nidx[l] = idx[l];
        }
        isz = nis;

        /* add column entries to table */
        for (brow=0; brow<Mbs; brow++) {
          nz = ai[brow+1] - ai[brow];
          if (nz) {
            if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
          }
        }
        ierr = ISRestoreIndices(is_new[i],&idx);CHKERRQ(ierr);
        ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);

        /* create updated is_new */
        ierr = ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i);CHKERRQ(ierr);
      }

      /* Free tmp spaces */
      for (i=0; i<is_max; i++) {
        ierr = MatDestroy(&submats[i]);CHKERRQ(ierr);
      }
    }
    ierr = PetscFree2(allcolumns,allrows);CHKERRQ(ierr);
    ierr = PetscBTDestroy(&table);CHKERRQ(ierr);
    ierr = PetscFree(submats);CHKERRQ(ierr);
    ierr = ISDestroy(&is_row[0]);CHKERRQ(ierr);
    ierr = PetscFree(is_row);CHKERRQ(ierr);
    ierr = PetscFree(nidx);CHKERRQ(ierr);

  }

  for (i=0; i<is_max; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is);CHKERRQ(ierr);

  for (i=0; i<is_max; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
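
/* A minimal usage sketch (illustrative only, excluded from the build): overlap is
   normally requested through the public MatIncreaseOverlap() entry point, which
   dispatches to the routine above for MATMPISBAIJ matrices. The helper name and
   the guard macro below are hypothetical. */
#if defined(PETSC_SBAIJOV_ILLUSTRATION)
static PetscErrorCode ExampleIncreaseOverlapOnce(Mat C,PetscInt nis,IS is[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatIncreaseOverlap(C,nis,is,1);CHKERRQ(ierr); /* grow each is[] by one level of overlap */
  PetscFunctionReturn(0);
}
#endif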

typedef enum {MINE,OTHER} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, no of is
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
        ...
   data2 is packed in the format (for creating output is[]):
       data[0]          = is_max, no of is
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
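/* A minimal sketch (illustrative only, excluded from the build) of walking the two
   packings described above; the function name and guard macro are hypothetical. */
#if defined(PETSC_SBAIJOV_ILLUSTRATION)
static void ExampleWalkPackedIS(const PetscInt *data,PetscInt Mbs,PetscBool strided)
{
  PetscInt       i,j,is_max = data[0];       /* data[0] = number of index sets */
  const PetscInt *vals = data + 1 + is_max;  /* entries of is[0] start here */

  for (i=0; i<is_max; i++) {
    PetscInt sz = data[1+i];                 /* data[1+i] = size of is[i] */
    for (j=0; j<sz; j++) (void)vals[j];      /* visit the entries of is[i] */
    if (strided) vals = data + 1 + is_max + Mbs*(i+1); /* data2: fixed stride Mbs */
    else         vals += sz;                           /* data1: is[i+1] follows is[i] */
  }
}
#endif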
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Once"
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len,*iwork;
  const PetscInt *idx_i;
  PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i;
  PetscInt       Mbs,i,j,k,*odata1,*odata2;
  PetscInt       proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
  PetscInt       proc_end=0,len_unused,nodata2;
  PetscInt       ois_max; /* max no. of is[] over all processors */
  char           *t_p;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*s_waits2,r_req;
  MPI_Status     *s_status,r_status;
  PetscBT        *table;  /* mark indices of this processor's is[] */
  PetscBT        table_i;
  PetscBT        otable; /* mark indices of other processors' is[] */
  PetscInt       bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
  IS             garray_local,garray_gl;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len  = PetscMax(is_max, size);
  ierr = PetscMalloc2(len,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,&t_p);CHKERRQ(ierr);
  for (i=0; i<len; i++) {
    table[i] = t_p  + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }

  ierr = MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  ierr = PetscMalloc1(is_max,&n);CHKERRQ(ierr);
  len  = 0;
  for (i=0; i<is_max; i++) {
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  ierr = PetscMalloc1(size*len+1,&data1);CHKERRQ(ierr);
  ierr = PetscMalloc1(size,&data1_start);CHKERRQ(ierr);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  ierr = PetscMalloc4(size,&len_s,size,&btable,size,&iwork,size+1,&Bowners);CHKERRQ(ierr);

  /* gather c->garray from all processors */
  ierr = ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local);CHKERRQ(ierr);
  ierr = ISAllGather(garray_local, &garray_gl);CHKERRQ(ierr);
  ierr = ISDestroy(&garray_local);CHKERRQ(ierr);
  ierr = MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);

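  /* prefix-sum the gathered counts below: proc i's garray then occupies
     [Bowners[i],Bowners[i+1]) of garray_gl */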
  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];

  if (is_max) {
    /* hash table ctable which maps block row c->row to its owning proc_id */
    ierr = PetscMalloc1(Mbs,&ctable);CHKERRQ(ierr);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<C->rmap->range[proc_id+1]/bs; j++) ctable[j] = proc_id;
    }

    /* hash tables marking c->garray */
    ierr = ISGetIndices(garray_gl,&idx_i);CHKERRQ(ierr);
    for (i=0; i<size; i++) {
      table_i = table[i];
      ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
      for (j = Bowners[i]; j<Bowners[i+1]; j++) { /* go through B cols of proc[i] */
        ierr = PetscBTSet(table_i,idx_i[j]);CHKERRQ(ierr);
      }
    }
    ierr = ISRestoreIndices(garray_gl,&idx_i);CHKERRQ(ierr);
  }  /* if (is_max) */
  ierr = ISDestroy(&garray_gl);CHKERRQ(ierr);

  /* evaluate communication - message to whom, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++) {
    iwork[proc_id]        = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank) {
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
    for (j=0; j<n[i]; j++) {
      idx                = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end           = ctable[idx];
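      /* idx always goes to its owner proc_end; a lower-ranked proc receives idx
         only if its off-diagonal column map (garray) references idx */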
      for (proc_id=0; proc_id<=proc_end; proc_id++) {  /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx)) continue; /* no need for sending idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++) {
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id]                 = len_s[proc_id];
    }
    ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++) {
    data1_start[i] = data1 + i*len;
    if (len_s[i]) {
      nrqs++;
      len_s[i] += 1 + is_max; /* add the header length (1 + is_max) */
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(n);CHKERRQ(ierr);
  ierr = PetscFree(ctable);CHKERRQ(ierr);

  /* Determine the number of messages to expect, their lengths, and the ids they come from */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc2(size,&s_waits1,size,&s_waits2);CHKERRQ(ierr);
  k    = 0;
  for (proc_id=0; proc_id<size; proc_id++) {  /* send data1 to processor [proc_id] */
    if (len_s[proc_id]) {
      ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
      k++;
    }
  }

  /* 2. Receive other's is[] and process. Then send back */
  /*-----------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++) {
    if (len_r1[i] > len) len = len_r1[i];
  }
  ierr = PetscFree(len_r1);CHKERRQ(ierr);
  ierr = PetscFree(id_r1);CHKERRQ(ierr);

  for (proc_id=0; proc_id<size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

  ierr = PetscMalloc1(len+1,&odata1);CHKERRQ(ierr);
  ierr = PetscMalloc1(size,&odata2_ptr);CHKERRQ(ierr);
  ierr = PetscBTCreate(Mbs,&otable);CHKERRQ(ierr);

  len_max = ois_max*(Mbs+1); /* max space storing all is[] for each receive */
  len_est = 2*len_max;       /* estimated space of storing is[] for all receiving messages */
  ierr    = PetscMalloc1(len_est+1,&odata2);CHKERRQ(ierr);
  nodata2 = 0;               /* nodata2+1: number of chunks allocated for odata2 */

  odata2_ptr[nodata2] = odata2;

  len_unused = len_est; /* unused space in the current odata2 chunk; must stay >= len_max before each receive */
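  /* odata2 is consumed like a bump allocator: replies are packed at the current
     offset, and when the remaining space cannot hold a worst-case reply a fresh
     chunk is malloc'd; old chunks stay alive in odata2_ptr[] because nonblocking
     sends may still reference them */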

  k = 0;
  while (k < nrqr) {
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag) {
      ierr    = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr    = MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr    = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);

      /* Process messages */
      /* make sure there is enough unused space in the odata2 array */
      if (len_unused < len_max) { /* allocate more space for odata2 */
        ierr = PetscMalloc1(len_est+1,&odata2);CHKERRQ(ierr);

        odata2_ptr[++nodata2] = odata2;

        len_unused = len_est;
      }

      ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);CHKERRQ(ierr);
      len  = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++) len += odata2[1 + i];

      /* Send messages back */
      ierr = MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
      k++;
      odata2        += len;
      len_unused    -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  ierr = PetscFree(odata1);CHKERRQ(ierr);
  ierr = PetscBTDestroy(&otable);CHKERRQ(ierr);

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in the odata2(=data) array */
  len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
  if (len_unused < len_max) { /* allocate more space for odata2 */
    ierr = PetscMalloc1(len_est+1,&odata2);CHKERRQ(ierr);

    odata2_ptr[++nodata2] = odata2;
  }

  data = odata2;
  ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);CHKERRQ(ierr);
  ierr = PetscFree(data1_start);CHKERRQ(ierr);

  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  /* get the max length of the reply messages this processor will receive */
  ierr = MPI_Allreduce(len_s,iwork,size,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
  ierr = PetscMalloc1(iwork[rank]+1,&data2);CHKERRQ(ierr);
  ierr = PetscFree4(len_s,btable,iwork,Bowners);CHKERRQ(ierr);

  k = 0;
  while (k < nrqs) {
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag) {
      ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);

      proc_id = r_status.MPI_SOURCE;

      ierr = MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      if (len > 1+is_max) { /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
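        /* data2 arrived in the contiguous (data1-style) packing; data uses the
           fixed-stride Mbs*i packing, so the two are walked differently */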
        for (i=0; i<is_max; i++) {
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++) {
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) data_i[isz++] = col;
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  ierr = PetscFree(data2);CHKERRQ(ierr);
  ierr = PetscFree2(table,t_p);CHKERRQ(ierr);

  /* wait until phase 1 sends are complete */
  ierr = PetscMalloc1(size,&s_status);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}
  ierr = PetscFree(data1);CHKERRQ(ierr);

  /* wait until phase 2 sends are complete */
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);}
  ierr = PetscFree2(s_waits1,s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ierr   = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
  }
  for (k=0; k<=nodata2; k++) {
    ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
  }
  ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Local"
/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
       the work on the local processor.

     Inputs:
      C      - a MATMPISBAIJ matrix;
      data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
      whose  - whose is[] to be processed:
               MINE:  this processor's is[]
               OTHER: another processor's is[]
     Output:
       nidx  - whose = MINE:
                     holds input and newly found indices in the same format as data
               whose = OTHER:
                     only holds the newly found indices
       table - table[i]: mark the indices of is[i], i=0,...,is_max. Used only in the case whose=MINE.
*/
/* Would computation be reduced by swapping the loops 'for each is' and 'for each row'? */
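/* A minimal usage sketch mirroring the two call sites in MatIncreaseOverlap_MPISBAIJ_Once():
     MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE, data,  table);    process this rank's is[]
     MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,           OTHER,odata2,&otable);  process a received is[]
*/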
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
  PetscErrorCode ierr;
  PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT        table0;  /* mark the indices of input is[] for look up */
  PetscBT        table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs    = c->Mbs; mbs = a->mbs;
  ai     = a->i; aj = a->j;
  bi     = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstartbs;
  is_max = data[0];

  ierr = PetscBTCreate(Mbs,&table0);CHKERRQ(ierr);

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1+i]; /* size of input is[i] */

    /* initialize and set table_i (marks idx and nidx) and table0 (only marks idx) */
    if (whose == MINE) { /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else {            /* process another processor's is[] - only use one temp table */
      table_i = table[0];
    }
    ierr = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
    ierr = PetscBTMemzero(Mbs,table0);CHKERRQ(ierr);
    if (n==0) {
      nidx[1+i] = 0;  /* size of new is[i] */
      continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++) {
      col = idx_i[j];
      if (col >= Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        ierr = PetscBTSet(table0,col);CHKERRQ(ierr);
        if (whose == MINE) nidx_i[isz0] = col;
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) isz = isz0;
    k = 0;  /* no. of indices from input is[i] that have been examined */
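    /* single sweep over the local block rows: a row already in is[i] contributes all
       of its stored columns (row search); a row not in is[i] is added as soon as one
       of its stored columns lies in is[i] (column search), recovering couplings that
       the symmetric format stores only in the upper triangle */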
    for (row=0; row<mbs; row++) {
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)) { /* row is on input is[i]:
                                                do row search: collect all cols in this row */
        for (l = a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        for (l = b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        k++;
        if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
      } else { /* row is not on input is[i]:
                  do col search: add row onto nidx_i if the row contains a col that is in input is[i] */
        for (l = a_start; l<a_end; l++) {  /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* for (l = a_start; l<a_end; l++) */
          }
        }
        for (l = b_start; l<b_end; l++) {  /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* for (l = b_start; l<b_end; l++) */
          }
        }
      }
    }

    if (i < is_max - 1) {
      idx_i  += n;   /* ptr to input is[i+1] array */
      nidx_i += isz; /* ptr to output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of new is[i] */
  } /* for each is */
  ierr = PetscBTDestroy(&table0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}