#ifdef PETSC_RCS_HEADER
static char vcid[] = "$Id: mmdense.c,v 1.24 1999/09/02 14:53:19 bsmith Exp bsmith $";
#endif

/*
   Support for the parallel dense matrix vector multiply
*/
#include "src/mat/impls/dense/mpi/mpidense.h"
#include "src/vec/vecimpl.h"

#undef __FUNC__
#define __FUNC__ "MatSetUpMultiply_MPIDense"
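/*
   MatSetUpMultiply_MPIDense - Builds the communication needed by the parallel
   dense matrix-vector multiply: a sequential work vector lvec of length N (the
   global number of columns) and a scatter Mvctx that gathers every entry of a
   distributed input vector into lvec, since each process stores full rows and
   therefore needs all N vector entries to form its part of the product.
*/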
int MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense *) mat->data;
  int          ierr;
  IS           tofrom;
  Vec          gvec;

  PetscFunctionBegin;
  /* Create local vector that is used to scatter into */
  ierr = VecCreateSeq(PETSC_COMM_SELF,mdn->N,&mdn->lvec);CHKERRQ(ierr);

  /* Create temporary index set used to build the scatter/gather context */
  ierr = ISCreateStride(PETSC_COMM_SELF,mdn->N,0,1,&tofrom);CHKERRQ(ierr);

  /* Create temporary global vector to generate scatter context */
  /* n    = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */

  ierr = VecCreateMPI(mat->comm,mdn->nvec,mdn->N,&gvec);CHKERRQ(ierr);

  /* Generate the scatter context */
  ierr = VecScatterCreate(gvec,tofrom,mdn->lvec,tofrom,&mdn->Mvctx);CHKERRQ(ierr);
  PLogObjectParent(mat,mdn->Mvctx);
  PLogObjectParent(mat,mdn->lvec);
  PLogObjectParent(mat,tofrom);
  PLogObjectParent(mat,gvec);

  ierr = ISDestroy(tofrom);CHKERRQ(ierr);
  ierr = VecDestroy(gvec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

extern int MatGetSubMatrices_MPIDense_Local(Mat,int,IS*,IS*,MatReuse,Mat*);
#undef __FUNC__
#define __FUNC__ "MatGetSubMatrices_MPIDense"
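/*
   MatGetSubMatrices_MPIDense - Extracts the submatrices defined by the index set
   pairs (isrow[i],iscol[i]).  The index sets are processed in stages of at most
   nmax sets each, so the integer row-request buffers exchanged in
   MatGetSubMatrices_MPIDense_Local stay bounded; every process loops over the
   same number of stages because each stage performs collective communication.
*/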
int MatGetSubMatrices_MPIDense(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat **submat)
{
  Mat_MPIDense  *c = (Mat_MPIDense *) C->data;
  int           nmax,nstages_local,nstages,i,pos,max_no,ierr;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    *submat = (Mat *)PetscMalloc((ismax+1)*sizeof(Mat));CHKPTRQ(*submat);
  }
  /* Determine the number of stages in which the submatrices are extracted */
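  /* At most nmax index sets are handled per stage; nmax is chosen so that the
     row indices requested in one stage (up to N ints per index set) total
     roughly 20 Mbytes, and at least one index set is done per stage */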
  nmax          = 20*1000000 / (c->N * sizeof(int));
  if (nmax == 0) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

  /* Make sure every processor loops through the same number of stages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);CHKERRQ(ierr);

  for ( i=0,pos=0; i<nstages; i++ ) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
#undef __FUNC__
#define __FUNC__ "MatGetSubMatrices_MPIDense_Local"
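/*
   MatGetSubMatrices_MPIDense_Local - Does the actual extraction for one stage.
   The communication proceeds in two rounds:
     1) each process scans its index sets, determines which requested rows are
        owned by which process, and sends each owner one message listing the
        index sets and global row numbers it needs (tag0);
     2) each owner replies with the full rows (all N columns, since the matrix
        is distributed by rows) packed as Scalars (tag1).
   Locally owned rows are copied directly; received rows are placed in the
   submatrices with the help of the rmap (global row -> submatrix row) tables.
*/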
int MatGetSubMatrices_MPIDense_Local(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat *submats)
{
  Mat_MPIDense  *c = (Mat_MPIDense *) C->data;
  Mat            A = c->A;
  Mat_SeqDense  *a = (Mat_SeqDense*)A->data, *mat;
  int           N = c->N, rstart = c->rstart,count;
  int           **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,*rtable,start,end,size;
  int           **sbuf1, rank, m,i,j,k,l,ct1,ierr, **rbuf1,row,proc;
  int           nrqs, msz, **ptr,index,*ctr,*pa,*tmp,bsz,nrqr;
  int           is_no,jmax,*irow_i,**rmap,*rmap_i;
  int           len,ctr_j,*sbuf1_j,*rbuf1_i;
  int           tag0,tag1;
  MPI_Request   *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status    *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm      comm;
  Scalar        **rbuf2,**sbuf2;

  PetscFunctionBegin;
  comm   = C->comm;
  tag0   = C->tag;
  size   = c->size;
  rank   = c->rank;
  m      = c->M;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
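  /* tag0 is used for the row-request messages, tag1 for the row-value replies */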

  /* Check that the row and column index sets are sorted */
  for ( i=0; i<ismax; i++ ) {
    ierr = ISSorted(isrow[i],(PetscTruth*)&j);CHKERRQ(ierr);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,0,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],(PetscTruth*)&j);CHKERRQ(ierr);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,0,"IScol is not sorted");
  }

  len    =  2*ismax*(sizeof(int *)+sizeof(int)) + (m+1)*sizeof(int);
  irow   = (int **)PetscMalloc(len);CHKPTRQ(irow);
  icol   = irow + ismax;
  nrow   = (int *) (icol + ismax);
  ncol   = nrow + ismax;
  rtable = ncol + ismax;

  for ( i=0; i<ismax; i++ ) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create lookup table for the mapping: row -> proc */
  for ( i=0,j=0; i<size; i++ ) {
    jmax = c->rowners[i+1];
    for ( ; j<jmax; j++ ) {
      rtable[j] = i;
    }
  }

  /* Evaluate communication - message to whom, length of message, and buffer space
     required. Based on this, buffers are allocated and data copied into them */
  w1     = (int *)PetscMalloc(size*4*sizeof(int));CHKPTRQ(w1); /* mesg size */
  w2     = w1 + size;      /* if w2[i] marked, then a message to proc i */
  w3     = w2 + size;      /* no of IS that need to be sent to proc i */
  w4     = w3 + size;      /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
  for ( i=0; i<ismax; i++ ) {
    ierr   = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for ( j=0; j<jmax; j++ ) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for ( j=0; j<size; j++ ) {
      if (w4[j]) { w1[j] += w4[j];  w3[j]++;}
    }
  }

  nrqs     = 0;              /* no of outgoing messages */
  msz      = 0;              /* total mesg length (for all procs) */
  w1[rank] = 0;              /* no mesg sent to self */
  w3[rank] = 0;
  for ( i=0; i<size; i++ ) {
    if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
  }
  pa = (int *)PetscMalloc((nrqs+1)*sizeof(int));CHKPTRQ(pa); /* proc array */
  for ( i=0, j=0; i<size; i++ ) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for ( i=0; i<nrqs; i++ ) {
    j     = pa[i];
    w1[j] += w2[j] + 2* w3[j];
    msz   += w1[j];
  }
  /* Do a global reduction to determine how many messages to expect */
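  /* After the reduction, rw1[rank] is the length of the longest message this
     process will receive and rw1[size+rank] is the number of messages it will
     receive (PetscMaxSum_Op maxes the first size entries and sums the rest) */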
  {
    int *rw1;
    rw1   = (int *)PetscMalloc(2*size*sizeof(int));CHKPTRQ(rw1);
    ierr  = MPI_Allreduce(w1, rw1, 2*size, MPI_INT, PetscMaxSum_Op, comm);CHKERRQ(ierr);
    bsz   = rw1[rank];
    nrqr  = rw1[size+rank];
    ierr  = PetscFree(rw1);CHKERRQ(ierr);
  }

  /* Allocate memory for the recv buffers (none are needed if nrqr = 0) */
  len      = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
  rbuf1    = (int**) PetscMalloc(len);CHKPTRQ(rbuf1);
  rbuf1[0] = (int *) (rbuf1 + nrqr);
  for ( i=1; i<nrqr; ++i ) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  r_waits1 = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(r_waits1);
  for ( i=0; i<nrqr; ++i ) {
    ierr = MPI_Irecv(rbuf1[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRQ(ierr);
  }

  /* Allocate memory for outgoing messages */
  len   = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
  sbuf1 = (int **)PetscMalloc(len);CHKPTRQ(sbuf1);
  ptr   = sbuf1 + size;   /* Pointers to the data in outgoing buffers */
  ierr  = PetscMemzero(sbuf1,2*size*sizeof(int*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp   = (int *) (ptr + size);
  ctr   = tmp + 2*msz;

  {
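    /* Point sbuf1[j] into tmp at the start of the space reserved for the message
       to proc j (ict carries the length of the previous message) */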
    int *iptr = tmp,ict = 0;
    for ( i=0; i<nrqs; i++ ) {
      j         = pa[i];
      iptr     += ict;
      sbuf1[j]  = iptr;
      ict       = w1[j];
    }
  }

  /* Form the outgoing messages */
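  /* Layout of the request message sbuf1[j] sent to proc j:
       sbuf1[j][0]                    - number of index sets that need rows from proc j
       sbuf1[j][2*k-1], sbuf1[j][2*k] - index set number and row count for the k-th such set
       remaining w1[j]-2*w3[j]-1 ints - the requested global row numbers, grouped by index set
  */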
  /* Initialize the header space */
  for ( i=0; i<nrqs; i++ ) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1, 2*w3[j]*sizeof(int));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for ( i=0; i<ismax; i++ ) {
    ierr = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for ( j=0; j<jmax; j++ ) {  /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for ( j=0; j<size; j++ ) { /* Could optimize this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  s_waits1 = (MPI_Request *) PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(s_waits1);
  for ( i=0; i<nrqs; ++i ) {
    j = pa[i];
    ierr = MPI_Isend( sbuf1[j], w1[j], MPI_INT, j, tag0, comm, s_waits1+i);CHKERRQ(ierr);
  }

  /* Post receives to capture the row data from other procs */
  r_waits2 = (MPI_Request *) PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits2);
  rbuf2    = (Scalar**)PetscMalloc((nrqs+1)*sizeof(Scalar *));CHKPTRQ(rbuf2);
  for ( i=0; i<nrqs; i++ ) {
    j        = pa[i];
    count    = (w1[j] - (2*sbuf1[j][0] + 1))*N;
    rbuf2[i] = (Scalar *)PetscMalloc((count+1)*sizeof(Scalar));CHKPTRQ(rbuf2[i]);
    ierr     = MPI_Irecv( rbuf2[i], count, MPIU_SCALAR, j, tag1, comm, r_waits2+i);CHKERRQ(ierr);
  }

  /* Receive the messages (row numbers), then pack the requested row values
     and send them off to the requesting processors */

  s_waits2  = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits2);
  r_status1 = (MPI_Status *) PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(r_status1);
  sbuf2     = (Scalar **) PetscMalloc((nrqr+1)*sizeof(Scalar*));CHKPTRQ(sbuf2);

  {
    Scalar *sbuf2_i,*v_start;
    int    s_proc;
    for ( i=0; i<nrqr; ++i ) {
      ierr = MPI_Waitany(nrqr, r_waits1, &index, r_status1+i);CHKERRQ(ierr);
      s_proc          = r_status1[i].MPI_SOURCE; /* requesting processor */
      rbuf1_i         = rbuf1[index]; /* actual message from s_proc */
      /* number of requested rows = end - start; start indexes past the header
         (0-based), while end is the total length of the message (1-based) */
      start           = 2*rbuf1_i[0] + 1;
      ierr = MPI_Get_count(r_status1+i,MPI_INT,&end);CHKERRQ(ierr);
      /* allocate memory sufficient to hold all the row values */
      sbuf2[index] = (Scalar *)PetscMalloc((end-start)*N*sizeof(Scalar));CHKPTRQ(sbuf2[index]);
      sbuf2_i      = sbuf2[index];
      /* Now pack the data */
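      /* The local dense array a->v is stored column major with leading dimension
         a->m, so successive entries of one row are a->m apart */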
      for ( j=start; j<end; j++ ) {
        row = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for ( k=0; k<N; k++ ) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++; v_start+=a->m;
        }
      }
      /* Now send off the data */
      ierr = MPI_Isend(sbuf2[index],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  /* End Send-Recv of IS + row_numbers */
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  s_status1 = (MPI_Status *) PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(s_status1);
  ierr      = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for ( i=0; i<ismax; i++ ) {
      mat = (Mat_SeqDense *)(submats[i]->data);
      if ((mat->m != nrow[i]) || (mat->n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,0,"Cannot reuse matrix, wrong size");
      }
      ierr = PetscMemzero(mat->v,mat->m*mat->n*sizeof(Scalar));CHKERRQ(ierr);
      submats[i]->factor = C->factor;
    }
  } else {
    for ( i=0; i<ismax; i++ ) {
      ierr = MatCreateSeqDense(PETSC_COMM_SELF,nrow[i],ncol[i],PETSC_NULL,submats+i);CHKERRQ(ierr);
    }
  }

  /* Assemble the locally owned rows of the submatrices directly from the local data */
  {
    int    col;
    Scalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for ( i=0; i<ismax; i++ ) {
      mat       = (Mat_SeqDense *) submats[i]->data;
      mat_v     = a->v;
      imat_v    = mat->v;
      irow_i    = irow[i];
      m         = nrow[i];
      for ( j=0; j<m; j++ ) {
        row      = irow_i[j];
        proc     = rtable[row];
        if (proc == rank) {
          row      = row - rstart;
          mat_vi   = mat_v + row;
          imat_vi  = imat_v + j;
          for ( k=0; k<ncol[i]; k++ ) {
            col = icol[i][k];
            imat_vi[k*m] = mat_vi[col*a->m];
          }
        }
      }
    }
  }

  /* Create row map. This maps c->row to submat->row for each submat */
  /* this is a very expensive operation wrt memory usage */
  len     = (1+ismax)*sizeof(int *) + ismax*c->M*sizeof(int);
  rmap    = (int **)PetscMalloc(len);CHKPTRQ(rmap);
  rmap[0] = (int *)(rmap + ismax);
  ierr    = PetscMemzero(rmap[0],ismax*c->M*sizeof(int));CHKERRQ(ierr);
  for ( i=1; i<ismax; i++ ) { rmap[i] = rmap[i-1] + c->M;}
  for ( i=0; i<ismax; i++ ) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for ( j=0; j<jmax; j++ ) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Now receive the row values and assemble the rest of the matrix */
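  /* rbuf2[i] holds the rows received from proc pa[i]: N Scalars per row, in the
     same order in which the rows were listed in the request sbuf1[pa[i]] */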

  r_status2 = (MPI_Status *) PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(r_status2);

  {
    int    is_max,tmp1,col,*sbuf1_i,is_sz;
    Scalar *rbuf2_i,*imat_v,*imat_vi;

    for ( tmp1=0; tmp1<nrqs; tmp1++ ) { /* For each message */
      ierr = MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);CHKERRQ(ierr);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for ( j=1; j<=is_max; j++ ) { /* For each IS belonging to the message */
        is_no     = sbuf1_i[2*j-1];
        is_sz     = sbuf1_i[2*j];
        mat       = (Mat_SeqDense *) submats[is_no]->data;
        imat_v    = mat->v;
        rmap_i    = rmap[is_no];
        m         = nrow[is_no];
        for ( k=0; k<is_sz; k++,rbuf2_i+=N) {  /* For each row */
          row      = sbuf1_i[ct1]; ct1++;
          row      = rmap_i[row];
          imat_vi  = imat_v + row;
          for ( l=0; l<ncol[is_no]; l++ ) { /* For each col */
            col = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  s_status2 = (MPI_Status *) PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(s_status2);
  ierr      = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Restore the indices */
  for ( i=0; i<ismax; i++ ) {
    ierr = ISRestoreIndices(isrow[i], irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i], icol+i);CHKERRQ(ierr);
  }

  /* Destroy allocated memory */
  ierr = PetscFree(irow);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  for ( i=0; i<nrqs; ++i ) {
    ierr = PetscFree(rbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(sbuf1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  for ( i=0; i<nrqr; ++i ) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  ierr = PetscObjectRestoreNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  for ( i=0; i<ismax; i++ ) {
    ierr = MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}