#ifdef PETSC_RCS_HEADER
static char vcid[] = "$Id: mmdense.c,v 1.23 1999/08/17 19:00:49 balay Exp bsmith $";
#endif

/*
   Support for the parallel dense matrix vector multiply
*/
#include "src/mat/impls/dense/mpi/mpidense.h"
#include "src/vec/vecimpl.h"

#undef __FUNC__
#define __FUNC__ "MatSetUpMultiply_MPIDense"
int MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense *) mat->data;
  int          ierr;
  IS           tofrom;
  Vec          gvec;

  PetscFunctionBegin;
  /* Create local vector that is used to scatter into */
  ierr = VecCreateSeq(PETSC_COMM_SELF,mdn->N,&mdn->lvec);CHKERRQ(ierr);

  /* Create temporary index set for building scatter gather */
  ierr = ISCreateStride(PETSC_COMM_SELF,mdn->N,0,1,&tofrom);CHKERRQ(ierr);

  /* Create temporary global vector to generate scatter context */
  /* n    = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */

  ierr = VecCreateMPI(mat->comm,mdn->nvec,mdn->N,&gvec);CHKERRQ(ierr);

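  /* Note: the scatter built below copies the *entire* distributed vector into
     the sequential work vector lvec (both index sets are the stride 0..N-1),
     since a dense matrix-vector product needs every entry of x on every
     process. */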
  /* Generate the scatter context */
  ierr = VecScatterCreate(gvec,tofrom,mdn->lvec,tofrom,&mdn->Mvctx);CHKERRQ(ierr);
  PLogObjectParent(mat,mdn->Mvctx);
  PLogObjectParent(mat,mdn->lvec);
  PLogObjectParent(mat,tofrom);
  PLogObjectParent(mat,gvec);

  ierr = ISDestroy(tofrom);CHKERRQ(ierr);
  ierr = VecDestroy(gvec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

extern int MatGetSubMatrices_MPIDense_Local(Mat,int,IS*,IS*,MatReuse,Mat*);
#undef __FUNC__
#define __FUNC__ "MatGetSubMatrices_MPIDense"
int MatGetSubMatrices_MPIDense(Mat C,int ismax,IS *isrow,IS *iscol,
                             MatReuse scall,Mat **submat)
{
  Mat_MPIDense  *c = (Mat_MPIDense *) C->data;
  int           nmax,nstages_local,nstages,i,pos,max_no,ierr;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    *submat = (Mat *)PetscMalloc((ismax+1)*sizeof(Mat));CHKPTRQ(*submat);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax          = 20*1000000 / (c->N * sizeof(int));
  if (nmax == 0) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);
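  /* The submatrices are extracted in stages so that the index buffers stay
     bounded: nmax caps how many index sets are handled per stage, chosen so
     that roughly 20,000,000 bytes of integer row data (nmax*N*sizeof(int))
     are handled at once. */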

  /* Make sure every processor loops through the nstages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);CHKERRQ(ierr);


  for ( i=0,pos=0; i<nstages; i++ ) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
#undef __FUNC__
#define __FUNC__ "MatGetSubMatrices_MPIDense_Local"
int MatGetSubMatrices_MPIDense_Local(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat *submats)
{
  Mat_MPIDense  *c = (Mat_MPIDense *) C->data;
  Mat            A = c->A;
  Mat_SeqDense  *a = (Mat_SeqDense*)A->data, *mat;
  int           N = c->N, rstart = c->rstart,count;
  int           **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,*rtable,start,end,size;
  int           **sbuf1, rank, m,i,j,k,l,ct1,ierr, **rbuf1,row,proc;
  int           nrqs, msz, **ptr,index,*ctr,*pa,*tmp,bsz,nrqr;
  int           is_no,jmax,*irow_i,**rmap,*rmap_i;
  int           len,ctr_j,*sbuf1_j,*rbuf1_i;
  int           tag0,tag1;
  MPI_Request   *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status    *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm      comm;
  Scalar        **rbuf2,**sbuf2;

  PetscFunctionBegin;
  comm   = C->comm;
  tag0   = C->tag;
  size   = c->size;
  rank   = c->rank;
  m      = c->M;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  /* Check that the row and column index sets are sorted */
  for ( i=0; i<ismax; i++ ) {
    ierr = ISSorted(isrow[i],(PetscTruth*)&j);CHKERRQ(ierr);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,0,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],(PetscTruth*)&j);CHKERRQ(ierr);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,0,"IScol is not sorted");
  }

  len    =  2*ismax*(sizeof(int *)+sizeof(int)) + (m+1)*sizeof(int);
  irow   = (int **)PetscMalloc(len);CHKPTRQ(irow);
  icol   = irow + ismax;
  nrow   = (int *) (icol + ismax);
  ncol   = nrow + ismax;
  rtable = ncol + ismax;

  for ( i=0; i<ismax; i++ ) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create lookup table for the mapping: row -> owning proc */
  for ( i=0,j=0; i<size; i++ ) {
    jmax = c->rowners[i+1];
    for ( ; j<jmax; j++ ) {
      rtable[j] = i;
    }
  }
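  /* rtable[row] now gives the rank that owns global row 'row'; e.g. if
     c->rowners = {0,4,9,...}, rows 0..3 map to rank 0 and rows 4..8 to
     rank 1. */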

  /* Evaluate communication: to whom to send a message, the message length, and
     the buffer space required. Based on this, buffers are allocated and data is
     copied into them */
  w1     = (int *)PetscMalloc(size*4*sizeof(int));CHKPTRQ(w1); /* mesg size */
  w2     = w1 + size;      /* if w2[i] marked, then a message to proc i*/
  w3     = w2 + size;      /* no of IS that needs to be sent to proc i */
  w4     = w3 + size;      /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialize work vector*/
  for ( i=0; i<ismax; i++ ) {
    ierr   = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialize work vector*/
    jmax   = nrow[i];
    irow_i = irow[i];
    for ( j=0; j<jmax; j++ ) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for ( j=0; j<size; j++ ) {
      if (w4[j]) { w1[j] += w4[j];  w3[j]++;}
    }
  }

  nrqs     = 0;              /* no of outgoing messages */
  msz      = 0;              /* total mesg length (for all procs) */
  w1[rank] = 0;              /* no mesg sent to self */
  w3[rank] = 0;
  for ( i=0; i<size; i++ ) {
    if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
  }
  pa = (int *)PetscMalloc((nrqs+1)*sizeof(int));CHKPTRQ(pa); /*(proc -array)*/
  for ( i=0, j=0; i<size; i++ ) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
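  /* Layout of the message sent to proc j:
       sbuf1[j][0]                     - number of index sets that need rows from proc j
       sbuf1[j][2*k-1], sbuf1[j][2*k]  - index-set number and row count for the k-th
                                         such set (k = 1..sbuf1[j][0])
       remaining entries               - the requested global row numbers, grouped by
                                         index set                                      */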
  for ( i=0; i<nrqs; i++ ) {
    j     = pa[i];
    w1[j] += w2[j] + 2* w3[j];
    msz   += w1[j];
  }
  /* Do a global reduction to determine how many messages to expect*/
  {
    int *rw1, *rw2;
    rw1   = (int *)PetscMalloc(2*size*sizeof(int));CHKPTRQ(rw1);
    rw2   = rw1+size;
    ierr  = MPI_Allreduce(w1, rw1, size, MPI_INT, MPI_MAX, comm);CHKERRQ(ierr);
    bsz   = rw1[rank];
    ierr  = MPI_Allreduce(w2, rw2, size, MPI_INT, MPI_SUM, comm);CHKERRQ(ierr);
    nrqr  = rw2[rank];
    ierr  = PetscFree(rw1);CHKERRQ(ierr);
  }

  /* Allocate memory for recv buffers (probably none needed if nrqr == 0) */
  len      = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
  rbuf1    = (int**) PetscMalloc(len);CHKPTRQ(rbuf1);
  rbuf1[0] = (int *) (rbuf1 + nrqr);
  for ( i=1; i<nrqr; ++i ) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  r_waits1 = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(r_waits1);
  for ( i=0; i<nrqr; ++i ) {
    ierr = MPI_Irecv(rbuf1[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRQ(ierr);
  }

  /* Allocate Memory for outgoing messages */
  len   = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
  sbuf1 = (int **)PetscMalloc(len);CHKPTRQ(sbuf1);
  ptr   = sbuf1 + size;   /* Pointers to the data in outgoing buffers */
  ierr  = PetscMemzero(sbuf1,2*size*sizeof(int*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp   = (int *) (ptr + size);
  ctr   = tmp + 2*msz;

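  /* Partition the scratch array tmp among the destination processes: each
     destination's send buffer starts right after the w1[] entries of the
     previously processed destination, so the outgoing messages are packed
     back to back in tmp. */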
  {
    int *iptr = tmp,ict = 0;
    for ( i=0; i<nrqs; i++ ) {
      j         = pa[i];
      iptr     += ict;
      sbuf1[j]  = iptr;
      ict       = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for ( i=0; i<nrqs; i++ ) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1, 2*w3[j]*sizeof(int));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for ( i=0; i<ismax; i++ ) {
    ierr = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for ( j=0; j<jmax; j++ ) {  /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf*/
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for ( j=0; j<size; j++ ) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  s_waits1 = (MPI_Request *) PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(s_waits1);
  for ( i=0; i<nrqs; ++i ) {
    j = pa[i];
    ierr = MPI_Isend( sbuf1[j], w1[j], MPI_INT, j, tag0, comm, s_waits1+i);CHKERRQ(ierr);
  }

  /* Post receives to capture the row_data from other procs */
  r_waits2 = (MPI_Request *) PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits2);
  rbuf2    = (Scalar**)PetscMalloc((nrqs+1)*sizeof(Scalar *));CHKPTRQ(rbuf2);
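  /* The reply from proc j carries N Scalars per requested row; the number of
     requested rows is the message length w1[j] minus the header length
     2*(number of index sets) + 1. */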
  for ( i=0; i<nrqs; i++ ) {
    j        = pa[i];
    count    = (w1[j] - (2*sbuf1[j][0] + 1))*N;
    rbuf2[i] = (Scalar *)PetscMalloc((count+1)*sizeof(Scalar));CHKPTRQ(rbuf2[i]);
    ierr     = MPI_Irecv( rbuf2[i], count, MPIU_SCALAR, j, tag1, comm, r_waits2+i);CHKERRQ(ierr);
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processors */

  s_waits2  = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits2);
  r_status1 = (MPI_Status *) PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(r_status1);
  sbuf2     = (Scalar **) PetscMalloc((nrqr+1)*sizeof(Scalar*));CHKPTRQ(sbuf2);

  {
    Scalar *sbuf2_i,*v_start;
    int    s_proc;
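    /* For each request: the header (rbuf1_i[0] index-set count plus 2 ints per
       set) is skipped and each remaining entry is a global row number. The
       local dense storage in a->v is column major with leading dimension a->m,
       so a row is gathered by starting at a->v + (row - rstart) and stepping
       by a->m across the N columns. */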
    for ( i=0; i<nrqr; ++i ) {
      ierr = MPI_Waitany(nrqr, r_waits1, &index, r_status1+i);CHKERRQ(ierr);
      s_proc          = r_status1[i].MPI_SOURCE; /* requesting processor; destination of the reply */
      rbuf1_i         = rbuf1[index]; /* Actual message from s_proc */
      /* no of rows = end - start; start is a 0-based array index, while end is
         the length of the buffer, i.e. a 1-based count */
      start           = 2*rbuf1_i[0] + 1;
      MPI_Get_count(r_status1+i,MPI_INT, &end);
      /* allocate memory sufficient to hold all the row values */
      sbuf2[index] = (Scalar *)PetscMalloc((end-start)*N*sizeof(Scalar));CHKPTRQ(sbuf2[index]);
      sbuf2_i      = sbuf2[index];
      /* Now pack the data */
      for ( j=start; j<end; j++ ) {
        row = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for ( k=0; k<N; k++ ) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++; v_start+=a->m;
        }
      }
      /* Now send off the data */
      ierr = MPI_Isend(sbuf2[index],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  /* End Send-Recv of IS + row_numbers */
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  s_status1 = (MPI_Status *) PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(s_status1);
  ierr      = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for ( i=0; i<ismax; i++ ) {
      mat = (Mat_SeqDense *)(submats[i]->data);
      if ((mat->m != nrow[i]) || (mat->n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,0,"Cannot reuse matrix. wrong size");
      }
      ierr = PetscMemzero(mat->v,mat->m*mat->n*sizeof(Scalar));CHKERRQ(ierr);
      submats[i]->factor = C->factor;
    }
  } else {
    for ( i=0; i<ismax; i++ ) {
      ierr = MatCreateSeqDense(PETSC_COMM_SELF,nrow[i],ncol[i],PETSC_NULL,submats+i);CHKERRQ(ierr);
    }
  }

  /* Assemble the matrices */
  {
    int    col;
    Scalar *imat_v,*mat_v,*imat_vi,*mat_vi;

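    /* Copy the locally owned rows straight from A's column-major storage
       (leading dimension a->m) into each submatrix's column-major storage
       (leading dimension m = nrow[i]); only the columns listed in icol[i]
       are kept. */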
    for ( i=0; i<ismax; i++ ) {
      mat       = (Mat_SeqDense *) submats[i]->data;
      mat_v     = a->v;
      imat_v    = mat->v;
      irow_i    = irow[i];
      m         = nrow[i];
      for ( j=0; j<m; j++ ) {
        row      = irow_i[j];
        proc     = rtable[row];
        if (proc == rank) {
          row      = row - rstart;
          mat_vi   = mat_v + row;
          imat_vi  = imat_v + j;
          for ( k=0; k<ncol[i]; k++ ) {
            col = icol[i][k];
            imat_vi[k*m] = mat_vi[col*a->m];
          }
        }
      }
    }
  }

  /* Create row map. This maps c->row to submat->row for each submat*/
  /* this is a very expensive operation wrt memory usage */
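  /* rmap[i][global_row] = position of global_row within isrow[i], i.e. its row
     in submats[i]; stored as ismax full arrays of c->M ints each rather than a
     hash table, which is what makes this step memory hungry. */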
  len     = (1+ismax)*sizeof(int *) + ismax*c->M*sizeof(int);
  rmap    = (int **)PetscMalloc(len);CHKPTRQ(rmap);
  rmap[0] = (int *)(rmap + ismax);
  ierr    = PetscMemzero(rmap[0],ismax*c->M*sizeof(int));CHKERRQ(ierr);
  for ( i=1; i<ismax; i++ ) { rmap[i] = rmap[i-1] + c->M;}
  for ( i=0; i<ismax; i++ ) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for ( j=0; j<jmax; j++ ) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Now receive the row values and assemble the rest of each submatrix */

  r_status2 = (MPI_Status *) PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(r_status2);

  {
    int    is_max,tmp1,col,*sbuf1_i,is_sz;
    Scalar *rbuf2_i,*imat_v,*imat_vi;

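    /* For each reply: the header of the request we sent (sbuf1[pa[i]]) tells
       which index sets, and how many rows per set, came back from that
       process. Each received row holds all N columns; only the columns
       selected by icol[is_no] are copied, at the submatrix row given by
       rmap. */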
    for ( tmp1=0; tmp1<nrqs; tmp1++ ) { /* For each message */
      ierr = MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);CHKERRQ(ierr);
      /* Now dig out the corresponding sbuf1, which contains the IS data_structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for ( j=1; j<=is_max; j++ ) { /* For each IS belonging to the message */
        is_no     = sbuf1_i[2*j-1];
        is_sz     = sbuf1_i[2*j];
        mat       = (Mat_SeqDense *) submats[is_no]->data;
        imat_v    = mat->v;
        rmap_i    = rmap[is_no];
        m         = nrow[is_no];
        for ( k=0; k<is_sz; k++,rbuf2_i+=N) {  /* For each row */
          row      = sbuf1_i[ct1]; ct1++;
          row      = rmap_i[row];
          imat_vi  = imat_v + row;
          for ( l=0; l<ncol[is_no]; l++ ) { /* For each col */
            col = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  s_status2 = (MPI_Status *) PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(s_status2);
  ierr      = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Restore the indices */
  for ( i=0; i<ismax; i++ ) {
    ierr = ISRestoreIndices(isrow[i], irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i], icol+i);CHKERRQ(ierr);
  }

  /* Destroy allocated memory */
  ierr = PetscFree(irow);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);


  for ( i=0; i<nrqs; ++i ) {
    ierr = PetscFree(rbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(sbuf1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  for ( i=0; i<nrqr; ++i ) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  ierr = PetscObjectRestoreNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  for ( i=0; i<ismax; i++ ) {
    ierr = MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}