/*$Id: mmdense.c,v 1.40 2001/09/07 20:09:22 bsmith Exp $*/

/*
   Support for the parallel dense matrix vector multiply
*/
#include "src/mat/impls/dense/mpi/mpidense.h"

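/*
   MatSetUpMultiply_MPIDense - Builds the scatter that gathers ALL N entries of
   the distributed input vector into the sequential work vector lvec on every
   process; a dense matrix couples every column, so each process needs the
   entire vector for its local part of the matrix-vector product.
*/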
#undef __FUNCT__
#define __FUNCT__ "MatSetUpMultiply_MPIDense"
int MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
  int          ierr;
  IS           from,to;
  Vec          gvec;

  PetscFunctionBegin;
  /* Create local vector that is used to scatter into */
  ierr = VecCreateSeq(PETSC_COMM_SELF,mat->N,&mdn->lvec);CHKERRQ(ierr);

  /* Create temporary index sets for building the scatter/gather */
  ierr = ISCreateStride(mat->comm,mat->N,0,1,&from);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,mat->N,0,1,&to);CHKERRQ(ierr);

  /* Create temporary global vector to generate scatter context */
  /* n    = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */

  ierr = VecCreateMPI(mat->comm,mdn->nvec,mat->N,&gvec);CHKERRQ(ierr);

  /* Generate the scatter context */
  ierr = VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);CHKERRQ(ierr);
  PetscLogObjectParent(mat,mdn->Mvctx);
  PetscLogObjectParent(mat,mdn->lvec);
  PetscLogObjectParent(mat,from);
  PetscLogObjectParent(mat,to);
  PetscLogObjectParent(mat,gvec);

  ierr = ISDestroy(to);CHKERRQ(ierr);
  ierr = ISDestroy(from);CHKERRQ(ierr);
  ierr = VecDestroy(gvec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

EXTERN int MatGetSubMatrices_MPIDense_Local(Mat,int,const IS[],const IS[],MatReuse,Mat*);
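/*
   MatGetSubMatrices_MPIDense - Extracts the requested submatrices in stages,
   limiting the number of index sets handled per stage so that the O(N) integer
   workspace per index set stays around 20 MB; e.g. with nmax = 10, ismax = 25
   is processed in three stages of 10, 10 and 5 index sets.
*/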
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIDense"
int MatGetSubMatrices_MPIDense(Mat C,int ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  int           nmax,nstages_local,nstages,i,pos,max_no,ierr;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which the submatrices are extracted */
  nmax          = 20*1000000 / (C->N * sizeof(int));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

  /* Make sure every processor loops through all the stages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);CHKERRQ(ierr);

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
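
/*
   Usage sketch (illustrative only; isrow/iscol below are hypothetical index
   sets): every process extracts the leading 2x2 block of a parallel dense
   matrix C through the generic interface, which dispatches here for MPIDense
   matrices.

     IS  isrow,iscol;
     Mat *submat;
     int idx[2] = {0,1},ierr;

     ierr = ISCreateGeneral(PETSC_COMM_SELF,2,idx,&isrow);CHKERRQ(ierr);
     ierr = ISCreateGeneral(PETSC_COMM_SELF,2,idx,&iscol);CHKERRQ(ierr);
     ierr = MatGetSubMatrices(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);

   Note that the row and column index sets must be sorted (checked below).
*/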
/* -------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIDense_Local"
int MatGetSubMatrices_MPIDense_Local(Mat C,int ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense  *c = (Mat_MPIDense*)C->data;
  Mat           A = c->A;
  Mat_SeqDense  *a = (Mat_SeqDense*)A->data,*mat;
  int           N = C->N,rstart = c->rstart,count;
  int           **irow,**icol,*nrow,*ncol,*w1,*w3,*w4,*rtable,start,end,size;
  int           **sbuf1,rank,m,i,j,k,l,ct1,ierr,**rbuf1,row,proc;
  int           nrqs,msz,**ptr,idex,*ctr,*pa,*tmp,bsz,nrqr;
  int           is_no,jmax,*irow_i,**rmap,*rmap_i;
  int           len,ctr_j,*sbuf1_j,*rbuf1_i;
  int           tag0,tag1;
  MPI_Request   *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status    *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm      comm;
  PetscScalar   **rbuf2,**sbuf2;

  PetscFunctionBegin;
  comm   = C->comm;
  tag0   = C->tag;
  size   = c->size;
  rank   = c->rank;
  m      = C->M;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  /* Check if the row and column indices are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(isrow[i],(PetscTruth*)&j);CHKERRQ(ierr);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],(PetscTruth*)&j);CHKERRQ(ierr);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  len    = 2*ismax*(sizeof(int *)+sizeof(int)) + (m+1)*sizeof(int);
  ierr   = PetscMalloc(len,&irow);CHKERRQ(ierr);
  icol   = irow + ismax;
  nrow   = (int*)(icol + ismax);
  ncol   = nrow + ismax;
  rtable = ncol + ismax;

  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create a lookup table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    jmax = c->rowners[i+1];
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }
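  /* e.g. with 2 processes and rowners = {0,3,7}, rows 0..2 map to proc 0 and
     rows 3..6 map to proc 1, so rtable = {0,0,0,1,1,1,1} */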

  /* Evaluate the communication: to whom messages go, their lengths, and the
     buffer space required. Based on this, buffers are allocated and data copied into them */
  ierr   = PetscMalloc(size*4*sizeof(int),&w1);CHKERRQ(ierr); /* mesg size */
  w3     = w1 + 2*size;      /* no of IS that needs to be sent to proc i */
  w4     = w3 + size;        /* temp work space used in determining w1, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j];  w3[j]++;}
    }
  }

  nrqs       = 0;              /* no of outgoing messages */
  msz        = 0;              /* total mesg length (for all procs) */
  w1[2*rank] = 0;              /* no mesg sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i])  { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(int),&pa);CHKERRQ(ierr); /* (proc array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j       = pa[i];
    w1[2*j] += w1[2*j+1] + 2*w3[j];
    msz     += w1[2*j];
  }
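  /* e.g. a message requesting 2 rows of IS 0 and 3 rows of IS 2 is laid out as
       [2, 0,2, 2,3, r0,r1, r2,r3,r4]
     i.e. [no of IS, (is_no,nrows) pairs..., requested row numbers...] */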
  /* Do a global reduction to determine how many messages to expect */
  ierr = PetscMaxSum(comm,w1,&bsz,&nrqr);CHKERRQ(ierr);
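  /* bsz  = length of the longest message any process sends to this one;
     nrqr = number of messages this process will receive */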

  /* Allocate memory for the recv buffers (the +1 keeps the malloc legal when nrqr is 0) */
  len      = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
  ierr     = PetscMalloc(len,&rbuf1);CHKERRQ(ierr);
  rbuf1[0] = (int*)(rbuf1 + nrqr);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&r_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = MPI_Irecv(rbuf1[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRQ(ierr);
  }

  /* Allocate memory for outgoing messages */
  len   = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
  ierr  = PetscMalloc(len,&sbuf1);CHKERRQ(ierr);
  ptr   = sbuf1 + size;   /* pointers to the data in outgoing buffers */
  ierr  = PetscMemzero(sbuf1,2*size*sizeof(int*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp   = (int*)(ptr + size);
  ctr   = tmp + 2*msz;

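  /* Carve each destination's send buffer out of the single tmp allocation;
     iptr advances by the length w1[2*j] of the previous message */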
  {
    int *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j         = pa[i];
      iptr     += ict;
      sbuf1[j]  = iptr;
      ict       = w1[2*j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[2*j],MPI_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* Post receives to capture the row data from the other procs */
  ierr  = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
  ierr  = PetscMalloc((nrqs+1)*sizeof(PetscScalar*),&rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    count    = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
    ierr     = PetscMalloc((count+1)*sizeof(PetscScalar),&rbuf2[i]);CHKERRQ(ierr);
    ierr     = MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processors */

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscScalar*),&sbuf2);CHKERRQ(ierr);

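  /* The local dense array a->v is stored column major with leading dimension
     C->m (the number of local rows), so consecutive entries of one row are
     C->m apart; the packing below strides through it accordingly */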
  {
    PetscScalar *sbuf2_i,*v_start;
    int         s_proc;
    for (i=0; i<nrqr; ++i) {
      ierr = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      s_proc          = r_status1[i].MPI_SOURCE; /* requesting processor */
      rbuf1_i         = rbuf1[idex]; /* actual message from s_proc */
      /* no of rows = end - start; start indexes the first row number in the
         message (just past the header), while end is the total length of the
         buffer as reported by MPI_Get_count */
      start           = 2*rbuf1_i[0] + 1;
      ierr            = MPI_Get_count(r_status1+i,MPI_INT,&end);CHKERRQ(ierr);
      /* allocate memory sufficient to hold all the row values */
      ierr = PetscMalloc((end-start)*N*sizeof(PetscScalar),&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i      = sbuf2[idex];
      /* Now pack the data */
      for (j=start; j<end; j++) {
        row = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++; v_start += C->m;
        }
      }
      /* Now send off the data */
      ierr = MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
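  /* While the row-value messages are in flight, create the submatrices and
     fill in the locally owned rows below, overlapping communication with
     computation */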
  /* End Send-Recv of IS + row_numbers */
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->m != nrow[i]) || (submats[i]->n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");
      }
      ierr = PetscMemzero(mat->v,submats[i]->m*submats[i]->n*sizeof(PetscScalar));CHKERRQ(ierr);
      submats[i]->factor = C->factor;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreateSeqDense(PETSC_COMM_SELF,nrow[i],ncol[i],PETSC_NULL,submats+i);CHKERRQ(ierr);
    }
  }
  /* Assemble the matrices */
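  /* Locally owned rows are copied straight from a->v; both source and target
     are column major, so column k of submatrix row j lands at imat_v[j + k*m] */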
  {
    int         col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat       = (Mat_SeqDense*)submats[i]->data;
      mat_v     = a->v;
      imat_v    = mat->v;
      irow_i    = irow[i];
      m         = nrow[i];
      for (j=0; j<m; j++) {
        row      = irow_i[j];
        proc     = rtable[row];
        if (proc == rank) {
          row      = row - rstart;
          mat_vi   = mat_v + row;
          imat_vi  = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col = icol[i][k];
            imat_vi[k*m] = mat_vi[col*C->m];
          }
        }
      }
    }
  }

  /* Create the row map: maps a row of C to the corresponding row of each submat */
  /* this is a very expensive operation with respect to memory usage */
  len     = (1+ismax)*sizeof(int*) + ismax*C->M*sizeof(int);
  ierr    = PetscMalloc(len,&rmap);CHKERRQ(ierr);
  rmap[0] = (int*)(rmap + ismax);
  ierr    = PetscMemzero(rmap[0],ismax*C->M*sizeof(int));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->M;}
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }
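  /* e.g. if IS i selects global rows {4,9}, then rmap[i][4] = 0 and
     rmap[i][9] = 1; the remaining entries stay 0 and are never read, since
     lookups are done only for requested rows */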

  /* Now receive the row values and assemble the rest of the matrix */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);

  {
    int         is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      ierr = MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);CHKERRQ(ierr);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no     = sbuf1_i[2*j-1];
        is_sz     = sbuf1_i[2*j];
        mat       = (Mat_SeqDense*)submats[is_no]->data;
        imat_v    = mat->v;
        rmap_i    = rmap[is_no];
        m         = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) {  /* For each row */
          row      = sbuf1_i[ct1]; ct1++;
          row      = rmap_i[row];
          imat_vi  = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  /* Destroy allocated memory */
  ierr = PetscFree(irow);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(sbuf1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}