
/*
   Support for the parallel dense matrix-vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

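/*
   MatSetUpMultiply_MPIDense - builds the two pieces of communication machinery the
   parallel dense matrix-vector product relies on: a sequential work vector (mdn->lvec)
   long enough to hold all entries of the input vector, and a PetscSF (mdn->Mvctx)
   whose ALLGATHER pattern brings every entry of the distributed input vector to every
   rank before the local dense block is applied.
*/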
PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Create local vector that is used to scatter into */
  ierr = VecDestroy(&mdn->lvec);CHKERRQ(ierr);
  if (mdn->A) {
    ierr = MatCreateVecs(mdn->A,&mdn->lvec,NULL);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->lvec);CHKERRQ(ierr);
  }
  if (!mdn->Mvctx) {
    ierr = PetscLayoutSetUp(mat->cmap);CHKERRQ(ierr);
    ierr = PetscSFCreate(PetscObjectComm((PetscObject)mat),&mdn->Mvctx);CHKERRQ(ierr);
    ierr = PetscSFSetGraphWithPattern(mdn->Mvctx,mat->cmap,PETSCSF_PATTERN_ALLGATHER);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->Mvctx);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
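
/*
   A minimal sketch of how lvec and Mvctx are typically consumed by a matrix-vector
   product: the distributed input vector is gathered into lvec, then the local dense
   block is applied to the gathered vector. Illustrative only; the helper name is
   hypothetical, and the PetscSFBcast calls assume a PETSc version whose
   PetscSFBcastBegin/End take a final MPI_Op argument.

     static PetscErrorCode MyDenseMatMultSketch(Mat mat,Vec xx,Vec yy)
     {
       Mat_MPIDense      *mdn = (Mat_MPIDense*)mat->data;
       PetscErrorCode    ierr;
       const PetscScalar *ax;
       PetscScalar       *av;

       PetscFunctionBegin;
       ierr = VecGetArrayRead(xx,&ax);CHKERRQ(ierr);
       ierr = VecGetArray(mdn->lvec,&av);CHKERRQ(ierr);
       ierr = PetscSFBcastBegin(mdn->Mvctx,MPIU_SCALAR,ax,av,MPI_REPLACE);CHKERRQ(ierr);
       ierr = PetscSFBcastEnd(mdn->Mvctx,MPIU_SCALAR,ax,av,MPI_REPLACE);CHKERRQ(ierr);
       ierr = VecRestoreArray(mdn->lvec,&av);CHKERRQ(ierr);
       ierr = VecRestoreArrayRead(xx,&ax);CHKERRQ(ierr);
       ierr = MatMult(mdn->A,mdn->lvec,yy);CHKERRQ(ierr);
       PetscFunctionReturn(0);
     }
*/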

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);

PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscCalloc1(ismax+1,submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages in which the submatrices are extracted */
  nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);
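  /* nmax caps the index data handled per stage at roughly 20 MB: for example, with
     N = 1,000,000 columns and 4-byte PetscInt this gives nmax = 20e6/(4e6) = 5, so at
     most 5 index sets are processed per stage (with 64-bit indices nmax is halved) */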

  /* Make sure every processor loops through the same number of stages */
  ierr = MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRMPI(ierr);

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatCreateSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
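
/*
   A minimal usage sketch (assumptions: both index sets must be sorted, each process may
   request its own rows/columns, and the resulting sequential dense submatrices are
   released with MatDestroySubMatrices()):

     IS  isrow,iscol;
     Mat *submat;

     ierr = ISCreateStride(PETSC_COMM_SELF,5,0,1,&isrow);CHKERRQ(ierr);   rows 0..4
     ierr = ISCreateStride(PETSC_COMM_SELF,3,2,1,&iscol);CHKERRQ(ierr);   cols 2..4
     ierr = MatCreateSubMatrices(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
     ... use submat[0], a 5 x 3 sequential dense matrix, on each rank ...
     ierr = MatCreateSubMatrices(C,1,&isrow,&iscol,MAT_REUSE_MATRIX,&submat);CHKERRQ(ierr);
     ierr = MatDestroySubMatrices(1,&submat);CHKERRQ(ierr);
     ierr = ISDestroy(&isrow);CHKERRQ(ierr);
     ierr = ISDestroy(&iscol);CHKERRQ(ierr);
*/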
/* -------------------------------------------------------------------------*/
PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
  Mat            A  = c->A;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
  PetscInt       N = C->cmap->N,rstart = C->rmap->rstart,count;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w1,*w3,*w4,*rtable,start;
  PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
  PetscInt       is_no,jmax,**rmap,*rmap_i;
  PetscInt       ctr_j,*sbuf1_j,*rbuf1_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm       comm;
  PetscScalar    **rbuf2,**sbuf2;
  PetscBool      sorted;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  tag0 = ((PetscObject)C)->tag;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRMPI(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
  m    = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  /* Check that the row and column indices are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(isrow[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  ierr = PetscMalloc5(ismax,(PetscInt***)&irow,ismax,(PetscInt***)&icol,ismax,&nrow,ismax,&ncol,m,&rtable);CHKERRQ(ierr);
  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create lookup table for the mapping: global row -> owning process */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap->range[i+1];
    for (; j<jmax; j++) rtable[j] = i;
  }
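  /* rtable[r] now holds the rank that owns global row r; for example, with two
     processes and ownership ranges {0,3,7}, rows 0..2 map to rank 0 and rows 3..6
     map to rank 1 */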

  /* Evaluate communication - message to whom, length of message, and buffer space
     required. Based on this, buffers are allocated and data copied into them */
  ierr = PetscMalloc3(2*size,&w1,size,&w3,size,&w4);CHKERRQ(ierr);
  ierr = PetscArrayzero(w1,size*2);CHKERRQ(ierr); /* initialize work vector */
  ierr = PetscArrayzero(w3,size);CHKERRQ(ierr); /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscArrayzero(w4,size);CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j];  w3[j]++;}
    }
  }

  nrqs       = 0;              /* number of outgoing messages */
  msz        = 0;              /* total message length (for all procs) */
  w1[2*rank] = 0;              /* no message sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i])  { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
  }
  ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /* (proc array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message has a header = 1 + 2*(number of index sets) followed by the data */
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    w1[2*j] += w1[2*j+1] + 2* w3[j];
    msz     += w1[2*j];
  }
  /* Do a global reduction to determine how many messages to expect */
  ierr = PetscMaxSum(comm,w1,&bsz,&nrqr);CHKERRQ(ierr);
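  /* bsz is now the length of the longest message this process will receive and
     nrqr is the number of messages it will receive */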

  /* Allocate memory for recv buffers. Make sure rbuf1[0] exists by adding 1 to the buffer length */
  ierr = PetscMalloc1(nrqr+1,&rbuf1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr*bsz,&rbuf1[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  ierr = PetscMalloc1(nrqr+1,&r_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRMPI(ierr);
  }

  /* Allocate memory for outgoing messages */
  ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr);
  ierr = PetscArrayzero(sbuf1,size);CHKERRQ(ierr);
  ierr = PetscArrayzero(ptr,size);CHKERRQ(ierr);
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[2*j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscArrayzero(sbuf1[j]+1,2*w3[j]);CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscArrayzero(ctr,size);CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }
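
  /* At this point each outgoing buffer sbuf1[j] has the layout
       [ nis | is_no_1, nrows_1 | ... | is_no_nis, nrows_nis | requested row indices, grouped by IS ]
     where nis = sbuf1[j][0] is the number of index sets that need rows from process j */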

  /* Now post the sends */
  ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRMPI(ierr);
  }

  /* Post receives to capture the row data from other procs */
  ierr = PetscMalloc1(nrqs+1,&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqs+1,&rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
    ierr  = PetscMalloc1(count+1,&rbuf2[i]);CHKERRQ(ierr);
    ierr  = MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);CHKERRMPI(ierr);
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processors */

  ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&sbuf2);CHKERRQ(ierr);

  {
    PetscScalar *sbuf2_i,*v_start;
    PetscInt    s_proc;
    for (i=0; i<nrqr; ++i) {
      ierr    = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRMPI(ierr);
      s_proc  = r_status1[i].MPI_SOURCE;         /* sending processor */
      rbuf1_i = rbuf1[idex];         /* actual message from s_proc */
      /* number of rows = end - start; start is the offset of the first row index
         (just past the header) and end is the total length of the received buffer */
      start = 2*rbuf1_i[0] + 1;
      ierr  = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRMPI(ierr);
      /* allocate memory sufficient to hold all the row values */
      ierr    = PetscMalloc1((end-start)*N,&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
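      /* The local dense block a->v is stored column-major with leading dimension
         a->lda, so entry (row,col) lives at a->v[row + col*a->lda]; stepping
         v_start by a->lda below therefore walks across one full row */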
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += a->lda;
        }
      }
      /* Now send off the data */
      ierr = MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);CHKERRMPI(ierr);
    }
  }
  /* End Send-Recv of IS + row_numbers */
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRMPI(ierr);}
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");
      ierr = PetscArrayzero(mat->v,submats[i]->rmap->n*submats[i]->cmap->n);CHKERRQ(ierr);

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
      ierr = MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
      ierr = MatSeqDenseSetPreallocation(submats[i],NULL);CHKERRQ(ierr);
    }
  }

  /* Assemble the matrices */
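  /* Locally owned rows are copied straight out of the local dense block below;
     rows owned by other processes are filled in later, when the corresponding
     row values arrive in rbuf2 */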
  {
    PetscInt    col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*a->lda];
          }
        }
      }
    }
  }

  /* Create the row map: this maps a global row of C to the corresponding row of each submatrix */
  /* this is a very expensive operation with respect to memory usage */
  ierr = PetscMalloc1(ismax,&rmap);CHKERRQ(ierr);
  ierr = PetscCalloc1(ismax*C->rmap->N,&rmap[0]);CHKERRQ(ierr);
  for (i=1; i<ismax; i++) rmap[i] = rmap[i-1] + C->rmap->N;
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }
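  /* For example, if isrow[0] = {2,5,9} then rmap[0][2] = 0, rmap[0][5] = 1 and
     rmap[0][9] = 2; entries for rows not in the index set stay zero and are never
     consulted */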

  /* Now receive the row values and assemble the rest of the matrix */
  ierr = PetscMalloc1(nrqs+1,&r_status2);CHKERRQ(ierr);
  {
    PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      ierr = MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);CHKERRMPI(ierr);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) {  /* For each row */
          row     = sbuf1_i[ct1]; ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&s_status2);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRMPI(ierr);}
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  ierr = PetscFree5(*(PetscInt***)&irow,*(PetscInt***)&icol,nrow,ncol,rtable);CHKERRQ(ierr);
  ierr = PetscFree3(w1,w3,w4);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rmap[0]);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
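
/*
   MatScale_MPIDense - scaling a parallel dense matrix only requires scaling the
   locally owned block, since every entry of the matrix is stored exactly once,
   on the rank that owns its row.
*/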

PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
{
  Mat_MPIDense   *A = (Mat_MPIDense*)inA->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatScale(A->A,alpha);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}