/*$Id: mmdense.c,v 1.31 2000/07/10 03:39:30 bsmith Exp bsmith $*/

/*
   Support for the parallel dense matrix vector multiply
*/
#include "src/mat/impls/dense/mpi/mpidense.h"
#include "src/vec/vecimpl.h"

#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"MatSetUpMultiply_MPIDense"
int MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
  int          ierr;
  IS           from,to;
  Vec          gvec;

  PetscFunctionBegin;
  /* Create local vector that is used to scatter into */
  ierr = VecCreateSeq(PETSC_COMM_SELF,mdn->N,&mdn->lvec);CHKERRQ(ierr);

  /* Create temporary index sets for building scatter-gather */
  ierr = ISCreateStride(mat->comm,mdn->N,0,1,&from);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,mdn->N,0,1,&to);CHKERRQ(ierr);

  /* Create temporary global vector to generate scatter context */
  /* n = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */

  ierr = VecCreateMPI(mat->comm,mdn->nvec,mdn->N,&gvec);CHKERRQ(ierr);

  /* Generate the scatter context */
  ierr = VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);CHKERRQ(ierr);
  PLogObjectParent(mat,mdn->Mvctx);
  PLogObjectParent(mat,mdn->lvec);
  PLogObjectParent(mat,from);
  PLogObjectParent(mat,to);
  PLogObjectParent(mat,gvec);

  ierr = ISDestroy(to);CHKERRQ(ierr);
  ierr = ISDestroy(from);CHKERRQ(ierr);
  ierr = VecDestroy(gvec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
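/*
   Illustrative sketch (not part of the original source): how the scatter context
   built above is typically consumed by the parallel matrix-vector product
   (compare MatMult_MPIDense in this directory).  The function name below is
   hypothetical and the VecScatter argument order assumes the PETSc 2.0-era
   interface, so treat this only as a sketch of the data flow.
*/
#if 0
static int MatMult_MPIDense_Sketch(Mat mat,Vec xx,Vec yy)
{
  Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
  int          ierr;

  PetscFunctionBegin;
  /* gather the distributed input vector into the full-length local work vector */
  ierr = VecScatterBegin(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);CHKERRQ(ierr);
  ierr = VecScatterEnd(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);CHKERRQ(ierr);
  /* multiply the local dense block by the gathered vector */
  ierr = MatMult(mdn->A,mdn->lvec,yy);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif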
EXTERN int MatGetSubMatrices_MPIDense_Local(Mat,int,IS*,IS*,MatReuse,Mat*);
#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"MatGetSubMatrices_MPIDense"
int MatGetSubMatrices_MPIDense(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat **submat)
{
  Mat_MPIDense *c = (Mat_MPIDense*)C->data;
  int          nmax,nstages_local,nstages,i,pos,max_no,ierr;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    *submat = (Mat*)PetscMalloc((ismax+1)*sizeof(Mat));CHKPTRQ(*submat);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (c->N * sizeof(int));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through the nstages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);CHKERRQ(ierr);

  for (i=0,pos=0; i<nstages; i++) {
    if      (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax)      max_no = 0;
    else                        max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
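/*
   Illustrative caller-side sketch (not part of the original source): extracting one
   sequential dense submatrix from the parallel matrix C through the generic
   MatGetSubMatrices() interface, which for an MPIDENSE matrix ends up in
   MatGetSubMatrices_MPIDense above.  The index values and function name are
   hypothetical, and the ISCreateGeneral/destroy calling sequence assumes the
   PETSc 2.0-era API.  Note that the routine is collective and that both the row
   and column index sets must be sorted, as checked in
   MatGetSubMatrices_MPIDense_Local below.
*/
#if 0
static int GetSubMatrixExample(Mat C)
{
  IS  isrow,iscol;
  Mat *submat;
  int ierr,rows[3] = {0,2,5},cols[2] = {1,3};

  PetscFunctionBegin;
  ierr = ISCreateGeneral(PETSC_COMM_SELF,3,rows,&isrow);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,2,cols,&iscol);CHKERRQ(ierr);
  /* every process in C's communicator must make this call, each with its own index sets */
  ierr = MatGetSubMatrices(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
  /* ... use submat[0], a 3 x 2 sequential dense matrix ... */
  ierr = MatDestroy(submat[0]);CHKERRQ(ierr);
  ierr = PetscFree(submat);CHKERRQ(ierr);
  ierr = ISDestroy(isrow);CHKERRQ(ierr);
  ierr = ISDestroy(iscol);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif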
#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"MatGetSubMatrices_MPIDense_Local"
int MatGetSubMatrices_MPIDense_Local(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat *submats)
{
  Mat_MPIDense *c = (Mat_MPIDense*)C->data;
  Mat          A = c->A;
  Mat_SeqDense *a = (Mat_SeqDense*)A->data,*mat;
  int          N = c->N,rstart = c->rstart,count;
  int          **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,*rtable,start,end,size;
  int          **sbuf1,rank,m,i,j,k,l,ct1,ierr,**rbuf1,row,proc;
  int          nrqs,msz,**ptr,index,*ctr,*pa,*tmp,bsz,nrqr;
  int          is_no,jmax,*irow_i,**rmap,*rmap_i;
  int          len,ctr_j,*sbuf1_j,*rbuf1_i;
  int          tag0,tag1;
  MPI_Request  *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status   *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm     comm;
  Scalar       **rbuf2,**sbuf2;

  PetscFunctionBegin;
  comm = C->comm;
  tag0 = C->tag;
  size = c->size;
  rank = c->rank;
  m    = c->M;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  /* Check if the col indices are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(isrow[i],(PetscTruth*)&j);CHKERRQ(ierr);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],(PetscTruth*)&j);CHKERRQ(ierr);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  len    = 2*ismax*(sizeof(int*)+sizeof(int)) + (m+1)*sizeof(int);
  irow   = (int**)PetscMalloc(len);CHKPTRQ(irow);
  icol   = irow + ismax;
  nrow   = (int*)(icol + ismax);
  ncol   = nrow + ismax;
  rtable = ncol + ismax;

  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create hash table for the mapping : row -> proc */
  for (i=0,j=0; i<size; i++) {
    jmax = c->rowners[i+1];
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }

  /* evaluate communication - mesg to who, length of mesg, and buffer space
     required. Based on this, buffers are allocated, and data copied into them */
  w1   = (int*)PetscMalloc(size*4*sizeof(int));CHKPTRQ(w1); /* mesg size */
  w2   = w1 + size;      /* if w2[i] marked, then a message to proc i */
  w3   = w2 + size;      /* no of IS that needs to be sent to proc i */
  w4   = w3 + size;      /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
    }
  }

  nrqs     = 0;  /* no of outgoing messages */
  msz      = 0;  /* total mesg length (for all procs) */
  w1[rank] = 0;  /* no mesg sent to self */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
  }
  pa = (int*)PetscMalloc((nrqs+1)*sizeof(int));CHKPTRQ(pa); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }
  /* Do a global reduction to determine how many messages to expect */
  {
    int *rw1;
    rw1  = (int*)PetscMalloc(2*size*sizeof(int));CHKPTRQ(rw1);
    ierr = MPI_Allreduce(w1,rw1,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
    bsz  = rw1[rank];
    nrqr = rw1[size+rank];
    ierr = PetscFree(rw1);CHKERRQ(ierr);
  }

  /* Allocate memory for recv buffers. Prob none if nrqr = 0 ???? */
  len      = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
  rbuf1    = (int**)PetscMalloc(len);CHKPTRQ(rbuf1);
  rbuf1[0] = (int*)(rbuf1 + nrqr);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  r_waits1 = (MPI_Request*)PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(r_waits1);
  for (i=0; i<nrqr; ++i) {
    ierr = MPI_Irecv(rbuf1[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRQ(ierr);
  }

  /* Allocate memory for outgoing messages */
  len   = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
  sbuf1 = (int**)PetscMalloc(len);CHKPTRQ(sbuf1);
  ptr   = sbuf1 + size;   /* Pointers to the data in outgoing buffers */
  ierr  = PetscMemzero(sbuf1,2*size*sizeof(int*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp = (int*)(ptr + size);
  ctr = tmp + 2*msz;

  {
    int *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  s_waits1 = (MPI_Request*)PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(s_waits1);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[j],MPI_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }
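  /*
     Layout of a request message sbuf1[j] sent to processor j, reconstructed from
     the packing loops above:

        sbuf1[j][0]                      number of index sets that request rows owned by proc j
        sbuf1[j][2*k-1], sbuf1[j][2*k]   for k = 1,..,sbuf1[j][0]: the index set number and the
                                         count of rows requested from that index set
        sbuf1[j][2*w3[j]+1 .. w1[j]-1]   the requested global row numbers, grouped by index set

     so each message has total length w1[j] = 1 + 2*(no of IS) + (no of requested rows).
  */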
  /* Post receives to capture the row data from other procs */
  r_waits2 = (MPI_Request*)PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits2);
  rbuf2    = (Scalar**)PetscMalloc((nrqs+1)*sizeof(Scalar*));CHKPTRQ(rbuf2);
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    count    = (w1[j] - (2*sbuf1[j][0] + 1))*N;
    rbuf2[i] = (Scalar*)PetscMalloc((count+1)*sizeof(Scalar));CHKPTRQ(rbuf2[i]);
    ierr     = MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processors */
  s_waits2  = (MPI_Request*)PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits2);
  r_status1 = (MPI_Status*)PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(r_status1);
  sbuf2     = (Scalar**)PetscMalloc((nrqr+1)*sizeof(Scalar*));CHKPTRQ(sbuf2);

  {
    Scalar *sbuf2_i,*v_start;
    int    s_proc;
    for (i=0; i<nrqr; ++i) {
      ierr    = MPI_Waitany(nrqr,r_waits1,&index,r_status1+i);CHKERRQ(ierr);
      s_proc  = r_status1[i].MPI_SOURCE;  /* requesting processor */
      rbuf1_i = rbuf1[index];             /* actual message from s_proc */
      /* no of rows = end - start; start is the (0-based) offset of the first row number
         in the buffer, while end (from MPI_Get_count) is the total message length */
      start = 2*rbuf1_i[0] + 1;
      MPI_Get_count(r_status1+i,MPI_INT,&end);
      /* allocate memory sufficient to hold all the row values */
      sbuf2[index] = (Scalar*)PetscMalloc((end-start)*N*sizeof(Scalar));CHKPTRQ(sbuf2[index]);
      sbuf2_i      = sbuf2[index];
      /* Now pack the data */
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++; v_start += a->m;
        }
      }
      /* Now send off the data */
      ierr = MPI_Isend(sbuf2[index],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  /* End Send-Recv of IS + row numbers */
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  s_status1 = (MPI_Status*)PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(s_status1);
  ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((mat->m != nrow[i]) || (mat->n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
      }
      ierr = PetscMemzero(mat->v,mat->m*mat->n*sizeof(Scalar));CHKERRQ(ierr);
      submats[i]->factor = C->factor;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreateSeqDense(PETSC_COMM_SELF,nrow[i],ncol[i],PETSC_NULL,submats+i);CHKERRQ(ierr);
    }
  }

  /* Assemble the locally owned rows of each submatrix */
  {
    int    col;
    Scalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*a->m];
          }
        }
      }
    }
  }
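  /*
     Note on the dense storage used above and in the unpacking loop below: both
     Mat_SeqDense blocks keep their entries in column-major order, so entry
     (row,col) of the local block lives at a->v[col*a->m + row] and entry (j,k)
     of submatrix i lives at mat->v[k*nrow[i] + j].  That is why a single row is
     gathered with stride a->m (respectively nrow[i]) rather than contiguously.
  */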
  /* Create row map. This maps c->row to submat->row for each submat */
  /* this is a very expensive operation wrt memory usage */
  len     = (1+ismax)*sizeof(int*) + ismax*c->M*sizeof(int);
  rmap    = (int**)PetscMalloc(len);CHKPTRQ(rmap);
  rmap[0] = (int*)(rmap + ismax);
  ierr    = PetscMemzero(rmap[0],ismax*c->M*sizeof(int));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + c->M;}
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Now receive the row values and assemble the rest of the matrix */
  r_status2 = (MPI_Status*)PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(r_status2);

  {
    int    is_max,tmp1,col,*sbuf1_i,is_sz;
    Scalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      ierr = MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);CHKERRQ(ierr);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max + 1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) { /* For each row */
          row = sbuf1_i[ct1]; ct1++;
          row = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row values */
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  s_status2 = (MPI_Status*)PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(s_status2);
  ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  /* Destroy allocated memory */
  ierr = PetscFree(irow);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(sbuf1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}