#define PETSCMAT_DLL

/*
   Support for the parallel dense matrix vector multiply
*/
#include "src/mat/impls/dense/mpi/mpidense.h"

#undef __FUNCT__
#define __FUNCT__ "MatSetUpMultiply_MPIDense"
PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
  PetscErrorCode ierr;
  IS             from,to;
  Vec            gvec;

  PetscFunctionBegin;
  /* Create local vector that is used to scatter into */
  ierr = VecCreateSeq(PETSC_COMM_SELF,mat->cmap->N,&mdn->lvec);CHKERRQ(ierr);

  /* Create temporary index set for building scatter gather */
  ierr = ISCreateStride(((PetscObject)mat)->comm,mat->cmap->N,0,1,&from);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,mat->cmap->N,0,1,&to);CHKERRQ(ierr);

  /* Create temporary global vector to generate scatter context */
  /* n = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */

  ierr = VecCreateMPIWithArray(((PetscObject)mat)->comm,mdn->nvec,mat->cmap->N,PETSC_NULL,&gvec);CHKERRQ(ierr);

  /* Generate the scatter context */
  ierr = VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,mdn->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,mdn->lvec);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,from);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,to);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,gvec);CHKERRQ(ierr);

  ierr = ISDestroy(to);CHKERRQ(ierr);
  ierr = ISDestroy(from);CHKERRQ(ierr);
  ierr = VecDestroy(gvec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

EXTERN PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIDense"
PetscErrorCode MatGetSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

  /* Make sure every processor loops through the nstages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
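/*
   Notes on MatGetSubMatrices_MPIDense_Local (a sketch derived from the code below,
   added as orientation; the concrete numbers are only illustrative):

   The routine works in three phases: (1) count, per destination process, how many
   rows of each index set live there; (2) send those global row numbers and receive
   the requested rows back as dense row values; (3) copy locally owned rows directly
   and scatter the received values into the sequential submatrices.

   Layout of a request message as it is packed into sbuf1[j] below:

       [ nis | is#,count | is#,count | ... | row,row,row,... ]

   i.e. a header of 1 + 2*(number of index sets with rows on process j) integers,
   followed by the requested global row numbers grouped by index set.
*/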
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIDense_Local"
PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
  Mat            A = c->A;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
  PetscInt       N = C->cmap->N,rstart = C->rmap->rstart,count;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w1,*w3,*w4,*rtable,start;
  PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
  PetscInt       is_no,jmax,**rmap,*rmap_i;
  PetscInt       len,ctr_j,*sbuf1_j,*rbuf1_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm       comm;
  PetscScalar    **rbuf2,**sbuf2;
  PetscTruth     sorted;

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  tag0 = ((PetscObject)C)->tag;
  size = c->size;
  rank = c->rank;
  m    = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  /* Check that the row and column index sets are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(isrow[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  len    = 2*ismax*(sizeof(PetscInt*)+sizeof(PetscInt)) + (m+1)*sizeof(PetscInt);
  ierr   = PetscMalloc(len,&irow);CHKERRQ(ierr);
  icol   = irow + ismax;
  nrow   = (PetscInt*)(icol + ismax);
  ncol   = nrow + ismax;
  rtable = ncol + ismax;

  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create lookup table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap->range[i+1];
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }

  /* Evaluate communication - mesg to whom, length of mesg, and buffer space
     required. Based on this, buffers are allocated, and data copied into them */
  ierr = PetscMalloc(size*4*sizeof(PetscInt),&w1);CHKERRQ(ierr); /* mesg size */
  w3   = w1 + 2*size;   /* no of IS that needs to be sent to proc i */
  w4   = w3 + size;     /* temp work space used in determining w1, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(PetscInt));CHKERRQ(ierr); /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j]; w3[j]++;}
    }
  }

  nrqs       = 0; /* no of outgoing messages */
  msz        = 0; /* total mesg length (for all procs) */
  w1[2*rank] = 0; /* no mesg sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i]) { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    w1[2*j] += w1[2*j+1] + 2*w3[j];
    msz     += w1[2*j];
  }
  /* Do a global reduction to determine how many messages to expect */
  ierr = PetscMaxSum(comm,w1,&bsz,&nrqr);CHKERRQ(ierr);
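  /*
     A minimal sketch of the convention assumed for w1 and PetscMaxSum() here,
     consistent with how the results are used below: on entry

         w1[2*j]   = length of the request this process wants to send to process j
         w1[2*j+1] = 1 if such a request exists, 0 otherwise

     and on return bsz is the longest request message this process will receive
     while nrqr is the number of processes that will send one, so nrqr receive
     buffers of bsz integers each are sufficient.
  */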
  /* Allocate memory for recv buffers.  Probably none is needed if nrqr = 0 */
  len      = (nrqr+1)*sizeof(PetscInt*) + nrqr*bsz*sizeof(PetscInt);
  ierr     = PetscMalloc(len,&rbuf1);CHKERRQ(ierr);
  rbuf1[0] = (PetscInt*)(rbuf1 + nrqr);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&r_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRQ(ierr);
  }

  /* Allocate memory for outgoing messages */
  len  = 2*size*sizeof(PetscInt*) + 2*msz*sizeof(PetscInt) + size*sizeof(PetscInt);
  ierr = PetscMalloc(len,&sbuf1);CHKERRQ(ierr);
  ptr  = sbuf1 + size; /* Pointers to the data in outgoing buffers */
  ierr = PetscMemzero(sbuf1,2*size*sizeof(PetscInt*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp = (PetscInt*)(ptr + size);
  ctr = tmp + 2*msz;

  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[2*j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) { /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* Post receives to capture the row data from other procs */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscScalar*),&rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
    ierr  = PetscMalloc((count+1)*sizeof(PetscScalar),&rbuf2[i]);CHKERRQ(ierr);
    ierr  = MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }
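  /*
     Layout assumed for the row-value messages exchanged below (sbuf2 on the sending
     side, rbuf2 on the receiving side): the requested rows are stored back to back,
     one full row of N scalars per requested global row, in the same order in which
     the row numbers were packed into sbuf1.  Illustrative example only: if 3 rows
     were requested from process j, the matching rbuf2[] buffer holds 3*N scalars
     and the second requested row starts at offset N.
  */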
  /* Receive messages (row numbers) and then pack and send off the row values
     to the correct processors */

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscScalar*),&sbuf2);CHKERRQ(ierr);

  {
    PetscScalar *sbuf2_i,*v_start;
    PetscInt    s_proc;
    for (i=0; i<nrqr; ++i) {
      ierr    = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      s_proc  = r_status1[i].MPI_SOURCE; /* sending processor */
      rbuf1_i = rbuf1[idex];             /* Actual message from s_proc */
      /* no of rows = end - start; start is the offset of the first row index
         (just past the header), while end is the total length of the message */
      start = 2*rbuf1_i[0] + 1;
      ierr  = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
      /* allocate memory sufficient to hold all the row values */
      ierr    = PetscMalloc((end-start)*N*sizeof(PetscScalar),&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++; v_start += C->rmap->n;
        }
      }
      /* Now send off the data */
      ierr = MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  /* End Send-Recv of IS + row_numbers */
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
      }
      ierr = PetscMemzero(mat->v,submats[i]->rmap->n*submats[i]->cmap->n*sizeof(PetscScalar));CHKERRQ(ierr);
      submats[i]->factor = C->factor;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
      ierr = MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
      ierr = MatSeqDenseSetPreallocation(submats[i],PETSC_NULL);CHKERRQ(ierr);
    }
  }

  /* Assemble the matrices (locally owned rows) */
  {
    PetscInt    col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*C->rmap->n];
          }
        }
      }
    }
  }

  /* Create row map. This maps c->row to submat->row for each submat */
  /* this is a very expensive operation wrt memory usage */
  len     = (1+ismax)*sizeof(PetscInt*) + ismax*C->rmap->N*sizeof(PetscInt);
  ierr    = PetscMalloc(len,&rmap);CHKERRQ(ierr);
  rmap[0] = (PetscInt*)(rmap + ismax);
  ierr    = PetscMemzero(rmap[0],ismax*C->rmap->N*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->rmap->N;}
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }
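  /*
     The copies above and below assume column-major (Fortran-style) dense storage:
     entry (row,col) of a dense block lives at v[row + col*lda], with lda equal to
     the number of local rows.  A minimal indexing sketch under that assumption,
     with (grow,gcol) the global indices from isrow[i]/iscol[i] and (j,k) the
     corresponding positions inside submatrix i:

         source:      mat_v [ (grow - rstart) + gcol*C->rmap->n ]
         destination: imat_v[  j              + k*nrow[i]       ]
  */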
  /* Now Receive the row_values and assemble the rest of the matrix */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);

  {
    PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      ierr = MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);CHKERRQ(ierr);
      /* Now dig out the corresponding sbuf1, which contains the IS data_structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) { /* For each row */
          row = sbuf1_i[ct1]; ct1++;
          row = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  /* Destroy allocated memory */
  ierr = PetscFree(irow);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(sbuf1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}