#define PETSCMAT_DLL

/*
   Support for the parallel dense matrix-vector multiply
*/
#include "src/mat/impls/dense/mpi/mpidense.h"

#undef __FUNCT__
#define __FUNCT__ "MatSetUpMultiply_MPIDense"
PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
  PetscErrorCode ierr;
  IS             from,to;
  Vec            gvec;

  PetscFunctionBegin;
  /* Create local vector that is used to scatter into */
  ierr = VecCreateSeq(PETSC_COMM_SELF,mat->cmap.N,&mdn->lvec);CHKERRQ(ierr);

  /* Create temporary index sets for building scatter/gather */
  ierr = ISCreateStride(((PetscObject)mat)->comm,mat->cmap.N,0,1,&from);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,mat->cmap.N,0,1,&to);CHKERRQ(ierr);

  /* Create temporary global vector to generate scatter context */
  /* n = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */
  ierr = VecCreateMPI(((PetscObject)mat)->comm,mdn->nvec,mat->cmap.N,&gvec);CHKERRQ(ierr);

  /* Generate the scatter context */
  ierr = VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,mdn->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,mdn->lvec);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,from);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,to);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,gvec);CHKERRQ(ierr);

  ierr = ISDestroy(to);CHKERRQ(ierr);
  ierr = ISDestroy(from);CHKERRQ(ierr);
  ierr = VecDestroy(gvec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
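
/*
   Note: because both index sets above are strides over all mat->cmap.N entries,
   the scatter context Mvctx copies the ENTIRE global vector into the sequential
   work vector lvec on every process, so the dense multiply can proceed with a
   purely local product afterwards.  A minimal usage sketch, assuming the
   petsc-2.3-era VecScatterBegin/End signature (scatter context passed last;
   newer releases take it first):

     ierr = VecScatterBegin(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);CHKERRQ(ierr);
     ierr = VecScatterEnd(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);CHKERRQ(ierr);
     ierr = MatMult(mdn->A,mdn->lvec,yy);CHKERRQ(ierr);   (* local dense product *)
*/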
EXTERN PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIDense"
PetscErrorCode MatGetSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which the submatrices are extracted;
     each stage handles at most nmax index sets, chosen so that roughly 20 MB of
     index data is in flight at a time */
  nmax = 20*1000000 / (C->cmap.N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

  /* Make sure every processor loops through the same number of stages,
     since the Local routine below is collective */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIDense_Local"
PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
  Mat            A = c->A;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
  PetscInt       N = C->cmap.N,rstart = C->rmap.rstart,count;
  PetscInt       **irow,**icol,*nrow,*ncol,*w1,*w3,*w4,*rtable,start;
  PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
  PetscInt       is_no,jmax,*irow_i,**rmap,*rmap_i;
  PetscInt       len,ctr_j,*sbuf1_j,*rbuf1_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm       comm;
  PetscScalar    **rbuf2,**sbuf2;
  PetscTruth     sorted;

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  tag0 = ((PetscObject)C)->tag;
  size = c->size;
  rank = c->rank;
  m    = C->rmap.N;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  /* Check that the row and column indices are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(isrow[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  len    = 2*ismax*(sizeof(PetscInt*)+sizeof(PetscInt)) + (m+1)*sizeof(PetscInt);
  ierr   = PetscMalloc(len,&irow);CHKERRQ(ierr);
  icol   = irow + ismax;
  nrow   = (PetscInt*)(icol + ismax);
  ncol   = nrow + ismax;
  rtable = ncol + ismax;

  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create lookup table for the mapping: row -> proc that owns it */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap.range[i+1];
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }

  /* Evaluate communication - message to whom, length of message, and buffer
     space required.  Based on this, buffers are allocated and data copied
     into them */
  ierr = PetscMalloc(size*4*sizeof(PetscInt),&w1);CHKERRQ(ierr); /* message sizes */
  w3   = w1 + 2*size;   /* no of IS that needs to be sent to proc i */
  w4   = w3 + size;     /* temp work space used in determining w1, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(PetscInt));CHKERRQ(ierr); /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j]; w3[j]++;}
    }
  }

  nrqs       = 0;  /* no of outgoing messages */
  msz        = 0;  /* total message length (for all procs) */
  w1[2*rank] = 0;  /* no message sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i]) { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message has a header = 1 + 2*(no of IS), followed by the data */
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    w1[2*j] += w1[2*j+1] + 2*w3[j];
    msz     += w1[2*j];
  }
  /* Do a global reduction to determine how many messages to expect */
  ierr = PetscMaxSum(comm,w1,&bsz,&nrqr);CHKERRQ(ierr);

  /* Allocate memory for recv buffers (possibly none are needed when nrqr is 0) */
  len      = (nrqr+1)*sizeof(PetscInt*) + nrqr*bsz*sizeof(PetscInt);
  ierr     = PetscMalloc(len,&rbuf1);CHKERRQ(ierr);
  rbuf1[0] = (PetscInt*)(rbuf1 + nrqr);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&r_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRQ(ierr);
  }

  /* Allocate memory for outgoing messages */
  len  = 2*size*sizeof(PetscInt*) + 2*msz*sizeof(PetscInt) + size*sizeof(PetscInt);
  ierr = PetscMalloc(len,&sbuf1);CHKERRQ(ierr);
  ptr  = sbuf1 + size;   /* pointers to the data in outgoing buffers */
  ierr = PetscMemzero(sbuf1,2*size*sizeof(PetscInt*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp  = (PetscInt*)(ptr + size);
  ctr  = tmp + 2*msz;

  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j         = pa[i];
      iptr     += ict;
      sbuf1[j]  = iptr;
      ict       = w1[2*j];
    }
  }
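
  /*
     Layout of one request message sbuf1[j] sent to processor j (as assembled
     below and parsed on the receiving side):

       sbuf1[j][0]                     - number of index sets that have rows owned by proc j
       sbuf1[j][2*k-1], sbuf1[j][2*k]  - IS number and row count for the k-th such IS
       remaining entries               - the requested global row numbers, grouped by IS

     For example, requesting rows {3,7} of IS 0 and row {5} of IS 2, all owned
     by proc j, produces the message [2, 0,2, 2,1, 3,7, 5].
  */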
  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) { /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buffer */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* could optimize this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* Post receives to capture the row data from other procs */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscScalar*),&rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N; /* (rows requested from proc j) * N values */
    ierr  = PetscMalloc((count+1)*sizeof(PetscScalar),&rbuf2[i]);CHKERRQ(ierr);
    ierr  = MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }
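
  /*
     Replies are packed below by walking the local dense array a->v, which is
     stored in column-major order with leading dimension C->rmap.n (the number
     of locally owned rows).  Entry (row,col) of the local block therefore
     lives at a->v[row + col*C->rmap.n], so gathering one full row of length N
     strides through memory with step C->rmap.n.  For instance, with 2 local
     rows and 3 columns, local row 1 is found at v[1], v[3], v[5].
  */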
  /* Receive the messages (row numbers), then pack and send off the row values
     to the correct processors */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscScalar*),&sbuf2);CHKERRQ(ierr);

  {
    PetscScalar *sbuf2_i,*v_start;
    PetscInt    s_proc;
    for (i=0; i<nrqr; ++i) {
      ierr    = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      s_proc  = r_status1[i].MPI_SOURCE; /* requesting processor */
      rbuf1_i = rbuf1[idex];             /* actual message from s_proc */
      /* no of rows requested = end - start, where start skips the header
         (2*rbuf1_i[0] + 1 integers) and end is the total length of the
         message, obtained from MPI_Get_count() */
      start = 2*rbuf1_i[0] + 1;
      ierr  = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
      /* allocate memory sufficient to hold all the row values */
      ierr    = PetscMalloc((end-start)*N*sizeof(PetscScalar),&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart; /* convert global row to local row */
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++; v_start += C->rmap.n;
        }
      }
      /* Now send off the data */
      ierr = MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  /* End send-recv of IS + row numbers */
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->rmap.n != nrow[i]) || (submats[i]->cmap.n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");
      }
      ierr = PetscMemzero(mat->v,submats[i]->rmap.n*submats[i]->cmap.n*sizeof(PetscScalar));CHKERRQ(ierr);
      submats[i]->factor = C->factor;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
      ierr = MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
      ierr = MatSeqDenseSetPreallocation(submats[i],PETSC_NULL);CHKERRQ(ierr);
    }
  }

  /* Assemble the locally owned rows while the replies are in flight */
  {
    PetscInt    col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*C->rmap.n];
          }
        }
      }
    }
  }
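
  /*
     The reverse map built next is a dense ismax x C->rmap.N table:
     rmap[i][r] gives the row of submatrix i into which global row r of C
     lands.  With it, each incoming reply row is placed with a direct table
     lookup instead of a search, at the cost of ismax*C->rmap.N integers of
     memory (hence the warning below).
  */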
  /* Create row map: this maps C's global row numbers to rows of each submatrix */
  /* this is a very expensive operation wrt memory usage */
  len     = (1+ismax)*sizeof(PetscInt*) + ismax*C->rmap.N*sizeof(PetscInt);
  ierr    = PetscMalloc(len,&rmap);CHKERRQ(ierr);
  rmap[0] = (PetscInt*)(rmap + ismax);
  ierr    = PetscMemzero(rmap[0],ismax*C->rmap.N*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->rmap.N;}
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Now receive the row values and assemble the rest of the matrix */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);

  {
    PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      ierr = MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);CHKERRQ(ierr);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max + 1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) { /* For each row */
          row = sbuf1_i[ct1]; ct1++;
          row = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End send-recv of row values */
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  /* Destroy allocated memory */
  ierr = PetscFree(irow);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(sbuf1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
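
/*
   Usage sketch (hypothetical caller code, not part of this file): extract the
   leading 3x3 principal submatrix of a parallel dense matrix C; every process
   receives its own sequential copy.  Both index sets must be sorted, as
   enforced by the checks above.

     IS  is;
     Mat *submat;
     ierr = ISCreateStride(PETSC_COMM_SELF,3,0,1,&is);CHKERRQ(ierr);
     ierr = MatGetSubMatrices(C,1,&is,&is,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
     ...use submat[0], a 3x3 SeqDense matrix...
     ierr = ISDestroy(is);CHKERRQ(ierr);
*/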