/*
   Support for the parallel dense matrix vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Create local vector that is used to scatter into */
  ierr = VecDestroy(&mdn->lvec);CHKERRQ(ierr);
  if (mdn->A) {
    ierr = MatCreateVecs(mdn->A,&mdn->lvec,NULL);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->lvec);CHKERRQ(ierr);
  }
  if (!mdn->Mvctx) {
    ierr = PetscLayoutSetUp(mat->cmap);CHKERRQ(ierr);
    ierr = PetscSFCreate(PetscObjectComm((PetscObject)mat),&mdn->Mvctx);CHKERRQ(ierr);
    ierr = PetscSFSetGraphWithPattern(mdn->Mvctx,mat->cmap,PETSCSF_PATTERN_ALLGATHER);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->Mvctx);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
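/*
   Mvctx gathers the entire parallel input vector into lvec on every rank so
   that the local mat-vec can run on the sequential block mdn->A. A minimal
   sketch of how a mat-vec would consume these objects (illustrative only;
   it assumes the pre-3.15 PetscSFBcast API without an MPI_Op argument, and
   xx/yy stand for the parallel input/output vectors):

     ierr = VecGetArrayRead(xx,&ax);CHKERRQ(ierr);
     ierr = VecGetArray(mdn->lvec,&ay);CHKERRQ(ierr);
     ierr = PetscSFBcastBegin(mdn->Mvctx,MPIU_SCALAR,ax,ay);CHKERRQ(ierr);
     ierr = PetscSFBcastEnd(mdn->Mvctx,MPIU_SCALAR,ax,ay);CHKERRQ(ierr);
     ierr = VecRestoreArray(mdn->lvec,&ay);CHKERRQ(ierr);
     ierr = VecRestoreArrayRead(xx,&ax);CHKERRQ(ierr);
     ierr = MatMult(mdn->A,mdn->lvec,yy);CHKERRQ(ierr);
*/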
static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);

PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscCalloc1(ismax+1,submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which the submatrices are extracted */
  nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through the same number of stages */
  ierr = MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr);

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatCreateSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
  Mat            A = c->A;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
  PetscInt       N = C->cmap->N,rstart = C->rmap->rstart,count;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w1,*w3,*w4,*rtable,start;
  PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
  PetscInt       is_no,jmax,**rmap,*rmap_i;
  PetscInt       ctr_j,*sbuf1_j,*rbuf1_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm       comm;
  PetscScalar    **rbuf2,**sbuf2;
  PetscBool      sorted;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  tag0 = ((PetscObject)C)->tag;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  m    = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  /* Check that the row and column indices are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(isrow[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  ierr = PetscMalloc5(ismax,(PetscInt***)&irow,ismax,(PetscInt***)&icol,ismax,&nrow,ismax,&ncol,m,&rtable);CHKERRQ(ierr);
  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create a lookup table for the mapping: row -> owning process */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap->range[i+1];
    for (; j<jmax; j++) rtable[j] = i;
  }

  /* Evaluate the communication: to whom each message goes, its length, and the
     buffer space required. Based on this, buffers are allocated and data is copied into them. */
  ierr = PetscMalloc3(2*size,&w1,size,&w3,size,&w4);CHKERRQ(ierr);
  ierr = PetscArrayzero(w1,size*2);CHKERRQ(ierr); /* initialize work vector */
  ierr = PetscArrayzero(w3,size);CHKERRQ(ierr);   /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscArrayzero(w4,size);CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j]; w3[j]++;}
    }
  }

  nrqs       = 0; /* number of outgoing messages */
  msz        = 0; /* total message length (over all processes) */
  w1[2*rank] = 0; /* no message sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i]) { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
  }
  ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /* (proc array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    w1[2*j] += w1[2*j+1] + 2*w3[j];
    msz     += w1[2*j];
  }
  /* Do a global reduction to determine how many messages to expect */
  ierr = PetscMaxSum(comm,w1,&bsz,&nrqr);CHKERRQ(ierr);
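  /*
     For reference, the layout of one outgoing index message sbuf1[j], as
     assembled below (this describes the code that follows, not a separate
     wire format):

       sbuf1[j][0]                    number of index sets requesting rows from proc j
       sbuf1[j][2k-1], sbuf1[j][2k]   (IS number, row count) pairs, k = 1..sbuf1[j][0]
       remaining entries              the requested global row numbers, grouped by IS

     Hence the total length is 1 + 2*w3[j] + (number of requested rows), which
     is exactly the w1[2*j] computed above.
  */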
  /* Allocate memory for recv buffers. Make sure rbuf1[0] exists by adding 1 to the buffer length */
  ierr = PetscMalloc1(nrqr+1,&rbuf1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr*bsz,&rbuf1[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  ierr = PetscMalloc1(nrqr+1,&r_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRQ(ierr);
  }

  /* Allocate memory for outgoing messages */
  ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr);
  ierr = PetscArrayzero(sbuf1,size);CHKERRQ(ierr);
  ierr = PetscArrayzero(ptr,size);CHKERRQ(ierr);
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[2*j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscArrayzero(sbuf1[j]+1,2*w3[j]);CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscArrayzero(ctr,size);CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) { /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buffer */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can optimize this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* Post receives to capture the row data from other processes */
  ierr = PetscMalloc1(nrqs+1,&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqs+1,&rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
    ierr  = PetscMalloc1(count+1,&rbuf2[i]);CHKERRQ(ierr);
    ierr  = MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processes */
  ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&sbuf2);CHKERRQ(ierr);
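  /*
     The local block a->v is stored column-major with leading dimension a->lda:
     entry (row,col) lives at a->v[row + col*a->lda]. Extracting one full row
     of length N therefore walks the array with stride lda, which is what the
     packing loop below does. For example, with lda = 4 the row-1 entries of a
     4x3 block sit at offsets 1, 5, and 9.
  */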
  {
    PetscScalar *sbuf2_i,*v_start;
    PetscInt    s_proc;
    for (i=0; i<nrqr; ++i) {
      ierr    = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      s_proc  = r_status1[i].MPI_SOURCE; /* requesting process; the rows are sent back to it */
      rbuf1_i = rbuf1[idex];             /* actual message from s_proc */
      /* no of rows = end - start; start is the offset of the first requested
         row number in the buffer, while end is the received buffer length */
      start = 2*rbuf1_i[0] + 1;
      ierr  = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
      /* allocate memory sufficient to hold all the row values */
      ierr    = PetscMalloc1((end-start)*N,&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += a->lda;
        }
      }
      /* Now send off the data */
      ierr = MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  /* End Send-Recv of IS + row numbers */
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");
      ierr = PetscArrayzero(mat->v,submats[i]->rmap->n*submats[i]->cmap->n);CHKERRQ(ierr);

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
      ierr = MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
      ierr = MatSeqDenseSetPreallocation(submats[i],NULL);CHKERRQ(ierr);
    }
  }

  /* Assemble the matrices: fill in the locally owned rows */
  {
    PetscInt    col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*a->lda];
          }
        }
      }
    }
  }

  /* Create the row map: maps C's global row number to the submatrix row number, for each submatrix */
  /* this is a very expensive operation wrt memory usage */
  ierr = PetscMalloc1(ismax,&rmap);CHKERRQ(ierr);
  ierr = PetscCalloc1(ismax*C->rmap->N,&rmap[0]);CHKERRQ(ierr);
  for (i=1; i<ismax; i++) rmap[i] = rmap[i-1] + C->rmap->N;
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }
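  /*
     rmap[i] is a dense inverse of irow[i]. An illustrative case: if
     irow_i = {3,7,9} then rmap_i[3] = 0, rmap_i[7] = 1, rmap_i[9] = 2, and
     every other entry stays 0 from PetscCalloc1. It is therefore only valid
     to index rmap_i at rows that were actually requested, which is all the
     assembly loop below does.
  */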
  /* Now receive the row values and assemble the rest of the matrix */
  ierr = PetscMalloc1(nrqs+1,&r_status2);CHKERRQ(ierr);
  {
    PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      ierr = MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);CHKERRQ(ierr);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) { /* For each row */
          row     = sbuf1_i[ct1]; ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row values */
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&s_status2);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  ierr = PetscFree5(*(PetscInt***)&irow,*(PetscInt***)&icol,nrow,ncol,rtable);CHKERRQ(ierr);
  ierr = PetscFree3(w1,w3,w4);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rmap[0]);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
{
  Mat_MPIDense   *A = (Mat_MPIDense*)inA->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatScale(A->A,alpha);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
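/*
   Note on MatScale_MPIDense: delegating entirely to the sequential block is
   sufficient because an MPIDense matrix stores each process's rows completely
   in A->A, so scaling the local block on every rank scales the whole parallel
   matrix with no communication.
*/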