/*
   Support for the parallel dense matrix vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
  PetscErrorCode ierr;
  IS             from,to;
  Vec            gvec;

  PetscFunctionBegin;
  /* Create local vector that is used to scatter into */
  ierr = VecCreateSeq(PETSC_COMM_SELF,mat->cmap->N,&mdn->lvec);CHKERRQ(ierr);

  /* Create temporary index set for building scatter gather */
  ierr = ISCreateStride(PETSC_COMM_SELF,mat->cmap->N,0,1,&from);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,mat->cmap->N,0,1,&to);CHKERRQ(ierr);

  /* Create temporary global vector to generate scatter context */
  /* n = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */
  ierr = VecCreateMPIWithArray(PetscObjectComm((PetscObject)mat),1,mdn->nvec,mat->cmap->N,NULL,&gvec);CHKERRQ(ierr);

  /* Generate the scatter context */
  ierr = VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->lvec);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)from);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)to);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)gvec);CHKERRQ(ierr);

  ierr = ISDestroy(&to);CHKERRQ(ierr);
  ierr = ISDestroy(&from);CHKERRQ(ierr);
  ierr = VecDestroy(&gvec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);

PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscCalloc1(ismax+1,submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);
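  /* The 20*1000000 constant above appears to be a heuristic that keeps each stage's index
     traffic to roughly 20 MB.  For example (assuming 4-byte PetscInt and C->cmap->N = 1,000,000
     columns), nmax = 20000000/4000000 = 5, so at most 5 index sets are extracted per stage. */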

  /* Make sure every processor loops through the nstages */
  ierr = MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr);

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatCreateSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------*/
PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
  Mat            A = c->A;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
  PetscInt       N = C->cmap->N,rstart = C->rmap->rstart,count;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w1,*w3,*w4,*rtable,start;
  PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
  PetscInt       is_no,jmax,**rmap,*rmap_i;
  PetscInt       ctr_j,*sbuf1_j,*rbuf1_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm       comm;
  PetscScalar    **rbuf2,**sbuf2;
  PetscBool      sorted;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  tag0 = ((PetscObject)C)->tag;
  size = c->size;
  rank = c->rank;
  m    = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);

  /* Check that the row and column index sets are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(isrow[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  ierr = PetscMalloc5(ismax,(PetscInt***)&irow,ismax,(PetscInt***)&icol,ismax,&nrow,ismax,&ncol,m,&rtable);CHKERRQ(ierr);
  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create a lookup table for the mapping: global row -> owning proc */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap->range[i+1];
    for (; j<jmax; j++) rtable[j] = i;
  }
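  /* Illustration (hypothetical layout): with 2 processes owning rows [0,4) and [4,8),
     rtable = {0,0,0,0,1,1,1,1}, so rtable[row] gives the rank that owns global row 'row'. */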

  /* Evaluate the communication: to whom to send a message, the length of each message, and the
     buffer space required.  Based on this, buffers are allocated and data copied into them. */
  ierr = PetscMalloc3(2*size,&w1,size,&w3,size,&w4);CHKERRQ(ierr);
  ierr = PetscArrayzero(w1,size*2);CHKERRQ(ierr); /* initialize work vector */
  ierr = PetscArrayzero(w3,size);CHKERRQ(ierr);   /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscArrayzero(w4,size);CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j]; w3[j]++;}
    }
  }

  nrqs       = 0;  /* number of outgoing messages */
  msz        = 0;  /* total message length (for all procs) */
  w1[2*rank] = 0;  /* no message sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i]) { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
  }
  ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message has a header = 1 + 2*(number of ISs) + data */
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    w1[2*j] += w1[2*j+1] + 2*w3[j];
    msz     += w1[2*j];
  }
  /* Do a global reduction to determine how many messages to expect */
  ierr = PetscMaxSum(comm,w1,&bsz,&nrqr);CHKERRQ(ierr);

  /* Allocate memory for recv buffers.  Make sure rbuf1[0] exists by adding 1 to the buffer length */
  ierr = PetscMalloc1(nrqr+1,&rbuf1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr*bsz,&rbuf1[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  ierr = PetscMalloc1(nrqr+1,&r_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRQ(ierr);
  }

  /* Allocate memory for outgoing messages */
  ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr);
  ierr = PetscArrayzero(sbuf1,size);CHKERRQ(ierr);
  ierr = PetscArrayzero(ptr,size);CHKERRQ(ierr);
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[2*j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscArrayzero(sbuf1[j]+1,2*w3[j]);CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscArrayzero(ctr,size);CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) { /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buffer */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }
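  /* Each request message sent above has the layout:
       entry 0:               number of index sets that need rows owned by the destination proc
       entries 2*k-1, 2*k:    (IS number, number of requested rows) for k = 1..entry 0
       remaining entries:     the requested global row numbers, grouped by IS
     The receiver uses this header to decide how many rows of values to send back for each IS. */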

  /* Post receives to capture the row data from other procs */
  ierr = PetscMalloc1(nrqs+1,&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqs+1,&rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
    ierr  = PetscMalloc1(count+1,&rbuf2[i]);CHKERRQ(ierr);
    ierr  = MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processors */
  ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&sbuf2);CHKERRQ(ierr);

  {
    PetscScalar *sbuf2_i,*v_start;
    PetscInt    s_proc;
    for (i=0; i<nrqr; ++i) {
      ierr    = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      s_proc  = r_status1[i].MPI_SOURCE; /* requesting processor (destination of the reply) */
      rbuf1_i = rbuf1[idex];             /* actual message from s_proc */
      /* number of requested rows = end - start: start skips past the header, while end
         is the total length of the received buffer */
      start = 2*rbuf1_i[0] + 1;
      ierr  = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
      /* allocate memory sufficient to hold all the row values */
      ierr    = PetscMalloc1((end-start)*N,&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start   += C->rmap->n;
        }
      }
      /* Now send off the data */
      ierr = MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  /* End send-recv of IS + row numbers */
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
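  /* Note on layout: the dense values in Mat_SeqDense are stored column-major; this routine
     assumes the leading dimension equals the local row count, which is why the packing loop
     above advances v_start by C->rmap->n per column and the assembly below indexes
     mat_vi[col*C->rmap->n] and imat_vi[k*m]. */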

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
      ierr = PetscArrayzero(mat->v,submats[i]->rmap->n*submats[i]->cmap->n);CHKERRQ(ierr);

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
      ierr = MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
      ierr = MatSeqDenseSetPreallocation(submats[i],NULL);CHKERRQ(ierr);
    }
  }

  /* Assemble the matrices: first the part that is available locally */
  {
    PetscInt    col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*C->rmap->n];
          }
        }
      }
    }
  }

  /* Create the row map: this maps C's global row to submat's local row for each submat */
  /* this is a very expensive operation wrt memory usage */
  ierr = PetscMalloc1(ismax,&rmap);CHKERRQ(ierr);
  ierr = PetscCalloc1(ismax*C->rmap->N,&rmap[0]);CHKERRQ(ierr);
  for (i=1; i<ismax; i++) rmap[i] = rmap[i-1] + C->rmap->N;
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Now receive the row values and assemble the rest of the matrix */
  ierr = PetscMalloc1(nrqs+1,&r_status2);CHKERRQ(ierr);
  {
    PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      ierr = MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);CHKERRQ(ierr);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) { /* For each row */
          row     = sbuf1_i[ct1]; ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End send-recv of row values */
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&s_status2);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  ierr = PetscFree5(*(PetscInt***)&irow,*(PetscInt***)&icol,nrow,ncol,rtable);CHKERRQ(ierr);
  ierr = PetscFree3(w1,w3,w4);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rmap[0]);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
{
  Mat_MPIDense   *A = (Mat_MPIDense*)inA->data;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->A->data;
  PetscScalar    oalpha = alpha;
  PetscErrorCode ierr;
  PetscBLASInt   one = 1,nz;

  PetscFunctionBegin;
  ierr = PetscBLASIntCast(inA->rmap->n*inA->cmap->N,&nz);CHKERRQ(ierr);
  PetscStackCallBLAS("BLASscal",BLASscal_(&nz,&oalpha,a->v,&one));
  ierr = PetscLogFlops(nz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
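
/*
   Usage sketch (caller side, not part of this file): MatCreateSubMatrices() dispatches to the
   routines above for a MATMPIDENSE matrix.  The index sets must be sorted, as checked in
   MatCreateSubMatrices_MPIDense_Local().  The selection below is hypothetical.

     Mat            C;              // parallel dense matrix of type MATMPIDENSE
     IS             isrow,iscol;
     Mat            *submat;
     PetscErrorCode ierr;

     // every process requests the first 5 global rows and columns
     ierr = ISCreateStride(PETSC_COMM_SELF,5,0,1,&isrow);CHKERRQ(ierr);
     ierr = ISCreateStride(PETSC_COMM_SELF,5,0,1,&iscol);CHKERRQ(ierr);
     ierr = MatCreateSubMatrices(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
     // ... use the sequential dense matrix submat[0] ...
     ierr = MatDestroySubMatrices(1,&submat);CHKERRQ(ierr);
     ierr = ISDestroy(&isrow);CHKERRQ(ierr);
     ierr = ISDestroy(&iscol);CHKERRQ(ierr);
*/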