#define PETSCMAT_DLL

/*
   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include "src/mat/impls/aij/seq/aij.h" /*I "petscmat.h" I*/
#include "src/mat/utils/freespace.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"
#include "petscbt.h"
#include "src/mat/impls/dense/mpi/mpidense.h"

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr); /* numeric product is computed as well */
  } else if (scall == MAT_REUSE_MATRIX){
    ierr = MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);CHKERRQ(ierr);
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_Mat_MatMatMultMPI"
PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult = (Mat_MatMatMultMPI*)ptr;

  PetscFunctionBegin;
  ierr = PetscFree(mult->startsj);CHKERRQ(ierr);
  ierr = PetscFree(mult->bufa);CHKERRQ(ierr);
  if (mult->isrowa){ierr = ISDestroy(mult->isrowa);CHKERRQ(ierr);}
  if (mult->isrowb){ierr = ISDestroy(mult->isrowb);CHKERRQ(ierr);}
  if (mult->iscolb){ierr = ISDestroy(mult->iscolb);CHKERRQ(ierr);}
  if (mult->C_seq){ierr = MatDestroy(mult->C_seq);CHKERRQ(ierr);}
  if (mult->A_loc){ierr = MatDestroy(mult->A_loc);CHKERRQ(ierr);}
  if (mult->B_seq){ierr = MatDestroy(mult->B_seq);CHKERRQ(ierr);}
  if (mult->B_loc){ierr = MatDestroy(mult->B_loc);CHKERRQ(ierr);}
  if (mult->B_oth){ierr = MatDestroy(mult->B_oth);CHKERRQ(ierr);}
  ierr = PetscFree(mult->abi);CHKERRQ(ierr);
  ierr = PetscFree(mult->abj);CHKERRQ(ierr);
  ierr = PetscFree(mult);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
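/*
   MatMatMultSymbolic_MPIAIJ_MPIAIJ() saves the original destroy/duplicate
   function pointers of the product matrix C inside the Mat_MatMatMultMPI
   container and installs the two wrappers below in their place.  The wrappers
   restore the saved pointers before delegating, so the supporting data attached
   to C is freed exactly once and a duplicate of C behaves as a plain MPIAIJ matrix.
*/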
EXTERN PetscErrorCode MatDestroy_AIJ(Mat);

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode    ierr;
  PetscContainer    container;
  Mat_MatMatMultMPI *mult = PETSC_NULL;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }
  A->ops->destroy = mult->MatDestroy;
  ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }
  /* Note: the container is not duplicated, because it requires deep copying of
     several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
     These data sets are only used for repeated calls of MatMatMultNumeric();
     *M is unlikely to be used this way, so we create *M with plain mpiaij format. */
  ierr = (*mult->MatDuplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = mult->MatDestroy;   /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's container! */
  (*M)->ops->duplicate = mult->MatDuplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode    ierr;
  PetscInt          start,end;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  PetscFunctionBegin;
  if (A->cmap.rstart != B->rmap.rstart || A->cmap.rend != B->rmap.rend){
    SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap.rstart,A->cmap.rend,B->rmap.rstart,B->rmap.rend);
  }
  ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);

  /* create a seq matrix B_seq = submatrix of B by taking the rows of B that correspond to the nonzero columns of A */
  ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&mult->isrowb,&mult->iscolb,&mult->brstart,&mult->B_seq);CHKERRQ(ierr);

  /* create a seq matrix A_seq = submatrix of A by taking all local rows of A */
  start = A->rmap.rstart; end = A->rmap.rend;
  ierr = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&mult->isrowa);CHKERRQ(ierr);
  ierr = MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,&mult->isrowa,&mult->isrowb,&mult->A_loc);CHKERRQ(ierr);

  /* compute C_seq = A_seq * B_seq */
  ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_INITIAL_MATRIX,fill,&mult->C_seq);CHKERRQ(ierr);

  /* create mpi matrix C by concatenating C_seq */
  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap.n,MAT_INITIAL_MATRIX,C);CHKERRQ(ierr);

  /* attach the supporting struct to C for reuse of symbolic C */
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,mult);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
  mult->MatDestroy   = (*C)->ops->destroy;
  mult->MatDuplicate = (*C)->ops->duplicate;

  (*C)->ops->destroy   = MatDestroy_MPIAIJ_MatMatMult;
  (*C)->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
  PetscFunctionReturn(0);
}

/* This routine is called ONLY in the case of reusing previously computed symbolic C */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
{
  PetscErrorCode    ierr;
  Mat               *seq;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }

  seq = &mult->B_seq;
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;

  seq = &mult->A_loc;
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;

  ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);CHKERRQ(ierr);

  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr);
  ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap.n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
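/*
   A minimal usage sketch of the two code paths above (illustrative only, not part
   of this file; A, B, C stand for any assembled MPIAIJ matrices with compatible
   parallel layouts, and 2.0 is just a typical fill estimate):

     Mat C;
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);  (symbolic + first numeric product)
     ... change the numerical values, but not the nonzero pattern, of A or B ...
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);    (reuses the data attached to C)
     ierr = MatDestroy(C);CHKERRQ(ierr);                              (also frees the attached container)
*/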
#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  if (contents->workB) {ierr = MatDestroy(contents->workB);CHKERRQ(ierr);}
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscInt               nz   = aij->B->cmap.n;
  PetscContainer         cont;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx  = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;

  PetscFunctionBegin;
  ierr = MatMatMultSymbolic_MPIDense_MPIDense(A,B,0.0,C);CHKERRQ(ierr);

  ierr = PetscContainerCreate(((PetscObject)A)->comm,&cont);CHKERRQ(ierr);
  ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(cont,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(cont,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);

  /* Create work matrix used to store off-processor rows of B needed for the local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap.N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);

  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap.N*from->starts[from->n],PetscScalar,&contents->rvalues,
                      B->cmap.N*to->starts[to->n],PetscScalar,&contents->svalues,
                      from->n,MPI_Request,&contents->rwaits,
                      to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)cont);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(cont);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
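/*
   Dense matrices in PETSc are stored column-major, so in a dense array b with
   nrowsB local rows, entry (r,k) is b[r + nrowsB*k].  The routine below relies on
   this layout when packing the requested rows of B into the send buffers and when
   unpacking the received rows into workB.
*/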
/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx  = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm = ((PetscObject)A)->comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap.N,nrows = aij->B->cmap.n,imdex,nrowsB = B->rmap.n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         cont;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&cont);CHKERRQ(ierr);
  ierr = PetscContainerGetPointer(cont,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap.n) SETERRQ2(PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap.n,nrows);
  sindices = to->indices;
  sstarts  = to->starts;
  sprocs   = to->procs;
  swaits   = contents->swaits;
  svalues  = contents->svalues;

  rindices = from->indices;
  rstarts  = from->starts;
  rprocs   = from->procs;
  rwaits   = contents->rwaits;
  rvalues  = contents->rvalues;

  ierr = MatGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    CHKMEMQ;
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    CHKMEMQ;
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    CHKMEMQ;
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
    CHKMEMQ;
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);
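/*
   The numeric MPIAIJ*MPIDense product is computed blockwise on each process: the
   diagonal block of A is multiplied with the locally owned rows of B, then the
   off-diagonal block of A is multiplied with the off-process rows of B gathered
   into workB by MatMPIDenseScatter(), and the result is added in:

       C_local = A_diag * B_local + A_offdiag * workB
*/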
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
  Mat            workB;

  PetscFunctionBegin;
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
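/*
   A minimal usage sketch of the MPIAIJ*MPIDense path (illustrative only; A is any
   assembled MPIAIJ matrix and B any assembled MPIDense matrix with a compatible
   layout, and 2.0 is just a fill estimate, which is ignored for dense products):

     Mat C;
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);  (creates C and the attached "workB" container)
     ... update the entries of A or B ...
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);    (skips the symbolic phase, reuses workB)
     ierr = MatDestroy(C);CHKERRQ(ierr);
*/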