#define PETSCMAT_DLL

/*
   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include "src/mat/impls/aij/seq/aij.h" /*I "petscmat.h" I*/
#include "src/mat/utils/freespace.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"
#include "petscbt.h"
#include "src/mat/impls/dense/mpi/mpidense.h"

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr); /* numeric product is computed as well */
  } else if (scall == MAT_REUSE_MATRIX){
    ierr = MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);CHKERRQ(ierr);
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}
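/*
   A minimal usage sketch (not part of the library): assuming A and B are assembled
   MATMPIAIJ matrices with compatible layouts, a caller reaches the routines in this
   file through the generic MatMatMult() interface; the fill estimate 2.0 below is
   only a sample value.

     Mat C;
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);
     ...  change numerical values in A and/or B, keeping the nonzero patterns  ...
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,0.0,&C);CHKERRQ(ierr);
     ierr = MatDestroy(C);CHKERRQ(ierr);

   The first call dispatches to MatMatMultSymbolic_MPIAIJ_MPIAIJ() (which also computes
   the numeric product); the second reuses the data attached to C and dispatches to
   MatMatMultNumeric_MPIAIJ_MPIAIJ().
*/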
#undef __FUNCT__
#define __FUNCT__ "PetscObjectContainerDestroy_Mat_MatMatMultMPI"
PetscErrorCode PetscObjectContainerDestroy_Mat_MatMatMultMPI(void *ptr)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult = (Mat_MatMatMultMPI*)ptr;

  PetscFunctionBegin;
  ierr = PetscFree(mult->startsj);CHKERRQ(ierr);
  ierr = PetscFree(mult->bufa);CHKERRQ(ierr);
  if (mult->isrowa){ierr = ISDestroy(mult->isrowa);CHKERRQ(ierr);}
  if (mult->isrowb){ierr = ISDestroy(mult->isrowb);CHKERRQ(ierr);}
  if (mult->iscolb){ierr = ISDestroy(mult->iscolb);CHKERRQ(ierr);}
  if (mult->C_seq){ierr = MatDestroy(mult->C_seq);CHKERRQ(ierr);}
  if (mult->A_loc){ierr = MatDestroy(mult->A_loc);CHKERRQ(ierr);}
  if (mult->B_seq){ierr = MatDestroy(mult->B_seq);CHKERRQ(ierr);}
  if (mult->B_loc){ierr = MatDestroy(mult->B_loc);CHKERRQ(ierr);}
  if (mult->B_oth){ierr = MatDestroy(mult->B_oth);CHKERRQ(ierr);}
  ierr = PetscFree(mult->abi);CHKERRQ(ierr);
  ierr = PetscFree(mult->abj);CHKERRQ(ierr);
  ierr = PetscFree(mult);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

EXTERN PetscErrorCode MatDestroy_AIJ(Mat);

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode       ierr;
  PetscObjectContainer container;
  Mat_MatMatMultMPI    *mult = PETSC_NULL;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }
  A->ops->destroy = mult->MatDestroy;
  ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  ierr = PetscObjectContainerDestroy(container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode       ierr;
  Mat_MatMatMultMPI    *mult;
  PetscObjectContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }
  ierr = (*mult->MatDuplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = mult->MatDestroy;   /* = MatDestroy_MPIAIJ; *M does not duplicate A's container! */
  (*M)->ops->duplicate = mult->MatDuplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode       ierr;
  PetscInt             start,end;
  Mat_MatMatMultMPI    *mult;
  PetscObjectContainer container;

  PetscFunctionBegin;
  if (A->cmap.rstart != B->rmap.rstart || A->cmap.rend != B->rmap.rend){
    SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap.rstart,A->cmap.rend,B->rmap.rstart,B->rmap.rend);
  }
  ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);

  /* create a seq matrix B_seq = submatrix of B formed by the rows of B that correspond to nonzero columns of A */
  ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&mult->isrowb,&mult->iscolb,&mult->brstart,&mult->B_seq);CHKERRQ(ierr);

  /* create a seq matrix A_loc = submatrix of A formed by all local rows of A */
  start = A->rmap.rstart; end = A->rmap.rend;
  ierr = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&mult->isrowa);CHKERRQ(ierr);
  ierr = MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,&mult->isrowa,&mult->isrowb,&mult->A_loc);CHKERRQ(ierr);

  /* compute C_seq = A_loc * B_seq */
  ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_INITIAL_MATRIX,fill,&mult->C_seq);CHKERRQ(ierr);

  /* create mpi matrix C by concatenating the C_seq */
  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMerge(A->comm,mult->C_seq,B->cmap.n,MAT_INITIAL_MATRIX,C);CHKERRQ(ierr);

  /* attach the supporting struct to C for reuse of symbolic C */
  ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscObjectContainerSetPointer(container,mult);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscObjectContainerSetUserDestroy(container,PetscObjectContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);

  mult->MatDestroy   = (*C)->ops->destroy;
  mult->MatDuplicate = (*C)->ops->duplicate;

  (*C)->ops->destroy   = MatDestroy_MPIAIJ_MatMatMult;
  (*C)->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
  PetscFunctionReturn(0);
}
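/*
   The per-process algebra used by the symbolic routine above, restated for reference:
   each process forms

        C_seq = A_loc * B_seq

   where A_loc contains all locally owned rows of A (with columns condensed to isrowb)
   and B_seq contains the rows of B corresponding to the nonzero columns of A; MatMerge()
   then stacks the per-process C_seq blocks, in rank order, into the parallel matrix C.
*/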
/* This routine is called ONLY in the case of reusing previously computed symbolic C */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
{
  PetscErrorCode       ierr;
  Mat                  *seq;
  Mat_MatMatMultMPI    *mult;
  PetscObjectContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }

  seq = &mult->B_seq;
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;

  seq = &mult->A_loc;
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;

  ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);CHKERRQ(ierr);

  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMerge(A->comm,mult->C_seq,B->cmap.n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
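/*
   A usage sketch for the dense case (not part of the library): assuming A is MATMPIAIJ,
   B is an assembled MATMPIDENSE matrix whose row layout matches A's column layout, and
   the generic MatMatMult() interface dispatches to the routine above:

     Mat C;
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,0.0,&C);CHKERRQ(ierr); /* fill is not used on this path */
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,0.0,&C);CHKERRQ(ierr);   /* reuses workB and the send/receive buffers attached to C */
     ierr = MatDestroy(C);CHKERRQ(ierr);
*/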
typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  if (contents->workB) {ierr = MatDestroy(contents->workB);CHKERRQ(ierr);}
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij  = (Mat_MPIAIJ*)A->data;
  PetscInt               nz    = aij->B->cmap.n;
  PetscObjectContainer   cont;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;

  PetscFunctionBegin;
  ierr = MatMatMultSymbolic_MPIDense_MPIDense(A,B,0.0,C);CHKERRQ(ierr);

  ierr = PetscObjectContainerCreate(A->comm,&cont);CHKERRQ(ierr);
  ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
  ierr = PetscObjectContainerSetPointer(cont,contents);CHKERRQ(ierr);
  ierr = PetscObjectContainerSetUserDestroy(cont,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);

  /* Create work matrix used to store off-processor rows of B needed for the local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap.N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);

  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap.N*from->starts[from->n],PetscScalar,&contents->rvalues,
                      B->cmap.N*to->starts[to->n],PetscScalar,&contents->svalues,
                      from->n,MPI_Request,&contents->rwaits,
                      to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)cont);CHKERRQ(ierr);
  ierr = PetscObjectContainerDestroy(cont);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
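/*
   A note on the sizes used in PetscMalloc4() above (based on the usual
   VecScatter_MPI_General layout, where starts[] holds cumulative row counts per
   message): from->starts[from->n] is the total number of rows this process receives
   and to->starts[to->n] the total number it sends, so rvalues and svalues provide
   room for one full dense row of B (B->cmap.N scalars) per transferred row.
*/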
/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij  = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               i,j,k;
  PetscMPIInt            *sindices,*sstarts,*sprocs,*rindices,*rstarts,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm  = A->comm;
  PetscMPIInt            tag   = ctx->tag,ncols = B->cmap.N,nrows = aij->B->cmap.n,imdex,nrowsB = B->rmap.n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscObjectContainer   cont;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&cont);CHKERRQ(ierr);
  ierr = PetscObjectContainerGetPointer(cont,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap.n) SETERRQ2(PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap.n,nrows);
  sindices = to->indices;
  sstarts  = to->starts;
  sprocs   = to->procs;
  swaits   = contents->swaits;
  svalues  = contents->svalues;

  rindices = from->indices;
  rstarts  = from->starts;
  rprocs   = from->procs;
  rwaits   = contents->rwaits;
  rvalues  = contents->rvalues;

  ierr = MatGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    CHKMEMQ;
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    CHKMEMQ;
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    CHKMEMQ;
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
    CHKMEMQ;
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
  Mat            workB;

  PetscFunctionBegin;
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
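/*
   Summary of the numeric MPIAIJ*MPIDense product above, restated as a formula: with A
   split into its diagonal block A_d (aij->A) and off-diagonal block A_o (aij->B), each
   process computes its local rows of C as

        C_loc = A_d * B_loc + A_o * workB

   where B_loc is the locally owned part of B and workB holds the nonlocal rows of B
   gathered by MatMPIDenseScatter().
*/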