/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
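/*
   Typical usage, sketched (illustrative only, not part of this file):

      Mat A,B,C;
      ... assemble MPIAIJ matrices A and B ...
      MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);   symbolic + numeric product
      ... update values of A and/or B, same nonzero pattern ...
      MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);     numeric phase only
      MatDestroy(&C);

   The MAT_REUSE_MATRIX path works because the symbolic phase below caches its
   intermediate data structures inside C.
*/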
#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_Mat_MatMatMultMPI"
PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult = (Mat_MatMatMultMPI*)ptr;

  PetscFunctionBegin;
  ierr = ISDestroy(&mult->isrowa);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->isrowb);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->iscolb);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->C_seq);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->A_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->B_seq);CHKERRQ(ierr);
  ierr = PetscFree(mult);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = PetscFree2(ptap->startsj,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = PetscFree(ptap->api);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
  ierr = ptap->destroy(A);CHKERRQ(ierr);
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult_32"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult_32(Mat A)
{
  PetscErrorCode    ierr;
  PetscContainer    container;
  Mat_MatMatMultMPI *mult = PETSC_NULL;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);
  A->ops->destroy   = mult->destroy;
  A->ops->duplicate = mult->duplicate;
  if (A->ops->destroy) {
    ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  }
  ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult_32"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult_32(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);
  /* Note: the container is not duplicated, because that would require deep copying of
     several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
     These data sets are only used for repeated calls of MatMatMultNumeric();
     it is unlikely that *M will be used that way, so we create *M in plain MPIAIJ format. */
  ierr = (*mult->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = mult->destroy;   /* = MatDestroy_MPIAIJ, *M does not duplicate A's container! */
  (*M)->ops->duplicate = mult->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = ptap->destroy;   /* = MatDestroy_MPIAIJ, *M does not duplicate A's special structure! */
  (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}
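/*
   Numeric phase for C = A*P with both A and P in MPIAIJ format. Per local row i,
   the product row A[i,:]*P is accumulated in the dense work array apa (length
   P->cmap->N, allocated by the symbolic phase), then scattered into the diagonal
   and off-diagonal blocks of C along the cached pattern (api,apj). In outline
   (a sketch of the loop below, with A's diagonal/off-diagonal split suppressed):

      for each local row i of A:
        for each nonzero a_ik in A[i,:]:   apa += a_ik * P[k,:]   (dense axpy)
        scatter apa at columns apj[api[i]..api[i+1]) into row i of C, zeroing apa
*/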
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data,*c = (Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *ad = (Mat_SeqAIJ*)(a->A)->data,*ao = (Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *cd = (Mat_SeqAIJ*)(c->A)->data,*co = (Mat_SeqAIJ*)(c->B)->data;
  PetscInt       *adi = ad->i,*adj,*aoi = ao->i,*aoj;
  PetscScalar    *ada,*aoa,*cda = cd->a,*coa = co->a;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
  PetscScalar    *pa_loc,*pa_oth,*pa,*apa,valtmp,*ca;
  PetscInt       cm = C->rmap->n,anz,pnz;
  Mat_PtAPMPI    *ptap = c->ptap;
  PetscInt       *api,*apj,*apJ,i,j,k,row;
  PetscInt       rstart = C->rmap->rstart,cstart = C->cmap->rstart;
  PetscInt       cdnz,conz,k0,k1;

  PetscFunctionBegin;
  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
  pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;

  /* get apa for storing dense row A[i,:]*P */
  apa = ptap->apa;

  for (i=0; i<cm; i++) {
    /* diagonal portion of A */
    anz = adi[i+1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j=0; j<anz; j++) {
      row = adj[j];
      pnz = pi_loc[row+1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];

      /* perform dense axpy */
      valtmp = ada[j];
      for (k=0; k<pnz; k++) {
        apa[pj[k]] += valtmp*pa[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* off-diagonal portion of A */
    anz = aoi[i+1] - aoi[i];
    aoj = ao->j + aoi[i];
    aoa = ao->a + aoi[i];
    for (j=0; j<anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row+1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];

      /* perform dense axpy */
      valtmp = aoa[j];
      for (k=0; k<pnz; k++) {
        apa[pj[k]] += valtmp*pa[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* set values in C */
    row  = rstart + i;
    api  = ptap->api;
    apj  = ptap->apj;
    apJ  = apj + api[i];
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++) {
      ca[k1]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++) {
      ca[k0]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
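/*
   Symbolic phase for C = A*P. For each local row of A, the column indices of
   the required rows of P_loc and P_oth are merged through a sorted linked list
   (PetscLLAdd/PetscLLClean) to obtain the pattern of C, stored in (api,apj).
   Memory for the pattern comes from a FreeSpace list sized by the caller's
   fill estimate; whenever the estimate proves too small the list is extended
   and the event counted in nspacedouble, which is reported through MatInfo
   at the end of the routine.
*/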
#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm = ((PetscObject)A)->comm;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space = PETSC_NULL,current_space = PETSC_NULL;
  Mat_MPIAIJ         *a = (Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ         *ad = (Mat_SeqAIJ*)(a->A)->data,*ao = (Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt           *adi = ad->i,*adj = ad->j,*aoi = ao->i,*aoj = ao->j,rstart = A->rmap->rstart;
  PetscInt           nlnk,*lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble = 0,j,nzi;
  PetscInt           am = A->rmap->n,pN = P->cmap->N,pn = P->cmap->n,pm = P->rmap->n;
  PetscBT            lnkbt;
  PetscScalar        *apa;
  PetscReal          afill;
  PetscBool          matmatmult_old = PETSC_FALSE;

  PetscFunctionBegin;
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
    SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
  }
  ierr = PetscOptionsGetBool(PETSC_NULL,"-matmatmult_old",&matmatmult_old,PETSC_NULL);CHKERRQ(ierr);
  if (matmatmult_old) {
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_32(A,P,fill,C);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);
  ptap->abnz_max = 0;

  /* malloc apa to store dense row A[i,:]*P */
  ierr = PetscMalloc(pN*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,pN*sizeof(PetscScalar));CHKERRQ(ierr);
  ptap->apa = apa;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  nlnk = pN+1;
  ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    apnz = 0;
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr  = PetscLLAdd(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr  = PetscLLAdd(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }

    api[i+1] = api[i] + apnz;
    if (ptap->abnz_max < apnz) ptap->abnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLClean(pN,pN,apnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
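/*
   The _32 routines below implement the algorithm shipped with PETSc-3.2:
   gather the needed rows of B into a sequential matrix B_seq, multiply with
   the SeqAIJ kernels, and stitch the distributed result together with
   MatMerge(). They remain selectable at runtime for comparison, e.g.
   (illustrative executable name):

      ./app -matmatmult_old
*/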
/* implementation used in PETSc-3.2 */
/* This routine is called ONLY in the case of reusing previously computed symbolic C */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ_32"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_32(Mat A,Mat B,Mat C)
{
  PetscErrorCode    ierr;
  Mat               *seq;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);

  seq  = &mult->B_seq;
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;

  seq  = &mult->A_loc;
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;

  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_SparseAxpy(mult->A_loc,mult->B_seq,mult->C_seq);CHKERRQ(ierr);

  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr);
  ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_32"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_32(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;
  Mat               AB,*seq;
  Mat_MPIAIJ        *a = (Mat_MPIAIJ*)A->data;
  PetscInt          *idx,i,start,ncols,nzA,nzB,*cmap,imark;

  PetscFunctionBegin;
  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
    SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
  }

  ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);

  /* get isrowb: nonzero columns of local A */
  start = A->cmap->rstart;
  cmap  = a->garray;
  nzA   = a->A->cmap->n;
  nzB   = a->B->cmap->n;
  ierr  = PetscMalloc((nzA+nzB)*sizeof(PetscInt),&idx);CHKERRQ(ierr);
  ncols = 0;
  for (i=0; i<nzB; i++) {  /* row < local row index */
    if (cmap[i] < start) idx[ncols++] = cmap[i];
    else break;
  }
  imark = i;
  for (i=0; i<nzA; i++) idx[ncols++] = start + i;   /* local rows */
  for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&mult->isrowb);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&mult->iscolb);CHKERRQ(ierr);

  /* get isrowa: all local rows of A */
  ierr = ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,&mult->isrowa);CHKERRQ(ierr);

  /* Below should go to MatMatMultNumeric_MPIAIJ_MPIAIJ() - How to generate C there? */
  /* create a seq matrix B_seq = submatrix of B by taking the rows of B that correspond to the nonzero columns of local A */
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);

  /* create a seq matrix A_loc = submatrix of A by taking all local rows of A */
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);

  /* compute C_seq = A_loc * B_seq */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,fill,&mult->C_seq);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,mult->C_seq);CHKERRQ(ierr);

  /* create mpi matrix C by concatenating C_seq */
  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMergeSymbolic(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,&AB);CHKERRQ(ierr);
  ierr = MatMergeNumeric(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,AB);CHKERRQ(ierr);

  /* attach the supporting struct to C for reuse of symbolic C */
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,mult);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)AB,"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);

  mult->destroy           = AB->ops->destroy;
  mult->duplicate         = AB->ops->duplicate;
  AB->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_32;
  AB->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult_32;
  AB->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult_32;
  AB->ops->matmult        = MatMatMult_MPIAIJ_MPIAIJ;

  *C = AB;
  PetscFunctionReturn(0);
}
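/*
   The rest of this file handles C = A*B with A in MPIAIJ and B in MPIDense
   format. The local computation mirrors the AIJ case:

      C_loc = A_diag*B_loc + A_off*B_oth

   where B_oth, the off-processor rows of B needed by this process, is gathered
   into the cached dense work matrix workB by MatMPIDenseScatter().
*/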
#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

#undef __FUNCT__
#define __FUNCT__ "MPIAIJ_MPIDenseDestroy"
PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MatDestroy(&contents->workB);CHKERRQ(ierr);
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
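/*
   The symbolic phase for the dense case creates only C and the communication
   workspace: workB holds one row per off-diagonal column of A and all
   B->cmap->N columns, and the send/receive buffers reuse the layout of the
   matrix-vector scatter aij->Mvctx, scaled by the number of columns of B.
   Everything is stashed in a PetscContainer composed with C, so repeated
   numeric products allocate nothing new.
*/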
#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscInt               nz = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               m = A->rmap->n,n = B->cmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(((PetscObject)B)->comm,C);CHKERRQ(ierr);
  ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  (*C)->ops->matmult = MatMatMult_MPIAIJ_MPIDense;

  ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off-processor rows of B needed for local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
                      B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
                      from->n,MPI_Request,&contents->rwaits,
                      to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(((PetscObject)A)->comm,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
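/*
   Communication pattern of MatMPIDenseScatter(), summarized from the code
   below: the needed rows of B are packed column by column into svalues (PETSc
   dense matrices are stored column-major), posted with MPI_Isend after the
   matching MPI_Irecv's, and unpacked into workB as the messages complete
   (MPI_Waitany).
*/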
#undef __FUNCT__
#define __FUNCT__ "MatMPIDenseScatter"
/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm = ((PetscObject)A)->comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N,nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         container;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
  sindices = to->indices;
  sstarts  = to->starts;
  sprocs   = to->procs;
  swaits   = contents->swaits;
  svalues  = contents->svalues;

  rindices = from->indices;
  rstarts  = from->starts;
  rprocs   = from->procs;
  rwaits   = contents->rwaits;
  rvalues  = contents->rvalues;

  ierr = MatGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    CHKMEMQ;
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++) {
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    CHKMEMQ;
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    CHKMEMQ;
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) {
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
    CHKMEMQ;
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
  Mat            workB;

  PetscFunctionBegin;
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}