/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petsc/private/vecimpl.h>

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat*);
#endif

PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
{
  PetscErrorCode ierr;
#if defined(PETSC_HAVE_HYPRE)
  const char     *algTypes[3] = {"scalable","nonscalable","hypre"};
  PetscInt       nalg = 3;
#else
  const char     *algTypes[2] = {"scalable","nonscalable"};
  PetscInt       nalg = 2;
#endif
  PetscInt       alg = 1; /* set nonscalable algorithm as default */
  MPI_Comm       comm;
  PetscBool      flg;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
    if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);

    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[1],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    if (!flg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
      MatInfo   Ainfo,Binfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(B,MAT_LOCAL,&Binfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

      if (B->cmap->N > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg  = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
        ierr = PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,fill*nz_local);CHKERRQ(ierr);
      }
    }

    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    switch (alg) {
    case 1:
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 2:
      ierr = MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);CHKERRQ(ierr);
      break;
#endif
    default:
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
      break;
    }
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
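/*
   Usage sketch (illustrative, summarizing the dispatch above): a caller typically obtains
   C = A*B with

       ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);

   and can select one of the algorithms implemented in this file at runtime with

       -matmatmult_via scalable | nonscalable | hypre

   where "hypre" is only available when PETSc is configured with hypre (PETSC_HAVE_HYPRE).
   When no option is given and B->cmap->N is large, the heuristic above may switch from the
   default nonscalable algorithm to the scalable one.
*/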
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);
  ierr = PetscFree(ptap->api);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
  ierr = ptap->destroy(A);CHKERRQ(ierr);
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);

  (*M)->ops->destroy   = ptap->destroy;   /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's special structure! */
  (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a  =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
  PetscScalar    *cda=cd->a,*coa=co->a;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscScalar    *apa,*ca;
  PetscInt       cm=C->rmap->n;
  Mat_PtAPMPI    *ptap=c->ptap;
  PetscInt       *api,*apj,*apJ,i,k;
  PetscInt       cstart=C->cmap->rstart;
  PetscInt       cdnz,conz,k0,k1;
  MPI_Comm       comm;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = NULL;
  if (size >1) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }

  /* get apa for storing dense row A[i,:]*P */
  apa = ptap->apa;

  api = ptap->api;
  apj = ptap->apj;
  for (i=0; i<cm; i++) {
    /* compute apa = A[i,:]*P */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);

    /* set values in C */
    apJ  = apj + api[i];
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++) {
      ca[k1]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++) {
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
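/*
   Note on the "nonscalable" variant (a summary of the routines above and below, no new
   behavior): the numeric kernel accumulates each row A[i,:]*P into a dense work array apa of
   length P->cmap->N, allocated once by the symbolic routine, so its per-process memory grows
   with the global number of columns of P. The "scalable" variant later in this file replaces
   this dense accumulator with a sparse one of length apnz_max.
*/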
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  Mat_MPIAIJ         *a =(Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ         *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
  PetscInt           *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
  PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
  PetscBT            lnkbt;
  PetscScalar        *apa;
  PetscReal          afill;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
    pi_oth = p_oth->i; pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL; pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr = PetscMalloc1(am+2,&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0] = 0;

  /* create and initialize a linked list */
  ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* malloc apa to store dense row A[i,:]*P */
  ierr = PetscCalloc1(pN,&apa);CHKERRQ(ierr);

  ptap->apa = apa;

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr);

  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MatDestroy(&contents->workB);CHKERRQ(ierr);
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
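/*
   Note on the MPIAIJ*MPIDense product (a summary of the routines below): the local part of
   C = A*B is computed as C_local = A_diag*B_local + A_off*B_gathered, where B_gathered is the
   workB matrix kept in the "workB" PetscContainer composed on C; it holds the off-process
   rows of B that correspond to nonzero columns of A_off. The container also carries the
   send/receive buffers and MPI_Request arrays reused on every numeric product.
*/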
/*
    This is a "dummy function" that handles the case where matrix C was
    created as a dense matrix directly by the user and passed to MatMatMult()
    with the MAT_REUSE_MATRIX option

    It is the same as MatMatMultSymbolic_MPIAIJ_MPIDense() except it does not create C
*/
PetscErrorCode MatMatMultNumeric_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode         ierr;
  PetscBool              flg;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
  PetscInt               nz   = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)B,MATMPIDENSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Second matrix must be mpidense");

  /* Handle the case where the user provided the final C matrix rather than calling MatMatMult() with MAT_INITIAL_MATRIX */
  ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"First matrix must be MPIAIJ");

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;

  ierr = PetscNew(&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off-processor rows of B needed for local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues,
                      B->cmap->N*to->starts[to->n],&contents->svalues,
                      from->n,&contents->rwaits,
                      to->n,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)C,"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);

  ierr = (*C->ops->matmultnumeric)(A,B,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
  PetscInt               nz   = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               m     = A->rmap->n,n=B->cmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(PetscObjectComm((PetscObject)B),C);CHKERRQ(ierr);
  ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(*C,A,B);CHKERRQ(ierr);
  ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatMPIDenseSetPreallocation(*C,NULL);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  (*C)->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;

  ierr = PetscNew(&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off-processor rows of B needed for local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues,
                      B->cmap->N*to->starts[to->n],&contents->svalues,
                      from->n,&contents->rwaits,
                      to->n,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         container;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
  sindices = to->indices;
  sstarts  = to->starts;
  sprocs   = to->procs;
  swaits   = contents->swaits;
  svalues  = contents->svalues;

  rindices = from->indices;
  rstarts  = from->starts;
  rprocs   = from->procs;
  rwaits   = contents->rwaits;
  rvalues  = contents->rvalues;

  ierr = MatDenseGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatDenseGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++) {
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) {
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatDenseRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatDenseRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
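/*
   Note on the packing in MatMPIDenseScatter() above (a description of the loops, nothing
   new): B and workB are stored column-major (PETSc dense layout), so a needed row of B is
   gathered by striding through b with nrowsB between consecutive columns. Each destination
   process receives one contiguous message of ncols*(number of rows it needs) scalars, and
   the received messages are unpacked into workB with stride nrows.
*/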
extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
  Mat            workB;

  PetscFunctionBegin;
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
  PetscInt       *adi = ad->i,*adj,*aoi=ao->i,*aoj;
  PetscScalar    *ada,*aoa,*cda=cd->a,*coa=co->a;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
  PetscScalar    *pa_loc,*pa_oth,*pa,valtmp,*ca;
  PetscInt       cm = C->rmap->n,anz,pnz;
  Mat_PtAPMPI    *ptap = c->ptap;
  PetscScalar    *apa_sparse = ptap->apa;
  PetscInt       *api,*apj,*apJ,i,j,k,row;
  PetscInt       cstart = C->cmap->rstart;
  PetscInt       cdnz,conz,k0,k1,nextp;
  MPI_Comm       comm;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
  if (size >1) {
    p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
    pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
  } else {
    p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
  }

  api = ptap->api;
  apj = ptap->apj;
  for (i=0; i<cm; i++) {
    apJ = apj + api[i];

    /* diagonal portion of A */
    anz = adi[i+1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j=0; j<anz; j++) {
      row = adj[j];
      pnz = pi_loc[row+1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];
      /* perform sparse axpy */
      valtmp = ada[j];
      nextp  = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp*pa[nextp++];
        }
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* off-diagonal portion of A */
    anz = aoi[i+1] - aoi[i];
    aoj = ao->j + aoi[i];
    aoa = ao->a + aoi[i];
    for (j=0; j<anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row+1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];
      /* perform sparse axpy */
      valtmp = aoa[j];
      nextp  = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp*pa[nextp++];
        }
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* set values in C */
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++) {
      ca[k1]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++) {
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  Mat_MPIAIJ         *a  = (Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ         *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
  PetscInt           i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max;
  PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
  PetscReal          afill;
  PetscScalar        *apa;
  PetscTable         ta;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
    pi_oth = p_oth->i; pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL; pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr = PetscMalloc1(am+2,&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0] = 0;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr);

  /* Calculate apnz_max */
  apnz_max = 0;
  for (i=0; i<am; i++) {
    ierr = PetscTableRemoveAll(ta);CHKERRQ(ierr);
    /* diagonal portion of A */
    nzi  = adi[i+1] - adi[i];
    Jptr = adj+adi[i];  /* cols of A_diag */
    MatMergeRows_SeqAIJ(p_loc,nzi,Jptr,ta);
    ierr = PetscTableGetCount(ta,&apnz);CHKERRQ(ierr);
    if (apnz_max < apnz) apnz_max = apnz;

    /* off-diagonal portion of A */
    nzi  = aoi[i+1] - aoi[i];
    Jptr = aoj+aoi[i];  /* cols of A_off */
    MatMergeRows_SeqAIJ(p_oth,nzi,Jptr,ta);
    ierr = PetscTableGetCount(ta,&apnz);CHKERRQ(ierr);
    if (apnz_max < apnz) apnz_max = apnz;
  }
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  ierr = PetscLLCondensedCreate_Scalable(apnz_max,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr);
  current_space = free_space;
  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }

    apnz     = *lnk;
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* malloc apa for assembling Cmpi */
  ierr = PetscCalloc1(apnz_max,&apa);CHKERRQ(ierr);

  ptap->apa = apa;
  for (i=0; i<am; i++) {
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
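/*
   Note on nonscalable vs scalable symbolic products (summary only): the nonscalable routine
   earlier in this file uses PetscLLCondensedCreate(pN,pN,...) plus a dense accumulator of
   length pN = P->cmap->N, while the routine above first bounds the longest row of AP with a
   PetscTable pass (apnz_max) and then uses the _Scalable linked-list variants with an
   accumulator of length apnz_max, avoiding O(pN) storage per process. Either can be selected
   with -matmatmult_via.
*/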
/*-------------------------------------------------------------------------*/
PetscErrorCode MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat P,Mat A,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  const char     *algTypes[3] = {"scalable","nonscalable","matmatmult"};
  PetscInt       aN=A->cmap->N,alg=1; /* set default algorithm */
  PetscBool      flg;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,3,algTypes[1],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    ierr = PetscLogEventBegin(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr);
    switch (alg) {
    case 1:
      if (!flg && aN > 100000) { /* may switch to scalable algorithm as default */
        MatInfo   Ainfo,Pinfo;
        PetscInt  nz_local;
        PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
        MPI_Comm  comm;

        ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
        ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
        nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated); /* estimated local nonzero entries */

        if (aN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
        ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
        ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

        if (alg_scalable) {
          alg  = 0; /* the scalable algorithm may be slower than the nonscalable one */
          ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);CHKERRQ(ierr);
          break;
        }
      }
      ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(P,A,fill,C);CHKERRQ(ierr);
      break;
    case 2:
      {
        Mat         Pt;
        Mat_PtAPMPI *ptap;
        Mat_MPIAIJ  *c;
        ierr = MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);CHKERRQ(ierr);
        ierr = MatMatMult(Pt,A,MAT_INITIAL_MATRIX,fill,C);CHKERRQ(ierr);
        c        = (Mat_MPIAIJ*)(*C)->data;
        ptap     = c->ptap;
        ptap->Pt = Pt;
        (*C)->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
        PetscFunctionReturn(0);
      }
      break;
    default: /* scalable algorithm */
      ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);CHKERRQ(ierr);
      break;
    }
    ierr = PetscLogEventEnd(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->mattransposemultnumeric)(P,A,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* This routine only works when scall=MAT_REUSE_MATRIX! */
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *c    = (Mat_MPIAIJ*)C->data;
  Mat_PtAPMPI    *ptap = c->ptap;
  Mat            Pt    = ptap->Pt;

  PetscFunctionBegin;
  ierr = MatTranspose(P,MAT_REUSE_MATRIX,&Pt);CHKERRQ(ierr);
  ierr = MatMatMultNumeric(Pt,A,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
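/*
   Note on the "matmatmult" variant selected by -mattransposematmult_via matmatmult (summary):
   the symbolic phase explicitly forms Pt = P^T once, stores it in ptap->Pt, and computes
   C = Pt*A with the ordinary MatMatMult kernels; the numeric routine above only refreshes Pt
   with MAT_REUSE_MATRIX and reruns MatMatMultNumeric().
*/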
PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat,MatDuplicateOption,Mat*);

/* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_PtAPMPI        *ptap;
  Mat_MPIAIJ         *p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  Mat                Cmpi;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  PetscInt           pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n;
  PetscInt           *lnk,i,k,nsend;
  PetscBT            lnkbt;
  PetscMPIInt        tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt           **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt           len,proc,*dnz,*onz,*owners,nzi;
  PetscInt           nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request        *swaits,*rwaits;
  MPI_Status         *sstatus,rstatus;
  PetscLayout        rowmap;
  PetscInt           *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A - used in the communication */
  PetscMPIInt        *len_r,*id_r;         /* array of length of comm->size, store send/recv matrix values */
  PetscInt           *Jptr,*prmap=p->garray,con,j,Crmax;
  Mat_SeqAIJ         *a_loc,*c_loc,*c_oth;
  PetscTable         ta;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);

  Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic A_loc */
  /* ---------------------------*/
  ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc);CHKERRQ(ierr);

  /* (2-1) compute symbolic C_oth = Ro*A_loc  */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*A) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*A_loc */
  /* ---------------------------------------- */
  ierr  = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(an,aN,&ta);CHKERRQ(ierr); /* for computing Crmax */
  MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta);

  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,an,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,an,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap         = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled      = PETSC_FALSE;
  Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
  *C                   = Cmpi;
  PetscFunctionReturn(0);
}
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               A_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* These matrices are obtained in MatTransposeMatMultSymbolic() */
    /* 1) get Rd = Pd^T, Ro = Po^T */
    /*-----------------------------*/
    ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);

    /* 2) compute numeric A_loc */
    /*--------------------------*/
    ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
  A_loc = ptap->A_loc;
  ierr  = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr  = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = rstart + i;
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  /* C_oth -> C, off-processor part */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = p->garray[i];
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}
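/*
   Note on the nonscalable P^T*A numeric phase above (summary): the local products
   C_loc = Rd*A_loc and C_oth = Ro*A_loc are recomputed with the sequential kernels stored by
   the symbolic phase, and their rows are added into C with MatSetValues(...,ADD_VALUES).
   Rows of C_oth belong to other processes and are addressed through p->garray, so the final
   MatAssemblyBegin/End performs the required off-process communication.
*/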
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
{
  PetscErrorCode      ierr;
  Mat_Merge_SeqsToMPI *merge;
  Mat_MPIAIJ          *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
  Mat_PtAPMPI         *ptap;
  PetscInt            *adj;
  PetscInt            i,j,k,anz,pnz,row,*cj,nexta;
  MatScalar           *ada,*ca,valtmp;
  PetscInt            am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,taga,*len_s;
  PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
  PetscInt            **buf_ri,**buf_rj;
  PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
  MPI_Request         *s_waits,*r_waits;
  MPI_Status          *status;
  MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
  PetscInt            *ai,*aj,*coi,*coj,*poJ,*pdJ;
  Mat                 A_loc;
  Mat_SeqAIJ          *a_loc;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ptap  = c->ptap;
  merge = ptap->merge;

  /* 2) compute numeric C_seq = P_loc^T*A_loc */
  /*------------------------------------------*/
  /* get data from symbolic products */
  coi    = merge->coi; coj = merge->coj;
  ierr   = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);
  bi     = merge->bi; bj = merge->bj;
  owners = merge->rowmap->range;
  ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr);

  /* get A_loc by taking all local rows of A */
  A_loc = ptap->A_loc;
  ierr  = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr);
  a_loc = (Mat_SeqAIJ*)(A_loc)->data;
  ai    = a_loc->i;
  aj    = a_loc->j;

  for (i=0; i<am; i++) {
    anz = ai[i+1] - ai[i];
    adj = aj + ai[i];
    ada = a_loc->a + ai[i];

    /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
    /*-------------------------------------------------------------*/
    /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
    pnz = po->i[i+1] - po->i[i];
    poJ = po->j + po->i[i];
    pA  = po->a + po->i[i];
    for (j=0; j<pnz; j++) {
      row = poJ[j];
      cj  = coj + coi[row];
      ca  = coa + coi[row];
      /* perform sparse axpy */
      nexta  = 0;
      valtmp = pA[j];
      for (k=0; nexta<anz; k++) {
        if (cj[k] == adj[nexta]) {
          ca[k] += valtmp*ada[nexta];
          nexta++;
        }
      }
      ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr);
    }

    /* put the value into Cd (diagonal part) */
    pnz = pd->i[i+1] - pd->i[i];
    pdJ = pd->j + pd->i[i];
    pA  = pd->a + pd->i[i];
    for (j=0; j<pnz; j++) {
      row = pdJ[j];
      cj  = bj + bi[row];
      ca  = ba + bi[row];
      /* perform sparse axpy */
      nexta  = 0;
      valtmp = pA[j];
      for (k=0; nexta<anz; k++) {
        if (cj[k] == adj[nexta]) {
          ca[k] += valtmp*ada[nexta];
          nexta++;
        }
      }
      ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr);
    }
  }
  /* 3) send and recv matrix values coa */
  /*------------------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
  ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);

  ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = merge->owners_co[proc];
    ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
    k++;
  }
  if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}

  ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);
  ierr = PetscFree(coa);CHKERRQ(ierr);

  /* 4) insert local Cseq and received values into Cmpi */
  /*----------------------------------------------------*/
  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k]+1;             /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];       /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz    = *(nextci[k]+1) - *nextci[k];
        cj     = buf_rj[k] + *(nextci[k]);
        ca     = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++) {
          if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
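/*
   Note on the routine above (summary): it is the numeric companion of
   MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ below. The symbolic phase stores a
   Mat_Merge_SeqsToMPI struct (coi/coj for the off-process part Co=(p->B)^T*A and bi/bj for
   the local part); the numeric phase fills the matching value arrays coa and ba by sparse
   outer-product accumulation, ships coa to the owning processes, and merges the received
   values row by row before the final MatSetValues/assembly.
*/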
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat                 Cmpi,A_loc,POt,PDt;
  Mat_PtAPMPI         *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data,*c;
  PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pn=P->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*bi,*bj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rstart=P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
  PetscScalar         *vals;
  Mat_SeqAIJ          *a_loc,*pdt,*pot;
  PetscTable          ta;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* check if matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get A_loc by taking all local rows of A */
  ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);

  ptap->A_loc = A_loc;
  a_loc = (Mat_SeqAIJ*)(A_loc)->data;
  ai    = a_loc->i;
  aj    = a_loc->j;

  /* determine symbolic Co=(p->B)^T*A - send to others */
  /*----------------------------------------------------*/
  ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr);
  pdt  = (Mat_SeqAIJ*)PDt->data;
  pdti = pdt->i; pdtj = pdt->j;

  ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr);
  pot  = (Mat_SeqAIJ*)POt->data;
  poti = pot->i; potj = pot->j;

  /* then, compute symbolic Co = (p->B)^T*A */
  pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                            >= (num of nonzero rows of C_seq) - pn */
  ierr = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
  nnz           = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);CHKERRQ(ierr);
  MatRowMergeMax_SeqAIJ(a_loc,am,ta);
  ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr);

  ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of A_loc == col of Pot */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr = PetscMalloc1(coi[pon]+1,&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); /* must destroy to get a new one for C */

  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                     /* num of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len          = 0; /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  ierr = PetscMalloc1(size,&sstatus);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into table to update Armax */
  /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/examples/tutorials/ex56.c! */
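  /* (Armax bounds the size of the condensed linked lists created below, so it must
     account for columns contributed by remote rows as well as local ones.) */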
  for (k=0; k<merge->nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<merge->len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr);
  /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */

  /* send and recv coi */
  /*-------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:           nrows to be sent
         [1:nrows]:           row index (global)
         [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi               = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(sstatus);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  /* allocate bi array and free space for accumulating nonzero column info */
  ierr  = PetscMalloc1(pn+1,&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz           = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

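  /* For each local row i of C = P^T*A, the loop below merges the column indices of the
     locally computed contribution Pd^T[i,:]*A_loc with the matching rows of Co received
     from other processes (tracked by nextrow[]/nextci[]), records the row length in bi[],
     and accumulates the diagonal/off-diagonal counts dnz/onz used to preallocate Cmpi. */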
  ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr);
  ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pt */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    bi[i+1] = bi[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr = PetscMalloc1(bi[pn]+1,&bj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  ierr = MatDestroy(&POt);CHKERRQ(ierr);
  ierr = MatDestroy(&PDt);CHKERRQ(ierr);

  /* create symbolic parallel matrix Cmpi (TODO: why can this not be assembled in the numeric phase?) */
  /*---------------------------------------------------------------------------------------------------*/
  ierr = PetscCalloc1(rmax+1,&vals);CHKERRQ(ierr);

  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    row  = i + rstart;
    nnz  = bi[i+1] - bi[i];
    Jptr = bj + bi[i];
    ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(vals);CHKERRQ(ierr);

  merge->bi        = bi;
  merge->bj        = bj;
  merge->coi       = coi;
  merge->coj       = coj;
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;

  c->ptap         = ptap;
  ptap->api       = NULL;
  ptap->apj       = NULL;
  ptap->merge     = merge;
  ptap->apa       = NULL;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->duplicate = Cmpi->ops->duplicate;

  Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
  Cmpi->ops->destroy                 = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate               = MatDuplicate_MPIAIJ_MatPtAP;

  *C = Cmpi;
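
  /* The diagnostics below are compiled only when PETSc is configured with PETSC_USE_INFO
     and are printed when the program is run with -info; afill is the fill ratio that was
     actually needed, and supplying it as the fill argument on a later call avoids the
     reallocations counted by nspacedouble. */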
#if defined(PETSC_USE_INFO)
  if (bi[pn] != 0) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

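/*
   Illustrative user-level usage (a sketch, not part of this file): a caller obtains
   C = P^T*A through the public interface, which dispatches to the symbolic and numeric
   routines above for MPIAIJ matrices, e.g.

     Mat C;
     ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);
     ...
     ierr = MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     ierr = MatDestroy(&C);CHKERRQ(ierr);

   where fill is the expected (nnz(C))/(nnz(P)+nnz(A)) ratio, such as the value reported
   by -info above.
*/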