1 2 /* 3 Defines matrix-matrix product routines for pairs of MPIAIJ matrices 4 C = A * B 5 */ 6 #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ 7 #include <../src/mat/utils/freespace.h> 8 #include <../src/mat/impls/aij/mpi/mpiaij.h> 9 #include <petscbt.h> 10 #include <../src/mat/impls/dense/mpi/mpidense.h> 11 #include <petsc/private/vecimpl.h> 12 #include <petsc/private/sfimpl.h> 13 14 #if defined(PETSC_HAVE_HYPRE) 15 PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat, Mat, PetscReal, Mat); 16 #endif 17 18 PETSC_INTERN PetscErrorCode MatProductSymbolic_ABt_MPIAIJ_MPIAIJ(Mat C) 19 { 20 Mat_Product *product = C->product; 21 Mat B = product->B; 22 23 PetscFunctionBegin; 24 PetscCall(MatTranspose(B, MAT_INITIAL_MATRIX, &product->B)); 25 PetscCall(MatDestroy(&B)); 26 PetscCall(MatProductSymbolic_AB_MPIAIJ_MPIAIJ(C)); 27 PetscFunctionReturn(0); 28 } 29 30 PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C) 31 { 32 Mat_Product *product = C->product; 33 Mat A = product->A, B = product->B; 34 MatProductAlgorithm alg = product->alg; 35 PetscReal fill = product->fill; 36 PetscBool flg; 37 38 PetscFunctionBegin; 39 /* scalable */ 40 PetscCall(PetscStrcmp(alg, "scalable", &flg)); 41 if (flg) { 42 PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C)); 43 PetscFunctionReturn(0); 44 } 45 46 /* nonscalable */ 47 PetscCall(PetscStrcmp(alg, "nonscalable", &flg)); 48 if (flg) { 49 PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C)); 50 PetscFunctionReturn(0); 51 } 52 53 /* seqmpi */ 54 PetscCall(PetscStrcmp(alg, "seqmpi", &flg)); 55 if (flg) { 56 PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A, B, fill, C)); 57 PetscFunctionReturn(0); 58 } 59 60 /* backend general code */ 61 PetscCall(PetscStrcmp(alg, "backend", &flg)); 62 if (flg) { 63 PetscCall(MatProductSymbolic_MPIAIJBACKEND(C)); 64 PetscFunctionReturn(0); 65 } 66 67 #if defined(PETSC_HAVE_HYPRE) 68 PetscCall(PetscStrcmp(alg, "hypre", &flg)); 69 if (flg) 
{ 70 PetscCall(MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A, B, fill, C)); 71 PetscFunctionReturn(0); 72 } 73 #endif 74 SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_SUP, "Mat Product Algorithm is not supported"); 75 } 76 77 PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data) 78 { 79 Mat_APMPI *ptap = (Mat_APMPI *)data; 80 81 PetscFunctionBegin; 82 PetscCall(PetscFree2(ptap->startsj_s, ptap->startsj_r)); 83 PetscCall(PetscFree(ptap->bufa)); 84 PetscCall(MatDestroy(&ptap->P_loc)); 85 PetscCall(MatDestroy(&ptap->P_oth)); 86 PetscCall(MatDestroy(&ptap->Pt)); 87 PetscCall(PetscFree(ptap->api)); 88 PetscCall(PetscFree(ptap->apj)); 89 PetscCall(PetscFree(ptap->apa)); 90 PetscCall(PetscFree(ptap)); 91 PetscFunctionReturn(0); 92 } 93 94 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, Mat C) 95 { 96 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data; 97 Mat_SeqAIJ *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data; 98 Mat_SeqAIJ *cd = (Mat_SeqAIJ *)(c->A)->data, *co = (Mat_SeqAIJ *)(c->B)->data; 99 PetscScalar *cda = cd->a, *coa = co->a; 100 Mat_SeqAIJ *p_loc, *p_oth; 101 PetscScalar *apa, *ca; 102 PetscInt cm = C->rmap->n; 103 Mat_APMPI *ptap; 104 PetscInt *api, *apj, *apJ, i, k; 105 PetscInt cstart = C->cmap->rstart; 106 PetscInt cdnz, conz, k0, k1; 107 const PetscScalar *dummy; 108 MPI_Comm comm; 109 PetscMPIInt size; 110 111 PetscFunctionBegin; 112 MatCheckProduct(C, 3); 113 ptap = (Mat_APMPI *)C->product->data; 114 PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtAP cannot be computed. Missing data"); 115 PetscCall(PetscObjectGetComm((PetscObject)A, &comm)); 116 PetscCallMPI(MPI_Comm_size(comm, &size)); 117 PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. 
Do not call MatProductClear()"); 118 119 /* flag CPU mask for C */ 120 #if defined(PETSC_HAVE_DEVICE) 121 if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU; 122 if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU; 123 if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU; 124 #endif 125 126 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 127 /*-----------------------------------------------------*/ 128 /* update numerical values of P_oth and P_loc */ 129 PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth)); 130 PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc)); 131 132 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 133 /*----------------------------------------------------------*/ 134 /* get data from symbolic products */ 135 p_loc = (Mat_SeqAIJ *)(ptap->P_loc)->data; 136 p_oth = NULL; 137 if (size > 1) p_oth = (Mat_SeqAIJ *)(ptap->P_oth)->data; 138 139 /* get apa for storing dense row A[i,:]*P */ 140 apa = ptap->apa; 141 142 api = ptap->api; 143 apj = ptap->apj; 144 /* trigger copy to CPU */ 145 PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy)); 146 PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy)); 147 PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy)); 148 PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy)); 149 for (i = 0; i < cm; i++) { 150 /* compute apa = A[i,:]*P */ 151 AProw_nonscalable(i, ad, ao, p_loc, p_oth, apa); 152 153 /* set values in C */ 154 apJ = apj + api[i]; 155 cdnz = cd->i[i + 1] - cd->i[i]; 156 conz = co->i[i + 1] - co->i[i]; 157 158 /* 1st off-diagonal part of C */ 159 ca = coa + co->i[i]; 160 k = 0; 161 for (k0 = 0; k0 < conz; k0++) { 162 if (apJ[k] >= cstart) break; 163 ca[k0] = apa[apJ[k]]; 164 apa[apJ[k++]] = 0.0; 165 } 166 167 /* diagonal part of C */ 168 ca = cda + cd->i[i]; 169 for (k1 = 0; k1 < cdnz; k1++) { 170 ca[k1] = 
apa[apJ[k]]; 171 apa[apJ[k++]] = 0.0; 172 } 173 174 /* 2nd off-diagonal part of C */ 175 ca = coa + co->i[i]; 176 for (; k0 < conz; k0++) { 177 ca[k0] = apa[apJ[k]]; 178 apa[apJ[k++]] = 0.0; 179 } 180 } 181 PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY)); 182 PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY)); 183 PetscFunctionReturn(0); 184 } 185 186 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, PetscReal fill, Mat C) 187 { 188 MPI_Comm comm; 189 PetscMPIInt size; 190 Mat_APMPI *ptap; 191 PetscFreeSpaceList free_space = NULL, current_space = NULL; 192 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 193 Mat_SeqAIJ *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data, *p_loc, *p_oth; 194 PetscInt *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz; 195 PetscInt *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart; 196 PetscInt *lnk, i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi; 197 PetscInt am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n; 198 PetscBT lnkbt; 199 PetscReal afill; 200 MatType mtype; 201 202 PetscFunctionBegin; 203 MatCheckProduct(C, 4); 204 PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty"); 205 PetscCall(PetscObjectGetComm((PetscObject)A, &comm)); 206 PetscCallMPI(MPI_Comm_size(comm, &size)); 207 208 /* create struct Mat_APMPI and attached it to C later */ 209 PetscCall(PetscNew(&ptap)); 210 211 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 212 PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth)); 213 214 /* get P_loc by taking all local rows of P */ 215 PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc)); 216 217 p_loc = (Mat_SeqAIJ *)(ptap->P_loc)->data; 218 pi_loc = p_loc->i; 219 pj_loc = p_loc->j; 220 if (size > 1) { 221 p_oth = (Mat_SeqAIJ *)(ptap->P_oth)->data; 222 pi_oth = 
p_oth->i; 223 pj_oth = p_oth->j; 224 } else { 225 p_oth = NULL; 226 pi_oth = NULL; 227 pj_oth = NULL; 228 } 229 230 /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */ 231 /*-------------------------------------------------------------------*/ 232 PetscCall(PetscMalloc1(am + 2, &api)); 233 ptap->api = api; 234 api[0] = 0; 235 236 /* create and initialize a linked list */ 237 PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt)); 238 239 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 240 PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space)); 241 current_space = free_space; 242 243 MatPreallocateBegin(comm, am, pn, dnz, onz); 244 for (i = 0; i < am; i++) { 245 /* diagonal portion of A */ 246 nzi = adi[i + 1] - adi[i]; 247 for (j = 0; j < nzi; j++) { 248 row = *adj++; 249 pnz = pi_loc[row + 1] - pi_loc[row]; 250 Jptr = pj_loc + pi_loc[row]; 251 /* add non-zero cols of P into the sorted linked list lnk */ 252 PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt)); 253 } 254 /* off-diagonal portion of A */ 255 nzi = aoi[i + 1] - aoi[i]; 256 for (j = 0; j < nzi; j++) { 257 row = *aoj++; 258 pnz = pi_oth[row + 1] - pi_oth[row]; 259 Jptr = pj_oth + pi_oth[row]; 260 PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt)); 261 } 262 /* add possible missing diagonal entry */ 263 if (C->force_diagonals) { 264 j = i + rstart; /* column index */ 265 PetscCall(PetscLLCondensedAddSorted(1, &j, lnk, lnkbt)); 266 } 267 268 apnz = lnk[0]; 269 api[i + 1] = api[i] + apnz; 270 271 /* if free space is not available, double the total space in the list */ 272 if (current_space->local_remaining < apnz) { 273 PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), ¤t_space)); 274 nspacedouble++; 275 } 276 277 /* Copy data into free space, then initialize lnk */ 278 PetscCall(PetscLLCondensedClean(pN, apnz, current_space->array, lnk, 
lnkbt)); 279 PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz)); 280 281 current_space->array += apnz; 282 current_space->local_used += apnz; 283 current_space->local_remaining -= apnz; 284 } 285 286 /* Allocate space for apj, initialize apj, and */ 287 /* destroy list of free space and other temporary array(s) */ 288 PetscCall(PetscMalloc1(api[am] + 1, &ptap->apj)); 289 apj = ptap->apj; 290 PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj)); 291 PetscCall(PetscLLDestroy(lnk, lnkbt)); 292 293 /* malloc apa to store dense row A[i,:]*P */ 294 PetscCall(PetscCalloc1(pN, &ptap->apa)); 295 296 /* set and assemble symbolic parallel matrix C */ 297 /*---------------------------------------------*/ 298 PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE)); 299 PetscCall(MatSetBlockSizesFromMats(C, A, P)); 300 301 PetscCall(MatGetType(A, &mtype)); 302 PetscCall(MatSetType(C, mtype)); 303 PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz)); 304 MatPreallocateEnd(dnz, onz); 305 306 PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api)); 307 PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY)); 308 PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY)); 309 PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE)); 310 311 C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable; 312 C->ops->productnumeric = MatProductNumeric_AB; 313 314 /* attach the supporting struct to C for reuse */ 315 C->product->data = ptap; 316 C->product->destroy = MatDestroy_MPIAIJ_MatMatMult; 317 318 /* set MatInfo */ 319 afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5; 320 if (afill < 1.0) afill = 1.0; 321 C->info.mallocs = nspacedouble; 322 C->info.fill_ratio_given = fill; 323 C->info.fill_ratio_needed = afill; 324 325 #if defined(PETSC_USE_INFO) 326 if (api[am]) { 327 PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, 
(double)afill)); 328 PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill)); 329 } else { 330 PetscCall(PetscInfo(C, "Empty matrix product\n")); 331 } 332 #endif 333 PetscFunctionReturn(0); 334 } 335 336 /* ------------------------------------------------------- */ 337 static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat, Mat, PetscReal, Mat); 338 static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat, Mat, Mat); 339 340 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C) 341 { 342 Mat_Product *product = C->product; 343 Mat A = product->A, B = product->B; 344 345 PetscFunctionBegin; 346 if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) 347 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend); 348 349 C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense; 350 C->ops->productsymbolic = MatProductSymbolic_AB; 351 PetscFunctionReturn(0); 352 } 353 /* -------------------------------------------------------------------- */ 354 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C) 355 { 356 Mat_Product *product = C->product; 357 Mat A = product->A, B = product->B; 358 359 PetscFunctionBegin; 360 if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend) 361 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend); 362 363 C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense; 364 C->ops->productsymbolic = MatProductSymbolic_AtB; 365 PetscFunctionReturn(0); 366 } 367 368 /* --------------------------------------------------------------------- */ 369 
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C) 370 { 371 Mat_Product *product = C->product; 372 373 PetscFunctionBegin; 374 switch (product->type) { 375 case MATPRODUCT_AB: 376 PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C)); 377 break; 378 case MATPRODUCT_AtB: 379 PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C)); 380 break; 381 default: 382 break; 383 } 384 PetscFunctionReturn(0); 385 } 386 /* ------------------------------------------------------- */ 387 388 typedef struct { 389 Mat workB, workB1; 390 MPI_Request *rwaits, *swaits; 391 PetscInt nsends, nrecvs; 392 MPI_Datatype *stype, *rtype; 393 PetscInt blda; 394 } MPIAIJ_MPIDense; 395 396 PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx) 397 { 398 MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense *)ctx; 399 PetscInt i; 400 401 PetscFunctionBegin; 402 PetscCall(MatDestroy(&contents->workB)); 403 PetscCall(MatDestroy(&contents->workB1)); 404 for (i = 0; i < contents->nsends; i++) PetscCallMPI(MPI_Type_free(&contents->stype[i])); 405 for (i = 0; i < contents->nrecvs; i++) PetscCallMPI(MPI_Type_free(&contents->rtype[i])); 406 PetscCall(PetscFree4(contents->stype, contents->rtype, contents->rwaits, contents->swaits)); 407 PetscCall(PetscFree(contents)); 408 PetscFunctionReturn(0); 409 } 410 411 static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A, Mat B, PetscReal fill, Mat C) 412 { 413 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)A->data; 414 PetscInt nz = aij->B->cmap->n, nsends, nrecvs, i, nrows_to, j, blda, m, M, n, N; 415 MPIAIJ_MPIDense *contents; 416 VecScatter ctx = aij->Mvctx; 417 PetscInt Am = A->rmap->n, Bm = B->rmap->n, BN = B->cmap->N, Bbn, Bbn1, bs, nrows_from, numBb; 418 MPI_Comm comm; 419 MPI_Datatype type1, *stype, *rtype; 420 const PetscInt *sindices, *sstarts, *rstarts; 421 PetscMPIInt *disp; 422 PetscBool cisdense; 423 424 PetscFunctionBegin; 425 MatCheckProduct(C, 4); 426 PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, 
"Product data not empty"); 427 PetscCall(PetscObjectGetComm((PetscObject)A, &comm)); 428 PetscCall(PetscObjectBaseTypeCompare((PetscObject)C, MATMPIDENSE, &cisdense)); 429 if (!cisdense) PetscCall(MatSetType(C, ((PetscObject)B)->type_name)); 430 PetscCall(MatGetLocalSize(C, &m, &n)); 431 PetscCall(MatGetSize(C, &M, &N)); 432 if (m == PETSC_DECIDE || n == PETSC_DECIDE || M == PETSC_DECIDE || N == PETSC_DECIDE) PetscCall(MatSetSizes(C, Am, B->cmap->n, A->rmap->N, BN)); 433 PetscCall(MatSetBlockSizesFromMats(C, A, B)); 434 PetscCall(MatSetUp(C)); 435 PetscCall(MatDenseGetLDA(B, &blda)); 436 PetscCall(PetscNew(&contents)); 437 438 PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL)); 439 PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL)); 440 441 /* Create column block of B and C for memory scalability when BN is too large */ 442 /* Estimate Bbn, column size of Bb */ 443 if (nz) { 444 Bbn1 = 2 * Am * BN / nz; 445 if (!Bbn1) Bbn1 = 1; 446 } else Bbn1 = BN; 447 448 bs = PetscAbs(B->cmap->bs); 449 Bbn1 = Bbn1 / bs * bs; /* Bbn1 is a multiple of bs */ 450 if (Bbn1 > BN) Bbn1 = BN; 451 PetscCallMPI(MPI_Allreduce(&Bbn1, &Bbn, 1, MPIU_INT, MPI_MAX, comm)); 452 453 /* Enable runtime option for Bbn */ 454 PetscOptionsBegin(comm, ((PetscObject)C)->prefix, "MatMatMult", "Mat"); 455 PetscCall(PetscOptionsInt("-matmatmult_Bbn", "Number of columns in Bb", "MatMatMult", Bbn, &Bbn, NULL)); 456 PetscOptionsEnd(); 457 Bbn = PetscMin(Bbn, BN); 458 459 if (Bbn > 0 && Bbn < BN) { 460 numBb = BN / Bbn; 461 Bbn1 = BN - numBb * Bbn; 462 } else numBb = 0; 463 464 if (numBb) { 465 PetscCall(PetscInfo(C, "use Bb, BN=%" PetscInt_FMT ", Bbn=%" PetscInt_FMT "; numBb=%" PetscInt_FMT "\n", BN, Bbn, numBb)); 466 if (Bbn1) { /* Create workB1 for the remaining columns */ 467 PetscCall(PetscInfo(C, "use Bb1, BN=%" PetscInt_FMT ", Bbn1=%" PetscInt_FMT "\n", BN, Bbn1)); 468 /* Create work matrix 
used to store off processor rows of B needed for local product */ 469 PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn1, NULL, &contents->workB1)); 470 } else contents->workB1 = NULL; 471 } 472 473 /* Create work matrix used to store off processor rows of B needed for local product */ 474 PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn, NULL, &contents->workB)); 475 476 /* Use MPI derived data type to reduce memory required by the send/recv buffers */ 477 PetscCall(PetscMalloc4(nsends, &stype, nrecvs, &rtype, nrecvs, &contents->rwaits, nsends, &contents->swaits)); 478 contents->stype = stype; 479 contents->nsends = nsends; 480 481 contents->rtype = rtype; 482 contents->nrecvs = nrecvs; 483 contents->blda = blda; 484 485 PetscCall(PetscMalloc1(Bm + 1, &disp)); 486 for (i = 0; i < nsends; i++) { 487 nrows_to = sstarts[i + 1] - sstarts[i]; 488 for (j = 0; j < nrows_to; j++) disp[j] = sindices[sstarts[i] + j]; /* rowB to be sent */ 489 PetscCallMPI(MPI_Type_create_indexed_block(nrows_to, 1, disp, MPIU_SCALAR, &type1)); 490 PetscCallMPI(MPI_Type_create_resized(type1, 0, blda * sizeof(PetscScalar), &stype[i])); 491 PetscCallMPI(MPI_Type_commit(&stype[i])); 492 PetscCallMPI(MPI_Type_free(&type1)); 493 } 494 495 for (i = 0; i < nrecvs; i++) { 496 /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */ 497 nrows_from = rstarts[i + 1] - rstarts[i]; 498 disp[0] = 0; 499 PetscCallMPI(MPI_Type_create_indexed_block(1, nrows_from, disp, MPIU_SCALAR, &type1)); 500 PetscCallMPI(MPI_Type_create_resized(type1, 0, nz * sizeof(PetscScalar), &rtype[i])); 501 PetscCallMPI(MPI_Type_commit(&rtype[i])); 502 PetscCallMPI(MPI_Type_free(&type1)); 503 } 504 505 PetscCall(PetscFree(disp)); 506 PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL)); 507 PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL)); 508 
PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE)); 509 PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY)); 510 PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY)); 511 PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE)); 512 513 C->product->data = contents; 514 C->product->destroy = MatMPIAIJ_MPIDenseDestroy; 515 C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense; 516 PetscFunctionReturn(0); 517 } 518 519 PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat, Mat, Mat, const PetscBool); 520 521 /* 522 Performs an efficient scatter on the rows of B needed by this process; this is 523 a modification of the VecScatterBegin_() routines. 524 525 Input: If Bbidx = 0, uses B = Bb, else B = Bb1, see MatMatMultSymbolic_MPIAIJ_MPIDense() 526 */ 527 528 PetscErrorCode MatMPIDenseScatter(Mat A, Mat B, PetscInt Bbidx, Mat C, Mat *outworkB) 529 { 530 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)A->data; 531 const PetscScalar *b; 532 PetscScalar *rvalues; 533 VecScatter ctx = aij->Mvctx; 534 const PetscInt *sindices, *sstarts, *rstarts; 535 const PetscMPIInt *sprocs, *rprocs; 536 PetscInt i, nsends, nrecvs; 537 MPI_Request *swaits, *rwaits; 538 MPI_Comm comm; 539 PetscMPIInt tag = ((PetscObject)ctx)->tag, ncols = B->cmap->N, nrows = aij->B->cmap->n, nsends_mpi, nrecvs_mpi; 540 MPIAIJ_MPIDense *contents; 541 Mat workB; 542 MPI_Datatype *stype, *rtype; 543 PetscInt blda; 544 545 PetscFunctionBegin; 546 MatCheckProduct(C, 4); 547 PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty"); 548 contents = (MPIAIJ_MPIDense *)C->product->data; 549 PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL /*bs*/)); 550 PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL /*bs*/)); 551 PetscCall(PetscMPIIntCast(nsends, &nsends_mpi)); 552 PetscCall(PetscMPIIntCast(nrecvs, &nrecvs_mpi)); 553 if (Bbidx == 0) 
workB = *outworkB = contents->workB; 554 else workB = *outworkB = contents->workB1; 555 PetscCheck(nrows == workB->rmap->n, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Number of rows of workB %" PetscInt_FMT " not equal to columns of aij->B %d", workB->cmap->n, nrows); 556 swaits = contents->swaits; 557 rwaits = contents->rwaits; 558 559 PetscCall(MatDenseGetArrayRead(B, &b)); 560 PetscCall(MatDenseGetLDA(B, &blda)); 561 PetscCheck(blda == contents->blda, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot reuse an input matrix with lda %" PetscInt_FMT " != %" PetscInt_FMT, blda, contents->blda); 562 PetscCall(MatDenseGetArray(workB, &rvalues)); 563 564 /* Post recv, use MPI derived data type to save memory */ 565 PetscCall(PetscObjectGetComm((PetscObject)C, &comm)); 566 rtype = contents->rtype; 567 for (i = 0; i < nrecvs; i++) PetscCallMPI(MPI_Irecv(rvalues + (rstarts[i] - rstarts[0]), ncols, rtype[i], rprocs[i], tag, comm, rwaits + i)); 568 569 stype = contents->stype; 570 for (i = 0; i < nsends; i++) PetscCallMPI(MPI_Isend(b, ncols, stype[i], sprocs[i], tag, comm, swaits + i)); 571 572 if (nrecvs) PetscCallMPI(MPI_Waitall(nrecvs_mpi, rwaits, MPI_STATUSES_IGNORE)); 573 if (nsends) PetscCallMPI(MPI_Waitall(nsends_mpi, swaits, MPI_STATUSES_IGNORE)); 574 575 PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL)); 576 PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL)); 577 PetscCall(MatDenseRestoreArrayRead(B, &b)); 578 PetscCall(MatDenseRestoreArray(workB, &rvalues)); 579 PetscFunctionReturn(0); 580 } 581 582 static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A, Mat B, Mat C) 583 { 584 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)A->data; 585 Mat_MPIDense *bdense = (Mat_MPIDense *)B->data; 586 Mat_MPIDense *cdense = (Mat_MPIDense *)C->data; 587 Mat workB; 588 MPIAIJ_MPIDense *contents; 589 590 PetscFunctionBegin; 591 MatCheckProduct(C, 3); 592 
PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty"); 593 contents = (MPIAIJ_MPIDense *)C->product->data; 594 /* diagonal block of A times all local rows of B */ 595 /* TODO: this calls a symbolic multiplication every time, which could be avoided */ 596 PetscCall(MatMatMult(aij->A, bdense->A, MAT_REUSE_MATRIX, PETSC_DEFAULT, &cdense->A)); 597 if (contents->workB->cmap->n == B->cmap->N) { 598 /* get off processor parts of B needed to complete C=A*B */ 599 PetscCall(MatMPIDenseScatter(A, B, 0, C, &workB)); 600 601 /* off-diagonal block of A times nonlocal rows of B */ 602 PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE)); 603 } else { 604 Mat Bb, Cb; 605 PetscInt BN = B->cmap->N, n = contents->workB->cmap->n, i; 606 PetscBool ccpu; 607 608 PetscCheck(n > 0, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Column block size %" PetscInt_FMT " must be positive", n); 609 /* Prevent from unneeded copies back and forth from the GPU 610 when getting and restoring the submatrix 611 We need a proper GPU code for AIJ * dense in parallel */ 612 PetscCall(MatBoundToCPU(C, &ccpu)); 613 PetscCall(MatBindToCPU(C, PETSC_TRUE)); 614 for (i = 0; i < BN; i += n) { 615 PetscCall(MatDenseGetSubMatrix(B, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Bb)); 616 PetscCall(MatDenseGetSubMatrix(C, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Cb)); 617 618 /* get off processor parts of B needed to complete C=A*B */ 619 PetscCall(MatMPIDenseScatter(A, Bb, (i + n) > BN, C, &workB)); 620 621 /* off-diagonal block of A times nonlocal rows of B */ 622 cdense = (Mat_MPIDense *)Cb->data; 623 PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE)); 624 PetscCall(MatDenseRestoreSubMatrix(B, &Bb)); 625 PetscCall(MatDenseRestoreSubMatrix(C, &Cb)); 626 } 627 PetscCall(MatBindToCPU(C, ccpu)); 628 } 629 PetscFunctionReturn(0); 630 } 631 632 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat 
A, Mat P, Mat C) 633 { 634 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data; 635 Mat_SeqAIJ *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data; 636 Mat_SeqAIJ *cd = (Mat_SeqAIJ *)(c->A)->data, *co = (Mat_SeqAIJ *)(c->B)->data; 637 PetscInt *adi = ad->i, *adj, *aoi = ao->i, *aoj; 638 PetscScalar *ada, *aoa, *cda = cd->a, *coa = co->a; 639 Mat_SeqAIJ *p_loc, *p_oth; 640 PetscInt *pi_loc, *pj_loc, *pi_oth, *pj_oth, *pj; 641 PetscScalar *pa_loc, *pa_oth, *pa, valtmp, *ca; 642 PetscInt cm = C->rmap->n, anz, pnz; 643 Mat_APMPI *ptap; 644 PetscScalar *apa_sparse; 645 const PetscScalar *dummy; 646 PetscInt *api, *apj, *apJ, i, j, k, row; 647 PetscInt cstart = C->cmap->rstart; 648 PetscInt cdnz, conz, k0, k1, nextp; 649 MPI_Comm comm; 650 PetscMPIInt size; 651 652 PetscFunctionBegin; 653 MatCheckProduct(C, 3); 654 ptap = (Mat_APMPI *)C->product->data; 655 PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtAP cannot be computed. Missing data"); 656 PetscCall(PetscObjectGetComm((PetscObject)C, &comm)); 657 PetscCallMPI(MPI_Comm_size(comm, &size)); 658 PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. 
Do not call MatProductClear()"); 659 660 /* flag CPU mask for C */ 661 #if defined(PETSC_HAVE_DEVICE) 662 if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU; 663 if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU; 664 if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU; 665 #endif 666 apa_sparse = ptap->apa; 667 668 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 669 /*-----------------------------------------------------*/ 670 /* update numerical values of P_oth and P_loc */ 671 PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth)); 672 PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc)); 673 674 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 675 /*----------------------------------------------------------*/ 676 /* get data from symbolic products */ 677 p_loc = (Mat_SeqAIJ *)(ptap->P_loc)->data; 678 pi_loc = p_loc->i; 679 pj_loc = p_loc->j; 680 pa_loc = p_loc->a; 681 if (size > 1) { 682 p_oth = (Mat_SeqAIJ *)(ptap->P_oth)->data; 683 pi_oth = p_oth->i; 684 pj_oth = p_oth->j; 685 pa_oth = p_oth->a; 686 } else { 687 p_oth = NULL; 688 pi_oth = NULL; 689 pj_oth = NULL; 690 pa_oth = NULL; 691 } 692 693 /* trigger copy to CPU */ 694 PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy)); 695 PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy)); 696 PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy)); 697 PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy)); 698 api = ptap->api; 699 apj = ptap->apj; 700 for (i = 0; i < cm; i++) { 701 apJ = apj + api[i]; 702 703 /* diagonal portion of A */ 704 anz = adi[i + 1] - adi[i]; 705 adj = ad->j + adi[i]; 706 ada = ad->a + adi[i]; 707 for (j = 0; j < anz; j++) { 708 row = adj[j]; 709 pnz = pi_loc[row + 1] - pi_loc[row]; 710 pj = pj_loc + pi_loc[row]; 711 pa = pa_loc + pi_loc[row]; 712 /* perform sparse axpy */ 713 valtmp = ada[j]; 
/* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
/*
  Symbolic phase of C = A*P for two MPIAIJ matrices (the "scalable" algorithm).

  Gathers the off-process rows of P referenced by the local part of A (P_oth)
  and the local rows of P (P_loc), then computes the sparsity pattern of
  AP = A_diag*P_loc + A_off*P_oth one row at a time using a condensed sorted
  linked list whose size grows on demand (this is what keeps memory O(row)
  instead of O(global columns)).  Preallocates and assembles the symbolic
  parallel matrix C and attaches the supporting Mat_APMPI struct to
  C->product so the numeric phase can reuse it.

  Collective on the communicator of A.
*/
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A, Mat P, PetscReal fill, Mat C)
{
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat_APMPI         *ptap;
  PetscFreeSpaceList free_space = NULL, current_space = NULL;
  Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data, *p_loc, *p_oth;
  PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
  PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
  PetscInt           i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi, *lnk, apnz_max = 1;
  PetscInt           am = A->rmap->n, pn = P->cmap->n, pm = P->rmap->n, lsize = pn + 20;
  PetscReal          afill;
  MatType            mtype;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /* create struct Mat_APMPI and attached it to C later */
  PetscCall(PetscNew(&ptap));

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

  /* get P_loc by taking all local rows of P */
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

  p_loc  = (Mat_SeqAIJ *)(ptap->P_loc)->data;
  pi_loc = p_loc->i;
  pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ *)(ptap->P_oth)->data;
    pi_oth = p_oth->i;
    pj_oth = p_oth->j;
  } else {
    /* uniprocessor run: A has no off-diagonal block, so there is no P_oth */
    p_oth  = NULL;
    pi_oth = NULL;
    pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  PetscCall(PetscMalloc1(am + 2, &api));
  ptap->api = api;
  api[0]    = 0;

  /* lsize is only an initial guess; PetscLLCondensedExpand_Scalable() grows the list on demand below */
  PetscCall(PetscLLCondensedCreate_Scalable(lsize, &lnk));

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
  current_space = free_space;
  MatPreallocateBegin(comm, am, pn, dnz, onz);
  for (i = 0; i < am; i++) {
    /* diagonal portion of A */
    nzi = adi[i + 1] - adi[i];
    for (j = 0; j < nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row + 1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* Expand list if it is not long enough */
      if (pnz + apnz_max > lsize) {
        lsize = pnz + apnz_max;
        PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
      }
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
      apnz       = *lnk; /* The first element in the list is the number of items in the list */
      api[i + 1] = api[i] + apnz;
      if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
    }
    /* off-diagonal portion of A */
    nzi = aoi[i + 1] - aoi[i];
    for (j = 0; j < nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row + 1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      /* Expand list if it is not long enough */
      if (pnz + apnz_max > lsize) {
        lsize = pnz + apnz_max;
        PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
      }
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
      apnz       = *lnk; /* The first element in the list is the number of items in the list */
      api[i + 1] = api[i] + apnz;
      if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
    }

    /* add missing diagonal entry */
    if (C->force_diagonals) {
      j = i + rstart; /* column index */
      PetscCall(PetscLLCondensedAddSorted_Scalable(1, &j, lnk));
    }

    /* final row length once both A blocks (and an optional diagonal) were merged */
    apnz       = *lnk;
    api[i + 1] = api[i] + apnz;
    if (apnz > apnz_max) apnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < apnz) {
      /* NOTE(review): '&current_space' reconstructed from a mojibake ('&curren;t_space') in the extracted source */
      PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    PetscCall(PetscLLCondensedClean_Scalable(apnz, current_space->array, lnk));
    PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));

    current_space->array += apnz;
    current_space->local_used += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  PetscCall(PetscMalloc1(api[am] + 1, &ptap->apj));
  apj = ptap->apj;
  PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
  PetscCall(PetscLLCondensedDestroy_Scalable(lnk));

  /* create and assemble symbolic parallel matrix C */
  /*----------------------------------------------------*/
  PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
  PetscCall(MatSetBlockSizesFromMats(C, A, P));
  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));
  PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
  MatPreallocateEnd(dnz, onz);

  /* malloc apa for assembly C; apnz_max bounds the longest product row */
  PetscCall(PetscCalloc1(apnz_max, &ptap->apa));

  PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  C->ops->productnumeric = MatProductNumeric_AB;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;

  /* set MatInfo */
  afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  C->info.mallocs           = nspacedouble;
  C->info.fill_ratio_given  = fill;
  C->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
    PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill));
  } else {
    PetscCall(PetscInfo(C, "Empty matrix product\n"));
  }
#endif
  PetscFunctionReturn(0);
}
PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill)); 936 PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill)); 937 } else { 938 PetscCall(PetscInfo(C, "Empty matrix product\n")); 939 } 940 #endif 941 PetscFunctionReturn(0); 942 } 943 944 /* This function is needed for the seqMPI matrix-matrix multiplication. */ 945 /* Three input arrays are merged to one output array. The size of the */ 946 /* output array is also output. Duplicate entries only show up once. */ 947 static void Merge3SortedArrays(PetscInt size1, PetscInt *in1, PetscInt size2, PetscInt *in2, PetscInt size3, PetscInt *in3, PetscInt *size4, PetscInt *out) 948 { 949 int i = 0, j = 0, k = 0, l = 0; 950 951 /* Traverse all three arrays */ 952 while (i < size1 && j < size2 && k < size3) { 953 if (in1[i] < in2[j] && in1[i] < in3[k]) { 954 out[l++] = in1[i++]; 955 } else if (in2[j] < in1[i] && in2[j] < in3[k]) { 956 out[l++] = in2[j++]; 957 } else if (in3[k] < in1[i] && in3[k] < in2[j]) { 958 out[l++] = in3[k++]; 959 } else if (in1[i] == in2[j] && in1[i] < in3[k]) { 960 out[l++] = in1[i]; 961 i++, j++; 962 } else if (in1[i] == in3[k] && in1[i] < in2[j]) { 963 out[l++] = in1[i]; 964 i++, k++; 965 } else if (in3[k] == in2[j] && in2[j] < in1[i]) { 966 out[l++] = in2[j]; 967 k++, j++; 968 } else if (in1[i] == in2[j] && in1[i] == in3[k]) { 969 out[l++] = in1[i]; 970 i++, j++, k++; 971 } 972 } 973 974 /* Traverse two remaining arrays */ 975 while (i < size1 && j < size2) { 976 if (in1[i] < in2[j]) { 977 out[l++] = in1[i++]; 978 } else if (in1[i] > in2[j]) { 979 out[l++] = in2[j++]; 980 } else { 981 out[l++] = in1[i]; 982 i++, j++; 983 } 984 } 985 986 while (i < size1 && k < size3) { 987 if (in1[i] < in3[k]) { 988 out[l++] = in1[i++]; 989 } else if (in1[i] > in3[k]) { 990 out[l++] = in3[k++]; 991 } else { 992 out[l++] = in1[i]; 993 i++, k++; 994 } 995 } 996 997 while (k < size3 && j < 
/* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */
/* adds up the products. Two of these three multiplications are performed with existing (sequential) */
/* matrix-matrix multiplications. */
/*
  Symbolic phase of C = A*P ("seqmpi" algorithm).  Splits the product into
    A_diag*P_loc_diag  (done with the sequential "sorted" MatProduct)
    A_diag*P_loc_off   (done here with a condensed linked list)
    A_off *P_oth       (done with MatMatMultSymbolic_SeqAIJ_SeqAIJ)
  and merges the three row patterns with Merge3SortedArrays() to obtain the
  pattern of A*P.  Attaches the supporting Mat_APMPI struct to C->product.

  Collective on the communicator of A.
*/
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
{
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat_APMPI         *ptap;
  PetscFreeSpaceList free_space_diag = NULL, current_space = NULL;
  Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data, *p_loc;
  Mat_MPIAIJ        *p = (Mat_MPIAIJ *)P->data;
  Mat_SeqAIJ        *adpd_seq, *p_off, *aopoth_seq;
  PetscInt           adponz, adpdnz;
  PetscInt          *pi_loc, *dnz, *onz;
  PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, rstart = A->rmap->rstart;
  PetscInt          *lnk, i, i1 = 0, pnz, row, *adpoi, *adpoj, *api, *adpoJ, *aopJ, *apJ, *Jptr, aopnz, nspacedouble = 0, j, nzi, *apj, apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
  PetscInt           am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n, p_colstart, p_colend;
  PetscBT            lnkbt;
  PetscReal          afill;
  PetscMPIInt        rank;
  Mat                adpd, aopoth;
  MatType            mtype;
  const char        *prefix;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend));

  /* create struct Mat_APMPI and attached it to C later */
  PetscCall(PetscNew(&ptap));

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

  /* get P_loc by taking all local rows of P */
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

  p_loc  = (Mat_SeqAIJ *)(ptap->P_loc)->data;
  pi_loc = p_loc->i;

  /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
  PetscCall(PetscMalloc1(am + 2, &api));
  PetscCall(PetscMalloc1(am + 2, &adpoi));

  adpoi[0]  = 0;
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
  PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));
  MatPreallocateBegin(comm, am, pn, dnz, onz);

  /* Symbolic calc of A_loc_diag * P_loc_diag */
  PetscCall(MatGetOptionsPrefix(A, &prefix));
  PetscCall(MatProductCreate(a->A, p->A, NULL, &adpd));
  PetscCall(MatGetOptionsPrefix(A, &prefix));
  PetscCall(MatSetOptionsPrefix(adpd, prefix));
  PetscCall(MatAppendOptionsPrefix(adpd, "inner_diag_"));

  PetscCall(MatProductSetType(adpd, MATPRODUCT_AB));
  PetscCall(MatProductSetAlgorithm(adpd, "sorted"));
  PetscCall(MatProductSetFill(adpd, fill));
  PetscCall(MatProductSetFromOptions(adpd));

  /* the inner product must honor the outer product's forced diagonals */
  adpd->force_diagonals = C->force_diagonals;
  PetscCall(MatProductSymbolic(adpd));

  adpd_seq = (Mat_SeqAIJ *)((adpd)->data);
  adpdi    = adpd_seq->i;
  adpdj    = adpd_seq->j;
  p_off    = (Mat_SeqAIJ *)((p->B)->data);
  poff_i   = p_off->i;
  poff_j   = p_off->j;

  /* j_temp stores indices of a result row before they are added to the linked list */
  PetscCall(PetscMalloc1(pN + 2, &j_temp));

  /* Symbolic calc of the A_diag * p_loc_off */
  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space_diag));
  current_space = free_space_diag;

  for (i = 0; i < am; i++) {
    /* A_diag * P_loc_off */
    nzi = adi[i + 1] - adi[i];
    for (j = 0; j < nzi; j++) {
      row  = *adj++;
      pnz  = poff_i[row + 1] - poff_i[row];
      Jptr = poff_j + poff_i[row];
      /* map the compressed off-diagonal column indices to global columns */
      for (i1 = 0; i1 < pnz; i1++) j_temp[i1] = p->garray[Jptr[i1]];
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted(pnz, j_temp, lnk, lnkbt));
    }

    adponz       = lnk[0];
    adpoi[i + 1] = adpoi[i] + adponz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < adponz) {
      /* NOTE(review): '&current_space' reconstructed from a mojibake ('&curren;t_space') in the extracted source */
      PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(adponz, current_space->total_array_size), &current_space));
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    PetscCall(PetscLLCondensedClean(pN, adponz, current_space->array, lnk, lnkbt));

    current_space->array += adponz;
    current_space->local_used += adponz;
    current_space->local_remaining -= adponz;
  }

  /* Symbolic calc of A_off * P_oth */
  PetscCall(MatSetOptionsPrefix(a->B, prefix));
  PetscCall(MatAppendOptionsPrefix(a->B, "inner_offdiag_"));
  PetscCall(MatCreate(PETSC_COMM_SELF, &aopoth));
  PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth));
  aopoth_seq = (Mat_SeqAIJ *)((aopoth)->data);
  aopothi    = aopoth_seq->i;
  aopothj    = aopoth_seq->j;

  /* Allocate space for apj, adpj, aopj, ... */
  /* destroy lists of free space and other temporary array(s) */

  PetscCall(PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am] + 2, &ptap->apj));
  PetscCall(PetscMalloc1(adpoi[am] + 2, &adpoj));

  /* Copy from linked list to j-array */
  PetscCall(PetscFreeSpaceContiguous(&free_space_diag, adpoj));
  PetscCall(PetscLLDestroy(lnk, lnkbt));

  adpoJ = adpoj;
  adpdJ = adpdj;
  aopJ  = aopothj;
  apj   = ptap->apj;
  apJ   = apj; /* still empty */

  /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
  /* A_diag * P_loc_diag to get A*P */
  for (i = 0; i < am; i++) {
    aopnz  = aopothi[i + 1] - aopothi[i];
    adponz = adpoi[i + 1] - adpoi[i];
    adpdnz = adpdi[i + 1] - adpdi[i];

    /* Correct indices from A_diag*P_diag */
    for (i1 = 0; i1 < adpdnz; i1++) adpdJ[i1] += p_colstart;
    /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
    Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
    PetscCall(MatPreallocateSet(i + rstart, apnz, apJ, dnz, onz));

    aopJ += aopnz;
    adpoJ += adponz;
    adpdJ += adpdnz;
    apJ += apnz;
    api[i + 1] = api[i] + apnz;
  }

  /* malloc apa to store dense row A[i,:]*P */
  PetscCall(PetscCalloc1(pN + 2, &ptap->apa));

  /* create and assemble symbolic parallel matrix C */
  PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
  PetscCall(MatSetBlockSizesFromMats(C, A, P));
  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));
  PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
  MatPreallocateEnd(dnz, onz);

  PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

  /* numeric phase uses the dense-row (nonscalable) kernel */
  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
  C->ops->productnumeric = MatProductNumeric_AB;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;

  /* set MatInfo */
  afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  C->info.mallocs           = nspacedouble;
  C->info.fill_ratio_given  = fill;
  C->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
    PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill));
  } else {
    PetscCall(PetscInfo(C, "Empty matrix product\n"));
  }
#endif

  PetscCall(MatDestroy(&aopoth));
  PetscCall(MatDestroy(&adpd));
  PetscCall(PetscFree(j_temp));
  PetscCall(PetscFree(adpoj));
  PetscCall(PetscFree(adpoi));
  PetscFunctionReturn(0);
}
C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable; 1194 C->ops->productnumeric = MatProductNumeric_AB; 1195 1196 /* attach the supporting struct to C for reuse */ 1197 C->product->data = ptap; 1198 C->product->destroy = MatDestroy_MPIAIJ_MatMatMult; 1199 1200 /* set MatInfo */ 1201 afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5; 1202 if (afill < 1.0) afill = 1.0; 1203 C->info.mallocs = nspacedouble; 1204 C->info.fill_ratio_given = fill; 1205 C->info.fill_ratio_needed = afill; 1206 1207 #if defined(PETSC_USE_INFO) 1208 if (api[am]) { 1209 PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill)); 1210 PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill)); 1211 } else { 1212 PetscCall(PetscInfo(C, "Empty matrix product\n")); 1213 } 1214 #endif 1215 1216 PetscCall(MatDestroy(&aopoth)); 1217 PetscCall(MatDestroy(&adpd)); 1218 PetscCall(PetscFree(j_temp)); 1219 PetscCall(PetscFree(adpoj)); 1220 PetscCall(PetscFree(adpoi)); 1221 PetscFunctionReturn(0); 1222 } 1223 1224 /*-------------------------------------------------------------------------*/ 1225 /* This routine only works when scall=MAT_REUSE_MATRIX! */ 1226 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P, Mat A, Mat C) 1227 { 1228 Mat_APMPI *ptap; 1229 Mat Pt; 1230 1231 PetscFunctionBegin; 1232 MatCheckProduct(C, 3); 1233 ptap = (Mat_APMPI *)C->product->data; 1234 PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtAP cannot be computed. Missing data"); 1235 PetscCheck(ptap->Pt, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. 
/* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
/*
  Symbolic phase of C = P^T * A (nonscalable variant).

  Forms Rd = Pd^T and Ro = Po^T, the local matrix A_loc, and the local
  products C_loc = Rd*A_loc and C_oth = Ro*A_loc.  The rows of C_oth belong
  to other ranks, so their i/j structure is shipped to the owners with
  nonblocking MPI sends/recvs (tags tagi/tagj); each rank then merges
  C_loc with the received rows via a condensed linked list to preallocate
  and assemble the symbolic parallel C.  The Mat_APMPI struct holding
  Rd, Ro, A_loc, C_loc, C_oth is attached to C->product for the numeric phase.

  NOTE(review): the interleaving of Isend/Irecv with the local symbolic
  products is deliberate communication/computation overlap - do not reorder.
*/
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, PetscReal fill, Mat C)
{
  Mat_APMPI              *ptap;
  Mat_MPIAIJ             *p = (Mat_MPIAIJ *)P->data;
  MPI_Comm                comm;
  PetscMPIInt             size, rank;
  PetscFreeSpaceList      free_space = NULL, current_space = NULL;
  PetscInt                pn = P->cmap->n, aN = A->cmap->N, an = A->cmap->n;
  PetscInt               *lnk, i, k, nsend, rstart;
  PetscBT                 lnkbt;
  PetscMPIInt             tagi, tagj, *len_si, *len_s, *len_ri, nrecv;
  PETSC_UNUSED PetscMPIInt icompleted = 0;
  PetscInt              **buf_rj, **buf_ri, **buf_ri_k, row, ncols, *cols;
  PetscInt                len, proc, *dnz, *onz, *owners, nzi;
  PetscInt                nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
  MPI_Request            *swaits, *rwaits;
  MPI_Status             *sstatus, rstatus;
  PetscLayout             rowmap;
  PetscInt               *owners_co, *coi, *coj; /* i and j array of (p->B)^T*A*P - used in the communication */
  PetscMPIInt            *len_r, *id_r; /* array of length of comm->size, store send/recv matrix values */
  PetscInt               *Jptr, *prmap = p->garray, con, j, Crmax;
  Mat_SeqAIJ             *a_loc, *c_loc, *c_oth;
  PetscTable              ta;
  MatType                 mtype;
  const char             *prefix;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));

  /* create symbolic parallel matrix C */
  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));

  C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;

  /* create struct Mat_APMPI and attached it to C later */
  PetscCall(PetscNew(&ptap));

  /* (0) compute Rd = Pd^T, Ro = Po^T */
  /* --------------------------------- */
  PetscCall(MatTranspose(p->A, MAT_INITIAL_MATRIX, &ptap->Rd));
  PetscCall(MatTranspose(p->B, MAT_INITIAL_MATRIX, &ptap->Ro));

  /* (1) compute symbolic A_loc */
  /* ---------------------------*/
  PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &ptap->A_loc));

  /* (2-1) compute symbolic C_oth = Ro*A_loc */
  /* ------------------------------------ */
  PetscCall(MatGetOptionsPrefix(A, &prefix));
  PetscCall(MatSetOptionsPrefix(ptap->Ro, prefix));
  PetscCall(MatAppendOptionsPrefix(ptap->Ro, "inner_offdiag_"));
  PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_oth));
  PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro, ptap->A_loc, fill, ptap->C_oth));

  /* (3) send coj of C_oth to other processors */
  /* ------------------------------------------ */
  /* determine row ownership */
  PetscCall(PetscLayoutCreate(comm, &rowmap));
  rowmap->n  = pn;
  rowmap->bs = 1;
  PetscCall(PetscLayoutSetUp(rowmap));
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  PetscCall(PetscMalloc4(size, &len_s, size, &len_si, size, &sstatus, size + 2, &owners_co));
  PetscCall(PetscArrayzero(len_s, size));
  PetscCall(PetscArrayzero(len_si, size));

  c_oth = (Mat_SeqAIJ *)ptap->C_oth->data;
  coi   = c_oth->i;
  coj   = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  /* prmap[] (p->garray) is sorted, so a single sweep assigns each Co row to its owner */
  for (i = 0; i < con; i++) {
    while (prmap[i] >= owners[proc + 1]) proc++;
    len_si[proc]++;                     /* num of rows in Co(=Pt*A) to be sent to [proc] */
    len_s[proc] += coi[i + 1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc = 0; proc < size; proc++) {
    owners_co[proc + 1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2 * (len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &nrecv));
  PetscCall(PetscGatherMessageLengths2(comm, nsend, nrecv, len_s, len_si, &id_r, &len_r, &len_ri));

  /* post the Irecv and Isend of coj */
  PetscCall(PetscCommGetNewTag(comm, &tagj));
  PetscCall(PetscPostIrecvInt(comm, tagj, nrecv, id_r, len_r, &buf_rj, &rwaits));
  PetscCall(PetscMalloc1(nsend + 1, &swaits));
  for (proc = 0, k = 0; proc < size; proc++) {
    if (!len_s[proc]) continue;
    i = owners_co[proc];
    PetscCallMPI(MPI_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*A_loc (overlapped with the sends above) */
  /* ---------------------------------------- */
  PetscCall(MatSetOptionsPrefix(ptap->Rd, prefix));
  PetscCall(MatAppendOptionsPrefix(ptap->Rd, "inner_diag_"));
  PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_loc));
  PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd, ptap->A_loc, fill, ptap->C_loc));
  c_loc = (Mat_SeqAIJ *)ptap->C_loc->data;

  /* receives coj are complete */
  for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
  PetscCall(PetscFree(rwaits));
  if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));

  /* add received column indices into ta to update Crmax */
  a_loc = (Mat_SeqAIJ *)(ptap->A_loc)->data;

  /* create and initialize a linked list */
  PetscCall(PetscTableCreate(an, aN, &ta)); /* for compute Crmax */
  MatRowMergeMax_SeqAIJ(a_loc, ptap->A_loc->rmap->N, ta);

  for (k = 0; k < nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j = 0; j < len_r[k]; j++) PetscCall(PetscTableAdd(ta, *(Jptr + j) + 1, 1, INSERT_VALUES));
  }
  PetscCall(PetscTableGetCount(ta, &Crmax)); /* Crmax = upper bound on nnz of any row of C */
  PetscCall(PetscTableDestroy(&ta));

  /* (4) send and recv coi */
  /*-----------------------*/
  PetscCall(PetscCommGetNewTag(comm, &tagi));
  PetscCall(PetscPostIrecvInt(comm, tagi, nrecv, id_r, len_ri, &buf_ri, &rwaits));
  PetscCall(PetscMalloc1(len + 1, &buf_s));
  buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
  for (proc = 0, k = 0; proc < size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc] / 2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows + 1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
      nzi                 = coi[i + 1] - coi[i];
      buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    PetscCallMPI(MPI_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
    k++;
    buf_si += len_si[proc];
  }
  for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
  PetscCall(PetscFree(rwaits));
  if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));

  PetscCall(PetscFree4(len_s, len_si, sstatus, owners_co));
  PetscCall(PetscFree(len_ri));
  PetscCall(PetscFree(swaits));
  PetscCall(PetscFree(buf_s));

  /* (5) compute the local portion of C */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding nozeros in each row of C */
  PetscCall(PetscFreeSpaceGet(Crmax, &free_space));
  current_space = free_space;

  PetscCall(PetscMalloc3(nrecv, &buf_ri_k, nrecv, &nextrow, nrecv, &nextci));
  for (k = 0; k < nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  MatPreallocateBegin(comm, pn, an, dnz, onz);
  PetscCall(PetscLLCondensedCreate(Crmax, aN, &lnk, &lnkbt));
  for (i = 0; i < pn; i++) { /* for each local row of C */
    /* add C_loc into C */
    nzi  = c_loc->i[i + 1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));

    /* add received col data into lnk */
    for (k = 0; k < nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) {     /* i-th row */
        nzi  = *(nextci[k] + 1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
        nextrow[k]++;
        nextci[k]++;
      }
    }

    /* add missing diagonal entry */
    if (C->force_diagonals) {
      k = i + owners[rank]; /* column index */
      PetscCall(PetscLLCondensedAddSorted(1, &k, lnk, lnkbt));
    }

    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    PetscCall(PetscLLCondensedClean(aN, nzi, current_space->array, lnk, lnkbt));
    PetscCall(MatPreallocateSet(i + owners[rank], nzi, current_space->array, dnz, onz));
  }
  PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
  PetscCall(PetscLLDestroy(lnk, lnkbt));
  PetscCall(PetscFreeSpaceDestroy(free_space));

  /* local sizes and preallocation */
  PetscCall(MatSetSizes(C, pn, an, PETSC_DETERMINE, PETSC_DETERMINE));
  if (P->cmap->bs > 0) PetscCall(PetscLayoutSetBlockSize(C->rmap, P->cmap->bs));
  if (A->cmap->bs > 0) PetscCall(PetscLayoutSetBlockSize(C->cmap, A->cmap->bs));
  PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
  MatPreallocateEnd(dnz, onz);

  /* add C_loc and C_oth to C (structure only: vals = NULL) */
  PetscCall(MatGetOwnershipRange(C, &rstart, NULL));
  for (i = 0; i < pn; i++) {
    ncols = c_loc->i[i + 1] - c_loc->i[i];
    cols  = c_loc->j + c_loc->i[i];
    row   = rstart + i;
    PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));

    if (C->force_diagonals) PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, 1, (const PetscInt *)&row, NULL, INSERT_VALUES));
  }
  for (i = 0; i < con; i++) {
    ncols = c_oth->i[i + 1] - c_oth->i[i];
    cols  = c_oth->j + c_oth->i[i];
    row   = prmap[i];
    PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
  }
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

  /* members in merge */
  PetscCall(PetscFree(id_r));
  PetscCall(PetscFree(len_r));
  PetscCall(PetscFree(buf_ri[0]));
  PetscCall(PetscFree(buf_ri));
  PetscCall(PetscFree(buf_rj[0]));
  PetscCall(PetscFree(buf_rj));
  PetscCall(PetscLayoutDestroy(&rowmap));

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatDestroy_MPIAIJ_PtAP;
  PetscFunctionReturn(0);
}
/*
  Numeric phase of C = P^T * A (nonscalable variant).  Requires the
  Mat_APMPI struct created by the matching symbolic phase on C->product.

  Refreshes Rd = Pd^T, Ro = Po^T and A_loc, recomputes the local products
  C_loc = Rd*A_loc and C_oth = Ro*A_loc with the cached sequential kernels,
  then scatters both into the parallel C with ADD_VALUES (off-process rows
  of C_oth are routed to their owners by MatSetValues/MatAssembly).
*/
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, Mat C)
{
  Mat_MPIAIJ        *p = (Mat_MPIAIJ *)P->data;
  Mat_SeqAIJ        *c_seq;
  Mat_APMPI         *ptap;
  Mat                A_loc, C_loc, C_oth;
  PetscInt           i, rstart, rend, cm, ncols, row;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  ptap = (Mat_APMPI *)C->product->data;
  PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtAP cannot be computed. Missing data");
  PetscCheck(ptap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
  PetscCall(MatZeroEntries(C));

  /* These matrices are obtained in MatTransposeMatMultSymbolic() */
  /* 1) get R = Pd^T, Ro = Po^T */
  /*----------------------------*/
  PetscCall(MatTransposeSetPrecursor(p->A, ptap->Rd));
  PetscCall(MatTranspose(p->A, MAT_REUSE_MATRIX, &ptap->Rd));
  PetscCall(MatTransposeSetPrecursor(p->B, ptap->Ro));
  PetscCall(MatTranspose(p->B, MAT_REUSE_MATRIX, &ptap->Ro));

  /* 2) compute numeric A_loc */
  /*--------------------------*/
  PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &ptap->A_loc));

  /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
  A_loc = ptap->A_loc;
  PetscCall(((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd, A_loc, ptap->C_loc));
  PetscCall(((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro, A_loc, ptap->C_oth));
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  PetscCall(MatGetOwnershipRange(C, &rstart, &rend));

  /* C_loc -> C: rows map 1:1 onto this rank's rows of C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ *)C_loc->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i = 0; i < cm; i++) {
    ncols = c_seq->i[i + 1] - c_seq->i[i];
    row   = rstart + i;
    PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
    cols += ncols;
    vals += ncols;
  }

  /* Co -> C, off-processor part; p->garray maps compressed rows to global rows */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ *)C_oth->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i = 0; i < cm; i++) {
    ncols = c_seq->i[i + 1] - c_seq->i[i];
    row   = p->garray[i];
    PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
    cols += ncols;
    vals += ncols;
  }
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
  PetscFunctionReturn(0);
}
/*
  Numeric phase of C = P^T * A ("merge" algorithm).  Uses the
  Mat_Merge_SeqsToMPI data built by the matching symbolic phase
  (stored in C->product->data as ap->merge).

  Computes C_seq = P_loc^T * A_loc row by row with sparse outer-product
  axpys, splitting each contribution into the local part (ba, pattern bi/bj)
  and the off-process part (coa, pattern coi/coj).  The off-process values
  are shipped to their owning ranks with nonblocking sends, then each rank
  merges its local values with the received ones and inserts the result
  into the parallel C.
*/
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P, Mat A, Mat C)
{
  Mat_Merge_SeqsToMPI *merge;
  Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data;
  Mat_SeqAIJ          *pd = (Mat_SeqAIJ *)(p->A)->data, *po = (Mat_SeqAIJ *)(p->B)->data;
  Mat_APMPI           *ap;
  PetscInt            *adj;
  PetscInt             i, j, k, anz, pnz, row, *cj, nexta;
  MatScalar           *ada, *ca, valtmp;
  PetscInt             am = A->rmap->n, cm = C->rmap->n, pon = (p->B)->cmap->n;
  MPI_Comm             comm;
  PetscMPIInt          size, rank, taga, *len_s;
  PetscInt            *owners, proc, nrows, **buf_ri_k, **nextrow, **nextci;
  PetscInt           **buf_ri, **buf_rj;
  PetscInt             cnz = 0, *bj_i, *bi, *bj, bnz, nextcj; /* bi,bj,ba: local array of C(mpi mat) */
  MPI_Request         *s_waits, *r_waits;
  MPI_Status          *status;
  MatScalar          **abuf_r, *ba_i, *pA, *coa, *ba;
  const PetscScalar   *dummy;
  PetscInt            *ai, *aj, *coi, *coj, *poJ, *pdJ;
  Mat                  A_loc;
  Mat_SeqAIJ          *a_loc;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  ap = (Mat_APMPI *)C->product->data;
  PetscCheck(ap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
  PetscCheck(ap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));

  merge = ap->merge;

  /* 2) compute numeric C_seq = P_loc^T*A_loc */
  /*------------------------------------------*/
  /* get data from symbolic products */
  coi = merge->coi;
  coj = merge->coj;
  PetscCall(PetscCalloc1(coi[pon] + 1, &coa));
  bi     = merge->bi;
  bj     = merge->bj;
  owners = merge->rowmap->range;
  PetscCall(PetscCalloc1(bi[cm] + 1, &ba));

  /* get A_loc by taking all local rows of A */
  A_loc = ap->A_loc;
  PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &A_loc));
  a_loc = (Mat_SeqAIJ *)(A_loc)->data;
  ai    = a_loc->i;
  aj    = a_loc->j;

  /* trigger copy to CPU */
  PetscCall(MatSeqAIJGetArrayRead(p->A, &dummy));
  PetscCall(MatSeqAIJRestoreArrayRead(p->A, &dummy));
  PetscCall(MatSeqAIJGetArrayRead(p->B, &dummy));
  PetscCall(MatSeqAIJRestoreArrayRead(p->B, &dummy));
  for (i = 0; i < am; i++) {
    anz = ai[i + 1] - ai[i];
    adj = aj + ai[i];
    ada = a_loc->a + ai[i];

    /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
    /*-------------------------------------------------------------*/
    /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
    pnz = po->i[i + 1] - po->i[i];
    poJ = po->j + po->i[i];
    pA  = po->a + po->i[i];
    for (j = 0; j < pnz; j++) {
      row = poJ[j];
      cj  = coj + coi[row];
      ca  = coa + coi[row];
      /* perform sparse axpy: cj is a superset of adj, so scan cj while advancing nexta on matches */
      nexta  = 0;
      valtmp = pA[j];
      for (k = 0; nexta < anz; k++) {
        if (cj[k] == adj[nexta]) {
          ca[k] += valtmp * ada[nexta];
          nexta++;
        }
      }
      PetscCall(PetscLogFlops(2.0 * anz));
    }

    /* put the value into Cd (diagonal part) */
    pnz = pd->i[i + 1] - pd->i[i];
    pdJ = pd->j + pd->i[i];
    pA  = pd->a + pd->i[i];
    for (j = 0; j < pnz; j++) {
      row = pdJ[j];
      cj  = bj + bi[row];
      ca  = ba + bi[row];
      /* perform sparse axpy */
      nexta  = 0;
      valtmp = pA[j];
      for (k = 0; nexta < anz; k++) {
        if (cj[k] == adj[nexta]) {
          ca[k] += valtmp * ada[nexta];
          nexta++;
        }
      }
      PetscCall(PetscLogFlops(2.0 * anz));
    }
  }

  /* 3) send and recv matrix values coa */
  /*------------------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  PetscCall(PetscCommGetNewTag(comm, &taga));
  PetscCall(PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits));

  PetscCall(PetscMalloc2(merge->nsend + 1, &s_waits, size, &status));
  for (proc = 0, k = 0; proc < size; proc++) {
    if (!len_s[proc]) continue;
    i = merge->owners_co[proc];
    PetscCallMPI(MPI_Isend(coa + coi[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k));
    k++;
  }
  if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, r_waits, status));
  if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, s_waits, status));

  PetscCall(PetscFree2(s_waits, status));
  PetscCall(PetscFree(r_waits));
  PetscCall(PetscFree(coa));

  /* 4) insert local Cseq and received values into Cmpi */
  /*----------------------------------------------------*/
  PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
  for (k = 0; k < merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  for (i = 0; i < cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];       /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i + 1] - bi[i];
    /* add received vals into ba */
    for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz    = *(nextci[k] + 1) - *nextci[k];
        cj     = buf_rj[k] + *(nextci[k]);
        ca     = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j = 0; nextcj < cnz; j++) {
          if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++;
        nextci[k]++;
        PetscCall(PetscLogFlops(2.0 * cnz));
      }
    }
    PetscCall(MatSetValues(C, 1, &row, bnz, bj_i, ba_i, INSERT_VALUES));
  }
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));

  PetscCall(PetscFree(ba));
  PetscCall(PetscFree(abuf_r[0]));
  PetscCall(PetscFree(abuf_r));
  PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
  PetscFunctionReturn(0);
}
/* col indices of the i-th row of C */ 1720 ba_i = ba + bi[i]; 1721 bnz = bi[i + 1] - bi[i]; 1722 /* add received vals into ba */ 1723 for (k = 0; k < merge->nrecv; k++) { /* k-th received message */ 1724 /* i-th row */ 1725 if (i == *nextrow[k]) { 1726 cnz = *(nextci[k] + 1) - *nextci[k]; 1727 cj = buf_rj[k] + *(nextci[k]); 1728 ca = abuf_r[k] + *(nextci[k]); 1729 nextcj = 0; 1730 for (j = 0; nextcj < cnz; j++) { 1731 if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */ 1732 ba_i[j] += ca[nextcj++]; 1733 } 1734 } 1735 nextrow[k]++; 1736 nextci[k]++; 1737 PetscCall(PetscLogFlops(2.0 * cnz)); 1738 } 1739 } 1740 PetscCall(MatSetValues(C, 1, &row, bnz, bj_i, ba_i, INSERT_VALUES)); 1741 } 1742 PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY)); 1743 PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY)); 1744 1745 PetscCall(PetscFree(ba)); 1746 PetscCall(PetscFree(abuf_r[0])); 1747 PetscCall(PetscFree(abuf_r)); 1748 PetscCall(PetscFree3(buf_ri_k, nextrow, nextci)); 1749 PetscFunctionReturn(0); 1750 } 1751 1752 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P, Mat A, PetscReal fill, Mat C) 1753 { 1754 Mat A_loc; 1755 Mat_APMPI *ap; 1756 PetscFreeSpaceList free_space = NULL, current_space = NULL; 1757 Mat_MPIAIJ *p = (Mat_MPIAIJ *)P->data, *a = (Mat_MPIAIJ *)A->data; 1758 PetscInt *pdti, *pdtj, *poti, *potj, *ptJ; 1759 PetscInt nnz; 1760 PetscInt *lnk, *owners_co, *coi, *coj, i, k, pnz, row; 1761 PetscInt am = A->rmap->n, pn = P->cmap->n; 1762 MPI_Comm comm; 1763 PetscMPIInt size, rank, tagi, tagj, *len_si, *len_s, *len_ri; 1764 PetscInt **buf_rj, **buf_ri, **buf_ri_k; 1765 PetscInt len, proc, *dnz, *onz, *owners; 1766 PetscInt nzi, *bi, *bj; 1767 PetscInt nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci; 1768 MPI_Request *swaits, *rwaits; 1769 MPI_Status *sstatus, rstatus; 1770 Mat_Merge_SeqsToMPI *merge; 1771 PetscInt *ai, *aj, *Jptr, anz, *prmap = p->garray, pon, nspacedouble = 0, j; 1772 PetscReal afill = 1.0, afill_tmp; 1773 PetscInt rstart = 
P->cmap->rstart, rmax, aN = A->cmap->N, Armax; 1774 Mat_SeqAIJ *a_loc; 1775 PetscTable ta; 1776 MatType mtype; 1777 1778 PetscFunctionBegin; 1779 PetscCall(PetscObjectGetComm((PetscObject)A, &comm)); 1780 /* check if matrix local sizes are compatible */ 1781 PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != P (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart, 1782 A->rmap->rend, P->rmap->rstart, P->rmap->rend); 1783 1784 PetscCallMPI(MPI_Comm_size(comm, &size)); 1785 PetscCallMPI(MPI_Comm_rank(comm, &rank)); 1786 1787 /* create struct Mat_APMPI and attached it to C later */ 1788 PetscCall(PetscNew(&ap)); 1789 1790 /* get A_loc by taking all local rows of A */ 1791 PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &A_loc)); 1792 1793 ap->A_loc = A_loc; 1794 a_loc = (Mat_SeqAIJ *)(A_loc)->data; 1795 ai = a_loc->i; 1796 aj = a_loc->j; 1797 1798 /* determine symbolic Co=(p->B)^T*A - send to others */ 1799 /*----------------------------------------------------*/ 1800 PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj)); 1801 PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->B, &poti, &potj)); 1802 pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors 1803 >= (num of nonzero rows of C_seq) - pn */ 1804 PetscCall(PetscMalloc1(pon + 1, &coi)); 1805 coi[0] = 0; 1806 1807 /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */ 1808 nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(poti[pon], ai[am])); 1809 PetscCall(PetscFreeSpaceGet(nnz, &free_space)); 1810 current_space = free_space; 1811 1812 /* create and initialize a linked list */ 1813 PetscCall(PetscTableCreate(A->cmap->n + a->B->cmap->N, aN, &ta)); 1814 MatRowMergeMax_SeqAIJ(a_loc, am, ta); 1815 PetscCall(PetscTableGetCount(ta, &Armax)); 1816 1817 PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk)); 1818 1819 for (i = 0; i < 
pon; i++) { 1820 pnz = poti[i + 1] - poti[i]; 1821 ptJ = potj + poti[i]; 1822 for (j = 0; j < pnz; j++) { 1823 row = ptJ[j]; /* row of A_loc == col of Pot */ 1824 anz = ai[row + 1] - ai[row]; 1825 Jptr = aj + ai[row]; 1826 /* add non-zero cols of AP into the sorted linked list lnk */ 1827 PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk)); 1828 } 1829 nnz = lnk[0]; 1830 1831 /* If free space is not available, double the total space in the list */ 1832 if (current_space->local_remaining < nnz) { 1833 PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), ¤t_space)); 1834 nspacedouble++; 1835 } 1836 1837 /* Copy data into free space, and zero out denserows */ 1838 PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk)); 1839 1840 current_space->array += nnz; 1841 current_space->local_used += nnz; 1842 current_space->local_remaining -= nnz; 1843 1844 coi[i + 1] = coi[i] + nnz; 1845 } 1846 1847 PetscCall(PetscMalloc1(coi[pon] + 1, &coj)); 1848 PetscCall(PetscFreeSpaceContiguous(&free_space, coj)); 1849 PetscCall(PetscLLCondensedDestroy_Scalable(lnk)); /* must destroy to get a new one for C */ 1850 1851 afill_tmp = (PetscReal)coi[pon] / (poti[pon] + ai[am] + 1); 1852 if (afill_tmp > afill) afill = afill_tmp; 1853 1854 /* send j-array (coj) of Co to other processors */ 1855 /*----------------------------------------------*/ 1856 /* determine row ownership */ 1857 PetscCall(PetscNew(&merge)); 1858 PetscCall(PetscLayoutCreate(comm, &merge->rowmap)); 1859 1860 merge->rowmap->n = pn; 1861 merge->rowmap->bs = 1; 1862 1863 PetscCall(PetscLayoutSetUp(merge->rowmap)); 1864 owners = merge->rowmap->range; 1865 1866 /* determine the number of messages to send, their lengths */ 1867 PetscCall(PetscCalloc1(size, &len_si)); 1868 PetscCall(PetscCalloc1(size, &merge->len_s)); 1869 1870 len_s = merge->len_s; 1871 merge->nsend = 0; 1872 1873 PetscCall(PetscMalloc1(size + 2, &owners_co)); 1874 1875 proc = 0; 1876 for (i = 0; i < 
pon; i++) { 1877 while (prmap[i] >= owners[proc + 1]) proc++; 1878 len_si[proc]++; /* num of rows in Co to be sent to [proc] */ 1879 len_s[proc] += coi[i + 1] - coi[i]; 1880 } 1881 1882 len = 0; /* max length of buf_si[] */ 1883 owners_co[0] = 0; 1884 for (proc = 0; proc < size; proc++) { 1885 owners_co[proc + 1] = owners_co[proc] + len_si[proc]; 1886 if (len_si[proc]) { 1887 merge->nsend++; 1888 len_si[proc] = 2 * (len_si[proc] + 1); 1889 len += len_si[proc]; 1890 } 1891 } 1892 1893 /* determine the number and length of messages to receive for coi and coj */ 1894 PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv)); 1895 PetscCall(PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri)); 1896 1897 /* post the Irecv and Isend of coj */ 1898 PetscCall(PetscCommGetNewTag(comm, &tagj)); 1899 PetscCall(PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rwaits)); 1900 PetscCall(PetscMalloc1(merge->nsend + 1, &swaits)); 1901 for (proc = 0, k = 0; proc < size; proc++) { 1902 if (!len_s[proc]) continue; 1903 i = owners_co[proc]; 1904 PetscCallMPI(MPI_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k)); 1905 k++; 1906 } 1907 1908 /* receives and sends of coj are complete */ 1909 PetscCall(PetscMalloc1(size, &sstatus)); 1910 for (i = 0; i < merge->nrecv; i++) { 1911 PETSC_UNUSED PetscMPIInt icompleted; 1912 PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus)); 1913 } 1914 PetscCall(PetscFree(rwaits)); 1915 if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus)); 1916 1917 /* add received column indices into table to update Armax */ 1918 /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! 
*/ 1919 for (k = 0; k < merge->nrecv; k++) { /* k-th received message */ 1920 Jptr = buf_rj[k]; 1921 for (j = 0; j < merge->len_r[k]; j++) PetscCall(PetscTableAdd(ta, *(Jptr + j) + 1, 1, INSERT_VALUES)); 1922 } 1923 PetscCall(PetscTableGetCount(ta, &Armax)); 1924 1925 /* send and recv coi */ 1926 /*-------------------*/ 1927 PetscCall(PetscCommGetNewTag(comm, &tagi)); 1928 PetscCall(PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &rwaits)); 1929 PetscCall(PetscMalloc1(len + 1, &buf_s)); 1930 buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 1931 for (proc = 0, k = 0; proc < size; proc++) { 1932 if (!len_s[proc]) continue; 1933 /* form outgoing message for i-structure: 1934 buf_si[0]: nrows to be sent 1935 [1:nrows]: row index (global) 1936 [nrows+1:2*nrows+1]: i-structure index 1937 */ 1938 /*-------------------------------------------*/ 1939 nrows = len_si[proc] / 2 - 1; 1940 buf_si_i = buf_si + nrows + 1; 1941 buf_si[0] = nrows; 1942 buf_si_i[0] = 0; 1943 nrows = 0; 1944 for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) { 1945 nzi = coi[i + 1] - coi[i]; 1946 buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi; /* i-structure */ 1947 buf_si[nrows + 1] = prmap[i] - owners[proc]; /* local row index */ 1948 nrows++; 1949 } 1950 PetscCallMPI(MPI_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k)); 1951 k++; 1952 buf_si += len_si[proc]; 1953 } 1954 i = merge->nrecv; 1955 while (i--) { 1956 PETSC_UNUSED PetscMPIInt icompleted; 1957 PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus)); 1958 } 1959 PetscCall(PetscFree(rwaits)); 1960 if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus)); 1961 PetscCall(PetscFree(len_si)); 1962 PetscCall(PetscFree(len_ri)); 1963 PetscCall(PetscFree(swaits)); 1964 PetscCall(PetscFree(sstatus)); 1965 PetscCall(PetscFree(buf_s)); 1966 1967 /* compute the local portion of C (mpi mat) */ 1968 /*------------------------------------------*/ 1969 /* 
allocate bi array and free space for accumulating nonzero column info */ 1970 PetscCall(PetscMalloc1(pn + 1, &bi)); 1971 bi[0] = 0; 1972 1973 /* set initial free space to be fill*(nnz(P) + nnz(AP)) */ 1974 nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(pdti[pn], PetscIntSumTruncate(poti[pon], ai[am]))); 1975 PetscCall(PetscFreeSpaceGet(nnz, &free_space)); 1976 current_space = free_space; 1977 1978 PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci)); 1979 for (k = 0; k < merge->nrecv; k++) { 1980 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1981 nrows = *buf_ri_k[k]; 1982 nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */ 1983 nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */ 1984 } 1985 1986 PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk)); 1987 MatPreallocateBegin(comm, pn, A->cmap->n, dnz, onz); 1988 rmax = 0; 1989 for (i = 0; i < pn; i++) { 1990 /* add pdt[i,:]*AP into lnk */ 1991 pnz = pdti[i + 1] - pdti[i]; 1992 ptJ = pdtj + pdti[i]; 1993 for (j = 0; j < pnz; j++) { 1994 row = ptJ[j]; /* row of AP == col of Pt */ 1995 anz = ai[row + 1] - ai[row]; 1996 Jptr = aj + ai[row]; 1997 /* add non-zero cols of AP into the sorted linked list lnk */ 1998 PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk)); 1999 } 2000 2001 /* add received col data into lnk */ 2002 for (k = 0; k < merge->nrecv; k++) { /* k-th received message */ 2003 if (i == *nextrow[k]) { /* i-th row */ 2004 nzi = *(nextci[k] + 1) - *nextci[k]; 2005 Jptr = buf_rj[k] + *nextci[k]; 2006 PetscCall(PetscLLCondensedAddSorted_Scalable(nzi, Jptr, lnk)); 2007 nextrow[k]++; 2008 nextci[k]++; 2009 } 2010 } 2011 2012 /* add missing diagonal entry */ 2013 if (C->force_diagonals) { 2014 k = i + owners[rank]; /* column index */ 2015 PetscCall(PetscLLCondensedAddSorted_Scalable(1, &k, lnk)); 2016 } 2017 2018 nnz = lnk[0]; 2019 2020 /* if free 
space is not available, make more free space */ 2021 if (current_space->local_remaining < nnz) { 2022 PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), ¤t_space)); 2023 nspacedouble++; 2024 } 2025 /* copy data into free space, then initialize lnk */ 2026 PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk)); 2027 PetscCall(MatPreallocateSet(i + owners[rank], nnz, current_space->array, dnz, onz)); 2028 2029 current_space->array += nnz; 2030 current_space->local_used += nnz; 2031 current_space->local_remaining -= nnz; 2032 2033 bi[i + 1] = bi[i] + nnz; 2034 if (nnz > rmax) rmax = nnz; 2035 } 2036 PetscCall(PetscFree3(buf_ri_k, nextrow, nextci)); 2037 2038 PetscCall(PetscMalloc1(bi[pn] + 1, &bj)); 2039 PetscCall(PetscFreeSpaceContiguous(&free_space, bj)); 2040 afill_tmp = (PetscReal)bi[pn] / (pdti[pn] + poti[pon] + ai[am] + 1); 2041 if (afill_tmp > afill) afill = afill_tmp; 2042 PetscCall(PetscLLCondensedDestroy_Scalable(lnk)); 2043 PetscCall(PetscTableDestroy(&ta)); 2044 PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj)); 2045 PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->B, &poti, &potj)); 2046 2047 /* create symbolic parallel matrix C - why cannot be assembled in Numeric part */ 2048 /*-------------------------------------------------------------------------------*/ 2049 PetscCall(MatSetSizes(C, pn, A->cmap->n, PETSC_DETERMINE, PETSC_DETERMINE)); 2050 PetscCall(MatSetBlockSizes(C, PetscAbs(P->cmap->bs), PetscAbs(A->cmap->bs))); 2051 PetscCall(MatGetType(A, &mtype)); 2052 PetscCall(MatSetType(C, mtype)); 2053 PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz)); 2054 MatPreallocateEnd(dnz, onz); 2055 PetscCall(MatSetBlockSize(C, 1)); 2056 PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE)); 2057 for (i = 0; i < pn; i++) { 2058 row = i + rstart; 2059 nnz = bi[i + 1] - bi[i]; 2060 Jptr = bj + bi[i]; 2061 PetscCall(MatSetValues(C, 1, &row, nnz, Jptr, NULL, INSERT_VALUES)); 2062 } 
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
  /* stash the symbolic results on the merge struct so the numeric phase can reuse them */
  merge->bi        = bi;
  merge->bj        = bj;
  merge->coi       = coi;
  merge->coj       = coj;
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ap;
  C->product->destroy = MatDestroy_MPIAIJ_PtAP;
  ap->merge           = merge;

  C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;

#if defined(PETSC_USE_INFO)
  if (bi[pn] != 0) {
    PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
    PetscCall(PetscInfo(C, "Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
  } else {
    PetscCall(PetscInfo(C, "Empty matrix product\n"));
  }
#endif
  PetscFunctionReturn(0);
}

/* ---------------------------------------------------------------- */
/*
   Dispatch the symbolic phase of C = A^T * B to the implementation selected
   by product->alg; on success (except "backend") install the AtB numeric hook.
*/
static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A = product->A, B = product->B;
  PetscReal    fill = product->fill;
  PetscBool    flg;

  PetscFunctionBegin;
  /* scalable */
  PetscCall(PetscStrcmp(product->alg, "scalable", &flg));
  if (flg) {
    PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
    goto next;
  }

  /* nonscalable */
  PetscCall(PetscStrcmp(product->alg, "nonscalable", &flg));
  if (flg) {
    PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
    goto next;
  }

  /* matmatmult: form At explicitly, then use the AB symbolic path */
  PetscCall(PetscStrcmp(product->alg, "at*b", &flg));
  if (flg) {
    Mat        At;
    Mat_APMPI *ptap;

    PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
    PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(At, B, fill, C));
    ptap = (Mat_APMPI *)C->product->data;
    if (ptap) {
      /* keep At alive for the numeric phase; freed by MatDestroy_MPIAIJ_PtAP */
      ptap->Pt            = At;
      C->product->destroy = MatDestroy_MPIAIJ_PtAP;
    }
    C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
    goto next;
  }

  /* backend general code */
  PetscCall(PetscStrcmp(product->alg, "backend", &flg));
  if (flg) {
    PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
    PetscFunctionReturn(0);
  }

  SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatProduct type is not supported");

next:
  C->ops->productnumeric = MatProductNumeric_AtB;
  PetscFunctionReturn(0);
}

/* ---------------------------------------------------------------- */
/* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A = product->A, B = product->B;
#if defined(PETSC_HAVE_HYPRE)
  const char *algTypes[5] = {"scalable", "nonscalable", "seqmpi", "backend", "hypre"};
  PetscInt    nalg        = 5;
#else
  const char *algTypes[4] = {
    "scalable",
    "nonscalable",
    "seqmpi",
    "backend",
  };
  PetscInt nalg = 4;
#endif
  PetscInt  alg = 1; /* set nonscalable algorithm as default */
  PetscBool flg;
  MPI_Comm  comm;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));

  /* Set "nonscalable" as default algorithm */
  PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
  if (flg) {
    PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));

    /* Set "scalable" as default if BN and local nonzeros of A and B are large */
    if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
      MatInfo   Ainfo, Binfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
      PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
      PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
      nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

      /* if any rank finds the product too dense for the nonscalable path, all ranks switch */
      if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
      PetscCall(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPIU_BOOL, MPI_LOR, comm));

      if (alg_scalable) {
        alg = 0; /* scalable algorithm would 50% slower than nonscalable algorithm */
        PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));
        PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
      }
    }
  }

  /* Get runtime option */
  if (product->api_user) {
    PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
    PetscCall(PetscOptionsEList("-matmatmult_via", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
    PetscOptionsEnd();
  } else {
    PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
    PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
    PetscOptionsEnd();
  }
  if (flg) PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));

  C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
  PetscFunctionReturn(0);
}

/* ABt reuses the AB option handling; only the symbolic entry point differs */
static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABt(Mat C)
{
  PetscFunctionBegin;
  PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
  C->ops->productsymbolic = MatProductSymbolic_ABt_MPIAIJ_MPIAIJ;
  PetscFunctionReturn(0);
}

/* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A
  = product->A, B = product->B;
  const char *algTypes[4] = {"scalable", "nonscalable", "at*b", "backend"};
  PetscInt    nalg        = 4;
  PetscInt    alg         = 1; /* set default algorithm */
  PetscBool   flg;
  MPI_Comm    comm;

  PetscFunctionBegin;
  /* Check matrix local sizes */
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  PetscCheck(A->rmap->rstart == B->rmap->rstart && A->rmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != B (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);

  /* Set default algorithm */
  PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
  if (flg) PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));

  /* Set "scalable" as default if BN and local nonzeros of A and B are large */
  if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
    MatInfo   Ainfo, Binfo;
    PetscInt  nz_local;
    PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;

    PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
    PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
    nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

    /* if any rank finds the product too dense for the nonscalable path, all ranks switch */
    if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
    PetscCall(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPIU_BOOL, MPI_LOR, comm));

    if (alg_scalable) {
      alg = 0; /* scalable algorithm would 50% slower than nonscalable algorithm */
      PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));
      PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
    }
  }

  /* Get runtime option */
  if (product->api_user) {
    PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatTransposeMatMult", "Mat");
    PetscCall(PetscOptionsEList("-mattransposematmult_via", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
    PetscOptionsEnd();
  } else {
    PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AtB", "Mat");
    PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
    PetscOptionsEnd();
  }
  if (flg) PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));

  C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
  PetscFunctionReturn(0);
}

/* Set options for MatPtAPxxx_MPIAIJ_MPIAIJ: choose the PtAP algorithm and install its symbolic hook */
static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A = product->A, P = product->B;
  MPI_Comm     comm;
  PetscBool    flg;
  PetscInt     alg = 1; /* set default algorithm */
#if !defined(PETSC_HAVE_HYPRE)
  const char *algTypes[5] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend"};
  PetscInt    nalg        = 5;
#else
  const char *algTypes[6] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend", "hypre"};
  PetscInt    nalg        = 6;
#endif
  PetscInt pN = P->cmap->N;

  PetscFunctionBegin;
  /* Check matrix local sizes */
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Arow (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart, A->rmap->rend, P->rmap->rstart, P->rmap->rend);
  PetscCheck(A->cmap->rstart == P->rmap->rstart && A->cmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Acol (%" PetscInt_FMT ", %"
             PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->cmap->rstart, A->cmap->rend, P->rmap->rstart, P->rmap->rend);

  /* Set "nonscalable" as default algorithm */
  PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
  if (flg) {
    PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));

    /* Set "scalable" as default if BN and local nonzeros of A and B are large */
    if (pN > 100000) {
      MatInfo   Ainfo, Pinfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;

      PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
      PetscCall(MatGetInfo(P, MAT_LOCAL, &Pinfo));
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      /* if any rank finds the product too dense for the nonscalable path, all ranks switch */
      if (pN > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
      PetscCall(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPIU_BOOL, MPI_LOR, comm));

      if (alg_scalable) {
        alg = 0; /* scalable algorithm would 50% slower than nonscalable algorithm */
        PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));
      }
    }
  }

  /* Get runtime option */
  if (product->api_user) {
    PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
    PetscCall(PetscOptionsEList("-matptap_via", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
    PetscOptionsEnd();
  } else {
    PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
    PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
    PetscOptionsEnd();
  }
  if (flg) PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));

  C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
  PetscFunctionReturn(0);
}

static PetscErrorCode
MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A = product->A, R = product->B;

  PetscFunctionBegin;
  /* Check matrix local sizes */
  /* NOTE(review): the message claims to print A's local (rows, cols) but passes
     A->rmap->n twice; presumably the second argument was meant to be A->cmap->n
     — verify against upstream before changing the diagnostic */
  PetscCheck(A->cmap->n == R->cmap->n && A->rmap->n == R->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A local (%" PetscInt_FMT ", %" PetscInt_FMT "), R local (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->n, A->rmap->n, R->rmap->n, R->cmap->n);

  C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
  PetscFunctionReturn(0);
}

/*
   Set options for ABC = A*B*C = A*(B*C); ABC's algorithm must be chosen from AB's algorithm
*/
static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
{
  Mat_Product *product = C->product;
  PetscBool    flg = PETSC_FALSE;
  PetscInt     alg = 1; /* default algorithm */
  const char  *algTypes[3] = {"scalable", "nonscalable", "seqmpi"};
  PetscInt     nalg        = 3;

  PetscFunctionBegin;
  /* Set default algorithm */
  PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
  if (flg) PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));

  /* Get runtime option */
  if (product->api_user) {
    PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMatMult", "Mat");
    PetscCall(PetscOptionsEList("-matmatmatmult_via", "Algorithmic approach", "MatMatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
    PetscOptionsEnd();
  } else {
    PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_ABC", "Mat");
    PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatProduct_ABC", algTypes, nalg, algTypes[alg], &alg, &flg));
    PetscOptionsEnd();
  }
  if (flg) PetscCall(MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]));

  C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
  C->ops->productsymbolic    = MatProductSymbolic_ABC;
  PetscFunctionReturn(0);
}

/*
   Entry point: dispatch MatProductSetFromOptions to the per-product-type
   handler for MPIAIJ matrices; unsupported types are silently left alone
   (the default case intentionally falls through without error).
*/
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
{
  Mat_Product *product = C->product;

  PetscFunctionBegin;
  switch (product->type) {
  case MATPRODUCT_AB:
    PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
    break;
  case MATPRODUCT_ABt:
    PetscCall(MatProductSetFromOptions_MPIAIJ_ABt(C));
    break;
  case MATPRODUCT_AtB:
    PetscCall(MatProductSetFromOptions_MPIAIJ_AtB(C));
    break;
  case MATPRODUCT_PtAP:
    PetscCall(MatProductSetFromOptions_MPIAIJ_PtAP(C));
    break;
  case MATPRODUCT_RARt:
    PetscCall(MatProductSetFromOptions_MPIAIJ_RARt(C));
    break;
  case MATPRODUCT_ABC:
    PetscCall(MatProductSetFromOptions_MPIAIJ_ABC(C));
    break;
  default:
    break;
  }
  PetscFunctionReturn(0);
}