#include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/
#include <petsc/private/vecimpl.h>
#include <petsc/private/sfimpl.h>
#include <petsc/private/isimpl.h>
#include <petscblaslapack.h>
#include <petscsf.h>
#include <petsc/private/hashmapi.h>

PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;

  PetscFunctionBegin;
  PetscCall(PetscLogObjectState((PetscObject)mat, "Rows=%" PetscInt_FMT ", Cols=%" PetscInt_FMT, mat->rmap->N, mat->cmap->N));
  PetscCall(MatStashDestroy_Private(&mat->stash));
  PetscCall(VecDestroy(&aij->diag));
  PetscCall(MatDestroy(&aij->A));
  PetscCall(MatDestroy(&aij->B));
#if defined(PETSC_USE_CTABLE)
  PetscCall(PetscHMapIDestroy(&aij->colmap));
#else
  PetscCall(PetscFree(aij->colmap));
#endif
  PetscCall(PetscFree(aij->garray));
  PetscCall(VecDestroy(&aij->lvec));
  PetscCall(VecScatterDestroy(&aij->Mvctx));
  PetscCall(PetscFree2(aij->rowvalues, aij->rowindices));
  PetscCall(PetscFree(aij->ld));

  PetscCall(PetscFree(mat->data));

  /* may be created by MatCreateMPIAIJSumSeqAIJSymbolic */
  PetscCall(PetscObjectCompose((PetscObject)mat, "MatMergeSeqsToMPI", NULL));

  PetscCall(PetscObjectChangeTypeName((PetscObject)mat, NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatStoreValues_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatRetrieveValues_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatIsTranspose_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPIAIJSetPreallocation_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatResetPreallocation_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPIAIJSetPreallocationCSR_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatDiagonalScaleLocal_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpibaij_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpisbaij_C", NULL));
#if defined(PETSC_HAVE_CUDA)
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijcusparse_C", NULL));
#endif
#if defined(PETSC_HAVE_HIP)
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijhipsparse_C", NULL));
#endif
#if defined(PETSC_HAVE_KOKKOS_KERNELS)
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijkokkos_C", NULL));
#endif
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpidense_C", NULL));
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_elemental_C", NULL));
#endif
#if defined(PETSC_HAVE_SCALAPACK)
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_scalapack_C", NULL));
#endif
#if defined(PETSC_HAVE_HYPRE)
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_hypre_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatProductSetFromOptions_transpose_mpiaij_mpiaij_C", NULL));
#endif
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_is_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatProductSetFromOptions_is_mpiaij_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatProductSetFromOptions_mpiaij_mpiaij_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPIAIJSetUseScalableIncreaseOverlap_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijperm_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijsell_C", NULL));
#if defined(PETSC_HAVE_MKL_SPARSE)
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijmkl_C", NULL));
#endif
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijcrl_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_is_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpisell_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatSetPreallocationCOO_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatSetValuesCOO_C", NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* defines MatSetValues_MPI_Hash(), MatAssemblyBegin_MPI_Hash(), and MatAssemblyEnd_MPI_Hash() */
#define TYPE AIJ
#define TYPE_AIJ
#include "../src/mat/impls/aij/mpi/mpihashmat.h"
#undef TYPE
#undef TYPE_AIJ

static PetscErrorCode MatGetRowIJ_MPIAIJ(Mat A, PetscInt oshift, PetscBool symmetric, PetscBool inodecompressed, PetscInt *m, const PetscInt *ia[], const PetscInt *ja[], PetscBool *done)
{
  Mat B;

  PetscFunctionBegin;
  PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &B));
  PetscCall(PetscObjectCompose((PetscObject)A, "MatGetRowIJ_MPIAIJ", (PetscObject)B));
  PetscCall(MatGetRowIJ(B, oshift, symmetric, inodecompressed, m, ia, ja, done));
  PetscCall(MatDestroy(&B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatRestoreRowIJ_MPIAIJ(Mat A, PetscInt oshift, PetscBool symmetric, PetscBool inodecompressed, PetscInt *m, const PetscInt *ia[], const PetscInt *ja[], PetscBool *done)
{
  Mat B;

  PetscFunctionBegin;
  PetscCall(PetscObjectQuery((PetscObject)A, "MatGetRowIJ_MPIAIJ", (PetscObject *)&B));
  PetscCall(MatRestoreRowIJ(B, oshift, symmetric, inodecompressed, m, ia, ja, done));
  PetscCall(PetscObjectCompose((PetscObject)A, "MatGetRowIJ_MPIAIJ", NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
   MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

   This matrix type is identical to `MATSEQAIJ` when constructed with a single process communicator,
   and `MATMPIAIJ` otherwise. As a result, for single process communicators,
   `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
   for communicators controlling multiple processes. It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Key:
. -mat_type aij - sets the matrix type to `MATAIJ` during a call to `MatSetFromOptions()`

   Level: beginner

   Developer Note:
   Subclasses include `MATAIJCUSPARSE`, `MATAIJPERM`, `MATAIJSELL`, `MATAIJMKL`, `MATAIJCRL`, and `MATAIJKOKKOS`; the type also automatically switches over to using inode routines when enough inodes exist.
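
   Example Usage:
   A minimal creation sketch (the global sizes M, N and the preallocation counts nz, dnz, onz below are placeholders; error checking with `PetscCall()` is omitted):
.vb
   Mat A;
   MatCreate(PETSC_COMM_WORLD, &A);
   MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, M, N);
   MatSetType(A, MATAIJ);
   MatSeqAIJSetPreallocation(A, nz, NULL);             /* takes effect on a one-process communicator */
   MatMPIAIJSetPreallocation(A, dnz, NULL, onz, NULL); /* takes effect on a multi-process communicator */
.ve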

.seealso: [](ch_matrices), `Mat`, `MATSEQAIJ`, `MATMPIAIJ`, `MatCreateAIJ()`, `MatCreateSeqAIJ()`
M*/

/*MC
   MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

   This matrix type is identical to `MATSEQAIJCRL` when constructed with a single process communicator,
   and `MATMPIAIJCRL` otherwise. As a result, for single process communicators,
   `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
   for communicators controlling multiple processes. It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Key:
. -mat_type aijcrl - sets the matrix type to `MATMPIAIJCRL` during a call to `MatSetFromOptions()`

   Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatCreateMPIAIJCRL()`, `MATSEQAIJCRL`, `MATMPIAIJCRL`
M*/

static PetscErrorCode MatBindToCPU_MPIAIJ(Mat A, PetscBool flg)
{
  Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;

  PetscFunctionBegin;
#if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP) || defined(PETSC_HAVE_VIENNACL)
  A->boundtocpu = flg;
#endif
  if (a->A) PetscCall(MatBindToCPU(a->A, flg));
  if (a->B) PetscCall(MatBindToCPU(a->B, flg));

  /* In addition to binding the diagonal and off-diagonal matrices, bind the local vectors used for matrix-vector products.
   * This may seem a little odd for a MatBindToCPU() call to do, but it makes no sense for the binding of these vectors
   * to differ from that of the parent matrix. */
  if (a->lvec) PetscCall(VecBindToCPU(a->lvec, flg));
  if (a->diag) PetscCall(VecBindToCPU(a->diag, flg));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
{
  Mat_MPIAIJ *mat = (Mat_MPIAIJ *)M->data;

  PetscFunctionBegin;
  if (mat->A) {
    PetscCall(MatSetBlockSizes(mat->A, rbs, cbs));
    PetscCall(MatSetBlockSizes(mat->B, rbs, 1));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M, IS *keptrows)
{
  Mat_MPIAIJ      *mat = (Mat_MPIAIJ *)M->data;
  Mat_SeqAIJ      *a   = (Mat_SeqAIJ *)mat->A->data;
  Mat_SeqAIJ      *b   = (Mat_SeqAIJ *)mat->B->data;
  const PetscInt  *ia, *ib;
  const MatScalar *aa, *bb, *aav, *bav;
  PetscInt         na, nb, i, j, *rows, cnt = 0, n0rows;
  PetscInt         m = M->rmap->n, rstart = M->rmap->rstart;

  PetscFunctionBegin;
  *keptrows = NULL;

  ia = a->i;
  ib = b->i;
  PetscCall(MatSeqAIJGetArrayRead(mat->A, &aav));
  PetscCall(MatSeqAIJGetArrayRead(mat->B, &bav));
  for (i = 0; i < m; i++) {
    na = ia[i + 1] - ia[i];
    nb = ib[i + 1] - ib[i];
    if (!na && !nb) {
      cnt++;
      goto ok1;
    }
    aa = aav + ia[i];
    for (j = 0; j < na; j++) {
      if (aa[j] != 0.0) goto ok1;
    }
    bb = PetscSafePointerPlusOffset(bav, ib[i]);
    for (j = 0; j < nb; j++) {
      if (bb[j] != 0.0) goto ok1;
    }
    cnt++;
  ok1:;
  }
  PetscCall(MPIU_Allreduce(&cnt, &n0rows, 1, MPIU_INT, MPI_SUM, PetscObjectComm((PetscObject)M)));
  if (!n0rows) {
    PetscCall(MatSeqAIJRestoreArrayRead(mat->A, &aav));
    PetscCall(MatSeqAIJRestoreArrayRead(mat->B, &bav));
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  PetscCall(PetscMalloc1(M->rmap->n - cnt, &rows));
  cnt = 0;
  for (i = 0; i < m; i++) {
    na = ia[i + 1] - ia[i];
    nb = ib[i + 1] - ib[i];
    if (!na && !nb) continue;
    aa = aav + ia[i];
    for (j = 0; j < na; j++) {
      if (aa[j] != 0.0) {
        rows[cnt++] = rstart + i;
        goto ok2;
      }
    }
    bb = PetscSafePointerPlusOffset(bav, ib[i]);
    for (j = 0; j < nb; j++) {
      if (bb[j] != 0.0) {
        rows[cnt++] = rstart + i;
        goto ok2;
      }
    }
  ok2:;
  }
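  /* the IS takes ownership of the rows array via PETSC_OWN_POINTER, so rows is not freed here */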
  PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)M), cnt, rows, PETSC_OWN_POINTER, keptrows));
  PetscCall(MatSeqAIJRestoreArrayRead(mat->A, &aav));
  PetscCall(MatSeqAIJRestoreArrayRead(mat->B, &bav));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatDiagonalSet_MPIAIJ(Mat Y, Vec D, InsertMode is)
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ *)Y->data;
  PetscBool   cong;

  PetscFunctionBegin;
  PetscCall(MatHasCongruentLayouts(Y, &cong));
  if (Y->assembled && cong) {
    PetscCall(MatDiagonalSet(aij->A, D, is));
  } else {
    PetscCall(MatDiagonalSet_Default(Y, D, is));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M, IS *zrows)
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ *)M->data;
  PetscInt    i, rstart, nrows, *rows;

  PetscFunctionBegin;
  *zrows = NULL;
  PetscCall(MatFindZeroDiagonals_SeqAIJ_Private(aij->A, &nrows, &rows));
  PetscCall(MatGetOwnershipRange(M, &rstart, NULL));
  for (i = 0; i < nrows; i++) rows[i] += rstart;
  PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)M), nrows, rows, PETSC_OWN_POINTER, zrows));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatGetColumnReductions_MPIAIJ(Mat A, PetscInt type, PetscReal *reductions)
{
  Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)A->data;
  PetscInt           i, m, n, *garray = aij->garray;
  Mat_SeqAIJ        *a_aij = (Mat_SeqAIJ *)aij->A->data;
  Mat_SeqAIJ        *b_aij = (Mat_SeqAIJ *)aij->B->data;
  PetscReal         *work;
  const PetscScalar *dummy;

  PetscFunctionBegin;
  PetscCall(MatGetSize(A, &m, &n));
  PetscCall(PetscCalloc1(n, &work));
  /* get/restore the arrays once so the host copies are current, since the raw CSR arrays are accessed directly below */
  PetscCall(MatSeqAIJGetArrayRead(aij->A, &dummy));
  PetscCall(MatSeqAIJRestoreArrayRead(aij->A, &dummy));
  PetscCall(MatSeqAIJGetArrayRead(aij->B, &dummy));
  PetscCall(MatSeqAIJRestoreArrayRead(aij->B, &dummy));
  if (type == NORM_2) {
    for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i] * a_aij->a[i]);
    for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i] * b_aij->a[i]);
  } else if (type == NORM_1) {
    for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
    for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
  } else if (type == NORM_INFINITY) {
    for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
    for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]), work[garray[b_aij->j[i]]]);
  } else if (type == REDUCTION_SUM_REALPART || type == REDUCTION_MEAN_REALPART) {
    for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscRealPart(a_aij->a[i]);
    for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscRealPart(b_aij->a[i]);
  } else if (type == REDUCTION_SUM_IMAGINARYPART || type == REDUCTION_MEAN_IMAGINARYPART) {
    for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscImaginaryPart(a_aij->a[i]);
    for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscImaginaryPart(b_aij->a[i]);
  } else SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Unknown reduction type");
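  /* Combine the per-process partial results: the infinity norm needs an elementwise maximum across
     ranks, while every other reduction type sums; NORM_2 and the MEAN variants are then finalized
     below (square root, division by the global row count m). */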
  if (type == NORM_INFINITY) {
    PetscCall(MPIU_Allreduce(work, reductions, n, MPIU_REAL, MPIU_MAX, PetscObjectComm((PetscObject)A)));
  } else {
    PetscCall(MPIU_Allreduce(work, reductions, n, MPIU_REAL, MPIU_SUM, PetscObjectComm((PetscObject)A)));
  }
  PetscCall(PetscFree(work));
  if (type == NORM_2) {
    for (i = 0; i < n; i++) reductions[i] = PetscSqrtReal(reductions[i]);
  } else if (type == REDUCTION_MEAN_REALPART || type == REDUCTION_MEAN_IMAGINARYPART) {
    for (i = 0; i < n; i++) reductions[i] /= m;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A, IS *is)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ *)A->data;
  IS              sis, gis;
  const PetscInt *isis, *igis;
  PetscInt        n, *iis, nsis, ngis, rstart, i;

  PetscFunctionBegin;
  PetscCall(MatFindOffBlockDiagonalEntries(a->A, &sis));
  PetscCall(MatFindNonzeroRows(a->B, &gis));
  PetscCall(ISGetSize(gis, &ngis));
  PetscCall(ISGetSize(sis, &nsis));
  PetscCall(ISGetIndices(sis, &isis));
  PetscCall(ISGetIndices(gis, &igis));

  PetscCall(PetscMalloc1(ngis + nsis, &iis));
  PetscCall(PetscArraycpy(iis, igis, ngis));
  PetscCall(PetscArraycpy(iis + ngis, isis, nsis));
  n = ngis + nsis;
  PetscCall(PetscSortRemoveDupsInt(&n, iis));
  PetscCall(MatGetOwnershipRange(A, &rstart, NULL));
  for (i = 0; i < n; i++) iis[i] += rstart;
  PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)A), n, iis, PETSC_OWN_POINTER, is));

  PetscCall(ISRestoreIndices(sis, &isis));
  PetscCall(ISRestoreIndices(gis, &igis));
  PetscCall(ISDestroy(&sis));
  PetscCall(ISDestroy(&gis));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  Local utility routine that creates a mapping from the global column
  number to the local number in the off-diagonal part of the local
  storage of the matrix. When PETSC_USE_CTABLE is used this is scalable,
  at a slightly higher hash-table cost; without it, it is not scalable
  (each process stores an order-N integer array) but access is fast.
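
  A worked example with illustrative values: if the off-diagonal block has garray = {3, 7, 12},
  the colmap sends global column 3 -> 1, 7 -> 2, and 12 -> 3. Values are stored one-based so
  that a lookup result of 0 (i.e. -1 after the usual "col--") means "column not present locally".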
*/
PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
  PetscInt    n = aij->B->cmap->n, i;

  PetscFunctionBegin;
  PetscCheck(!n || aij->garray, PETSC_COMM_SELF, PETSC_ERR_PLIB, "MPIAIJ Matrix was assembled but is missing garray");
#if defined(PETSC_USE_CTABLE)
  PetscCall(PetscHMapICreateWithSize(n, &aij->colmap));
  for (i = 0; i < n; i++) PetscCall(PetscHMapISet(aij->colmap, aij->garray[i] + 1, i + 1));
#else
  PetscCall(PetscCalloc1(mat->cmap->N + 1, &aij->colmap));
  for (i = 0; i < n; i++) aij->colmap[aij->garray[i]] = i + 1;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

#define MatSetValues_SeqAIJ_A_Private(row, col, value, addv, orow, ocol) \
  do { \
    if (col <= lastcol1) low1 = 0; \
    else high1 = nrow1; \
    lastcol1 = col; \
    while (high1 - low1 > 5) { \
      t = (low1 + high1) / 2; \
      if (rp1[t] > col) high1 = t; \
      else low1 = t; \
    } \
    for (_i = low1; _i < high1; _i++) { \
      if (rp1[_i] > col) break; \
      if (rp1[_i] == col) { \
        if (addv == ADD_VALUES) { \
          ap1[_i] += value; \
          /* Not sure whether the LogFlops call will slow down the code or not */ \
          (void)PetscLogFlops(1.0); \
        } else ap1[_i] = value; \
        goto a_noinsert; \
      } \
    } \
    if (value == 0.0 && ignorezeroentries && row != col) { \
      low1  = 0; \
      high1 = nrow1; \
      goto a_noinsert; \
    } \
    if (nonew == 1) { \
      low1  = 0; \
      high1 = nrow1; \
      goto a_noinsert; \
    } \
    PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
    MatSeqXAIJReallocateAIJ(A, am, 1, nrow1, row, col, rmax1, aa, ai, aj, rp1, ap1, aimax, nonew, MatScalar); \
    N = nrow1++ - 1; \
    a->nz++; \
    high1++; \
    /* shift up all the later entries in this row */ \
    PetscCall(PetscArraymove(rp1 + _i + 1, rp1 + _i, N - _i + 1)); \
    PetscCall(PetscArraymove(ap1 + _i + 1, ap1 + _i, N - _i + 1)); \
    rp1[_i] = col; \
    ap1[_i] = value; \
    A->nonzerostate++; \
  a_noinsert:; \
    ailen[row] = nrow1; \
  } while (0)

#define MatSetValues_SeqAIJ_B_Private(row, col, value, addv, orow, ocol) \
  do { \
    if (col <= lastcol2) low2 = 0; \
    else high2 = nrow2; \
    lastcol2 = col; \
    while (high2 - low2 > 5) { \
      t = (low2 + high2) / 2; \
      if (rp2[t] > col) high2 = t; \
      else low2 = t; \
    } \
    for (_i = low2; _i < high2; _i++) { \
      if (rp2[_i] > col) break; \
      if (rp2[_i] == col) { \
        if (addv == ADD_VALUES) { \
          ap2[_i] += value; \
          (void)PetscLogFlops(1.0); \
        } else ap2[_i] = value; \
        goto b_noinsert; \
      } \
    } \
    if (value == 0.0 && ignorezeroentries) { \
      low2  = 0; \
      high2 = nrow2; \
      goto b_noinsert; \
    } \
    if (nonew == 1) { \
      low2  = 0; \
      high2 = nrow2; \
      goto b_noinsert; \
    } \
    PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
    MatSeqXAIJReallocateAIJ(B, bm, 1, nrow2, row, col, rmax2, ba, bi, bj, rp2, ap2, bimax, nonew, MatScalar); \
    N = nrow2++ - 1; \
    b->nz++; \
    high2++; \
    /* shift up all the later entries in this row */ \
    PetscCall(PetscArraymove(rp2 + _i + 1, rp2 + _i, N - _i + 1)); \
    PetscCall(PetscArraymove(ap2 + _i + 1, ap2 + _i, N - _i + 1)); \
    rp2[_i] = col; \
    ap2[_i] = value; \
    B->nonzerostate++; \
  b_noinsert:; \
    bilen[row] = nrow2; \
  } while (0)

static PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A, PetscInt row, const PetscScalar v[])
{
  Mat_MPIAIJ  *mat = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ  *a = (Mat_SeqAIJ *)mat->A->data, *b = (Mat_SeqAIJ *)mat->B->data;
  PetscInt     l, *garray = mat->garray, diag;
  PetscScalar *aa, *ba;

  PetscFunctionBegin;
  /* code only works for square matrices A */

  /* find size of row to the left of the diagonal part */
  PetscCall(MatGetOwnershipRange(A, &diag, NULL));
  row = row - diag;
  for (l = 0; l < b->i[row + 1] - b->i[row]; l++) {
    if (garray[b->j[b->i[row] + l]] > diag) break;
  }
  if (l) {
    PetscCall(MatSeqAIJGetArray(mat->B, &ba));
    PetscCall(PetscArraycpy(ba + b->i[row], v, l));
    PetscCall(MatSeqAIJRestoreArray(mat->B, &ba));
  }

  /* diagonal part */
  if (a->i[row + 1] - a->i[row]) {
    PetscCall(MatSeqAIJGetArray(mat->A, &aa));
    PetscCall(PetscArraycpy(aa + a->i[row], v + l, (a->i[row + 1] - a->i[row])));
    PetscCall(MatSeqAIJRestoreArray(mat->A, &aa));
  }

  /* right of diagonal part */
  if (b->i[row + 1] - b->i[row] - l) {
    PetscCall(MatSeqAIJGetArray(mat->B, &ba));
    PetscCall(PetscArraycpy(ba + b->i[row] + l, v + l + a->i[row + 1] - a->i[row], b->i[row + 1] - b->i[row] - l));
    PetscCall(MatSeqAIJRestoreArray(mat->B, &ba));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatSetValues_MPIAIJ(Mat mat, PetscInt m, const PetscInt im[], PetscInt n, const PetscInt in[], const PetscScalar v[], InsertMode addv)
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
  PetscScalar value = 0.0;
  PetscInt    i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
  PetscInt    cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
  PetscBool   roworiented = aij->roworiented;

  /* Some variables required in the macros above */
  Mat         A = aij->A;
  Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
  PetscInt   *aimax = a->imax, *ai = a->i, *ailen = a->ilen, *aj = a->j;
  PetscBool   ignorezeroentries = a->ignorezeroentries;
  Mat         B = aij->B;
  Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
  PetscInt   *bimax = b->imax, *bi = b->i, *bilen = b->ilen, *bj = b->j, bm = aij->B->rmap->n, am = aij->A->rmap->n;
  MatScalar  *aa, *ba;
  PetscInt   *rp1, *rp2, ii, nrow1, nrow2, _i, rmax1, rmax2, N, low1, high1, low2, high2, t, lastcol1, lastcol2;
  PetscInt    nonew;
  MatScalar  *ap1, *ap2;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJGetArray(A, &aa));
  PetscCall(MatSeqAIJGetArray(B, &ba));
  for (i = 0; i < m; i++) {
    if (im[i] < 0) continue;
    PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
    if (im[i] >= rstart && im[i] < rend) {
      row      = im[i] - rstart;
      lastcol1 = -1;
      rp1      = PetscSafePointerPlusOffset(aj, ai[row]);
      ap1      = PetscSafePointerPlusOffset(aa, ai[row]);
      rmax1    = aimax[row];
      nrow1    = ailen[row];
      low1     = 0;
      high1    = nrow1;
      lastcol2 = -1;
      rp2      = PetscSafePointerPlusOffset(bj, bi[row]);
      ap2      = PetscSafePointerPlusOffset(ba, bi[row]);
      rmax2    = bimax[row];
      nrow2    = bilen[row];
      low2     = 0;
      high2    = nrow2;
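
      /* v is a logically dense m-by-n block of values: row-oriented input reads entry (i,j)
         as v[i*n + j], column-oriented input as v[i + j*m] */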
      for (j = 0; j < n; j++) {
        if (v) value = roworiented ? v[i * n + j] : v[i + j * m];
        if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
        if (in[j] >= cstart && in[j] < cend) {
          col   = in[j] - cstart;
          nonew = a->nonew;
          MatSetValues_SeqAIJ_A_Private(row, col, value, addv, im[i], in[j]);
        } else if (in[j] < 0) {
          continue;
        } else {
          PetscCheck(in[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
          if (mat->was_assembled) {
            if (!aij->colmap) PetscCall(MatCreateColmap_MPIAIJ_Private(mat));
#if defined(PETSC_USE_CTABLE)
            PetscCall(PetscHMapIGetWithDefault(aij->colmap, in[j] + 1, 0, &col)); /* map global col ids to local ones */
            col--;
#else
            col = aij->colmap[in[j]] - 1;
#endif
            if (col < 0 && !((Mat_SeqAIJ *)(aij->B->data))->nonew) { /* col < 0 means in[j] is a new column for B */
              PetscCall(MatDisAssemble_MPIAIJ(mat));                 /* Change aij->B from reduced/local format to expanded/global format */
              col = in[j];
              /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
              B     = aij->B;
              b     = (Mat_SeqAIJ *)B->data;
              bimax = b->imax;
              bi    = b->i;
              bilen = b->ilen;
              bj    = b->j;
              ba    = b->a;
              rp2   = bj + bi[row];
              ap2   = ba + bi[row];
              rmax2 = bimax[row];
              nrow2 = bilen[row];
              low2  = 0;
              high2 = nrow2;
              bm    = aij->B->rmap->n;
              ba    = b->a;
            } else if (col < 0 && !(ignorezeroentries && value == 0.0)) {
              if (1 == ((Mat_SeqAIJ *)(aij->B->data))->nonew) {
                PetscCall(PetscInfo(mat, "Skipping insertion of new nonzero location in off-diagonal portion of matrix %g(%" PetscInt_FMT ",%" PetscInt_FMT ")\n", (double)PetscRealPart(value), im[i], in[j]));
              } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", im[i], in[j]);
            }
          } else col = in[j];
          nonew = b->nonew;
          MatSetValues_SeqAIJ_B_Private(row, col, value, addv, im[i], in[j]);
        }
      }
    } else {
      PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Setting off process row %" PetscInt_FMT " even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set", im[i]);
      if (!aij->donotstash) {
        mat->assembled = PETSC_FALSE;
        if (roworiented) {
          PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, PetscSafePointerPlusOffset(v, i * n), (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
        } else {
          PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, PetscSafePointerPlusOffset(v, i), m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
        }
      }
    }
  }
  PetscCall(MatSeqAIJRestoreArray(A, &aa)); /* aa, ba might have been freed due to reallocation above. But we don't access them here */
  PetscCall(MatSeqAIJRestoreArray(B, &ba));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  This function sets the j and ilen arrays (of the diagonal and off-diagonal parts) of an MPIAIJ matrix.
  The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
  No off-process parts of the matrix are allowed here and mat->was_assembled has to be PETSC_FALSE.
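
  A worked example with illustrative values: with cstart = 4 and cend = 8, a row whose global
  columns are {1, 5, 6, 9} contributes local columns {1, 2} (that is, {5, 6} - cstart) to the
  diagonal block and the still-global columns {1, 9} to the off-diagonal block.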
*/
PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat, const PetscInt mat_j[], const PetscInt mat_i[])
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
  Mat         A   = aij->A; /* diagonal part of the matrix */
  Mat         B   = aij->B; /* off-diagonal part of the matrix */
  Mat_SeqAIJ *a   = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJ *b   = (Mat_SeqAIJ *)B->data;
  PetscInt    cstart = mat->cmap->rstart, cend = mat->cmap->rend, col;
  PetscInt   *ailen = a->ilen, *aj = a->j;
  PetscInt   *bilen = b->ilen, *bj = b->j;
  PetscInt    am = aij->A->rmap->n, j;
  PetscInt    diag_so_far = 0, dnz;
  PetscInt    offd_so_far = 0, onz;

  PetscFunctionBegin;
  /* Iterate over all rows of the matrix */
  for (j = 0; j < am; j++) {
    dnz = onz = 0;
    /* Iterate over all nonzero columns of the current row */
    for (col = mat_i[j]; col < mat_i[j + 1]; col++) {
      /* If the column is in the diagonal part */
      if (mat_j[col] >= cstart && mat_j[col] < cend) {
        aj[diag_so_far++] = mat_j[col] - cstart;
        dnz++;
      } else { /* off-diagonal entries */
        bj[offd_so_far++] = mat_j[col];
        onz++;
      }
    }
    ailen[j] = dnz;
    bilen[j] = onz;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal parts) of an MPIAIJ matrix.
  The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
  No off-process parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ().
  Also, mat->was_assembled has to be false, otherwise the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
  would not be true and the more complex MatSetValues_MPIAIJ() has to be used.
*/
PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat, const PetscInt mat_j[], const PetscInt mat_i[], const PetscScalar mat_a[])
{
  Mat_MPIAIJ *aij  = (Mat_MPIAIJ *)mat->data;
  Mat         A    = aij->A; /* diagonal part of the matrix */
  Mat         B    = aij->B; /* off-diagonal part of the matrix */
  Mat_SeqAIJ *aijd = (Mat_SeqAIJ *)(aij->A)->data, *aijo = (Mat_SeqAIJ *)(aij->B)->data;
  Mat_SeqAIJ *a    = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJ *b    = (Mat_SeqAIJ *)B->data;
  PetscInt    cstart = mat->cmap->rstart, cend = mat->cmap->rend;
  PetscInt   *ailen = a->ilen, *aj = a->j;
  PetscInt   *bilen = b->ilen, *bj = b->j;
  PetscInt    am = aij->A->rmap->n, j;
  PetscInt   *full_diag_i = aijd->i, *full_offd_i = aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
  PetscInt     col, dnz_row, onz_row, rowstart_diag, rowstart_offd;
  PetscScalar *aa = a->a, *ba = b->a;

  PetscFunctionBegin;
  /* Iterate over all rows of the matrix */
  for (j = 0; j < am; j++) {
    dnz_row = onz_row = 0;
    rowstart_offd = full_offd_i[j];
    rowstart_diag = full_diag_i[j];
    /* Iterate over all nonzero columns of the current row */
    for (col = mat_i[j]; col < mat_i[j + 1]; col++) {
      /* If the column is in the diagonal part */
      if (mat_j[col] >= cstart && mat_j[col] < cend) {
        aj[rowstart_diag + dnz_row] = mat_j[col] - cstart;
        aa[rowstart_diag + dnz_row] = mat_a[col];
        dnz_row++;
      } else { /* off-diagonal entries */
        bj[rowstart_offd + onz_row] = mat_j[col];
        ba[rowstart_offd + onz_row] = mat_a[col];
        onz_row++;
      }
    }
    ailen[j] = dnz_row;
    bilen[j] = onz_row;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatGetValues_MPIAIJ(Mat mat, PetscInt m, const PetscInt idxm[], PetscInt n, const PetscInt idxn[], PetscScalar v[])
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
  PetscInt    i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
  PetscInt    cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;

  PetscFunctionBegin;
  for (i = 0; i < m; i++) {
    if (idxm[i] < 0) continue; /* negative row */
    PetscCheck(idxm[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, idxm[i], mat->rmap->N - 1);
    PetscCheck(idxm[i] >= rstart && idxm[i] < rend, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only local values currently supported, row requested %" PetscInt_FMT " range [%" PetscInt_FMT " %" PetscInt_FMT ")", idxm[i], rstart, rend);
    row = idxm[i] - rstart;
    for (j = 0; j < n; j++) {
      if (idxn[j] < 0) continue; /* negative column */
      PetscCheck(idxn[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, idxn[j], mat->cmap->N - 1);
      if (idxn[j] >= cstart && idxn[j] < cend) {
        col = idxn[j] - cstart;
        PetscCall(MatGetValues(aij->A, 1, &row, 1, &col, v + i * n + j));
      } else {
        if (!aij->colmap) PetscCall(MatCreateColmap_MPIAIJ_Private(mat));
#if defined(PETSC_USE_CTABLE)
        PetscCall(PetscHMapIGetWithDefault(aij->colmap, idxn[j] + 1, 0, &col));
        col--;
#else
        col = aij->colmap[idxn[j]] - 1;
#endif
        if ((col < 0) || (aij->garray[col] != idxn[j])) *(v + i * n + j) = 0.0;
        else PetscCall(MatGetValues(aij->B, 1, &row, 1, &col, v + i * n + j));
      }
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat, MatAssemblyType mode)
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
  PetscInt    nstash, reallocs;

  PetscFunctionBegin;
  if (aij->donotstash || mat->nooffprocentries) PetscFunctionReturn(PETSC_SUCCESS);

  PetscCall(MatStashScatterBegin_Private(mat, &mat->stash, mat->rmap->range));
  PetscCall(MatStashGetInfo_Private(&mat->stash, &nstash, &reallocs));
  PetscCall(PetscInfo(aij->A, "Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n", nstash, reallocs));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat, MatAssemblyType mode)
{
  Mat_MPIAIJ  *aij = (Mat_MPIAIJ *)mat->data;
  PetscMPIInt  n;
  PetscInt     i, j, rstart, ncols, flg;
  PetscInt    *row, *col;
  PetscBool    other_disassembled;
  PetscScalar *val;

  /* do not use 'b = (Mat_SeqAIJ *)aij->B->data' as B can be reset in disassembly */

  PetscFunctionBegin;
  if (!aij->donotstash && !mat->nooffprocentries) {
    while (1) {
      PetscCall(MatStashScatterGetMesg_Private(&mat->stash, &n, &row, &col, &val, &flg));
      if (!flg) break;

      for (i = 0; i < n;) {
        /* Now identify the consecutive vals belonging to the same row */
        for (j = i, rstart = row[j]; j < n; j++) {
          if (row[j] != rstart) break;
        }
        if (j < n) ncols = j - i;
        else ncols = n - i;
        /* Now assemble all these values with a single function call */
        PetscCall(MatSetValues_MPIAIJ(mat, 1, row + i, ncols, col + i, val + i, mat->insertmode));
        i = j;
      }
    }
    PetscCall(MatStashScatterEnd_Private(&mat->stash));
  }
#if defined(PETSC_HAVE_DEVICE)
  if (mat->offloadmask == PETSC_OFFLOAD_CPU) aij->A->offloadmask = PETSC_OFFLOAD_CPU;
  /* We call MatBindToCPU() on aij->A and aij->B here, because if MatBindToCPU_MPIAIJ() is called before assembly, it cannot bind these. */
  if (mat->boundtocpu) {
    PetscCall(MatBindToCPU(aij->A, PETSC_TRUE));
    PetscCall(MatBindToCPU(aij->B, PETSC_TRUE));
  }
#endif
  PetscCall(MatAssemblyBegin(aij->A, mode));
  PetscCall(MatAssemblyEnd(aij->A, mode));

  /* determine if any process has disassembled; if so we must
     also disassemble ourselves, in order that we may reassemble */
  /*
     if the nonzero structure of submatrix B cannot change then we know that
     no process disassembled, thus we can skip this stuff
  */
  if (!((Mat_SeqAIJ *)aij->B->data)->nonew) {
    PetscCall(MPIU_Allreduce(&mat->was_assembled, &other_disassembled, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
    if (mat->was_assembled && !other_disassembled) { /* mat on this rank has a reduced off-diagonal B with local col ids, but globally it does not */
      PetscCall(MatDisAssemble_MPIAIJ(mat));
    }
  }
  if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) PetscCall(MatSetUpMultiply_MPIAIJ(mat));
  PetscCall(MatSetOption(aij->B, MAT_USE_INODES, PETSC_FALSE));
#if defined(PETSC_HAVE_DEVICE)
  if (mat->offloadmask == PETSC_OFFLOAD_CPU && aij->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) aij->B->offloadmask = PETSC_OFFLOAD_CPU;
#endif
  PetscCall(MatAssemblyBegin(aij->B, mode));
  PetscCall(MatAssemblyEnd(aij->B, mode));

  PetscCall(PetscFree2(aij->rowvalues, aij->rowindices));

  aij->rowvalues = NULL;

  PetscCall(VecDestroy(&aij->diag));

  /* if no new nonzero locations are allowed in the matrix then only set the matrix state the first time through */
  if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ *)(aij->A->data))->nonew) {
    PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
    PetscCall(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat)));
  }
#if defined(PETSC_HAVE_DEVICE)
  mat->offloadmask = PETSC_OFFLOAD_BOTH;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
{
  Mat_MPIAIJ *l = (Mat_MPIAIJ *)A->data;

  PetscFunctionBegin;
  PetscCall(MatZeroEntries(l->A));
  PetscCall(MatZeroEntries(l->B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatZeroRows_MPIAIJ(Mat A, PetscInt N, const PetscInt rows[], PetscScalar diag, Vec x, Vec b)
{
  Mat_MPIAIJ      *mat = (Mat_MPIAIJ *)A->data;
  PetscObjectState sA, sB;
  PetscInt        *lrows;
  PetscInt         r, len;
  PetscBool        cong, lch, gch;

  PetscFunctionBegin;
  /* get locally owned rows */
  PetscCall(MatZeroRowsMapLocal_Private(A, N, rows, &len, &lrows));
  PetscCall(MatHasCongruentLayouts(A, &cong));
  /* fix the right-hand side if needed */
  if (x && b) {
    const PetscScalar *xx;
    PetscScalar       *bb;

    PetscCheck(cong, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Need matching row/col layout");
    PetscCall(VecGetArrayRead(x, &xx));
    PetscCall(VecGetArray(b, &bb));
    for (r = 0; r < len; ++r) bb[lrows[r]] = diag * xx[lrows[r]];
    PetscCall(VecRestoreArrayRead(x, &xx));
    PetscCall(VecRestoreArray(b, &bb));
  }

  sA = mat->A->nonzerostate;
  sB = mat->B->nonzerostate;

  if (diag != 0.0 && cong) {
    PetscCall(MatZeroRows(mat->A, len, lrows, diag, NULL, NULL));
    PetscCall(MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL));
  } else if (diag != 0.0) { /* non-square or non-congruent layouts -> if keepnonzeropattern is false, we allow for new insertions */
    Mat_SeqAIJ *aijA = (Mat_SeqAIJ *)mat->A->data;
    Mat_SeqAIJ *aijB = (Mat_SeqAIJ *)mat->B->data;
    PetscInt    nnwA, nnwB;
    PetscBool   nnzA, nnzB;

    nnwA = aijA->nonew;
    nnwB = aijB->nonew;
    nnzA = aijA->keepnonzeropattern;
    nnzB = aijB->keepnonzeropattern;
    if (!nnzA) {
      PetscCall(PetscInfo(mat->A, "Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on the diagonal block.\n"));
      aijA->nonew = 0;
    }
    if (!nnzB) {
      PetscCall(PetscInfo(mat->B, "Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on the off-diagonal block.\n"));
      aijB->nonew = 0;
    }
    /* Must zero here before the next loop */
    PetscCall(MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL));
    PetscCall(MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL));
    for (r = 0; r < len; ++r) {
      const PetscInt row = lrows[r] + A->rmap->rstart;
      if (row >= A->cmap->N) continue;
      PetscCall(MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES));
    }
    aijA->nonew = nnwA;
    aijB->nonew = nnwB;
  } else {
    PetscCall(MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL));
    PetscCall(MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL));
  }
  PetscCall(PetscFree(lrows));
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));

  /* reduce nonzerostate */
  lch = (PetscBool)(sA != mat->A->nonzerostate || sB != mat->B->nonzerostate);
  PetscCall(MPIU_Allreduce(&lch, &gch, 1, MPIU_BOOL, MPI_LOR, PetscObjectComm((PetscObject)A)));
  if (gch) A->nonzerostate++;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A, PetscInt N, const PetscInt rows[], PetscScalar diag, Vec x, Vec b)
{
  Mat_MPIAIJ        *l = (Mat_MPIAIJ *)A->data;
  PetscMPIInt        n = A->rmap->n;
  PetscInt           i, j, r, m, len = 0;
  PetscInt          *lrows, *owners = A->rmap->range;
  PetscMPIInt        p = 0;
  PetscSFNode       *rrows;
  PetscSF            sf;
  const PetscScalar *xx;
  PetscScalar       *bb, *mask, *aij_a;
  Vec                xmask, lmask;
  Mat_SeqAIJ        *aij = (Mat_SeqAIJ *)l->B->data;
  const PetscInt    *aj, *ii, *ridx;
  PetscScalar       *aa;

  PetscFunctionBegin;
  /* Create an SF where leaves are input rows and roots are owned rows */
  PetscCall(PetscMalloc1(n, &lrows));
  for (r = 0; r < n; ++r) lrows[r] = -1;
  PetscCall(PetscMalloc1(N, &rrows));
  for (r = 0; r < N; ++r) {
    const PetscInt idx = rows[r];
    PetscCheck(idx >= 0 && A->rmap->N > idx, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row %" PetscInt_FMT " out of range [0,%" PetscInt_FMT ")", idx, A->rmap->N);
    if (idx < owners[p] || owners[p + 1] <= idx) { /* short-circuit the search if the last p owns this row too */
      PetscCall(PetscLayoutFindOwner(A->rmap, idx, &p));
    }
    rrows[r].rank  = p;
    rrows[r].index = rows[r] - owners[p];
  }
  PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf));
  PetscCall(PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER));
  /* Collect flags for rows to be zeroed */
  PetscCall(PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *)rows, lrows, MPI_LOR));
  PetscCall(PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *)rows, lrows, MPI_LOR));
  PetscCall(PetscSFDestroy(&sf));
  /* Compress and put in row numbers */
  for (r = 0; r < n; ++r)
    if (lrows[r] >= 0) lrows[len++] = r;
  /* zero the diagonal part of the matrix */
  PetscCall(MatZeroRowsColumns(l->A, len, lrows, diag, x, b));
  /* handle the off-diagonal part of the matrix */
  PetscCall(MatCreateVecs(A, &xmask, NULL));
  PetscCall(VecDuplicate(l->lvec, &lmask));
  PetscCall(VecGetArray(xmask, &bb));
  for (i = 0; i < len; i++) bb[lrows[i]] = 1;
  PetscCall(VecRestoreArray(xmask, &bb));
  PetscCall(VecScatterBegin(l->Mvctx, xmask, lmask, ADD_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(l->Mvctx, xmask, lmask, ADD_VALUES, SCATTER_FORWARD));
  PetscCall(VecDestroy(&xmask));
  if (x && b) { /* this code is buggy when the row and column layouts don't match */
    PetscBool cong;

    PetscCall(MatHasCongruentLayouts(A, &cong));
    PetscCheck(cong, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Need matching row/col layout");
    PetscCall(VecScatterBegin(l->Mvctx, x, l->lvec, INSERT_VALUES, SCATTER_FORWARD));
    PetscCall(VecScatterEnd(l->Mvctx, x, l->lvec, INSERT_VALUES, SCATTER_FORWARD));
    PetscCall(VecGetArrayRead(l->lvec, &xx));
    PetscCall(VecGetArray(b, &bb));
  }
  PetscCall(VecGetArray(lmask, &mask));
  /* remove zeroed rows of the off-diagonal matrix */
  PetscCall(MatSeqAIJGetArray(l->B, &aij_a));
  ii = aij->i;
  for (i = 0; i < len; i++) PetscCall(PetscArrayzero(PetscSafePointerPlusOffset(aij_a, ii[lrows[i]]), ii[lrows[i] + 1] - ii[lrows[i]]));
  /* loop over all elements of the off-process part of the matrix zeroing removed columns */
  if (aij->compressedrow.use) {
    m    = aij->compressedrow.nrows;
    ii   = aij->compressedrow.i;
    ridx = aij->compressedrow.rindex;
    for (i = 0; i < m; i++) {
      n  = ii[i + 1] - ii[i];
      aj = aij->j + ii[i];
      aa = aij_a + ii[i];

      for (j = 0; j < n; j++) {
        if (PetscAbsScalar(mask[*aj])) {
          if (b) bb[*ridx] -= *aa * xx[*aj];
          *aa = 0.0;
        }
        aa++;
        aj++;
      }
      ridx++;
    }
  } else { /* do not use compressed row format */
    m = l->B->rmap->n;
    for (i = 0; i < m; i++) {
      n  = ii[i + 1] - ii[i];
      aj = aij->j + ii[i];
      aa = aij_a + ii[i];
      for (j = 0; j < n; j++) {
        if (PetscAbsScalar(mask[*aj])) {
          if (b) bb[i] -= *aa * xx[*aj];
          *aa = 0.0;
        }
        aa++;
        aj++;
      }
    }
  }
  if (x && b) {
    PetscCall(VecRestoreArray(b, &bb));
    PetscCall(VecRestoreArrayRead(l->lvec, &xx));
  }
  PetscCall(MatSeqAIJRestoreArray(l->B, &aij_a));
  PetscCall(VecRestoreArray(lmask, &mask));
  PetscCall(VecDestroy(&lmask));
  PetscCall(PetscFree(lrows));

  /* only change the matrix nonzero state if the pattern was allowed to change */
  if (!((Mat_SeqAIJ *)(l->A->data))->keepnonzeropattern) {
    PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
    PetscCall(MPIU_Allreduce(&state, &A->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)A)));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMult_MPIAIJ(Mat A, Vec xx, Vec yy)
{
  Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
  PetscInt    nt;
  VecScatter  Mvctx = a->Mvctx;

  PetscFunctionBegin;
  PetscCall(VecGetLocalSize(xx, &nt));
  PetscCheck(nt == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Incompatible partition of A (%" PetscInt_FMT ") and xx (%" PetscInt_FMT ")", A->cmap->n, nt);
  PetscCall(VecScatterBegin(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
  PetscUseTypeMethod(a->A, mult, xx, yy);
  PetscCall(VecScatterEnd(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
  PetscUseTypeMethod(a->B, multadd, a->lvec, yy, yy);
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A, Vec bb, Vec xx)
{
  Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;

  PetscFunctionBegin;
  PetscCall(MatMultDiagonalBlock(a->A, bb, xx));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMultAdd_MPIAIJ(Mat A, Vec xx, Vec yy, Vec zz)
{
  Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
  VecScatter  Mvctx = a->Mvctx;

  PetscFunctionBegin;
  PetscCall(VecScatterBegin(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall((*a->A->ops->multadd)(a->A, xx, yy, zz));
  PetscCall(VecScatterEnd(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall((*a->B->ops->multadd)(a->B, a->lvec, zz, zz));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMultTranspose_MPIAIJ(Mat A, Vec xx, Vec yy)
{
  Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;

  PetscFunctionBegin;
  /* do the non-diagonal part */
  PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
  /* do the local part */
  PetscCall((*a->A->ops->multtranspose)(a->A, xx, yy));
  /* add the partial results together */
  PetscCall(VecScatterBegin(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat, Mat Bmat, PetscReal tol, PetscBool *f)
{
  MPI_Comm    comm;
  Mat_MPIAIJ *Aij = (Mat_MPIAIJ *)Amat->data, *Bij = (Mat_MPIAIJ *)Bmat->data;
  Mat         Adia = Aij->A, Bdia = Bij->A, Aoff, Boff, *Aoffs, *Boffs;
  IS          Me, Notme;
  PetscInt    M, N, first, last, *notme, i;
  PetscBool   lf;
  PetscMPIInt size;

  PetscFunctionBegin;
  /* Easy test: symmetric diagonal block */
  PetscCall(MatIsTranspose(Adia, Bdia, tol, &lf));
  PetscCall(MPIU_Allreduce(&lf, f, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)Amat)));
  if (!*f) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);

  /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix.
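     Each process builds Me, the set of its locally owned rows, and Notme, all remaining
     global indices; it then extracts A(Me, Notme) and B(Notme, Me) with MatCreateSubMatrices()
     and checks that these two sequential off-diagonal pieces are transposes of each other.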
  */
  PetscCall(MatGetSize(Amat, &M, &N));
  PetscCall(MatGetOwnershipRange(Amat, &first, &last));
  PetscCall(PetscMalloc1(N - last + first, &notme));
  for (i = 0; i < first; i++) notme[i] = i;
  for (i = last; i < M; i++) notme[i - last + first] = i;
  PetscCall(ISCreateGeneral(MPI_COMM_SELF, N - last + first, notme, PETSC_COPY_VALUES, &Notme));
  PetscCall(ISCreateStride(MPI_COMM_SELF, last - first, first, 1, &Me));
  PetscCall(MatCreateSubMatrices(Amat, 1, &Me, &Notme, MAT_INITIAL_MATRIX, &Aoffs));
  Aoff = Aoffs[0];
  PetscCall(MatCreateSubMatrices(Bmat, 1, &Notme, &Me, MAT_INITIAL_MATRIX, &Boffs));
  Boff = Boffs[0];
  PetscCall(MatIsTranspose(Aoff, Boff, tol, f));
  PetscCall(MatDestroyMatrices(1, &Aoffs));
  PetscCall(MatDestroyMatrices(1, &Boffs));
  PetscCall(ISDestroy(&Me));
  PetscCall(ISDestroy(&Notme));
  PetscCall(PetscFree(notme));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A, PetscReal tol, PetscBool *f)
{
  PetscFunctionBegin;
  PetscCall(MatIsTranspose_MPIAIJ(A, A, tol, f));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A, Vec xx, Vec yy, Vec zz)
{
  Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;

  PetscFunctionBegin;
  /* do the non-diagonal part */
  PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
  /* do the local part */
  PetscCall((*a->A->ops->multtransposeadd)(a->A, xx, yy, zz));
  /* add the partial results together */
  PetscCall(VecScatterBegin(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  This only works correctly for square matrices where the subblock A->A is the
  diagonal block
*/
static PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A, Vec v)
{
  Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;

  PetscFunctionBegin;
  PetscCheck(A->rmap->N == A->cmap->N, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Supports only square matrix where A->A is diag block");
  PetscCheck(A->rmap->rstart == A->cmap->rstart && A->rmap->rend == A->cmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "row partition must equal col partition");
  PetscCall(MatGetDiagonal(a->A, v));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatScale_MPIAIJ(Mat A, PetscScalar aa)
{
  Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;

  PetscFunctionBegin;
  PetscCall(MatScale(a->A, aa));
  PetscCall(MatScale(a->B, aa));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatView_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
{
  Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)mat->data;
  Mat_SeqAIJ        *A   = (Mat_SeqAIJ *)aij->A->data;
  Mat_SeqAIJ        *B   = (Mat_SeqAIJ *)aij->B->data;
  const PetscInt    *garray = aij->garray;
  const PetscScalar *aa, *ba;
  PetscInt           header[4], M, N, m, rs, cs, cnt, i, ja, jb;
  PetscInt64         nz, hnz;
  PetscInt          *rowlens;
  PetscInt          *colidxs;
  PetscScalar       *matvals;
  PetscMPIInt        rank;

  PetscFunctionBegin;
  PetscCall(PetscViewerSetUp(viewer));

  M  = mat->rmap->N;
  N  = mat->cmap->N;
  m  = mat->rmap->n;
  rs = mat->rmap->rstart;
  cs = mat->cmap->rstart;
  nz = A->nz + B->nz;
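
  /* On-disk layout written below: a four-entry PetscInt header [MAT_FILE_CLASSID, M, N, nnz],
     then one row length per global row, then the global column indices of all stored entries,
     then the matching values. The global nonzero count is summed onto rank 0 in 64-bit and
     clamped to PETSC_MAX_INT if it would overflow PetscInt. */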
  /* write matrix header */
  header[0] = MAT_FILE_CLASSID;
  header[1] = M;
  header[2] = N;
  PetscCallMPI(MPI_Reduce(&nz, &hnz, 1, MPIU_INT64, MPI_SUM, 0, PetscObjectComm((PetscObject)mat)));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
  if (rank == 0) {
    if (hnz > PETSC_MAX_INT) header[3] = PETSC_MAX_INT;
    else header[3] = (PetscInt)hnz;
  }
  PetscCall(PetscViewerBinaryWrite(viewer, header, 4, PETSC_INT));

  /* fill in and store row lengths */
  PetscCall(PetscMalloc1(m, &rowlens));
  for (i = 0; i < m; i++) rowlens[i] = A->i[i + 1] - A->i[i] + B->i[i + 1] - B->i[i];
  PetscCall(PetscViewerBinaryWriteAll(viewer, rowlens, m, rs, M, PETSC_INT));
  PetscCall(PetscFree(rowlens));

  /* fill in and store column indices */
  PetscCall(PetscMalloc1(nz, &colidxs));
  for (cnt = 0, i = 0; i < m; i++) {
    for (jb = B->i[i]; jb < B->i[i + 1]; jb++) {
      if (garray[B->j[jb]] > cs) break;
      colidxs[cnt++] = garray[B->j[jb]];
    }
    for (ja = A->i[i]; ja < A->i[i + 1]; ja++) colidxs[cnt++] = A->j[ja] + cs;
    for (; jb < B->i[i + 1]; jb++) colidxs[cnt++] = garray[B->j[jb]];
  }
  PetscCheck(cnt == nz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Internal PETSc error: cnt = %" PetscInt_FMT " nz = %" PetscInt64_FMT, cnt, nz);
  PetscCall(PetscViewerBinaryWriteAll(viewer, colidxs, nz, PETSC_DETERMINE, PETSC_DETERMINE, PETSC_INT));
  PetscCall(PetscFree(colidxs));

  /* fill in and store nonzero values */
  PetscCall(MatSeqAIJGetArrayRead(aij->A, &aa));
  PetscCall(MatSeqAIJGetArrayRead(aij->B, &ba));
  PetscCall(PetscMalloc1(nz, &matvals));
  for (cnt = 0, i = 0; i < m; i++) {
    for (jb = B->i[i]; jb < B->i[i + 1]; jb++) {
      if (garray[B->j[jb]] > cs) break;
      matvals[cnt++] = ba[jb];
    }
    for (ja = A->i[i]; ja < A->i[i + 1]; ja++) matvals[cnt++] = aa[ja];
    for (; jb < B->i[i + 1]; jb++) matvals[cnt++] = ba[jb];
  }
  PetscCall(MatSeqAIJRestoreArrayRead(aij->A, &aa));
  PetscCall(MatSeqAIJRestoreArrayRead(aij->B, &ba));
  PetscCheck(cnt == nz, PETSC_COMM_SELF, PETSC_ERR_LIB, "Internal PETSc error: cnt = %" PetscInt_FMT " nz = %" PetscInt64_FMT, cnt, nz);
  PetscCall(PetscViewerBinaryWriteAll(viewer, matvals, nz, PETSC_DETERMINE, PETSC_DETERMINE, PETSC_SCALAR));
  PetscCall(PetscFree(matvals));

  /* write the block size option to the viewer's .info file */
  PetscCall(MatView_Binary_BlockSizes(mat, viewer));
  PetscFunctionReturn(PETSC_SUCCESS);
}

#include <petscdraw.h>
static PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat, PetscViewer viewer)
{
  Mat_MPIAIJ       *aij = (Mat_MPIAIJ *)mat->data;
  PetscMPIInt       rank = aij->rank, size = aij->size;
  PetscBool         isdraw, iascii, isbinary;
  PetscViewer       sviewer;
  PetscViewerFormat format;

  PetscFunctionBegin;
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
  if (iascii) {
    PetscCall(PetscViewerGetFormat(viewer, &format));
    if (format == PETSC_VIEWER_LOAD_BALANCE) {
      PetscInt i, nmax = 0, nmin = PETSC_MAX_INT, navg = 0, *nz, nzlocal = ((Mat_SeqAIJ *)(aij->A->data))->nz + ((Mat_SeqAIJ *)(aij->B->data))->nz;

      PetscCall(PetscMalloc1(size, &nz));
      PetscCallMPI(MPI_Allgather(&nzlocal, 1, MPIU_INT, nz, 1, MPIU_INT, PetscObjectComm((PetscObject)mat)));
      for (i = 0; i < (PetscInt)size; i++) {
        nmax = PetscMax(nmax, nz[i]);
        nmin = PetscMin(nmin, nz[i]);
        navg += nz[i];
      }
      PetscCall(PetscFree(nz));
      navg = navg / size;
      PetscCall(PetscViewerASCIIPrintf(viewer, "Load Balance - Nonzeros: Min %" PetscInt_FMT " avg %" PetscInt_FMT " max %" PetscInt_FMT "\n", nmin, navg, nmax));
      PetscFunctionReturn(PETSC_SUCCESS);
    }
    PetscCall(PetscViewerGetFormat(viewer, &format));
    if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      MatInfo   info;
      PetscInt *inodes = NULL;

      PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
      PetscCall(MatGetInfo(mat, MAT_LOCAL, &info));
      PetscCall(MatInodeGetInodeSizes(aij->A, NULL, &inodes, NULL));
      PetscCall(PetscViewerASCIIPushSynchronized(viewer));
      if (!inodes) {
        PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %g, not using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used, (PetscInt)info.nz_allocated,
                                                     (double)info.memory));
      } else {
        PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %g, using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used, (PetscInt)info.nz_allocated,
                                                     (double)info.memory));
      }
      PetscCall(MatGetInfo(aij->A, MAT_LOCAL, &info));
      PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] on-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
      PetscCall(MatGetInfo(aij->B, MAT_LOCAL, &info));
      PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] off-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
      PetscCall(PetscViewerFlush(viewer));
      PetscCall(PetscViewerASCIIPopSynchronized(viewer));
      PetscCall(PetscViewerASCIIPrintf(viewer, "Information on VecScatter used in matrix-vector product: \n"));
      PetscCall(VecScatterView(aij->Mvctx, viewer));
      PetscFunctionReturn(PETSC_SUCCESS);
    } else if (format == PETSC_VIEWER_ASCII_INFO) {
      PetscInt inodecount, inodelimit, *inodes;

      PetscCall(MatInodeGetInodeSizes(aij->A, &inodecount, &inodes, &inodelimit));
      if (inodes) {
        PetscCall(PetscViewerASCIIPrintf(viewer, "using I-node (on process 0) routines: found %" PetscInt_FMT " nodes, limit used is %" PetscInt_FMT "\n", inodecount, inodelimit));
      } else {
        PetscCall(PetscViewerASCIIPrintf(viewer, "not using I-node (on process 0) routines\n"));
      }
      PetscFunctionReturn(PETSC_SUCCESS);
    } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
      PetscFunctionReturn(PETSC_SUCCESS);
    }
  } else if (isbinary) {
    if (size == 1) {
      PetscCall(PetscObjectSetName((PetscObject)aij->A, ((PetscObject)mat)->name));
      PetscCall(MatView(aij->A, viewer));
    } else {
      PetscCall(MatView_MPIAIJ_Binary(mat, viewer));
    }
    PetscFunctionReturn(PETSC_SUCCESS);
  } else if (iascii && size == 1) {
    PetscCall(PetscObjectSetName((PetscObject)aij->A, ((PetscObject)mat)->name));
    PetscCall(MatView(aij->A, viewer));
    PetscFunctionReturn(PETSC_SUCCESS);
  } else if (isdraw) {
    PetscDraw draw;
    PetscBool isnull;

    PetscCall(PetscViewerDrawGetDraw(viewer, 0, &draw));
    PetscCall(PetscDrawIsNull(draw, &isnull));
    if (isnull) PetscFunctionReturn(PETSC_SUCCESS);
  }

  { /* assemble the entire matrix onto the first processor */
    Mat A = NULL, Av;
    IS  isrow, iscol;
iscol; 1344 1345 PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat), rank == 0 ? mat->rmap->N : 0, 0, 1, &isrow)); 1346 PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat), rank == 0 ? mat->cmap->N : 0, 0, 1, &iscol)); 1347 PetscCall(MatCreateSubMatrix(mat, isrow, iscol, MAT_INITIAL_MATRIX, &A)); 1348 PetscCall(MatMPIAIJGetSeqAIJ(A, &Av, NULL, NULL)); 1349 /* The commented code uses MatCreateSubMatrices instead */ 1350 /* 1351 Mat *AA, A = NULL, Av; 1352 IS isrow,iscol; 1353 1354 PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->rmap->N : 0,0,1,&isrow)); 1355 PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->cmap->N : 0,0,1,&iscol)); 1356 PetscCall(MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA)); 1357 if (rank == 0) { 1358 PetscCall(PetscObjectReference((PetscObject)AA[0])); 1359 A = AA[0]; 1360 Av = AA[0]; 1361 } 1362 PetscCall(MatDestroySubMatrices(1,&AA)); 1363 */ 1364 PetscCall(ISDestroy(&iscol)); 1365 PetscCall(ISDestroy(&isrow)); 1366 /* 1367 Everyone has to call to draw the matrix since the graphics waits are 1368 synchronized across all processors that share the PetscDraw object 1369 */ 1370 PetscCall(PetscViewerGetSubViewer(viewer, PETSC_COMM_SELF, &sviewer)); 1371 if (rank == 0) { 1372 if (((PetscObject)mat)->name) PetscCall(PetscObjectSetName((PetscObject)Av, ((PetscObject)mat)->name)); 1373 PetscCall(MatView_SeqAIJ(Av, sviewer)); 1374 } 1375 PetscCall(PetscViewerRestoreSubViewer(viewer, PETSC_COMM_SELF, &sviewer)); 1376 PetscCall(MatDestroy(&A)); 1377 } 1378 PetscFunctionReturn(PETSC_SUCCESS); 1379 } 1380 1381 PetscErrorCode MatView_MPIAIJ(Mat mat, PetscViewer viewer) 1382 { 1383 PetscBool iascii, isdraw, issocket, isbinary; 1384 1385 PetscFunctionBegin; 1386 PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii)); 1387 PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw)); 1388 PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary)); 1389 PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERSOCKET, &issocket)); 1390 if (iascii || isdraw || isbinary || issocket) PetscCall(MatView_MPIAIJ_ASCIIorDraworSocket(mat, viewer)); 1391 PetscFunctionReturn(PETSC_SUCCESS); 1392 } 1393 1394 static PetscErrorCode MatSOR_MPIAIJ(Mat matin, Vec bb, PetscReal omega, MatSORType flag, PetscReal fshift, PetscInt its, PetscInt lits, Vec xx) 1395 { 1396 Mat_MPIAIJ *mat = (Mat_MPIAIJ *)matin->data; 1397 Vec bb1 = NULL; 1398 PetscBool hasop; 1399 1400 PetscFunctionBegin; 1401 if (flag == SOR_APPLY_UPPER) { 1402 PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx)); 1403 PetscFunctionReturn(PETSC_SUCCESS); 1404 } 1405 1406 if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) PetscCall(VecDuplicate(bb, &bb1)); 1407 1408 if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) { 1409 if (flag & SOR_ZERO_INITIAL_GUESS) { 1410 PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx)); 1411 its--; 1412 } 1413 1414 while (its--) { 1415 PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD)); 1416 PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD)); 1417 1418 /* update rhs: bb1 = bb - B*x */ 1419 PetscCall(VecScale(mat->lvec, -1.0)); 1420 PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1)); 1421 1422 /* local sweep */ 1423 PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_SYMMETRIC_SWEEP, 
fshift, lits, 1, xx)); 1424 } 1425 } else if (flag & SOR_LOCAL_FORWARD_SWEEP) { 1426 if (flag & SOR_ZERO_INITIAL_GUESS) { 1427 PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx)); 1428 its--; 1429 } 1430 while (its--) { 1431 PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD)); 1432 PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD)); 1433 1434 /* update rhs: bb1 = bb - B*x */ 1435 PetscCall(VecScale(mat->lvec, -1.0)); 1436 PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1)); 1437 1438 /* local sweep */ 1439 PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_FORWARD_SWEEP, fshift, lits, 1, xx)); 1440 } 1441 } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) { 1442 if (flag & SOR_ZERO_INITIAL_GUESS) { 1443 PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx)); 1444 its--; 1445 } 1446 while (its--) { 1447 PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD)); 1448 PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD)); 1449 1450 /* update rhs: bb1 = bb - B*x */ 1451 PetscCall(VecScale(mat->lvec, -1.0)); 1452 PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1)); 1453 1454 /* local sweep */ 1455 PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_BACKWARD_SWEEP, fshift, lits, 1, xx)); 1456 } 1457 } else if (flag & SOR_EISENSTAT) { 1458 Vec xx1; 1459 1460 PetscCall(VecDuplicate(bb, &xx1)); 1461 PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, (MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP), fshift, lits, 1, xx)); 1462 1463 PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD)); 1464 PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD)); 1465 if (!mat->diag) { 1466 PetscCall(MatCreateVecs(matin, &mat->diag, NULL)); 1467 PetscCall(MatGetDiagonal(matin, mat->diag)); 1468 } 1469 PetscCall(MatHasOperation(matin, MATOP_MULT_DIAGONAL_BLOCK, &hasop)); 1470 if (hasop) { 1471 PetscCall(MatMultDiagonalBlock(matin, xx, bb1)); 1472 } else { 1473 PetscCall(VecPointwiseMult(bb1, mat->diag, xx)); 1474 } 1475 PetscCall(VecAYPX(bb1, (omega - 2.0) / omega, bb)); 1476 1477 PetscCall(MatMultAdd(mat->B, mat->lvec, bb1, bb1)); 1478 1479 /* local sweep */ 1480 PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, (MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP), fshift, lits, 1, xx1)); 1481 PetscCall(VecAXPY(xx, 1.0, xx1)); 1482 PetscCall(VecDestroy(&xx1)); 1483 } else SETERRQ(PetscObjectComm((PetscObject)matin), PETSC_ERR_SUP, "Parallel SOR not supported"); 1484 1485 PetscCall(VecDestroy(&bb1)); 1486 1487 matin->factorerrortype = mat->A->factorerrortype; 1488 PetscFunctionReturn(PETSC_SUCCESS); 1489 } 1490 1491 static PetscErrorCode MatPermute_MPIAIJ(Mat A, IS rowp, IS colp, Mat *B) 1492 { 1493 Mat aA, aB, Aperm; 1494 const PetscInt *rwant, *cwant, *gcols, *ai, *bi, *aj, *bj; 1495 PetscScalar *aa, *ba; 1496 PetscInt i, j, m, n, ng, anz, bnz, *dnnz, *onnz, *tdnnz, *tonnz, *rdest, *cdest, *work, *gcdest; 1497 PetscSF rowsf, sf; 1498 IS parcolp = NULL; 1499 PetscBool done; 1500 1501 PetscFunctionBegin; 1502 PetscCall(MatGetLocalSize(A, &m, &n)); 1503 PetscCall(ISGetIndices(rowp, &rwant)); 1504 PetscCall(ISGetIndices(colp, &cwant)); 1505 PetscCall(PetscMalloc3(PetscMax(m, n), &work, m, &rdest, n, &cdest)); 1506 1507 /* Invert row permutation to find out where my rows should go */ 1508 PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), 
&rowsf)); 1509 PetscCall(PetscSFSetGraphLayout(rowsf, A->rmap, A->rmap->n, NULL, PETSC_OWN_POINTER, rwant)); 1510 PetscCall(PetscSFSetFromOptions(rowsf)); 1511 for (i = 0; i < m; i++) work[i] = A->rmap->rstart + i; 1512 PetscCall(PetscSFReduceBegin(rowsf, MPIU_INT, work, rdest, MPI_REPLACE)); 1513 PetscCall(PetscSFReduceEnd(rowsf, MPIU_INT, work, rdest, MPI_REPLACE)); 1514 1515 /* Invert column permutation to find out where my columns should go */ 1516 PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf)); 1517 PetscCall(PetscSFSetGraphLayout(sf, A->cmap, A->cmap->n, NULL, PETSC_OWN_POINTER, cwant)); 1518 PetscCall(PetscSFSetFromOptions(sf)); 1519 for (i = 0; i < n; i++) work[i] = A->cmap->rstart + i; 1520 PetscCall(PetscSFReduceBegin(sf, MPIU_INT, work, cdest, MPI_REPLACE)); 1521 PetscCall(PetscSFReduceEnd(sf, MPIU_INT, work, cdest, MPI_REPLACE)); 1522 PetscCall(PetscSFDestroy(&sf)); 1523 1524 PetscCall(ISRestoreIndices(rowp, &rwant)); 1525 PetscCall(ISRestoreIndices(colp, &cwant)); 1526 PetscCall(MatMPIAIJGetSeqAIJ(A, &aA, &aB, &gcols)); 1527 1528 /* Find out where my gcols should go */ 1529 PetscCall(MatGetSize(aB, NULL, &ng)); 1530 PetscCall(PetscMalloc1(ng, &gcdest)); 1531 PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf)); 1532 PetscCall(PetscSFSetGraphLayout(sf, A->cmap, ng, NULL, PETSC_OWN_POINTER, gcols)); 1533 PetscCall(PetscSFSetFromOptions(sf)); 1534 PetscCall(PetscSFBcastBegin(sf, MPIU_INT, cdest, gcdest, MPI_REPLACE)); 1535 PetscCall(PetscSFBcastEnd(sf, MPIU_INT, cdest, gcdest, MPI_REPLACE)); 1536 PetscCall(PetscSFDestroy(&sf)); 1537 1538 PetscCall(PetscCalloc4(m, &dnnz, m, &onnz, m, &tdnnz, m, &tonnz)); 1539 PetscCall(MatGetRowIJ(aA, 0, PETSC_FALSE, PETSC_FALSE, &anz, &ai, &aj, &done)); 1540 PetscCall(MatGetRowIJ(aB, 0, PETSC_FALSE, PETSC_FALSE, &bnz, &bi, &bj, &done)); 1541 for (i = 0; i < m; i++) { 1542 PetscInt row = rdest[i]; 1543 PetscMPIInt rowner; 1544 PetscCall(PetscLayoutFindOwner(A->rmap, row, &rowner)); 1545 for (j = ai[i]; j < ai[i + 1]; j++) { 1546 PetscInt col = cdest[aj[j]]; 1547 PetscMPIInt cowner; 1548 PetscCall(PetscLayoutFindOwner(A->cmap, col, &cowner)); /* Could build an index for the columns to eliminate this search */ 1549 if (rowner == cowner) dnnz[i]++; 1550 else onnz[i]++; 1551 } 1552 for (j = bi[i]; j < bi[i + 1]; j++) { 1553 PetscInt col = gcdest[bj[j]]; 1554 PetscMPIInt cowner; 1555 PetscCall(PetscLayoutFindOwner(A->cmap, col, &cowner)); 1556 if (rowner == cowner) dnnz[i]++; 1557 else onnz[i]++; 1558 } 1559 } 1560 PetscCall(PetscSFBcastBegin(rowsf, MPIU_INT, dnnz, tdnnz, MPI_REPLACE)); 1561 PetscCall(PetscSFBcastEnd(rowsf, MPIU_INT, dnnz, tdnnz, MPI_REPLACE)); 1562 PetscCall(PetscSFBcastBegin(rowsf, MPIU_INT, onnz, tonnz, MPI_REPLACE)); 1563 PetscCall(PetscSFBcastEnd(rowsf, MPIU_INT, onnz, tonnz, MPI_REPLACE)); 1564 PetscCall(PetscSFDestroy(&rowsf)); 1565 1566 PetscCall(MatCreateAIJ(PetscObjectComm((PetscObject)A), A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N, 0, tdnnz, 0, tonnz, &Aperm)); 1567 PetscCall(MatSeqAIJGetArray(aA, &aa)); 1568 PetscCall(MatSeqAIJGetArray(aB, &ba)); 1569 for (i = 0; i < m; i++) { 1570 PetscInt *acols = dnnz, *bcols = onnz; /* Repurpose now-unneeded arrays */ 1571 PetscInt j0, rowlen; 1572 rowlen = ai[i + 1] - ai[i]; 1573 for (j0 = j = 0; j < rowlen; j0 = j) { /* rowlen could be larger than number of rows m, so sum in batches */ 1574 for (; j < PetscMin(rowlen, j0 + m); j++) acols[j - j0] = cdest[aj[ai[i] + j]]; 1575 PetscCall(MatSetValues(Aperm, 1, &rdest[i], j - j0, acols, aa + ai[i] + j0, 
INSERT_VALUES)); 1576 } 1577 rowlen = bi[i + 1] - bi[i]; 1578 for (j0 = j = 0; j < rowlen; j0 = j) { 1579 for (; j < PetscMin(rowlen, j0 + m); j++) bcols[j - j0] = gcdest[bj[bi[i] + j]]; 1580 PetscCall(MatSetValues(Aperm, 1, &rdest[i], j - j0, bcols, ba + bi[i] + j0, INSERT_VALUES)); 1581 } 1582 } 1583 PetscCall(MatAssemblyBegin(Aperm, MAT_FINAL_ASSEMBLY)); 1584 PetscCall(MatAssemblyEnd(Aperm, MAT_FINAL_ASSEMBLY)); 1585 PetscCall(MatRestoreRowIJ(aA, 0, PETSC_FALSE, PETSC_FALSE, &anz, &ai, &aj, &done)); 1586 PetscCall(MatRestoreRowIJ(aB, 0, PETSC_FALSE, PETSC_FALSE, &bnz, &bi, &bj, &done)); 1587 PetscCall(MatSeqAIJRestoreArray(aA, &aa)); 1588 PetscCall(MatSeqAIJRestoreArray(aB, &ba)); 1589 PetscCall(PetscFree4(dnnz, onnz, tdnnz, tonnz)); 1590 PetscCall(PetscFree3(work, rdest, cdest)); 1591 PetscCall(PetscFree(gcdest)); 1592 if (parcolp) PetscCall(ISDestroy(&colp)); 1593 *B = Aperm; 1594 PetscFunctionReturn(PETSC_SUCCESS); 1595 } 1596 1597 static PetscErrorCode MatGetGhosts_MPIAIJ(Mat mat, PetscInt *nghosts, const PetscInt *ghosts[]) 1598 { 1599 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data; 1600 1601 PetscFunctionBegin; 1602 PetscCall(MatGetSize(aij->B, NULL, nghosts)); 1603 if (ghosts) *ghosts = aij->garray; 1604 PetscFunctionReturn(PETSC_SUCCESS); 1605 } 1606 1607 static PetscErrorCode MatGetInfo_MPIAIJ(Mat matin, MatInfoType flag, MatInfo *info) 1608 { 1609 Mat_MPIAIJ *mat = (Mat_MPIAIJ *)matin->data; 1610 Mat A = mat->A, B = mat->B; 1611 PetscLogDouble isend[5], irecv[5]; 1612 1613 PetscFunctionBegin; 1614 info->block_size = 1.0; 1615 PetscCall(MatGetInfo(A, MAT_LOCAL, info)); 1616 1617 isend[0] = info->nz_used; 1618 isend[1] = info->nz_allocated; 1619 isend[2] = info->nz_unneeded; 1620 isend[3] = info->memory; 1621 isend[4] = info->mallocs; 1622 1623 PetscCall(MatGetInfo(B, MAT_LOCAL, info)); 1624 1625 isend[0] += info->nz_used; 1626 isend[1] += info->nz_allocated; 1627 isend[2] += info->nz_unneeded; 1628 isend[3] += info->memory; 1629 isend[4] += info->mallocs; 1630 if (flag == MAT_LOCAL) { 1631 info->nz_used = isend[0]; 1632 info->nz_allocated = isend[1]; 1633 info->nz_unneeded = isend[2]; 1634 info->memory = isend[3]; 1635 info->mallocs = isend[4]; 1636 } else if (flag == MAT_GLOBAL_MAX) { 1637 PetscCall(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_MAX, PetscObjectComm((PetscObject)matin))); 1638 1639 info->nz_used = irecv[0]; 1640 info->nz_allocated = irecv[1]; 1641 info->nz_unneeded = irecv[2]; 1642 info->memory = irecv[3]; 1643 info->mallocs = irecv[4]; 1644 } else if (flag == MAT_GLOBAL_SUM) { 1645 PetscCall(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_SUM, PetscObjectComm((PetscObject)matin))); 1646 1647 info->nz_used = irecv[0]; 1648 info->nz_allocated = irecv[1]; 1649 info->nz_unneeded = irecv[2]; 1650 info->memory = irecv[3]; 1651 info->mallocs = irecv[4]; 1652 } 1653 info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */ 1654 info->fill_ratio_needed = 0; 1655 info->factor_mallocs = 0; 1656 PetscFunctionReturn(PETSC_SUCCESS); 1657 } 1658 1659 PetscErrorCode MatSetOption_MPIAIJ(Mat A, MatOption op, PetscBool flg) 1660 { 1661 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 1662 1663 PetscFunctionBegin; 1664 switch (op) { 1665 case MAT_NEW_NONZERO_LOCATIONS: 1666 case MAT_NEW_NONZERO_ALLOCATION_ERR: 1667 case MAT_UNUSED_NONZERO_LOCATION_ERR: 1668 case MAT_KEEP_NONZERO_PATTERN: 1669 case MAT_NEW_NONZERO_LOCATION_ERR: 1670 case MAT_USE_INODES: 1671 case MAT_IGNORE_ZERO_ENTRIES: 1672 case MAT_FORM_EXPLICIT_TRANSPOSE: 1673 MatCheckPreallocated(A, 1); 1674 
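    /* Each option in the case group above is simply forwarded to both the diagonal (a->A)
       and off-diagonal (a->B) SeqAIJ blocks below. A minimal caller-side sketch
       (hypothetical code, not part of this file; A is an assumed MATMPIAIJ matrix):

         PetscCall(MatSetOption(A, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));  // both blocks drop explicitly set zeros
         PetscCall(MatSetOption(A, MAT_KEEP_NONZERO_PATTERN, PETSC_TRUE)); // both blocks keep their pattern in MatZeroRows()
    */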
PetscCall(MatSetOption(a->A, op, flg)); 1675 PetscCall(MatSetOption(a->B, op, flg)); 1676 break; 1677 case MAT_ROW_ORIENTED: 1678 MatCheckPreallocated(A, 1); 1679 a->roworiented = flg; 1680 1681 PetscCall(MatSetOption(a->A, op, flg)); 1682 PetscCall(MatSetOption(a->B, op, flg)); 1683 break; 1684 case MAT_FORCE_DIAGONAL_ENTRIES: 1685 case MAT_SORTED_FULL: 1686 PetscCall(PetscInfo(A, "Option %s ignored\n", MatOptions[op])); 1687 break; 1688 case MAT_IGNORE_OFF_PROC_ENTRIES: 1689 a->donotstash = flg; 1690 break; 1691 /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */ 1692 case MAT_SPD: 1693 case MAT_SYMMETRIC: 1694 case MAT_STRUCTURALLY_SYMMETRIC: 1695 case MAT_HERMITIAN: 1696 case MAT_SYMMETRY_ETERNAL: 1697 case MAT_STRUCTURAL_SYMMETRY_ETERNAL: 1698 case MAT_SPD_ETERNAL: 1699 /* if the diagonal matrix is square it inherits some of the properties above */ 1700 break; 1701 case MAT_SUBMAT_SINGLEIS: 1702 A->submat_singleis = flg; 1703 break; 1704 case MAT_STRUCTURE_ONLY: 1705 /* The option is handled directly by MatSetOption() */ 1706 break; 1707 default: 1708 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unknown option %d", op); 1709 } 1710 PetscFunctionReturn(PETSC_SUCCESS); 1711 } 1712 1713 PetscErrorCode MatGetRow_MPIAIJ(Mat matin, PetscInt row, PetscInt *nz, PetscInt **idx, PetscScalar **v) 1714 { 1715 Mat_MPIAIJ *mat = (Mat_MPIAIJ *)matin->data; 1716 PetscScalar *vworkA, *vworkB, **pvA, **pvB, *v_p; 1717 PetscInt i, *cworkA, *cworkB, **pcA, **pcB, cstart = matin->cmap->rstart; 1718 PetscInt nztot, nzA, nzB, lrow, rstart = matin->rmap->rstart, rend = matin->rmap->rend; 1719 PetscInt *cmap, *idx_p; 1720 1721 PetscFunctionBegin; 1722 PetscCheck(!mat->getrowactive, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Already active"); 1723 mat->getrowactive = PETSC_TRUE; 1724 1725 if (!mat->rowvalues && (idx || v)) { 1726 /* 1727 allocate enough space to hold information from the longest row. 
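       (That is, the maximum over local rows of the combined A-part and B-part row
       lengths; the loop below computes it from the two CSR row-pointer arrays.)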
1728 */ 1729 Mat_SeqAIJ *Aa = (Mat_SeqAIJ *)mat->A->data, *Ba = (Mat_SeqAIJ *)mat->B->data; 1730 PetscInt max = 1, tmp; 1731 for (i = 0; i < matin->rmap->n; i++) { 1732 tmp = Aa->i[i + 1] - Aa->i[i] + Ba->i[i + 1] - Ba->i[i]; 1733 if (max < tmp) max = tmp; 1734 } 1735 PetscCall(PetscMalloc2(max, &mat->rowvalues, max, &mat->rowindices)); 1736 } 1737 1738 PetscCheck(row >= rstart && row < rend, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Only local rows"); 1739 lrow = row - rstart; 1740 1741 pvA = &vworkA; 1742 pcA = &cworkA; 1743 pvB = &vworkB; 1744 pcB = &cworkB; 1745 if (!v) { 1746 pvA = NULL; 1747 pvB = NULL; 1748 } 1749 if (!idx) { 1750 pcA = NULL; 1751 if (!v) pcB = NULL; 1752 } 1753 PetscCall((*mat->A->ops->getrow)(mat->A, lrow, &nzA, pcA, pvA)); 1754 PetscCall((*mat->B->ops->getrow)(mat->B, lrow, &nzB, pcB, pvB)); 1755 nztot = nzA + nzB; 1756 1757 cmap = mat->garray; 1758 if (v || idx) { 1759 if (nztot) { 1760 /* Sort by increasing column numbers, assuming A and B already sorted */ 1761 PetscInt imark = -1; 1762 if (v) { 1763 *v = v_p = mat->rowvalues; 1764 for (i = 0; i < nzB; i++) { 1765 if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i]; 1766 else break; 1767 } 1768 imark = i; 1769 for (i = 0; i < nzA; i++) v_p[imark + i] = vworkA[i]; 1770 for (i = imark; i < nzB; i++) v_p[nzA + i] = vworkB[i]; 1771 } 1772 if (idx) { 1773 *idx = idx_p = mat->rowindices; 1774 if (imark > -1) { 1775 for (i = 0; i < imark; i++) idx_p[i] = cmap[cworkB[i]]; 1776 } else { 1777 for (i = 0; i < nzB; i++) { 1778 if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]]; 1779 else break; 1780 } 1781 imark = i; 1782 } 1783 for (i = 0; i < nzA; i++) idx_p[imark + i] = cstart + cworkA[i]; 1784 for (i = imark; i < nzB; i++) idx_p[nzA + i] = cmap[cworkB[i]]; 1785 } 1786 } else { 1787 if (idx) *idx = NULL; 1788 if (v) *v = NULL; 1789 } 1790 } 1791 *nz = nztot; 1792 PetscCall((*mat->A->ops->restorerow)(mat->A, lrow, &nzA, pcA, pvA)); 1793 PetscCall((*mat->B->ops->restorerow)(mat->B, lrow, &nzB, pcB, pvB)); 1794 PetscFunctionReturn(PETSC_SUCCESS); 1795 } 1796 1797 PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat, PetscInt row, PetscInt *nz, PetscInt **idx, PetscScalar **v) 1798 { 1799 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data; 1800 1801 PetscFunctionBegin; 1802 PetscCheck(aij->getrowactive, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "MatGetRow() must be called first"); 1803 aij->getrowactive = PETSC_FALSE; 1804 PetscFunctionReturn(PETSC_SUCCESS); 1805 } 1806 1807 static PetscErrorCode MatNorm_MPIAIJ(Mat mat, NormType type, PetscReal *norm) 1808 { 1809 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data; 1810 Mat_SeqAIJ *amat = (Mat_SeqAIJ *)aij->A->data, *bmat = (Mat_SeqAIJ *)aij->B->data; 1811 PetscInt i, j, cstart = mat->cmap->rstart; 1812 PetscReal sum = 0.0; 1813 const MatScalar *v, *amata, *bmata; 1814 1815 PetscFunctionBegin; 1816 if (aij->size == 1) { 1817 PetscCall(MatNorm(aij->A, type, norm)); 1818 } else { 1819 PetscCall(MatSeqAIJGetArrayRead(aij->A, &amata)); 1820 PetscCall(MatSeqAIJGetArrayRead(aij->B, &bmata)); 1821 if (type == NORM_FROBENIUS) { 1822 v = amata; 1823 for (i = 0; i < amat->nz; i++) { 1824 sum += PetscRealPart(PetscConj(*v) * (*v)); 1825 v++; 1826 } 1827 v = bmata; 1828 for (i = 0; i < bmat->nz; i++) { 1829 sum += PetscRealPart(PetscConj(*v) * (*v)); 1830 v++; 1831 } 1832 PetscCall(MPIU_Allreduce(&sum, norm, 1, MPIU_REAL, MPIU_SUM, PetscObjectComm((PetscObject)mat))); 1833 *norm = PetscSqrtReal(*norm); 1834 PetscCall(PetscLogFlops(2.0 * amat->nz + 2.0 * bmat->nz)); 1835 } else if (type == NORM_1) { /* max 
column norm */ 1836 PetscReal *tmp, *tmp2; 1837 PetscInt *jj, *garray = aij->garray; 1838 PetscCall(PetscCalloc1(mat->cmap->N + 1, &tmp)); 1839 PetscCall(PetscMalloc1(mat->cmap->N + 1, &tmp2)); 1840 *norm = 0.0; 1841 v = amata; 1842 jj = amat->j; 1843 for (j = 0; j < amat->nz; j++) { 1844 tmp[cstart + *jj++] += PetscAbsScalar(*v); 1845 v++; 1846 } 1847 v = bmata; 1848 jj = bmat->j; 1849 for (j = 0; j < bmat->nz; j++) { 1850 tmp[garray[*jj++]] += PetscAbsScalar(*v); 1851 v++; 1852 } 1853 PetscCall(MPIU_Allreduce(tmp, tmp2, mat->cmap->N, MPIU_REAL, MPIU_SUM, PetscObjectComm((PetscObject)mat))); 1854 for (j = 0; j < mat->cmap->N; j++) { 1855 if (tmp2[j] > *norm) *norm = tmp2[j]; 1856 } 1857 PetscCall(PetscFree(tmp)); 1858 PetscCall(PetscFree(tmp2)); 1859 PetscCall(PetscLogFlops(PetscMax(amat->nz + bmat->nz - 1, 0))); 1860 } else if (type == NORM_INFINITY) { /* max row norm */ 1861 PetscReal ntemp = 0.0; 1862 for (j = 0; j < aij->A->rmap->n; j++) { 1863 v = PetscSafePointerPlusOffset(amata, amat->i[j]); 1864 sum = 0.0; 1865 for (i = 0; i < amat->i[j + 1] - amat->i[j]; i++) { 1866 sum += PetscAbsScalar(*v); 1867 v++; 1868 } 1869 v = PetscSafePointerPlusOffset(bmata, bmat->i[j]); 1870 for (i = 0; i < bmat->i[j + 1] - bmat->i[j]; i++) { 1871 sum += PetscAbsScalar(*v); 1872 v++; 1873 } 1874 if (sum > ntemp) ntemp = sum; 1875 } 1876 PetscCall(MPIU_Allreduce(&ntemp, norm, 1, MPIU_REAL, MPIU_MAX, PetscObjectComm((PetscObject)mat))); 1877 PetscCall(PetscLogFlops(PetscMax(amat->nz + bmat->nz - 1, 0))); 1878 } else SETERRQ(PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "No support for two norm"); 1879 PetscCall(MatSeqAIJRestoreArrayRead(aij->A, &amata)); 1880 PetscCall(MatSeqAIJRestoreArrayRead(aij->B, &bmata)); 1881 } 1882 PetscFunctionReturn(PETSC_SUCCESS); 1883 } 1884 1885 static PetscErrorCode MatTranspose_MPIAIJ(Mat A, MatReuse reuse, Mat *matout) 1886 { 1887 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *b; 1888 Mat_SeqAIJ *Aloc = (Mat_SeqAIJ *)a->A->data, *Bloc = (Mat_SeqAIJ *)a->B->data, *sub_B_diag; 1889 PetscInt M = A->rmap->N, N = A->cmap->N, ma, na, mb, nb, row, *cols, *cols_tmp, *B_diag_ilen, i, ncol, A_diag_ncol; 1890 const PetscInt *ai, *aj, *bi, *bj, *B_diag_i; 1891 Mat B, A_diag, *B_diag; 1892 const MatScalar *pbv, *bv; 1893 1894 PetscFunctionBegin; 1895 if (reuse == MAT_REUSE_MATRIX) PetscCall(MatTransposeCheckNonzeroState_Private(A, *matout)); 1896 ma = A->rmap->n; 1897 na = A->cmap->n; 1898 mb = a->B->rmap->n; 1899 nb = a->B->cmap->n; 1900 ai = Aloc->i; 1901 aj = Aloc->j; 1902 bi = Bloc->i; 1903 bj = Bloc->j; 1904 if (reuse == MAT_INITIAL_MATRIX || *matout == A) { 1905 PetscInt *d_nnz, *g_nnz, *o_nnz; 1906 PetscSFNode *oloc; 1907 PETSC_UNUSED PetscSF sf; 1908 1909 PetscCall(PetscMalloc4(na, &d_nnz, na, &o_nnz, nb, &g_nnz, nb, &oloc)); 1910 /* compute d_nnz for preallocation */ 1911 PetscCall(PetscArrayzero(d_nnz, na)); 1912 for (i = 0; i < ai[ma]; i++) d_nnz[aj[i]]++; 1913 /* compute local off-diagonal contributions */ 1914 PetscCall(PetscArrayzero(g_nnz, nb)); 1915 for (i = 0; i < bi[ma]; i++) g_nnz[bj[i]]++; 1916 /* map those to global */ 1917 PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf)); 1918 PetscCall(PetscSFSetGraphLayout(sf, A->cmap, nb, NULL, PETSC_USE_POINTER, a->garray)); 1919 PetscCall(PetscSFSetFromOptions(sf)); 1920 PetscCall(PetscArrayzero(o_nnz, na)); 1921 PetscCall(PetscSFReduceBegin(sf, MPIU_INT, g_nnz, o_nnz, MPI_SUM)); 1922 PetscCall(PetscSFReduceEnd(sf, MPIU_INT, g_nnz, o_nnz, MPI_SUM)); 1923 PetscCall(PetscSFDestroy(&sf)); 1924 1925 
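  /* The star forest above implements the generic "accumulate leaf counts at the owning
     root" pattern: g_nnz holds per-ghost-column counts and the MPI_SUM reduction lands
     them in o_nnz on the owners. A minimal self-contained sketch of the same pattern
     (hypothetical; comm, layout, and the index data below are made up for illustration):

       PetscSF  sf;
       PetscInt ghosts[2]  = {3, 7};         // global indices of two ghost columns
       PetscInt leafcnt[2] = {2, 5};         // local counts attached to the ghosts
       PetscInt owned[4]   = {0, 0, 0, 0};   // this rank's owned slice, zeroed
       PetscCall(PetscSFCreate(comm, &sf));
       PetscCall(PetscSFSetGraphLayout(sf, layout, 2, NULL, PETSC_USE_POINTER, ghosts));
       PetscCall(PetscSFReduceBegin(sf, MPIU_INT, leafcnt, owned, MPI_SUM));
       PetscCall(PetscSFReduceEnd(sf, MPIU_INT, leafcnt, owned, MPI_SUM));
       PetscCall(PetscSFDestroy(&sf));
  */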
PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B)); 1926 PetscCall(MatSetSizes(B, A->cmap->n, A->rmap->n, N, M)); 1927 PetscCall(MatSetBlockSizes(B, PetscAbs(A->cmap->bs), PetscAbs(A->rmap->bs))); 1928 PetscCall(MatSetType(B, ((PetscObject)A)->type_name)); 1929 PetscCall(MatMPIAIJSetPreallocation(B, 0, d_nnz, 0, o_nnz)); 1930 PetscCall(PetscFree4(d_nnz, o_nnz, g_nnz, oloc)); 1931 } else { 1932 B = *matout; 1933 PetscCall(MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE)); 1934 } 1935 1936 b = (Mat_MPIAIJ *)B->data; 1937 A_diag = a->A; 1938 B_diag = &b->A; 1939 sub_B_diag = (Mat_SeqAIJ *)(*B_diag)->data; 1940 A_diag_ncol = A_diag->cmap->N; 1941 B_diag_ilen = sub_B_diag->ilen; 1942 B_diag_i = sub_B_diag->i; 1943 1944 /* Set ilen for diagonal of B */ 1945 for (i = 0; i < A_diag_ncol; i++) B_diag_ilen[i] = B_diag_i[i + 1] - B_diag_i[i]; 1946 1947 /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done 1948 very quickly (=without using MatSetValues), because all writes are local. */ 1949 PetscCall(MatTransposeSetPrecursor(A_diag, *B_diag)); 1950 PetscCall(MatTranspose(A_diag, MAT_REUSE_MATRIX, B_diag)); 1951 1952 /* copy over the B part */ 1953 PetscCall(PetscMalloc1(bi[mb], &cols)); 1954 PetscCall(MatSeqAIJGetArrayRead(a->B, &bv)); 1955 pbv = bv; 1956 row = A->rmap->rstart; 1957 for (i = 0; i < bi[mb]; i++) cols[i] = a->garray[bj[i]]; 1958 cols_tmp = cols; 1959 for (i = 0; i < mb; i++) { 1960 ncol = bi[i + 1] - bi[i]; 1961 PetscCall(MatSetValues(B, ncol, cols_tmp, 1, &row, pbv, INSERT_VALUES)); 1962 row++; 1963 if (pbv) pbv += ncol; 1964 if (cols_tmp) cols_tmp += ncol; 1965 } 1966 PetscCall(PetscFree(cols)); 1967 PetscCall(MatSeqAIJRestoreArrayRead(a->B, &bv)); 1968 1969 PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY)); 1970 PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY)); 1971 if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) { 1972 *matout = B; 1973 } else { 1974 PetscCall(MatHeaderMerge(A, &B)); 1975 } 1976 PetscFunctionReturn(PETSC_SUCCESS); 1977 } 1978 1979 static PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat, Vec ll, Vec rr) 1980 { 1981 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data; 1982 Mat a = aij->A, b = aij->B; 1983 PetscInt s1, s2, s3; 1984 1985 PetscFunctionBegin; 1986 PetscCall(MatGetLocalSize(mat, &s2, &s3)); 1987 if (rr) { 1988 PetscCall(VecGetLocalSize(rr, &s1)); 1989 PetscCheck(s1 == s3, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "right vector non-conforming local size"); 1990 /* Overlap communication with computation. 
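         The forward scatter of rr into aij->lvec is started here, the (purely local)
         diagonal-block scaling proceeds while the messages are in flight, and the
         scatter is completed below just before the off-diagonal block is right-scaled
         with the ghosted values.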
*/ 1991 PetscCall(VecScatterBegin(aij->Mvctx, rr, aij->lvec, INSERT_VALUES, SCATTER_FORWARD)); 1992 } 1993 if (ll) { 1994 PetscCall(VecGetLocalSize(ll, &s1)); 1995 PetscCheck(s1 == s2, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "left vector non-conforming local size"); 1996 PetscUseTypeMethod(b, diagonalscale, ll, NULL); 1997 } 1998 /* scale the diagonal block */ 1999 PetscUseTypeMethod(a, diagonalscale, ll, rr); 2000 2001 if (rr) { 2002 /* Do a scatter end and then right scale the off-diagonal block */ 2003 PetscCall(VecScatterEnd(aij->Mvctx, rr, aij->lvec, INSERT_VALUES, SCATTER_FORWARD)); 2004 PetscUseTypeMethod(b, diagonalscale, NULL, aij->lvec); 2005 } 2006 PetscFunctionReturn(PETSC_SUCCESS); 2007 } 2008 2009 static PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A) 2010 { 2011 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 2012 2013 PetscFunctionBegin; 2014 PetscCall(MatSetUnfactored(a->A)); 2015 PetscFunctionReturn(PETSC_SUCCESS); 2016 } 2017 2018 static PetscErrorCode MatEqual_MPIAIJ(Mat A, Mat B, PetscBool *flag) 2019 { 2020 Mat_MPIAIJ *matB = (Mat_MPIAIJ *)B->data, *matA = (Mat_MPIAIJ *)A->data; 2021 Mat a, b, c, d; 2022 PetscBool flg; 2023 2024 PetscFunctionBegin; 2025 a = matA->A; 2026 b = matA->B; 2027 c = matB->A; 2028 d = matB->B; 2029 2030 PetscCall(MatEqual(a, c, &flg)); 2031 if (flg) PetscCall(MatEqual(b, d, &flg)); 2032 PetscCall(MPIU_Allreduce(&flg, flag, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)A))); 2033 PetscFunctionReturn(PETSC_SUCCESS); 2034 } 2035 2036 static PetscErrorCode MatCopy_MPIAIJ(Mat A, Mat B, MatStructure str) 2037 { 2038 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 2039 Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data; 2040 2041 PetscFunctionBegin; 2042 /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */ 2043 if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) { 2044 /* because of the column compression in the off-processor part of the matrix a->B, 2045 the number of columns in a->B and b->B may be different, hence we cannot call 2046 the MatCopy() directly on the two parts. If need be, we can provide a more 2047 efficient copy than the MatCopy_Basic() by first uncompressing the a->B matrices 2048 then copying the submatrices */ 2049 PetscCall(MatCopy_Basic(A, B, str)); 2050 } else { 2051 PetscCall(MatCopy(a->A, b->A, str)); 2052 PetscCall(MatCopy(a->B, b->B, str)); 2053 } 2054 PetscCall(PetscObjectStateIncrease((PetscObject)B)); 2055 PetscFunctionReturn(PETSC_SUCCESS); 2056 } 2057 2058 /* 2059 Computes the number of nonzeros per row needed for preallocation when X and Y 2060 have different nonzero structure. 
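   For example, with identity local-to-global maps, merging the sorted row patterns
   xj = {0,3} and yj = {1,3,5} counts the union {0,1,3,5}, giving nnz = 4 for that row.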
2061 */ 2062 PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m, const PetscInt *xi, const PetscInt *xj, const PetscInt *xltog, const PetscInt *yi, const PetscInt *yj, const PetscInt *yltog, PetscInt *nnz) 2063 { 2064 PetscInt i, j, k, nzx, nzy; 2065 2066 PetscFunctionBegin; 2067 /* Set the number of nonzeros in the new matrix */ 2068 for (i = 0; i < m; i++) { 2069 const PetscInt *xjj = PetscSafePointerPlusOffset(xj, xi[i]), *yjj = PetscSafePointerPlusOffset(yj, yi[i]); 2070 nzx = xi[i + 1] - xi[i]; 2071 nzy = yi[i + 1] - yi[i]; 2072 nnz[i] = 0; 2073 for (j = 0, k = 0; j < nzx; j++) { /* Point in X */ 2074 for (; k < nzy && yltog[yjj[k]] < xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */ 2075 if (k < nzy && yltog[yjj[k]] == xltog[xjj[j]]) k++; /* Skip duplicate */ 2076 nnz[i]++; 2077 } 2078 for (; k < nzy; k++) nnz[i]++; 2079 } 2080 PetscFunctionReturn(PETSC_SUCCESS); 2081 } 2082 2083 /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */ 2084 static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y, const PetscInt *yltog, Mat X, const PetscInt *xltog, PetscInt *nnz) 2085 { 2086 PetscInt m = Y->rmap->N; 2087 Mat_SeqAIJ *x = (Mat_SeqAIJ *)X->data; 2088 Mat_SeqAIJ *y = (Mat_SeqAIJ *)Y->data; 2089 2090 PetscFunctionBegin; 2091 PetscCall(MatAXPYGetPreallocation_MPIX_private(m, x->i, x->j, xltog, y->i, y->j, yltog, nnz)); 2092 PetscFunctionReturn(PETSC_SUCCESS); 2093 } 2094 2095 static PetscErrorCode MatAXPY_MPIAIJ(Mat Y, PetscScalar a, Mat X, MatStructure str) 2096 { 2097 Mat_MPIAIJ *xx = (Mat_MPIAIJ *)X->data, *yy = (Mat_MPIAIJ *)Y->data; 2098 2099 PetscFunctionBegin; 2100 if (str == SAME_NONZERO_PATTERN) { 2101 PetscCall(MatAXPY(yy->A, a, xx->A, str)); 2102 PetscCall(MatAXPY(yy->B, a, xx->B, str)); 2103 } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */ 2104 PetscCall(MatAXPY_Basic(Y, a, X, str)); 2105 } else { 2106 Mat B; 2107 PetscInt *nnz_d, *nnz_o; 2108 2109 PetscCall(PetscMalloc1(yy->A->rmap->N, &nnz_d)); 2110 PetscCall(PetscMalloc1(yy->B->rmap->N, &nnz_o)); 2111 PetscCall(MatCreate(PetscObjectComm((PetscObject)Y), &B)); 2112 PetscCall(PetscObjectSetName((PetscObject)B, ((PetscObject)Y)->name)); 2113 PetscCall(MatSetLayouts(B, Y->rmap, Y->cmap)); 2114 PetscCall(MatSetType(B, ((PetscObject)Y)->type_name)); 2115 PetscCall(MatAXPYGetPreallocation_SeqAIJ(yy->A, xx->A, nnz_d)); 2116 PetscCall(MatAXPYGetPreallocation_MPIAIJ(yy->B, yy->garray, xx->B, xx->garray, nnz_o)); 2117 PetscCall(MatMPIAIJSetPreallocation(B, 0, nnz_d, 0, nnz_o)); 2118 PetscCall(MatAXPY_BasicWithPreallocation(B, Y, a, X, str)); 2119 PetscCall(MatHeaderMerge(Y, &B)); 2120 PetscCall(PetscFree(nnz_d)); 2121 PetscCall(PetscFree(nnz_o)); 2122 } 2123 PetscFunctionReturn(PETSC_SUCCESS); 2124 } 2125 2126 PETSC_INTERN PetscErrorCode MatConjugate_SeqAIJ(Mat); 2127 2128 static PetscErrorCode MatConjugate_MPIAIJ(Mat mat) 2129 { 2130 PetscFunctionBegin; 2131 if (PetscDefined(USE_COMPLEX)) { 2132 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data; 2133 2134 PetscCall(MatConjugate_SeqAIJ(aij->A)); 2135 PetscCall(MatConjugate_SeqAIJ(aij->B)); 2136 } 2137 PetscFunctionReturn(PETSC_SUCCESS); 2138 } 2139 2140 static PetscErrorCode MatRealPart_MPIAIJ(Mat A) 2141 { 2142 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 2143 2144 PetscFunctionBegin; 2145 PetscCall(MatRealPart(a->A)); 2146 PetscCall(MatRealPart(a->B)); 2147 PetscFunctionReturn(PETSC_SUCCESS); 2148 } 2149 2150 static PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A) 2151 { 2152 Mat_MPIAIJ *a = (Mat_MPIAIJ 
*)A->data; 2153 2154 PetscFunctionBegin; 2155 PetscCall(MatImaginaryPart(a->A)); 2156 PetscCall(MatImaginaryPart(a->B)); 2157 PetscFunctionReturn(PETSC_SUCCESS); 2158 } 2159 2160 static PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[]) 2161 { 2162 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 2163 PetscInt i, *idxb = NULL, m = A->rmap->n; 2164 PetscScalar *va, *vv; 2165 Vec vB, vA; 2166 const PetscScalar *vb; 2167 2168 PetscFunctionBegin; 2169 PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &vA)); 2170 PetscCall(MatGetRowMaxAbs(a->A, vA, idx)); 2171 2172 PetscCall(VecGetArrayWrite(vA, &va)); 2173 if (idx) { 2174 for (i = 0; i < m; i++) { 2175 if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart; 2176 } 2177 } 2178 2179 PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &vB)); 2180 PetscCall(PetscMalloc1(m, &idxb)); 2181 PetscCall(MatGetRowMaxAbs(a->B, vB, idxb)); 2182 2183 PetscCall(VecGetArrayWrite(v, &vv)); 2184 PetscCall(VecGetArrayRead(vB, &vb)); 2185 for (i = 0; i < m; i++) { 2186 if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) { 2187 vv[i] = vb[i]; 2188 if (idx) idx[i] = a->garray[idxb[i]]; 2189 } else { 2190 vv[i] = va[i]; 2191 if (idx && PetscAbsScalar(va[i]) == PetscAbsScalar(vb[i]) && idxb[i] != -1 && idx[i] > a->garray[idxb[i]]) idx[i] = a->garray[idxb[i]]; 2192 } 2193 } 2194 PetscCall(VecRestoreArrayWrite(v, &vv)); /* vv was obtained from v above, not from vA */ 2195 PetscCall(VecRestoreArrayWrite(vA, &va)); 2196 PetscCall(VecRestoreArrayRead(vB, &vb)); 2197 PetscCall(PetscFree(idxb)); 2198 PetscCall(VecDestroy(&vA)); 2199 PetscCall(VecDestroy(&vB)); 2200 PetscFunctionReturn(PETSC_SUCCESS); 2201 } 2202 2203 static PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[]) 2204 { 2205 Mat_MPIAIJ *mat = (Mat_MPIAIJ *)A->data; 2206 PetscInt m = A->rmap->n, n = A->cmap->n; 2207 PetscInt cstart = A->cmap->rstart, cend = A->cmap->rend; 2208 PetscInt *cmap = mat->garray; 2209 PetscInt *diagIdx, *offdiagIdx; 2210 Vec diagV, offdiagV; 2211 PetscScalar *a, *diagA, *offdiagA; 2212 const PetscScalar *ba, *bav; 2213 PetscInt r, j, col, ncols, *bi, *bj; 2214 Mat B = mat->B; 2215 Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data; 2216 2217 PetscFunctionBegin; 2218 /* When a process holds entire A and other processes have no entry */ 2219 if (A->cmap->N == n) { 2220 PetscCall(VecGetArrayWrite(v, &diagA)); 2221 PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m, diagA, &diagV)); 2222 PetscCall(MatGetRowMinAbs(mat->A, diagV, idx)); 2223 PetscCall(VecDestroy(&diagV)); 2224 PetscCall(VecRestoreArrayWrite(v, &diagA)); 2225 PetscFunctionReturn(PETSC_SUCCESS); 2226 } else if (n == 0) { 2227 if (m) { 2228 PetscCall(VecGetArrayWrite(v, &a)); 2229 for (r = 0; r < m; r++) { 2230 a[r] = 0.0; 2231 if (idx) idx[r] = -1; 2232 } 2233 PetscCall(VecRestoreArrayWrite(v, &a)); 2234 } 2235 PetscFunctionReturn(PETSC_SUCCESS); 2236 } 2237 2238 PetscCall(PetscMalloc2(m, &diagIdx, m, &offdiagIdx)); 2239 PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &diagV)); 2240 PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV)); 2241 PetscCall(MatGetRowMinAbs(mat->A, diagV, diagIdx)); 2242 2243 /* Get offdiagIdx[] for implicit 0.0 */ 2244 PetscCall(MatSeqAIJGetArrayRead(B, &bav)); 2245 ba = bav; 2246 bi = b->i; 2247 bj = b->j; 2248 PetscCall(VecGetArrayWrite(offdiagV, &offdiagA)); 2249 for (r = 0; r < m; r++) { 2250 ncols = bi[r + 1] - bi[r]; 2251 if (ncols == A->cmap->N - n) { /* Brow is dense */ 2252 offdiagA[r] = *ba; 2253 offdiagIdx[r] = cmap[0]; 2254 } else { /* Brow is sparse, so we already KNOW the minimum in absolute value is 0.0 */ 2255 offdiagA[r] = 0.0; 2256 2257 /* Find first hole
in the cmap */ 2258 for (j = 0; j < ncols; j++) { 2259 col = cmap[bj[j]]; /* global column number = cmap[B column number] */ 2260 if (col > j && j < cstart) { 2261 offdiagIdx[r] = j; /* global column number of first implicit 0.0 */ 2262 break; 2263 } else if (col > j + n && j >= cstart) { 2264 offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */ 2265 break; 2266 } 2267 } 2268 if (j == ncols && ncols < A->cmap->N - n) { 2269 /* a hole is outside compressed Bcols */ 2270 if (ncols == 0) { 2271 if (cstart) { 2272 offdiagIdx[r] = 0; 2273 } else offdiagIdx[r] = cend; 2274 } else { /* ncols > 0 */ 2275 offdiagIdx[r] = cmap[ncols - 1] + 1; 2276 if (offdiagIdx[r] == cstart) offdiagIdx[r] += n; 2277 } 2278 } 2279 } 2280 2281 for (j = 0; j < ncols; j++) { 2282 if (PetscAbsScalar(offdiagA[r]) > PetscAbsScalar(*ba)) { 2283 offdiagA[r] = *ba; 2284 offdiagIdx[r] = cmap[*bj]; 2285 } 2286 ba++; 2287 bj++; 2288 } 2289 } 2290 2291 PetscCall(VecGetArrayWrite(v, &a)); 2292 PetscCall(VecGetArrayRead(diagV, (const PetscScalar **)&diagA)); 2293 for (r = 0; r < m; ++r) { 2294 if (PetscAbsScalar(diagA[r]) < PetscAbsScalar(offdiagA[r])) { 2295 a[r] = diagA[r]; 2296 if (idx) idx[r] = cstart + diagIdx[r]; 2297 } else if (PetscAbsScalar(diagA[r]) == PetscAbsScalar(offdiagA[r])) { 2298 a[r] = diagA[r]; 2299 if (idx) { 2300 if (cstart + diagIdx[r] <= offdiagIdx[r]) { 2301 idx[r] = cstart + diagIdx[r]; 2302 } else idx[r] = offdiagIdx[r]; 2303 } 2304 } else { 2305 a[r] = offdiagA[r]; 2306 if (idx) idx[r] = offdiagIdx[r]; 2307 } 2308 } 2309 PetscCall(MatSeqAIJRestoreArrayRead(B, &bav)); 2310 PetscCall(VecRestoreArrayWrite(v, &a)); 2311 PetscCall(VecRestoreArrayRead(diagV, (const PetscScalar **)&diagA)); 2312 PetscCall(VecRestoreArrayWrite(offdiagV, &offdiagA)); 2313 PetscCall(VecDestroy(&diagV)); 2314 PetscCall(VecDestroy(&offdiagV)); 2315 PetscCall(PetscFree2(diagIdx, offdiagIdx)); 2316 PetscFunctionReturn(PETSC_SUCCESS); 2317 } 2318 2319 static PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[]) 2320 { 2321 Mat_MPIAIJ *mat = (Mat_MPIAIJ *)A->data; 2322 PetscInt m = A->rmap->n, n = A->cmap->n; 2323 PetscInt cstart = A->cmap->rstart, cend = A->cmap->rend; 2324 PetscInt *cmap = mat->garray; 2325 PetscInt *diagIdx, *offdiagIdx; 2326 Vec diagV, offdiagV; 2327 PetscScalar *a, *diagA, *offdiagA; 2328 const PetscScalar *ba, *bav; 2329 PetscInt r, j, col, ncols, *bi, *bj; 2330 Mat B = mat->B; 2331 Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data; 2332 2333 PetscFunctionBegin; 2334 /* When a process holds entire A and other processes have no entry */ 2335 if (A->cmap->N == n) { 2336 PetscCall(VecGetArrayWrite(v, &diagA)); 2337 PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m, diagA, &diagV)); 2338 PetscCall(MatGetRowMin(mat->A, diagV, idx)); 2339 PetscCall(VecDestroy(&diagV)); 2340 PetscCall(VecRestoreArrayWrite(v, &diagA)); 2341 PetscFunctionReturn(PETSC_SUCCESS); 2342 } else if (n == 0) { 2343 if (m) { 2344 PetscCall(VecGetArrayWrite(v, &a)); 2345 for (r = 0; r < m; r++) { 2346 a[r] = PETSC_MAX_REAL; 2347 if (idx) idx[r] = -1; 2348 } 2349 PetscCall(VecRestoreArrayWrite(v, &a)); 2350 } 2351 PetscFunctionReturn(PETSC_SUCCESS); 2352 } 2353 2354 PetscCall(PetscCalloc2(m, &diagIdx, m, &offdiagIdx)); 2355 PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &diagV)); 2356 PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV)); 2357 PetscCall(MatGetRowMin(mat->A, diagV, diagIdx)); 2358 2359 /* Get offdiagIdx[] for implicit 0.0 */ 2360 PetscCall(MatSeqAIJGetArrayRead(B, &bav)); 2361 ba = bav; 2362 bi = b->i; 2363 bj = 
b->j; 2364 PetscCall(VecGetArrayWrite(offdiagV, &offdiagA)); 2365 for (r = 0; r < m; r++) { 2366 ncols = bi[r + 1] - bi[r]; 2367 if (ncols == A->cmap->N - n) { /* Brow is dense */ 2368 offdiagA[r] = *ba; 2369 offdiagIdx[r] = cmap[0]; 2370 } else { /* Brow is sparse, so we already KNOW the minimum is 0.0 or lower */ 2371 offdiagA[r] = 0.0; 2372 2373 /* Find first hole in the cmap */ 2374 for (j = 0; j < ncols; j++) { 2375 col = cmap[bj[j]]; /* global column number = cmap[B column number] */ 2376 if (col > j && j < cstart) { 2377 offdiagIdx[r] = j; /* global column number of first implicit 0.0 */ 2378 break; 2379 } else if (col > j + n && j >= cstart) { 2380 offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */ 2381 break; 2382 } 2383 } 2384 if (j == ncols && ncols < A->cmap->N - n) { 2385 /* a hole is outside compressed Bcols */ 2386 if (ncols == 0) { 2387 if (cstart) { 2388 offdiagIdx[r] = 0; 2389 } else offdiagIdx[r] = cend; 2390 } else { /* ncols > 0 */ 2391 offdiagIdx[r] = cmap[ncols - 1] + 1; 2392 if (offdiagIdx[r] == cstart) offdiagIdx[r] += n; 2393 } 2394 } 2395 } 2396 2397 for (j = 0; j < ncols; j++) { 2398 if (PetscRealPart(offdiagA[r]) > PetscRealPart(*ba)) { 2399 offdiagA[r] = *ba; 2400 offdiagIdx[r] = cmap[*bj]; 2401 } 2402 ba++; 2403 bj++; 2404 } 2405 } 2406 2407 PetscCall(VecGetArrayWrite(v, &a)); 2408 PetscCall(VecGetArrayRead(diagV, (const PetscScalar **)&diagA)); 2409 for (r = 0; r < m; ++r) { 2410 if (PetscRealPart(diagA[r]) < PetscRealPart(offdiagA[r])) { 2411 a[r] = diagA[r]; 2412 if (idx) idx[r] = cstart + diagIdx[r]; 2413 } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) { 2414 a[r] = diagA[r]; 2415 if (idx) { 2416 if (cstart + diagIdx[r] <= offdiagIdx[r]) { 2417 idx[r] = cstart + diagIdx[r]; 2418 } else idx[r] = offdiagIdx[r]; 2419 } 2420 } else { 2421 a[r] = offdiagA[r]; 2422 if (idx) idx[r] = offdiagIdx[r]; 2423 } 2424 } 2425 PetscCall(MatSeqAIJRestoreArrayRead(B, &bav)); 2426 PetscCall(VecRestoreArrayWrite(v, &a)); 2427 PetscCall(VecRestoreArrayRead(diagV, (const PetscScalar **)&diagA)); 2428 PetscCall(VecRestoreArrayWrite(offdiagV, &offdiagA)); 2429 PetscCall(VecDestroy(&diagV)); 2430 PetscCall(VecDestroy(&offdiagV)); 2431 PetscCall(PetscFree2(diagIdx, offdiagIdx)); 2432 PetscFunctionReturn(PETSC_SUCCESS); 2433 } 2434 2435 static PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[]) 2436 { 2437 Mat_MPIAIJ *mat = (Mat_MPIAIJ *)A->data; 2438 PetscInt m = A->rmap->n, n = A->cmap->n; 2439 PetscInt cstart = A->cmap->rstart, cend = A->cmap->rend; 2440 PetscInt *cmap = mat->garray; 2441 PetscInt *diagIdx, *offdiagIdx; 2442 Vec diagV, offdiagV; 2443 PetscScalar *a, *diagA, *offdiagA; 2444 const PetscScalar *ba, *bav; 2445 PetscInt r, j, col, ncols, *bi, *bj; 2446 Mat B = mat->B; 2447 Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data; 2448 2449 PetscFunctionBegin; 2450 /* When a process holds entire A and other processes have no entry */ 2451 if (A->cmap->N == n) { 2452 PetscCall(VecGetArrayWrite(v, &diagA)); 2453 PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m, diagA, &diagV)); 2454 PetscCall(MatGetRowMax(mat->A, diagV, idx)); 2455 PetscCall(VecDestroy(&diagV)); 2456 PetscCall(VecRestoreArrayWrite(v, &diagA)); 2457 PetscFunctionReturn(PETSC_SUCCESS); 2458 } else if (n == 0) { 2459 if (m) { 2460 PetscCall(VecGetArrayWrite(v, &a)); 2461 for (r = 0; r < m; r++) { 2462 a[r] = PETSC_MIN_REAL; 2463 if (idx) idx[r] = -1; 2464 } 2465 PetscCall(VecRestoreArrayWrite(v, &a)); 2466 } 2467 PetscFunctionReturn(PETSC_SUCCESS); 2468 } 2469 2470
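  /* The "first hole" search below (also used by MatGetRowMin/MatGetRowMinAbs above)
     locates the global column of the first implicit 0.0 in this row of B. An
     illustrative trace (numbers made up): with N = 10 global columns, this rank owning
     columns [cstart, cstart + n) = [4, 8), and a row of B whose compressed columns are
     the first two slots of cmap = {0, 1, 8, 9} (global columns {0, 1}), neither break
     condition fires during the scan, so the fall-through branch yields
     offdiagIdx[r] = cmap[ncols - 1] + 1 = 2, the first missing off-diagonal column. */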
PetscCall(PetscMalloc2(m, &diagIdx, m, &offdiagIdx)); 2471 PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &diagV)); 2472 PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV)); 2473 PetscCall(MatGetRowMax(mat->A, diagV, diagIdx)); 2474 2475 /* Get offdiagIdx[] for implicit 0.0 */ 2476 PetscCall(MatSeqAIJGetArrayRead(B, &bav)); 2477 ba = bav; 2478 bi = b->i; 2479 bj = b->j; 2480 PetscCall(VecGetArrayWrite(offdiagV, &offdiagA)); 2481 for (r = 0; r < m; r++) { 2482 ncols = bi[r + 1] - bi[r]; 2483 if (ncols == A->cmap->N - n) { /* Brow is dense */ 2484 offdiagA[r] = *ba; 2485 offdiagIdx[r] = cmap[0]; 2486 } else { /* Brow is sparse so already KNOW maximum is 0.0 or higher */ 2487 offdiagA[r] = 0.0; 2488 2489 /* Find first hole in the cmap */ 2490 for (j = 0; j < ncols; j++) { 2491 col = cmap[bj[j]]; /* global column number = cmap[B column number] */ 2492 if (col > j && j < cstart) { 2493 offdiagIdx[r] = j; /* global column number of first implicit 0.0 */ 2494 break; 2495 } else if (col > j + n && j >= cstart) { 2496 offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */ 2497 break; 2498 } 2499 } 2500 if (j == ncols && ncols < A->cmap->N - n) { 2501 /* a hole is outside compressed Bcols */ 2502 if (ncols == 0) { 2503 if (cstart) { 2504 offdiagIdx[r] = 0; 2505 } else offdiagIdx[r] = cend; 2506 } else { /* ncols > 0 */ 2507 offdiagIdx[r] = cmap[ncols - 1] + 1; 2508 if (offdiagIdx[r] == cstart) offdiagIdx[r] += n; 2509 } 2510 } 2511 } 2512 2513 for (j = 0; j < ncols; j++) { 2514 if (PetscRealPart(offdiagA[r]) < PetscRealPart(*ba)) { 2515 offdiagA[r] = *ba; 2516 offdiagIdx[r] = cmap[*bj]; 2517 } 2518 ba++; 2519 bj++; 2520 } 2521 } 2522 2523 PetscCall(VecGetArrayWrite(v, &a)); 2524 PetscCall(VecGetArrayRead(diagV, (const PetscScalar **)&diagA)); 2525 for (r = 0; r < m; ++r) { 2526 if (PetscRealPart(diagA[r]) > PetscRealPart(offdiagA[r])) { 2527 a[r] = diagA[r]; 2528 if (idx) idx[r] = cstart + diagIdx[r]; 2529 } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) { 2530 a[r] = diagA[r]; 2531 if (idx) { 2532 if (cstart + diagIdx[r] <= offdiagIdx[r]) { 2533 idx[r] = cstart + diagIdx[r]; 2534 } else idx[r] = offdiagIdx[r]; 2535 } 2536 } else { 2537 a[r] = offdiagA[r]; 2538 if (idx) idx[r] = offdiagIdx[r]; 2539 } 2540 } 2541 PetscCall(MatSeqAIJRestoreArrayRead(B, &bav)); 2542 PetscCall(VecRestoreArrayWrite(v, &a)); 2543 PetscCall(VecRestoreArrayRead(diagV, (const PetscScalar **)&diagA)); 2544 PetscCall(VecRestoreArrayWrite(offdiagV, &offdiagA)); 2545 PetscCall(VecDestroy(&diagV)); 2546 PetscCall(VecDestroy(&offdiagV)); 2547 PetscCall(PetscFree2(diagIdx, offdiagIdx)); 2548 PetscFunctionReturn(PETSC_SUCCESS); 2549 } 2550 2551 PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat, Mat *newmat) 2552 { 2553 Mat *dummy; 2554 2555 PetscFunctionBegin; 2556 PetscCall(MatCreateSubMatrix_MPIAIJ_All(mat, MAT_DO_NOT_GET_VALUES, MAT_INITIAL_MATRIX, &dummy)); 2557 *newmat = *dummy; 2558 PetscCall(PetscFree(dummy)); 2559 PetscFunctionReturn(PETSC_SUCCESS); 2560 } 2561 2562 static PetscErrorCode MatInvertBlockDiagonal_MPIAIJ(Mat A, const PetscScalar **values) 2563 { 2564 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 2565 2566 PetscFunctionBegin; 2567 PetscCall(MatInvertBlockDiagonal(a->A, values)); 2568 A->factorerrortype = a->A->factorerrortype; 2569 PetscFunctionReturn(PETSC_SUCCESS); 2570 } 2571 2572 static PetscErrorCode MatSetRandom_MPIAIJ(Mat x, PetscRandom rctx) 2573 { 2574 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)x->data; 2575 2576 PetscFunctionBegin; 2577 PetscCheck(x->assembled || x->preallocated, 
PetscObjectComm((PetscObject)x), PETSC_ERR_ARG_WRONGSTATE, "MatSetRandom on an unassembled and unpreallocated MATMPIAIJ is not allowed"); 2578 PetscCall(MatSetRandom(aij->A, rctx)); 2579 if (x->assembled) { 2580 PetscCall(MatSetRandom(aij->B, rctx)); 2581 } else { 2582 PetscCall(MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B, x->cmap->rstart, x->cmap->rend, rctx)); 2583 } 2584 PetscCall(MatAssemblyBegin(x, MAT_FINAL_ASSEMBLY)); 2585 PetscCall(MatAssemblyEnd(x, MAT_FINAL_ASSEMBLY)); 2586 PetscFunctionReturn(PETSC_SUCCESS); 2587 } 2588 2589 static PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A, PetscBool sc) 2590 { 2591 PetscFunctionBegin; 2592 if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable; 2593 else A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ; 2594 PetscFunctionReturn(PETSC_SUCCESS); 2595 } 2596 2597 /*@ 2598 MatMPIAIJGetNumberNonzeros - gets the number of nonzeros in the matrix on this MPI rank 2599 2600 Not Collective 2601 2602 Input Parameter: 2603 . A - the matrix 2604 2605 Output Parameter: 2606 . nz - the number of nonzeros 2607 2608 Level: advanced 2609 2610 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ` 2611 @*/ 2612 PetscErrorCode MatMPIAIJGetNumberNonzeros(Mat A, PetscCount *nz) 2613 { 2614 Mat_MPIAIJ *maij = (Mat_MPIAIJ *)A->data; 2615 Mat_SeqAIJ *aaij = (Mat_SeqAIJ *)maij->A->data, *baij = (Mat_SeqAIJ *)maij->B->data; 2616 PetscBool isaij; 2617 2618 PetscFunctionBegin; 2619 PetscCall(PetscObjectBaseTypeCompare((PetscObject)A, MATMPIAIJ, &isaij)); 2620 PetscCheck(isaij, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Not for type %s", ((PetscObject)A)->type_name); 2621 *nz = aaij->i[A->rmap->n] + baij->i[A->rmap->n]; 2622 PetscFunctionReturn(PETSC_SUCCESS); 2623 } 2624 2625 /*@ 2626 MatMPIAIJSetUseScalableIncreaseOverlap - Set whether the matrix uses a scalable algorithm to compute the overlap 2627 2628 Collective 2629 2630 Input Parameters: 2631 + A - the matrix 2632 - sc - `PETSC_TRUE` indicates use the scalable algorithm (the default is `PETSC_FALSE`) 2633 2634 Level: advanced 2635 2636 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ` 2637 @*/ 2638 PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A, PetscBool sc) 2639 { 2640 PetscFunctionBegin; 2641 PetscTryMethod(A, "MatMPIAIJSetUseScalableIncreaseOverlap_C", (Mat, PetscBool), (A, sc)); 2642 PetscFunctionReturn(PETSC_SUCCESS); 2643 } 2644 2645 PetscErrorCode MatSetFromOptions_MPIAIJ(Mat A, PetscOptionItems *PetscOptionsObject) 2646 { 2647 PetscBool sc = PETSC_FALSE, flg; 2648 2649 PetscFunctionBegin; 2650 PetscOptionsHeadBegin(PetscOptionsObject, "MPIAIJ options"); 2651 if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE; 2652 PetscCall(PetscOptionsBool("-mat_increase_overlap_scalable", "Use a scalable algorithm to compute the overlap", "MatIncreaseOverlap", sc, &sc, &flg)); 2653 if (flg) PetscCall(MatMPIAIJSetUseScalableIncreaseOverlap(A, sc)); 2654 PetscOptionsHeadEnd(); 2655 PetscFunctionReturn(PETSC_SUCCESS); 2656 } 2657 2658 static PetscErrorCode MatShift_MPIAIJ(Mat Y, PetscScalar a) 2659 { 2660 Mat_MPIAIJ *maij = (Mat_MPIAIJ *)Y->data; 2661 Mat_SeqAIJ *aij = (Mat_SeqAIJ *)maij->A->data; 2662 2663 PetscFunctionBegin; 2664 if (!Y->preallocated) { 2665 PetscCall(MatMPIAIJSetPreallocation(Y, 1, NULL, 0, NULL)); 2666 } else if (!aij->nz) { /* It does not matter if diagonals of Y only partially lie in maij->A. We just need an estimated preallocation.
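        For example (hypothetical caller code): MatShift(Y, 1.0) on a MATMPIAIJ that was
        created and sized but never preallocated takes the first branch and preallocates
        one nonzero per row; on an assembled Y whose diagonal block holds no nonzeros it
        takes this branch instead, before MatShift_Basic() inserts the shift.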
*/ 2667 PetscInt nonew = aij->nonew; 2668 PetscCall(MatSeqAIJSetPreallocation(maij->A, 1, NULL)); 2669 aij->nonew = nonew; 2670 } 2671 PetscCall(MatShift_Basic(Y, a)); 2672 PetscFunctionReturn(PETSC_SUCCESS); 2673 } 2674 2675 static PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A, PetscBool *missing, PetscInt *d) 2676 { 2677 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 2678 2679 PetscFunctionBegin; 2680 PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only works for square matrices"); 2681 PetscCall(MatMissingDiagonal(a->A, missing, d)); 2682 if (d) { 2683 PetscInt rstart; 2684 PetscCall(MatGetOwnershipRange(A, &rstart, NULL)); 2685 *d += rstart; 2686 } 2687 PetscFunctionReturn(PETSC_SUCCESS); 2688 } 2689 2690 static PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A, PetscInt nblocks, const PetscInt *bsizes, PetscScalar *diag) 2691 { 2692 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 2693 2694 PetscFunctionBegin; 2695 PetscCall(MatInvertVariableBlockDiagonal(a->A, nblocks, bsizes, diag)); 2696 PetscFunctionReturn(PETSC_SUCCESS); 2697 } 2698 2699 static PetscErrorCode MatEliminateZeros_MPIAIJ(Mat A, PetscBool keep) 2700 { 2701 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 2702 2703 PetscFunctionBegin; 2704 PetscCall(MatEliminateZeros_SeqAIJ(a->A, keep)); // possibly keep zero diagonal coefficients 2705 PetscCall(MatEliminateZeros_SeqAIJ(a->B, PETSC_FALSE)); // never keep zero diagonal coefficients 2706 PetscFunctionReturn(PETSC_SUCCESS); 2707 } 2708 2709 static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ, 2710 MatGetRow_MPIAIJ, 2711 MatRestoreRow_MPIAIJ, 2712 MatMult_MPIAIJ, 2713 /* 4*/ MatMultAdd_MPIAIJ, 2714 MatMultTranspose_MPIAIJ, 2715 MatMultTransposeAdd_MPIAIJ, 2716 NULL, 2717 NULL, 2718 NULL, 2719 /*10*/ NULL, 2720 NULL, 2721 NULL, 2722 MatSOR_MPIAIJ, 2723 MatTranspose_MPIAIJ, 2724 /*15*/ MatGetInfo_MPIAIJ, 2725 MatEqual_MPIAIJ, 2726 MatGetDiagonal_MPIAIJ, 2727 MatDiagonalScale_MPIAIJ, 2728 MatNorm_MPIAIJ, 2729 /*20*/ MatAssemblyBegin_MPIAIJ, 2730 MatAssemblyEnd_MPIAIJ, 2731 MatSetOption_MPIAIJ, 2732 MatZeroEntries_MPIAIJ, 2733 /*24*/ MatZeroRows_MPIAIJ, 2734 NULL, 2735 NULL, 2736 NULL, 2737 NULL, 2738 /*29*/ MatSetUp_MPI_Hash, 2739 NULL, 2740 NULL, 2741 MatGetDiagonalBlock_MPIAIJ, 2742 NULL, 2743 /*34*/ MatDuplicate_MPIAIJ, 2744 NULL, 2745 NULL, 2746 NULL, 2747 NULL, 2748 /*39*/ MatAXPY_MPIAIJ, 2749 MatCreateSubMatrices_MPIAIJ, 2750 MatIncreaseOverlap_MPIAIJ, 2751 MatGetValues_MPIAIJ, 2752 MatCopy_MPIAIJ, 2753 /*44*/ MatGetRowMax_MPIAIJ, 2754 MatScale_MPIAIJ, 2755 MatShift_MPIAIJ, 2756 MatDiagonalSet_MPIAIJ, 2757 MatZeroRowsColumns_MPIAIJ, 2758 /*49*/ MatSetRandom_MPIAIJ, 2759 MatGetRowIJ_MPIAIJ, 2760 MatRestoreRowIJ_MPIAIJ, 2761 NULL, 2762 NULL, 2763 /*54*/ MatFDColoringCreate_MPIXAIJ, 2764 NULL, 2765 MatSetUnfactored_MPIAIJ, 2766 MatPermute_MPIAIJ, 2767 NULL, 2768 /*59*/ MatCreateSubMatrix_MPIAIJ, 2769 MatDestroy_MPIAIJ, 2770 MatView_MPIAIJ, 2771 NULL, 2772 NULL, 2773 /*64*/ NULL, 2774 MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ, 2775 NULL, 2776 NULL, 2777 NULL, 2778 /*69*/ MatGetRowMaxAbs_MPIAIJ, 2779 MatGetRowMinAbs_MPIAIJ, 2780 NULL, 2781 NULL, 2782 NULL, 2783 NULL, 2784 /*75*/ MatFDColoringApply_AIJ, 2785 MatSetFromOptions_MPIAIJ, 2786 NULL, 2787 NULL, 2788 MatFindZeroDiagonals_MPIAIJ, 2789 /*80*/ NULL, 2790 NULL, 2791 NULL, 2792 /*83*/ MatLoad_MPIAIJ, 2793 MatIsSymmetric_MPIAIJ, 2794 NULL, 2795 NULL, 2796 NULL, 2797 NULL, 2798 /*89*/ NULL, 2799 NULL, 2800 MatMatMultNumeric_MPIAIJ_MPIAIJ, 2801 NULL, 2802 NULL, 2803 /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ, 2804 NULL, 
2805 NULL, 2806 NULL, 2807 MatBindToCPU_MPIAIJ, 2808 /*99*/ MatProductSetFromOptions_MPIAIJ, 2809 NULL, 2810 NULL, 2811 MatConjugate_MPIAIJ, 2812 NULL, 2813 /*104*/ MatSetValuesRow_MPIAIJ, 2814 MatRealPart_MPIAIJ, 2815 MatImaginaryPart_MPIAIJ, 2816 NULL, 2817 NULL, 2818 /*109*/ NULL, 2819 NULL, 2820 MatGetRowMin_MPIAIJ, 2821 NULL, 2822 MatMissingDiagonal_MPIAIJ, 2823 /*114*/ MatGetSeqNonzeroStructure_MPIAIJ, 2824 NULL, 2825 MatGetGhosts_MPIAIJ, 2826 NULL, 2827 NULL, 2828 /*119*/ MatMultDiagonalBlock_MPIAIJ, 2829 NULL, 2830 NULL, 2831 NULL, 2832 MatGetMultiProcBlock_MPIAIJ, 2833 /*124*/ MatFindNonzeroRows_MPIAIJ, 2834 MatGetColumnReductions_MPIAIJ, 2835 MatInvertBlockDiagonal_MPIAIJ, 2836 MatInvertVariableBlockDiagonal_MPIAIJ, 2837 MatCreateSubMatricesMPI_MPIAIJ, 2838 /*129*/ NULL, 2839 NULL, 2840 NULL, 2841 MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ, 2842 NULL, 2843 /*134*/ NULL, 2844 NULL, 2845 NULL, 2846 NULL, 2847 NULL, 2848 /*139*/ MatSetBlockSizes_MPIAIJ, 2849 NULL, 2850 NULL, 2851 MatFDColoringSetUp_MPIXAIJ, 2852 MatFindOffBlockDiagonalEntries_MPIAIJ, 2853 MatCreateMPIMatConcatenateSeqMat_MPIAIJ, 2854 /*145*/ NULL, 2855 NULL, 2856 NULL, 2857 MatCreateGraph_Simple_AIJ, 2858 NULL, 2859 /*150*/ NULL, 2860 MatEliminateZeros_MPIAIJ}; 2861 2862 static PetscErrorCode MatStoreValues_MPIAIJ(Mat mat) 2863 { 2864 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data; 2865 2866 PetscFunctionBegin; 2867 PetscCall(MatStoreValues(aij->A)); 2868 PetscCall(MatStoreValues(aij->B)); 2869 PetscFunctionReturn(PETSC_SUCCESS); 2870 } 2871 2872 static PetscErrorCode MatRetrieveValues_MPIAIJ(Mat mat) 2873 { 2874 Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data; 2875 2876 PetscFunctionBegin; 2877 PetscCall(MatRetrieveValues(aij->A)); 2878 PetscCall(MatRetrieveValues(aij->B)); 2879 PetscFunctionReturn(PETSC_SUCCESS); 2880 } 2881 2882 PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[]) 2883 { 2884 Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data; 2885 PetscMPIInt size; 2886 2887 PetscFunctionBegin; 2888 if (B->hash_active) { 2889 B->ops[0] = b->cops; 2890 B->hash_active = PETSC_FALSE; 2891 } 2892 PetscCall(PetscLayoutSetUp(B->rmap)); 2893 PetscCall(PetscLayoutSetUp(B->cmap)); 2894 2895 #if defined(PETSC_USE_CTABLE) 2896 PetscCall(PetscHMapIDestroy(&b->colmap)); 2897 #else 2898 PetscCall(PetscFree(b->colmap)); 2899 #endif 2900 PetscCall(PetscFree(b->garray)); 2901 PetscCall(VecDestroy(&b->lvec)); 2902 PetscCall(VecScatterDestroy(&b->Mvctx)); 2903 2904 PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size)); 2905 PetscCall(MatDestroy(&b->B)); 2906 PetscCall(MatCreate(PETSC_COMM_SELF, &b->B)); 2907 PetscCall(MatSetSizes(b->B, B->rmap->n, size > 1 ? B->cmap->N : 0, B->rmap->n, size > 1 ? 
B->cmap->N : 0)); 2908 PetscCall(MatSetBlockSizesFromMats(b->B, B, B)); 2909 PetscCall(MatSetType(b->B, MATSEQAIJ)); 2910 2911 PetscCall(MatDestroy(&b->A)); 2912 PetscCall(MatCreate(PETSC_COMM_SELF, &b->A)); 2913 PetscCall(MatSetSizes(b->A, B->rmap->n, B->cmap->n, B->rmap->n, B->cmap->n)); 2914 PetscCall(MatSetBlockSizesFromMats(b->A, B, B)); 2915 PetscCall(MatSetType(b->A, MATSEQAIJ)); 2916 2917 PetscCall(MatSeqAIJSetPreallocation(b->A, d_nz, d_nnz)); 2918 PetscCall(MatSeqAIJSetPreallocation(b->B, o_nz, o_nnz)); 2919 B->preallocated = PETSC_TRUE; 2920 B->was_assembled = PETSC_FALSE; 2921 B->assembled = PETSC_FALSE; 2922 PetscFunctionReturn(PETSC_SUCCESS); 2923 } 2924 2925 static PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B) 2926 { 2927 Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data; 2928 2929 PetscFunctionBegin; 2930 PetscValidHeaderSpecific(B, MAT_CLASSID, 1); 2931 PetscCall(PetscLayoutSetUp(B->rmap)); 2932 PetscCall(PetscLayoutSetUp(B->cmap)); 2933 2934 #if defined(PETSC_USE_CTABLE) 2935 PetscCall(PetscHMapIDestroy(&b->colmap)); 2936 #else 2937 PetscCall(PetscFree(b->colmap)); 2938 #endif 2939 PetscCall(PetscFree(b->garray)); 2940 PetscCall(VecDestroy(&b->lvec)); 2941 PetscCall(VecScatterDestroy(&b->Mvctx)); 2942 2943 PetscCall(MatResetPreallocation(b->A)); 2944 PetscCall(MatResetPreallocation(b->B)); 2945 B->preallocated = PETSC_TRUE; 2946 B->was_assembled = PETSC_FALSE; 2947 B->assembled = PETSC_FALSE; 2948 PetscFunctionReturn(PETSC_SUCCESS); 2949 } 2950 2951 PetscErrorCode MatDuplicate_MPIAIJ(Mat matin, MatDuplicateOption cpvalues, Mat *newmat) 2952 { 2953 Mat mat; 2954 Mat_MPIAIJ *a, *oldmat = (Mat_MPIAIJ *)matin->data; 2955 2956 PetscFunctionBegin; 2957 *newmat = NULL; 2958 PetscCall(MatCreate(PetscObjectComm((PetscObject)matin), &mat)); 2959 PetscCall(MatSetSizes(mat, matin->rmap->n, matin->cmap->n, matin->rmap->N, matin->cmap->N)); 2960 PetscCall(MatSetBlockSizesFromMats(mat, matin, matin)); 2961 PetscCall(MatSetType(mat, ((PetscObject)matin)->type_name)); 2962 a = (Mat_MPIAIJ *)mat->data; 2963 2964 mat->factortype = matin->factortype; 2965 mat->assembled = matin->assembled; 2966 mat->insertmode = NOT_SET_VALUES; 2967 2968 a->size = oldmat->size; 2969 a->rank = oldmat->rank; 2970 a->donotstash = oldmat->donotstash; 2971 a->roworiented = oldmat->roworiented; 2972 a->rowindices = NULL; 2973 a->rowvalues = NULL; 2974 a->getrowactive = PETSC_FALSE; 2975 2976 PetscCall(PetscLayoutReference(matin->rmap, &mat->rmap)); 2977 PetscCall(PetscLayoutReference(matin->cmap, &mat->cmap)); 2978 if (matin->hash_active) { 2979 PetscCall(MatSetUp(mat)); 2980 } else { 2981 mat->preallocated = matin->preallocated; 2982 if (oldmat->colmap) { 2983 #if defined(PETSC_USE_CTABLE) 2984 PetscCall(PetscHMapIDuplicate(oldmat->colmap, &a->colmap)); 2985 #else 2986 PetscCall(PetscMalloc1(mat->cmap->N, &a->colmap)); 2987 PetscCall(PetscArraycpy(a->colmap, oldmat->colmap, mat->cmap->N)); 2988 #endif 2989 } else a->colmap = NULL; 2990 if (oldmat->garray) { 2991 PetscInt len; 2992 len = oldmat->B->cmap->n; 2993 PetscCall(PetscMalloc1(len + 1, &a->garray)); 2994 if (len) PetscCall(PetscArraycpy(a->garray, oldmat->garray, len)); 2995 } else a->garray = NULL; 2996 2997 /* It may happen MatDuplicate is called with a non-assembled matrix 2998 In fact, MatDuplicate only requires the matrix to be preallocated 2999 This may happen inside a DMCreateMatrix_Shell */ 3000 if (oldmat->lvec) PetscCall(VecDuplicate(oldmat->lvec, &a->lvec)); 3001 if (oldmat->Mvctx) PetscCall(VecScatterCopy(oldmat->Mvctx, &a->Mvctx)); 3002 
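    /* A minimal caller-side sketch (hypothetical code): the MatDuplicateOption passed in
       as cpvalues decides what the two block duplications below copy.

         Mat Adup;
         PetscCall(MatDuplicate(A, MAT_COPY_VALUES, &Adup)); // or MAT_DO_NOT_COPY_VALUES / MAT_SHARE_NONZERO_PATTERN
         PetscCall(MatDestroy(&Adup));
    */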
PetscErrorCode MatDuplicate_MPIAIJ(Mat matin, MatDuplicateOption cpvalues, Mat *newmat)
{
  Mat         mat;
  Mat_MPIAIJ *a, *oldmat = (Mat_MPIAIJ *)matin->data;

  PetscFunctionBegin;
  *newmat = NULL;
  PetscCall(MatCreate(PetscObjectComm((PetscObject)matin), &mat));
  PetscCall(MatSetSizes(mat, matin->rmap->n, matin->cmap->n, matin->rmap->N, matin->cmap->N));
  PetscCall(MatSetBlockSizesFromMats(mat, matin, matin));
  PetscCall(MatSetType(mat, ((PetscObject)matin)->type_name));
  a = (Mat_MPIAIJ *)mat->data;

  mat->factortype = matin->factortype;
  mat->assembled  = matin->assembled;
  mat->insertmode = NOT_SET_VALUES;

  a->size         = oldmat->size;
  a->rank         = oldmat->rank;
  a->donotstash   = oldmat->donotstash;
  a->roworiented  = oldmat->roworiented;
  a->rowindices   = NULL;
  a->rowvalues    = NULL;
  a->getrowactive = PETSC_FALSE;

  PetscCall(PetscLayoutReference(matin->rmap, &mat->rmap));
  PetscCall(PetscLayoutReference(matin->cmap, &mat->cmap));
  if (matin->hash_active) {
    PetscCall(MatSetUp(mat));
  } else {
    mat->preallocated = matin->preallocated;
    if (oldmat->colmap) {
#if defined(PETSC_USE_CTABLE)
      PetscCall(PetscHMapIDuplicate(oldmat->colmap, &a->colmap));
#else
      PetscCall(PetscMalloc1(mat->cmap->N, &a->colmap));
      PetscCall(PetscArraycpy(a->colmap, oldmat->colmap, mat->cmap->N));
#endif
    } else a->colmap = NULL;
    if (oldmat->garray) {
      PetscInt len;
      len = oldmat->B->cmap->n;
      PetscCall(PetscMalloc1(len + 1, &a->garray));
      if (len) PetscCall(PetscArraycpy(a->garray, oldmat->garray, len));
    } else a->garray = NULL;

    /* It may happen that MatDuplicate() is called with a non-assembled matrix;
       MatDuplicate() only requires the matrix to be preallocated.
       This may happen inside a DMCreateMatrix_Shell */
    if (oldmat->lvec) PetscCall(VecDuplicate(oldmat->lvec, &a->lvec));
    if (oldmat->Mvctx) PetscCall(VecScatterCopy(oldmat->Mvctx, &a->Mvctx));

    PetscCall(MatDuplicate(oldmat->A, cpvalues, &a->A));
    PetscCall(MatDuplicate(oldmat->B, cpvalues, &a->B));
  }
  PetscCall(PetscFunctionListDuplicate(((PetscObject)matin)->qlist, &((PetscObject)mat)->qlist));
  *newmat = mat;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
{
  PetscBool isbinary, ishdf5;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(newMat, MAT_CLASSID, 1);
  PetscValidHeaderSpecific(viewer, PETSC_VIEWER_CLASSID, 2);
  /* force binary viewer to load .info file if it has not yet done so */
  PetscCall(PetscViewerSetUp(viewer));
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERHDF5, &ishdf5));
  if (isbinary) {
    PetscCall(MatLoad_MPIAIJ_Binary(newMat, viewer));
  } else if (ishdf5) {
#if defined(PETSC_HAVE_HDF5)
    PetscCall(MatLoad_AIJ_HDF5(newMat, viewer));
#else
    SETERRQ(PetscObjectComm((PetscObject)newMat), PETSC_ERR_SUP, "HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
#endif
  } else {
    SETERRQ(PetscObjectComm((PetscObject)newMat), PETSC_ERR_SUP, "Viewer type %s not yet supported for reading %s matrices", ((PetscObject)viewer)->type_name, ((PetscObject)newMat)->type_name);
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
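/* A minimal load sketch for the dispatch above (assumes "matrix.dat" was written earlier
   with MatView() on a binary viewer; only public API calls are used):

     Mat         A;
     PetscViewer viewer;
     PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, "matrix.dat", FILE_MODE_READ, &viewer));
     PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
     PetscCall(MatSetType(A, MATMPIAIJ));
     PetscCall(MatLoad(A, viewer));
     PetscCall(PetscViewerDestroy(&viewer));
*/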
PetscErrorCode MatLoad_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
{
  PetscInt     header[4], M, N, m, nz, rows, cols, sum, i;
  PetscInt    *rowidxs, *colidxs;
  PetscScalar *matvals;

  PetscFunctionBegin;
  PetscCall(PetscViewerSetUp(viewer));

  /* read in matrix header */
  PetscCall(PetscViewerBinaryRead(viewer, header, 4, NULL, PETSC_INT));
  PetscCheck(header[0] == MAT_FILE_CLASSID, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Not a matrix object in file");
  M  = header[1];
  N  = header[2];
  nz = header[3];
  PetscCheck(M >= 0, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Matrix row size (%" PetscInt_FMT ") in file is negative", M);
  PetscCheck(N >= 0, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Matrix column size (%" PetscInt_FMT ") in file is negative", N);
  PetscCheck(nz >= 0, PETSC_COMM_SELF, PETSC_ERR_FILE_UNEXPECTED, "Matrix stored in special format on disk, cannot load as MPIAIJ");

  /* set block sizes from the viewer's .info file */
  PetscCall(MatLoad_Binary_BlockSizes(mat, viewer));
  /* set global sizes if not set already */
  if (mat->rmap->N < 0) mat->rmap->N = M;
  if (mat->cmap->N < 0) mat->cmap->N = N;
  PetscCall(PetscLayoutSetUp(mat->rmap));
  PetscCall(PetscLayoutSetUp(mat->cmap));

  /* check if the matrix sizes are correct */
  PetscCall(MatGetSize(mat, &rows, &cols));
  PetscCheck(M == rows && N == cols, PETSC_COMM_SELF, PETSC_ERR_FILE_UNEXPECTED, "Matrix in file of different sizes (%" PetscInt_FMT ", %" PetscInt_FMT ") than the input matrix (%" PetscInt_FMT ", %" PetscInt_FMT ")", M, N, rows, cols);

  /* read in row lengths and build row indices */
  PetscCall(MatGetLocalSize(mat, &m, NULL));
  PetscCall(PetscMalloc1(m + 1, &rowidxs));
  PetscCall(PetscViewerBinaryReadAll(viewer, rowidxs + 1, m, PETSC_DECIDE, M, PETSC_INT));
  rowidxs[0] = 0;
  for (i = 0; i < m; i++) rowidxs[i + 1] += rowidxs[i];
  if (nz != PETSC_MAX_INT) {
    PetscCall(MPIU_Allreduce(&rowidxs[m], &sum, 1, MPIU_INT, MPI_SUM, PetscObjectComm((PetscObject)viewer)));
    PetscCheck(sum == nz, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Inconsistent matrix data in file: nonzeros = %" PetscInt_FMT ", sum-row-lengths = %" PetscInt_FMT, nz, sum);
  }

  /* read in column indices and matrix values */
  PetscCall(PetscMalloc2(rowidxs[m], &colidxs, rowidxs[m], &matvals));
  PetscCall(PetscViewerBinaryReadAll(viewer, colidxs, rowidxs[m], PETSC_DETERMINE, PETSC_DETERMINE, PETSC_INT));
  PetscCall(PetscViewerBinaryReadAll(viewer, matvals, rowidxs[m], PETSC_DETERMINE, PETSC_DETERMINE, PETSC_SCALAR));
  /* store matrix indices and values */
  PetscCall(MatMPIAIJSetPreallocationCSR(mat, rowidxs, colidxs, matvals));
  PetscCall(PetscFree(rowidxs));
  PetscCall(PetscFree2(colidxs, matvals));
  PetscFunctionReturn(PETSC_SUCCESS);
}
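/* On-disk layout parsed by MatLoad_MPIAIJ_Binary() above (PETSc binary matrix format),
   written out here for reference:

     PetscInt    header[4] = {MAT_FILE_CLASSID, M, N, nz};  (nz is PETSC_MAX_INT when the total is not stored)
     PetscInt    rowlens[M];                                (number of nonzeros in each row)
     PetscInt    colidxs[nnz];                              (0-based column indices, row by row)
     PetscScalar matvals[nnz];                              (values in the same order)

   The in-place prefix sum over the local row lengths turns them into the CSR row offsets
   that MatMPIAIJSetPreallocationCSR() expects. */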
/* Not scalable because of ISAllGather() unless getting all columns. */
static PetscErrorCode ISGetSeqIS_Private(Mat mat, IS iscol, IS *isseq)
{
  IS          iscol_local;
  PetscBool   isstride;
  PetscMPIInt lisstride = 0, gisstride;

  PetscFunctionBegin;
  /* check if we are grabbing all columns */
  PetscCall(PetscObjectTypeCompare((PetscObject)iscol, ISSTRIDE, &isstride));

  if (isstride) {
    PetscInt start, len, mstart, mlen;
    PetscCall(ISStrideGetInfo(iscol, &start, NULL));
    PetscCall(ISGetLocalSize(iscol, &len));
    PetscCall(MatGetOwnershipRangeColumn(mat, &mstart, &mlen));
    if (mstart == start && mlen - mstart == len) lisstride = 1;
  }

  PetscCall(MPIU_Allreduce(&lisstride, &gisstride, 1, MPI_INT, MPI_MIN, PetscObjectComm((PetscObject)mat)));
  if (gisstride) {
    PetscInt N;
    PetscCall(MatGetSize(mat, NULL, &N));
    PetscCall(ISCreateStride(PETSC_COMM_SELF, N, 0, 1, &iscol_local));
    PetscCall(ISSetIdentity(iscol_local));
    PetscCall(PetscInfo(mat, "Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n"));
  } else {
    PetscInt cbs;
    PetscCall(ISGetBlockSize(iscol, &cbs));
    PetscCall(ISAllGather(iscol, &iscol_local));
    PetscCall(ISSetBlockSize(iscol_local, cbs));
  }

  *isseq = iscol_local;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and the global size of iscol_local
  (see MatCreateSubMatrix_MPIAIJ_nonscalable)

  Input Parameters:
+ mat   - matrix
. isrow - parallel row index set; its local indices are a subset of the local rows of `mat`,
          i.e., mat->rstart <= isrow[i] < mat->rend
- iscol - parallel column index set; its local indices are a subset of the local columns of `mat`,
          i.e., mat->cstart <= iscol[i] < mat->cend

  Output Parameters:
+ isrow_d - sequential row index set for retrieving mat->A
. iscol_d - sequential column index set for retrieving mat->A
. iscol_o - sequential column index set for retrieving mat->B
- garray  - column map; garray[i] indicates global location of iscol_o[i] in `iscol`
*/
static PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat, IS isrow, IS iscol, IS *isrow_d, IS *iscol_d, IS *iscol_o, const PetscInt *garray[])
{
  Vec             x, cmap;
  const PetscInt *is_idx;
  PetscScalar    *xarray, *cmaparray;
  PetscInt        ncols, isstart, *idx, m, rstart, *cmap1, count;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ *)mat->data;
  Mat             B = a->B;
  Vec             lvec = a->lvec, lcmap;
  PetscInt        i, cstart, cend, Bn = B->cmap->N;
  MPI_Comm        comm;
  VecScatter      Mvctx = a->Mvctx;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
  PetscCall(ISGetLocalSize(iscol, &ncols));

  /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
  PetscCall(MatCreateVecs(mat, &x, NULL));
  PetscCall(VecSet(x, -1.0));
  PetscCall(VecDuplicate(x, &cmap));
  PetscCall(VecSet(cmap, -1.0));

  /* Get start indices */
  PetscCallMPI(MPI_Scan(&ncols, &isstart, 1, MPIU_INT, MPI_SUM, comm));
  isstart -= ncols;
  PetscCall(MatGetOwnershipRangeColumn(mat, &cstart, &cend));

  PetscCall(ISGetIndices(iscol, &is_idx));
  PetscCall(VecGetArray(x, &xarray));
  PetscCall(VecGetArray(cmap, &cmaparray));
  PetscCall(PetscMalloc1(ncols, &idx));
  for (i = 0; i < ncols; i++) {
    xarray[is_idx[i] - cstart]    = (PetscScalar)is_idx[i];
    cmaparray[is_idx[i] - cstart] = i + isstart;        /* global index of iscol[i] */
    idx[i]                        = is_idx[i] - cstart; /* local index of iscol[i]  */
  }
  PetscCall(VecRestoreArray(x, &xarray));
  PetscCall(VecRestoreArray(cmap, &cmaparray));
  PetscCall(ISRestoreIndices(iscol, &is_idx));

  /* Get iscol_d */
  PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, iscol_d));
  PetscCall(ISGetBlockSize(iscol, &i));
  PetscCall(ISSetBlockSize(*iscol_d, i));

  /* Get isrow_d */
  PetscCall(ISGetLocalSize(isrow, &m));
  rstart = mat->rmap->rstart;
  PetscCall(PetscMalloc1(m, &idx));
  PetscCall(ISGetIndices(isrow, &is_idx));
  for (i = 0; i < m; i++) idx[i] = is_idx[i] - rstart;
  PetscCall(ISRestoreIndices(isrow, &is_idx));

  PetscCall(ISCreateGeneral(PETSC_COMM_SELF, m, idx, PETSC_OWN_POINTER, isrow_d));
  PetscCall(ISGetBlockSize(isrow, &i));
  PetscCall(ISSetBlockSize(*isrow_d, i));

  /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
  PetscCall(VecScatterBegin(Mvctx, x, lvec, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(Mvctx, x, lvec, INSERT_VALUES, SCATTER_FORWARD));

  PetscCall(VecDuplicate(lvec, &lcmap));

  PetscCall(VecScatterBegin(Mvctx, cmap, lcmap, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(Mvctx, cmap, lcmap, INSERT_VALUES, SCATTER_FORWARD));

  /* (3) create sequential iscol_o (a subset of iscol) and garray */
  /* off-process column indices */
  count = 0;
  PetscCall(PetscMalloc1(Bn, &idx));
  PetscCall(PetscMalloc1(Bn, &cmap1));

  PetscCall(VecGetArray(lvec, &xarray));
  PetscCall(VecGetArray(lcmap, &cmaparray));
  for (i = 0; i < Bn; i++) {
    if (PetscRealPart(xarray[i]) > -1.0) {
      idx[count]   = i;                                     /* local column index in off-diagonal part B */
      cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]); /* column index in submat */
      count++;
    }
  }
  PetscCall(VecRestoreArray(lvec, &xarray));
  PetscCall(VecRestoreArray(lcmap, &cmaparray));

  PetscCall(ISCreateGeneral(PETSC_COMM_SELF, count, idx, PETSC_COPY_VALUES, iscol_o));
  /* cannot ensure iscol_o has same blocksize as iscol! */

  PetscCall(PetscFree(idx));
  *garray = cmap1;

  PetscCall(VecDestroy(&x));
  PetscCall(VecDestroy(&cmap));
  PetscCall(VecDestroy(&lcmap));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat, IS isrow, IS iscol, MatReuse call, Mat *submat)
{
  Mat_MPIAIJ *a = (Mat_MPIAIJ *)mat->data, *asub;
  Mat         M = NULL;
  MPI_Comm    comm;
  IS          iscol_d, isrow_d, iscol_o;
  Mat         Asub = NULL, Bsub = NULL;
  PetscInt    n;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));

  if (call == MAT_REUSE_MATRIX) {
    /* Retrieve isrow_d, iscol_d and iscol_o from submat */
    PetscCall(PetscObjectQuery((PetscObject)*submat, "isrow_d", (PetscObject *)&isrow_d));
    PetscCheck(isrow_d, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "isrow_d passed in was not used before, cannot reuse");

    PetscCall(PetscObjectQuery((PetscObject)*submat, "iscol_d", (PetscObject *)&iscol_d));
    PetscCheck(iscol_d, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "iscol_d passed in was not used before, cannot reuse");

    PetscCall(PetscObjectQuery((PetscObject)*submat, "iscol_o", (PetscObject *)&iscol_o));
    PetscCheck(iscol_o, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "iscol_o passed in was not used before, cannot reuse");

    /* Update diagonal and off-diagonal portions of submat */
    asub = (Mat_MPIAIJ *)(*submat)->data;
    PetscCall(MatCreateSubMatrix_SeqAIJ(a->A, isrow_d, iscol_d, PETSC_DECIDE, MAT_REUSE_MATRIX, &asub->A));
    PetscCall(ISGetLocalSize(iscol_o, &n));
    if (n) PetscCall(MatCreateSubMatrix_SeqAIJ(a->B, isrow_d, iscol_o, PETSC_DECIDE, MAT_REUSE_MATRIX, &asub->B));
    PetscCall(MatAssemblyBegin(*submat, MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(*submat, MAT_FINAL_ASSEMBLY));
  } else { /* call == MAT_INITIAL_MATRIX */
    const PetscInt *garray;
    PetscInt        BsubN;

    /* Create isrow_d, iscol_d, iscol_o and garray */
    PetscCall(ISGetSeqIS_SameColDist_Private(mat, isrow, iscol, &isrow_d, &iscol_d, &iscol_o, &garray));

    /* Create local submatrices Asub and Bsub */
    PetscCall(MatCreateSubMatrix_SeqAIJ(a->A, isrow_d, iscol_d, PETSC_DECIDE, MAT_INITIAL_MATRIX, &Asub));
    PetscCall(MatCreateSubMatrix_SeqAIJ(a->B, isrow_d, iscol_o, PETSC_DECIDE, MAT_INITIAL_MATRIX, &Bsub));

    /* Create submatrix M */
    PetscCall(MatCreateMPIAIJWithSeqAIJ(comm, Asub, Bsub, garray, &M));

    /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
    asub = (Mat_MPIAIJ *)M->data;

    PetscCall(ISGetLocalSize(iscol_o, &BsubN));
    n = asub->B->cmap->N;
    if (BsubN > n) {
      /* This case can be tested using ~petsc/src/tao/bound/tutorials/runplate2_3 */
      const PetscInt *idx;
      PetscInt        i, j, *idx_new, *subgarray = asub->garray;
      PetscCall(PetscInfo(M, "submatrix Bn %" PetscInt_FMT " != BsubN %" PetscInt_FMT ", update iscol_o\n", n, BsubN));

      PetscCall(PetscMalloc1(n, &idx_new));
      j = 0;
      PetscCall(ISGetIndices(iscol_o, &idx));
      for (i = 0; i < n; i++) {
        if (j >= BsubN) break;
        while (subgarray[i] > garray[j]) j++;

        if (subgarray[i] == garray[j]) {
          idx_new[i] = idx[j++];
        } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "subgarray[%" PetscInt_FMT "]=%" PetscInt_FMT " cannot be smaller than garray[%" PetscInt_FMT "]=%" PetscInt_FMT, i, subgarray[i], j, garray[j]);
      }
      PetscCall(ISRestoreIndices(iscol_o, &idx));

      PetscCall(ISDestroy(&iscol_o));
      PetscCall(ISCreateGeneral(PETSC_COMM_SELF, n, idx_new, PETSC_OWN_POINTER, &iscol_o));
    } else if (BsubN < n) {
      SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Columns of Bsub (%" PetscInt_FMT ") cannot be smaller than B's (%" PetscInt_FMT ")", BsubN, asub->B->cmap->N);
    }

    PetscCall(PetscFree(garray));
    *submat = M;

    /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
    PetscCall(PetscObjectCompose((PetscObject)M, "isrow_d", (PetscObject)isrow_d));
    PetscCall(ISDestroy(&isrow_d));

    PetscCall(PetscObjectCompose((PetscObject)M, "iscol_d", (PetscObject)iscol_d));
    PetscCall(ISDestroy(&iscol_d));

    PetscCall(PetscObjectCompose((PetscObject)M, "iscol_o", (PetscObject)iscol_o));
    PetscCall(ISDestroy(&iscol_o));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat, IS isrow, IS iscol, MatReuse call, Mat *newmat)
{
  IS        iscol_local = NULL, isrow_d;
  PetscInt  csize;
  PetscInt  n, i, j, start, end;
  PetscBool sameRowDist = PETSC_FALSE, sameDist[2], tsameDist[2];
  MPI_Comm  comm;

  PetscFunctionBegin;
  /* If isrow has same processor distribution as mat,
     call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
  if (call == MAT_REUSE_MATRIX) {
    PetscCall(PetscObjectQuery((PetscObject)*newmat, "isrow_d", (PetscObject *)&isrow_d));
    if (isrow_d) {
      sameRowDist  = PETSC_TRUE;
      tsameDist[1] = PETSC_TRUE; /* sameColDist */
    } else {
      PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubIScol", (PetscObject *)&iscol_local));
      if (iscol_local) {
        sameRowDist  = PETSC_TRUE;
        tsameDist[1] = PETSC_FALSE; /* !sameColDist */
      }
    }
  } else {
    /* Check if isrow has same processor distribution as mat */
    sameDist[0] = PETSC_FALSE;
    PetscCall(ISGetLocalSize(isrow, &n));
    if (!n) {
      sameDist[0] = PETSC_TRUE;
    } else {
      PetscCall(ISGetMinMax(isrow, &i, &j));
      PetscCall(MatGetOwnershipRange(mat, &start, &end));
      if (i >= start && j < end) sameDist[0] = PETSC_TRUE;
    }

    /* Check if iscol has same processor distribution as mat */
    sameDist[1] = PETSC_FALSE;
    PetscCall(ISGetLocalSize(iscol, &n));
    if (!n) {
      sameDist[1] = PETSC_TRUE;
    } else {
      PetscCall(ISGetMinMax(iscol, &i, &j));
      PetscCall(MatGetOwnershipRangeColumn(mat, &start, &end));
      if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
    }

    PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
    PetscCall(MPIU_Allreduce(&sameDist, &tsameDist, 2, MPIU_BOOL, MPI_LAND, comm));
    sameRowDist = tsameDist[0];
  }
  if (sameRowDist) {
    if (tsameDist[1]) { /* sameRowDist & sameColDist */
      /* isrow and iscol have same processor distribution as mat */
      PetscCall(MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat, isrow, iscol, call, newmat));
      PetscFunctionReturn(PETSC_SUCCESS);
    } else { /* sameRowDist */
      /* isrow has same processor distribution as mat */
      if (call == MAT_INITIAL_MATRIX) {
        PetscBool sorted;
        PetscCall(ISGetSeqIS_Private(mat, iscol, &iscol_local));
        PetscCall(ISGetLocalSize(iscol_local, &n)); /* local size of iscol_local = global columns of newmat */
        PetscCall(ISGetSize(iscol, &i));
        PetscCheck(n == i, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "n %" PetscInt_FMT " != size of iscol %" PetscInt_FMT, n, i);

        PetscCall(ISSorted(iscol_local, &sorted));
        if (sorted) {
          /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
          PetscCall(MatCreateSubMatrix_MPIAIJ_SameRowDist(mat, isrow, iscol, iscol_local, MAT_INITIAL_MATRIX, newmat));
          PetscFunctionReturn(PETSC_SUCCESS);
        }
      } else { /* call == MAT_REUSE_MATRIX */
        IS iscol_sub;
        PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubIScol", (PetscObject *)&iscol_sub));
        if (iscol_sub) {
          PetscCall(MatCreateSubMatrix_MPIAIJ_SameRowDist(mat, isrow, iscol, NULL, call, newmat));
          PetscFunctionReturn(PETSC_SUCCESS);
        }
      }
    }
  }

  /* General case: iscol -> iscol_local which has global size of iscol */
  if (call == MAT_REUSE_MATRIX) {
    PetscCall(PetscObjectQuery((PetscObject)*newmat, "ISAllGather", (PetscObject *)&iscol_local));
    PetscCheck(iscol_local, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Submatrix passed in was not used before, cannot reuse");
  } else {
    if (!iscol_local) PetscCall(ISGetSeqIS_Private(mat, iscol, &iscol_local));
  }

  PetscCall(ISGetLocalSize(iscol, &csize));
  PetscCall(MatCreateSubMatrix_MPIAIJ_nonscalable(mat, isrow, iscol_local, csize, call, newmat));

  if (call == MAT_INITIAL_MATRIX) {
    PetscCall(PetscObjectCompose((PetscObject)*newmat, "ISAllGather", (PetscObject)iscol_local));
    PetscCall(ISDestroy(&iscol_local));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
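/* Caller-side sketch for the dispatcher above, through the public entry point
   MatCreateSubMatrix() (assumes an assembled MPIAIJ matrix A; here every process keeps
   its own rows and columns, which lets the SameRowColDist path be taken):

     IS       isrow, iscol;
     Mat      sub;
     PetscInt rstart, rend, cstart, cend;
     PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
     PetscCall(MatGetOwnershipRangeColumn(A, &cstart, &cend));
     PetscCall(ISCreateStride(PETSC_COMM_WORLD, rend - rstart, rstart, 1, &isrow));
     PetscCall(ISCreateStride(PETSC_COMM_WORLD, cend - cstart, cstart, 1, &iscol));
     PetscCall(MatCreateSubMatrix(A, isrow, iscol, MAT_INITIAL_MATRIX, &sub));
     (... later, refresh the values reusing the cached index sets ...)
     PetscCall(MatCreateSubMatrix(A, isrow, iscol, MAT_REUSE_MATRIX, &sub));
*/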
B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine 3442 - garray - global index of `B` columns 3443 3444 Output Parameter: 3445 . mat - the matrix, with input `A` as its local diagonal matrix 3446 3447 Level: advanced 3448 3449 Notes: 3450 See `MatCreateAIJ()` for the definition of "diagonal" and "off-diagonal" portion of the matrix. 3451 3452 `A` becomes part of output mat, `B` is destroyed by this routine. The user cannot use `A` and `B` anymore. 3453 3454 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MATSEQAIJ`, `MatCreateMPIAIJWithSplitArrays()` 3455 @*/ 3456 PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm, Mat A, Mat B, const PetscInt garray[], Mat *mat) 3457 { 3458 Mat_MPIAIJ *maij; 3459 Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data, *bnew; 3460 PetscInt *oi = b->i, *oj = b->j, i, nz, col; 3461 const PetscScalar *oa; 3462 Mat Bnew; 3463 PetscInt m, n, N; 3464 MatType mpi_mat_type; 3465 3466 PetscFunctionBegin; 3467 PetscCall(MatCreate(comm, mat)); 3468 PetscCall(MatGetSize(A, &m, &n)); 3469 PetscCheck(m == B->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Am %" PetscInt_FMT " != Bm %" PetscInt_FMT, m, B->rmap->N); 3470 PetscCheck(PetscAbs(A->rmap->bs) == PetscAbs(B->rmap->bs), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "A row bs %" PetscInt_FMT " != B row bs %" PetscInt_FMT, A->rmap->bs, B->rmap->bs); 3471 /* remove check below; When B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be same as A */ 3472 /* PetscCheck(A->cmap->bs == B->cmap->bs,PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %" PetscInt_FMT " != B column bs %" PetscInt_FMT,A->cmap->bs,B->cmap->bs); */ 3473 3474 /* Get global columns of mat */ 3475 PetscCall(MPIU_Allreduce(&n, &N, 1, MPIU_INT, MPI_SUM, comm)); 3476 3477 PetscCall(MatSetSizes(*mat, m, n, PETSC_DECIDE, N)); 3478 /* Determine the type of MPI matrix that should be created from the type of matrix A, which holds the "diagonal" portion. 
  if (A->rmap->bs > 1 || A->cmap->bs > 1) PetscCall(MatSetBlockSizes(*mat, A->rmap->bs, A->cmap->bs));
  maij = (Mat_MPIAIJ *)(*mat)->data;

  (*mat)->preallocated = PETSC_TRUE;

  PetscCall(PetscLayoutSetUp((*mat)->rmap));
  PetscCall(PetscLayoutSetUp((*mat)->cmap));

  /* Set A as diagonal portion of *mat */
  maij->A = A;

  nz = oi[m];
  for (i = 0; i < nz; i++) {
    col   = oj[i];
    oj[i] = garray[col];
  }

  /* Set Bnew as off-diagonal portion of *mat */
  PetscCall(MatSeqAIJGetArrayRead(B, &oa));
  PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, N, oi, oj, (PetscScalar *)oa, &Bnew));
  PetscCall(MatSeqAIJRestoreArrayRead(B, &oa));
  bnew        = (Mat_SeqAIJ *)Bnew->data;
  bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
  maij->B     = Bnew;

  PetscCheck(B->rmap->N == Bnew->rmap->N, PETSC_COMM_SELF, PETSC_ERR_PLIB, "BN %" PetscInt_FMT " != BnewN %" PetscInt_FMT, B->rmap->N, Bnew->rmap->N);

  b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
  b->free_a       = PETSC_FALSE;
  b->free_ij      = PETSC_FALSE;
  PetscCall(MatDestroy(&B));

  bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
  bnew->free_a       = PETSC_TRUE;
  bnew->free_ij      = PETSC_TRUE;

  /* condense columns of maij->B */
  PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(MatAssemblyBegin(*mat, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(*mat, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_FALSE));
  PetscCall(MatSetOption(*mat, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
  PetscFunctionReturn(PETSC_SUCCESS);
}
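/* Rank-local construction sketch for the routine above (hypothetical names; each rank
   assembles its diagonal block A of size m x n in local column numbering and its
   off-diagonal block Boff of size m x nghost, where column k of Boff stands for global
   column garray[k], sorted increasing):

     Mat A, Boff, M;
     PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, m, n, 0, dnnz, &A));
     PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, m, nghost, 0, onnz, &Boff));
     (... MatSetValues() and assembly on A and Boff ...)
     PetscCall(MatCreateMPIAIJWithSeqAIJ(PETSC_COMM_WORLD, A, Boff, garray, &M));
     (A is now owned by M and Boff has been destroyed; neither may be used again)
*/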
extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat, PetscInt, const IS[], const IS[], MatReuse, PetscBool, Mat *);

PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat, IS isrow, IS iscol, IS iscol_local, MatReuse call, Mat *newmat)
{
  PetscInt        i, m, n, rstart, row, rend, nz, j, bs, cbs;
  PetscInt       *ii, *jj, nlocal, *dlens, *olens, dlen, olen, jend, mglobal;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ *)mat->data;
  Mat             M, Msub, B = a->B;
  MatScalar      *aa;
  Mat_SeqAIJ     *aij;
  PetscInt       *garray = a->garray, *colsub, Ncols;
  PetscInt        count, Bn = B->cmap->N, cstart = mat->cmap->rstart, cend = mat->cmap->rend;
  IS              iscol_sub, iscmap;
  const PetscInt *is_idx, *cmap;
  PetscBool       allcolumns = PETSC_FALSE;
  MPI_Comm        comm;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
  if (call == MAT_REUSE_MATRIX) {
    PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubIScol", (PetscObject *)&iscol_sub));
    PetscCheck(iscol_sub, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "SubIScol passed in was not used before, cannot reuse");
    PetscCall(ISGetLocalSize(iscol_sub, &count));

    PetscCall(PetscObjectQuery((PetscObject)*newmat, "Subcmap", (PetscObject *)&iscmap));
    PetscCheck(iscmap, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Subcmap passed in was not used before, cannot reuse");

    PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubMatrix", (PetscObject *)&Msub));
    PetscCheck(Msub, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Submatrix passed in was not used before, cannot reuse");

    PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol_sub, MAT_REUSE_MATRIX, PETSC_FALSE, &Msub));
  } else { /* call == MAT_INITIAL_MATRIX */
    PetscBool flg;

    PetscCall(ISGetLocalSize(iscol, &n));
    PetscCall(ISGetSize(iscol, &Ncols));

    /* (1) iscol -> nonscalable iscol_local */
    /* Check for special case: each processor gets entire matrix columns */
    PetscCall(ISIdentity(iscol_local, &flg));
    if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
    PetscCall(MPIU_Allreduce(MPI_IN_PLACE, &allcolumns, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
    if (allcolumns) {
      iscol_sub = iscol_local;
      PetscCall(PetscObjectReference((PetscObject)iscol_local));
      PetscCall(ISCreateStride(PETSC_COMM_SELF, n, 0, 1, &iscmap));
    } else {
      /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted, it can have duplicate indices */
      PetscInt *idx, *cmap1, k;
      PetscCall(PetscMalloc1(Ncols, &idx));
      PetscCall(PetscMalloc1(Ncols, &cmap1));
      PetscCall(ISGetIndices(iscol_local, &is_idx));
      count = 0;
      k     = 0;
      for (i = 0; i < Ncols; i++) {
        j = is_idx[i];
        if (j >= cstart && j < cend) {
          /* diagonal part of mat */
          idx[count]     = j;
          cmap1[count++] = i; /* column index in submat */
        } else if (Bn) {
          /* off-diagonal part of mat */
          if (j == garray[k]) {
            idx[count]     = j;
            cmap1[count++] = i; /* column index in submat */
          } else if (j > garray[k]) {
            while (j > garray[k] && k < Bn - 1) k++;
            if (j == garray[k]) {
              idx[count]     = j;
              cmap1[count++] = i; /* column index in submat */
            }
          }
        }
      }
      PetscCall(ISRestoreIndices(iscol_local, &is_idx));

      PetscCall(ISCreateGeneral(PETSC_COMM_SELF, count, idx, PETSC_OWN_POINTER, &iscol_sub));
      PetscCall(ISGetBlockSize(iscol, &cbs));
      PetscCall(ISSetBlockSize(iscol_sub, cbs));

      PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local), count, cmap1, PETSC_OWN_POINTER, &iscmap));
    }

    /* (3) Create sequential Msub */
    PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol_sub, MAT_INITIAL_MATRIX, allcolumns, &Msub));
  }

  PetscCall(ISGetLocalSize(iscol_sub, &count));
  aij = (Mat_SeqAIJ *)(Msub)->data;
  ii  = aij->i;
  PetscCall(ISGetIndices(iscmap, &cmap));

  /*
      m      - number of local rows
      Ncols  - number of columns (same on all processors)
      rstart - first row in new global matrix generated
  */
  PetscCall(MatGetSize(Msub, &m, NULL));

  if (call == MAT_INITIAL_MATRIX) {
    /* (4) Create parallel newmat */
    PetscMPIInt rank, size;
    PetscInt    csize;

    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));

    /*
        Determine the number of non-zeros in the diagonal and off-diagonal
        portions of the matrix in order to do correct preallocation
    */

    /* first get start and end of "diagonal" columns */
    PetscCall(ISGetLocalSize(iscol, &csize));
    if (csize == PETSC_DECIDE) {
      PetscCall(ISGetSize(isrow, &mglobal));
      if (mglobal == Ncols) { /* square matrix */
        nlocal = m;
      } else {
        nlocal = Ncols / size + ((Ncols % size) > rank);
      }
    } else {
      nlocal = csize;
    }
    PetscCallMPI(MPI_Scan(&nlocal, &rend, 1, MPIU_INT, MPI_SUM, comm));
    rstart = rend - nlocal;
    PetscCheck(rank != size - 1 || rend == Ncols, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Local column sizes %" PetscInt_FMT " do not add up to total number of columns %" PetscInt_FMT, rend, Ncols);
    /* next, compute all the lengths */
    jj = aij->j;
    PetscCall(PetscMalloc1(2 * m + 1, &dlens));
    olens = dlens + m;
    for (i = 0; i < m; i++) {
      jend = ii[i + 1] - ii[i];
      olen = 0;
      dlen = 0;
      for (j = 0; j < jend; j++) {
        if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
        else dlen++;
        jj++;
      }
      olens[i] = olen;
      dlens[i] = dlen;
    }

    PetscCall(ISGetBlockSize(isrow, &bs));
    PetscCall(ISGetBlockSize(iscol, &cbs));

    PetscCall(MatCreate(comm, &M));
    PetscCall(MatSetSizes(M, m, nlocal, PETSC_DECIDE, Ncols));
    PetscCall(MatSetBlockSizes(M, bs, cbs));
    PetscCall(MatSetType(M, ((PetscObject)mat)->type_name));
    PetscCall(MatMPIAIJSetPreallocation(M, 0, dlens, 0, olens));
    PetscCall(PetscFree(dlens));
  } else { /* call == MAT_REUSE_MATRIX */
    M = *newmat;
    PetscCall(MatGetLocalSize(M, &i, NULL));
    PetscCheck(i == m, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Previous matrix must be same size/layout as request");
    PetscCall(MatZeroEntries(M));
    /*
        The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
        rather than the slower MatSetValues().
    */
    M->was_assembled = PETSC_TRUE;
    M->assembled     = PETSC_FALSE;
  }

  /* (5) Set values of Msub to *newmat */
  PetscCall(PetscMalloc1(count, &colsub));
  PetscCall(MatGetOwnershipRange(M, &rstart, NULL));

  jj = aij->j;
  PetscCall(MatSeqAIJGetArrayRead(Msub, (const PetscScalar **)&aa));
  for (i = 0; i < m; i++) {
    row = rstart + i;
    nz  = ii[i + 1] - ii[i];
    for (j = 0; j < nz; j++) colsub[j] = cmap[jj[j]];
    PetscCall(MatSetValues_MPIAIJ(M, 1, &row, nz, colsub, aa, INSERT_VALUES));
    jj += nz;
    aa += nz;
  }
  PetscCall(MatSeqAIJRestoreArrayRead(Msub, (const PetscScalar **)&aa));
  PetscCall(ISRestoreIndices(iscmap, &cmap));

  PetscCall(MatAssemblyBegin(M, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(M, MAT_FINAL_ASSEMBLY));

  PetscCall(PetscFree(colsub));

  /* save Msub, iscol_sub and iscmap used in processor for next request */
  if (call == MAT_INITIAL_MATRIX) {
    *newmat = M;
    PetscCall(PetscObjectCompose((PetscObject)(*newmat), "SubMatrix", (PetscObject)Msub));
    PetscCall(MatDestroy(&Msub));

    PetscCall(PetscObjectCompose((PetscObject)(*newmat), "SubIScol", (PetscObject)iscol_sub));
    PetscCall(ISDestroy(&iscol_sub));

    PetscCall(PetscObjectCompose((PetscObject)(*newmat), "Subcmap", (PetscObject)iscmap));
    PetscCall(ISDestroy(&iscmap));

    if (iscol_local) {
      PetscCall(PetscObjectCompose((PetscObject)(*newmat), "ISAllGather", (PetscObject)iscol_local));
      PetscCall(ISDestroy(&iscol_local));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*
  Not great since it makes two copies of the submatrix: first a SeqAIJ locally, and then
  the end result by concatenating the local matrices. Writing it directly would be much
  like MatCreateSubMatrices_MPIAIJ().

  This requires a sequential iscol with all indices.
*/
PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat, IS isrow, IS iscol, PetscInt csize, MatReuse call, Mat *newmat)
{
  PetscMPIInt rank, size;
  PetscInt    i, m, n, rstart, row, rend, nz, *cwork, j, bs, cbs;
  PetscInt   *ii, *jj, nlocal, *dlens, *olens, dlen, olen, jend, mglobal;
  Mat         M, Mreuse;
  MatScalar  *aa, *vwork;
  MPI_Comm    comm;
  Mat_SeqAIJ *aij;
  PetscBool   colflag, allcolumns = PETSC_FALSE;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /* Check for special case: each processor gets entire matrix columns */
  PetscCall(ISIdentity(iscol, &colflag));
  PetscCall(ISGetLocalSize(iscol, &n));
  if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
  PetscCall(MPIU_Allreduce(MPI_IN_PLACE, &allcolumns, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));

  if (call == MAT_REUSE_MATRIX) {
    PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubMatrix", (PetscObject *)&Mreuse));
    PetscCheck(Mreuse, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Submatrix passed in was not used before, cannot reuse");
    PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol, MAT_REUSE_MATRIX, allcolumns, &Mreuse));
  } else {
    PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol, MAT_INITIAL_MATRIX, allcolumns, &Mreuse));
  }

  /*
      m      - number of local rows
      n      - number of columns (same on all processors)
      rstart - first row in new global matrix generated
  */
  PetscCall(MatGetSize(Mreuse, &m, &n));
  PetscCall(MatGetBlockSizes(Mreuse, &bs, &cbs));
  if (call == MAT_INITIAL_MATRIX) {
    aij = (Mat_SeqAIJ *)(Mreuse)->data;
    ii  = aij->i;
    jj  = aij->j;

    /*
        Determine the number of non-zeros in the diagonal and off-diagonal
        portions of the matrix in order to do correct preallocation
    */

    /* first get start and end of "diagonal" columns */
    if (csize == PETSC_DECIDE) {
      PetscCall(ISGetSize(isrow, &mglobal));
      if (mglobal == n) { /* square matrix */
        nlocal = m;
      } else {
        nlocal = n / size + ((n % size) > rank);
      }
    } else {
      nlocal = csize;
    }
    PetscCallMPI(MPI_Scan(&nlocal, &rend, 1, MPIU_INT, MPI_SUM, comm));
    rstart = rend - nlocal;
    PetscCheck(rank != size - 1 || rend == n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Local column sizes %" PetscInt_FMT " do not add up to total number of columns %" PetscInt_FMT, rend, n);

    /* next, compute all the lengths */
    PetscCall(PetscMalloc1(2 * m + 1, &dlens));
    olens = dlens + m;
    for (i = 0; i < m; i++) {
      jend = ii[i + 1] - ii[i];
      olen = 0;
      dlen = 0;
      for (j = 0; j < jend; j++) {
        if (*jj < rstart || *jj >= rend) olen++;
        else dlen++;
        jj++;
      }
      olens[i] = olen;
      dlens[i] = dlen;
    }
    PetscCall(MatCreate(comm, &M));
    PetscCall(MatSetSizes(M, m, nlocal, PETSC_DECIDE, n));
    PetscCall(MatSetBlockSizes(M, bs, cbs));
    PetscCall(MatSetType(M, ((PetscObject)mat)->type_name));
    PetscCall(MatMPIAIJSetPreallocation(M, 0, dlens, 0, olens));
    PetscCall(PetscFree(dlens));
  } else {
    PetscInt ml, nl;

    M = *newmat;
    PetscCall(MatGetLocalSize(M, &ml, &nl));
    PetscCheck(ml == m, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Previous matrix must be same size/layout as request");
    PetscCall(MatZeroEntries(M));
request"); 3835 PetscCall(MatZeroEntries(M)); 3836 /* 3837 The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly, 3838 rather than the slower MatSetValues(). 3839 */ 3840 M->was_assembled = PETSC_TRUE; 3841 M->assembled = PETSC_FALSE; 3842 } 3843 PetscCall(MatGetOwnershipRange(M, &rstart, &rend)); 3844 aij = (Mat_SeqAIJ *)(Mreuse)->data; 3845 ii = aij->i; 3846 jj = aij->j; 3847 3848 /* trigger copy to CPU if needed */ 3849 PetscCall(MatSeqAIJGetArrayRead(Mreuse, (const PetscScalar **)&aa)); 3850 for (i = 0; i < m; i++) { 3851 row = rstart + i; 3852 nz = ii[i + 1] - ii[i]; 3853 cwork = jj; 3854 jj = PetscSafePointerPlusOffset(jj, nz); 3855 vwork = aa; 3856 aa = PetscSafePointerPlusOffset(aa, nz); 3857 PetscCall(MatSetValues_MPIAIJ(M, 1, &row, nz, cwork, vwork, INSERT_VALUES)); 3858 } 3859 PetscCall(MatSeqAIJRestoreArrayRead(Mreuse, (const PetscScalar **)&aa)); 3860 3861 PetscCall(MatAssemblyBegin(M, MAT_FINAL_ASSEMBLY)); 3862 PetscCall(MatAssemblyEnd(M, MAT_FINAL_ASSEMBLY)); 3863 *newmat = M; 3864 3865 /* save submatrix used in processor for next request */ 3866 if (call == MAT_INITIAL_MATRIX) { 3867 PetscCall(PetscObjectCompose((PetscObject)M, "SubMatrix", (PetscObject)Mreuse)); 3868 PetscCall(MatDestroy(&Mreuse)); 3869 } 3870 PetscFunctionReturn(PETSC_SUCCESS); 3871 } 3872 3873 static PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B, const PetscInt Ii[], const PetscInt J[], const PetscScalar v[]) 3874 { 3875 PetscInt m, cstart, cend, j, nnz, i, d, *ld; 3876 PetscInt *d_nnz, *o_nnz, nnz_max = 0, rstart, ii; 3877 const PetscInt *JJ; 3878 PetscBool nooffprocentries; 3879 Mat_MPIAIJ *Aij = (Mat_MPIAIJ *)B->data; 3880 3881 PetscFunctionBegin; 3882 PetscCheck(Ii[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Ii[0] must be 0 it is %" PetscInt_FMT, Ii[0]); 3883 3884 PetscCall(PetscLayoutSetUp(B->rmap)); 3885 PetscCall(PetscLayoutSetUp(B->cmap)); 3886 m = B->rmap->n; 3887 cstart = B->cmap->rstart; 3888 cend = B->cmap->rend; 3889 rstart = B->rmap->rstart; 3890 3891 PetscCall(PetscCalloc2(m, &d_nnz, m, &o_nnz)); 3892 3893 if (PetscDefined(USE_DEBUG)) { 3894 for (i = 0; i < m; i++) { 3895 nnz = Ii[i + 1] - Ii[i]; 3896 JJ = PetscSafePointerPlusOffset(J, Ii[i]); 3897 PetscCheck(nnz >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Local row %" PetscInt_FMT " has a negative %" PetscInt_FMT " number of columns", i, nnz); 3898 PetscCheck(!nnz || !(JJ[0] < 0), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Row %" PetscInt_FMT " starts with negative column index %" PetscInt_FMT, i, JJ[0]); 3899 PetscCheck(!nnz || !(JJ[nnz - 1] >= B->cmap->N), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Row %" PetscInt_FMT " ends with too large a column index %" PetscInt_FMT " (max allowed %" PetscInt_FMT ")", i, JJ[nnz - 1], B->cmap->N); 3900 } 3901 } 3902 3903 for (i = 0; i < m; i++) { 3904 nnz = Ii[i + 1] - Ii[i]; 3905 JJ = PetscSafePointerPlusOffset(J, Ii[i]); 3906 nnz_max = PetscMax(nnz_max, nnz); 3907 d = 0; 3908 for (j = 0; j < nnz; j++) { 3909 if (cstart <= JJ[j] && JJ[j] < cend) d++; 3910 } 3911 d_nnz[i] = d; 3912 o_nnz[i] = nnz - d; 3913 } 3914 PetscCall(MatMPIAIJSetPreallocation(B, 0, d_nnz, 0, o_nnz)); 3915 PetscCall(PetscFree2(d_nnz, o_nnz)); 3916 3917 for (i = 0; i < m; i++) { 3918 ii = i + rstart; 3919 PetscCall(MatSetValues_MPIAIJ(B, 1, &ii, Ii[i + 1] - Ii[i], PetscSafePointerPlusOffset(J, Ii[i]), PetscSafePointerPlusOffset(v, Ii[i]), INSERT_VALUES)); 3920 } 3921 nooffprocentries = B->nooffprocentries; 3922 B->nooffprocentries = PETSC_TRUE; 3923 
static PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B, const PetscInt Ii[], const PetscInt J[], const PetscScalar v[])
{
  PetscInt        m, cstart, cend, j, nnz, i, d, *ld;
  PetscInt       *d_nnz, *o_nnz, nnz_max = 0, rstart, ii;
  const PetscInt *JJ;
  PetscBool       nooffprocentries;
  Mat_MPIAIJ     *Aij = (Mat_MPIAIJ *)B->data;

  PetscFunctionBegin;
  PetscCheck(Ii[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Ii[0] must be 0 it is %" PetscInt_FMT, Ii[0]);

  PetscCall(PetscLayoutSetUp(B->rmap));
  PetscCall(PetscLayoutSetUp(B->cmap));
  m      = B->rmap->n;
  cstart = B->cmap->rstart;
  cend   = B->cmap->rend;
  rstart = B->rmap->rstart;

  PetscCall(PetscCalloc2(m, &d_nnz, m, &o_nnz));

  if (PetscDefined(USE_DEBUG)) {
    for (i = 0; i < m; i++) {
      nnz = Ii[i + 1] - Ii[i];
      JJ  = PetscSafePointerPlusOffset(J, Ii[i]);
      PetscCheck(nnz >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Local row %" PetscInt_FMT " has a negative %" PetscInt_FMT " number of columns", i, nnz);
      PetscCheck(!nnz || !(JJ[0] < 0), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Row %" PetscInt_FMT " starts with negative column index %" PetscInt_FMT, i, JJ[0]);
      PetscCheck(!nnz || !(JJ[nnz - 1] >= B->cmap->N), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Row %" PetscInt_FMT " ends with too large a column index %" PetscInt_FMT " (max allowed %" PetscInt_FMT ")", i, JJ[nnz - 1], B->cmap->N);
    }
  }

  for (i = 0; i < m; i++) {
    nnz     = Ii[i + 1] - Ii[i];
    JJ      = PetscSafePointerPlusOffset(J, Ii[i]);
    nnz_max = PetscMax(nnz_max, nnz);
    d       = 0;
    for (j = 0; j < nnz; j++) {
      if (cstart <= JJ[j] && JJ[j] < cend) d++;
    }
    d_nnz[i] = d;
    o_nnz[i] = nnz - d;
  }
  PetscCall(MatMPIAIJSetPreallocation(B, 0, d_nnz, 0, o_nnz));
  PetscCall(PetscFree2(d_nnz, o_nnz));

  for (i = 0; i < m; i++) {
    ii = i + rstart;
    PetscCall(MatSetValues_MPIAIJ(B, 1, &ii, Ii[i + 1] - Ii[i], PetscSafePointerPlusOffset(J, Ii[i]), PetscSafePointerPlusOffset(v, Ii[i]), INSERT_VALUES));
  }
  nooffprocentries    = B->nooffprocentries;
  B->nooffprocentries = PETSC_TRUE;
  PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
  B->nooffprocentries = nooffprocentries;

  /* count number of entries below block diagonal */
  PetscCall(PetscFree(Aij->ld));
  PetscCall(PetscCalloc1(m, &ld));
  Aij->ld = ld;
  for (i = 0; i < m; i++) {
    nnz = Ii[i + 1] - Ii[i];
    j   = 0;
    while (j < nnz && J[j] < cstart) j++;
    ld[i] = j;
    if (J) J += nnz;
  }

  PetscCall(MatSetOption(B, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in `MATAIJ` format
  (the default parallel PETSc format).

  Collective

  Input Parameters:
+ B - the matrix
. i - the indices into `j` for the start of each local row (starts with zero)
. j - the column indices for each local row (starts with zero)
- v - optional values in the matrix

  Level: developer

  Notes:
  The `i`, `j`, and `v` arrays ARE copied by this routine into the internal format used by PETSc;
  thus you CANNOT change the matrix entries by changing the values of `v` after you have
  called this routine. Use `MatCreateMPIAIJWithSplitArrays()` to avoid needing to copy the arrays.

  The `i` and `j` indices are 0 based, and `i` indices are indices corresponding to the local `j` array.

  The format used for the sparse matrix input is equivalent to a
  row-major ordering, i.e., for the following matrix, the expected input data is
  as shown

.vb
        1 0 0
        2 0 3     P0
       -------
        4 5 6     P1

     Process0 [P0] rows_owned=[0,1]
        i =  {0,1,3}  [size = nrow+1  = 2+1]
        j =  {0,0,2}  [size = 3]
        v =  {1,2,3}  [size = 3]

     Process1 [P1] rows_owned=[2]
        i =  {0,3}    [size = nrow+1  = 1+1]
        j =  {0,1,2}  [size = 3]
        v =  {4,5,6}  [size = 3]
.ve
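  A minimal call sequence under the layout above (a sketch; `comm` spans the two
  processes and error handling is omitted):
.vb
     Mat B;
     MatCreate(comm, &B);
     MatSetSizes(B, nrow, PETSC_DECIDE, 3, 3);   // nrow = 2 on P0, 1 on P1
     MatSetType(B, MATMPIAIJ);
     MatMPIAIJSetPreallocationCSR(B, i, j, v);   // copies i, j, v; B is assembled on return
.ve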
.seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatCreateAIJ()`,
          `MatCreateSeqAIJWithArrays()`, `MatCreateMPIAIJWithSplitArrays()`
@*/
PetscErrorCode MatMPIAIJSetPreallocationCSR(Mat B, const PetscInt i[], const PetscInt j[], const PetscScalar v[])
{
  PetscFunctionBegin;
  PetscTryMethod(B, "MatMPIAIJSetPreallocationCSR_C", (Mat, const PetscInt[], const PetscInt[], const PetscScalar[]), (B, i, j, v));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in `MATMPIAIJ` format
  (the default parallel PETSc format). For good matrix assembly performance
  the user should preallocate the matrix storage by setting the parameters
  `d_nz` (or `d_nnz`) and `o_nz` (or `o_nnz`).

  Collective

  Input Parameters:
+ B     - the matrix
. d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
          (same value is used for all local rows)
. d_nnz - array containing the number of nonzeros in the various rows of the
          DIAGONAL portion of the local submatrix (possibly different for each row)
          or `NULL` (`PETSC_NULL_INTEGER` in Fortran), if `d_nz` is used to specify the nonzero structure.
          The size of this array is equal to the number of local rows, i.e., 'm'.
          For matrices that will be factored, you must leave room for (and set)
          the diagonal entry even if it is zero.
. o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
          submatrix (same value is used for all local rows).
- o_nnz - array containing the number of nonzeros in the various rows of the
          OFF-DIAGONAL portion of the local submatrix (possibly different for
          each row) or `NULL` (`PETSC_NULL_INTEGER` in Fortran), if `o_nz` is used to specify the nonzero
          structure. The size of this array is equal to the number
          of local rows, i.e., 'm'.

  Example Usage:
  Consider the following 8x8 matrix with 34 non-zero values, that is
  assembled across 3 processors. Let's assume that proc0 owns 3 rows,
  proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
  as follows

.vb
            1  2  0  |  0  3  0  |  0  4
    Proc0   0  5  6  |  7  0  0  |  8  0
            9  0 10  | 11  0  0  | 12  0
    -------------------------------------
           13  0 14  | 15 16 17  |  0  0
    Proc1   0 18  0  | 19 20 21  |  0  0
            0  0  0  | 22 23  0  | 24  0
    -------------------------------------
    Proc2  25 26 27  |  0  0 28  | 29  0
           30  0  0  | 31 32 33  |  0 34
.ve

  This can be represented as a collection of submatrices as
.vb
      A B C
      D E F
      G H I
.ve

  Where the submatrices A,B,C are owned by proc0, D,E,F are
  owned by proc1, G,H,I are owned by proc2.

  The 'm' parameters for proc0, proc1, proc2 are 3, 3, 2 respectively.
  The 'n' parameters for proc0, proc1, proc2 are 3, 3, 2 respectively.
  The 'M','N' parameters are 8, 8, and have the same values on all procs.

  The DIAGONAL submatrices corresponding to proc0, proc1, proc2 are
  submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
  corresponding to proc0, proc1, proc2 are [BC], [DF], [GH] respectively.
  Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
  part as `MATSEQAIJ` matrices. For example, proc1 will store [E] as a `MATSEQAIJ`
  matrix, and [DF] as another `MATSEQAIJ` matrix.

  When `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are
  allocated for every row of the local DIAGONAL submatrix, and `o_nz`
  storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
  One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros over
  the local rows for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
  In this case, the values of `d_nz`, `o_nz` are
.vb
     proc0  d_nz = 2, o_nz = 2
     proc1  d_nz = 3, o_nz = 2
     proc2  d_nz = 1, o_nz = 4
.ve
  We are allocating `m`*(`d_nz`+`o_nz`) storage locations for every proc. This
  translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
  for proc2, i.e., we are using 12+15+10=37 storage locations to store
  34 values.

  When `d_nnz`, `o_nnz` parameters are specified, the storage is specified
  for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
  In the above case the values for `d_nnz`, `o_nnz` are
.vb
     proc0  d_nnz = [2,2,2] and o_nnz = [2,2,2]
     proc1  d_nnz = [3,3,2] and o_nnz = [2,1,1]
     proc2  d_nnz = [1,1]   and o_nnz = [4,4]
.ve
  Here the space allocated is the sum of all of the above values, i.e., 34, and
  hence the preallocation is perfect.
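  A matching call sketch (assumes `comm` spans the three processes and that each
  process passes its own local arrays; error handling omitted):
.vb
     Mat      A;
     PetscInt d_nnz[] = {...}, o_nnz[] = {...};  // per-process values from the table above
     MatCreate(comm, &A);
     MatSetSizes(A, m, n, 8, 8);                 // m = n = 3, 3, 2 on proc0, proc1, proc2
     MatSetType(A, MATMPIAIJ);
     MatMPIAIJSetPreallocation(A, 0, d_nnz, 0, o_nnz);
.ve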
  Level: intermediate

  Notes:
  If the *_nnz parameter is given then the *_nz parameter is ignored.

  The `MATAIJ` format, also called compressed row storage (CSR), is compatible with standard Fortran
  storage. The stored row and column indices begin with zero.
  See [Sparse Matrices](sec_matsparse) for details.

  The parallel matrix is partitioned such that the first m0 rows belong to
  process 0, the next m1 rows belong to process 1, the next m2 rows belong
  to process 2, etc., where m0, m1, m2, ... are the input parameter 'm'.

  The DIAGONAL portion of the local submatrix of a processor can be defined
  as the submatrix which is obtained by extracting the part corresponding to
  the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
  first row that belongs to the processor, r2 is the last row belonging to
  this processor, and c1-c2 is the range of indices of the local part of a
  vector suitable for applying the matrix to. This is an m x n matrix. In the
  common case of a square matrix, the row and column ranges are the same and
  the DIAGONAL part is also square. The remaining portion of the local
  submatrix (m x (N - n)) constitutes the OFF-DIAGONAL portion.

  If `o_nnz` and `d_nnz` are specified, then `o_nz` and `d_nz` are ignored.

  You can call `MatGetInfo()` to get information on how effective the preallocation was;
  for example the fields mallocs, nz_allocated, nz_used, nz_unneeded;
  you can also run with the option `-info` and look for messages with the string
  malloc in them to see if additional memory allocation was needed.

.seealso: [](ch_matrices), `Mat`, [Sparse Matrices](sec_matsparse), `MATMPIAIJ`, `MATAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatCreateAIJ()`, `MatMPIAIJSetPreallocationCSR()`,
          `MatGetInfo()`, `PetscSplitOwnership()`
@*/
PetscErrorCode MatMPIAIJSetPreallocation(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(B, MAT_CLASSID, 1);
  PetscValidType(B, 1);
  PetscTryMethod(B, "MatMPIAIJSetPreallocation_C", (Mat, PetscInt, const PetscInt[], PetscInt, const PetscInt[]), (B, d_nz, d_nnz, o_nz, o_nnz));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
  MatCreateMPIAIJWithArrays - creates a `MATMPIAIJ` matrix using arrays that contain
  the local rows in standard CSR format.

  Collective

  Input Parameters:
+ comm - MPI communicator
. m    - number of local rows (Cannot be `PETSC_DECIDE`)
. n    - This value should be the same as the local size used in creating the
         x vector for the matrix-vector product y = Ax. (or `PETSC_DECIDE` to have
         calculated if N is given) For square matrices n is almost always m.
. M    - number of global rows (or `PETSC_DETERMINE` to have calculated if m is given)
. N    - number of global columns (or `PETSC_DETERMINE` to have calculated if n is given)
. i    - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
. j    - column indices
- a    - optional matrix values

  Output Parameter:
. mat - the matrix

  Level: intermediate

  Notes:
  The `i`, `j`, and `a` arrays ARE copied by this routine into the internal format used by PETSc;
  thus you CANNOT change the matrix entries by changing the values of a[] after you have
  called this routine. Use `MatCreateMPIAIJWithSplitArrays()` to avoid needing to copy the arrays.

  The `i` and `j` indices are 0 based, and `i` indices are indices corresponding to the local `j` array.

  Once you have created the matrix you can update it with new numerical values using `MatUpdateMPIAIJWithArrays()`.

  The format used for the sparse matrix input is equivalent to a
  row-major ordering, i.e., for the following matrix, the expected input data is
  as shown

.vb
        1 0 0
        2 0 3     P0
       -------
        4 5 6     P1

     Process0 [P0] rows_owned=[0,1]
        i =  {0,1,3}  [size = nrow+1  = 2+1]
        j =  {0,0,2}  [size = 3]
        v =  {1,2,3}  [size = 3]

     Process1 [P1] rows_owned=[2]
        i =  {0,3}    [size = nrow+1  = 1+1]
        j =  {0,1,2}  [size = 3]
        v =  {4,5,6}  [size = 3]
.ve
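  A minimal call under the layout above (a sketch; `comm` spans the two processes):
.vb
     Mat A;
     MatCreateMPIAIJWithArrays(comm, nrow, PETSC_DECIDE, PETSC_DETERMINE, 3, i, j, v, &A);
     // nrow = 2 on P0 and 1 on P1; i, j, v are the per-process arrays shown above
.ve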
.seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
          `MatCreateAIJ()`, `MatCreateMPIAIJWithSplitArrays()`, `MatUpdateMPIAIJWithArrays()`
@*/
PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, const PetscInt i[], const PetscInt j[], const PetscScalar a[], Mat *mat)
{
  PetscFunctionBegin;
  PetscCheck(!i || !i[0], PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "i (row indices) must start with 0");
  PetscCheck(m >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "local number of rows (m) cannot be PETSC_DECIDE, or negative");
  PetscCall(MatCreate(comm, mat));
  PetscCall(MatSetSizes(*mat, m, n, M, N));
  /* PetscCall(MatSetBlockSizes(M,bs,cbs)); */
  PetscCall(MatSetType(*mat, MATMPIAIJ));
  PetscCall(MatMPIAIJSetPreallocationCSR(*mat, i, j, a));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatUpdateMPIAIJWithArrays - updates a `MATMPIAIJ` matrix using arrays that contain the local rows in standard
  CSR format. Only the numerical values are updated; the other arrays must be identical to what was passed
  to `MatCreateMPIAIJWithArrays()`

  Deprecated: Use `MatUpdateMPIAIJWithArray()`

  Collective

  Input Parameters:
+ mat - the matrix
. m   - number of local rows (Cannot be `PETSC_DECIDE`)
. n   - This value should be the same as the local size used in creating the
        x vector for the matrix-vector product y = Ax. (or `PETSC_DECIDE` to have
        calculated if N is given) For square matrices n is almost always m.
. M   - number of global rows (or `PETSC_DETERMINE` to have calculated if m is given)
. N   - number of global columns (or `PETSC_DETERMINE` to have calculated if n is given)
. Ii  - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
. J   - column indices
- v   - matrix values

  Level: deprecated

.seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
          `MatCreateAIJ()`, `MatCreateMPIAIJWithSplitArrays()`, `MatUpdateMPIAIJWithArray()`
@*/
PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat, PetscInt m, PetscInt n, PetscInt M, PetscInt N, const PetscInt Ii[], const PetscInt J[], const PetscScalar v[])
{
  PetscInt        nnz, i;
  PetscBool       nooffprocentries;
  Mat_MPIAIJ     *Aij = (Mat_MPIAIJ *)mat->data;
  Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ *)Aij->A->data;
  PetscScalar    *ad, *ao;
  PetscInt        ldi, Iii, md;
  const PetscInt *Adi = Ad->i;
  PetscInt       *ld  = Aij->ld;

  PetscFunctionBegin;
  PetscCheck(Ii[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "i (row indices) must start with 0");
  PetscCheck(m >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "local number of rows (m) cannot be PETSC_DECIDE, or negative");
  PetscCheck(m == mat->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Local number of rows cannot change from call to MatUpdateMPIAIJWithArrays()");
  PetscCheck(n == mat->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Local number of columns cannot change from call to MatUpdateMPIAIJWithArrays()");

  PetscCall(MatSeqAIJGetArrayWrite(Aij->A, &ad));
  PetscCall(MatSeqAIJGetArrayWrite(Aij->B, &ao));

  for (i = 0; i < m; i++) {
    nnz = Ii[i + 1] - Ii[i];
    Iii = Ii[i];
    ldi = ld[i];
    md  = Adi[i + 1] - Adi[i];
    PetscCall(PetscArraycpy(ao, v + Iii, ldi));
    PetscCall(PetscArraycpy(ad, v + Iii + ldi, md));
    PetscCall(PetscArraycpy(ao + ldi, v + Iii + ldi + md, nnz - ldi - md));
    ad += md;
    ao += nnz - md;
  }
  nooffprocentries      = mat->nooffprocentries;
  mat->nooffprocentries = PETSC_TRUE;
  PetscCall(MatSeqAIJRestoreArrayWrite(Aij->A, &ad));
  PetscCall(MatSeqAIJRestoreArrayWrite(Aij->B, &ao));
  PetscCall(PetscObjectStateIncrease((PetscObject)Aij->A));
  PetscCall(PetscObjectStateIncrease((PetscObject)Aij->B));
  PetscCall(PetscObjectStateIncrease((PetscObject)mat));
  PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
  mat->nooffprocentries = nooffprocentries;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatUpdateMPIAIJWithArray - updates an `MATMPIAIJ` matrix using an array that contains the nonzero values

  Collective

  Input Parameters:
+ mat - the matrix
- v   - matrix values, stored by row

  Level: intermediate

  Note:
  The matrix must have been obtained with `MatCreateMPIAIJWithArrays()` or `MatMPIAIJSetPreallocationCSR()`
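  A usage sketch (assumes A was created with `MatCreateMPIAIJWithArrays()` and newv holds
  one value per stored nonzero, in the same row-by-row order):
.vb
     MatUpdateMPIAIJWithArray(A, newv);   // refreshes the values; the nonzero pattern is unchanged
.ve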
.seealso: [](ch_matrices), `Mat`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
          `MATMPIAIJ`, `MatCreateAIJ()`, `MatCreateMPIAIJWithSplitArrays()`, `MatUpdateMPIAIJWithArrays()`
@*/
PetscErrorCode MatUpdateMPIAIJWithArray(Mat mat, const PetscScalar v[])
{
  PetscInt        nnz, i, m;
  PetscBool       nooffprocentries;
  Mat_MPIAIJ     *Aij = (Mat_MPIAIJ *)mat->data;
  Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ *)Aij->A->data;
  Mat_SeqAIJ     *Ao  = (Mat_SeqAIJ *)Aij->B->data;
  PetscScalar    *ad, *ao;
  const PetscInt *Adi = Ad->i, *Adj = Ao->i;
  PetscInt        ldi, Iii, md;
  PetscInt       *ld = Aij->ld;

  PetscFunctionBegin;
  m = mat->rmap->n;

  PetscCall(MatSeqAIJGetArrayWrite(Aij->A, &ad));
  PetscCall(MatSeqAIJGetArrayWrite(Aij->B, &ao));
  Iii = 0;
  for (i = 0; i < m; i++) {
    nnz = Adi[i + 1] - Adi[i] + Adj[i + 1] - Adj[i];
    ldi = ld[i];
    md  = Adi[i + 1] - Adi[i];
    PetscCall(PetscArraycpy(ad, v + Iii + ldi, md));
    ad += md;
    if (ao) {
      PetscCall(PetscArraycpy(ao, v + Iii, ldi));
      PetscCall(PetscArraycpy(ao + ldi, v + Iii + ldi + md, nnz - ldi - md));
      ao += nnz - md;
    }
    Iii += nnz;
  }
  nooffprocentries      = mat->nooffprocentries;
  mat->nooffprocentries = PETSC_TRUE;
  PetscCall(MatSeqAIJRestoreArrayWrite(Aij->A, &ad));
  PetscCall(MatSeqAIJRestoreArrayWrite(Aij->B, &ao));
  PetscCall(PetscObjectStateIncrease((PetscObject)Aij->A));
  PetscCall(PetscObjectStateIncrease((PetscObject)Aij->B));
  PetscCall(PetscObjectStateIncrease((PetscObject)mat));
  PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
  mat->nooffprocentries = nooffprocentries;
  PetscFunctionReturn(PETSC_SUCCESS);
}
4369 Entry (i,j) is the size of the message (in bytes) rank i sends to rank j in one `MatMult()` call. 4370 4371 Level: intermediate 4372 4373 Notes: 4374 It is recommended that one use `MatCreateFromOptions()` or the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`, 4375 MatXXXXSetPreallocation() paradigm instead of this routine directly. 4376 [MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`] 4377 4378 If the *_nnz parameter is given then the *_nz parameter is ignored. 4379 4380 The `m`,`n`,`M`,`N` parameters specify the size of the matrix, and its partitioning across 4381 processors, while `d_nz`,`d_nnz`,`o_nz`,`o_nnz` parameters specify the approximate 4382 storage requirements for this matrix. 4383 4384 If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one 4385 processor then it must be used on all processors that share the object for 4386 that argument. 4387 4388 The user MUST specify either the local or global matrix dimensions 4389 (possibly both). 4390 4391 The parallel matrix is partitioned across processors such that the 4392 first m0 rows belong to process 0, the next m1 rows belong to 4393 process 1, the next m2 rows belong to process 2, etc., where 4394 m0,m1,m2,.. are the input parameter 'm'. That is, each processor stores 4395 values corresponding to an [m x N] submatrix. 4396 4397 The columns are logically partitioned with the n0 columns belonging 4398 to 0th partition, the next n1 columns belonging to the next 4399 partition, etc., where n0,n1,n2... are the input parameter 'n'. 4400 4401 The DIAGONAL portion of the local submatrix on any given processor 4402 is the submatrix corresponding to the rows and columns m,n 4403 corresponding to the given processor. That is, the diagonal matrix on 4404 process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1] 4405 etc. The remaining portion of the local submatrix [m x (N-n)] 4406 constitutes the OFF-DIAGONAL portion. The example below better 4407 illustrates this concept. 4408 4409 For a square global matrix we define each processor's diagonal portion 4410 to be its local rows and the corresponding columns (a square submatrix); 4411 each processor's off-diagonal portion encompasses the remainder of the 4412 local matrix (a rectangular submatrix). 4413 4414 If `o_nnz` and `d_nnz` are specified, then `o_nz` and `d_nz` are ignored. 4415 4416 When calling this routine with a single process communicator, a matrix of 4417 type `MATSEQAIJ` is returned. If a matrix of type `MATMPIAIJ` is desired for this 4418 type of communicator, use the construction mechanism 4419 .vb 4420 MatCreate(..., &A); 4421 MatSetType(A, MATMPIAIJ); 4422 MatSetSizes(A, m, n, M, N); 4423 MatMPIAIJSetPreallocation(A, ...); 4424 .ve 4425 4426 By default, this format uses inodes (identical nodes) when possible. 4427 We search for consecutive rows with the same nonzero structure, thereby 4428 reusing matrix information to achieve increased efficiency. 4429 4430 Example Usage: 4431 Consider the following 8x8 matrix with 34 non-zero values, that is 4432 assembled across 3 processors. Let's assume that proc0 owns 3 rows, 4433 proc1 owns 3 rows, and proc2 owns 2 rows.
This division can be shown 4434 as follows: 4435 4436 .vb 4437 1 2 0 | 0 3 0 | 0 4 4438 Proc0 0 5 6 | 7 0 0 | 8 0 4439 9 0 10 | 11 0 0 | 12 0 4440 ------------------------------------- 4441 13 0 14 | 15 16 17 | 0 0 4442 Proc1 0 18 0 | 19 20 21 | 0 0 4443 0 0 0 | 22 23 0 | 24 0 4444 ------------------------------------- 4445 Proc2 25 26 27 | 0 0 28 | 29 0 4446 30 0 0 | 31 32 33 | 0 34 4447 .ve 4448 4449 This can be represented as a collection of submatrices as 4450 4451 .vb 4452 A B C 4453 D E F 4454 G H I 4455 .ve 4456 4457 Here the submatrices A,B,C are owned by proc0, D,E,F are 4458 owned by proc1, and G,H,I are owned by proc2. 4459 4460 The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively. 4461 The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively. 4462 The 'M','N' parameters are 8,8, and have the same values on all procs. 4463 4464 The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are 4465 submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices 4466 corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively. 4467 Internally, each processor stores the DIAGONAL part and the OFF-DIAGONAL 4468 part as `MATSEQAIJ` matrices. For example, proc1 will store [E] as a `MATSEQAIJ` 4469 matrix, and [DF] as another `MATSEQAIJ` matrix. 4470 4471 When the `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are 4472 allocated for every row of the local diagonal submatrix, and `o_nz` 4473 storage locations are allocated for every row of the OFF-DIAGONAL submatrix. 4474 One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros per local 4475 row for each of the local DIAGONAL and OFF-DIAGONAL submatrices. 4476 In this case, the values of `d_nz`,`o_nz` are 4477 .vb 4478 proc0 d_nz = 2, o_nz = 2 4479 proc1 d_nz = 3, o_nz = 2 4480 proc2 d_nz = 1, o_nz = 4 4481 .ve 4482 We are allocating m*(`d_nz`+`o_nz`) storage locations for every proc. This 4483 translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, and 2*(1+4)=10 4484 for proc2. That is, we are using 12+15+10=37 storage locations to store 4485 34 values. 4486 4487 When the `d_nnz`, `o_nnz` parameters are specified, the storage is specified 4488 for every row, corresponding to both the DIAGONAL and OFF-DIAGONAL submatrices. 4489 In the above case the values for d_nnz,o_nnz are 4490 .vb 4491 proc0 d_nnz = [2,2,2] and o_nnz = [2,2,2] 4492 proc1 d_nnz = [3,3,2] and o_nnz = [2,1,1] 4493 proc2 d_nnz = [1,1] and o_nnz = [4,4] 4494 .ve 4495 Here the space allocated is the sum of all the above values, i.e. 34, and 4496 hence preallocation is perfect.
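   As a concrete sketch (illustrative only; the value-setting loop and error checking are elided), proc0 of the example above could build its share of the matrix with the per-row counts listed:
.vb
   Mat      A;
   PetscInt d_nnz[3] = {2, 2, 2}, o_nnz[3] = {2, 2, 2}; /* proc0 values from the tables above */

   MatCreateAIJ(PETSC_COMM_WORLD, 3, 3, 8, 8, 0, d_nnz, 0, o_nnz, &A);
   /* ... MatSetValues() for this rank's rows ... */
   MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
.ve
   Each rank passes its own `m`, `n`, and nonzero counts; because `d_nnz` and `o_nnz` are supplied here, the scalar `d_nz` and `o_nz` arguments are ignored.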
4497 4498 .seealso: [](ch_matrices), `Mat`, [Sparse Matrix Creation](sec_matsparse), `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`, 4499 `MATMPIAIJ`, `MatCreateMPIAIJWithArrays()` 4500 @*/ 4501 PetscErrorCode MatCreateAIJ(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[], Mat *A) 4502 { 4503 PetscMPIInt size; 4504 4505 PetscFunctionBegin; 4506 PetscCall(MatCreate(comm, A)); 4507 PetscCall(MatSetSizes(*A, m, n, M, N)); 4508 PetscCallMPI(MPI_Comm_size(comm, &size)); 4509 if (size > 1) { 4510 PetscCall(MatSetType(*A, MATMPIAIJ)); 4511 PetscCall(MatMPIAIJSetPreallocation(*A, d_nz, d_nnz, o_nz, o_nnz)); 4512 } else { 4513 PetscCall(MatSetType(*A, MATSEQAIJ)); 4514 PetscCall(MatSeqAIJSetPreallocation(*A, d_nz, d_nnz)); 4515 } 4516 PetscFunctionReturn(PETSC_SUCCESS); 4517 } 4518 4519 /*MC 4520 MatMPIAIJGetSeqAIJF90 - Returns the local pieces of this distributed matrix 4521 4522 Synopsis: 4523 MatMPIAIJGetSeqAIJF90(Mat A, Mat Ad, Mat Ao, {PetscInt, pointer :: colmap(:)}, integer ierr) 4524 4525 Not Collective 4526 4527 Input Parameter: 4528 . A - the `MATMPIAIJ` matrix 4529 4530 Output Parameters: 4531 + Ad - the diagonal portion of the matrix 4532 . Ao - the off-diagonal portion of the matrix 4533 . colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix 4534 - ierr - error code 4535 4536 Level: advanced 4537 4538 Note: 4539 Use `MatMPIAIJRestoreSeqAIJF90()` when you no longer need access to the matrices and `colmap` 4540 4541 .seealso: [](ch_matrices), `Mat`, [](sec_fortranarrays), `Mat`, `MATMPIAIJ`, `MatMPIAIJGetSeqAIJ()`, `MatMPIAIJRestoreSeqAIJF90()` 4542 M*/ 4543 4544 /*MC 4545 MatMPIAIJRestoreSeqAIJF90 - call after `MatMPIAIJGetSeqAIJF90()` when you no longer need access to the matrices and `colmap` 4546 4547 Synopsis: 4548 MatMPIAIJRestoreSeqAIJF90(Mat A, Mat Ad, Mat Ao, {PetscInt, pointer :: colmap(:)}, integer ierr) 4549 4550 Not Collective 4551 4552 Input Parameters: 4553 + A - the `MATMPIAIJ` matrix 4554 . Ad - the diagonal portion of the matrix 4555 . Ao - the off-diagonal portion of the matrix 4556 . colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix 4557 - ierr - error code 4558 4559 Level: advanced 4560 4561 .seealso: [](ch_matrices), `Mat`, [](sec_fortranarrays), `Mat`, `MATMPIAIJ`, `MatMPIAIJGetSeqAIJ()`, `MatMPIAIJGetSeqAIJF90()` 4562 M*/ 4563 4564 /*@C 4565 MatMPIAIJGetSeqAIJ - Returns the local pieces of this distributed matrix 4566 4567 Not Collective 4568 4569 Input Parameter: 4570 . A - The `MATMPIAIJ` matrix 4571 4572 Output Parameters: 4573 + Ad - The local diagonal block as a `MATSEQAIJ` matrix 4574 . Ao - The local off-diagonal block as a `MATSEQAIJ` matrix 4575 - colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix 4576 4577 Level: intermediate 4578 4579 Note: 4580 The rows in `Ad` and `Ao` are in [0, Nr), where Nr is the number of local rows on this process. The columns 4581 in `Ad` are in [0, Nc), where Nc is the number of local columns. The columns in `Ao` are in [0, Nco), where Nco is 4582 the number of nonzero columns in the local off-diagonal piece of the matrix `A`. The array colmap maps these 4583 local column numbers to global column numbers in the original matrix.
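   For example, the local column indices of `Ao` can be translated to global indices as in this illustrative sketch:
.vb
   Mat             Ad, Ao;
   const PetscInt *colmap;
   PetscInt        j, Nco;

   MatMPIAIJGetSeqAIJ(A, &Ad, &Ao, &colmap);
   MatGetSize(Ao, NULL, &Nco);
   for (j = 0; j < Nco; j++) { /* colmap[j] is the global column of local column j of Ao */ }
.ve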
4584 4585 Fortran Notes: 4586 `MatMPIAIJGetSeqAIJ()` Fortran binding is deprecated (since PETSc 3.19), use `MatMPIAIJGetSeqAIJF90()` 4587 4588 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatMPIAIJGetSeqAIJF90()`, `MatMPIAIJRestoreSeqAIJF90()`, `MatMPIAIJGetLocalMat()`, `MatMPIAIJGetLocalMatCondensed()`, `MatCreateAIJ()`, `MATSEQAIJ` 4589 @*/ 4590 PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A, Mat *Ad, Mat *Ao, const PetscInt *colmap[]) 4591 { 4592 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 4593 PetscBool flg; 4594 4595 PetscFunctionBegin; 4596 PetscCall(PetscStrbeginswith(((PetscObject)A)->type_name, MATMPIAIJ, &flg)); 4597 PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "This function requires a MATMPIAIJ matrix as input"); 4598 if (Ad) *Ad = a->A; 4599 if (Ao) *Ao = a->B; 4600 if (colmap) *colmap = a->garray; 4601 PetscFunctionReturn(PETSC_SUCCESS); 4602 } 4603 4604 PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm, Mat inmat, PetscInt n, MatReuse scall, Mat *outmat) 4605 { 4606 PetscInt m, N, i, rstart, nnz, Ii; 4607 PetscInt *indx; 4608 PetscScalar *values; 4609 MatType rootType; 4610 4611 PetscFunctionBegin; 4612 PetscCall(MatGetSize(inmat, &m, &N)); 4613 if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */ 4614 PetscInt *dnz, *onz, sum, bs, cbs; 4615 4616 if (n == PETSC_DECIDE) PetscCall(PetscSplitOwnership(comm, &n, &N)); 4617 /* Check sum(n) = N */ 4618 PetscCall(MPIU_Allreduce(&n, &sum, 1, MPIU_INT, MPI_SUM, comm)); 4619 PetscCheck(sum == N, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Sum of local columns %" PetscInt_FMT " != global columns %" PetscInt_FMT, sum, N); 4620 4621 PetscCallMPI(MPI_Scan(&m, &rstart, 1, MPIU_INT, MPI_SUM, comm)); 4622 rstart -= m; 4623 4624 MatPreallocateBegin(comm, m, n, dnz, onz); 4625 for (i = 0; i < m; i++) { 4626 PetscCall(MatGetRow_SeqAIJ(inmat, i, &nnz, &indx, NULL)); 4627 PetscCall(MatPreallocateSet(i + rstart, nnz, indx, dnz, onz)); 4628 PetscCall(MatRestoreRow_SeqAIJ(inmat, i, &nnz, &indx, NULL)); 4629 } 4630 4631 PetscCall(MatCreate(comm, outmat)); 4632 PetscCall(MatSetSizes(*outmat, m, n, PETSC_DETERMINE, PETSC_DETERMINE)); 4633 PetscCall(MatGetBlockSizes(inmat, &bs, &cbs)); 4634 PetscCall(MatSetBlockSizes(*outmat, bs, cbs)); 4635 PetscCall(MatGetRootType_Private(inmat, &rootType)); 4636 PetscCall(MatSetType(*outmat, rootType)); 4637 PetscCall(MatSeqAIJSetPreallocation(*outmat, 0, dnz)); 4638 PetscCall(MatMPIAIJSetPreallocation(*outmat, 0, dnz, 0, onz)); 4639 MatPreallocateEnd(dnz, onz); 4640 PetscCall(MatSetOption(*outmat, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE)); 4641 } 4642 4643 /* numeric phase */ 4644 PetscCall(MatGetOwnershipRange(*outmat, &rstart, NULL)); 4645 for (i = 0; i < m; i++) { 4646 PetscCall(MatGetRow_SeqAIJ(inmat, i, &nnz, &indx, &values)); 4647 Ii = i + rstart; 4648 PetscCall(MatSetValues(*outmat, 1, &Ii, nnz, indx, values, INSERT_VALUES)); 4649 PetscCall(MatRestoreRow_SeqAIJ(inmat, i, &nnz, &indx, &values)); 4650 } 4651 PetscCall(MatAssemblyBegin(*outmat, MAT_FINAL_ASSEMBLY)); 4652 PetscCall(MatAssemblyEnd(*outmat, MAT_FINAL_ASSEMBLY)); 4653 PetscFunctionReturn(PETSC_SUCCESS); 4654 } 4655 4656 static PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(void *data) 4657 { 4658 Mat_Merge_SeqsToMPI *merge = (Mat_Merge_SeqsToMPI *)data; 4659 4660 PetscFunctionBegin; 4661 if (!merge) PetscFunctionReturn(PETSC_SUCCESS); 4662 PetscCall(PetscFree(merge->id_r)); 4663 PetscCall(PetscFree(merge->len_s)); 4664 PetscCall(PetscFree(merge->len_r)); 4665 PetscCall(PetscFree(merge->bi)); 4666 PetscCall(PetscFree(merge->bj)); 
4667 PetscCall(PetscFree(merge->buf_ri[0])); 4668 PetscCall(PetscFree(merge->buf_ri)); 4669 PetscCall(PetscFree(merge->buf_rj[0])); 4670 PetscCall(PetscFree(merge->buf_rj)); 4671 PetscCall(PetscFree(merge->coi)); 4672 PetscCall(PetscFree(merge->coj)); 4673 PetscCall(PetscFree(merge->owners_co)); 4674 PetscCall(PetscLayoutDestroy(&merge->rowmap)); 4675 PetscCall(PetscFree(merge)); 4676 PetscFunctionReturn(PETSC_SUCCESS); 4677 } 4678 4679 #include <../src/mat/utils/freespace.h> 4680 #include <petscbt.h> 4681 4682 PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat, Mat mpimat) 4683 { 4684 MPI_Comm comm; 4685 Mat_SeqAIJ *a = (Mat_SeqAIJ *)seqmat->data; 4686 PetscMPIInt size, rank, taga, *len_s; 4687 PetscInt N = mpimat->cmap->N, i, j, *owners, *ai = a->i, *aj; 4688 PetscInt proc, m; 4689 PetscInt **buf_ri, **buf_rj; 4690 PetscInt k, anzi, *bj_i, *bi, *bj, arow, bnzi, nextaj; 4691 PetscInt nrows, **buf_ri_k, **nextrow, **nextai; 4692 MPI_Request *s_waits, *r_waits; 4693 MPI_Status *status; 4694 const MatScalar *aa, *a_a; 4695 MatScalar **abuf_r, *ba_i; 4696 Mat_Merge_SeqsToMPI *merge; 4697 PetscContainer container; 4698 4699 PetscFunctionBegin; 4700 PetscCall(PetscObjectGetComm((PetscObject)mpimat, &comm)); 4701 PetscCall(PetscLogEventBegin(MAT_Seqstompinum, seqmat, 0, 0, 0)); 4702 4703 PetscCallMPI(MPI_Comm_size(comm, &size)); 4704 PetscCallMPI(MPI_Comm_rank(comm, &rank)); 4705 4706 PetscCall(PetscObjectQuery((PetscObject)mpimat, "MatMergeSeqsToMPI", (PetscObject *)&container)); 4707 PetscCheck(container, PetscObjectComm((PetscObject)mpimat), PETSC_ERR_PLIB, "Mat not created from MatCreateMPIAIJSumSeqAIJSymbolic"); 4708 PetscCall(PetscContainerGetPointer(container, (void **)&merge)); 4709 PetscCall(MatSeqAIJGetArrayRead(seqmat, &a_a)); 4710 aa = a_a; 4711 4712 bi = merge->bi; 4713 bj = merge->bj; 4714 buf_ri = merge->buf_ri; 4715 buf_rj = merge->buf_rj; 4716 4717 PetscCall(PetscMalloc1(size, &status)); 4718 owners = merge->rowmap->range; 4719 len_s = merge->len_s; 4720 4721 /* send and recv matrix values */ 4722 PetscCall(PetscObjectGetNewTag((PetscObject)mpimat, &taga)); 4723 PetscCall(PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits)); 4724 4725 PetscCall(PetscMalloc1(merge->nsend + 1, &s_waits)); 4726 for (proc = 0, k = 0; proc < size; proc++) { 4727 if (!len_s[proc]) continue; 4728 i = owners[proc]; 4729 PetscCallMPI(MPI_Isend(aa + ai[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k)); 4730 k++; 4731 } 4732 4733 if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, r_waits, status)); 4734 if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, s_waits, status)); 4735 PetscCall(PetscFree(status)); 4736 4737 PetscCall(PetscFree(s_waits)); 4738 PetscCall(PetscFree(r_waits)); 4739 4740 /* insert mat values of mpimat */ 4741 PetscCall(PetscMalloc1(N, &ba_i)); 4742 PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextai)); 4743 4744 for (k = 0; k < merge->nrecv; k++) { 4745 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 4746 nrows = *(buf_ri_k[k]); 4747 nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */ 4748 nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */ 4749 } 4750 4751 /* set values of ba */ 4752 m = merge->rowmap->n; 4753 for (i = 0; i < m; i++) { 4754 arow = owners[rank] + i; 4755 bj_i = bj + bi[i]; /* col indices of the i-th row of mpimat */ 4756 bnzi = bi[i + 1] - bi[i]; 4757 
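    /* accumulate the i-th merged row in ba_i: the local seqmat row and every received row that targets this row are folded in below; all column lists are sorted, so a single forward scan of bj_i aligns them */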
PetscCall(PetscArrayzero(ba_i, bnzi)); 4758 4759 /* add local non-zero vals of this proc's seqmat into ba */ 4760 anzi = ai[arow + 1] - ai[arow]; 4761 aj = a->j + ai[arow]; 4762 aa = a_a + ai[arow]; 4763 nextaj = 0; 4764 for (j = 0; nextaj < anzi; j++) { 4765 if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */ 4766 ba_i[j] += aa[nextaj++]; 4767 } 4768 } 4769 4770 /* add received vals into ba */ 4771 for (k = 0; k < merge->nrecv; k++) { /* k-th received message */ 4772 /* i-th row */ 4773 if (i == *nextrow[k]) { 4774 anzi = *(nextai[k] + 1) - *nextai[k]; 4775 aj = buf_rj[k] + *(nextai[k]); 4776 aa = abuf_r[k] + *(nextai[k]); 4777 nextaj = 0; 4778 for (j = 0; nextaj < anzi; j++) { 4779 if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */ 4780 ba_i[j] += aa[nextaj++]; 4781 } 4782 } 4783 nextrow[k]++; 4784 nextai[k]++; 4785 } 4786 } 4787 PetscCall(MatSetValues(mpimat, 1, &arow, bnzi, bj_i, ba_i, INSERT_VALUES)); 4788 } 4789 PetscCall(MatSeqAIJRestoreArrayRead(seqmat, &a_a)); 4790 PetscCall(MatAssemblyBegin(mpimat, MAT_FINAL_ASSEMBLY)); 4791 PetscCall(MatAssemblyEnd(mpimat, MAT_FINAL_ASSEMBLY)); 4792 4793 PetscCall(PetscFree(abuf_r[0])); 4794 PetscCall(PetscFree(abuf_r)); 4795 PetscCall(PetscFree(ba_i)); 4796 PetscCall(PetscFree3(buf_ri_k, nextrow, nextai)); 4797 PetscCall(PetscLogEventEnd(MAT_Seqstompinum, seqmat, 0, 0, 0)); 4798 PetscFunctionReturn(PETSC_SUCCESS); 4799 } 4800 4801 PetscErrorCode MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm, Mat seqmat, PetscInt m, PetscInt n, Mat *mpimat) 4802 { 4803 Mat B_mpi; 4804 Mat_SeqAIJ *a = (Mat_SeqAIJ *)seqmat->data; 4805 PetscMPIInt size, rank, tagi, tagj, *len_s, *len_si, *len_ri; 4806 PetscInt **buf_rj, **buf_ri, **buf_ri_k; 4807 PetscInt M = seqmat->rmap->n, N = seqmat->cmap->n, i, *owners, *ai = a->i, *aj = a->j; 4808 PetscInt len, proc, *dnz, *onz, bs, cbs; 4809 PetscInt k, anzi, *bi, *bj, *lnk, nlnk, arow, bnzi; 4810 PetscInt nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextai; 4811 MPI_Request *si_waits, *sj_waits, *ri_waits, *rj_waits; 4812 MPI_Status *status; 4813 PetscFreeSpaceList free_space = NULL, current_space = NULL; 4814 PetscBT lnkbt; 4815 Mat_Merge_SeqsToMPI *merge; 4816 PetscContainer container; 4817 4818 PetscFunctionBegin; 4819 PetscCall(PetscLogEventBegin(MAT_Seqstompisym, seqmat, 0, 0, 0)); 4820 4821 /* make sure it is a PETSc comm */ 4822 PetscCall(PetscCommDuplicate(comm, &comm, NULL)); 4823 PetscCallMPI(MPI_Comm_size(comm, &size)); 4824 PetscCallMPI(MPI_Comm_rank(comm, &rank)); 4825 4826 PetscCall(PetscNew(&merge)); 4827 PetscCall(PetscMalloc1(size, &status)); 4828 4829 /* determine row ownership */ 4830 PetscCall(PetscLayoutCreate(comm, &merge->rowmap)); 4831 PetscCall(PetscLayoutSetLocalSize(merge->rowmap, m)); 4832 PetscCall(PetscLayoutSetSize(merge->rowmap, M)); 4833 PetscCall(PetscLayoutSetBlockSize(merge->rowmap, 1)); 4834 PetscCall(PetscLayoutSetUp(merge->rowmap)); 4835 PetscCall(PetscMalloc1(size, &len_si)); 4836 PetscCall(PetscMalloc1(size, &merge->len_s)); 4837 4838 m = merge->rowmap->n; 4839 owners = merge->rowmap->range; 4840 4841 /* determine the number of messages to send, their lengths */ 4842 len_s = merge->len_s; 4843 4844 len = 0; /* length of buf_si[] */ 4845 merge->nsend = 0; 4846 for (proc = 0; proc < size; proc++) { 4847 len_si[proc] = 0; 4848 if (proc == rank) { 4849 len_s[proc] = 0; 4850 } else { 4851 len_si[proc] = owners[proc + 1] - owners[proc] + 1; 4852 len_s[proc] = ai[owners[proc + 1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */ 4853 } 4854 if (len_s[proc]) { 4855 
merge->nsend++; 4856 nrows = 0; 4857 for (i = owners[proc]; i < owners[proc + 1]; i++) { 4858 if (ai[i + 1] > ai[i]) nrows++; 4859 } 4860 len_si[proc] = 2 * (nrows + 1); 4861 len += len_si[proc]; 4862 } 4863 } 4864 4865 /* determine the number and length of messages to receive for ij-structure */ 4866 PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv)); 4867 PetscCall(PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri)); 4868 4869 /* post the Irecv of j-structure */ 4870 PetscCall(PetscCommGetNewTag(comm, &tagj)); 4871 PetscCall(PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rj_waits)); 4872 4873 /* post the Isend of j-structure */ 4874 PetscCall(PetscMalloc2(merge->nsend, &si_waits, merge->nsend, &sj_waits)); 4875 4876 for (proc = 0, k = 0; proc < size; proc++) { 4877 if (!len_s[proc]) continue; 4878 i = owners[proc]; 4879 PetscCallMPI(MPI_Isend(aj + ai[i], len_s[proc], MPIU_INT, proc, tagj, comm, sj_waits + k)); 4880 k++; 4881 } 4882 4883 /* receives and sends of j-structure are complete */ 4884 if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, rj_waits, status)); 4885 if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, sj_waits, status)); 4886 4887 /* send and recv i-structure */ 4888 PetscCall(PetscCommGetNewTag(comm, &tagi)); 4889 PetscCall(PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &ri_waits)); 4890 4891 PetscCall(PetscMalloc1(len + 1, &buf_s)); 4892 buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 4893 for (proc = 0, k = 0; proc < size; proc++) { 4894 if (!len_s[proc]) continue; 4895 /* form outgoing message for i-structure: 4896 buf_si[0]: nrows to be sent 4897 [1:nrows]: row index (global) 4898 [nrows+1:2*nrows+1]: i-structure index 4899 */ 4900 nrows = len_si[proc] / 2 - 1; 4901 buf_si_i = buf_si + nrows + 1; 4902 buf_si[0] = nrows; 4903 buf_si_i[0] = 0; 4904 nrows = 0; 4905 for (i = owners[proc]; i < owners[proc + 1]; i++) { 4906 anzi = ai[i + 1] - ai[i]; 4907 if (anzi) { 4908 buf_si_i[nrows + 1] = buf_si_i[nrows] + anzi; /* i-structure */ 4909 buf_si[nrows + 1] = i - owners[proc]; /* local row index */ 4910 nrows++; 4911 } 4912 } 4913 PetscCallMPI(MPI_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, si_waits + k)); 4914 k++; 4915 buf_si += len_si[proc]; 4916 } 4917 4918 if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, ri_waits, status)); 4919 if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, si_waits, status)); 4920 4921 PetscCall(PetscInfo(seqmat, "nsend: %d, nrecv: %d\n", merge->nsend, merge->nrecv)); 4922 for (i = 0; i < merge->nrecv; i++) PetscCall(PetscInfo(seqmat, "recv len_ri=%d, len_rj=%d from [%d]\n", len_ri[i], merge->len_r[i], merge->id_r[i])); 4923 4924 PetscCall(PetscFree(len_si)); 4925 PetscCall(PetscFree(len_ri)); 4926 PetscCall(PetscFree(rj_waits)); 4927 PetscCall(PetscFree2(si_waits, sj_waits)); 4928 PetscCall(PetscFree(ri_waits)); 4929 PetscCall(PetscFree(buf_s)); 4930 PetscCall(PetscFree(status)); 4931 4932 /* compute a local seq matrix in each processor */ 4933 /* allocate bi array and free space for accumulating nonzero column info */ 4934 PetscCall(PetscMalloc1(m + 1, &bi)); 4935 bi[0] = 0; 4936 4937 /* create and initialize a linked list */ 4938 nlnk = N + 1; 4939 PetscCall(PetscLLCreate(N, N, nlnk, lnk, lnkbt)); 4940 4941 /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */ 4942 len = ai[owners[rank + 1]] - ai[owners[rank]]; 4943 
PetscCall(PetscFreeSpaceGet(PetscIntMultTruncate(2, len) + 1, &free_space)); 4944 4945 current_space = free_space; 4946 4947 /* determine symbolic info for each local row */ 4948 PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextai)); 4949 4950 for (k = 0; k < merge->nrecv; k++) { 4951 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 4952 nrows = *buf_ri_k[k]; 4953 nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */ 4954 nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */ 4955 } 4956 4957 MatPreallocateBegin(comm, m, n, dnz, onz); 4958 len = 0; 4959 for (i = 0; i < m; i++) { 4960 bnzi = 0; 4961 /* add local non-zero cols of this proc's seqmat into lnk */ 4962 arow = owners[rank] + i; 4963 anzi = ai[arow + 1] - ai[arow]; 4964 aj = a->j + ai[arow]; 4965 PetscCall(PetscLLAddSorted(anzi, aj, N, &nlnk, lnk, lnkbt)); 4966 bnzi += nlnk; 4967 /* add received col data into lnk */ 4968 for (k = 0; k < merge->nrecv; k++) { /* k-th received message */ 4969 if (i == *nextrow[k]) { /* i-th row */ 4970 anzi = *(nextai[k] + 1) - *nextai[k]; 4971 aj = buf_rj[k] + *nextai[k]; 4972 PetscCall(PetscLLAddSorted(anzi, aj, N, &nlnk, lnk, lnkbt)); 4973 bnzi += nlnk; 4974 nextrow[k]++; 4975 nextai[k]++; 4976 } 4977 } 4978 if (len < bnzi) len = bnzi; /* =max(bnzi) */ 4979 4980 /* if free space is not available, make more free space */ 4981 if (current_space->local_remaining < bnzi) PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(bnzi, current_space->total_array_size), &current_space)); 4982 /* copy data into free space, then initialize lnk */ 4983 PetscCall(PetscLLClean(N, N, bnzi, lnk, current_space->array, lnkbt)); 4984 PetscCall(MatPreallocateSet(i + owners[rank], bnzi, current_space->array, dnz, onz)); 4985 4986 current_space->array += bnzi; 4987 current_space->local_used += bnzi; 4988 current_space->local_remaining -= bnzi; 4989 4990 bi[i + 1] = bi[i] + bnzi; 4991 } 4992 4993 PetscCall(PetscFree3(buf_ri_k, nextrow, nextai)); 4994 4995 PetscCall(PetscMalloc1(bi[m] + 1, &bj)); 4996 PetscCall(PetscFreeSpaceContiguous(&free_space, bj)); 4997 PetscCall(PetscLLDestroy(lnk, lnkbt)); 4998 4999 /* create symbolic parallel matrix B_mpi */ 5000 PetscCall(MatGetBlockSizes(seqmat, &bs, &cbs)); 5001 PetscCall(MatCreate(comm, &B_mpi)); 5002 if (n == PETSC_DECIDE) { 5003 PetscCall(MatSetSizes(B_mpi, m, n, PETSC_DETERMINE, N)); 5004 } else { 5005 PetscCall(MatSetSizes(B_mpi, m, n, PETSC_DETERMINE, PETSC_DETERMINE)); 5006 } 5007 PetscCall(MatSetBlockSizes(B_mpi, bs, cbs)); 5008 PetscCall(MatSetType(B_mpi, MATMPIAIJ)); 5009 PetscCall(MatMPIAIJSetPreallocation(B_mpi, 0, dnz, 0, onz)); 5010 MatPreallocateEnd(dnz, onz); 5011 PetscCall(MatSetOption(B_mpi, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE)); 5012 5013 /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */ 5014 B_mpi->assembled = PETSC_FALSE; 5015 merge->bi = bi; 5016 merge->bj = bj; 5017 merge->buf_ri = buf_ri; 5018 merge->buf_rj = buf_rj; 5019 merge->coi = NULL; 5020 merge->coj = NULL; 5021 merge->owners_co = NULL; 5022 5023 PetscCall(PetscCommDestroy(&comm)); 5024 5025 /* attach the supporting struct to B_mpi for reuse */ 5026 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 5027 PetscCall(PetscContainerSetPointer(container, merge)); 5028 PetscCall(PetscContainerSetUserDestroy(container, MatDestroy_MPIAIJ_SeqsToMPI)); 5029 PetscCall(PetscObjectCompose((PetscObject)B_mpi, "MatMergeSeqsToMPI",
(PetscObject)container)); 5030 PetscCall(PetscContainerDestroy(&container)); 5031 *mpimat = B_mpi; 5032 5033 PetscCall(PetscLogEventEnd(MAT_Seqstompisym, seqmat, 0, 0, 0)); 5034 PetscFunctionReturn(PETSC_SUCCESS); 5035 } 5036 5037 /*@C 5038 MatCreateMPIAIJSumSeqAIJ - Creates a `MATMPIAIJ` matrix by adding sequential 5039 matrices from each processor 5040 5041 Collective 5042 5043 Input Parameters: 5044 + comm - the communicator the parallel matrix will live on 5045 . seqmat - the input sequential matrix 5046 . m - number of local rows (or `PETSC_DECIDE`) 5047 . n - number of local columns (or `PETSC_DECIDE`) 5048 - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX` 5049 5050 Output Parameter: 5051 . mpimat - the parallel matrix generated 5052 5053 Level: advanced 5054 5055 Note: 5056 The dimensions of the sequential matrix in each processor MUST be the same. 5057 The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be 5058 destroyed when mpimat is destroyed. Call `PetscObjectQuery()` to access seqmat. 5059 5060 .seealso: [](ch_matrices), `Mat`, `MatCreateAIJ()` 5061 @*/ 5062 PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm, Mat seqmat, PetscInt m, PetscInt n, MatReuse scall, Mat *mpimat) 5063 { 5064 PetscMPIInt size; 5065 5066 PetscFunctionBegin; 5067 PetscCallMPI(MPI_Comm_size(comm, &size)); 5068 if (size == 1) { 5069 PetscCall(PetscLogEventBegin(MAT_Seqstompi, seqmat, 0, 0, 0)); 5070 if (scall == MAT_INITIAL_MATRIX) { 5071 PetscCall(MatDuplicate(seqmat, MAT_COPY_VALUES, mpimat)); 5072 } else { 5073 PetscCall(MatCopy(seqmat, *mpimat, SAME_NONZERO_PATTERN)); 5074 } 5075 PetscCall(PetscLogEventEnd(MAT_Seqstompi, seqmat, 0, 0, 0)); 5076 PetscFunctionReturn(PETSC_SUCCESS); 5077 } 5078 PetscCall(PetscLogEventBegin(MAT_Seqstompi, seqmat, 0, 0, 0)); 5079 if (scall == MAT_INITIAL_MATRIX) PetscCall(MatCreateMPIAIJSumSeqAIJSymbolic(comm, seqmat, m, n, mpimat)); 5080 PetscCall(MatCreateMPIAIJSumSeqAIJNumeric(seqmat, *mpimat)); 5081 PetscCall(PetscLogEventEnd(MAT_Seqstompi, seqmat, 0, 0, 0)); 5082 PetscFunctionReturn(PETSC_SUCCESS); 5083 } 5084 5085 /*@ 5086 MatAIJGetLocalMat - Creates a `MATSEQAIJ` from a `MATAIJ` matrix. 5087 5088 Not Collective 5089 5090 Input Parameter: 5091 . A - the matrix 5092 5093 Output Parameter: 5094 . A_loc - the local sequential matrix generated 5095 5096 Level: developer 5097 5098 Notes: 5099 The matrix is created by taking `A`'s local rows and putting them into a sequential matrix 5100 with `mlocal` rows and `n` columns, where `mlocal` is obtained with `MatGetLocalSize()` and 5101 `n` is the global column count obtained with `MatGetSize()`. 5102 5103 In other words, it combines the two parts of a parallel `MATMPIAIJ` matrix on each process into a single matrix. 5104 5105 For parallel matrices this creates an entirely new matrix. If the matrix is sequential it merely increases the reference count. 5106 5107 Destroy the matrix with `MatDestroy()` 5108 5109 .seealso: [](ch_matrices), `Mat`, `MatMPIAIJGetLocalMat()` 5110 @*/ 5111 PetscErrorCode MatAIJGetLocalMat(Mat A, Mat *A_loc) 5112 { 5113 PetscBool mpi; 5114 5115 PetscFunctionBegin; 5116 PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIAIJ, &mpi)); 5117 if (mpi) { 5118 PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, A_loc)); 5119 } else { 5120 *A_loc = A; 5121 PetscCall(PetscObjectReference((PetscObject)*A_loc)); 5122 } 5123 PetscFunctionReturn(PETSC_SUCCESS); 5124 } 5125 5126 /*@ 5127 MatMPIAIJGetLocalMat - Creates a `MATSEQAIJ` from a `MATMPIAIJ` matrix.
5128 5129 Not Collective 5130 5131 Input Parameters: 5132 + A - the matrix 5133 - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX` 5134 5135 Output Parameter: 5136 . A_loc - the local sequential matrix generated 5137 5138 Level: developer 5139 5140 Notes: 5141 The matrix is created by taking all `A`'s local rows and putting them into a sequential 5142 matrix with `mlocal` rows and `n` columns. `mlocal` is the row count obtained with 5143 `MatGetLocalSize()` and `n` is the global column count obtained with `MatGetSize()`. 5144 5145 In other words, it combines the two parts of a parallel `MATMPIAIJ` matrix on each process into a single matrix. 5146 5147 When `A` is sequential and `MAT_INITIAL_MATRIX` is requested, the matrix returned is the diagonal part of `A` (which contains the entire matrix), 5148 with its reference count increased by one. Hence changing values of `A_loc` changes `A`. If `MAT_REUSE_MATRIX` is requested on a sequential matrix 5149 then `MatCopy`(Adiag,*`A_loc`,`SAME_NONZERO_PATTERN`) is called to fill `A_loc`. Thus one can preallocate the appropriate sequential matrix `A_loc` 5150 and then call this routine with `MAT_REUSE_MATRIX`. In this case, one can modify the values of `A_loc` without affecting the original sequential matrix. 5151 5152 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatGetOwnershipRange()`, `MatMPIAIJGetLocalMatCondensed()`, `MatMPIAIJGetLocalMatMerge()` 5153 @*/ 5154 PetscErrorCode MatMPIAIJGetLocalMat(Mat A, MatReuse scall, Mat *A_loc) 5155 { 5156 Mat_MPIAIJ *mpimat = (Mat_MPIAIJ *)A->data; 5157 Mat_SeqAIJ *mat, *a, *b; 5158 PetscInt *ai, *aj, *bi, *bj, *cmap = mpimat->garray; 5159 const PetscScalar *aa, *ba, *aav, *bav; 5160 PetscScalar *ca, *cam; 5161 PetscMPIInt size; 5162 PetscInt am = A->rmap->n, i, j, k, cstart = A->cmap->rstart; 5163 PetscInt *ci, *cj, col, ncols_d, ncols_o, jo; 5164 PetscBool match; 5165 5166 PetscFunctionBegin; 5167 PetscCall(PetscStrbeginswith(((PetscObject)A)->type_name, MATMPIAIJ, &match)); 5168 PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPIAIJ matrix as input"); 5169 PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size)); 5170 if (size == 1) { 5171 if (scall == MAT_INITIAL_MATRIX) { 5172 PetscCall(PetscObjectReference((PetscObject)mpimat->A)); 5173 *A_loc = mpimat->A; 5174 } else if (scall == MAT_REUSE_MATRIX) { 5175 PetscCall(MatCopy(mpimat->A, *A_loc, SAME_NONZERO_PATTERN)); 5176 } 5177 PetscFunctionReturn(PETSC_SUCCESS); 5178 } 5179 5180 PetscCall(PetscLogEventBegin(MAT_Getlocalmat, A, 0, 0, 0)); 5181 a = (Mat_SeqAIJ *)(mpimat->A)->data; 5182 b = (Mat_SeqAIJ *)(mpimat->B)->data; 5183 ai = a->i; 5184 aj = a->j; 5185 bi = b->i; 5186 bj = b->j; 5187 PetscCall(MatSeqAIJGetArrayRead(mpimat->A, &aav)); 5188 PetscCall(MatSeqAIJGetArrayRead(mpimat->B, &bav)); 5189 aa = aav; 5190 ba = bav; 5191 if (scall == MAT_INITIAL_MATRIX) { 5192 PetscCall(PetscMalloc1(1 + am, &ci)); 5193 ci[0] = 0; 5194 for (i = 0; i < am; i++) ci[i + 1] = ci[i] + (ai[i + 1] - ai[i]) + (bi[i + 1] - bi[i]); 5195 PetscCall(PetscMalloc1(1 + ci[am], &cj)); 5196 PetscCall(PetscMalloc1(1 + ci[am], &ca)); 5197 k = 0; 5198 for (i = 0; i < am; i++) { 5199 ncols_o = bi[i + 1] - bi[i]; 5200 ncols_d = ai[i + 1] - ai[i]; 5201 /* off-diagonal portion of A */ 5202 for (jo = 0; jo < ncols_o; jo++) { 5203 col = cmap[*bj]; 5204 if (col >= cstart) break; 5205 cj[k] = col; 5206 bj++; 5207 ca[k++] = *ba++; 5208 } 5209 /* diagonal portion of A */ 5210 for (j = 0; j < ncols_d; j++) { 5211 cj[k] = cstart + *aj++; 5212 ca[k++]
= *aa++; 5213 } 5214 /* off-diagonal portion of A */ 5215 for (j = jo; j < ncols_o; j++) { 5216 cj[k] = cmap[*bj++]; 5217 ca[k++] = *ba++; 5218 } 5219 } 5220 /* put together the new matrix */ 5221 PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, am, A->cmap->N, ci, cj, ca, A_loc)); 5222 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 5223 /* Since these are PETSc arrays, change flags to free them as necessary. */ 5224 mat = (Mat_SeqAIJ *)(*A_loc)->data; 5225 mat->free_a = PETSC_TRUE; 5226 mat->free_ij = PETSC_TRUE; 5227 mat->nonew = 0; 5228 } else if (scall == MAT_REUSE_MATRIX) { 5229 mat = (Mat_SeqAIJ *)(*A_loc)->data; 5230 ci = mat->i; 5231 cj = mat->j; 5232 PetscCall(MatSeqAIJGetArrayWrite(*A_loc, &cam)); 5233 for (i = 0; i < am; i++) { 5234 /* off-diagonal portion of A */ 5235 ncols_o = bi[i + 1] - bi[i]; 5236 for (jo = 0; jo < ncols_o; jo++) { 5237 col = cmap[*bj]; 5238 if (col >= cstart) break; 5239 *cam++ = *ba++; 5240 bj++; 5241 } 5242 /* diagonal portion of A */ 5243 ncols_d = ai[i + 1] - ai[i]; 5244 for (j = 0; j < ncols_d; j++) *cam++ = *aa++; 5245 /* off-diagonal portion of A */ 5246 for (j = jo; j < ncols_o; j++) { 5247 *cam++ = *ba++; 5248 bj++; 5249 } 5250 } 5251 PetscCall(MatSeqAIJRestoreArrayWrite(*A_loc, &cam)); 5252 } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Invalid MatReuse %d", (int)scall); 5253 PetscCall(MatSeqAIJRestoreArrayRead(mpimat->A, &aav)); 5254 PetscCall(MatSeqAIJRestoreArrayRead(mpimat->B, &bav)); 5255 PetscCall(PetscLogEventEnd(MAT_Getlocalmat, A, 0, 0, 0)); 5256 PetscFunctionReturn(PETSC_SUCCESS); 5257 } 5258 5259 /*@ 5260 MatMPIAIJGetLocalMatMerge - Creates a `MATSEQAIJ` from a `MATMPIAIJ` matrix by taking all its local rows and putting them into a sequential matrix with 5261 mlocal rows and n columns. 
Here n is the sum of the numbers of columns of the diagonal and off-diagonal parts. 5262 5263 Not Collective 5264 5265 Input Parameters: 5266 + A - the matrix 5267 - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX` 5268 5269 Output Parameters: 5270 + glob - sequential `IS` with global indices associated with the columns of the local sequential matrix generated (can be `NULL`) 5271 - A_loc - the local sequential matrix generated 5272 5273 Level: developer 5274 5275 Note: 5276 This is different from `MatMPIAIJGetLocalMat()` since the first columns in the returned matrix are those associated with the diagonal 5277 part, then those associated with the off-diagonal part (in its local ordering). 5278 5279 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatGetOwnershipRange()`, `MatMPIAIJGetLocalMat()`, `MatMPIAIJGetLocalMatCondensed()` 5280 @*/ 5281 PetscErrorCode MatMPIAIJGetLocalMatMerge(Mat A, MatReuse scall, IS *glob, Mat *A_loc) 5282 { 5283 Mat Ao, Ad; 5284 const PetscInt *cmap; 5285 PetscMPIInt size; 5286 PetscErrorCode (*f)(Mat, MatReuse, IS *, Mat *); 5287 5288 PetscFunctionBegin; 5289 PetscCall(MatMPIAIJGetSeqAIJ(A, &Ad, &Ao, &cmap)); 5290 PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size)); 5291 if (size == 1) { 5292 if (scall == MAT_INITIAL_MATRIX) { 5293 PetscCall(PetscObjectReference((PetscObject)Ad)); 5294 *A_loc = Ad; 5295 } else if (scall == MAT_REUSE_MATRIX) { 5296 PetscCall(MatCopy(Ad, *A_loc, SAME_NONZERO_PATTERN)); 5297 } 5298 if (glob) PetscCall(ISCreateStride(PetscObjectComm((PetscObject)Ad), Ad->cmap->n, Ad->cmap->rstart, 1, glob)); 5299 PetscFunctionReturn(PETSC_SUCCESS); 5300 } 5301 PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatMPIAIJGetLocalMatMerge_C", &f)); 5302 PetscCall(PetscLogEventBegin(MAT_Getlocalmat, A, 0, 0, 0)); 5303 if (f) { 5304 PetscCall((*f)(A, scall, glob, A_loc)); 5305 } else { 5306 Mat_SeqAIJ *a = (Mat_SeqAIJ *)Ad->data; 5307 Mat_SeqAIJ *b = (Mat_SeqAIJ *)Ao->data; 5308 Mat_SeqAIJ *c; 5309 PetscInt *ai = a->i, *aj = a->j; 5310 PetscInt *bi = b->i, *bj = b->j; 5311 PetscInt *ci, *cj; 5312 const PetscScalar *aa, *ba; 5313 PetscScalar *ca; 5314 PetscInt i, j, am, dn, on; 5315 5316 PetscCall(MatGetLocalSize(Ad, &am, &dn)); 5317 PetscCall(MatGetLocalSize(Ao, NULL, &on)); 5318 PetscCall(MatSeqAIJGetArrayRead(Ad, &aa)); 5319 PetscCall(MatSeqAIJGetArrayRead(Ao, &ba)); 5320 if (scall == MAT_INITIAL_MATRIX) { 5321 PetscInt k; 5322 PetscCall(PetscMalloc1(1 + am, &ci)); 5323 PetscCall(PetscMalloc1(ai[am] + bi[am], &cj)); 5324 PetscCall(PetscMalloc1(ai[am] + bi[am], &ca)); 5325 ci[0] = 0; 5326 for (i = 0, k = 0; i < am; i++) { 5327 const PetscInt ncols_o = bi[i + 1] - bi[i]; 5328 const PetscInt ncols_d = ai[i + 1] - ai[i]; 5329 ci[i + 1] = ci[i] + ncols_o + ncols_d; 5330 /* diagonal portion of A */ 5331 for (j = 0; j < ncols_d; j++, k++) { 5332 cj[k] = *aj++; 5333 ca[k] = *aa++; 5334 } 5335 /* off-diagonal portion of A */ 5336 for (j = 0; j < ncols_o; j++, k++) { 5337 cj[k] = dn + *bj++; 5338 ca[k] = *ba++; 5339 } 5340 } 5341 /* put together the new matrix */ 5342 PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, am, dn + on, ci, cj, ca, A_loc)); 5343 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 5344 /* Since these are PETSc arrays, change flags to free them as necessary.
*/ 5345 c = (Mat_SeqAIJ *)(*A_loc)->data; 5346 c->free_a = PETSC_TRUE; 5347 c->free_ij = PETSC_TRUE; 5348 c->nonew = 0; 5349 PetscCall(MatSetType(*A_loc, ((PetscObject)Ad)->type_name)); 5350 } else if (scall == MAT_REUSE_MATRIX) { 5351 PetscCall(MatSeqAIJGetArrayWrite(*A_loc, &ca)); 5352 for (i = 0; i < am; i++) { 5353 const PetscInt ncols_d = ai[i + 1] - ai[i]; 5354 const PetscInt ncols_o = bi[i + 1] - bi[i]; 5355 /* diagonal portion of A */ 5356 for (j = 0; j < ncols_d; j++) *ca++ = *aa++; 5357 /* off-diagonal portion of A */ 5358 for (j = 0; j < ncols_o; j++) *ca++ = *ba++; 5359 } 5360 PetscCall(MatSeqAIJRestoreArrayWrite(*A_loc, &ca)); 5361 } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Invalid MatReuse %d", (int)scall); 5362 PetscCall(MatSeqAIJRestoreArrayRead(Ad, &aa)); 5363 PetscCall(MatSeqAIJRestoreArrayRead(Ao, &ba)); 5364 if (glob) { 5365 PetscInt cst, *gidx; 5366 5367 PetscCall(MatGetOwnershipRangeColumn(A, &cst, NULL)); 5368 PetscCall(PetscMalloc1(dn + on, &gidx)); 5369 for (i = 0; i < dn; i++) gidx[i] = cst + i; 5370 for (i = 0; i < on; i++) gidx[i + dn] = cmap[i]; 5371 PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)Ad), dn + on, gidx, PETSC_OWN_POINTER, glob)); 5372 } 5373 } 5374 PetscCall(PetscLogEventEnd(MAT_Getlocalmat, A, 0, 0, 0)); 5375 PetscFunctionReturn(PETSC_SUCCESS); 5376 } 5377 5378 /*@C 5379 MatMPIAIJGetLocalMatCondensed - Creates a `MATSEQAIJ` matrix from a `MATMPIAIJ` matrix by taking all its local rows and NON-ZERO columns 5380 5381 Not Collective 5382 5383 Input Parameters: 5384 + A - the matrix 5385 . scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX` 5386 . row - index set of rows to extract (or `NULL`) 5387 - col - index set of columns to extract (or `NULL`) 5388 5389 Output Parameter: 5390 . A_loc - the local sequential matrix generated 5391 5392 Level: developer 5393 5394 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatGetOwnershipRange()`, `MatMPIAIJGetLocalMat()` 5395 @*/ 5396 PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A, MatReuse scall, IS *row, IS *col, Mat *A_loc) 5397 { 5398 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 5399 PetscInt i, start, end, ncols, nzA, nzB, *cmap, imark, *idx; 5400 IS isrowa, iscola; 5401 Mat *aloc; 5402 PetscBool match; 5403 5404 PetscFunctionBegin; 5405 PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIAIJ, &match)); 5406 PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPIAIJ matrix as input"); 5407 PetscCall(PetscLogEventBegin(MAT_Getlocalmatcondensed, A, 0, 0, 0)); 5408 if (!row) { 5409 start = A->rmap->rstart; 5410 end = A->rmap->rend; 5411 PetscCall(ISCreateStride(PETSC_COMM_SELF, end - start, start, 1, &isrowa)); 5412 } else { 5413 isrowa = *row; 5414 } 5415 if (!col) { 5416 start = A->cmap->rstart; 5417 cmap = a->garray; 5418 nzA = a->A->cmap->n; 5419 nzB = a->B->cmap->n; 5420 PetscCall(PetscMalloc1(nzA + nzB, &idx)); 5421 ncols = 0; 5422 for (i = 0; i < nzB; i++) { 5423 if (cmap[i] < start) idx[ncols++] = cmap[i]; 5424 else break; 5425 } 5426 imark = i; 5427 for (i = 0; i < nzA; i++) idx[ncols++] = start + i; 5428 for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i]; 5429 PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &iscola)); 5430 } else { 5431 iscola = *col; 5432 } 5433 if (scall != MAT_INITIAL_MATRIX) { 5434 PetscCall(PetscMalloc1(1, &aloc)); 5435 aloc[0] = *A_loc; 5436 } 5437 PetscCall(MatCreateSubMatrices(A, 1, &isrowa, &iscola, scall, &aloc)); 5438 if (!col) { /* attach global id of condensed columns */ 5439
PetscCall(PetscObjectCompose((PetscObject)aloc[0], "_petsc_GetLocalMatCondensed_iscol", (PetscObject)iscola)); 5440 } 5441 *A_loc = aloc[0]; 5442 PetscCall(PetscFree(aloc)); 5443 if (!row) PetscCall(ISDestroy(&isrowa)); 5444 if (!col) PetscCall(ISDestroy(&iscola)); 5445 PetscCall(PetscLogEventEnd(MAT_Getlocalmatcondensed, A, 0, 0, 0)); 5446 PetscFunctionReturn(PETSC_SUCCESS); 5447 } 5448 5449 /* 5450 * Create a sequential AIJ matrix based on row indices: a whole row is extracted once its index is matched. 5451 * The row could be local or remote. The routine is designed to be scalable in memory so that nothing is based 5452 * on a global size. 5453 * */ 5454 static PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P, IS rows, Mat *P_oth) 5455 { 5456 Mat_MPIAIJ *p = (Mat_MPIAIJ *)P->data; 5457 Mat_SeqAIJ *pd = (Mat_SeqAIJ *)(p->A)->data, *po = (Mat_SeqAIJ *)(p->B)->data, *p_oth; 5458 PetscInt plocalsize, nrows, *ilocal, *oilocal, i, lidx, *nrcols, *nlcols, ncol; 5459 PetscMPIInt owner; 5460 PetscSFNode *iremote, *oiremote; 5461 const PetscInt *lrowindices; 5462 PetscSF sf, osf; 5463 PetscInt pcstart, *roffsets, *loffsets, *pnnz, j; 5464 PetscInt ontotalcols, dntotalcols, ntotalcols, nout; 5465 MPI_Comm comm; 5466 ISLocalToGlobalMapping mapping; 5467 const PetscScalar *pd_a, *po_a; 5468 5469 PetscFunctionBegin; 5470 PetscCall(PetscObjectGetComm((PetscObject)P, &comm)); 5471 /* plocalsize is the number of roots 5472 * nrows is the number of leaves 5473 * */ 5474 PetscCall(MatGetLocalSize(P, &plocalsize, NULL)); 5475 PetscCall(ISGetLocalSize(rows, &nrows)); 5476 PetscCall(PetscCalloc1(nrows, &iremote)); 5477 PetscCall(ISGetIndices(rows, &lrowindices)); 5478 for (i = 0; i < nrows; i++) { 5479 /* Find a remote index and an owner for a row 5480 * The row could be local or remote 5481 * */ 5482 owner = 0; 5483 lidx = 0; 5484 PetscCall(PetscLayoutFindOwnerIndex(P->rmap, lrowindices[i], &owner, &lidx)); 5485 iremote[i].index = lidx; 5486 iremote[i].rank = owner; 5487 } 5488 /* Create an SF to communicate the number of nonzero columns for each row */ 5489 PetscCall(PetscSFCreate(comm, &sf)); 5490 /* SF will figure out the number of nonzero columns for each row, and their 5491 * offsets 5492 * */ 5493 PetscCall(PetscSFSetGraph(sf, plocalsize, nrows, NULL, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER)); 5494 PetscCall(PetscSFSetFromOptions(sf)); 5495 PetscCall(PetscSFSetUp(sf)); 5496 5497 PetscCall(PetscCalloc1(2 * (plocalsize + 1), &roffsets)); 5498 PetscCall(PetscCalloc1(2 * plocalsize, &nrcols)); 5499 PetscCall(PetscCalloc1(nrows, &pnnz)); 5500 roffsets[0] = 0; 5501 roffsets[1] = 0; 5502 for (i = 0; i < plocalsize; i++) { 5503 /* diagonal */ 5504 nrcols[i * 2 + 0] = pd->i[i + 1] - pd->i[i]; 5505 /* off-diagonal */ 5506 nrcols[i * 2 + 1] = po->i[i + 1] - po->i[i]; 5507 /* compute offsets so that we know the relative location of each row */ 5508 roffsets[(i + 1) * 2 + 0] = roffsets[i * 2 + 0] + nrcols[i * 2 + 0]; 5509 roffsets[(i + 1) * 2 + 1] = roffsets[i * 2 + 1] + nrcols[i * 2 + 1]; 5510 } 5511 PetscCall(PetscCalloc1(2 * nrows, &nlcols)); 5512 PetscCall(PetscCalloc1(2 * nrows, &loffsets)); 5513 /* 'r' means root, and 'l' means leaf */ 5514 PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, nrcols, nlcols, MPI_REPLACE)); 5515 PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, roffsets, loffsets, MPI_REPLACE)); 5516 PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, nrcols, nlcols, MPI_REPLACE)); 5517 PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, roffsets, loffsets, MPI_REPLACE)); 5518 PetscCall(PetscSFDestroy(&sf)); 5519
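  /* at this point each leaf row knows its owner's diagonal/off-diagonal nonzero counts (nlcols) and the offsets at which those entries start on the owner (loffsets) */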
PetscCall(PetscFree(roffsets)); 5520 PetscCall(PetscFree(nrcols)); 5521 dntotalcols = 0; 5522 ontotalcols = 0; 5523 ncol = 0; 5524 for (i = 0; i < nrows; i++) { 5525 pnnz[i] = nlcols[i * 2 + 0] + nlcols[i * 2 + 1]; 5526 ncol = PetscMax(pnnz[i], ncol); 5527 /* diagonal */ 5528 dntotalcols += nlcols[i * 2 + 0]; 5529 /* off-diagonal */ 5530 ontotalcols += nlcols[i * 2 + 1]; 5531 } 5532 /* We do not need to figure the right number of columns 5533 * since all the calculations will be done by going through the raw data 5534 * */ 5535 PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, nrows, ncol, 0, pnnz, P_oth)); 5536 PetscCall(MatSetUp(*P_oth)); 5537 PetscCall(PetscFree(pnnz)); 5538 p_oth = (Mat_SeqAIJ *)(*P_oth)->data; 5539 /* diagonal */ 5540 PetscCall(PetscCalloc1(dntotalcols, &iremote)); 5541 /* off-diagonal */ 5542 PetscCall(PetscCalloc1(ontotalcols, &oiremote)); 5543 /* diagonal */ 5544 PetscCall(PetscCalloc1(dntotalcols, &ilocal)); 5545 /* off-diagonal */ 5546 PetscCall(PetscCalloc1(ontotalcols, &oilocal)); 5547 dntotalcols = 0; 5548 ontotalcols = 0; 5549 ntotalcols = 0; 5550 for (i = 0; i < nrows; i++) { 5551 owner = 0; 5552 PetscCall(PetscLayoutFindOwnerIndex(P->rmap, lrowindices[i], &owner, NULL)); 5553 /* Set iremote for diag matrix */ 5554 for (j = 0; j < nlcols[i * 2 + 0]; j++) { 5555 iremote[dntotalcols].index = loffsets[i * 2 + 0] + j; 5556 iremote[dntotalcols].rank = owner; 5557 /* P_oth is seqAIJ so that ilocal need to point to the first part of memory */ 5558 ilocal[dntotalcols++] = ntotalcols++; 5559 } 5560 /* off-diagonal */ 5561 for (j = 0; j < nlcols[i * 2 + 1]; j++) { 5562 oiremote[ontotalcols].index = loffsets[i * 2 + 1] + j; 5563 oiremote[ontotalcols].rank = owner; 5564 oilocal[ontotalcols++] = ntotalcols++; 5565 } 5566 } 5567 PetscCall(ISRestoreIndices(rows, &lrowindices)); 5568 PetscCall(PetscFree(loffsets)); 5569 PetscCall(PetscFree(nlcols)); 5570 PetscCall(PetscSFCreate(comm, &sf)); 5571 /* P serves as roots and P_oth is leaves 5572 * Diag matrix 5573 * */ 5574 PetscCall(PetscSFSetGraph(sf, pd->i[plocalsize], dntotalcols, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER)); 5575 PetscCall(PetscSFSetFromOptions(sf)); 5576 PetscCall(PetscSFSetUp(sf)); 5577 5578 PetscCall(PetscSFCreate(comm, &osf)); 5579 /* off-diagonal */ 5580 PetscCall(PetscSFSetGraph(osf, po->i[plocalsize], ontotalcols, oilocal, PETSC_OWN_POINTER, oiremote, PETSC_OWN_POINTER)); 5581 PetscCall(PetscSFSetFromOptions(osf)); 5582 PetscCall(PetscSFSetUp(osf)); 5583 PetscCall(MatSeqAIJGetArrayRead(p->A, &pd_a)); 5584 PetscCall(MatSeqAIJGetArrayRead(p->B, &po_a)); 5585 /* operate on the matrix internal data to save memory */ 5586 PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE)); 5587 PetscCall(PetscSFBcastBegin(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE)); 5588 PetscCall(MatGetOwnershipRangeColumn(P, &pcstart, NULL)); 5589 /* Convert to global indices for diag matrix */ 5590 for (i = 0; i < pd->i[plocalsize]; i++) pd->j[i] += pcstart; 5591 PetscCall(PetscSFBcastBegin(sf, MPIU_INT, pd->j, p_oth->j, MPI_REPLACE)); 5592 /* We want P_oth store global indices */ 5593 PetscCall(ISLocalToGlobalMappingCreate(comm, 1, p->B->cmap->n, p->garray, PETSC_COPY_VALUES, &mapping)); 5594 /* Use memory scalable approach */ 5595 PetscCall(ISLocalToGlobalMappingSetType(mapping, ISLOCALTOGLOBALMAPPINGHASH)); 5596 PetscCall(ISLocalToGlobalMappingApply(mapping, po->i[plocalsize], po->j, po->j)); 5597 PetscCall(PetscSFBcastBegin(osf, MPIU_INT, po->j, p_oth->j, MPI_REPLACE)); 5598 
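  /* the column-index broadcasts posted above are completed below; pd->j and po->j were temporarily switched to global indices and are converted back afterwards */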
PetscCall(PetscSFBcastEnd(sf, MPIU_INT, pd->j, p_oth->j, MPI_REPLACE)); 5599 /* Convert back to local indices */ 5600 for (i = 0; i < pd->i[plocalsize]; i++) pd->j[i] -= pcstart; 5601 PetscCall(PetscSFBcastEnd(osf, MPIU_INT, po->j, p_oth->j, MPI_REPLACE)); 5602 nout = 0; 5603 PetscCall(ISGlobalToLocalMappingApply(mapping, IS_GTOLM_DROP, po->i[plocalsize], po->j, &nout, po->j)); 5604 PetscCheck(nout == po->i[plocalsize], comm, PETSC_ERR_ARG_INCOMP, "n %" PetscInt_FMT " does not equal to nout %" PetscInt_FMT " ", po->i[plocalsize], nout); 5605 PetscCall(ISLocalToGlobalMappingDestroy(&mapping)); 5606 /* Exchange values */ 5607 PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE)); 5608 PetscCall(PetscSFBcastEnd(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE)); 5609 PetscCall(MatSeqAIJRestoreArrayRead(p->A, &pd_a)); 5610 PetscCall(MatSeqAIJRestoreArrayRead(p->B, &po_a)); 5611 /* Stop PETSc from shrinking memory */ 5612 for (i = 0; i < nrows; i++) p_oth->ilen[i] = p_oth->imax[i]; 5613 PetscCall(MatAssemblyBegin(*P_oth, MAT_FINAL_ASSEMBLY)); 5614 PetscCall(MatAssemblyEnd(*P_oth, MAT_FINAL_ASSEMBLY)); 5615 /* Attach PetscSF objects to P_oth so that we can reuse it later */ 5616 PetscCall(PetscObjectCompose((PetscObject)*P_oth, "diagsf", (PetscObject)sf)); 5617 PetscCall(PetscObjectCompose((PetscObject)*P_oth, "offdiagsf", (PetscObject)osf)); 5618 PetscCall(PetscSFDestroy(&sf)); 5619 PetscCall(PetscSFDestroy(&osf)); 5620 PetscFunctionReturn(PETSC_SUCCESS); 5621 } 5622 5623 /* 5624 * Creates a SeqAIJ matrix by taking rows of B that equal to nonzero columns of local A 5625 * This supports MPIAIJ and MAIJ 5626 * */ 5627 PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A, Mat P, PetscInt dof, MatReuse reuse, Mat *P_oth) 5628 { 5629 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *p = (Mat_MPIAIJ *)P->data; 5630 Mat_SeqAIJ *p_oth; 5631 IS rows, map; 5632 PetscHMapI hamp; 5633 PetscInt i, htsize, *rowindices, off, *mapping, key, count; 5634 MPI_Comm comm; 5635 PetscSF sf, osf; 5636 PetscBool has; 5637 5638 PetscFunctionBegin; 5639 PetscCall(PetscObjectGetComm((PetscObject)A, &comm)); 5640 PetscCall(PetscLogEventBegin(MAT_GetBrowsOfAocols, A, P, 0, 0)); 5641 /* If it is the first time, create an index set of off-diag nonzero columns of A, 5642 * and then create a submatrix (that often is an overlapping matrix) 5643 * */ 5644 if (reuse == MAT_INITIAL_MATRIX) { 5645 /* Use a hash table to figure out unique keys */ 5646 PetscCall(PetscHMapICreateWithSize(a->B->cmap->n, &hamp)); 5647 PetscCall(PetscCalloc1(a->B->cmap->n, &mapping)); 5648 count = 0; 5649 /* Assume that a->g is sorted, otherwise the following does not make sense */ 5650 for (i = 0; i < a->B->cmap->n; i++) { 5651 key = a->garray[i] / dof; 5652 PetscCall(PetscHMapIHas(hamp, key, &has)); 5653 if (!has) { 5654 mapping[i] = count; 5655 PetscCall(PetscHMapISet(hamp, key, count++)); 5656 } else { 5657 /* Current 'i' has the same value the previous step */ 5658 mapping[i] = count - 1; 5659 } 5660 } 5661 PetscCall(ISCreateGeneral(comm, a->B->cmap->n, mapping, PETSC_OWN_POINTER, &map)); 5662 PetscCall(PetscHMapIGetSize(hamp, &htsize)); 5663 PetscCheck(htsize == count, comm, PETSC_ERR_ARG_INCOMP, " Size of hash map %" PetscInt_FMT " is inconsistent with count %" PetscInt_FMT, htsize, count); 5664 PetscCall(PetscCalloc1(htsize, &rowindices)); 5665 off = 0; 5666 PetscCall(PetscHMapIGetKeys(hamp, &off, rowindices)); 5667 PetscCall(PetscHMapIDestroy(&hamp)); 5668 PetscCall(PetscSortInt(htsize, rowindices)); 5669 PetscCall(ISCreateGeneral(comm, htsize, 
rowindices, PETSC_OWN_POINTER, &rows)); 5670 /* In case the matrix was already created, but the user wants to recreate it */ 5671 PetscCall(MatDestroy(P_oth)); 5672 PetscCall(MatCreateSeqSubMatrixWithRows_Private(P, rows, P_oth)); 5673 PetscCall(PetscObjectCompose((PetscObject)*P_oth, "aoffdiagtopothmapping", (PetscObject)map)); 5674 PetscCall(ISDestroy(&map)); 5675 PetscCall(ISDestroy(&rows)); 5676 } else if (reuse == MAT_REUSE_MATRIX) { 5677 /* If the matrix was already created, we simply update values using SF objects 5678 * that were attached to the matrix earlier. 5679 */ 5680 const PetscScalar *pd_a, *po_a; 5681 5682 PetscCall(PetscObjectQuery((PetscObject)*P_oth, "diagsf", (PetscObject *)&sf)); 5683 PetscCall(PetscObjectQuery((PetscObject)*P_oth, "offdiagsf", (PetscObject *)&osf)); 5684 PetscCheck(sf && osf, comm, PETSC_ERR_ARG_NULL, "Matrix is not initialized yet"); 5685 p_oth = (Mat_SeqAIJ *)(*P_oth)->data; 5686 /* Update values in place */ 5687 PetscCall(MatSeqAIJGetArrayRead(p->A, &pd_a)); 5688 PetscCall(MatSeqAIJGetArrayRead(p->B, &po_a)); 5689 PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE)); 5690 PetscCall(PetscSFBcastBegin(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE)); 5691 PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE)); 5692 PetscCall(PetscSFBcastEnd(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE)); 5693 PetscCall(MatSeqAIJRestoreArrayRead(p->A, &pd_a)); 5694 PetscCall(MatSeqAIJRestoreArrayRead(p->B, &po_a)); 5695 } else SETERRQ(comm, PETSC_ERR_ARG_UNKNOWN_TYPE, "Unknown reuse type"); 5696 PetscCall(PetscLogEventEnd(MAT_GetBrowsOfAocols, A, P, 0, 0)); 5697 PetscFunctionReturn(PETSC_SUCCESS); 5698 } 5699 5700 /*@C 5701 MatGetBrowsOfAcols - Returns the `IS` that contains the rows of `B` equal to the nonzero columns of local `A` 5702 5703 Collective 5704 5705 Input Parameters: 5706 + A - the first matrix in `MATMPIAIJ` format 5707 . B - the second matrix in `MATMPIAIJ` format 5708 - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX` 5709 5710 Output Parameters: 5711 + rowb - On input index sets of rows of B to extract (or `NULL`), modified on output 5712 .
colb - On input index sets of columns of B to extract (or `NULL`), modified on output 5713 - B_seq - the sequential matrix generated 5714 5715 Level: developer 5716 5717 .seealso: `Mat`, `MATMPIAIJ`, `IS`, `MatReuse` 5718 @*/ 5719 PetscErrorCode MatGetBrowsOfAcols(Mat A, Mat B, MatReuse scall, IS *rowb, IS *colb, Mat *B_seq) 5720 { 5721 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 5722 PetscInt *idx, i, start, ncols, nzA, nzB, *cmap, imark; 5723 IS isrowb, iscolb; 5724 Mat *bseq = NULL; 5725 5726 PetscFunctionBegin; 5727 PetscCheck(A->cmap->rstart == B->rmap->rstart && A->cmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", 5728 A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend); 5729 PetscCall(PetscLogEventBegin(MAT_GetBrowsOfAcols, A, B, 0, 0)); 5730 5731 if (scall == MAT_INITIAL_MATRIX) { 5732 start = A->cmap->rstart; 5733 cmap = a->garray; 5734 nzA = a->A->cmap->n; 5735 nzB = a->B->cmap->n; 5736 PetscCall(PetscMalloc1(nzA + nzB, &idx)); 5737 ncols = 0; 5738 for (i = 0; i < nzB; i++) { /* row < local row index */ 5739 if (cmap[i] < start) idx[ncols++] = cmap[i]; 5740 else break; 5741 } 5742 imark = i; 5743 for (i = 0; i < nzA; i++) idx[ncols++] = start + i; /* local rows */ 5744 for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */ 5745 PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &isrowb)); 5746 PetscCall(ISCreateStride(PETSC_COMM_SELF, B->cmap->N, 0, 1, &iscolb)); 5747 } else { 5748 PetscCheck(rowb && colb, PETSC_COMM_SELF, PETSC_ERR_SUP, "IS rowb and colb must be provided for MAT_REUSE_MATRIX"); 5749 isrowb = *rowb; 5750 iscolb = *colb; 5751 PetscCall(PetscMalloc1(1, &bseq)); 5752 bseq[0] = *B_seq; 5753 } 5754 PetscCall(MatCreateSubMatrices(B, 1, &isrowb, &iscolb, scall, &bseq)); 5755 *B_seq = bseq[0]; 5756 PetscCall(PetscFree(bseq)); 5757 if (!rowb) { 5758 PetscCall(ISDestroy(&isrowb)); 5759 } else { 5760 *rowb = isrowb; 5761 } 5762 if (!colb) { 5763 PetscCall(ISDestroy(&iscolb)); 5764 } else { 5765 *colb = iscolb; 5766 } 5767 PetscCall(PetscLogEventEnd(MAT_GetBrowsOfAcols, A, B, 0, 0)); 5768 PetscFunctionReturn(PETSC_SUCCESS); 5769 } 5770 5771 /* 5772 MatGetBrowsOfAoCols_MPIAIJ - Creates a `MATSEQAIJ` matrix by taking rows of B that equal to nonzero columns 5773 of the OFF-DIAGONAL portion of local A 5774 5775 Collective 5776 5777 Input Parameters: 5778 + A,B - the matrices in `MATMPIAIJ` format 5779 - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX` 5780 5781 Output Parameter: 5782 + startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL) 5783 . startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL) 5784 . bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL) 5785 - B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N 5786 5787 Developer Note: 5788 This directly accesses information inside the VecScatter associated with the matrix-vector product 5789 for this matrix. This is not desirable.. 
5790 5791 Level: developer 5792 5793 */ 5794 PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A, Mat B, MatReuse scall, PetscInt **startsj_s, PetscInt **startsj_r, MatScalar **bufa_ptr, Mat *B_oth) 5795 { 5796 Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data; 5797 Mat_SeqAIJ *b_oth; 5798 VecScatter ctx; 5799 MPI_Comm comm; 5800 const PetscMPIInt *rprocs, *sprocs; 5801 const PetscInt *srow, *rstarts, *sstarts; 5802 PetscInt *rowlen, *bufj, *bufJ, ncols = 0, aBn = a->B->cmap->n, row, *b_othi, *b_othj, *rvalues = NULL, *svalues = NULL, *cols, sbs, rbs; 5803 PetscInt i, j, k = 0, l, ll, nrecvs, nsends, nrows, *rstartsj = NULL, *sstartsj, len; 5804 PetscScalar *b_otha, *bufa, *bufA, *vals = NULL; 5805 MPI_Request *reqs = NULL, *rwaits = NULL, *swaits = NULL; 5806 PetscMPIInt size, tag, rank, nreqs; 5807 5808 PetscFunctionBegin; 5809 PetscCall(PetscObjectGetComm((PetscObject)A, &comm)); 5810 PetscCallMPI(MPI_Comm_size(comm, &size)); 5811 5812 PetscCheck(A->cmap->rstart == B->rmap->rstart && A->cmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", 5813 A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend); 5814 PetscCall(PetscLogEventBegin(MAT_GetBrowsOfAocols, A, B, 0, 0)); 5815 PetscCallMPI(MPI_Comm_rank(comm, &rank)); 5816 5817 if (size == 1) { 5818 startsj_s = NULL; 5819 bufa_ptr = NULL; 5820 *B_oth = NULL; 5821 PetscFunctionReturn(PETSC_SUCCESS); 5822 } 5823 5824 ctx = a->Mvctx; 5825 tag = ((PetscObject)ctx)->tag; 5826 5827 PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &srow, &sprocs, &sbs)); 5828 /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */ 5829 PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL /*indices not needed*/, &rprocs, &rbs)); 5830 PetscCall(PetscMPIIntCast(nsends + nrecvs, &nreqs)); 5831 PetscCall(PetscMalloc1(nreqs, &reqs)); 5832 rwaits = reqs; 5833 swaits = PetscSafePointerPlusOffset(reqs, nrecvs); 5834 5835 if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX; 5836 if (scall == MAT_INITIAL_MATRIX) { 5837 /* i-array */ 5838 /* post receives */ 5839 if (nrecvs) PetscCall(PetscMalloc1(rbs * (rstarts[nrecvs] - rstarts[0]), &rvalues)); /* rstarts can be NULL when nrecvs=0 */ 5840 for (i = 0; i < nrecvs; i++) { 5841 rowlen = rvalues + rstarts[i] * rbs; 5842 nrows = (rstarts[i + 1] - rstarts[i]) * rbs; /* num of indices to be received */ 5843 PetscCallMPI(MPI_Irecv(rowlen, nrows, MPIU_INT, rprocs[i], tag, comm, rwaits + i)); 5844 } 5845 5846 /* pack the outgoing message */ 5847 PetscCall(PetscMalloc2(nsends + 1, &sstartsj, nrecvs + 1, &rstartsj)); 5848 5849 sstartsj[0] = 0; 5850 rstartsj[0] = 0; 5851 len = 0; /* total length of j or a array to be sent */ 5852 if (nsends) { 5853 k = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */ 5854 PetscCall(PetscMalloc1(sbs * (sstarts[nsends] - sstarts[0]), &svalues)); 5855 } 5856 for (i = 0; i < nsends; i++) { 5857 rowlen = svalues + (sstarts[i] - sstarts[0]) * sbs; 5858 nrows = sstarts[i + 1] - sstarts[i]; /* num of block rows */ 5859 for (j = 0; j < nrows; j++) { 5860 row = srow[k] + B->rmap->range[rank]; /* global row idx */ 5861 for (l = 0; l < sbs; l++) { 5862 PetscCall(MatGetRow_MPIAIJ(B, row + l, &ncols, NULL, NULL)); /* rowlength */ 5863 5864 rowlen[j * sbs + l] = ncols; 5865 5866 len += ncols; 5867 
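          /* len accumulates the total number of nonzeros this rank will send; it sizes bufj[] and bufa[] below */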
          PetscCall(MatRestoreRow_MPIAIJ(B, row + l, &ncols, NULL, NULL));
        }
        k++;
      }
      PetscCallMPI(MPI_Isend(rowlen, nrows * sbs, MPIU_INT, sprocs[i], tag, comm, swaits + i));

      sstartsj[i + 1] = len; /* starting point of (i+1)-th outgoing msg in bufj and bufa */
    }
    /* recvs and sends of i-array are completed */
    if (nreqs) PetscCallMPI(MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE));
    PetscCall(PetscFree(svalues));

    /* allocate buffers for sending j and a arrays */
    PetscCall(PetscMalloc1(len + 1, &bufj));
    PetscCall(PetscMalloc1(len + 1, &bufa));

    /* create i-array of B_oth */
    PetscCall(PetscMalloc1(aBn + 2, &b_othi));

    b_othi[0] = 0;
    len       = 0; /* total length of j or a array to be received */
    k         = 0;
    for (i = 0; i < nrecvs; i++) {
      rowlen = rvalues + (rstarts[i] - rstarts[0]) * rbs;
      nrows  = (rstarts[i + 1] - rstarts[i]) * rbs; /* num of rows to be received */
      for (j = 0; j < nrows; j++) {
        b_othi[k + 1] = b_othi[k] + rowlen[j];
        PetscCall(PetscIntSumError(rowlen[j], len, &len));
        k++;
      }
      rstartsj[i + 1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
    }
    PetscCall(PetscFree(rvalues));

    /* allocate space for j and a arrays of B_oth */
    PetscCall(PetscMalloc1(b_othi[aBn] + 1, &b_othj));
    PetscCall(PetscMalloc1(b_othi[aBn] + 1, &b_otha));

    /* j-array */
    /* post receives of j-array */
    for (i = 0; i < nrecvs; i++) {
      nrows = rstartsj[i + 1] - rstartsj[i]; /* length of the msg received */
      PetscCallMPI(MPI_Irecv(b_othj + rstartsj[i], nrows, MPIU_INT, rprocs[i], tag, comm, rwaits + i));
    }

    /* pack the outgoing message j-array */
    if (nsends) k = sstarts[0];
    for (i = 0; i < nsends; i++) {
      nrows = sstarts[i + 1] - sstarts[i]; /* num of block rows */
      bufJ  = bufj + sstartsj[i];
      for (j = 0; j < nrows; j++) {
        row = srow[k++] + B->rmap->range[rank]; /* global row idx */
        for (ll = 0; ll < sbs; ll++) {
          PetscCall(MatGetRow_MPIAIJ(B, row + ll, &ncols, &cols, NULL));
          for (l = 0; l < ncols; l++) *bufJ++ = cols[l];
          PetscCall(MatRestoreRow_MPIAIJ(B, row + ll, &ncols, &cols, NULL));
        }
      }
      PetscCallMPI(MPI_Isend(bufj + sstartsj[i], sstartsj[i + 1] - sstartsj[i], MPIU_INT, sprocs[i], tag, comm, swaits + i));
    }

    /* recvs and sends of j-array are completed */
    if (nreqs) PetscCallMPI(MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE));
  } else if (scall == MAT_REUSE_MATRIX) {
    sstartsj = *startsj_s;
    rstartsj = *startsj_r;
    bufa     = *bufa_ptr;
    b_oth    = (Mat_SeqAIJ *)(*B_oth)->data;
    PetscCall(MatSeqAIJGetArrayWrite(*B_oth, &b_otha));
  } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Unknown MatReuse type");
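
  /* At this point, whether freshly built or reused, sstartsj[i]..sstartsj[i+1] and rstartsj[i]..rstartsj[i+1]
     delimit the i-th outgoing/incoming message in bufj and bufa. The a-array exchange below is identical for
     both paths, which is what makes MAT_REUSE_MATRIX cheap: only values, not structure, are communicated. */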

  /* a-array */
  /* post receives of a-array */
  for (i = 0; i < nrecvs; i++) {
    nrows = rstartsj[i + 1] - rstartsj[i]; /* length of the msg received */
    PetscCallMPI(MPI_Irecv(b_otha + rstartsj[i], nrows, MPIU_SCALAR, rprocs[i], tag, comm, rwaits + i));
  }

  /* pack the outgoing message a-array */
  if (nsends) k = sstarts[0];
  for (i = 0; i < nsends; i++) {
    nrows = sstarts[i + 1] - sstarts[i]; /* num of block rows */
    bufA  = bufa + sstartsj[i];
    for (j = 0; j < nrows; j++) {
      row = srow[k++] + B->rmap->range[rank]; /* global row idx */
      for (ll = 0; ll < sbs; ll++) {
        PetscCall(MatGetRow_MPIAIJ(B, row + ll, &ncols, NULL, &vals));
        for (l = 0; l < ncols; l++) *bufA++ = vals[l];
        PetscCall(MatRestoreRow_MPIAIJ(B, row + ll, &ncols, NULL, &vals));
      }
    }
    PetscCallMPI(MPI_Isend(bufa + sstartsj[i], sstartsj[i + 1] - sstartsj[i], MPIU_SCALAR, sprocs[i], tag, comm, swaits + i));
  }
  /* recvs and sends of a-array are completed */
  if (nreqs) PetscCallMPI(MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE));
  PetscCall(PetscFree(reqs));

  if (scall == MAT_INITIAL_MATRIX) {
    /* put together the new matrix */
    PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, aBn, B->cmap->N, b_othi, b_othj, b_otha, B_oth));

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    b_oth          = (Mat_SeqAIJ *)(*B_oth)->data;
    b_oth->free_a  = PETSC_TRUE;
    b_oth->free_ij = PETSC_TRUE;
    b_oth->nonew   = 0;

    PetscCall(PetscFree(bufj));
    if (!startsj_s || !bufa_ptr) {
      PetscCall(PetscFree2(sstartsj, rstartsj));
      PetscCall(PetscFree(bufa));
    } else {
      *startsj_s = sstartsj;
      *startsj_r = rstartsj;
      *bufa_ptr  = bufa;
    }
  } else if (scall == MAT_REUSE_MATRIX) {
    PetscCall(MatSeqAIJRestoreArrayWrite(*B_oth, &b_otha));
  }

  PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE, &nsends, &sstarts, &srow, &sprocs, &sbs));
  PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE, &nrecvs, &rstarts, NULL, &rprocs, &rbs));
  PetscCall(PetscLogEventEnd(MAT_GetBrowsOfAocols, A, B, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat, MatType, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat, MatType, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat, MatType, MatReuse, Mat *);
#if defined(PETSC_HAVE_MKL_SPARSE)
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat, MatType, MatReuse, Mat *);
#endif
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIBAIJ(Mat, MatType, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat, MatType, MatReuse, Mat *);
#if defined(PETSC_HAVE_ELEMENTAL)
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat, MatType, MatReuse, Mat *);
#endif
#if defined(PETSC_HAVE_SCALAPACK)
PETSC_INTERN PetscErrorCode MatConvert_AIJ_ScaLAPACK(Mat, MatType, MatReuse, Mat *);
#endif
#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat, MatType, MatReuse, Mat *);
#endif
#if defined(PETSC_HAVE_CUDA)
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCUSPARSE(Mat, MatType, MatReuse, Mat *);
#endif
#if defined(PETSC_HAVE_HIP)
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJHIPSPARSE(Mat, MatType, MatReuse, Mat *);
#endif
#if defined(PETSC_HAVE_KOKKOS_KERNELS)
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJKokkos(Mat, MatType, MatReuse, Mat *);
#endif
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat, MatType, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat, MatType, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_IS_XAIJ(Mat);

/*
  Computes (B'*A')' since computing B*A directly is untenable

             n                       p                          p
      [             ]       [     ]         [                 ]
    m [      A      ]  *  n [  B  ]   =   m [       C         ]
      [             ]       [     ]         [                 ]
*/
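
/*
  A short note on the identity being used (A is MPIDENSE of size m x n, B is MPIAIJ of size n x p):

    C = A*B = ((A*B)^T)^T = (B^T * A^T)^T

  so C can be formed with the supported sparse-times-dense product MatMatMult(B^T, A^T) plus three
  transposes, as coded in MatMatMultNumeric_MPIDense_MPIAIJ() below.
*/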
static PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A, Mat B, Mat C)
{
  Mat At, Bt, Ct;

  PetscFunctionBegin;
  PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
  PetscCall(MatTranspose(B, MAT_INITIAL_MATRIX, &Bt));
  PetscCall(MatMatMult(Bt, At, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &Ct));
  PetscCall(MatDestroy(&At));
  PetscCall(MatDestroy(&Bt));
  PetscCall(MatTransposeSetPrecursor(Ct, C));
  PetscCall(MatTranspose(Ct, MAT_REUSE_MATRIX, &C));
  PetscCall(MatDestroy(&Ct));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A, Mat B, PetscReal fill, Mat C)
{
  PetscBool cisdense;

  PetscFunctionBegin;
  PetscCheck(A->cmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "A->cmap->n %" PetscInt_FMT " != B->rmap->n %" PetscInt_FMT, A->cmap->n, B->rmap->n);
  PetscCall(MatSetSizes(C, A->rmap->n, B->cmap->n, A->rmap->N, B->cmap->N));
  PetscCall(MatSetBlockSizesFromMats(C, A, B));
  PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &cisdense, MATMPIDENSE, MATMPIDENSECUDA, MATMPIDENSEHIP, ""));
  if (!cisdense) PetscCall(MatSetType(C, ((PetscObject)A)->type_name));
  PetscCall(MatSetUp(C));

  C->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ_AB(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A = product->A, B = product->B;

  PetscFunctionBegin;
  PetscCheck(A->cmap->rstart == B->rmap->rstart && A->cmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",
             A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);
  C->ops->matmultsymbolic = MatMatMultSymbolic_MPIDense_MPIAIJ;
  C->ops->productsymbolic = MatProductSymbolic_AB;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ(Mat C)
{
  Mat_Product *product = C->product;

  PetscFunctionBegin;
  if (product->type == MATPRODUCT_AB) PetscCall(MatProductSetFromOptions_MPIDense_MPIAIJ_AB(C));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  Merge two sets of sorted nonzeros and return a CSR for the merged (sequential) matrix

  Input Parameters:

    j1,rowBegin1,rowEnd1,jmap1: describe the first set of nonzeros (Set1)
    j2,rowBegin2,rowEnd2,jmap2: describe the second set of nonzeros (Set2)

    mat: both sets' nonzeros are on m rows, where m is the number of local rows of the matrix mat

    For Set1, j1[] contains column indices of the nonzeros.
    For the k-th row (0<=k<m), [rowBegin1[k],rowEnd1[k]) index into j1[] and point to the begin/end nonzero in row k
    respectively (note rowEnd1[k] is not necessarily equal to rowBegin1[k+1]). Indices in this range of j1[] are sorted,
    but might have repeats. jmap1[t+1] - jmap1[t] is the number of repeats for the t-th unique nonzero in Set1.

    Similar for Set2.

    This routine merges the two sets of nonzeros row by row and removes repeats.

  Output Parameters: (memory is allocated by the caller)

    i[],j[]: the CSR of the merged matrix, which has m rows.
    imap1[]: the k-th unique nonzero in Set1 (k=0,1,...) corresponds to the imap1[k]-th unique nonzero in the merged matrix.
    imap2[]: similar to imap1[], but for Set2.
    Note we order nonzeros row-by-row and from left to right.
*/
static PetscErrorCode MatMergeEntries_Internal(Mat mat, const PetscInt j1[], const PetscInt j2[], const PetscCount rowBegin1[], const PetscCount rowEnd1[], const PetscCount rowBegin2[], const PetscCount rowEnd2[], const PetscCount jmap1[], const PetscCount jmap2[], PetscCount imap1[], PetscCount imap2[], PetscInt i[], PetscInt j[])
{
  PetscInt   r, m; /* Row index of mat */
  PetscCount t, t1, t2, b1, e1, b2, e2;

  PetscFunctionBegin;
  PetscCall(MatGetLocalSize(mat, &m, NULL));
  t1 = t2 = t = 0; /* Count unique nonzeros in Set1, Set2 and the merged set, respectively */
  i[0] = 0;
  for (r = 0; r < m; r++) { /* Do row by row merging */
    b1 = rowBegin1[r];
    e1 = rowEnd1[r];
    b2 = rowBegin2[r];
    e2 = rowEnd2[r];
    while (b1 < e1 && b2 < e2) {
      if (j1[b1] == j2[b2]) { /* Same column index and hence same nonzero */
        j[t]      = j1[b1];
        imap1[t1] = t;
        imap2[t2] = t;
        b1 += jmap1[t1 + 1] - jmap1[t1]; /* Jump to next unique local nonzero */
        b2 += jmap2[t2 + 1] - jmap2[t2]; /* Jump to next unique remote nonzero */
        t1++;
        t2++;
        t++;
      } else if (j1[b1] < j2[b2]) {
        j[t]      = j1[b1];
        imap1[t1] = t;
        b1 += jmap1[t1 + 1] - jmap1[t1];
        t1++;
        t++;
      } else {
        j[t]      = j2[b2];
        imap2[t2] = t;
        b2 += jmap2[t2 + 1] - jmap2[t2];
        t2++;
        t++;
      }
    }
    /* Merge the remaining in either j1[] or j2[] */
    while (b1 < e1) {
      j[t]      = j1[b1];
      imap1[t1] = t;
      b1 += jmap1[t1 + 1] - jmap1[t1];
      t1++;
      t++;
    }
    while (b2 < e2) {
      j[t]      = j2[b2];
      imap2[t2] = t;
      b2 += jmap2[t2 + 1] - jmap2[t2];
      t2++;
      t++;
    }
    i[r + 1] = t;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  Split nonzeros in a block of local rows into two subsets: those in the diagonal block and those in the off-diagonal block

  Input Parameters:
    mat: an MPI matrix that provides row and column layout information for splitting. Let's say its number of local rows is m.
    n,i[],j[],perm[]: there are n input entries, belonging to m rows. Row/col indices of the entries are stored in i[] and j[]
      respectively, along with a permutation array perm[]. Length of the i[],j[],perm[] arrays is n.

      i[] is already sorted, but within a row, j[] is not sorted and might have repeats.
      i[] might contain negative indices at the beginning, which means the corresponding entries should be ignored in the splitting.

  Output Parameters:
    j[],perm[]: the routine needs to sort j[] within each row along with perm[].
    rowBegin[],rowMid[],rowEnd[]: of length m, and the memory is preallocated and zeroed by the caller.
      They contain indices pointing to j[]. For 0<=r<m, [rowBegin[r],rowMid[r]) point to begin/end entries of row r of the diagonal block,
      and [rowMid[r],rowEnd[r]) point to begin/end entries of row r of the off-diagonal block.

    Aperm[],Ajmap[],Atot,Annz: Arrays are allocated by this routine.
      Atot: number of entries belonging to the diagonal block.
      Annz: number of unique nonzeros belonging to the diagonal block.
      Aperm[Atot] stores values from perm[] for entries belonging to the diagonal block. Length of Aperm[] is Atot, though it may also count
        repeats (i.e., same 'i,j' pair).
      Ajmap[Annz+1] stores the number of repeats of each unique entry belonging to the diagonal block. More precisely, Ajmap[t+1] - Ajmap[t]
        is the number of repeats for the t-th unique entry in the diagonal block. Ajmap[0] is always 0.

    Bperm[], Bjmap[], Btot, Bnnz are similar but for the off-diagonal block.

    Aperm[],Bperm[],Ajmap[] and Bjmap[] are allocated separately by this routine with PetscMalloc1().
*/
static PetscErrorCode MatSplitEntries_Internal(Mat mat, PetscCount n, const PetscInt i[], PetscInt j[], PetscCount perm[], PetscCount rowBegin[], PetscCount rowMid[], PetscCount rowEnd[], PetscCount *Atot_, PetscCount **Aperm_, PetscCount *Annz_, PetscCount **Ajmap_, PetscCount *Btot_, PetscCount **Bperm_, PetscCount *Bnnz_, PetscCount **Bjmap_)
{
  PetscInt    cstart, cend, rstart, rend, row, col;
  PetscCount  Atot = 0, Btot = 0; /* Total number of nonzeros in the diagonal and off-diagonal blocks */
  PetscCount  Annz = 0, Bnnz = 0; /* Number of unique nonzeros in the diagonal and off-diagonal blocks */
  PetscCount  k, m, p, q, r, s, mid;
  PetscCount *Aperm, *Bperm, *Ajmap, *Bjmap;

  PetscFunctionBegin;
  PetscCall(PetscLayoutGetRange(mat->rmap, &rstart, &rend));
  PetscCall(PetscLayoutGetRange(mat->cmap, &cstart, &cend));
  m = rend - rstart;

  /* Skip negative rows */
  for (k = 0; k < n; k++)
    if (i[k] >= 0) break;

  /* Process [k,n): sort and partition each local row into diag and offdiag portions,
     fill rowBegin[], rowMid[], rowEnd[], and count Atot, Btot, Annz, Bnnz.
  */
  while (k < n) {
    row = i[k];
    /* Entries in [k,s) are in one row.
Shift diagonal block col indices so that diag is ahead of offdiag after sorting the row */ 6226 for (s = k; s < n; s++) 6227 if (i[s] != row) break; 6228 6229 /* Shift diag columns to range of [-PETSC_MAX_INT, -1] */ 6230 for (p = k; p < s; p++) { 6231 if (j[p] >= cstart && j[p] < cend) j[p] -= PETSC_MAX_INT; 6232 else PetscAssert((j[p] >= 0) && (j[p] <= mat->cmap->N), PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column index %" PetscInt_FMT " is out of range", j[p]); 6233 } 6234 PetscCall(PetscSortIntWithCountArray(s - k, j + k, perm + k)); 6235 PetscCall(PetscSortedIntUpperBound(j, k, s, -1, &mid)); /* Separate [k,s) into [k,mid) for diag and [mid,s) for offdiag */ 6236 rowBegin[row - rstart] = k; 6237 rowMid[row - rstart] = mid; 6238 rowEnd[row - rstart] = s; 6239 6240 /* Count nonzeros of this diag/offdiag row, which might have repeats */ 6241 Atot += mid - k; 6242 Btot += s - mid; 6243 6244 /* Count unique nonzeros of this diag row */ 6245 for (p = k; p < mid;) { 6246 col = j[p]; 6247 do { 6248 j[p] += PETSC_MAX_INT; /* Revert the modified diagonal indices */ 6249 p++; 6250 } while (p < mid && j[p] == col); 6251 Annz++; 6252 } 6253 6254 /* Count unique nonzeros of this offdiag row */ 6255 for (p = mid; p < s;) { 6256 col = j[p]; 6257 do { 6258 p++; 6259 } while (p < s && j[p] == col); 6260 Bnnz++; 6261 } 6262 k = s; 6263 } 6264 6265 /* Allocation according to Atot, Btot, Annz, Bnnz */ 6266 PetscCall(PetscMalloc1(Atot, &Aperm)); 6267 PetscCall(PetscMalloc1(Btot, &Bperm)); 6268 PetscCall(PetscMalloc1(Annz + 1, &Ajmap)); 6269 PetscCall(PetscMalloc1(Bnnz + 1, &Bjmap)); 6270 6271 /* Re-scan indices and copy diag/offdiag permutation indices to Aperm, Bperm and also fill Ajmap and Bjmap */ 6272 Ajmap[0] = Bjmap[0] = Atot = Btot = Annz = Bnnz = 0; 6273 for (r = 0; r < m; r++) { 6274 k = rowBegin[r]; 6275 mid = rowMid[r]; 6276 s = rowEnd[r]; 6277 PetscCall(PetscArraycpy(PetscSafePointerPlusOffset(Aperm, Atot), PetscSafePointerPlusOffset(perm, k), mid - k)); 6278 PetscCall(PetscArraycpy(PetscSafePointerPlusOffset(Bperm, Btot), PetscSafePointerPlusOffset(perm, mid), s - mid)); 6279 Atot += mid - k; 6280 Btot += s - mid; 6281 6282 /* Scan column indices in this row and find out how many repeats each unique nonzero has */ 6283 for (p = k; p < mid;) { 6284 col = j[p]; 6285 q = p; 6286 do { 6287 p++; 6288 } while (p < mid && j[p] == col); 6289 Ajmap[Annz + 1] = Ajmap[Annz] + (p - q); 6290 Annz++; 6291 } 6292 6293 for (p = mid; p < s;) { 6294 col = j[p]; 6295 q = p; 6296 do { 6297 p++; 6298 } while (p < s && j[p] == col); 6299 Bjmap[Bnnz + 1] = Bjmap[Bnnz] + (p - q); 6300 Bnnz++; 6301 } 6302 } 6303 /* Output */ 6304 *Aperm_ = Aperm; 6305 *Annz_ = Annz; 6306 *Atot_ = Atot; 6307 *Ajmap_ = Ajmap; 6308 *Bperm_ = Bperm; 6309 *Bnnz_ = Bnnz; 6310 *Btot_ = Btot; 6311 *Bjmap_ = Bjmap; 6312 PetscFunctionReturn(PETSC_SUCCESS); 6313 } 6314 6315 /* 6316 Expand the jmap[] array to make a new one in view of nonzeros in the merged matrix 6317 6318 Input Parameters: 6319 nnz1: number of unique nonzeros in a set that was used to produce imap[], jmap[] 6320 nnz: number of unique nonzeros in the merged matrix 6321 imap[nnz1]: i-th nonzero in the set is the imap[i]-th nonzero in the merged matrix 6322 jmap[nnz1+1]: i-th nonzero in the set has jmap[i+1] - jmap[i] repeats in the set 6323 6324 Output Parameter: (memory is allocated by the caller) 6325 jmap_new[nnz+1]: i-th nonzero in the merged matrix has jmap_new[i+1] - jmap_new[i] repeats in the set 6326 6327 Example: 6328 nnz1 = 4 6329 nnz = 6 6330 imap = [1,3,4,5] 6331 
jmap = [0,3,5,6,7] 6332 then, 6333 jmap_new = [0,0,3,3,5,6,7] 6334 */ 6335 static PetscErrorCode ExpandJmap_Internal(PetscCount nnz1, PetscCount nnz, const PetscCount imap[], const PetscCount jmap[], PetscCount jmap_new[]) 6336 { 6337 PetscCount k, p; 6338 6339 PetscFunctionBegin; 6340 jmap_new[0] = 0; 6341 p = nnz; /* p loops over jmap_new[] backwards */ 6342 for (k = nnz1 - 1; k >= 0; k--) { /* k loops over imap[] */ 6343 for (; p > imap[k]; p--) jmap_new[p] = jmap[k + 1]; 6344 } 6345 for (; p >= 0; p--) jmap_new[p] = jmap[0]; 6346 PetscFunctionReturn(PETSC_SUCCESS); 6347 } 6348 6349 static PetscErrorCode MatCOOStructDestroy_MPIAIJ(void *data) 6350 { 6351 MatCOOStruct_MPIAIJ *coo = (MatCOOStruct_MPIAIJ *)data; 6352 6353 PetscFunctionBegin; 6354 PetscCall(PetscSFDestroy(&coo->sf)); 6355 PetscCall(PetscFree(coo->Aperm1)); 6356 PetscCall(PetscFree(coo->Bperm1)); 6357 PetscCall(PetscFree(coo->Ajmap1)); 6358 PetscCall(PetscFree(coo->Bjmap1)); 6359 PetscCall(PetscFree(coo->Aimap2)); 6360 PetscCall(PetscFree(coo->Bimap2)); 6361 PetscCall(PetscFree(coo->Aperm2)); 6362 PetscCall(PetscFree(coo->Bperm2)); 6363 PetscCall(PetscFree(coo->Ajmap2)); 6364 PetscCall(PetscFree(coo->Bjmap2)); 6365 PetscCall(PetscFree(coo->Cperm1)); 6366 PetscCall(PetscFree2(coo->sendbuf, coo->recvbuf)); 6367 PetscCall(PetscFree(coo)); 6368 PetscFunctionReturn(PETSC_SUCCESS); 6369 } 6370 6371 PetscErrorCode MatSetPreallocationCOO_MPIAIJ(Mat mat, PetscCount coo_n, PetscInt coo_i[], PetscInt coo_j[]) 6372 { 6373 MPI_Comm comm; 6374 PetscMPIInt rank, size; 6375 PetscInt m, n, M, N, rstart, rend, cstart, cend; /* Sizes, indices of row/col, therefore with type PetscInt */ 6376 PetscCount k, p, q, rem; /* Loop variables over coo arrays */ 6377 Mat_MPIAIJ *mpiaij = (Mat_MPIAIJ *)mat->data; 6378 PetscContainer container; 6379 MatCOOStruct_MPIAIJ *coo; 6380 6381 PetscFunctionBegin; 6382 PetscCall(PetscFree(mpiaij->garray)); 6383 PetscCall(VecDestroy(&mpiaij->lvec)); 6384 #if defined(PETSC_USE_CTABLE) 6385 PetscCall(PetscHMapIDestroy(&mpiaij->colmap)); 6386 #else 6387 PetscCall(PetscFree(mpiaij->colmap)); 6388 #endif 6389 PetscCall(VecScatterDestroy(&mpiaij->Mvctx)); 6390 mat->assembled = PETSC_FALSE; 6391 mat->was_assembled = PETSC_FALSE; 6392 6393 PetscCall(PetscObjectGetComm((PetscObject)mat, &comm)); 6394 PetscCallMPI(MPI_Comm_size(comm, &size)); 6395 PetscCallMPI(MPI_Comm_rank(comm, &rank)); 6396 PetscCall(PetscLayoutSetUp(mat->rmap)); 6397 PetscCall(PetscLayoutSetUp(mat->cmap)); 6398 PetscCall(PetscLayoutGetRange(mat->rmap, &rstart, &rend)); 6399 PetscCall(PetscLayoutGetRange(mat->cmap, &cstart, &cend)); 6400 PetscCall(MatGetLocalSize(mat, &m, &n)); 6401 PetscCall(MatGetSize(mat, &M, &N)); 6402 6403 /* Sort (i,j) by row along with a permutation array, so that the to-be-ignored */ 6404 /* entries come first, then local rows, then remote rows. */ 6405 PetscCount n1 = coo_n, *perm1; 6406 PetscInt *i1 = coo_i, *j1 = coo_j; 6407 6408 PetscCall(PetscMalloc1(n1, &perm1)); 6409 for (k = 0; k < n1; k++) perm1[k] = k; 6410 6411 /* Manipulate indices so that entries with negative row or col indices will have smallest 6412 row indices, local entries will have greater but negative row indices, and remote entries 6413 will have positive row indices. 
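     For example (a sketch, assuming 32-bit PetscInt, so PETSC_MAX_INT = 2^31-1 and PETSC_MIN_INT = -2^31):
     with local rows [rstart,rend) = [10,20), an entry with a negative index gets row PETSC_MIN_INT,
     local row 12 becomes 12 - PETSC_MAX_INT (negative, yet greater than PETSC_MIN_INT), and remote
     row 42 stays 42, so a single sort by row index groups entries as ignored, then local, then remote.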
6414 */ 6415 for (k = 0; k < n1; k++) { 6416 if (i1[k] < 0 || j1[k] < 0) i1[k] = PETSC_MIN_INT; /* e.g., -2^31, minimal to move them ahead */ 6417 else if (i1[k] >= rstart && i1[k] < rend) i1[k] -= PETSC_MAX_INT; /* e.g., minus 2^31-1 to shift local rows to range of [-PETSC_MAX_INT, -1] */ 6418 else { 6419 PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "MAT_NO_OFF_PROC_ENTRIES is set but insert to remote rows"); 6420 if (mpiaij->donotstash) i1[k] = PETSC_MIN_INT; /* Ignore offproc entries as if they had negative indices */ 6421 } 6422 } 6423 6424 /* Sort by row; after that, [0,k) have ignored entries, [k,rem) have local rows and [rem,n1) have remote rows */ 6425 PetscCall(PetscSortIntWithIntCountArrayPair(n1, i1, j1, perm1)); 6426 6427 /* Advance k to the first entry we need to take care of */ 6428 for (k = 0; k < n1; k++) 6429 if (i1[k] > PETSC_MIN_INT) break; 6430 PetscInt i1start = k; 6431 6432 PetscCall(PetscSortedIntUpperBound(i1, k, n1, rend - 1 - PETSC_MAX_INT, &rem)); /* rem is upper bound of the last local row */ 6433 for (; k < rem; k++) i1[k] += PETSC_MAX_INT; /* Revert row indices of local rows*/ 6434 6435 /* Send remote rows to their owner */ 6436 /* Find which rows should be sent to which remote ranks*/ 6437 PetscInt nsend = 0; /* Number of MPI ranks to send data to */ 6438 PetscMPIInt *sendto; /* [nsend], storing remote ranks */ 6439 PetscInt *nentries; /* [nsend], storing number of entries sent to remote ranks; Assume PetscInt is big enough for this count, and error if not */ 6440 const PetscInt *ranges; 6441 PetscInt maxNsend = size >= 128 ? 128 : size; /* Assume max 128 neighbors; realloc when needed */ 6442 6443 PetscCall(PetscLayoutGetRanges(mat->rmap, &ranges)); 6444 PetscCall(PetscMalloc2(maxNsend, &sendto, maxNsend, &nentries)); 6445 for (k = rem; k < n1;) { 6446 PetscMPIInt owner; 6447 PetscInt firstRow, lastRow; 6448 6449 /* Locate a row range */ 6450 firstRow = i1[k]; /* first row of this owner */ 6451 PetscCall(PetscLayoutFindOwner(mat->rmap, firstRow, &owner)); 6452 lastRow = ranges[owner + 1] - 1; /* last row of this owner */ 6453 6454 /* Find the first index 'p' in [k,n) with i[p] belonging to next owner */ 6455 PetscCall(PetscSortedIntUpperBound(i1, k, n1, lastRow, &p)); 6456 6457 /* All entries in [k,p) belong to this remote owner */ 6458 if (nsend >= maxNsend) { /* Double the remote ranks arrays if not long enough */ 6459 PetscMPIInt *sendto2; 6460 PetscInt *nentries2; 6461 PetscInt maxNsend2 = (maxNsend <= size / 2) ? 
                                                    maxNsend * 2 : size;

      PetscCall(PetscMalloc2(maxNsend2, &sendto2, maxNsend2, &nentries2));
      PetscCall(PetscArraycpy(sendto2, sendto, maxNsend));
      PetscCall(PetscArraycpy(nentries2, nentries, maxNsend));
      PetscCall(PetscFree2(sendto, nentries));
      sendto   = sendto2;
      nentries = nentries2;
      maxNsend = maxNsend2;
    }
    sendto[nsend] = owner;
    PetscCall(PetscCountCast(p - k, &nentries[nsend]));
    nsend++;
    k = p;
  }

  /* Build 1st SF to know offsets on remote to send data */
  PetscSF      sf1;
  PetscInt     nroots = 1, nroots2 = 0;
  PetscInt     nleaves = nsend, nleaves2 = 0;
  PetscInt    *offsets;
  PetscSFNode *iremote;

  PetscCall(PetscSFCreate(comm, &sf1));
  PetscCall(PetscMalloc1(nsend, &iremote));
  PetscCall(PetscMalloc1(nsend, &offsets));
  for (k = 0; k < nsend; k++) {
    iremote[k].rank  = sendto[k];
    iremote[k].index = 0;
    nleaves2 += nentries[k];
    PetscCheck(nleaves2 >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Number of SF leaves is too large for PetscInt");
  }
  PetscCall(PetscSFSetGraph(sf1, nroots, nleaves, NULL, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
  PetscCall(PetscSFFetchAndOpWithMemTypeBegin(sf1, MPIU_INT, PETSC_MEMTYPE_HOST, &nroots2 /*rootdata*/, PETSC_MEMTYPE_HOST, nentries /*leafdata*/, PETSC_MEMTYPE_HOST, offsets /*leafupdate*/, MPI_SUM));
  PetscCall(PetscSFFetchAndOpEnd(sf1, MPIU_INT, &nroots2, nentries, offsets, MPI_SUM)); /* Should nroots2 overflow, we check offsets[] below */
  PetscCall(PetscSFDestroy(&sf1));
  PetscAssert(nleaves2 == n1 - rem, PETSC_COMM_SELF, PETSC_ERR_PLIB, "nleaves2 %" PetscInt_FMT " != number of remote entries %" PetscCount_FMT "", nleaves2, n1 - rem);

  /* Build 2nd SF to send remote COOs to their owner */
  PetscSF sf2;
  nroots  = nroots2;
  nleaves = nleaves2;
  PetscCall(PetscSFCreate(comm, &sf2));
  PetscCall(PetscSFSetFromOptions(sf2));
  PetscCall(PetscMalloc1(nleaves, &iremote));
  p = 0;
  for (k = 0; k < nsend; k++) {
    PetscCheck(offsets[k] >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Number of SF roots is too large for PetscInt");
    for (q = 0; q < nentries[k]; q++, p++) {
      iremote[p].rank  = sendto[k];
      iremote[p].index = offsets[k] + q;
    }
  }
  PetscCall(PetscSFSetGraph(sf2, nroots, nleaves, NULL, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));

  /* Send the remote COOs to their owner */
  PetscInt    n2 = nroots, *i2, *j2; /* Buffers for received COOs from other ranks, along with a permutation array */
  PetscCount *perm2;                 /* Though PetscInt is enough for remote entries, we use PetscCount here as we want to reuse MatSplitEntries_Internal() */
  PetscCall(PetscMalloc3(n2, &i2, n2, &j2, n2, &perm2));
  PetscCall(PetscSFReduceWithMemTypeBegin(sf2, MPIU_INT, PETSC_MEMTYPE_HOST, i1 + rem, PETSC_MEMTYPE_HOST, i2, MPI_REPLACE));
  PetscCall(PetscSFReduceEnd(sf2, MPIU_INT, i1 + rem, i2, MPI_REPLACE));
  PetscCall(PetscSFReduceWithMemTypeBegin(sf2, MPIU_INT, PETSC_MEMTYPE_HOST, j1 + rem, PETSC_MEMTYPE_HOST, j2, MPI_REPLACE));
  PetscCall(PetscSFReduceEnd(sf2, MPIU_INT, j1 + rem, j2, MPI_REPLACE));

  PetscCall(PetscFree(offsets));
  PetscCall(PetscFree2(sendto, nentries));

  /* Sort received COOs by row along with the permutation array */
  for (k = 0; k < n2; k++) perm2[k] = k;
  PetscCall(PetscSortIntWithIntCountArrayPair(n2, i2, j2, perm2));

  /* sf2
only sends contiguous leafdata to contiguous rootdata. We record the permutation which will be used to fill leafdata */ 6534 PetscCount *Cperm1; 6535 PetscCall(PetscMalloc1(nleaves, &Cperm1)); 6536 PetscCall(PetscArraycpy(Cperm1, perm1 + rem, nleaves)); 6537 6538 /* Support for HYPRE matrices, kind of a hack. 6539 Swap min column with diagonal so that diagonal values will go first */ 6540 PetscBool hypre; 6541 const char *name; 6542 PetscCall(PetscObjectGetName((PetscObject)mat, &name)); 6543 PetscCall(PetscStrcmp("_internal_COO_mat_for_hypre", name, &hypre)); 6544 if (hypre) { 6545 PetscInt *minj; 6546 PetscBT hasdiag; 6547 6548 PetscCall(PetscBTCreate(m, &hasdiag)); 6549 PetscCall(PetscMalloc1(m, &minj)); 6550 for (k = 0; k < m; k++) minj[k] = PETSC_MAX_INT; 6551 for (k = i1start; k < rem; k++) { 6552 if (j1[k] < cstart || j1[k] >= cend) continue; 6553 const PetscInt rindex = i1[k] - rstart; 6554 if ((j1[k] - cstart) == rindex) PetscCall(PetscBTSet(hasdiag, rindex)); 6555 minj[rindex] = PetscMin(minj[rindex], j1[k]); 6556 } 6557 for (k = 0; k < n2; k++) { 6558 if (j2[k] < cstart || j2[k] >= cend) continue; 6559 const PetscInt rindex = i2[k] - rstart; 6560 if ((j2[k] - cstart) == rindex) PetscCall(PetscBTSet(hasdiag, rindex)); 6561 minj[rindex] = PetscMin(minj[rindex], j2[k]); 6562 } 6563 for (k = i1start; k < rem; k++) { 6564 const PetscInt rindex = i1[k] - rstart; 6565 if (j1[k] < cstart || j1[k] >= cend || !PetscBTLookup(hasdiag, rindex)) continue; 6566 if (j1[k] == minj[rindex]) j1[k] = i1[k] + (cstart - rstart); 6567 else if ((j1[k] - cstart) == rindex) j1[k] = minj[rindex]; 6568 } 6569 for (k = 0; k < n2; k++) { 6570 const PetscInt rindex = i2[k] - rstart; 6571 if (j2[k] < cstart || j2[k] >= cend || !PetscBTLookup(hasdiag, rindex)) continue; 6572 if (j2[k] == minj[rindex]) j2[k] = i2[k] + (cstart - rstart); 6573 else if ((j2[k] - cstart) == rindex) j2[k] = minj[rindex]; 6574 } 6575 PetscCall(PetscBTDestroy(&hasdiag)); 6576 PetscCall(PetscFree(minj)); 6577 } 6578 6579 /* Split local COOs and received COOs into diag/offdiag portions */ 6580 PetscCount *rowBegin1, *rowMid1, *rowEnd1; 6581 PetscCount *Ajmap1, *Aperm1, *Bjmap1, *Bperm1; 6582 PetscCount Annz1, Bnnz1, Atot1, Btot1; 6583 PetscCount *rowBegin2, *rowMid2, *rowEnd2; 6584 PetscCount *Ajmap2, *Aperm2, *Bjmap2, *Bperm2; 6585 PetscCount Annz2, Bnnz2, Atot2, Btot2; 6586 6587 PetscCall(PetscCalloc3(m, &rowBegin1, m, &rowMid1, m, &rowEnd1)); 6588 PetscCall(PetscCalloc3(m, &rowBegin2, m, &rowMid2, m, &rowEnd2)); 6589 PetscCall(MatSplitEntries_Internal(mat, rem, i1, j1, perm1, rowBegin1, rowMid1, rowEnd1, &Atot1, &Aperm1, &Annz1, &Ajmap1, &Btot1, &Bperm1, &Bnnz1, &Bjmap1)); 6590 PetscCall(MatSplitEntries_Internal(mat, n2, i2, j2, perm2, rowBegin2, rowMid2, rowEnd2, &Atot2, &Aperm2, &Annz2, &Ajmap2, &Btot2, &Bperm2, &Bnnz2, &Bjmap2)); 6591 6592 /* Merge local COOs with received COOs: diag with diag, offdiag with offdiag */ 6593 PetscInt *Ai, *Bi; 6594 PetscInt *Aj, *Bj; 6595 6596 PetscCall(PetscMalloc1(m + 1, &Ai)); 6597 PetscCall(PetscMalloc1(m + 1, &Bi)); 6598 PetscCall(PetscMalloc1(Annz1 + Annz2, &Aj)); /* Since local and remote entries might have dups, we might allocate excess memory */ 6599 PetscCall(PetscMalloc1(Bnnz1 + Bnnz2, &Bj)); 6600 6601 PetscCount *Aimap1, *Bimap1, *Aimap2, *Bimap2; 6602 PetscCall(PetscMalloc1(Annz1, &Aimap1)); 6603 PetscCall(PetscMalloc1(Bnnz1, &Bimap1)); 6604 PetscCall(PetscMalloc1(Annz2, &Aimap2)); 6605 PetscCall(PetscMalloc1(Bnnz2, &Bimap2)); 6606 6607 PetscCall(MatMergeEntries_Internal(mat, j1, j2, 
rowBegin1, rowMid1, rowBegin2, rowMid2, Ajmap1, Ajmap2, Aimap1, Aimap2, Ai, Aj)); 6608 PetscCall(MatMergeEntries_Internal(mat, j1, j2, rowMid1, rowEnd1, rowMid2, rowEnd2, Bjmap1, Bjmap2, Bimap1, Bimap2, Bi, Bj)); 6609 6610 /* Expand Ajmap1/Bjmap1 to make them based off nonzeros in A/B, since we */ 6611 /* expect nonzeros in A/B most likely have local contributing entries */ 6612 PetscInt Annz = Ai[m]; 6613 PetscInt Bnnz = Bi[m]; 6614 PetscCount *Ajmap1_new, *Bjmap1_new; 6615 6616 PetscCall(PetscMalloc1(Annz + 1, &Ajmap1_new)); 6617 PetscCall(PetscMalloc1(Bnnz + 1, &Bjmap1_new)); 6618 6619 PetscCall(ExpandJmap_Internal(Annz1, Annz, Aimap1, Ajmap1, Ajmap1_new)); 6620 PetscCall(ExpandJmap_Internal(Bnnz1, Bnnz, Bimap1, Bjmap1, Bjmap1_new)); 6621 6622 PetscCall(PetscFree(Aimap1)); 6623 PetscCall(PetscFree(Ajmap1)); 6624 PetscCall(PetscFree(Bimap1)); 6625 PetscCall(PetscFree(Bjmap1)); 6626 PetscCall(PetscFree3(rowBegin1, rowMid1, rowEnd1)); 6627 PetscCall(PetscFree3(rowBegin2, rowMid2, rowEnd2)); 6628 PetscCall(PetscFree(perm1)); 6629 PetscCall(PetscFree3(i2, j2, perm2)); 6630 6631 Ajmap1 = Ajmap1_new; 6632 Bjmap1 = Bjmap1_new; 6633 6634 /* Reallocate Aj, Bj once we know actual numbers of unique nonzeros in A and B */ 6635 if (Annz < Annz1 + Annz2) { 6636 PetscInt *Aj_new; 6637 PetscCall(PetscMalloc1(Annz, &Aj_new)); 6638 PetscCall(PetscArraycpy(Aj_new, Aj, Annz)); 6639 PetscCall(PetscFree(Aj)); 6640 Aj = Aj_new; 6641 } 6642 6643 if (Bnnz < Bnnz1 + Bnnz2) { 6644 PetscInt *Bj_new; 6645 PetscCall(PetscMalloc1(Bnnz, &Bj_new)); 6646 PetscCall(PetscArraycpy(Bj_new, Bj, Bnnz)); 6647 PetscCall(PetscFree(Bj)); 6648 Bj = Bj_new; 6649 } 6650 6651 /* Create new submatrices for on-process and off-process coupling */ 6652 PetscScalar *Aa, *Ba; 6653 MatType rtype; 6654 Mat_SeqAIJ *a, *b; 6655 PetscObjectState state; 6656 PetscCall(PetscCalloc1(Annz, &Aa)); /* Zero matrix on device */ 6657 PetscCall(PetscCalloc1(Bnnz, &Ba)); 6658 /* make Aj[] local, i.e, based off the start column of the diagonal portion */ 6659 if (cstart) { 6660 for (k = 0; k < Annz; k++) Aj[k] -= cstart; 6661 } 6662 PetscCall(MatDestroy(&mpiaij->A)); 6663 PetscCall(MatDestroy(&mpiaij->B)); 6664 PetscCall(MatGetRootType_Private(mat, &rtype)); 6665 PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, n, Ai, Aj, Aa, &mpiaij->A)); 6666 PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, mat->cmap->N, Bi, Bj, Ba, &mpiaij->B)); 6667 PetscCall(MatSetUpMultiply_MPIAIJ(mat)); 6668 mat->was_assembled = PETSC_TRUE; // was_assembled in effect means the Mvctx is built; doing so avoids redundant MatSetUpMultiply_MPIAIJ 6669 state = mpiaij->A->nonzerostate + mpiaij->B->nonzerostate; 6670 PetscCall(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat))); 6671 6672 a = (Mat_SeqAIJ *)mpiaij->A->data; 6673 b = (Mat_SeqAIJ *)mpiaij->B->data; 6674 a->singlemalloc = b->singlemalloc = PETSC_FALSE; /* Let newmat own Ai,Aj,Aa,Bi,Bj,Ba */ 6675 a->free_a = b->free_a = PETSC_TRUE; 6676 a->free_ij = b->free_ij = PETSC_TRUE; 6677 6678 /* conversion must happen AFTER multiply setup */ 6679 PetscCall(MatConvert(mpiaij->A, rtype, MAT_INPLACE_MATRIX, &mpiaij->A)); 6680 PetscCall(MatConvert(mpiaij->B, rtype, MAT_INPLACE_MATRIX, &mpiaij->B)); 6681 PetscCall(VecDestroy(&mpiaij->lvec)); 6682 PetscCall(MatCreateVecs(mpiaij->B, &mpiaij->lvec, NULL)); 6683 6684 // Put the COO struct in a container and then attach that to the matrix 6685 PetscCall(PetscMalloc1(1, &coo)); 6686 coo->n = coo_n; 6687 coo->sf = sf2; 6688 coo->sendlen 
= nleaves; 6689 coo->recvlen = nroots; 6690 coo->Annz = Annz; 6691 coo->Bnnz = Bnnz; 6692 coo->Annz2 = Annz2; 6693 coo->Bnnz2 = Bnnz2; 6694 coo->Atot1 = Atot1; 6695 coo->Atot2 = Atot2; 6696 coo->Btot1 = Btot1; 6697 coo->Btot2 = Btot2; 6698 coo->Ajmap1 = Ajmap1; 6699 coo->Aperm1 = Aperm1; 6700 coo->Bjmap1 = Bjmap1; 6701 coo->Bperm1 = Bperm1; 6702 coo->Aimap2 = Aimap2; 6703 coo->Ajmap2 = Ajmap2; 6704 coo->Aperm2 = Aperm2; 6705 coo->Bimap2 = Bimap2; 6706 coo->Bjmap2 = Bjmap2; 6707 coo->Bperm2 = Bperm2; 6708 coo->Cperm1 = Cperm1; 6709 // Allocate in preallocation. If not used, it has zero cost on host 6710 PetscCall(PetscMalloc2(coo->sendlen, &coo->sendbuf, coo->recvlen, &coo->recvbuf)); 6711 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 6712 PetscCall(PetscContainerSetPointer(container, coo)); 6713 PetscCall(PetscContainerSetUserDestroy(container, MatCOOStructDestroy_MPIAIJ)); 6714 PetscCall(PetscObjectCompose((PetscObject)mat, "__PETSc_MatCOOStruct_Host", (PetscObject)container)); 6715 PetscCall(PetscContainerDestroy(&container)); 6716 PetscFunctionReturn(PETSC_SUCCESS); 6717 } 6718 6719 static PetscErrorCode MatSetValuesCOO_MPIAIJ(Mat mat, const PetscScalar v[], InsertMode imode) 6720 { 6721 Mat_MPIAIJ *mpiaij = (Mat_MPIAIJ *)mat->data; 6722 Mat A = mpiaij->A, B = mpiaij->B; 6723 PetscScalar *Aa, *Ba; 6724 PetscScalar *sendbuf, *recvbuf; 6725 const PetscCount *Ajmap1, *Ajmap2, *Aimap2; 6726 const PetscCount *Bjmap1, *Bjmap2, *Bimap2; 6727 const PetscCount *Aperm1, *Aperm2, *Bperm1, *Bperm2; 6728 const PetscCount *Cperm1; 6729 PetscContainer container; 6730 MatCOOStruct_MPIAIJ *coo; 6731 6732 PetscFunctionBegin; 6733 PetscCall(PetscObjectQuery((PetscObject)mat, "__PETSc_MatCOOStruct_Host", (PetscObject *)&container)); 6734 PetscCheck(container, PetscObjectComm((PetscObject)mat), PETSC_ERR_PLIB, "Not found MatCOOStruct on this matrix"); 6735 PetscCall(PetscContainerGetPointer(container, (void **)&coo)); 6736 sendbuf = coo->sendbuf; 6737 recvbuf = coo->recvbuf; 6738 Ajmap1 = coo->Ajmap1; 6739 Ajmap2 = coo->Ajmap2; 6740 Aimap2 = coo->Aimap2; 6741 Bjmap1 = coo->Bjmap1; 6742 Bjmap2 = coo->Bjmap2; 6743 Bimap2 = coo->Bimap2; 6744 Aperm1 = coo->Aperm1; 6745 Aperm2 = coo->Aperm2; 6746 Bperm1 = coo->Bperm1; 6747 Bperm2 = coo->Bperm2; 6748 Cperm1 = coo->Cperm1; 6749 6750 PetscCall(MatSeqAIJGetArray(A, &Aa)); /* Might read and write matrix values */ 6751 PetscCall(MatSeqAIJGetArray(B, &Ba)); 6752 6753 /* Pack entries to be sent to remote */ 6754 for (PetscCount i = 0; i < coo->sendlen; i++) sendbuf[i] = v[Cperm1[i]]; 6755 6756 /* Send remote entries to their owner and overlap the communication with local computation */ 6757 PetscCall(PetscSFReduceWithMemTypeBegin(coo->sf, MPIU_SCALAR, PETSC_MEMTYPE_HOST, sendbuf, PETSC_MEMTYPE_HOST, recvbuf, MPI_REPLACE)); 6758 /* Add local entries to A and B */ 6759 for (PetscCount i = 0; i < coo->Annz; i++) { /* All nonzeros in A are either zero'ed or added with a value (i.e., initialized) */ 6760 PetscScalar sum = 0.0; /* Do partial summation first to improve numerical stability */ 6761 for (PetscCount k = Ajmap1[i]; k < Ajmap1[i + 1]; k++) sum += v[Aperm1[k]]; 6762 Aa[i] = (imode == INSERT_VALUES ? 0.0 : Aa[i]) + sum; 6763 } 6764 for (PetscCount i = 0; i < coo->Bnnz; i++) { 6765 PetscScalar sum = 0.0; 6766 for (PetscCount k = Bjmap1[i]; k < Bjmap1[i + 1]; k++) sum += v[Bperm1[k]]; 6767 Ba[i] = (imode == INSERT_VALUES ? 
                                             0.0 : Ba[i]) + sum;
  }
  PetscCall(PetscSFReduceEnd(coo->sf, MPIU_SCALAR, sendbuf, recvbuf, MPI_REPLACE));

  /* Add received remote entries to A and B */
  for (PetscCount i = 0; i < coo->Annz2; i++) {
    for (PetscCount k = Ajmap2[i]; k < Ajmap2[i + 1]; k++) Aa[Aimap2[i]] += recvbuf[Aperm2[k]];
  }
  for (PetscCount i = 0; i < coo->Bnnz2; i++) {
    for (PetscCount k = Bjmap2[i]; k < Bjmap2[i + 1]; k++) Ba[Bimap2[i]] += recvbuf[Bperm2[k]];
  }
  PetscCall(MatSeqAIJRestoreArray(A, &Aa));
  PetscCall(MatSeqAIJRestoreArray(B, &Ba));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
   MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.

   Options Database Key:
.  -mat_type mpiaij - sets the matrix type to `MATMPIAIJ` during a call to `MatSetFromOptions()`

   Level: beginner

   Notes:
   `MatSetValues()` may be called for this matrix type with a `NULL` argument for the numerical values;
   in this case the values associated with the rows and columns one passes in are set to zero
   in the matrix.

   `MatSetOption`(A, `MAT_STRUCTURE_ONLY`, `PETSC_TRUE`) may be called for this matrix type. In this case, no
   space is allocated for the nonzero entries and any entries passed with `MatSetValues()` are ignored.

.seealso: [](ch_matrices), `Mat`, `MATSEQAIJ`, `MATAIJ`, `MatCreateAIJ()`
M*/
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
{
  Mat_MPIAIJ *b;
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size));

  PetscCall(PetscNew(&b));
  B->data       = (void *)b;
  B->ops[0]     = MatOps_Values;
  B->assembled  = PETSC_FALSE;
  B->insertmode = NOT_SET_VALUES;
  b->size       = size;

  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)B), &b->rank));

  /* build cache for off array entries formed */
  PetscCall(MatStashCreate_Private(PetscObjectComm((PetscObject)B), 1, &B->stash));

  b->donotstash  = PETSC_FALSE;
  b->colmap      = NULL;
  b->garray      = NULL;
  b->roworiented = PETSC_TRUE;

  /* stuff used for matrix vector multiply */
  b->lvec  = NULL;
  b->Mvctx = NULL;

  /* stuff for MatGetRow() */
  b->rowindices   = NULL;
  b->rowvalues    = NULL;
  b->getrowactive = PETSC_FALSE;

  /* flexible pointer used in CUSPARSE classes */
  b->spptr = NULL;

  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetUseScalableIncreaseOverlap_C", MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatStoreValues_C", MatStoreValues_MPIAIJ));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatRetrieveValues_C", MatRetrieveValues_MPIAIJ));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatIsTranspose_C", MatIsTranspose_MPIAIJ));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocation_C", MatMPIAIJSetPreallocation_MPIAIJ));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatResetPreallocation_C", MatResetPreallocation_MPIAIJ));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocationCSR_C", MatMPIAIJSetPreallocationCSR_MPIAIJ));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatDiagonalScaleLocal_C", MatDiagonalScaleLocal_MPIAIJ));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijperm_C",
MatConvert_MPIAIJ_MPIAIJPERM)); 6847 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijsell_C", MatConvert_MPIAIJ_MPIAIJSELL)); 6848 #if defined(PETSC_HAVE_CUDA) 6849 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijcusparse_C", MatConvert_MPIAIJ_MPIAIJCUSPARSE)); 6850 #endif 6851 #if defined(PETSC_HAVE_HIP) 6852 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijhipsparse_C", MatConvert_MPIAIJ_MPIAIJHIPSPARSE)); 6853 #endif 6854 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 6855 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijkokkos_C", MatConvert_MPIAIJ_MPIAIJKokkos)); 6856 #endif 6857 #if defined(PETSC_HAVE_MKL_SPARSE) 6858 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijmkl_C", MatConvert_MPIAIJ_MPIAIJMKL)); 6859 #endif 6860 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijcrl_C", MatConvert_MPIAIJ_MPIAIJCRL)); 6861 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpibaij_C", MatConvert_MPIAIJ_MPIBAIJ)); 6862 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpisbaij_C", MatConvert_MPIAIJ_MPISBAIJ)); 6863 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpidense_C", MatConvert_MPIAIJ_MPIDense)); 6864 #if defined(PETSC_HAVE_ELEMENTAL) 6865 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_elemental_C", MatConvert_MPIAIJ_Elemental)); 6866 #endif 6867 #if defined(PETSC_HAVE_SCALAPACK) 6868 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_scalapack_C", MatConvert_AIJ_ScaLAPACK)); 6869 #endif 6870 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_is_C", MatConvert_XAIJ_IS)); 6871 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpisell_C", MatConvert_MPIAIJ_MPISELL)); 6872 #if defined(PETSC_HAVE_HYPRE) 6873 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_hypre_C", MatConvert_AIJ_HYPRE)); 6874 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatProductSetFromOptions_transpose_mpiaij_mpiaij_C", MatProductSetFromOptions_Transpose_AIJ_AIJ)); 6875 #endif 6876 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatProductSetFromOptions_is_mpiaij_C", MatProductSetFromOptions_IS_XAIJ)); 6877 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatProductSetFromOptions_mpiaij_mpiaij_C", MatProductSetFromOptions_MPIAIJ)); 6878 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatSetPreallocationCOO_C", MatSetPreallocationCOO_MPIAIJ)); 6879 PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatSetValuesCOO_C", MatSetValuesCOO_MPIAIJ)); 6880 PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPIAIJ)); 6881 PetscFunctionReturn(PETSC_SUCCESS); 6882 } 6883 6884 /*@C 6885 MatCreateMPIAIJWithSplitArrays - creates a `MATMPIAIJ` matrix using arrays that contain the "diagonal" 6886 and "off-diagonal" part of the matrix in CSR format. 6887 6888 Collective 6889 6890 Input Parameters: 6891 + comm - MPI communicator 6892 . m - number of local rows (Cannot be `PETSC_DECIDE`) 6893 . n - This value should be the same as the local size used in creating the 6894 x vector for the matrix-vector product y = Ax. (or `PETSC_DECIDE` to have 6895 calculated if `N` is given) For square matrices `n` is almost always `m`. 6896 . M - number of global rows (or `PETSC_DETERMINE` to have calculated if `m` is given) 6897 . 
N - number of global columns (or `PETSC_DETERMINE` to have calculated if `n` is given) 6898 . i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix 6899 . j - column indices, which must be local, i.e., based off the start column of the diagonal portion 6900 . a - matrix values 6901 . oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix 6902 . oj - column indices, which must be global, representing global columns in the `MATMPIAIJ` matrix 6903 - oa - matrix values 6904 6905 Output Parameter: 6906 . mat - the matrix 6907 6908 Level: advanced 6909 6910 Notes: 6911 The `i`, `j`, and `a` arrays ARE NOT copied by this routine into the internal format used by PETSc. The user 6912 must free the arrays once the matrix has been destroyed and not before. 6913 6914 The `i` and `j` indices are 0 based 6915 6916 See `MatCreateAIJ()` for the definition of "diagonal" and "off-diagonal" portion of the matrix 6917 6918 This sets local rows and cannot be used to set off-processor values. 6919 6920 Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a 6921 legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does 6922 not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because 6923 the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to 6924 keep track of the underlying array. Use `MatSetOption`(A,`MAT_NO_OFF_PROC_ENTRIES`,`PETSC_TRUE`) to disable all 6925 communication if it is known that only local entries will be set. 
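
   A minimal sketch on two MPI ranks, each owning one row and one column of
.vb
      [  2  -1 ]
      [ -1   2 ]
.ve
   where `rank` is the caller's rank on the communicator (e.g., from `MPI_Comm_rank()`) and the arrays must stay valid until the matrix is destroyed:
.vb
   PetscInt    i[]  = {0, 1}, j[]  = {0};        /* diagonal block: one entry in the local row, local column 0 */
   PetscInt    oi[] = {0, 1}, oj[] = {1 - rank}; /* off-diagonal block: one entry, global column owned by the other rank */
   PetscScalar a[]  = {2.0}, oa[] = {-1.0};
   Mat         A;

   PetscCall(MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD, 1, 1, PETSC_DETERMINE, PETSC_DETERMINE, i, j, a, oi, oj, oa, &A));
.ve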
6926 6927 .seealso: [](ch_matrices), `Mat`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`, 6928 `MATMPIAIJ`, `MatCreateAIJ()`, `MatCreateMPIAIJWithArrays()` 6929 @*/ 6930 PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt i[], PetscInt j[], PetscScalar a[], PetscInt oi[], PetscInt oj[], PetscScalar oa[], Mat *mat) 6931 { 6932 Mat_MPIAIJ *maij; 6933 6934 PetscFunctionBegin; 6935 PetscCheck(m >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "local number of rows (m) cannot be PETSC_DECIDE, or negative"); 6936 PetscCheck(i[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "i (row indices) must start with 0"); 6937 PetscCheck(oi[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "oi (row indices) must start with 0"); 6938 PetscCall(MatCreate(comm, mat)); 6939 PetscCall(MatSetSizes(*mat, m, n, M, N)); 6940 PetscCall(MatSetType(*mat, MATMPIAIJ)); 6941 maij = (Mat_MPIAIJ *)(*mat)->data; 6942 6943 (*mat)->preallocated = PETSC_TRUE; 6944 6945 PetscCall(PetscLayoutSetUp((*mat)->rmap)); 6946 PetscCall(PetscLayoutSetUp((*mat)->cmap)); 6947 6948 PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, n, i, j, a, &maij->A)); 6949 PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, (*mat)->cmap->N, oi, oj, oa, &maij->B)); 6950 6951 PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE)); 6952 PetscCall(MatAssemblyBegin(*mat, MAT_FINAL_ASSEMBLY)); 6953 PetscCall(MatAssemblyEnd(*mat, MAT_FINAL_ASSEMBLY)); 6954 PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_FALSE)); 6955 PetscCall(MatSetOption(*mat, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE)); 6956 PetscFunctionReturn(PETSC_SUCCESS); 6957 } 6958 6959 typedef struct { 6960 Mat *mp; /* intermediate products */ 6961 PetscBool *mptmp; /* is the intermediate product temporary ? */ 6962 PetscInt cp; /* number of intermediate products */ 6963 6964 /* support for MatGetBrowsOfAoCols_MPIAIJ for P_oth */ 6965 PetscInt *startsj_s, *startsj_r; 6966 PetscScalar *bufa; 6967 Mat P_oth; 6968 6969 /* may take advantage of merging product->B */ 6970 Mat Bloc; /* B-local by merging diag and off-diag */ 6971 6972 /* cusparse does not have support to split between symbolic and numeric phases. 6973 When api_user is true, we don't need to update the numerical values 6974 of the temporary storage */ 6975 PetscBool reusesym; 6976 6977 /* support for COO values insertion */ 6978 PetscScalar *coo_v, *coo_w; /* store on-process and off-process COO scalars, and used as MPI recv/send buffers respectively */ 6979 PetscInt **own; /* own[i] points to address of on-process COO indices for Mat mp[i] */ 6980 PetscInt **off; /* off[i] points to address of off-process COO indices for Mat mp[i] */ 6981 PetscBool hasoffproc; /* if true, have off-process values insertion (i.e. 
AtB or PtAP) */ 6982 PetscSF sf; /* used for non-local values insertion and memory malloc */ 6983 PetscMemType mtype; 6984 6985 /* customization */ 6986 PetscBool abmerge; 6987 PetscBool P_oth_bind; 6988 } MatMatMPIAIJBACKEND; 6989 6990 static PetscErrorCode MatDestroy_MatMatMPIAIJBACKEND(void *data) 6991 { 6992 MatMatMPIAIJBACKEND *mmdata = (MatMatMPIAIJBACKEND *)data; 6993 PetscInt i; 6994 6995 PetscFunctionBegin; 6996 PetscCall(PetscFree2(mmdata->startsj_s, mmdata->startsj_r)); 6997 PetscCall(PetscFree(mmdata->bufa)); 6998 PetscCall(PetscSFFree(mmdata->sf, mmdata->mtype, mmdata->coo_v)); 6999 PetscCall(PetscSFFree(mmdata->sf, mmdata->mtype, mmdata->coo_w)); 7000 PetscCall(MatDestroy(&mmdata->P_oth)); 7001 PetscCall(MatDestroy(&mmdata->Bloc)); 7002 PetscCall(PetscSFDestroy(&mmdata->sf)); 7003 for (i = 0; i < mmdata->cp; i++) PetscCall(MatDestroy(&mmdata->mp[i])); 7004 PetscCall(PetscFree2(mmdata->mp, mmdata->mptmp)); 7005 PetscCall(PetscFree(mmdata->own[0])); 7006 PetscCall(PetscFree(mmdata->own)); 7007 PetscCall(PetscFree(mmdata->off[0])); 7008 PetscCall(PetscFree(mmdata->off)); 7009 PetscCall(PetscFree(mmdata)); 7010 PetscFunctionReturn(PETSC_SUCCESS); 7011 } 7012 7013 /* Copy selected n entries with indices in idx[] of A to v[]. 7014 If idx is NULL, copy the whole data array of A to v[] 7015 */ 7016 static PetscErrorCode MatSeqAIJCopySubArray(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[]) 7017 { 7018 PetscErrorCode (*f)(Mat, PetscInt, const PetscInt[], PetscScalar[]); 7019 7020 PetscFunctionBegin; 7021 PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", &f)); 7022 if (f) { 7023 PetscCall((*f)(A, n, idx, v)); 7024 } else { 7025 const PetscScalar *vv; 7026 7027 PetscCall(MatSeqAIJGetArrayRead(A, &vv)); 7028 if (n && idx) { 7029 PetscScalar *w = v; 7030 const PetscInt *oi = idx; 7031 PetscInt j; 7032 7033 for (j = 0; j < n; j++) *w++ = vv[*oi++]; 7034 } else { 7035 PetscCall(PetscArraycpy(v, vv, n)); 7036 } 7037 PetscCall(MatSeqAIJRestoreArrayRead(A, &vv)); 7038 } 7039 PetscFunctionReturn(PETSC_SUCCESS); 7040 } 7041 7042 static PetscErrorCode MatProductNumeric_MPIAIJBACKEND(Mat C) 7043 { 7044 MatMatMPIAIJBACKEND *mmdata; 7045 PetscInt i, n_d, n_o; 7046 7047 PetscFunctionBegin; 7048 MatCheckProduct(C, 1); 7049 PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty"); 7050 mmdata = (MatMatMPIAIJBACKEND *)C->product->data; 7051 if (!mmdata->reusesym) { /* update temporary matrices */ 7052 if (mmdata->P_oth) PetscCall(MatGetBrowsOfAoCols_MPIAIJ(C->product->A, C->product->B, MAT_REUSE_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth)); 7053 if (mmdata->Bloc) PetscCall(MatMPIAIJGetLocalMatMerge(C->product->B, MAT_REUSE_MATRIX, NULL, &mmdata->Bloc)); 7054 } 7055 mmdata->reusesym = PETSC_FALSE; 7056 7057 for (i = 0; i < mmdata->cp; i++) { 7058 PetscCheck(mmdata->mp[i]->ops->productnumeric, PetscObjectComm((PetscObject)mmdata->mp[i]), PETSC_ERR_PLIB, "Missing numeric op for %s", MatProductTypes[mmdata->mp[i]->product->type]); 7059 PetscCall((*mmdata->mp[i]->ops->productnumeric)(mmdata->mp[i])); 7060 } 7061 for (i = 0, n_d = 0, n_o = 0; i < mmdata->cp; i++) { 7062 PetscInt noff = mmdata->off[i + 1] - mmdata->off[i]; 7063 7064 if (mmdata->mptmp[i]) continue; 7065 if (noff) { 7066 PetscInt nown = mmdata->own[i + 1] - mmdata->own[i]; 7067 7068 PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], noff, mmdata->off[i], mmdata->coo_w + n_o)); 7069 PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], nown, 

static PetscErrorCode MatProductNumeric_MPIAIJBACKEND(Mat C)
{
  MatMatMPIAIJBACKEND *mmdata;
  PetscInt             i, n_d, n_o;

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
  mmdata = (MatMatMPIAIJBACKEND *)C->product->data;
  if (!mmdata->reusesym) { /* update temporary matrices */
    if (mmdata->P_oth) PetscCall(MatGetBrowsOfAoCols_MPIAIJ(C->product->A, C->product->B, MAT_REUSE_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth));
    if (mmdata->Bloc) PetscCall(MatMPIAIJGetLocalMatMerge(C->product->B, MAT_REUSE_MATRIX, NULL, &mmdata->Bloc));
  }
  mmdata->reusesym = PETSC_FALSE;

  for (i = 0; i < mmdata->cp; i++) {
    PetscCheck(mmdata->mp[i]->ops->productnumeric, PetscObjectComm((PetscObject)mmdata->mp[i]), PETSC_ERR_PLIB, "Missing numeric op for %s", MatProductTypes[mmdata->mp[i]->product->type]);
    PetscCall((*mmdata->mp[i]->ops->productnumeric)(mmdata->mp[i]));
  }
  for (i = 0, n_d = 0, n_o = 0; i < mmdata->cp; i++) {
    PetscInt noff = mmdata->off[i + 1] - mmdata->off[i];

    if (mmdata->mptmp[i]) continue;
    if (noff) {
      PetscInt nown = mmdata->own[i + 1] - mmdata->own[i];

      PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], noff, mmdata->off[i], mmdata->coo_w + n_o));
      PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], nown, mmdata->own[i], mmdata->coo_v + n_d));
      n_o += noff;
      n_d += nown;
    } else {
      Mat_SeqAIJ *mm = (Mat_SeqAIJ *)mmdata->mp[i]->data;

      PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], mm->nz, NULL, mmdata->coo_v + n_d));
      n_d += mm->nz;
    }
  }
  if (mmdata->hasoffproc) { /* off-process insertion */
    PetscCall(PetscSFGatherBegin(mmdata->sf, MPIU_SCALAR, mmdata->coo_w, mmdata->coo_v + n_d));
    PetscCall(PetscSFGatherEnd(mmdata->sf, MPIU_SCALAR, mmdata->coo_w, mmdata->coo_v + n_d));
  }
  PetscCall(MatSetValuesCOO(C, mmdata->coo_v, INSERT_VALUES));
  PetscFunctionReturn(PETSC_SUCCESS);
}
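
/* A sketch of the coo_v layout produced by the numeric phase above (a restatement of the
   code, not an API contract):

     coo_v = [ on-process values of the kept mp[i] | values received from other ranks ]
             |<---------------- n_d -------------->|<---- filled by PetscSFGather ---->|

   Off-process values are first packed into the send buffer coo_w, then delivered to the
   tail of coo_v, matching the (i,j) ordering established once in the symbolic phase. */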

/* Support for Pt * A, A * P, or Pt * A * P */
#define MAX_NUMBER_INTERMEDIATE 4
PetscErrorCode MatProductSymbolic_MPIAIJBACKEND(Mat C)
{
  Mat_Product           *product = C->product;
  Mat                    A, P, mp[MAX_NUMBER_INTERMEDIATE]; /* A, P and a series of intermediate matrices */
  Mat_MPIAIJ            *a, *p;
  MatMatMPIAIJBACKEND   *mmdata;
  ISLocalToGlobalMapping P_oth_l2g = NULL;
  IS                     glob      = NULL;
  const char            *prefix;
  char                   pprefix[256];
  const PetscInt        *globidx, *P_oth_idx;
  PetscInt               i, j, cp, m, n, M, N, *coo_i, *coo_j;
  PetscCount             ncoo, ncoo_d, ncoo_o, ncoo_oown;
  PetscInt               cmapt[MAX_NUMBER_INTERMEDIATE], rmapt[MAX_NUMBER_INTERMEDIATE]; /* col/row map type for each Mat in mp[]: */
  /* type-0: consecutive, start from 0; type-1: consecutive with */
  /* a base offset; type-2: sparse with a local to global map table */
  const PetscInt *cmapa[MAX_NUMBER_INTERMEDIATE], *rmapa[MAX_NUMBER_INTERMEDIATE]; /* col/row local to global map array (table) for type-2 map type */

  MatProductType ptype;
  PetscBool      mptmp[MAX_NUMBER_INTERMEDIATE], hasoffproc = PETSC_FALSE, iscuda, iship, iskokk;
  PetscMPIInt    size;

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(!product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data not empty");
  ptype = product->type;
  if (product->A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
    ptype                                          = MATPRODUCT_AB;
    product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE;
  }
  switch (ptype) {
  case MATPRODUCT_AB:
    A          = product->A;
    P          = product->B;
    m          = A->rmap->n;
    n          = P->cmap->n;
    M          = A->rmap->N;
    N          = P->cmap->N;
    hasoffproc = PETSC_FALSE; /* will not scatter mat product values to other processes */
    break;
  case MATPRODUCT_AtB:
    P          = product->A;
    A          = product->B;
    m          = P->cmap->n;
    n          = A->cmap->n;
    M          = P->cmap->N;
    N          = A->cmap->N;
    hasoffproc = PETSC_TRUE;
    break;
  case MATPRODUCT_PtAP:
    A          = product->A;
    P          = product->B;
    m          = P->cmap->n;
    n          = P->cmap->n;
    M          = P->cmap->N;
    N          = P->cmap->N;
    hasoffproc = PETSC_TRUE;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Not for product type %s", MatProductTypes[ptype]);
  }
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)C), &size));
  if (size == 1) hasoffproc = PETSC_FALSE;

  /* defaults */
  for (i = 0; i < MAX_NUMBER_INTERMEDIATE; i++) {
    mp[i]    = NULL;
    mptmp[i] = PETSC_FALSE;
    rmapt[i] = -1;
    cmapt[i] = -1;
    rmapa[i] = NULL;
    cmapa[i] = NULL;
  }

  /* customization */
  PetscCall(PetscNew(&mmdata));
  mmdata->reusesym = product->api_user;
  if (ptype == MATPRODUCT_AB) {
    if (product->api_user) {
      PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
      PetscCall(PetscOptionsBool("-matmatmult_backend_mergeB", "Merge product->B local matrices", "MatMatMult", mmdata->abmerge, &mmdata->abmerge, NULL));
      PetscCall(PetscOptionsBool("-matmatmult_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
      PetscOptionsEnd();
    } else {
      PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
      PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_mergeB", "Merge product->B local matrices", "MatMatMult", mmdata->abmerge, &mmdata->abmerge, NULL));
      PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
      PetscOptionsEnd();
    }
  } else if (ptype == MATPRODUCT_PtAP) {
    if (product->api_user) {
      PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
      PetscCall(PetscOptionsBool("-matptap_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
      PetscOptionsEnd();
    } else {
      PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
      PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
      PetscOptionsEnd();
    }
  }
  a = (Mat_MPIAIJ *)A->data;
  p = (Mat_MPIAIJ *)P->data;
  PetscCall(MatSetSizes(C, m, n, M, N));
  PetscCall(PetscLayoutSetUp(C->rmap));
  PetscCall(PetscLayoutSetUp(C->cmap));
  PetscCall(MatSetType(C, ((PetscObject)A)->type_name));
  PetscCall(MatGetOptionsPrefix(C, &prefix));

  cp = 0;
  switch (ptype) {
  case MATPRODUCT_AB: /* A * P */
    PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth));

    /* A_diag * P_local (merged or not) */
    if (mmdata->abmerge) { /* P's diagonal and off-diag blocks are merged into one matrix, then multiplied by A_diag */
      /* P is product->B */
      PetscCall(MatMPIAIJGetLocalMatMerge(P, MAT_INITIAL_MATRIX, &glob, &mmdata->Bloc));
      PetscCall(MatProductCreate(a->A, mmdata->Bloc, NULL, &mp[cp]));
      PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
      PetscCall(MatProductSetFill(mp[cp], product->fill));
      PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
      PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
      PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
      mp[cp]->product->api_user = product->api_user;
      PetscCall(MatProductSetFromOptions(mp[cp]));
      PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
      PetscCall(ISGetIndices(glob, &globidx));
      rmapt[cp] = 1;
      cmapt[cp] = 2;
      cmapa[cp] = globidx;
      mptmp[cp] = PETSC_FALSE;
      cp++;
    } else { /* A_diag * P_diag and A_diag * P_off */
      PetscCall(MatProductCreate(a->A, p->A, NULL, &mp[cp]));
      PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
      PetscCall(MatProductSetFill(mp[cp], product->fill));
      PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
      PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
      PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
      mp[cp]->product->api_user = product->api_user;
      PetscCall(MatProductSetFromOptions(mp[cp]));
      PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
      rmapt[cp] = 1;
      cmapt[cp] = 1;
      mptmp[cp] = PETSC_FALSE;
      cp++;
      PetscCall(MatProductCreate(a->A, p->B, NULL, &mp[cp]));
      PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
      PetscCall(MatProductSetFill(mp[cp], product->fill));
      PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
      PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
      PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
      mp[cp]->product->api_user = product->api_user;
      PetscCall(MatProductSetFromOptions(mp[cp]));
      PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
      rmapt[cp] = 1;
      cmapt[cp] = 2;
      cmapa[cp] = p->garray;
      mptmp[cp] = PETSC_FALSE;
      cp++;
    }

    /* A_off * P_other */
    if (mmdata->P_oth) {
      PetscCall(MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth, &P_oth_l2g)); /* make P_oth use local col ids */
      PetscCall(ISLocalToGlobalMappingGetIndices(P_oth_l2g, &P_oth_idx));
      PetscCall(MatSetType(mmdata->P_oth, ((PetscObject)(a->B))->type_name));
      PetscCall(MatBindToCPU(mmdata->P_oth, mmdata->P_oth_bind));
      PetscCall(MatProductCreate(a->B, mmdata->P_oth, NULL, &mp[cp]));
      PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
      PetscCall(MatProductSetFill(mp[cp], product->fill));
      PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
      PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
      PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
      mp[cp]->product->api_user = product->api_user;
      PetscCall(MatProductSetFromOptions(mp[cp]));
      PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
      rmapt[cp] = 1;
      cmapt[cp] = 2;
      cmapa[cp] = P_oth_idx;
      mptmp[cp] = PETSC_FALSE;
      cp++;
    }
    break;
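
    /* In matrix terms, the AB branch above assembles C's local rows as
         C_loc = A_diag * P_loc + A_off * P_oth,
       where P_loc is P's local part (merged, or split into P_diag and P_off) and P_oth holds
       the remote rows of P referenced by A_off; no product values leave this process. */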

  case MATPRODUCT_AtB: /* (P^t * A): P_diag^T * A_loc + P_off^T * A_loc */
    /* A is product->B */
    PetscCall(MatMPIAIJGetLocalMatMerge(A, MAT_INITIAL_MATRIX, &glob, &mmdata->Bloc));
    if (A == P) { /* when A == P, we can take advantage of the already merged mmdata->Bloc */
      PetscCall(MatProductCreate(mmdata->Bloc, mmdata->Bloc, NULL, &mp[cp]));
      PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
      PetscCall(MatProductSetFill(mp[cp], product->fill));
      PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
      PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
      PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
      mp[cp]->product->api_user = product->api_user;
      PetscCall(MatProductSetFromOptions(mp[cp]));
      PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
      PetscCall(ISGetIndices(glob, &globidx));
      rmapt[cp] = 2;
      rmapa[cp] = globidx;
      cmapt[cp] = 2;
      cmapa[cp] = globidx;
      mptmp[cp] = PETSC_FALSE;
      cp++;
    } else {
      PetscCall(MatProductCreate(p->A, mmdata->Bloc, NULL, &mp[cp]));
      PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
      PetscCall(MatProductSetFill(mp[cp], product->fill));
      PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
      PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
      PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
      mp[cp]->product->api_user = product->api_user;
      PetscCall(MatProductSetFromOptions(mp[cp]));
      PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
      PetscCall(ISGetIndices(glob, &globidx));
      rmapt[cp] = 1;
      cmapt[cp] = 2;
      cmapa[cp] = globidx;
      mptmp[cp] = PETSC_FALSE;
      cp++;
      PetscCall(MatProductCreate(p->B, mmdata->Bloc, NULL, &mp[cp]));
      PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
      PetscCall(MatProductSetFill(mp[cp], product->fill));
      PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
      PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
      PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
      mp[cp]->product->api_user = product->api_user;
      PetscCall(MatProductSetFromOptions(mp[cp]));
      PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
      rmapt[cp] = 2;
      rmapa[cp] = p->garray;
      cmapt[cp] = 2;
      cmapa[cp] = globidx;
      mptmp[cp] = PETSC_FALSE;
      cp++;
    }
    break;
  case MATPRODUCT_PtAP:
    PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth));
    /* P is product->B */
    PetscCall(MatMPIAIJGetLocalMatMerge(P, MAT_INITIAL_MATRIX, &glob, &mmdata->Bloc));
    PetscCall(MatProductCreate(a->A, mmdata->Bloc, NULL, &mp[cp]));
    PetscCall(MatProductSetType(mp[cp], MATPRODUCT_PtAP));
    PetscCall(MatProductSetFill(mp[cp], product->fill));
    PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
    PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
    PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
    mp[cp]->product->api_user = product->api_user;
    PetscCall(MatProductSetFromOptions(mp[cp]));
    PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
    PetscCall(ISGetIndices(glob, &globidx));
    rmapt[cp] = 2;
    rmapa[cp] = globidx;
    cmapt[cp] = 2;
    cmapa[cp] = globidx;
    mptmp[cp] = PETSC_FALSE;
    cp++;
    if (mmdata->P_oth) {
      PetscCall(MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth, &P_oth_l2g));
      PetscCall(ISLocalToGlobalMappingGetIndices(P_oth_l2g, &P_oth_idx));
      PetscCall(MatSetType(mmdata->P_oth, ((PetscObject)(a->B))->type_name));
      PetscCall(MatBindToCPU(mmdata->P_oth, mmdata->P_oth_bind));
      PetscCall(MatProductCreate(a->B, mmdata->P_oth, NULL, &mp[cp]));
      PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
      PetscCall(MatProductSetFill(mp[cp], product->fill));
      PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
      PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
      PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
      mp[cp]->product->api_user = product->api_user;
      PetscCall(MatProductSetFromOptions(mp[cp]));
      PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
      mptmp[cp] = PETSC_TRUE;
      cp++;
      PetscCall(MatProductCreate(mmdata->Bloc, mp[1], NULL, &mp[cp]));
      PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
      PetscCall(MatProductSetFill(mp[cp], product->fill));
      PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
      PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
      PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
      mp[cp]->product->api_user = product->api_user;
      PetscCall(MatProductSetFromOptions(mp[cp]));
      PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
      rmapt[cp] = 2;
      rmapa[cp] = globidx;
      cmapt[cp] = 2;
      cmapa[cp] = P_oth_idx;
      mptmp[cp] = PETSC_FALSE;
      cp++;
    }
    break;
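
    /* The PtAP branch stages the product as
         C = P_loc^T * A_diag * P_loc  +  P_loc^T * (A_off * P_oth),
       where the A_off * P_oth intermediate is marked temporary (mptmp) and is consumed by the
       final AtB product; rows/columns are mapped back to global indices via glob and P_oth_idx. */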

  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Not for product type %s", MatProductTypes[ptype]);
  }

  /* sanity check */
  if (size > 1)
    for (i = 0; i < cp; i++) PetscCheck(rmapt[i] != 2 || hasoffproc, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unexpected offproc map type for product %" PetscInt_FMT, i);

  PetscCall(PetscMalloc2(cp, &mmdata->mp, cp, &mmdata->mptmp));
  for (i = 0; i < cp; i++) {
    mmdata->mp[i]    = mp[i];
    mmdata->mptmp[i] = mptmp[i];
  }
  mmdata->cp             = cp;
  C->product->data       = mmdata;
  C->product->destroy    = MatDestroy_MatMatMPIAIJBACKEND;
  C->ops->productnumeric = MatProductNumeric_MPIAIJBACKEND;

  /* memory type */
  mmdata->mtype = PETSC_MEMTYPE_HOST;
  PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &iscuda, MATSEQAIJCUSPARSE, MATMPIAIJCUSPARSE, ""));
  PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &iship, MATSEQAIJHIPSPARSE, MATMPIAIJHIPSPARSE, ""));
  PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &iskokk, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, ""));
  if (iscuda) mmdata->mtype = PETSC_MEMTYPE_CUDA;
  else if (iship) mmdata->mtype = PETSC_MEMTYPE_HIP;
  else if (iskokk) mmdata->mtype = PETSC_MEMTYPE_KOKKOS;

  /* prepare COO coordinates for values insertion */

  /* count total nonzeros of those intermediate seqaij Mats
     ncoo_d:    # of nonzeros of matrices that do not have offproc entries
     ncoo_o:    # of nonzeros (of matrices that might have offproc entries) that will be inserted to remote procs
     ncoo_oown: # of nonzeros (of matrices that might have offproc entries) that will be inserted locally
  */
  for (cp = 0, ncoo_d = 0, ncoo_o = 0, ncoo_oown = 0; cp < mmdata->cp; cp++) {
    Mat_SeqAIJ *mm = (Mat_SeqAIJ *)mp[cp]->data;

    if (mptmp[cp]) continue;
    if (rmapt[cp] == 2 && hasoffproc) { /* the rows need to be scattered to all processes (might include self) */
      const PetscInt *rmap = rmapa[cp];
      const PetscInt  mr   = mp[cp]->rmap->n;
      const PetscInt  rs   = C->rmap->rstart;
      const PetscInt  re   = C->rmap->rend;
      const PetscInt *ii   = mm->i;

      for (i = 0; i < mr; i++) {
        const PetscInt gr = rmap[i];
        const PetscInt nz = ii[i + 1] - ii[i];

        if (gr < rs || gr >= re) ncoo_o += nz; /* this row is offproc */
        else ncoo_oown += nz;                  /* this row is local */
      }
    } else ncoo_d += mm->nz;
  }

  /*
    ncoo: total number of nonzeros (including those inserted by remote procs) belonging to this proc

    ncoo = ncoo_d + ncoo_oown + ncoo2, where ncoo2 is the number of nonzeros inserted to me by other procs.

    off[0] points to a big index array, which is shared by off[1,2,...]. Similarly for own[0].

    off[p]: points to the segment for matrix mp[p], storing the locations of nonzeros that mp[p] will insert to others
    own[p]: points to the segment for matrix mp[p], storing the locations of nonzeros that mp[p] will insert locally
    so off[p+1]-off[p] is the number of nonzeros that mp[p] will send to others.

    coo_i/j/v[]: [ncoo] row/col/val of nonzeros belonging to this proc.
    Ex. coo_i[]: the beginning part (of size ncoo_d + ncoo_oown) stores i of local nonzeros, and the remaining part stores i of nonzeros I will receive.
  */
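
  /* A tiny worked example of the bookkeeping above (illustrative numbers): with cp = 2,
     suppose mp[0] contributes 3 off-process and 4 on-process nonzeros, and mp[1] contributes
     0 and 2. Then off[] holds the pointers {base_o, base_o + 3, base_o + 3} into one shared
     index array, own[] holds {base_d, base_d + 4, base_d + 6}, and the differences
     off[p+1] - off[p] and own[p+1] - own[p] recover the per-product counts used in
     MatProductNumeric_MPIAIJBACKEND(). */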

  PetscCall(PetscCalloc1(mmdata->cp + 1, &mmdata->off)); /* +1 to make a csr-like data structure */
  PetscCall(PetscCalloc1(mmdata->cp + 1, &mmdata->own));

  /* gather (i,j) of nonzeros inserted by remote procs */
  if (hasoffproc) {
    PetscSF  msf;
    PetscInt ncoo2, *coo_i2, *coo_j2;

    PetscCall(PetscMalloc1(ncoo_o, &mmdata->off[0]));
    PetscCall(PetscMalloc1(ncoo_oown, &mmdata->own[0]));
    PetscCall(PetscMalloc2(ncoo_o, &coo_i, ncoo_o, &coo_j)); /* to collect (i,j) of entries to be sent to others */

    for (cp = 0, ncoo_o = 0; cp < mmdata->cp; cp++) {
      Mat_SeqAIJ *mm     = (Mat_SeqAIJ *)mp[cp]->data;
      PetscInt   *idxoff = mmdata->off[cp];
      PetscInt   *idxown = mmdata->own[cp];

      if (!mptmp[cp] && rmapt[cp] == 2) { /* row map is sparse */
        const PetscInt *rmap = rmapa[cp];
        const PetscInt *cmap = cmapa[cp];
        const PetscInt *ii   = mm->i;
        PetscInt       *coi  = coo_i + ncoo_o;
        PetscInt       *coj  = coo_j + ncoo_o;
        const PetscInt  mr   = mp[cp]->rmap->n;
        const PetscInt  rs   = C->rmap->rstart;
        const PetscInt  re   = C->rmap->rend;
        const PetscInt  cs   = C->cmap->rstart;

        for (i = 0; i < mr; i++) {
          const PetscInt *jj = mm->j + ii[i];
          const PetscInt  gr = rmap[i];
          const PetscInt  nz = ii[i + 1] - ii[i];

          if (gr < rs || gr >= re) { /* this is an offproc row */
            for (j = ii[i]; j < ii[i + 1]; j++) {
              *coi++    = gr;
              *idxoff++ = j;
            }
            if (!cmapt[cp]) { /* already global */
              for (j = 0; j < nz; j++) *coj++ = jj[j];
            } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
              for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
            } else { /* offdiag */
              for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
            }
            ncoo_o += nz;
          } else { /* this is a local row */
            for (j = ii[i]; j < ii[i + 1]; j++) *idxown++ = j;
          }
        }
      }
      mmdata->off[cp + 1] = idxoff;
      mmdata->own[cp + 1] = idxown;
    }

    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)C), &mmdata->sf));
    PetscCall(PetscSFSetGraphLayout(mmdata->sf, C->rmap, ncoo_o /*nleaves*/, NULL /*ilocal*/, PETSC_OWN_POINTER, coo_i));
    PetscCall(PetscSFGetMultiSF(mmdata->sf, &msf));
    PetscCall(PetscSFGetGraph(msf, &ncoo2 /*nroots*/, NULL, NULL, NULL));
    ncoo = ncoo_d + ncoo_oown + ncoo2;
    PetscCall(PetscMalloc2(ncoo, &coo_i2, ncoo, &coo_j2));
    PetscCall(PetscSFGatherBegin(mmdata->sf, MPIU_INT, coo_i, coo_i2 + ncoo_d + ncoo_oown)); /* put (i,j) of remote nonzeros at the back */
    PetscCall(PetscSFGatherEnd(mmdata->sf, MPIU_INT, coo_i, coo_i2 + ncoo_d + ncoo_oown));
    PetscCall(PetscSFGatherBegin(mmdata->sf, MPIU_INT, coo_j, coo_j2 + ncoo_d + ncoo_oown));
    PetscCall(PetscSFGatherEnd(mmdata->sf, MPIU_INT, coo_j, coo_j2 + ncoo_d + ncoo_oown));
    PetscCall(PetscFree2(coo_i, coo_j));
    /* allocate MPI send buffer to collect nonzero values to be sent to remote procs */
    PetscCall(PetscSFMalloc(mmdata->sf, mmdata->mtype, ncoo_o * sizeof(PetscScalar), (void **)&mmdata->coo_w));
    coo_i = coo_i2;
    coo_j = coo_j2;
  } else { /* no offproc values insertion */
    ncoo = ncoo_d;
    PetscCall(PetscMalloc2(ncoo, &coo_i, ncoo, &coo_j));

    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)C), &mmdata->sf));
    PetscCall(PetscSFSetGraph(mmdata->sf, 0, 0, NULL, PETSC_OWN_POINTER, NULL, PETSC_OWN_POINTER));
    PetscCall(PetscSFSetUp(mmdata->sf));
  }
  mmdata->hasoffproc = hasoffproc;

  /* gather (i,j) of nonzeros inserted locally */
  for (cp = 0, ncoo_d = 0; cp < mmdata->cp; cp++) {
    Mat_SeqAIJ     *mm   = (Mat_SeqAIJ *)mp[cp]->data;
    PetscInt       *coi  = coo_i + ncoo_d;
    PetscInt       *coj  = coo_j + ncoo_d;
    const PetscInt *jj   = mm->j;
    const PetscInt *ii   = mm->i;
    const PetscInt *cmap = cmapa[cp];
    const PetscInt *rmap = rmapa[cp];
    const PetscInt  mr   = mp[cp]->rmap->n;
    const PetscInt  rs   = C->rmap->rstart;
    const PetscInt  re   = C->rmap->rend;
    const PetscInt  cs   = C->cmap->rstart;

    if (mptmp[cp]) continue;
    if (rmapt[cp] == 1) { /* consecutive rows */
      /* fill coo_i */
      for (i = 0; i < mr; i++) {
        const PetscInt gr = i + rs;
        for (j = ii[i]; j < ii[i + 1]; j++) coi[j] = gr;
      }
      /* fill coo_j */
      if (!cmapt[cp]) { /* type-0, already global */
        PetscCall(PetscArraycpy(coj, jj, mm->nz));
      } else if (cmapt[cp] == 1) { /* type-1, local to global for consecutive columns of C */
        for (j = 0; j < mm->nz; j++) coj[j] = jj[j] + cs; /* lid + col start */
      } else { /* type-2, local to global for sparse columns */
        for (j = 0; j < mm->nz; j++) coj[j] = cmap[jj[j]];
      }
      ncoo_d += mm->nz;
    } else if (rmapt[cp] == 2) { /* sparse rows */
      for (i = 0; i < mr; i++) {
        const PetscInt *jj = mm->j + ii[i];
        const PetscInt  gr = rmap[i];
        const PetscInt  nz = ii[i + 1] - ii[i];

        if (gr >= rs && gr < re) { /* local rows */
          for (j = ii[i]; j < ii[i + 1]; j++) *coi++ = gr;
          if (!cmapt[cp]) { /* type-0, already global */
            for (j = 0; j < nz; j++) *coj++ = jj[j];
          } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
            for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
          } else { /* type-2, local to global for sparse columns */
            for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
          }
          ncoo_d += nz;
        }
      }
    }
  }
  if (glob) PetscCall(ISRestoreIndices(glob, &globidx));
  PetscCall(ISDestroy(&glob));
  if (P_oth_l2g) PetscCall(ISLocalToGlobalMappingRestoreIndices(P_oth_l2g, &P_oth_idx));
  PetscCall(ISLocalToGlobalMappingDestroy(&P_oth_l2g));
  /* allocate an array to store all nonzeros (inserted locally or remotely) belonging to this proc */
  PetscCall(PetscSFMalloc(mmdata->sf, mmdata->mtype, ncoo * sizeof(PetscScalar), (void **)&mmdata->coo_v));

  /* preallocate with COO data */
  PetscCall(MatSetPreallocationCOO(C, ncoo, coo_i, coo_j));
  PetscCall(PetscFree2(coo_i, coo_j));
  PetscFunctionReturn(PETSC_SUCCESS);
}
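
/* Summary of the row/column map types used by the symbolic phase above (a restatement of the
   code): for a local index lid of an intermediate product mp[cp],
     type-0: global index = lid                  (indices already global)
     type-1: global index = lid + rs (or cs)     (consecutive block owned by this rank)
     type-2: global index = rmapa[cp][lid] (or cmapa[cp][lid], e.g. garray or the indices of
             the IS returned by MatMPIAIJGetLocalMatMerge())
   with rs = C->rmap->rstart and cs = C->cmap->rstart. */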

PetscErrorCode MatProductSetFromOptions_MPIAIJBACKEND(Mat mat)
{
  Mat_Product *product = mat->product;
#if defined(PETSC_HAVE_DEVICE)
  PetscBool match  = PETSC_FALSE;
  PetscBool usecpu = PETSC_FALSE;
#else
  PetscBool match = PETSC_TRUE;
#endif

  PetscFunctionBegin;
  MatCheckProduct(mat, 1);
#if defined(PETSC_HAVE_DEVICE)
  if (!product->A->boundtocpu && !product->B->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->B, ((PetscObject)product->A)->type_name, &match));
  if (match) { /* we can always fall back to the CPU if requested */
    switch (product->type) {
    case MATPRODUCT_AB:
      if (product->api_user) {
        PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMult", "Mat");
        PetscCall(PetscOptionsBool("-matmatmult_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
        PetscOptionsEnd();
      } else {
        PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AB", "Mat");
        PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
        PetscOptionsEnd();
      }
      break;
    case MATPRODUCT_AtB:
      if (product->api_user) {
        PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatTransposeMatMult", "Mat");
        PetscCall(PetscOptionsBool("-mattransposematmult_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
        PetscOptionsEnd();
      } else {
        PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AtB", "Mat");
        PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
        PetscOptionsEnd();
      }
      break;
    case MATPRODUCT_PtAP:
      if (product->api_user) {
        PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatPtAP", "Mat");
        PetscCall(PetscOptionsBool("-matptap_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
        PetscOptionsEnd();
      } else {
        PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_PtAP", "Mat");
        PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
        PetscOptionsEnd();
      }
      break;
    default:
      break;
    }
    match = (PetscBool)!usecpu;
  }
#endif
  if (match) {
    switch (product->type) {
    case MATPRODUCT_AB:
    case MATPRODUCT_AtB:
    case MATPRODUCT_PtAP:
      mat->ops->productsymbolic = MatProductSymbolic_MPIAIJBACKEND;
      break;
    default:
      break;
    }
  }
  /* fall back to MPIAIJ ops */
  if (!mat->ops->productsymbolic) PetscCall(MatProductSetFromOptions_MPIAIJ(mat));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  Produces a set of block column indices of the matrix row, one for each block represented in the original row

  n  - the number of block indices in cc[]
  cc - the block indices (must be large enough to contain the indices)
*/
static inline PetscErrorCode MatCollapseRow(Mat Amat, PetscInt row, PetscInt bs, PetscInt *n, PetscInt *cc)
{
  PetscInt        cnt = -1, nidx, j;
  const PetscInt *idx;

  PetscFunctionBegin;
  PetscCall(MatGetRow(Amat, row, &nidx, &idx, NULL));
  if (nidx) {
    cnt     = 0;
    cc[cnt] = idx[0] / bs;
    for (j = 1; j < nidx; j++) {
      if (cc[cnt] < idx[j] / bs) cc[++cnt] = idx[j] / bs;
    }
  }
  PetscCall(MatRestoreRow(Amat, row, &nidx, &idx, NULL));
  *n = cnt + 1;
  PetscFunctionReturn(PETSC_SUCCESS);
}
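
/* For instance (a sketch): with bs = 2 and a row whose column indices are {0, 1, 4, 5},
   MatCollapseRow() returns n = 2 and cc[] = {0, 2}: consecutive indices falling in the same
   size-bs block collapse to the single block index idx/bs. The logic relies on the column
   indices returned by MatGetRow() being sorted, which holds for AIJ matrices. */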

/*
  Produces a set of block column indices of the matrix block row, one for each block represented in the original set of rows

  ncollapsed - the number of block indices
  collapsed  - the block indices (must be large enough to contain the indices)
*/
static inline PetscErrorCode MatCollapseRows(Mat Amat, PetscInt start, PetscInt bs, PetscInt *w0, PetscInt *w1, PetscInt *w2, PetscInt *ncollapsed, PetscInt **collapsed)
{
  PetscInt i, nprev, *cprev = w0, ncur = 0, *ccur = w1, *merged = w2, *cprevtmp;

  PetscFunctionBegin;
  PetscCall(MatCollapseRow(Amat, start, bs, &nprev, cprev));
  for (i = start + 1; i < start + bs; i++) {
    PetscCall(MatCollapseRow(Amat, i, bs, &ncur, ccur));
    PetscCall(PetscMergeIntArray(nprev, cprev, ncur, ccur, &nprev, &merged));
    cprevtmp = cprev;
    cprev    = merged;
    merged   = cprevtmp;
  }
  *ncollapsed = nprev;
  if (collapsed) *collapsed = cprev;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  MatCreateGraph_Simple_AIJ - create a simple scalar matrix (graph) from a potentially blocked matrix

  Input Parameters:
+ Amat       - the matrix
. symmetrize - make the result symmetric
. scale      - scale with the diagonal
. filter     - filter tolerance; entries below it are dropped when it is nonnegative
. index_size - size of index[], or 0 to use every entry of each block
- index      - rows/columns within each block to use for the block weight when index_size > 0

  Output Parameter:
. a_Gmat - output scalar graph (entries >= 0)
*/
PETSC_INTERN PetscErrorCode MatCreateGraph_Simple_AIJ(Mat Amat, PetscBool symmetrize, PetscBool scale, PetscReal filter, PetscInt index_size, PetscInt index[], Mat *a_Gmat)
{
  PetscInt  Istart, Iend, Ii, jj, kk, ncols, nloc, NN, MM, bs;
  MPI_Comm  comm;
  Mat       Gmat;
  PetscBool ismpiaij, isseqaij;
  Mat       a, b, c;
  MatType   jtype;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
  PetscCall(MatGetOwnershipRange(Amat, &Istart, &Iend));
  PetscCall(MatGetSize(Amat, &MM, &NN));
  PetscCall(MatGetBlockSize(Amat, &bs));
  nloc = (Iend - Istart) / bs;

  PetscCall(PetscObjectBaseTypeCompare((PetscObject)Amat, MATSEQAIJ, &isseqaij));
  PetscCall(PetscObjectBaseTypeCompare((PetscObject)Amat, MATMPIAIJ, &ismpiaij));
  PetscCheck(isseqaij || ismpiaij, comm, PETSC_ERR_USER, "Require (MPI)AIJ matrix type");

  /* TODO GPU: these calls are potentially expensive if matrices are large and we want to use the GPU */
  /* A solution would be to provide a new API, MatAIJGetCollapsedAIJ, for which each class can provide a fast
     implementation */
  if (bs > 1) {
    PetscCall(MatGetType(Amat, &jtype));
    PetscCall(MatCreate(comm, &Gmat));
    PetscCall(MatSetType(Gmat, jtype));
    PetscCall(MatSetSizes(Gmat, nloc, nloc, PETSC_DETERMINE, PETSC_DETERMINE));
    PetscCall(MatSetBlockSizes(Gmat, 1, 1));
    if (isseqaij || ((Mat_MPIAIJ *)Amat->data)->garray) {
      PetscInt  *d_nnz, *o_nnz;
      MatScalar *aa, val, *AA;
      PetscInt  *aj, *ai, *AJ, nc, nmax = 0;

      if (isseqaij) {
        a = Amat;
        b = NULL;
      } else {
        Mat_MPIAIJ *d = (Mat_MPIAIJ *)Amat->data;
        a = d->A;
        b = d->B;
      }
      PetscCall(PetscInfo(Amat, "New bs>1 Graph. nloc=%" PetscInt_FMT "\n", nloc));
      PetscCall(PetscMalloc2(nloc, &d_nnz, isseqaij ? 0 : nloc, &o_nnz));
      for (c = a, kk = 0; c && kk < 2; c = b, kk++) {
        PetscInt       *nnz = (c == a) ? d_nnz : o_nnz;
        const PetscInt *cols1, *cols2;

        for (PetscInt brow = 0, nc1, nc2, ok = 1; brow < nloc * bs; brow += bs) { // block rows
          PetscCall(MatGetRow(c, brow, &nc2, &cols2, NULL));
          nnz[brow / bs] = nc2 / bs;
          if (nc2 % bs) ok = 0;
          if (nnz[brow / bs] > nmax) nmax = nnz[brow / bs];
          for (PetscInt ii = 1; ii < bs; ii++) { // check for non-dense blocks
            PetscCall(MatGetRow(c, brow + ii, &nc1, &cols1, NULL));
            if (nc1 != nc2) ok = 0;
            else {
              for (PetscInt jj = 0; jj < nc1 && ok == 1; jj++) {
                if (cols1[jj] != cols2[jj]) ok = 0;
                if (cols1[jj] % bs != jj % bs) ok = 0;
              }
            }
            PetscCall(MatRestoreRow(c, brow + ii, &nc1, &cols1, NULL));
          }
          PetscCall(MatRestoreRow(c, brow, &nc2, &cols2, NULL));
          if (!ok) {
            PetscCall(PetscFree2(d_nnz, o_nnz));
            PetscCall(PetscInfo(Amat, "Found sparse blocks - revert to slow method\n"));
            goto old_bs;
          }
        }
      }
      PetscCall(MatSeqAIJSetPreallocation(Gmat, 0, d_nnz));
      PetscCall(MatMPIAIJSetPreallocation(Gmat, 0, d_nnz, 0, o_nnz));
      PetscCall(PetscFree2(d_nnz, o_nnz));
      PetscCall(PetscMalloc2(nmax, &AA, nmax, &AJ));
      // diag
      for (PetscInt brow = 0, n, grow; brow < nloc * bs; brow += bs) { // block rows
        Mat_SeqAIJ *aseq = (Mat_SeqAIJ *)a->data;

        ai = aseq->i;
        n  = ai[brow + 1] - ai[brow];
        aj = aseq->j + ai[brow];
        for (int k = 0; k < n; k += bs) {        // block columns
          AJ[k / bs] = aj[k] / bs + Istart / bs; // diag starts at (Istart,Istart)
          val        = 0;
          if (index_size == 0) {
            for (int ii = 0; ii < bs; ii++) { // rows in block
              aa = aseq->a + ai[brow + ii] + k;
              for (int jj = 0; jj < bs; jj++) {         // columns in block
                val += PetscAbs(PetscRealPart(aa[jj])); // a sort of norm
              }
            }
          } else { // use (index,index) values if provided
            for (int iii = 0; iii < index_size; iii++) { // rows in block
              int ii = index[iii];
              aa     = aseq->a + ai[brow + ii] + k;
              for (int jjj = 0; jjj < index_size; jjj++) { // columns in block
                int jj = index[jjj];
                val += PetscAbs(PetscRealPart(aa[jj]));
              }
            }
          }
          PetscAssert(k / bs < nmax, comm, PETSC_ERR_USER, "k / bs (%d) >= nmax (%d)", (int)(k / bs), (int)nmax);
          AA[k / bs] = val;
        }
        grow = Istart / bs + brow / bs;
        PetscCall(MatSetValues(Gmat, 1, &grow, n / bs, AJ, AA, INSERT_VALUES));
      }
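
      /* In effect, each bs x bs block (I,J) of the diagonal part has been reduced to the scalar
           G(I,J) = sum_{ii,jj} |Re(A(I*bs+ii, J*bs+jj))|
         with ii,jj running over the whole block (or over the user-provided index set), i.e. a
         cheap block norm used as the graph edge weight. */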
      // off-diag
      if (ismpiaij) {
        Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)Amat->data;
        const PetscScalar *vals;
        const PetscInt    *cols, *garray = aij->garray;

        PetscCheck(garray, PETSC_COMM_SELF, PETSC_ERR_USER, "No garray ?");
        for (PetscInt brow = 0, grow; brow < nloc * bs; brow += bs) { // block rows
          PetscCall(MatGetRow(b, brow, &ncols, &cols, NULL));
          for (int k = 0, cidx = 0; k < ncols; k += bs, cidx++) {
            PetscAssert(k / bs < nmax, comm, PETSC_ERR_USER, "k / bs >= nmax");
            AA[k / bs] = 0;
            AJ[cidx]   = garray[cols[k]] / bs;
          }
          nc = ncols / bs;
          PetscCall(MatRestoreRow(b, brow, &ncols, &cols, NULL));
          if (index_size == 0) {
            for (int ii = 0; ii < bs; ii++) { // rows in block
              PetscCall(MatGetRow(b, brow + ii, &ncols, &cols, &vals));
              for (int k = 0; k < ncols; k += bs) {
                for (int jj = 0; jj < bs; jj++) { // cols in block
                  PetscAssert(k / bs < nmax, comm, PETSC_ERR_USER, "k / bs (%d) >= nmax (%d)", (int)(k / bs), (int)nmax);
                  AA[k / bs] += PetscAbs(PetscRealPart(vals[k + jj]));
                }
              }
              PetscCall(MatRestoreRow(b, brow + ii, &ncols, &cols, &vals));
            }
          } else { // use (index,index) values if provided
            for (int iii = 0; iii < index_size; iii++) { // rows in block
              int ii = index[iii];
              PetscCall(MatGetRow(b, brow + ii, &ncols, &cols, &vals));
              for (int k = 0; k < ncols; k += bs) {
                for (int jjj = 0; jjj < index_size; jjj++) { // cols in block
                  int jj = index[jjj];
                  AA[k / bs] += PetscAbs(PetscRealPart(vals[k + jj]));
                }
              }
              PetscCall(MatRestoreRow(b, brow + ii, &ncols, &cols, &vals));
            }
          }
          grow = Istart / bs + brow / bs;
          PetscCall(MatSetValues(Gmat, 1, &grow, nc, AJ, AA, INSERT_VALUES));
        }
      }
      PetscCall(MatAssemblyBegin(Gmat, MAT_FINAL_ASSEMBLY));
      PetscCall(MatAssemblyEnd(Gmat, MAT_FINAL_ASSEMBLY));
      PetscCall(PetscFree2(AA, AJ));
    } else {
      const PetscScalar *vals;
      const PetscInt    *idx;
      PetscInt          *d_nnz, *o_nnz, *w0, *w1, *w2;

    old_bs:
      /*
        Determine the preallocation needed for the scalar matrix derived from the vector matrix.
      */
      PetscCall(PetscInfo(Amat, "OLD bs>1 CreateGraph\n"));
      PetscCall(PetscMalloc2(nloc, &d_nnz, isseqaij ? 0 : nloc, &o_nnz));
      if (isseqaij) {
        PetscInt max_d_nnz;

        /*
          Determine the exact preallocation count for the (sequential) scalar matrix
        */
        PetscCall(MatSeqAIJGetMaxRowNonzeros(Amat, &max_d_nnz));
        max_d_nnz = PetscMin(nloc, bs * max_d_nnz);
        PetscCall(PetscMalloc3(max_d_nnz, &w0, max_d_nnz, &w1, max_d_nnz, &w2));
        for (Ii = 0, jj = 0; Ii < Iend; Ii += bs, jj++) PetscCall(MatCollapseRows(Amat, Ii, bs, w0, w1, w2, &d_nnz[jj], NULL));
        PetscCall(PetscFree3(w0, w1, w2));
      } else if (ismpiaij) {
        Mat             Daij, Oaij;
        const PetscInt *garray;
        PetscInt        max_d_nnz;

        PetscCall(MatMPIAIJGetSeqAIJ(Amat, &Daij, &Oaij, &garray));
        /*
          Determine the exact preallocation count for the diagonal block portion of the scalar matrix
        */
        PetscCall(MatSeqAIJGetMaxRowNonzeros(Daij, &max_d_nnz));
        max_d_nnz = PetscMin(nloc, bs * max_d_nnz);
        PetscCall(PetscMalloc3(max_d_nnz, &w0, max_d_nnz, &w1, max_d_nnz, &w2));
        for (Ii = 0, jj = 0; Ii < Iend - Istart; Ii += bs, jj++) PetscCall(MatCollapseRows(Daij, Ii, bs, w0, w1, w2, &d_nnz[jj], NULL));
        PetscCall(PetscFree3(w0, w1, w2));
        /*
          Overestimate (usually grossly so) the preallocation count for the off-diagonal portion of the scalar matrix
        */
        for (Ii = 0, jj = 0; Ii < Iend - Istart; Ii += bs, jj++) {
          o_nnz[jj] = 0;
          for (kk = 0; kk < bs; kk++) { /* rows that get collapsed to a single row */
            PetscCall(MatGetRow(Oaij, Ii + kk, &ncols, NULL, NULL));
            o_nnz[jj] += ncols;
            PetscCall(MatRestoreRow(Oaij, Ii + kk, &ncols, NULL, NULL));
          }
          if (o_nnz[jj] > (NN / bs - nloc)) o_nnz[jj] = NN / bs - nloc;
        }
      } else SETERRQ(comm, PETSC_ERR_USER, "Require AIJ matrix type");
      /* get scalar copy (norms) of matrix */
      PetscCall(MatSeqAIJSetPreallocation(Gmat, 0, d_nnz));
      PetscCall(MatMPIAIJSetPreallocation(Gmat, 0, d_nnz, 0, o_nnz));
      PetscCall(PetscFree2(d_nnz, o_nnz));
      for (Ii = Istart; Ii < Iend; Ii++) {
        PetscInt dest_row = Ii / bs;

        PetscCall(MatGetRow(Amat, Ii, &ncols, &idx, &vals));
        for (jj = 0; jj < ncols; jj++) {
          PetscInt    dest_col = idx[jj] / bs;
          PetscScalar sv       = PetscAbs(PetscRealPart(vals[jj]));

          PetscCall(MatSetValues(Gmat, 1, &dest_row, 1, &dest_col, &sv, ADD_VALUES));
        }
        PetscCall(MatRestoreRow(Amat, Ii, &ncols, &idx, &vals));
      }
      PetscCall(MatAssemblyBegin(Gmat, MAT_FINAL_ASSEMBLY));
      PetscCall(MatAssemblyEnd(Gmat, MAT_FINAL_ASSEMBLY));
    }
  } else {
    if (symmetrize || filter >= 0 || scale) PetscCall(MatDuplicate(Amat, MAT_COPY_VALUES, &Gmat));
    else {
      Gmat = Amat;
      PetscCall(PetscObjectReference((PetscObject)Gmat));
    }
    if (isseqaij) {
      a = Gmat;
      b = NULL;
    } else {
      Mat_MPIAIJ *d = (Mat_MPIAIJ *)Gmat->data;
      a = d->A;
      b = d->B;
    }
    if (filter >= 0 || scale) {
      /* take the absolute value of each entry */
      for (c = a, kk = 0; c && kk < 2; c = b, kk++) {
        MatInfo      info;
        PetscScalar *avals;

        PetscCall(MatGetInfo(c, MAT_LOCAL, &info));
        PetscCall(MatSeqAIJGetArray(c, &avals));
        for (int jj = 0; jj < info.nz_used; jj++) avals[jj] = PetscAbsScalar(avals[jj]);
        PetscCall(MatSeqAIJRestoreArray(c, &avals));
      }
    }
  }
  if (symmetrize) {
    PetscBool isset, issym;

    PetscCall(MatIsSymmetricKnown(Amat, &isset, &issym));
    if (!isset || !issym) {
      Mat matTrans;

      PetscCall(MatTranspose(Gmat, MAT_INITIAL_MATRIX, &matTrans));
      PetscCall(MatAXPY(Gmat, 1.0, matTrans, Gmat->structurally_symmetric == PETSC_BOOL3_TRUE ? SAME_NONZERO_PATTERN : DIFFERENT_NONZERO_PATTERN));
      PetscCall(MatDestroy(&matTrans));
    }
    PetscCall(MatSetOption(Gmat, MAT_SYMMETRIC, PETSC_TRUE));
  } else if (Amat != Gmat) PetscCall(MatPropagateSymmetryOptions(Amat, Gmat));
  if (scale) {
    /* scale Gmat so that every diagonal value equals 1 or -1 */
    Vec diag;

    PetscCall(MatCreateVecs(Gmat, &diag, NULL));
    PetscCall(MatGetDiagonal(Gmat, diag));
    PetscCall(VecReciprocal(diag));
    PetscCall(VecSqrtAbs(diag));
    PetscCall(MatDiagonalScale(Gmat, diag, diag));
    PetscCall(VecDestroy(&diag));
  }
  PetscCall(MatViewFromOptions(Gmat, NULL, "-mat_graph_view"));

  if (filter >= 0) {
    PetscCall(MatFilter(Gmat, filter, PETSC_TRUE, PETSC_TRUE));
    PetscCall(MatViewFromOptions(Gmat, NULL, "-mat_filter_graph_view"));
  }
  *a_Gmat = Gmat;
  PetscFunctionReturn(PETSC_SUCCESS);
}
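
/* A usage sketch (hypothetical caller, in the style of the coarsening code that drives this
   routine): build a symmetrized, diagonally scaled scalar graph and drop entries below 0.01.

     Mat G;
     PetscCall(MatCreateGraph_Simple_AIJ(A, PETSC_TRUE, PETSC_TRUE, 0.01, 0, NULL, &G));
     // ... run a coarsening or partitioning algorithm on G ...
     PetscCall(MatDestroy(&G));
*/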

/*
  Special version for direct calls from Fortran
*/
#include <petsc/private/fortranimpl.h>

/* Change these macros so they can be used in a void function */

/* Identical to PetscCallVoid, except it assigns to *_ierr */
#undef PetscCall
#define PetscCall(...) \
  do { \
    PetscErrorCode ierr_msv_mpiaij = __VA_ARGS__; \
    if (PetscUnlikely(ierr_msv_mpiaij)) { \
      *_ierr = PetscError(PETSC_COMM_SELF, __LINE__, PETSC_FUNCTION_NAME, __FILE__, ierr_msv_mpiaij, PETSC_ERROR_REPEAT, " "); \
      return; \
    } \
  } while (0)

#undef SETERRQ
#define SETERRQ(comm, ierr, ...) \
  do { \
    *_ierr = PetscError(comm, __LINE__, PETSC_FUNCTION_NAME, __FILE__, ierr, PETSC_ERROR_INITIAL, __VA_ARGS__); \
    return; \
  } while (0)

#if defined(PETSC_HAVE_FORTRAN_CAPS)
  #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
  #define matsetvaluesmpiaij_ matsetvaluesmpiaij
#endif
PETSC_EXTERN void matsetvaluesmpiaij_(Mat *mmat, PetscInt *mm, const PetscInt im[], PetscInt *mn, const PetscInt in[], const PetscScalar v[], InsertMode *maddv, PetscErrorCode *_ierr)
{
  Mat         mat  = *mmat;
  PetscInt    m    = *mm, n = *mn;
  InsertMode  addv = *maddv;
  Mat_MPIAIJ *aij  = (Mat_MPIAIJ *)mat->data;
  PetscScalar value;

  MatCheckPreallocated(mat, 1);
  if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
  else PetscCheck(mat->insertmode == addv, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Cannot mix add values and insert values");
  {
    PetscInt  i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
    PetscInt  cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
    PetscBool roworiented = aij->roworiented;

    /* Some variables required by the macros below */
    Mat         A     = aij->A;
    Mat_SeqAIJ *a     = (Mat_SeqAIJ *)A->data;
    PetscInt   *aimax = a->imax, *ai = a->i, *ailen = a->ilen, *aj = a->j;
    MatScalar  *aa;
    PetscBool   ignorezeroentries = (((a->ignorezeroentries) && (addv == ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
    Mat         B     = aij->B;
    Mat_SeqAIJ *b     = (Mat_SeqAIJ *)B->data;
    PetscInt   *bimax = b->imax, *bi = b->i, *bilen = b->ilen, *bj = b->j, bm = aij->B->rmap->n, am = aij->A->rmap->n;
    MatScalar  *ba;
    /* The variable below is only needed for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it
       in all cases because we cannot use "#if defined" inside a macro. */
    PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

    PetscInt  *rp1, *rp2, ii, nrow1, nrow2, _i, rmax1, rmax2, N, low1, high1, low2, high2, t, lastcol1, lastcol2;
    PetscInt   nonew = a->nonew;
    MatScalar *ap1, *ap2;

    PetscFunctionBegin;
    PetscCall(MatSeqAIJGetArray(A, &aa));
    PetscCall(MatSeqAIJGetArray(B, &ba));
    for (i = 0; i < m; i++) {
      if (im[i] < 0) continue;
      PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
      if (im[i] >= rstart && im[i] < rend) {
        row      = im[i] - rstart;
        lastcol1 = -1;
        rp1      = aj + ai[row];
        ap1      = aa + ai[row];
        rmax1    = aimax[row];
        nrow1    = ailen[row];
        low1     = 0;
        high1    = nrow1;
        lastcol2 = -1;
        rp2      = bj + bi[row];
        ap2      = ba + bi[row];
        rmax2    = bimax[row];
        nrow2    = bilen[row];
        low2     = 0;
        high2    = nrow2;

        for (j = 0; j < n; j++) {
          if (roworiented) value = v[i * n + j];
          else value = v[i + j * m];
          if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
          if (in[j] >= cstart && in[j] < cend) {
            col = in[j] - cstart;
            MatSetValues_SeqAIJ_A_Private(row, col, value, addv, im[i], in[j]);
          } else if (in[j] < 0) continue;
          else if (PetscUnlikelyDebug(in[j] >= mat->cmap->N)) {
            SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
          } else {
            if (mat->was_assembled) {
              if (!aij->colmap) PetscCall(MatCreateColmap_MPIAIJ_Private(mat));
#if defined(PETSC_USE_CTABLE)
              PetscCall(PetscHMapIGetWithDefault(aij->colmap, in[j] + 1, 0, &col));
              col--;
#else
              col = aij->colmap[in[j]] - 1;
#endif
              if (col < 0 && !((Mat_SeqAIJ *)(aij->A->data))->nonew) {
                PetscCall(MatDisAssemble_MPIAIJ(mat));
                col = in[j];
                /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
                B     = aij->B;
                b     = (Mat_SeqAIJ *)B->data;
                bimax = b->imax;
                bi    = b->i;
                bilen = b->ilen;
                bj    = b->j;
                rp2   = bj + bi[row];
                ap2   = ba + bi[row];
                rmax2 = bimax[row];
                nrow2 = bilen[row];
                low2  = 0;
                high2 = nrow2;
                bm    = aij->B->rmap->n;
                ba    = b->a;
                inserted = PETSC_FALSE;
              }
            } else col = in[j];
            MatSetValues_SeqAIJ_B_Private(row, col, value, addv, im[i], in[j]);
          }
        }
      } else if (!aij->donotstash) {
        if (roworiented) {
          PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, v + i * n, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
        } else {
          PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, v + i, m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
        }
      }
    }
    PetscCall(MatSeqAIJRestoreArray(A, &aa));
    PetscCall(MatSeqAIJRestoreArray(B, &ba));
  }
  PetscFunctionReturnVoid();
}

/* Undefining these here since they were redefined from their original definitions above! No
   other PETSc functions should be defined past this point, as it is impossible to recover the
   original definitions */
#undef PetscCall
#undef SETERRQ