#include <petsc/private/matimpl.h> /*I "petscmat.h" I*/

/* Default MatSetBlockSizes() implementation: once the matrix is preallocated the block
   sizes can no longer change, so only verify the requested sizes are compatible. */
PETSC_INTERN PetscErrorCode MatSetBlockSizes_Default(Mat mat, PetscInt rbs, PetscInt cbs)
{
  PetscFunctionBegin;
  if (!mat->preallocated) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCheck(mat->rmap->bs <= 0 || mat->rmap->bs == rbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change row block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->rmap->bs, rbs);
  PetscCheck(mat->cmap->bs <= 0 || mat->cmap->bs == cbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change column block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->cmap->bs, cbs);
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Fallback MatShift() implementation: adds a to each locally owned diagonal entry via
   MatSetValues(). All entries are local, so off-process communication is suppressed
   around the assembly and the previous option value is restored afterwards. */
PETSC_INTERN PetscErrorCode MatShift_Basic(Mat Y, PetscScalar a)
{
  PetscInt    i, start, end;
  PetscScalar alpha = a;
  PetscBool   prevoption;

  PetscFunctionBegin;
  PetscCall(MatGetOption(Y, MAT_NO_OFF_PROC_ENTRIES, &prevoption));
  PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(MatGetOwnershipRange(Y, &start, &end));
  for (i = start; i < end; i++) {
    /* skip rows beyond the column dimension of a rectangular matrix */
    if (i < Y->cmap->N) PetscCall(MatSetValues(Y, 1, &i, 1, &i, &alpha, ADD_VALUES));
  }
  PetscCall(MatAssemblyBegin(Y, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(Y, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, prevoption));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatCreate - Creates a matrix where the type is determined
  from either a call to `MatSetType()` or from the options database
  with a call to `MatSetFromOptions()`.

  Collective

  Input Parameter:
. comm - MPI communicator

  Output Parameter:
. A - the matrix

  Options Database Keys:
+ -mat_type seqaij   - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()`
. -mat_type mpiaij   - `MATMPIAIJ` type, uses `MatCreateAIJ()`
. -mat_type seqdense - `MATSEQDENSE` type, uses `MatCreateSeqDense()`
. -mat_type mpidense - `MATMPIDENSE` type, uses `MatCreateDense()`
. -mat_type seqbaij  - `MATSEQBAIJ` type, uses `MatCreateSeqBAIJ()`
- -mat_type mpibaij  - `MATMPIBAIJ` type, uses `MatCreateBAIJ()`

  See the manpages for particular formats (e.g., `MATSEQAIJ`)
  for additional format-specific options.

  Level: beginner

  Notes:
  The default matrix type is `MATAIJ`, using the routines `MatCreateSeqAIJ()` or
  `MatCreateAIJ()` if you do not set a type in the options database. If you never call
  `MatSetType()` or `MatSetFromOptions()` it will generate an error when you try to use the
  matrix.

.seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
          `MatCreateSeqDense()`, `MatCreateDense()`,
          `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
          `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
          `MatConvert()`
@*/
PetscErrorCode MatCreate(MPI_Comm comm, Mat *A)
{
  Mat B;

  PetscFunctionBegin;
  PetscValidPointer(A, 2);

  *A = NULL;
  PetscCall(MatInitializePackage());

  PetscCall(PetscHeaderCreate(B, MAT_CLASSID, "Mat", "Matrix", "Mat", comm, MatDestroy, MatView));
  PetscCall(PetscLayoutCreate(comm, &B->rmap));
  PetscCall(PetscLayoutCreate(comm, &B->cmap));
  PetscCall(PetscStrallocpy(VECSTANDARD, &B->defaultvectype));
  PetscCall(PetscStrallocpy(PETSCRANDER48, &B->defaultrandtype));

  /* all structural/spectral properties are unknown until the user declares them */
  B->symmetric                   = PETSC_BOOL3_UNKNOWN;
  B->hermitian                   = PETSC_BOOL3_UNKNOWN;
  B->structurally_symmetric      = PETSC_BOOL3_UNKNOWN;
  B->spd                         = PETSC_BOOL3_UNKNOWN;
  B->symmetry_eternal            = PETSC_FALSE;
  B->structural_symmetry_eternal = PETSC_FALSE;

  B->congruentlayouts = PETSC_DECIDE;
  B->preallocated     = PETSC_FALSE;
#if defined(PETSC_HAVE_DEVICE)
  B->boundtocpu = PETSC_TRUE;
#endif
  *A = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetErrorIfFailure - Causes `Mat` to generate an immediate error when, for example, a zero pivot is detected.

  Logically Collective

  Input Parameters:
+ mat - matrix obtained from `MatCreate()`
- flg - `PETSC_TRUE` indicates you want the error generated

  Level: advanced

  Note:
  If this flag is not set then the matrix operation will note the error and continue. The error may cause a later `PC` or `KSP` error
  or result in a `KSPConvergedReason` indicating the method did not converge.

.seealso: [](ch_matrices), `Mat`, `PCSetErrorIfFailure()`, `KSPConvergedReason`, `SNESConvergedReason`
@*/
PetscErrorCode MatSetErrorIfFailure(Mat mat, PetscBool flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(mat, MAT_CLASSID, 1);
  PetscValidLogicalCollectiveBool(mat, flg, 2);
  mat->erroriffailure = flg;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetSizes - Sets the local and global sizes, and checks to determine compatibility

  Collective

  Input Parameters:
+ A - the matrix
. m - number of local rows (or `PETSC_DECIDE`)
. n - number of local columns (or `PETSC_DECIDE`)
. M - number of global rows (or `PETSC_DETERMINE`)
- N - number of global columns (or `PETSC_DETERMINE`)

  Level: beginner

  Notes:
  `m` (`n`) and `M` (`N`) cannot be both `PETSC_DECIDE`
  If one processor calls this with `M` (`N`) of `PETSC_DECIDE` then all processors must, otherwise the program will hang.

  If `PETSC_DECIDE` is not used for the arguments 'm' and 'n', then the
  user must ensure that they are chosen to be compatible with the
  vectors. To do this, one first considers the matrix-vector product
  'y = A x'. The `m` that is used in the above routine must match the
  local size used in the vector creation routine `VecCreateMPI()` for 'y'.
  Likewise, the `n` used must match that used as the local size in
  `VecCreateMPI()` for 'x'.

  You cannot change the sizes once they have been set.

  The sizes must be set before `MatSetUp()` or MatXXXSetPreallocation() is called.

.seealso: [](ch_matrices), `Mat`, `MatGetSize()`, `PetscSplitOwnership()`
@*/
PetscErrorCode MatSetSizes(Mat A, PetscInt m, PetscInt n, PetscInt M, PetscInt N)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidLogicalCollectiveInt(A, M, 4);
  PetscValidLogicalCollectiveInt(A, N, 5);
  PetscCheck(M <= 0 || m <= M, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local row size %" PetscInt_FMT " cannot be larger than global row size %" PetscInt_FMT, m, M);
  PetscCheck(N <= 0 || n <= N, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local column size %" PetscInt_FMT " cannot be larger than global column size %" PetscInt_FMT, n, N);
  /* once sizes are set they may not be changed to different values */
  PetscCheck((A->rmap->n < 0 || A->rmap->N < 0) || (A->rmap->n == m && (M <= 0 || A->rmap->N == M)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset row sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", m, M,
             A->rmap->n, A->rmap->N);
  PetscCheck((A->cmap->n < 0 || A->cmap->N < 0) || (A->cmap->n == n && (N <= 0 || A->cmap->N == N)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset column sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", n, N,
             A->cmap->n, A->cmap->N);
  A->rmap->n = m;
  A->cmap->n = n;
  A->rmap->N = M > -1 ? M : A->rmap->N;
  A->cmap->N = N > -1 ? N : A->cmap->N;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetFromOptions - Creates a matrix where the type is determined
  from the options database.

  Collective

  Input Parameter:
. B - the matrix

  Options Database Keys:
+ -mat_type seqaij   - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()`
. -mat_type mpiaij   - `MATMPIAIJ` type, uses `MatCreateAIJ()`
. -mat_type seqdense - `MATSEQDENSE` type, uses `MatCreateSeqDense()`
. -mat_type mpidense - `MATMPIDENSE` type, uses `MatCreateDense()`
. -mat_type seqbaij  - `MATSEQBAIJ` type, uses `MatCreateSeqBAIJ()`
- -mat_type mpibaij  - `MATMPIBAIJ` type, uses `MatCreateBAIJ()`

  See the manpages for particular formats (e.g., `MATSEQAIJ`)
  for additional format-specific options.

  Level: beginner

  Notes:
  Generates a parallel MPI matrix if the communicator has more than one processor. The default
  matrix type is `MATAIJ`, using the routines `MatCreateSeqAIJ()` and `MatCreateAIJ()` if you
  do not select a type in the options database.

.seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
          `MatCreateSeqDense()`, `MatCreateDense()`,
          `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
          `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
          `MatConvert()`
@*/
PetscErrorCode MatSetFromOptions(Mat B)
{
  const char *deft = MATAIJ;
  char        type[256];
  PetscBool   flg, set;
  PetscInt    bind_below = 0;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(B, MAT_CLASSID, 1);

  PetscObjectOptionsBegin((PetscObject)B);

  /* only consult -mat_block_size when no block size has been set yet */
  if (B->rmap->bs < 0) {
    PetscInt newbs = -1;
    PetscCall(PetscOptionsInt("-mat_block_size", "Set the blocksize used to store the matrix", "MatSetBlockSize", newbs, &newbs, &flg));
    if (flg) {
      PetscCall(PetscLayoutSetBlockSize(B->rmap, newbs));
      PetscCall(PetscLayoutSetBlockSize(B->cmap, newbs));
    }
  }

  PetscCall(PetscOptionsFList("-mat_type", "Matrix type", "MatSetType", MatList, deft, type, 256, &flg));
  if (flg) {
    PetscCall(MatSetType(B, type));
  } else if (!((PetscObject)B)->type_name) {
    /* no type was requested anywhere: fall back to the default AIJ type */
    PetscCall(MatSetType(B, deft));
  }

  /* the same key is queried twice: first for its mere presence, then for an optional tolerance value */
  PetscCall(PetscOptionsName("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", &B->checksymmetryonassembly));
  PetscCall(PetscOptionsReal("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", B->checksymmetrytol, &B->checksymmetrytol, NULL));
  PetscCall(PetscOptionsBool("-mat_null_space_test", "Checks if provided null space is correct in MatAssemblyEnd()", "MatSetNullSpaceTest", B->checknullspaceonassembly, &B->checknullspaceonassembly, NULL));
  PetscCall(PetscOptionsBool("-mat_error_if_failure", "Generate an error if an error occurs when factoring the matrix", "MatSetErrorIfFailure", B->erroriffailure, &B->erroriffailure, NULL));

  /* give the concrete Mat type a chance to process its own options */
  PetscTryTypeMethod(B, setfromoptions, PetscOptionsObject);

  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_new_nonzero_location_err", "Generate an error if new nonzeros are created in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_LOCATION_ERR, flg));
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_new_nonzero_allocation_err", "Generate an error if new nonzeros are allocated in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, flg));
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_ignore_zero_entries", "For AIJ/IS matrices this will stop zero values from creating a zero location in the matrix", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_IGNORE_ZERO_ENTRIES, flg));

  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_form_explicit_transpose", "Hint to form an explicit transpose for operations like MatMultTranspose", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_FORM_EXPLICIT_TRANSPOSE, flg));

  /* Bind to CPU if below a user-specified size threshold.
   * This perhaps belongs in the options for the GPU Mat types, but MatBindToCPU() does nothing when called on non-GPU types,
   * and putting it here makes it more maintainable than duplicating this for all. */
  PetscCall(PetscOptionsInt("-mat_bind_below", "Set the size threshold (in local rows) below which the Mat is bound to the CPU", "MatBindToCPU", bind_below, &bind_below, &flg));
  if (flg && B->rmap->n < bind_below) PetscCall(MatBindToCPU(B, PETSC_TRUE));

  /* process any options handlers added with PetscObjectAddOptionsHandler() */
  PetscCall(PetscObjectProcessOptionsHandlers((PetscObject)B, PetscOptionsObject));
  PetscOptionsEnd();
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatXAIJSetPreallocation - set preallocation for serial and parallel `MATAIJ`, `MATBAIJ`, and `MATSBAIJ` matrices and their unassembled versions.

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. bs    - block size
. dnnz  - number of nonzero column blocks per block row of diagonal part of parallel matrix
. onnz  - number of nonzero column blocks per block row of off-diagonal part of parallel matrix
. dnnzu - number of nonzero column blocks per block row of upper-triangular part of diagonal part of parallel matrix
- onnzu - number of nonzero column blocks per block row of upper-triangular part of off-diagonal part of parallel matrix

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`, `MatMPIBAIJSetPreallocation()`,
          `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`,
          `PetscSplitOwnership()`
@*/
PetscErrorCode MatXAIJSetPreallocation(Mat A, PetscInt bs, const PetscInt dnnz[], const PetscInt onnz[], const PetscInt dnnzu[], const PetscInt onnzu[])
{
  PetscInt cbs;
  void (*aij)(void);
  void (*is)(void);
  void (*hyp)(void) = NULL;

  PetscFunctionBegin;
  if (bs != PETSC_DECIDE) { /* don't mess with an already set block size */
    PetscCall(MatSetBlockSize(A, bs));
  }
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));
  PetscCall(MatGetBlockSizes(A, &bs, &cbs));
  /* these routines assume bs == cbs, this should be checked somehow */
  PetscCall(MatSeqBAIJSetPreallocation(A, bs, 0, dnnz));
  PetscCall(MatMPIBAIJSetPreallocation(A, bs, 0, dnnz, 0, onnz));
  PetscCall(MatSeqSBAIJSetPreallocation(A, bs, 0, dnnzu));
  PetscCall(MatMPISBAIJSetPreallocation(A, bs, 0, dnnzu, 0, onnzu));
  /*
     In general, we have to do extra work to preallocate for scalar (AIJ) or unassembled (IS) matrices so we check whether it will do any
     good before going on with it.
  */
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatMPIAIJSetPreallocation_C", &aij));
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatISSetPreallocation_C", &is));
#if defined(PETSC_HAVE_HYPRE)
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatHYPRESetPreallocation_C", &hyp));
#endif
  if (!aij && !is && !hyp) PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSeqAIJSetPreallocation_C", &aij));
  if (aij || is || hyp) {
    if (bs == cbs && bs == 1) {
      /* scalar matrix: the block-row counts are already per-row counts */
      PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz));
      PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz, 0, onnz));
      PetscCall(MatISSetPreallocation(A, 0, dnnz, 0, onnz));
#if defined(PETSC_HAVE_HYPRE)
      PetscCall(MatHYPRESetPreallocation(A, 0, dnnz, 0, onnz));
#endif
    } else { /* Convert block-row preallocation to scalar-row */
      PetscInt i, m, *sdnnz, *sonnz;
      PetscCall(MatGetLocalSize(A, &m, NULL));
      /* (!!dnnz) * m allocates m entries when dnnz is provided, 0 otherwise */
      PetscCall(PetscMalloc2((!!dnnz) * m, &sdnnz, (!!onnz) * m, &sonnz));
      for (i = 0; i < m; i++) {
        if (dnnz) sdnnz[i] = dnnz[i / bs] * cbs;
        if (onnz) sonnz[i] = onnz[i / bs] * cbs;
      }
      PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL));
      PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
      PetscCall(MatISSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
#if defined(PETSC_HAVE_HYPRE)
      PetscCall(MatHYPRESetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
#endif
      PetscCall(PetscFree2(sdnnz, sonnz));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   Merges some information from Cs header to A; the C object is then destroyed

   This is somewhat different from MatHeaderReplace() it would be nice to merge the code
*/
PetscErrorCode MatHeaderMerge(Mat A, Mat *C)
{
  PetscInt         refct;
  PetscOps         Abops;
  struct _MatOps   Aops;
  char            *mtype, *mname, *mprefix;
  Mat_Product     *product;
  Mat_Redundant   *redundant;
  PetscObjectState state;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidHeaderSpecific(*C, MAT_CLASSID, 2);
  if (A == *C) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCheckSameComm(A, 1, *C, 2);
  /* save the parts of A we need */
  Abops     = ((PetscObject)A)->bops[0];
  Aops      = A->ops[0];
  refct     = ((PetscObject)A)->refct;
  mtype     = ((PetscObject)A)->type_name;
  mname     = ((PetscObject)A)->name;
  state     = ((PetscObject)A)->state;
  mprefix   = ((PetscObject)A)->prefix;
  product   = A->product;
  redundant = A->redundant;

  /* zero these so the destroy below does not free them */
  ((PetscObject)A)->type_name = NULL;
  ((PetscObject)A)->name      = NULL;

  /*
     free all the interior data structures from mat
     cannot use PetscUseTypeMethod(A,destroy); because compiler
     thinks it may print NULL type_name and name
  */
  PetscTryTypeMethod(A, destroy);

  PetscCall(PetscFree(A->defaultvectype));
  PetscCall(PetscFree(A->defaultrandtype));
  PetscCall(PetscLayoutDestroy(&A->rmap));
  PetscCall(PetscLayoutDestroy(&A->cmap));
  PetscCall(PetscFunctionListDestroy(&((PetscObject)A)->qlist));
  PetscCall(PetscObjectListDestroy(&((PetscObject)A)->olist));
  PetscCall(PetscComposedQuantitiesDestroy((PetscObject)A));

  /* copy C over to A */
  PetscCall(PetscFree(A->factorprefix));
  PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat)));

  /*
return the parts of A we saved */ 403 ((PetscObject)A)->bops[0] = Abops; 404 A->ops[0] = Aops; 405 ((PetscObject)A)->refct = refct; 406 ((PetscObject)A)->type_name = mtype; 407 ((PetscObject)A)->name = mname; 408 ((PetscObject)A)->prefix = mprefix; 409 ((PetscObject)A)->state = state + 1; 410 A->product = product; 411 A->redundant = redundant; 412 413 /* since these two are copied into A we do not want them destroyed in C */ 414 ((PetscObject)*C)->qlist = NULL; 415 ((PetscObject)*C)->olist = NULL; 416 417 PetscCall(PetscHeaderDestroy(C)); 418 PetscFunctionReturn(PETSC_SUCCESS); 419 } 420 /* 421 Replace A's header with that of C; the C object is then destroyed 422 423 This is essentially code moved from MatDestroy() 424 425 This is somewhat different from MatHeaderMerge() it would be nice to merge the code 426 427 Used in DM hence is declared PETSC_EXTERN 428 */ 429 PETSC_EXTERN PetscErrorCode MatHeaderReplace(Mat A, Mat *C) 430 { 431 PetscInt refct; 432 PetscObjectState state; 433 struct _p_Mat buffer; 434 MatStencilInfo stencil; 435 436 PetscFunctionBegin; 437 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 438 PetscValidHeaderSpecific(*C, MAT_CLASSID, 2); 439 if (A == *C) PetscFunctionReturn(PETSC_SUCCESS); 440 PetscCheckSameComm(A, 1, *C, 2); 441 PetscCheck(((PetscObject)*C)->refct == 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "Object C has refct %" PetscInt_FMT " > 1, would leave hanging reference", ((PetscObject)*C)->refct); 442 443 /* swap C and A */ 444 refct = ((PetscObject)A)->refct; 445 state = ((PetscObject)A)->state; 446 stencil = A->stencil; 447 PetscCall(PetscMemcpy(&buffer, A, sizeof(struct _p_Mat))); 448 PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat))); 449 PetscCall(PetscMemcpy(*C, &buffer, sizeof(struct _p_Mat))); 450 ((PetscObject)A)->refct = refct; 451 ((PetscObject)A)->state = state + 1; 452 A->stencil = stencil; 453 454 ((PetscObject)*C)->refct = 1; 455 PetscCall(MatShellSetOperation(*C, MATOP_DESTROY, (void 
(*)(void))NULL)); 456 PetscCall(MatDestroy(C)); 457 PetscFunctionReturn(PETSC_SUCCESS); 458 } 459 460 /*@ 461 MatBindToCPU - marks a matrix to temporarily stay on the CPU and perform computations on the CPU 462 463 Logically Collective 464 465 Input Parameters: 466 + A - the matrix 467 - flg - bind to the CPU if value of `PETSC_TRUE` 468 469 Level: intermediate 470 471 .seealso: [](ch_matrices), `Mat`, `MatBoundToCPU()` 472 @*/ 473 PetscErrorCode MatBindToCPU(Mat A, PetscBool flg) 474 { 475 PetscFunctionBegin; 476 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 477 PetscValidLogicalCollectiveBool(A, flg, 2); 478 #if defined(PETSC_HAVE_DEVICE) 479 if (A->boundtocpu == flg) PetscFunctionReturn(PETSC_SUCCESS); 480 A->boundtocpu = flg; 481 PetscTryTypeMethod(A, bindtocpu, flg); 482 #endif 483 PetscFunctionReturn(PETSC_SUCCESS); 484 } 485 486 /*@ 487 MatBoundToCPU - query if a matrix is bound to the CPU 488 489 Input Parameter: 490 . A - the matrix 491 492 Output Parameter: 493 . flg - the logical flag 494 495 Level: intermediate 496 497 .seealso: [](ch_matrices), `Mat`, `MatBindToCPU()` 498 @*/ 499 PetscErrorCode MatBoundToCPU(Mat A, PetscBool *flg) 500 { 501 PetscFunctionBegin; 502 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 503 PetscValidBoolPointer(flg, 2); 504 #if defined(PETSC_HAVE_DEVICE) 505 *flg = A->boundtocpu; 506 #else 507 *flg = PETSC_TRUE; 508 #endif 509 PetscFunctionReturn(PETSC_SUCCESS); 510 } 511 512 PetscErrorCode MatSetValuesCOO_Basic(Mat A, const PetscScalar coo_v[], InsertMode imode) 513 { 514 IS is_coo_i, is_coo_j; 515 const PetscInt *coo_i, *coo_j; 516 PetscInt n, n_i, n_j; 517 PetscScalar zero = 0.; 518 519 PetscFunctionBegin; 520 PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_i", (PetscObject *)&is_coo_i)); 521 PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_j", (PetscObject *)&is_coo_j)); 522 PetscCheck(is_coo_i, PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_i IS"); 523 PetscCheck(is_coo_j, 
             PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_j IS");
  PetscCall(ISGetLocalSize(is_coo_i, &n_i));
  PetscCall(ISGetLocalSize(is_coo_j, &n_j));
  PetscCheck(n_i == n_j, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong local size %" PetscInt_FMT " != %" PetscInt_FMT, n_i, n_j);
  PetscCall(ISGetIndices(is_coo_i, &coo_i));
  PetscCall(ISGetIndices(is_coo_j, &coo_j));
  /* INSERT_VALUES semantics are obtained by zeroing first and then always adding,
     so repeated COO entries are summed in both modes */
  if (imode != ADD_VALUES) PetscCall(MatZeroEntries(A));
  for (n = 0; n < n_i; n++) PetscCall(MatSetValue(A, coo_i[n], coo_j[n], coo_v ? coo_v[n] : zero, ADD_VALUES));
  PetscCall(ISRestoreIndices(is_coo_i, &coo_i));
  PetscCall(ISRestoreIndices(is_coo_j, &coo_j));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Fallback MatSetPreallocationCOO() implementation: runs the COO pattern through a
   MATPREALLOCATOR matrix, then stashes the indices on A (as ISs) for use by
   MatSetValuesCOO_Basic() */
PetscErrorCode MatSetPreallocationCOO_Basic(Mat A, PetscCount ncoo, const PetscInt coo_i[], const PetscInt coo_j[])
{
  Mat         preallocator;
  IS          is_coo_i, is_coo_j;
  PetscScalar zero = 0.0;

  PetscFunctionBegin;
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));
  PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &preallocator));
  PetscCall(MatSetType(preallocator, MATPREALLOCATOR));
  PetscCall(MatSetSizes(preallocator, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
  PetscCall(MatSetLayouts(preallocator, A->rmap, A->cmap));
  PetscCall(MatSetUp(preallocator));
  for (PetscCount n = 0; n < ncoo; n++) PetscCall(MatSetValue(preallocator, coo_i[n], coo_j[n], zero, INSERT_VALUES));
  PetscCall(MatAssemblyBegin(preallocator, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(preallocator, MAT_FINAL_ASSEMBLY));
  PetscCall(MatPreallocatorPreallocate(preallocator, PETSC_TRUE, A));
  PetscCall(MatDestroy(&preallocator));
  /* ISCreateGeneral() takes a PetscInt length, so ncoo must fit in a PetscInt */
  PetscCheck(ncoo <= PETSC_MAX_INT, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo);
  PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncoo, coo_i, PETSC_COPY_VALUES, &is_coo_i));
  PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncoo, coo_j, PETSC_COPY_VALUES, &is_coo_j));
  PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_i", (PetscObject)is_coo_i));
  PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_j", (PetscObject)is_coo_j));
  PetscCall(ISDestroy(&is_coo_i));
  PetscCall(ISDestroy(&is_coo_j));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSetPreallocationCOO - set preallocation for matrices using a coordinate format of the entries with global indices

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. ncoo  - number of entries
. coo_i - row indices
- coo_j - column indices

  Level: beginner

  Notes:
  The indices `coo_i` and `coo_j` may be modified within this function. The caller should not rely on them
  having any specific value after this function returns. The arrays can be freed or reused immediately
  after this function returns.

  Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed
  but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries
  are allowed and will be properly added or inserted to the matrix, unless the matrix option `MAT_IGNORE_OFF_PROC_ENTRIES`
  is set, in which case remote entries are ignored, or `MAT_NO_OFF_PROC_ENTRIES` is set, in which case an error will be generated.

  If you just want to create a sequential AIJ matrix (`MATSEQAIJ`), and your matrix entries in COO format are unique, you can also use
  `MatCreateSeqAIJFromTriple()`. But that is not recommended for iterative applications.

.seealso: [](ch_matrices), `Mat`, `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`,
          `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOOLocal()`,
          `DMSetMatrixPreallocateSkip()`, `MatCreateSeqAIJFromTriple()`
@*/
PetscErrorCode MatSetPreallocationCOO(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
{
  PetscErrorCode (*f)(Mat, PetscCount, const PetscInt[], const PetscInt[]) = NULL;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidType(A, 1);
  if (ncoo) PetscValidIntPointer(coo_i, 3);
  if (ncoo) PetscValidIntPointer(coo_j, 4);
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOO_C", &f));

  PetscCall(PetscLogEventBegin(MAT_PreallCOO, A, 0, 0, 0));
  if (f) {
    /* type-specific implementation */
    PetscCall((*f)(A, ncoo, coo_i, coo_j));
  } else { /* allow fallback, very slow */
    PetscCall(MatSetPreallocationCOO_Basic(A, ncoo, coo_i, coo_j));
  }
  PetscCall(PetscLogEventEnd(MAT_PreallCOO, A, 0, 0, 0));
  A->preallocated = PETSC_TRUE;
  A->nonzerostate++;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSetPreallocationCOOLocal - set preallocation for matrices using a coordinate format of the entries with local indices

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. ncoo  - number of entries
. coo_i - row indices (local numbering; may be modified)
- coo_j - column indices (local numbering; may be modified)

  Level: beginner

  Notes:
  The local indices are translated using the local to global mapping, thus `MatSetLocalToGlobalMapping()` must have been
  called prior to this function.
  For matrices created with `DMCreateMatrix()` the local to global mapping is often already provided.

  The indices `coo_i` and `coo_j` may be modified within this function. They might be translated to corresponding global
  indices, but the caller should not rely on them having any specific value after this function returns. The arrays
  can be freed or reused immediately after this function returns.

  Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed
  but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries
  are allowed and will be properly added or inserted to the matrix.

.seealso: [](ch_matrices), `Mat`, `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`,
          `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOO()`,
          `DMSetMatrixPreallocateSkip()`
@*/
PetscErrorCode MatSetPreallocationCOOLocal(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
{
  PetscErrorCode (*f)(Mat, PetscCount, PetscInt[], PetscInt[]) = NULL;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidType(A, 1);
  if (ncoo) PetscValidIntPointer(coo_i, 3);
  if (ncoo) PetscValidIntPointer(coo_j, 4);
  /* ISLocalToGlobalMappingApply() takes PetscInt counts, so ncoo must fit in a PetscInt */
  PetscCheck(ncoo <= PETSC_MAX_INT, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo);
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));

  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOOLocal_C", &f));
  if (f) {
    /* type-specific local-index implementation */
    PetscCall((*f)(A, ncoo, coo_i, coo_j));
    A->nonzerostate++;
  } else {
    /* no local COO support: translate the local indices to global in place and
       defer to the global-index path */
    ISLocalToGlobalMapping ltog_row, ltog_col;
    PetscCall(MatGetLocalToGlobalMapping(A, &ltog_row, &ltog_col));
    if (ltog_row) PetscCall(ISLocalToGlobalMappingApply(ltog_row, ncoo, coo_i, coo_i));
    if (ltog_col) PetscCall(ISLocalToGlobalMappingApply(ltog_col, ncoo, coo_j, coo_j));
    PetscCall(MatSetPreallocationCOO(A, ncoo, coo_i, coo_j));
  }
  A->preallocated = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetValuesCOO - set values at once in a matrix preallocated using `MatSetPreallocationCOO()`

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. coo_v - the matrix values (can be `NULL`)
- imode - the insert mode

  Level: beginner

  Notes:
  The values must follow the order of the indices prescribed with `MatSetPreallocationCOO()` or `MatSetPreallocationCOOLocal()`.

  When repeated entries are specified in the COO indices the `coo_v` values are first properly summed, regardless of the value of imode.
  The imode flag indicates if coo_v must be added to the current values of the matrix (`ADD_VALUES`) or overwritten (`INSERT_VALUES`).

  `MatAssemblyBegin()` and `MatAssemblyEnd()` do not need to be called after this routine. It automatically handles the assembly process.

.seealso: [](ch_matrices), `Mat`, `MatSetPreallocationCOO()`, `MatSetPreallocationCOOLocal()`, `InsertMode`, `INSERT_VALUES`, `ADD_VALUES`
@*/
PetscErrorCode MatSetValuesCOO(Mat A, const PetscScalar coo_v[], InsertMode imode)
{
  PetscErrorCode (*f)(Mat, const PetscScalar[], InsertMode) = NULL;
  PetscBool oldFlg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidType(A, 1);
  MatCheckPreallocated(A, 1);
  PetscValidLogicalCollectiveEnum(A, imode, 3);
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetValuesCOO_C", &f));
  PetscCall(PetscLogEventBegin(MAT_SetVCOO, A, 0, 0, 0));
  if (f) {
    PetscCall((*f)(A, coo_v, imode)); // all known COO implementations do not use MatStash; they do their own off-proc communication
    PetscCall(MatGetOption(A, MAT_NO_OFF_PROC_ENTRIES, &oldFlg));
    PetscCall(MatSetOption(A, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE)); // set A->nooffprocentries to avoid costly MatStash scatter in MatAssembly
  } else {
    PetscCall(MatSetValuesCOO_Basic(A, coo_v, imode)); // fall back to MatSetValues, which might use MatStash
  }
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
  /* restore the user's original MAT_NO_OFF_PROC_ENTRIES setting */
  if (f) PetscCall(MatSetOption(A, MAT_NO_OFF_PROC_ENTRIES, oldFlg));
  PetscCall(PetscLogEventEnd(MAT_SetVCOO, A, 0, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetBindingPropagates - Sets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects

  Input Parameters:
+ A   - the matrix
- flg - flag indicating whether the boundtocpu flag should be propagated

  Level: developer

  Notes:
  If the value of flg is set to true, the following will occur
+ `MatCreateSubMatrices()` and `MatCreateRedundantMatrix()` - bind created matrices to CPU if the input matrix is bound to the CPU.
- `MatCreateVecs()` - bind created vectors to CPU if the input matrix is bound to the CPU.

  The bindingpropagates flag itself is also propagated by the above routines.

  Developer Notes:
  If the fine-scale `DMDA` has the `-dm_bind_below` option set to true, then `DMCreateInterpolationScale()` calls `MatSetBindingPropagates()`
  on the restriction/interpolation operator to set the bindingpropagates flag to true.

.seealso: [](ch_matrices), `Mat`, `VecSetBindingPropagates()`, `MatGetBindingPropagates()`
@*/
PetscErrorCode MatSetBindingPropagates(Mat A, PetscBool flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
  /* the flag only exists in GPU-enabled builds; otherwise this call is a no-op */
  A->bindingpropagates = flg;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatGetBindingPropagates - Gets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects

  Input Parameter:
. A - the matrix

  Output Parameter:
. flg - flag indicating whether the boundtocpu flag will be propagated

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSetBindingPropagates()`
@*/
PetscErrorCode MatGetBindingPropagates(Mat A, PetscBool *flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidBoolPointer(flg, 2);
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
  *flg = A->bindingpropagates;
#else
  /* binding never propagates in builds without GPU support */
  *flg = PETSC_FALSE;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}