#include <petsc/private/matimpl.h> /*I "petscmat.h" I*/

#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>

/* Default MatSetBlockSizes() backend: once the matrix has been preallocated the block
   sizes can no longer change, so only verify that the requested values agree with what
   is already set (a stored block size <= 0 means "not yet set" and is always accepted). */
PETSC_INTERN PetscErrorCode MatSetBlockSizes_Default(Mat mat, PetscInt rbs, PetscInt cbs)
{
  PetscFunctionBegin;
  if (!mat->preallocated) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCheck(mat->rmap->bs <= 0 || mat->rmap->bs == rbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change row block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->rmap->bs, rbs);
  PetscCheck(mat->cmap->bs <= 0 || mat->cmap->bs == cbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change column block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->cmap->bs, cbs);
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Basic MatShift() backend: adds the scalar a to every locally owned diagonal entry of Y
   via MatSetValues(ADD_VALUES) followed by a full assembly. For AIJ/(S)BAIJ-derived types
   the "nonew" flag of the underlying sequential part(s) is temporarily cleared so that a
   missing diagonal entry can be inserted, and restored afterwards. All insertions are
   local, so MAT_NO_OFF_PROC_ENTRIES is temporarily enabled to skip the stash scatter. */
PETSC_INTERN PetscErrorCode MatShift_Basic(Mat Y, PetscScalar a)
{
  PetscInt    i, start, end, oldValA = 0, oldValB = 0;
  PetscScalar alpha = a;
  PetscBool   prevoption;
  PetscBool   isSeqAIJDerived, isMPIAIJDerived; // all classes sharing SEQAIJHEADER or MPIAIJHEADER
  Mat         A = NULL, B = NULL;

  PetscFunctionBegin;
  PetscCall(MatGetOption(Y, MAT_NO_OFF_PROC_ENTRIES, &prevoption));
  PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(PetscObjectBaseTypeCompareAny((PetscObject)Y, &isSeqAIJDerived, MATSEQAIJ, MATSEQBAIJ, MATSEQSBAIJ, ""));
  PetscCall(PetscObjectBaseTypeCompareAny((PetscObject)Y, &isMPIAIJDerived, MATMPIAIJ, MATMPIBAIJ, MATMPISBAIJ, ""));

  if (isSeqAIJDerived) A = Y;
  else if (isMPIAIJDerived) {
    // for MPI types operate on the diagonal (A) and off-diagonal (B) sequential blocks
    Mat_MPIAIJ *mpiaij = (Mat_MPIAIJ *)Y->data;
    A = mpiaij->A;
    B = mpiaij->B;
  }

  if (A) {
    oldValA = ((Mat_SeqAIJ *)(A->data))->nonew;
    ((Mat_SeqAIJ *)(A->data))->nonew = 0; // so that new nonzero locations are allowed
  }
  if (B) {
    oldValB = ((Mat_SeqAIJ *)(B->data))->nonew;
    ((Mat_SeqAIJ *)(B->data))->nonew = 0;
  }

  PetscCall(MatGetOwnershipRange(Y, &start, &end));
  for (i = start; i < end; i++) {
    // rows at or beyond the global column count have no diagonal entry (rectangular case)
    if (i < Y->cmap->N) PetscCall(MatSetValues(Y, 1, &i, 1, &i, &alpha, ADD_VALUES));
  }
  PetscCall(MatAssemblyBegin(Y, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(Y, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, prevoption));
  // restore the original "nonew" settings
  if (A) ((Mat_SeqAIJ *)(A->data))->nonew = oldValA;
  if (B) ((Mat_SeqAIJ *)(B->data))->nonew = oldValB;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatCreate - Creates a matrix where the type is determined
  from either a call to `MatSetType()` or from the options database
  with a call to `MatSetFromOptions()`.

  Collective

  Input Parameter:
. comm - MPI communicator

  Output Parameter:
. A - the matrix

  Options Database Keys:
+ -mat_type seqaij   - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()`
. -mat_type mpiaij   - `MATMPIAIJ` type, uses `MatCreateAIJ()`
. -mat_type seqdense - `MATSEQDENSE`, uses `MatCreateSeqDense()`
. -mat_type mpidense - `MATMPIDENSE` type, uses `MatCreateDense()`
. -mat_type seqbaij  - `MATSEQBAIJ` type, uses `MatCreateSeqBAIJ()`
- -mat_type mpibaij  - `MATMPIBAIJ` type, uses `MatCreateBAIJ()`

  See the manpages for particular formats (e.g., `MATSEQAIJ`)
  for additional format-specific options.

  Level: beginner

  Notes:
  The default matrix type is `MATAIJ`, using the routines `MatCreateSeqAIJ()` or
  `MatCreateAIJ()` if you do not set a type in the options database. If you never call
  `MatSetType()` or `MatSetFromOptions()` it will generate an error when you try to use the
  matrix.

.seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
          `MatCreateSeqDense()`, `MatCreateDense()`,
          `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
          `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
          `MatConvert()`
@*/
PetscErrorCode MatCreate(MPI_Comm comm, Mat *A)
{
  Mat B;

  PetscFunctionBegin;
  PetscAssertPointer(A, 2);

  *A = NULL;
  PetscCall(MatInitializePackage());

  PetscCall(PetscHeaderCreate(B, MAT_CLASSID, "Mat", "Matrix", "Mat", comm, MatDestroy, MatView));
  PetscCall(PetscLayoutCreate(comm, &B->rmap));
  PetscCall(PetscLayoutCreate(comm, &B->cmap));
  PetscCall(PetscStrallocpy(VECSTANDARD, &B->defaultvectype));
  PetscCall(PetscStrallocpy(PETSCRANDER48, &B->defaultrandtype));

  // no symmetry information is known until the user (or an algorithm) supplies it
  B->symmetric                   = PETSC_BOOL3_UNKNOWN;
  B->hermitian                   = PETSC_BOOL3_UNKNOWN;
  B->structurally_symmetric      = PETSC_BOOL3_UNKNOWN;
  B->spd                         = PETSC_BOOL3_UNKNOWN;
  B->symmetry_eternal            = PETSC_FALSE;
  B->structural_symmetry_eternal = PETSC_FALSE;

  B->congruentlayouts = PETSC_DECIDE;
  B->preallocated     = PETSC_FALSE;
#if defined(PETSC_HAVE_DEVICE)
  // start bound to the CPU; device implementations unbind as appropriate
  B->boundtocpu = PETSC_TRUE;
#endif
  *A = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatCreateFromOptions - Creates a matrix whose type is set from the options database

  Collective

  Input Parameters:
+ comm   - MPI communicator
. prefix - [optional] prefix for the options database
. bs     - the blocksize (commonly 1)
. m      - the local number of rows (or `PETSC_DECIDE`)
. n      - the local number of columns (or `PETSC_DECIDE` or `PETSC_DETERMINE`)
. M      - the global number of rows (or `PETSC_DETERMINE`)
- N      - the global number of columns (or `PETSC_DETERMINE`)

  Output Parameter:
. A - the matrix

  Options Database Key:
. 
-mat_type - see `MatType`, for example `aij`, `aijcusparse`, `baij`, `sbaij`, dense, defaults to `aij`

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
          `MatCreateSeqDense()`, `MatCreateDense()`,
          `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
          `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
          `MatConvert()`, `MatCreate()`
@*/
PetscErrorCode MatCreateFromOptions(MPI_Comm comm, const char *prefix, PetscInt bs, PetscInt m, PetscInt n, PetscInt M, PetscInt N, Mat *A)
{
  PetscFunctionBegin;
  PetscAssertPointer(A, 8);
  PetscCall(MatCreate(comm, A));
  if (prefix) PetscCall(MatSetOptionsPrefix(*A, prefix));
  // block size must be set before the sizes so the layouts are built consistently
  PetscCall(MatSetBlockSize(*A, bs));
  PetscCall(MatSetSizes(*A, m, n, M, N));
  PetscCall(MatSetFromOptions(*A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetErrorIfFailure - Causes `Mat` to generate an immediate error when, for example, a zero pivot is detected.

  Logically Collective

  Input Parameters:
+ mat - matrix obtained from `MatCreate()`
- flg - `PETSC_TRUE` indicates you want the error generated

  Level: advanced

  Note:
  If this flag is not set then the matrix operation will note the error and continue. The error may cause a later `PC` or `KSP` error
  or result in a `KSPConvergedReason` indicating the method did not converge.

.seealso: [](ch_matrices), `Mat`, `PCSetErrorIfFailure()`, `KSPConvergedReason`, `SNESConvergedReason`
@*/
PetscErrorCode MatSetErrorIfFailure(Mat mat, PetscBool flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(mat, MAT_CLASSID, 1);
  PetscValidLogicalCollectiveBool(mat, flg, 2);
  mat->erroriffailure = flg;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetSizes - Sets the local and global sizes, and checks to determine compatibility

  Collective

  Input Parameters:
+ A - the matrix
. 
m - number of local rows (or `PETSC_DECIDE`)
. n - number of local columns (or `PETSC_DECIDE`)
. M - number of global rows (or `PETSC_DETERMINE`)
- N - number of global columns (or `PETSC_DETERMINE`)

  Level: beginner

  Notes:
  `m` (`n`) and `M` (`N`) cannot be both `PETSC_DECIDE`
  If one processor calls this with `M` (`N`) of `PETSC_DECIDE` then all processors must, otherwise the program will hang.

  If `PETSC_DECIDE` is not used for the arguments 'm' and 'n', then the
  user must ensure that they are chosen to be compatible with the
  vectors. To do this, one first considers the matrix-vector product
  'y = A x'. The `m` that is used in the above routine must match the
  local size used in the vector creation routine `VecCreateMPI()` for 'y'.
  Likewise, the `n` used must match that used as the local size in
  `VecCreateMPI()` for 'x'.

  You cannot change the sizes once they have been set.

  The sizes must be set before `MatSetUp()` or MatXXXSetPreallocation() is called.

.seealso: [](ch_matrices), `Mat`, `MatGetSize()`, `PetscSplitOwnership()`
@*/
PetscErrorCode MatSetSizes(Mat A, PetscInt m, PetscInt n, PetscInt M, PetscInt N)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidLogicalCollectiveInt(A, M, 4);
  PetscValidLogicalCollectiveInt(A, N, 5);
  // a specified local size may not exceed a specified global size
  PetscCheck(M <= 0 || m <= M, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local row size %" PetscInt_FMT " cannot be larger than global row size %" PetscInt_FMT, m, M);
  PetscCheck(N <= 0 || n <= N, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local column size %" PetscInt_FMT " cannot be larger than global column size %" PetscInt_FMT, n, N);
  // once sizes have been set (non-negative) they may not be changed
  PetscCheck((A->rmap->n < 0 || A->rmap->N < 0) || (A->rmap->n == m && (M <= 0 || A->rmap->N == M)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset row sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", m, M,
             A->rmap->n, A->rmap->N);
  PetscCheck((A->cmap->n < 0 || A->cmap->N < 0) || (A->cmap->n == n && (N <= 0 || A->cmap->N == N)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset column sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", n, N,
             A->cmap->n, A->cmap->N);
  A->rmap->n = m;
  A->cmap->n = n;
  // keep any previously set global size when the caller passed PETSC_DETERMINE/PETSC_DECIDE
  A->rmap->N = M > -1 ? M : A->rmap->N;
  A->cmap->N = N > -1 ? N : A->cmap->N;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetFromOptions - Creates a matrix where the type is determined
  from the options database.

  Collective

  Input Parameter:
. B - the matrix

  Options Database Keys:
+ -mat_type seqaij   - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()`
. -mat_type mpiaij   - `MATMPIAIJ` type, uses `MatCreateAIJ()`
. -mat_type seqdense - `MATSEQDENSE` type, uses `MatCreateSeqDense()`
. 
-mat_type mpidense - `MATMPIDENSE`, uses `MatCreateDense()`
. -mat_type seqbaij  - `MATSEQBAIJ`, uses `MatCreateSeqBAIJ()`
- -mat_type mpibaij  - `MATMPIBAIJ`, uses `MatCreateBAIJ()`

  See the manpages for particular formats (e.g., `MATSEQAIJ`)
  for additional format-specific options.

  Level: beginner

  Notes:
  Generates a parallel MPI matrix if the communicator has more than one processor. The default
  matrix type is `MATAIJ`, using the routines `MatCreateSeqAIJ()` and `MatCreateAIJ()` if you
  do not select a type in the options database.

.seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
          `MatCreateSeqDense()`, `MatCreateDense()`,
          `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
          `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
          `MatConvert()`
@*/
PetscErrorCode MatSetFromOptions(Mat B)
{
  const char *deft = MATAIJ;
  char        type[256];
  PetscBool   flg, set;
  PetscInt    bind_below = 0;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(B, MAT_CLASSID, 1);

  PetscObjectOptionsBegin((PetscObject)B);

  // only honor -mat_block_size when no block size has been set yet
  if (B->rmap->bs < 0) {
    PetscInt newbs = -1;
    PetscCall(PetscOptionsInt("-mat_block_size", "Set the blocksize used to store the matrix", "MatSetBlockSize", newbs, &newbs, &flg));
    if (flg) {
      PetscCall(PetscLayoutSetBlockSize(B->rmap, newbs));
      PetscCall(PetscLayoutSetBlockSize(B->cmap, newbs));
    }
  }

  PetscCall(PetscOptionsFList("-mat_type", "Matrix type", "MatSetType", MatList, deft, type, 256, &flg));
  if (flg) {
    PetscCall(MatSetType(B, type));
  } else if (!((PetscObject)B)->type_name) {
    // no option given and no type set yet: fall back to the default type
    PetscCall(MatSetType(B, deft));
  }

  // -mat_is_symmetric is queried twice: once as a flag, once for an optional tolerance value
  PetscCall(PetscOptionsName("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", &B->checksymmetryonassembly));
  PetscCall(PetscOptionsReal("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", B->checksymmetrytol, &B->checksymmetrytol, NULL));
  PetscCall(PetscOptionsBool("-mat_null_space_test", "Checks if provided null space is correct in MatAssemblyEnd()", "MatSetNullSpaceTest", B->checknullspaceonassembly, &B->checknullspaceonassembly, NULL));
  PetscCall(PetscOptionsBool("-mat_error_if_failure", "Generate an error if an error occurs when factoring the matrix", "MatSetErrorIfFailure", B->erroriffailure, &B->erroriffailure, NULL));

  // let the concrete type process its own options (must come after MatSetType() above)
  PetscTryTypeMethod(B, setfromoptions, PetscOptionsObject);

  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_new_nonzero_location_err", "Generate an error if new nonzeros are created in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_LOCATION_ERR, flg));
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_new_nonzero_allocation_err", "Generate an error if new nonzeros are allocated in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, flg));
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_ignore_zero_entries", "For AIJ/IS matrices this will stop zero values from creating a zero location in the matrix", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_IGNORE_ZERO_ENTRIES, flg));

  flg = PETSC_FALSE;
  PetscCall(PetscOptionsBool("-mat_form_explicit_transpose", "Hint to form an explicit transpose for operations like MatMultTranspose", "MatSetOption", flg, &flg, &set));
  if (set) PetscCall(MatSetOption(B, MAT_FORM_EXPLICIT_TRANSPOSE, flg));

  /* Bind to CPU if below a user-specified size threshold.
   * This perhaps belongs in the options for the GPU Mat types, but MatBindToCPU() does nothing when called on non-GPU types,
   * and putting it here makes it more maintainable than duplicating this for all. */
  PetscCall(PetscOptionsInt("-mat_bind_below", "Set the size threshold (in local rows) below which the Mat is bound to the CPU", "MatBindToCPU", bind_below, &bind_below, &flg));
  if (flg && B->rmap->n < bind_below) PetscCall(MatBindToCPU(B, PETSC_TRUE));

  /* process any options handlers added with PetscObjectAddOptionsHandler() */
  PetscCall(PetscObjectProcessOptionsHandlers((PetscObject)B, PetscOptionsObject));
  PetscOptionsEnd();
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatXAIJSetPreallocation - set preallocation for serial and parallel `MATAIJ`, `MATBAIJ`, and `MATSBAIJ` matrices and their unassembled versions.

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. bs    - block size
. dnnz  - number of nonzero column blocks per block row of diagonal part of parallel matrix
. onnz  - number of nonzero column blocks per block row of off-diagonal part of parallel matrix
. dnnzu - number of nonzero column blocks per block row of upper-triangular part of diagonal part of parallel matrix
- onnzu - number of nonzero column blocks per block row of upper-triangular part of off-diagonal part of parallel matrix

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`, `MatMPIBAIJSetPreallocation()`,
          `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`,
          `PetscSplitOwnership()`
@*/
PetscErrorCode MatXAIJSetPreallocation(Mat A, PetscInt bs, const PetscInt dnnz[], const PetscInt onnz[], const PetscInt dnnzu[], const PetscInt onnzu[])
{
  PetscInt cbs;
  void (*aij)(void);
  void (*is)(void);
  void (*hyp)(void) = NULL;

  PetscFunctionBegin;
  if (bs != PETSC_DECIDE) { /* don't mess with an already set block size */
    PetscCall(MatSetBlockSize(A, bs));
  }
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));
  PetscCall(MatGetBlockSizes(A, &bs, &cbs));
  /* these routines assume bs == cbs, this should be checked somehow */
  // each call below is a no-op unless A actually is the corresponding type
  PetscCall(MatSeqBAIJSetPreallocation(A, bs, 0, dnnz));
  PetscCall(MatMPIBAIJSetPreallocation(A, bs, 0, dnnz, 0, onnz));
  PetscCall(MatSeqSBAIJSetPreallocation(A, bs, 0, dnnzu));
  PetscCall(MatMPISBAIJSetPreallocation(A, bs, 0, dnnzu, 0, onnzu));
  /*
    In general, we have to do extra work to preallocate for scalar (AIJ) or unassembled (IS) matrices so we check whether it will do any
    good before going on with it.
  */
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatMPIAIJSetPreallocation_C", &aij));
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatISSetPreallocation_C", &is));
#if defined(PETSC_HAVE_HYPRE)
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatHYPRESetPreallocation_C", &hyp));
#endif
  if (!aij && !is && !hyp) PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSeqAIJSetPreallocation_C", &aij));
  if (aij || is || hyp) {
    if (bs == cbs && bs == 1) {
      PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz));
      PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz, 0, onnz));
      PetscCall(MatISSetPreallocation(A, 0, dnnz, 0, onnz));
#if defined(PETSC_HAVE_HYPRE)
      PetscCall(MatHYPRESetPreallocation(A, 0, dnnz, 0, onnz));
#endif
    } else { /* Convert block-row preallocation to scalar-row */
      PetscInt i, m, *sdnnz, *sonnz;
      PetscCall(MatGetLocalSize(A, &m, NULL));
      // (!!dnnz) * m evaluates to 0 or m, so storage is only allocated for arrays the caller supplied
      PetscCall(PetscMalloc2((!!dnnz) * m, &sdnnz, (!!onnz) * m, &sonnz));
      for (i = 0; i < m; i++) {
        if (dnnz) sdnnz[i] = dnnz[i / bs] * cbs;
        if (onnz) sonnz[i] = onnz[i / bs] * cbs;
      }
      PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL));
      PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
      PetscCall(MatISSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
#if defined(PETSC_HAVE_HYPRE)
      PetscCall(MatHYPRESetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL));
#endif
      PetscCall(PetscFree2(sdnnz, sonnz));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatHeaderMerge - Merges some information from the header of `C` to `A`; the `C` object is then destroyed

  Collective, No Fortran Support

  Input Parameters:
+ A - a `Mat` being merged into
- C - the `Mat` providing the merge information

  Level: developer

  Developer Note:
  This is somewhat different from `MatHeaderReplace()`, it would be nice to merge the code

.seealso: `Mat`, `MatHeaderReplace()`
@*/
PetscErrorCode MatHeaderMerge(Mat A, Mat *C)
{
  PetscInt         refct;
  PetscOps         Abops;
  struct _MatOps   Aops;
  char            *mtype, *mname, *mprefix;
  Mat_Product     *product;
  Mat_Redundant   *redundant;
  PetscObjectState state;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidHeaderSpecific(*C, MAT_CLASSID, 2);
  if (A == *C) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCheckSameComm(A, 1, *C, 2);
  /* save the parts of A we need */
  Abops     = ((PetscObject)A)->bops[0];
  Aops      = A->ops[0];
  refct     = ((PetscObject)A)->refct;
  mtype     = ((PetscObject)A)->type_name;
  mname     = ((PetscObject)A)->name;
  state     = ((PetscObject)A)->state;
  mprefix   = ((PetscObject)A)->prefix;
  product   = A->product;
  redundant = A->redundant;

  /* zero these so the destroy below does not free them */
  ((PetscObject)A)->type_name = NULL;
  ((PetscObject)A)->name      = NULL;

  /*
     free all the interior data structures from mat
     cannot use PetscUseTypeMethod(A,destroy); because compiler
     thinks it may print NULL type_name and name
  */
  PetscTryTypeMethod(A, destroy);

  PetscCall(PetscFree(A->defaultvectype));
  PetscCall(PetscFree(A->defaultrandtype));
  PetscCall(PetscLayoutDestroy(&A->rmap));
  PetscCall(PetscLayoutDestroy(&A->cmap));
  PetscCall(PetscFunctionListDestroy(&((PetscObject)A)->qlist));
  PetscCall(PetscObjectListDestroy(&((PetscObject)A)->olist));
  PetscCall(PetscComposedQuantitiesDestroy((PetscObject)A));

  /* copy C over to A */
  PetscCall(PetscFree(A->factorprefix));
  PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat)));

  /* return the parts of A we saved */
  ((PetscObject)A)->bops[0]   = Abops;
  A->ops[0]                   = Aops;
  ((PetscObject)A)->refct     = refct;
  ((PetscObject)A)->type_name = mtype;
  ((PetscObject)A)->name      = mname;
  ((PetscObject)A)->prefix    = mprefix;
  ((PetscObject)A)->state     = state + 1;
  A->product                  = product;
  A->redundant                = redundant;

  /* since these two are copied into A we do not want them destroyed in C */
  ((PetscObject)*C)->qlist = NULL;
  ((PetscObject)*C)->olist = NULL;

  PetscCall(PetscHeaderDestroy(C));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatHeaderReplace - Replaces the internal data of matrix `A` by the internal data of matrix `C` while deleting the outer wrapper of `C`

  Input Parameters:
+ A - a `Mat` whose internal data is to be replaced
- C - the `Mat` providing new internal data for `A`

  Level: advanced

  Example Usage\:
.vb
  Mat C;
  MatCreateSeqAIJWithArrays(..., &C);
  MatHeaderReplace(A, &C);
  // C has been destroyed and A contains the matrix entries of C
.ve

  Note:
  This can be used inside a function provided to `SNESSetJacobian()`, `TSSetRHSJacobian()`, or `TSSetIJacobian()` in cases where the user code computes an entirely new sparse matrix
  (generally with a different nonzero pattern) for each Newton update. It is usually better to reuse the matrix structure of `A` instead of constructing an entirely new one.
518 519 Developer Note: 520 This is somewhat different from `MatHeaderMerge()` it would be nice to merge the code 521 522 .seealso: `Mat`, `MatHeaderMerge()` 523 @*/ 524 PetscErrorCode MatHeaderReplace(Mat A, Mat *C) 525 { 526 PetscInt refct; 527 PetscObjectState state; 528 struct _p_Mat buffer; 529 MatStencilInfo stencil; 530 531 PetscFunctionBegin; 532 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 533 PetscValidHeaderSpecific(*C, MAT_CLASSID, 2); 534 if (A == *C) PetscFunctionReturn(PETSC_SUCCESS); 535 PetscCheckSameComm(A, 1, *C, 2); 536 PetscCheck(((PetscObject)*C)->refct == 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "Object C has refct %" PetscInt_FMT " > 1, would leave hanging reference", ((PetscObject)*C)->refct); 537 538 /* swap C and A */ 539 refct = ((PetscObject)A)->refct; 540 state = ((PetscObject)A)->state; 541 stencil = A->stencil; 542 PetscCall(PetscMemcpy(&buffer, A, sizeof(struct _p_Mat))); 543 PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat))); 544 PetscCall(PetscMemcpy(*C, &buffer, sizeof(struct _p_Mat))); 545 ((PetscObject)A)->refct = refct; 546 ((PetscObject)A)->state = state + 1; 547 A->stencil = stencil; 548 549 ((PetscObject)*C)->refct = 1; 550 PetscCall(MatDestroy(C)); 551 PetscFunctionReturn(PETSC_SUCCESS); 552 } 553 554 /*@ 555 MatBindToCPU - marks a matrix to temporarily stay on the CPU and perform computations on the CPU 556 557 Logically Collective 558 559 Input Parameters: 560 + A - the matrix 561 - flg - bind to the CPU if value of `PETSC_TRUE` 562 563 Level: intermediate 564 565 .seealso: [](ch_matrices), `Mat`, `MatBoundToCPU()` 566 @*/ 567 PetscErrorCode MatBindToCPU(Mat A, PetscBool flg) 568 { 569 PetscFunctionBegin; 570 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 571 PetscValidLogicalCollectiveBool(A, flg, 2); 572 #if defined(PETSC_HAVE_DEVICE) 573 if (A->boundtocpu == flg) PetscFunctionReturn(PETSC_SUCCESS); 574 A->boundtocpu = flg; 575 PetscTryTypeMethod(A, bindtocpu, flg); 576 #endif 577 
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatBoundToCPU - query if a matrix is bound to the CPU

  Input Parameter:
. A - the matrix

  Output Parameter:
. flg - the logical flag

  Level: intermediate

.seealso: [](ch_matrices), `Mat`, `MatBindToCPU()`
@*/
PetscErrorCode MatBoundToCPU(Mat A, PetscBool *flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(flg, 2);
#if defined(PETSC_HAVE_DEVICE)
  *flg = A->boundtocpu;
#else
  // without device support every matrix lives on the CPU
  *flg = PETSC_TRUE;
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Fallback MatSetValuesCOO() implementation: replays the COO indices stashed on the object
   by MatSetPreallocationCOO_Basic() through MatSetValue(ADD_VALUES). INSERT_VALUES is
   emulated by zeroing the matrix first; repeated indices therefore still sum. */
PetscErrorCode MatSetValuesCOO_Basic(Mat A, const PetscScalar coo_v[], InsertMode imode)
{
  IS              is_coo_i, is_coo_j;
  const PetscInt *coo_i, *coo_j;
  PetscInt        n, n_i, n_j;
  PetscScalar     zero = 0.;

  PetscFunctionBegin;
  // retrieve the index sets composed on A during preallocation
  PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_i", (PetscObject *)&is_coo_i));
  PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_j", (PetscObject *)&is_coo_j));
  PetscCheck(is_coo_i, PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_i IS");
  PetscCheck(is_coo_j, PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_j IS");
  PetscCall(ISGetLocalSize(is_coo_i, &n_i));
  PetscCall(ISGetLocalSize(is_coo_j, &n_j));
  PetscCheck(n_i == n_j, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong local size %" PetscInt_FMT " != %" PetscInt_FMT, n_i, n_j);
  PetscCall(ISGetIndices(is_coo_i, &coo_i));
  PetscCall(ISGetIndices(is_coo_j, &coo_j));
  if (imode != ADD_VALUES) PetscCall(MatZeroEntries(A));
  // a NULL coo_v means "set zeros at the preallocated locations"
  for (n = 0; n < n_i; n++) PetscCall(MatSetValue(A, coo_i[n], coo_j[n], coo_v ? coo_v[n] : zero, ADD_VALUES));
  PetscCall(ISRestoreIndices(is_coo_i, &coo_i));
  PetscCall(ISRestoreIndices(is_coo_j, &coo_j));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Fallback MatSetPreallocationCOO() implementation: runs the COO pattern through a
   MATPREALLOCATOR matrix to compute the preallocation, then stashes copies of the
   indices on A (as index sets) for later replay by MatSetValuesCOO_Basic(). */
PetscErrorCode MatSetPreallocationCOO_Basic(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
{
  Mat         preallocator;
  IS          is_coo_i, is_coo_j;
  PetscScalar zero = 0.0;

  PetscFunctionBegin;
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));
  PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &preallocator));
  PetscCall(MatSetType(preallocator, MATPREALLOCATOR));
  PetscCall(MatSetSizes(preallocator, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
  // share A's layouts so ownership ranges match exactly
  PetscCall(MatSetLayouts(preallocator, A->rmap, A->cmap));
  PetscCall(MatSetUp(preallocator));
  for (PetscCount n = 0; n < ncoo; n++) PetscCall(MatSetValue(preallocator, coo_i[n], coo_j[n], zero, INSERT_VALUES));
  PetscCall(MatAssemblyBegin(preallocator, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(preallocator, MAT_FINAL_ASSEMBLY));
  PetscCall(MatPreallocatorPreallocate(preallocator, PETSC_TRUE, A));
  PetscCall(MatDestroy(&preallocator));
  // ISCreateGeneral() below takes a PetscInt count, so ncoo must fit in one
  PetscCheck(ncoo <= PETSC_MAX_INT, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo);
  PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncoo, coo_i, PETSC_COPY_VALUES, &is_coo_i));
  PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncoo, coo_j, PETSC_COPY_VALUES, &is_coo_j));
  PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_i", (PetscObject)is_coo_i));
  PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_j", (PetscObject)is_coo_j));
  PetscCall(ISDestroy(&is_coo_i));
  PetscCall(ISDestroy(&is_coo_j));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSetPreallocationCOO - set preallocation for matrices using a coordinate format of the entries with global indices

Collective

  Input Parameters:
+ A     - matrix being preallocated
. ncoo  - number of entries
. coo_i - row indices
- coo_j - column indices

  Level: beginner

  Notes:
  The indices `coo_i` and `coo_j` may be modified within this function. The caller should not rely on them
  having any specific value after this function returns. The arrays can be freed or reused immediately
  after this function returns.

  Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed
  but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries
  are allowed and will be properly added or inserted to the matrix, unless the matrix option `MAT_IGNORE_OFF_PROC_ENTRIES`
  is set, in which case remote entries are ignored, or `MAT_NO_OFF_PROC_ENTRIES` is set, in which case an error will be generated.

  If you just want to create a sequential AIJ matrix (`MATSEQAIJ`), and your matrix entries in COO format are unique, you can also use
  `MatCreateSeqAIJFromTriple()`. But that is not recommended for iterative applications.

.seealso: [](ch_matrices), `Mat`, `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`,
          `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOOLocal()`,
          `DMSetMatrixPreallocateSkip()`, `MatCreateSeqAIJFromTriple()`
@*/
PetscErrorCode MatSetPreallocationCOO(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
{
  PetscErrorCode (*f)(Mat, PetscCount, PetscInt[], PetscInt[]) = NULL;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidType(A, 1);
  if (ncoo) PetscAssertPointer(coo_i, 3);
  if (ncoo) PetscAssertPointer(coo_j, 4);
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));
  // dispatch to a type-specific implementation when one is composed on A
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOO_C", &f));

  PetscCall(PetscLogEventBegin(MAT_PreallCOO, A, 0, 0, 0));
  if (f) {
    PetscCall((*f)(A, ncoo, coo_i, coo_j));
  } else { /* allow fallback, very slow */
    PetscCall(MatSetPreallocationCOO_Basic(A, ncoo, coo_i, coo_j));
  }
  PetscCall(PetscLogEventEnd(MAT_PreallCOO, A, 0, 0, 0));
  A->preallocated = PETSC_TRUE;
  A->nonzerostate++;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSetPreallocationCOOLocal - set preallocation for matrices using a coordinate format of the entries with local indices

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. ncoo  - number of entries
. coo_i - row indices (local numbering; may be modified)
- coo_j - column indices (local numbering; may be modified)

  Level: beginner

  Notes:
  The local indices are translated using the local to global mapping, thus `MatSetLocalToGlobalMapping()` must have been
  called prior to this function. For matrices created with `DMCreateMatrix()` the local to global mapping is often already provided.

  The indices `coo_i` and `coo_j` may be modified within this function. They might be translated to corresponding global
  indices, but the caller should not rely on them having any specific value after this function returns. The arrays
  can be freed or reused immediately after this function returns.

  Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed
  but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries
  are allowed and will be properly added or inserted to the matrix.

.seealso: [](ch_matrices), `Mat`, `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`,
          `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOO()`,
          `DMSetMatrixPreallocateSkip()`
@*/
PetscErrorCode MatSetPreallocationCOOLocal(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[])
{
  PetscErrorCode (*f)(Mat, PetscCount, PetscInt[], PetscInt[]) = NULL;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidType(A, 1);
  if (ncoo) PetscAssertPointer(coo_i, 3);
  if (ncoo) PetscAssertPointer(coo_j, 4);
  // ISLocalToGlobalMappingApply() below takes PetscInt counts, so ncoo must fit in one
  PetscCheck(ncoo <= PETSC_MAX_INT, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo);
  PetscCall(PetscLayoutSetUp(A->rmap));
  PetscCall(PetscLayoutSetUp(A->cmap));

  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOOLocal_C", &f));
  if (f) {
    PetscCall((*f)(A, ncoo, coo_i, coo_j));
    A->nonzerostate++;
  } else {
    // no native local-index support: translate to global indices in place, then use the global path
    ISLocalToGlobalMapping ltog_row, ltog_col;
    PetscCall(MatGetLocalToGlobalMapping(A, &ltog_row, &ltog_col));
    if (ltog_row) PetscCall(ISLocalToGlobalMappingApply(ltog_row, ncoo, coo_i, coo_i));
    if (ltog_col) PetscCall(ISLocalToGlobalMappingApply(ltog_col, ncoo, coo_j, coo_j));
    PetscCall(MatSetPreallocationCOO(A, ncoo, coo_i, coo_j));
  }
  A->preallocated = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetValuesCOO - set values at once in a matrix preallocated using `MatSetPreallocationCOO()`

  Collective

  Input Parameters:
+ A     - matrix being preallocated
. coo_v - the matrix values (can be `NULL`)
- imode - the insert mode

  Level: beginner

  Notes:
  The values must follow the order of the indices prescribed with `MatSetPreallocationCOO()` or `MatSetPreallocationCOOLocal()`.

  When repeated entries are specified in the COO indices the `coo_v` values are first properly summed, regardless of the value of imode.
  The imode flag indicates if coo_v must be added to the current values of the matrix (`ADD_VALUES`) or overwritten (`INSERT_VALUES`).

  `MatAssemblyBegin()` and `MatAssemblyEnd()` do not need to be called after this routine. It automatically handles the assembly process.

.seealso: [](ch_matrices), `Mat`, `MatSetPreallocationCOO()`, `MatSetPreallocationCOOLocal()`, `InsertMode`, `INSERT_VALUES`, `ADD_VALUES`
@*/
PetscErrorCode MatSetValuesCOO(Mat A, const PetscScalar coo_v[], InsertMode imode)
{
  PetscErrorCode (*f)(Mat, const PetscScalar[], InsertMode) = NULL;
  PetscBool oldFlg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidType(A, 1);
  MatCheckPreallocated(A, 1);
  PetscValidLogicalCollectiveEnum(A, imode, 3);
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetValuesCOO_C", &f));
  PetscCall(PetscLogEventBegin(MAT_SetVCOO, A, 0, 0, 0));
  if (f) {
    PetscCall((*f)(A, coo_v, imode)); // all known COO implementations do not use MatStash. They do their own off-proc communication
    PetscCall(MatGetOption(A, MAT_NO_OFF_PROC_ENTRIES, &oldFlg));
    PetscCall(MatSetOption(A, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE)); // set A->nooffprocentries to avoid costly MatStash scatter in MatAssembly
  } else {
    PetscCall(MatSetValuesCOO_Basic(A, coo_v, imode)); // fall back to MatSetValues, which might use MatStash
  }
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
  if (f) PetscCall(MatSetOption(A, MAT_NO_OFF_PROC_ENTRIES, oldFlg)); // restore the user's original setting
  PetscCall(PetscLogEventEnd(MAT_SetVCOO, A, 0, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatSetBindingPropagates - Sets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects

  Input Parameters:
+ A   - the matrix
- flg - flag indicating whether the boundtocpu flag should be propagated

  Level: developer

  Notes:
  If the value of flg is set to true, the following will occur
+ `MatCreateSubMatrices()` and `MatCreateRedundantMatrix()` - bind created matrices to CPU if the input matrix is bound to the CPU.
- `MatCreateVecs()` - bind created vectors to CPU if the input matrix is bound to the CPU.

  The bindingpropagates flag itself is also propagated by the above routines.

  Developer Notes:
  If the fine-scale `DMDA` has the `-dm_bind_below` option set to true, then `DMCreateInterpolationScale()` calls `MatSetBindingPropagates()`
  on the restriction/interpolation operator to set the bindingpropagates flag to true.
838 839 .seealso: [](ch_matrices), `Mat`, `VecSetBindingPropagates()`, `MatGetBindingPropagates()` 840 @*/ 841 PetscErrorCode MatSetBindingPropagates(Mat A, PetscBool flg) 842 { 843 PetscFunctionBegin; 844 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 845 #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP) 846 A->bindingpropagates = flg; 847 #endif 848 PetscFunctionReturn(PETSC_SUCCESS); 849 } 850 851 /*@ 852 MatGetBindingPropagates - Gets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects 853 854 Input Parameter: 855 . A - the matrix 856 857 Output Parameter: 858 . flg - flag indicating whether the boundtocpu flag will be propagated 859 860 Level: developer 861 862 .seealso: [](ch_matrices), `Mat`, `MatSetBindingPropagates()` 863 @*/ 864 PetscErrorCode MatGetBindingPropagates(Mat A, PetscBool *flg) 865 { 866 PetscFunctionBegin; 867 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 868 PetscAssertPointer(flg, 2); 869 #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP) 870 *flg = A->bindingpropagates; 871 #else 872 *flg = PETSC_FALSE; 873 #endif 874 PetscFunctionReturn(PETSC_SUCCESS); 875 } 876