#include <petsc/private/matimpl.h> /*I "petscmat.h" I*/

/* Default MatSetBlockSizes() implementation: once the matrix is preallocated the layout
   block sizes can no longer change, so only accept sizes that match the current ones
   (or any size when none has been fixed yet, i.e. bs <= 0). */
PETSC_INTERN PetscErrorCode MatSetBlockSizes_Default(Mat mat, PetscInt rbs, PetscInt cbs)
{
  PetscFunctionBegin;
  /* before preallocation the layouts are still free to take any block size */
  if (!mat->preallocated) PetscFunctionReturn(0);
  PetscCheck(mat->rmap->bs <= 0 || mat->rmap->bs == rbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change row block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->rmap->bs, rbs);
  PetscCheck(mat->cmap->bs <= 0 || mat->cmap->bs == cbs, PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "Cannot change column block size %" PetscInt_FMT " to %" PetscInt_FMT, mat->cmap->bs, cbs);
  PetscFunctionReturn(0);
}

/* Fallback implementation of Y += a*I: adds a to every locally owned diagonal entry
   with MatSetValues() and reassembles the matrix. */
PETSC_INTERN PetscErrorCode MatShift_Basic(Mat Y, PetscScalar a)
{
  PetscInt    i, start, end;
  PetscScalar alpha = a;
  PetscBool   prevoption;

  PetscFunctionBegin;
  /* only locally owned (diagonal) entries are set, so assembly need not look for
     off-process values; save the current option so it can be restored afterwards */
  PetscCall(MatGetOption(Y, MAT_NO_OFF_PROC_ENTRIES, &prevoption));
  PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(MatGetOwnershipRange(Y, &start, &end));
  for (i = start; i < end; i++) {
    /* skip rows beyond the last column of a tall rectangular matrix */
    if (i < Y->cmap->N) PetscCall(MatSetValues(Y, 1, &i, 1, &i, &alpha, ADD_VALUES));
  }
  PetscCall(MatAssemblyBegin(Y, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(Y, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(Y, MAT_NO_OFF_PROC_ENTRIES, prevoption));
  PetscFunctionReturn(0);
}

/*@
  MatCreate - Creates a matrix where the type is determined
  from either a call to `MatSetType()` or from the options database
  with a call to `MatSetFromOptions()`. The default matrix type is
  `MATAIJ`, using the routines `MatCreateSeqAIJ()` or `MatCreateAIJ()`
  if you do not set a type in the options database. If you never
  call `MatSetType()` or `MatSetFromOptions()` it will generate an
  error when you try to use the matrix.

  Collective

  Input Parameter:
. comm - MPI communicator

  Output Parameter:
. A - the matrix

  Options Database Keys:
+ -mat_type seqaij - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()`
. -mat_type mpiaij - `MATMPIAIJ` type, uses `MatCreateAIJ()`
. -mat_type seqdense - `MATSEQDENSE`, uses `MatCreateSeqDense()`
. -mat_type mpidense - `MATMPIDENSE` type, uses `MatCreateDense()`
. -mat_type seqbaij - `MATSEQBAIJ` type, uses `MatCreateSeqBAIJ()`
- -mat_type mpibaij - `MATMPIBAIJ` type, uses `MatCreateBAIJ()`

  Even More Options Database Keys:
  See the manpages for particular formats (e.g., `MatCreateSeqAIJ()`)
  for additional format-specific options.

  Level: beginner

.seealso: `MatCreateSeqAIJ()`, `MatCreateAIJ()`,
          `MatCreateSeqDense()`, `MatCreateDense()`,
          `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`,
          `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`,
          `MatConvert()`
@*/
PetscErrorCode MatCreate(MPI_Comm comm, Mat *A)
{
  Mat B;

  PetscFunctionBegin;
  PetscValidPointer(A, 2);

  *A = NULL;
  PetscCall(MatInitializePackage());

  PetscCall(PetscHeaderCreate(B, MAT_CLASSID, "Mat", "Matrix", "Mat", comm, MatDestroy, MatView));
  PetscCall(PetscLayoutCreate(comm, &B->rmap));
  PetscCall(PetscLayoutCreate(comm, &B->cmap));
  PetscCall(PetscStrallocpy(VECSTANDARD, &B->defaultvectype));
  PetscCall(PetscStrallocpy(PETSCRANDER48, &B->defaultrandtype));

  /* no symmetry information is known yet for a freshly created matrix */
  B->symmetric                   = PETSC_BOOL3_UNKNOWN;
  B->hermitian                   = PETSC_BOOL3_UNKNOWN;
  B->structurally_symmetric      = PETSC_BOOL3_UNKNOWN;
  B->spd                         = PETSC_BOOL3_UNKNOWN;
  B->symmetry_eternal            = PETSC_FALSE;
  B->structural_symmetry_eternal = PETSC_FALSE;

  B->congruentlayouts = PETSC_DECIDE;
  B->preallocated     = PETSC_FALSE;
#if defined(PETSC_HAVE_DEVICE)
  /* matrices start on the CPU; the type may rebind to a device later */
  B->boundtocpu = PETSC_TRUE;
#endif
  *A = B;
  PetscFunctionReturn(0);
}

/*@
  MatSetErrorIfFailure - Causes `Mat` to generate an immediate error, for example a zero pivot, is detected.
  Logically Collective on mat

  Input Parameters:
+ mat - matrix obtained from `MatCreate()`
- flg - `PETSC_TRUE` indicates you want the error generated

  Level: advanced

  Note:
  If this flag is not set then the matrix operation will note the error and continue. The error may cause a later `PC` or `KSP` error
  or result in a `KSPConvergedReason` indicating the method did not converge.

.seealso: `PCSetErrorIfFailure()`, `KSPConvergedReason`, `SNESConvergedReason`
@*/
PetscErrorCode MatSetErrorIfFailure(Mat mat, PetscBool flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(mat, MAT_CLASSID, 1);
  PetscValidLogicalCollectiveBool(mat, flg, 2);
  mat->erroriffailure = flg;
  PetscFunctionReturn(0);
}

/*@
  MatSetSizes - Sets the local and global sizes, and checks to determine compatibility

  Collective on Mat

  Input Parameters:
+ A - the matrix
. m - number of local rows (or `PETSC_DECIDE`)
. n - number of local columns (or `PETSC_DECIDE`)
. M - number of global rows (or `PETSC_DETERMINE`)
- N - number of global columns (or `PETSC_DETERMINE`)

  Notes:
  m (n) and M (N) cannot be both `PETSC_DECIDE`
  If one processor calls this with M (N) of `PETSC_DECIDE` then all processors must, otherwise the program will hang.

  If `PETSC_DECIDE` is not used for the arguments 'm' and 'n', then the
  user must ensure that they are chosen to be compatible with the
  vectors. To do this, one first considers the matrix-vector product
  'y = A x'. The 'm' that is used in the above routine must match the
  local size used in the vector creation routine VecCreateMPI() for 'y'.
  Likewise, the 'n' used must match that used as the local size in
  `VecCreateMPI()` for 'x'.

  You cannot change the sizes once they have been set.

  The sizes must be set before `MatSetUp()` or MatXXXSetPreallocation() is called.

  Level: beginner

.seealso: `MatGetSize()`, `PetscSplitOwnership()`
@*/
PetscErrorCode MatSetSizes(Mat A, PetscInt m, PetscInt n, PetscInt M, PetscInt N)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidLogicalCollectiveInt(A, M, 4);
  PetscValidLogicalCollectiveInt(A, N, 5);
  /* a local size may never exceed the requested global size (negative means "decide") */
  PetscCheck(M <= 0 || m <= M, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local row size %" PetscInt_FMT " cannot be larger than global row size %" PetscInt_FMT, m, M);
  PetscCheck(N <= 0 || n <= N, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Local column size %" PetscInt_FMT " cannot be larger than global column size %" PetscInt_FMT, n, N);
  /* once sizes are set (non-negative), they may only be "re-set" to identical values */
  PetscCheck((A->rmap->n < 0 || A->rmap->N < 0) || (A->rmap->n == m && (M <= 0 || A->rmap->N == M)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset row sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", m, M,
             A->rmap->n, A->rmap->N);
  PetscCheck((A->cmap->n < 0 || A->cmap->N < 0) || (A->cmap->n == n && (N <= 0 || A->cmap->N == N)), PETSC_COMM_SELF, PETSC_ERR_SUP, "Cannot change/reset column sizes to %" PetscInt_FMT " local %" PetscInt_FMT " global after previously setting them to %" PetscInt_FMT " local %" PetscInt_FMT " global", n, N,
             A->cmap->n, A->cmap->N);
  A->rmap->n = m;
  A->cmap->n = n;
  /* keep the previous global size when the caller passed PETSC_DETERMINE (negative) */
  A->rmap->N = M > -1 ? M : A->rmap->N;
  A->cmap->N = N > -1 ? N : A->cmap->N;
  PetscFunctionReturn(0);
}

/*@
  MatSetFromOptions - Creates a matrix where the type is determined
  from the options database. Generates a parallel MPI matrix if the
  communicator has more than one processor. The default matrix type is
  `MATAIJ`, using the routines `MatCreateSeqAIJ()` and `MatCreateAIJ()` if
  you do not select a type in the options database.

  Collective on B

  Input Parameter:
.
A - the matrix 188 189 Options Database Keys: 190 + -mat_type seqaij - `MATSEQAIJ` type, uses `MatCreateSeqAIJ()` 191 . -mat_type mpiaij - `MATMPIAIJ` type, uses `MatCreateAIJ()` 192 . -mat_type seqdense - `MATSEQDENSE` type, uses `MatCreateSeqDense()` 193 . -mat_type mpidense - `MATMPIDENSE`, uses `MatCreateDense()` 194 . -mat_type seqbaij - `MATSEQBAIJ`, uses `MatCreateSeqBAIJ()` 195 - -mat_type mpibaij - `MATMPIBAIJ`, uses `MatCreateBAIJ()` 196 197 Even More Options Database Keys: 198 See the manpages for particular formats (e.g., `MatCreateSeqAIJ()`) 199 for additional format-specific options. 200 201 Level: beginner 202 203 .seealso: `MatCreateSeqAIJ(()`, `MatCreateAIJ()`, 204 `MatCreateSeqDense()`, `MatCreateDense()`, 205 `MatCreateSeqBAIJ()`, `MatCreateBAIJ()`, 206 `MatCreateSeqSBAIJ()`, `MatCreateSBAIJ()`, 207 `MatConvert()` 208 @*/ 209 PetscErrorCode MatSetFromOptions(Mat B) 210 { 211 const char *deft = MATAIJ; 212 char type[256]; 213 PetscBool flg, set; 214 PetscInt bind_below = 0; 215 216 PetscFunctionBegin; 217 PetscValidHeaderSpecific(B, MAT_CLASSID, 1); 218 219 PetscObjectOptionsBegin((PetscObject)B); 220 221 if (B->rmap->bs < 0) { 222 PetscInt newbs = -1; 223 PetscCall(PetscOptionsInt("-mat_block_size", "Set the blocksize used to store the matrix", "MatSetBlockSize", newbs, &newbs, &flg)); 224 if (flg) { 225 PetscCall(PetscLayoutSetBlockSize(B->rmap, newbs)); 226 PetscCall(PetscLayoutSetBlockSize(B->cmap, newbs)); 227 } 228 } 229 230 PetscCall(PetscOptionsFList("-mat_type", "Matrix type", "MatSetType", MatList, deft, type, 256, &flg)); 231 if (flg) { 232 PetscCall(MatSetType(B, type)); 233 } else if (!((PetscObject)B)->type_name) { 234 PetscCall(MatSetType(B, deft)); 235 } 236 237 PetscCall(PetscOptionsName("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", &B->checksymmetryonassembly)); 238 PetscCall(PetscOptionsReal("-mat_is_symmetric", "Checks if mat is symmetric on MatAssemblyEnd()", "MatIsSymmetric", 
B->checksymmetrytol, &B->checksymmetrytol, NULL)); 239 PetscCall(PetscOptionsBool("-mat_null_space_test", "Checks if provided null space is correct in MatAssemblyEnd()", "MatSetNullSpaceTest", B->checknullspaceonassembly, &B->checknullspaceonassembly, NULL)); 240 PetscCall(PetscOptionsBool("-mat_error_if_failure", "Generate an error if an error occurs when factoring the matrix", "MatSetErrorIfFailure", B->erroriffailure, &B->erroriffailure, NULL)); 241 242 PetscTryTypeMethod(B, setfromoptions, PetscOptionsObject); 243 244 flg = PETSC_FALSE; 245 PetscCall(PetscOptionsBool("-mat_new_nonzero_location_err", "Generate an error if new nonzeros are created in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set)); 246 if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_LOCATION_ERR, flg)); 247 flg = PETSC_FALSE; 248 PetscCall(PetscOptionsBool("-mat_new_nonzero_allocation_err", "Generate an error if new nonzeros are allocated in the matrix structure (useful to test preallocation)", "MatSetOption", flg, &flg, &set)); 249 if (set) PetscCall(MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, flg)); 250 flg = PETSC_FALSE; 251 PetscCall(PetscOptionsBool("-mat_ignore_zero_entries", "For AIJ/IS matrices this will stop zero values from creating a zero location in the matrix", "MatSetOption", flg, &flg, &set)); 252 if (set) PetscCall(MatSetOption(B, MAT_IGNORE_ZERO_ENTRIES, flg)); 253 254 flg = PETSC_FALSE; 255 PetscCall(PetscOptionsBool("-mat_form_explicit_transpose", "Hint to form an explicit transpose for operations like MatMultTranspose", "MatSetOption", flg, &flg, &set)); 256 if (set) PetscCall(MatSetOption(B, MAT_FORM_EXPLICIT_TRANSPOSE, flg)); 257 258 /* Bind to CPU if below a user-specified size threshold. 259 * This perhaps belongs in the options for the GPU Mat types, but MatBindToCPU() does nothing when called on non-GPU types, 260 * and putting it here makes is more maintainable than duplicating this for all. 
*/ 261 PetscCall(PetscOptionsInt("-mat_bind_below", "Set the size threshold (in local rows) below which the Mat is bound to the CPU", "MatBindToCPU", bind_below, &bind_below, &flg)); 262 if (flg && B->rmap->n < bind_below) PetscCall(MatBindToCPU(B, PETSC_TRUE)); 263 264 /* process any options handlers added with PetscObjectAddOptionsHandler() */ 265 PetscCall(PetscObjectProcessOptionsHandlers((PetscObject)B, PetscOptionsObject)); 266 PetscOptionsEnd(); 267 PetscFunctionReturn(0); 268 } 269 270 /*@C 271 MatXAIJSetPreallocation - set preallocation for serial and parallel `MATAIJ`, `MATBAIJ`, and `MATSBAIJ` matrices and their unassembled versions. 272 273 Collective on A 274 275 Input Parameters: 276 + A - matrix being preallocated 277 . bs - block size 278 . dnnz - number of nonzero column blocks per block row of diagonal part of parallel matrix 279 . onnz - number of nonzero column blocks per block row of off-diagonal part of parallel matrix 280 . dnnzu - number of nonzero column blocks per block row of upper-triangular part of diagonal part of parallel matrix 281 - onnzu - number of nonzero column blocks per block row of upper-triangular part of off-diagonal part of parallel matrix 282 283 Level: beginner 284 285 .seealso: `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`, `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, 286 `PetscSplitOwnership()` 287 @*/ 288 PetscErrorCode MatXAIJSetPreallocation(Mat A, PetscInt bs, const PetscInt dnnz[], const PetscInt onnz[], const PetscInt dnnzu[], const PetscInt onnzu[]) 289 { 290 PetscInt cbs; 291 void (*aij)(void); 292 void (*is)(void); 293 void (*hyp)(void) = NULL; 294 295 PetscFunctionBegin; 296 if (bs != PETSC_DECIDE) { /* don't mess with an already set block size */ 297 PetscCall(MatSetBlockSize(A, bs)); 298 } 299 PetscCall(PetscLayoutSetUp(A->rmap)); 300 PetscCall(PetscLayoutSetUp(A->cmap)); 301 PetscCall(MatGetBlockSizes(A, 
&bs, &cbs)); 302 /* these routines assumes bs == cbs, this should be checked somehow */ 303 PetscCall(MatSeqBAIJSetPreallocation(A, bs, 0, dnnz)); 304 PetscCall(MatMPIBAIJSetPreallocation(A, bs, 0, dnnz, 0, onnz)); 305 PetscCall(MatSeqSBAIJSetPreallocation(A, bs, 0, dnnzu)); 306 PetscCall(MatMPISBAIJSetPreallocation(A, bs, 0, dnnzu, 0, onnzu)); 307 /* 308 In general, we have to do extra work to preallocate for scalar (AIJ) or unassembled (IS) matrices so we check whether it will do any 309 good before going on with it. 310 */ 311 PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatMPIAIJSetPreallocation_C", &aij)); 312 PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatISSetPreallocation_C", &is)); 313 #if defined(PETSC_HAVE_HYPRE) 314 PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatHYPRESetPreallocation_C", &hyp)); 315 #endif 316 if (!aij && !is && !hyp) PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSeqAIJSetPreallocation_C", &aij)); 317 if (aij || is || hyp) { 318 if (bs == cbs && bs == 1) { 319 PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz)); 320 PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz, 0, onnz)); 321 PetscCall(MatISSetPreallocation(A, 0, dnnz, 0, onnz)); 322 #if defined(PETSC_HAVE_HYPRE) 323 PetscCall(MatHYPRESetPreallocation(A, 0, dnnz, 0, onnz)); 324 #endif 325 } else { /* Convert block-row precallocation to scalar-row */ 326 PetscInt i, m, *sdnnz, *sonnz; 327 PetscCall(MatGetLocalSize(A, &m, NULL)); 328 PetscCall(PetscMalloc2((!!dnnz) * m, &sdnnz, (!!onnz) * m, &sonnz)); 329 for (i = 0; i < m; i++) { 330 if (dnnz) sdnnz[i] = dnnz[i / bs] * cbs; 331 if (onnz) sonnz[i] = onnz[i / bs] * cbs; 332 } 333 PetscCall(MatSeqAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL)); 334 PetscCall(MatMPIAIJSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL)); 335 PetscCall(MatISSetPreallocation(A, 0, dnnz ? sdnnz : NULL, 0, onnz ? sonnz : NULL)); 336 #if defined(PETSC_HAVE_HYPRE) 337 PetscCall(MatHYPRESetPreallocation(A, 0, dnnz ? 
sdnnz : NULL, 0, onnz ? sonnz : NULL)); 338 #endif 339 PetscCall(PetscFree2(sdnnz, sonnz)); 340 } 341 } 342 PetscFunctionReturn(0); 343 } 344 345 /* 346 Merges some information from Cs header to A; the C object is then destroyed 347 348 This is somewhat different from MatHeaderReplace() it would be nice to merge the code 349 */ 350 PetscErrorCode MatHeaderMerge(Mat A, Mat *C) 351 { 352 PetscInt refct; 353 PetscOps Abops; 354 struct _MatOps Aops; 355 char *mtype, *mname, *mprefix; 356 Mat_Product *product; 357 Mat_Redundant *redundant; 358 PetscObjectState state; 359 360 PetscFunctionBegin; 361 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 362 PetscValidHeaderSpecific(*C, MAT_CLASSID, 2); 363 if (A == *C) PetscFunctionReturn(0); 364 PetscCheckSameComm(A, 1, *C, 2); 365 /* save the parts of A we need */ 366 Abops = ((PetscObject)A)->bops[0]; 367 Aops = A->ops[0]; 368 refct = ((PetscObject)A)->refct; 369 mtype = ((PetscObject)A)->type_name; 370 mname = ((PetscObject)A)->name; 371 state = ((PetscObject)A)->state; 372 mprefix = ((PetscObject)A)->prefix; 373 product = A->product; 374 redundant = A->redundant; 375 376 /* zero these so the destroy below does not free them */ 377 ((PetscObject)A)->type_name = NULL; 378 ((PetscObject)A)->name = NULL; 379 380 /* 381 free all the interior data structures from mat 382 cannot use PetscUseTypeMethod(A,destroy); because compiler 383 thinks it may print NULL type_name and name 384 */ 385 PetscTryTypeMethod(A, destroy); 386 387 PetscCall(PetscFree(A->defaultvectype)); 388 PetscCall(PetscFree(A->defaultrandtype)); 389 PetscCall(PetscLayoutDestroy(&A->rmap)); 390 PetscCall(PetscLayoutDestroy(&A->cmap)); 391 PetscCall(PetscFunctionListDestroy(&((PetscObject)A)->qlist)); 392 PetscCall(PetscObjectListDestroy(&((PetscObject)A)->olist)); 393 PetscCall(PetscComposedQuantitiesDestroy((PetscObject)A)); 394 395 /* copy C over to A */ 396 PetscCall(PetscFree(A->factorprefix)); 397 PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat))); 398 399 /* 
return the parts of A we saved */ 400 ((PetscObject)A)->bops[0] = Abops; 401 A->ops[0] = Aops; 402 ((PetscObject)A)->refct = refct; 403 ((PetscObject)A)->type_name = mtype; 404 ((PetscObject)A)->name = mname; 405 ((PetscObject)A)->prefix = mprefix; 406 ((PetscObject)A)->state = state + 1; 407 A->product = product; 408 A->redundant = redundant; 409 410 /* since these two are copied into A we do not want them destroyed in C */ 411 ((PetscObject)*C)->qlist = NULL; 412 ((PetscObject)*C)->olist = NULL; 413 414 PetscCall(PetscHeaderDestroy(C)); 415 PetscFunctionReturn(0); 416 } 417 /* 418 Replace A's header with that of C; the C object is then destroyed 419 420 This is essentially code moved from MatDestroy() 421 422 This is somewhat different from MatHeaderMerge() it would be nice to merge the code 423 424 Used in DM hence is declared PETSC_EXTERN 425 */ 426 PETSC_EXTERN PetscErrorCode MatHeaderReplace(Mat A, Mat *C) 427 { 428 PetscInt refct; 429 PetscObjectState state; 430 struct _p_Mat buffer; 431 MatStencilInfo stencil; 432 433 PetscFunctionBegin; 434 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 435 PetscValidHeaderSpecific(*C, MAT_CLASSID, 2); 436 if (A == *C) PetscFunctionReturn(0); 437 PetscCheckSameComm(A, 1, *C, 2); 438 PetscCheck(((PetscObject)*C)->refct == 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "Object C has refct %" PetscInt_FMT " > 1, would leave hanging reference", ((PetscObject)*C)->refct); 439 440 /* swap C and A */ 441 refct = ((PetscObject)A)->refct; 442 state = ((PetscObject)A)->state; 443 stencil = A->stencil; 444 PetscCall(PetscMemcpy(&buffer, A, sizeof(struct _p_Mat))); 445 PetscCall(PetscMemcpy(A, *C, sizeof(struct _p_Mat))); 446 PetscCall(PetscMemcpy(*C, &buffer, sizeof(struct _p_Mat))); 447 ((PetscObject)A)->refct = refct; 448 ((PetscObject)A)->state = state + 1; 449 A->stencil = stencil; 450 451 ((PetscObject)*C)->refct = 1; 452 PetscCall(MatShellSetOperation(*C, MATOP_DESTROY, (void (*)(void))NULL)); 453 
PetscCall(MatDestroy(C)); 454 PetscFunctionReturn(0); 455 } 456 457 /*@ 458 MatBindToCPU - marks a matrix to temporarily stay on the CPU and perform computations on the CPU 459 460 Logically collective on A 461 462 Input Parameters: 463 + A - the matrix 464 - flg - bind to the CPU if value of `PETSC_TRUE` 465 466 Level: intermediate 467 468 .seealso: `MatBoundToCPU()` 469 @*/ 470 PetscErrorCode MatBindToCPU(Mat A, PetscBool flg) 471 { 472 PetscFunctionBegin; 473 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 474 PetscValidLogicalCollectiveBool(A, flg, 2); 475 #if defined(PETSC_HAVE_DEVICE) 476 if (A->boundtocpu == flg) PetscFunctionReturn(0); 477 A->boundtocpu = flg; 478 PetscTryTypeMethod(A, bindtocpu, flg); 479 #endif 480 PetscFunctionReturn(0); 481 } 482 483 /*@ 484 MatBoundToCPU - query if a matrix is bound to the CPU 485 486 Input Parameter: 487 . A - the matrix 488 489 Output Parameter: 490 . flg - the logical flag 491 492 Level: intermediate 493 494 .seealso: `MatBindToCPU()` 495 @*/ 496 PetscErrorCode MatBoundToCPU(Mat A, PetscBool *flg) 497 { 498 PetscFunctionBegin; 499 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 500 PetscValidBoolPointer(flg, 2); 501 #if defined(PETSC_HAVE_DEVICE) 502 *flg = A->boundtocpu; 503 #else 504 *flg = PETSC_TRUE; 505 #endif 506 PetscFunctionReturn(0); 507 } 508 509 PetscErrorCode MatSetValuesCOO_Basic(Mat A, const PetscScalar coo_v[], InsertMode imode) 510 { 511 IS is_coo_i, is_coo_j; 512 const PetscInt *coo_i, *coo_j; 513 PetscInt n, n_i, n_j; 514 PetscScalar zero = 0.; 515 516 PetscFunctionBegin; 517 PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_i", (PetscObject *)&is_coo_i)); 518 PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_coo_j", (PetscObject *)&is_coo_j)); 519 PetscCheck(is_coo_i, PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_i IS"); 520 PetscCheck(is_coo_j, PetscObjectComm((PetscObject)A), PETSC_ERR_COR, "Missing coo_j IS"); 521 PetscCall(ISGetLocalSize(is_coo_i, &n_i)); 522 
PetscCall(ISGetLocalSize(is_coo_j, &n_j)); 523 PetscCheck(n_i == n_j, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong local size %" PetscInt_FMT " != %" PetscInt_FMT, n_i, n_j); 524 PetscCall(ISGetIndices(is_coo_i, &coo_i)); 525 PetscCall(ISGetIndices(is_coo_j, &coo_j)); 526 if (imode != ADD_VALUES) PetscCall(MatZeroEntries(A)); 527 for (n = 0; n < n_i; n++) PetscCall(MatSetValue(A, coo_i[n], coo_j[n], coo_v ? coo_v[n] : zero, ADD_VALUES)); 528 PetscCall(ISRestoreIndices(is_coo_i, &coo_i)); 529 PetscCall(ISRestoreIndices(is_coo_j, &coo_j)); 530 PetscFunctionReturn(0); 531 } 532 533 PetscErrorCode MatSetPreallocationCOO_Basic(Mat A, PetscCount ncoo, const PetscInt coo_i[], const PetscInt coo_j[]) 534 { 535 Mat preallocator; 536 IS is_coo_i, is_coo_j; 537 PetscScalar zero = 0.0; 538 539 PetscFunctionBegin; 540 PetscCall(PetscLayoutSetUp(A->rmap)); 541 PetscCall(PetscLayoutSetUp(A->cmap)); 542 PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &preallocator)); 543 PetscCall(MatSetType(preallocator, MATPREALLOCATOR)); 544 PetscCall(MatSetSizes(preallocator, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N)); 545 PetscCall(MatSetLayouts(preallocator, A->rmap, A->cmap)); 546 PetscCall(MatSetUp(preallocator)); 547 for (PetscCount n = 0; n < ncoo; n++) PetscCall(MatSetValue(preallocator, coo_i[n], coo_j[n], zero, INSERT_VALUES)); 548 PetscCall(MatAssemblyBegin(preallocator, MAT_FINAL_ASSEMBLY)); 549 PetscCall(MatAssemblyEnd(preallocator, MAT_FINAL_ASSEMBLY)); 550 PetscCall(MatPreallocatorPreallocate(preallocator, PETSC_TRUE, A)); 551 PetscCall(MatDestroy(&preallocator)); 552 PetscCheck(ncoo <= PETSC_MAX_INT, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo); 553 PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncoo, coo_i, PETSC_COPY_VALUES, &is_coo_i)); 554 PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncoo, coo_j, PETSC_COPY_VALUES, &is_coo_j)); 555 
PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_i", (PetscObject)is_coo_i)); 556 PetscCall(PetscObjectCompose((PetscObject)A, "__PETSc_coo_j", (PetscObject)is_coo_j)); 557 PetscCall(ISDestroy(&is_coo_i)); 558 PetscCall(ISDestroy(&is_coo_j)); 559 PetscFunctionReturn(0); 560 } 561 562 /*@C 563 MatSetPreallocationCOO - set preallocation for matrices using a coordinate format of the entries with global indices 564 565 Collective on A 566 567 Input Parameters: 568 + A - matrix being preallocated 569 . ncoo - number of entries 570 . coo_i - row indices 571 - coo_j - column indices 572 573 Level: beginner 574 575 Notes: 576 The indices coo_i and coo_j may be modified within this function. The caller should not rely on them 577 having any specific value after this function returns. The arrays can be freed or reused immediately 578 after this function returns. 579 580 Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed 581 but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries 582 are allowed and will be properly added or inserted to the matrix, unless the matrix option `MAT_IGNORE_OFF_PROC_ENTRIES` 583 is set, in which case remote entries are ignored, or `MAT_NO_OFF_PROC_ENTRIES` is set, in which case an error will be generated. 584 585 If you just want to create a sequential AIJ matrix (`MATSEQAIJ`), and your matrix entries in COO format are unique, you can also use 586 `MatCreateSeqAIJFromTriple()`. But that is not recommended for iterative applications. 
587 588 .seealso: `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`, `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOOLocal()`, `DMSetMatrixPreallocateSkip()`, `MatCreateSeqAIJFromTriple()` 589 @*/ 590 PetscErrorCode MatSetPreallocationCOO(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[]) 591 { 592 PetscErrorCode (*f)(Mat, PetscCount, const PetscInt[], const PetscInt[]) = NULL; 593 594 PetscFunctionBegin; 595 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 596 PetscValidType(A, 1); 597 if (ncoo) PetscValidIntPointer(coo_i, 3); 598 if (ncoo) PetscValidIntPointer(coo_j, 4); 599 PetscCall(PetscLayoutSetUp(A->rmap)); 600 PetscCall(PetscLayoutSetUp(A->cmap)); 601 PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOO_C", &f)); 602 603 PetscCall(PetscLogEventBegin(MAT_PreallCOO, A, 0, 0, 0)); 604 if (f) { 605 PetscCall((*f)(A, ncoo, coo_i, coo_j)); 606 } else { /* allow fallback, very slow */ 607 PetscCall(MatSetPreallocationCOO_Basic(A, ncoo, coo_i, coo_j)); 608 } 609 PetscCall(PetscLogEventEnd(MAT_PreallCOO, A, 0, 0, 0)); 610 A->preallocated = PETSC_TRUE; 611 A->nonzerostate++; 612 PetscFunctionReturn(0); 613 } 614 615 /*@C 616 MatSetPreallocationCOOLocal - set preallocation for matrices using a coordinate format of the entries with local indices 617 618 Collective on A 619 620 Input Parameters: 621 + A - matrix being preallocated 622 . ncoo - number of entries 623 . coo_i - row indices (local numbering; may be modified) 624 - coo_j - column indices (local numbering; may be modified) 625 626 Level: beginner 627 628 Notes: 629 The local indices are translated using the local to global mapping, thus `MatSetLocalToGlobalMapping()` must have been 630 called prior to this function. For matrices created with `DMCreateMatrix()` the local to global mapping is often already provided. 
631 632 The indices coo_i and coo_j may be modified within this function. They might be translated to corresponding global 633 indices, but the caller should not rely on them having any specific value after this function returns. The arrays 634 can be freed or reused immediately after this function returns. 635 636 Entries can be repeated, see `MatSetValuesCOO()`. Entries with negative row or column indices are allowed 637 but will be ignored. The corresponding entries in `MatSetValuesCOO()` will be ignored too. Remote entries 638 are allowed and will be properly added or inserted to the matrix. 639 640 .seealso: `MatSetValuesCOO()`, `MatSeqAIJSetPreallocation()`, `MatMPIAIJSetPreallocation()`, `MatSeqBAIJSetPreallocation()`, `MatMPIBAIJSetPreallocation()`, `MatSeqSBAIJSetPreallocation()`, `MatMPISBAIJSetPreallocation()`, `MatSetPreallocationCOO()`, `DMSetMatrixPreallocateSkip()` 641 @*/ 642 PetscErrorCode MatSetPreallocationCOOLocal(Mat A, PetscCount ncoo, PetscInt coo_i[], PetscInt coo_j[]) 643 { 644 PetscErrorCode (*f)(Mat, PetscCount, PetscInt[], PetscInt[]) = NULL; 645 646 PetscFunctionBegin; 647 PetscValidHeaderSpecific(A, MAT_CLASSID, 1); 648 PetscValidType(A, 1); 649 if (ncoo) PetscValidIntPointer(coo_i, 3); 650 if (ncoo) PetscValidIntPointer(coo_j, 4); 651 PetscCheck(ncoo <= PETSC_MAX_INT, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "ncoo %" PetscCount_FMT " overflowed PetscInt; configure --with-64-bit-indices or request support", ncoo); 652 PetscCall(PetscLayoutSetUp(A->rmap)); 653 PetscCall(PetscLayoutSetUp(A->cmap)); 654 655 PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetPreallocationCOOLocal_C", &f)); 656 if (f) { 657 PetscCall((*f)(A, ncoo, coo_i, coo_j)); 658 A->nonzerostate++; 659 } else { 660 ISLocalToGlobalMapping ltog_row, ltog_col; 661 PetscCall(MatGetLocalToGlobalMapping(A, <og_row, <og_col)); 662 if (ltog_row) PetscCall(ISLocalToGlobalMappingApply(ltog_row, ncoo, coo_i, coo_i)); 663 if (ltog_col) 
PetscCall(ISLocalToGlobalMappingApply(ltog_col, ncoo, coo_j, coo_j)); 664 PetscCall(MatSetPreallocationCOO(A, ncoo, coo_i, coo_j)); 665 } 666 A->preallocated = PETSC_TRUE; 667 PetscFunctionReturn(0); 668 } 669 670 /*@ 671 MatSetValuesCOO - set values at once in a matrix preallocated using `MatSetPreallocationCOO()` 672 673 Collective on A 674 675 Input Parameters: 676 + A - matrix being preallocated 677 . coo_v - the matrix values (can be NULL) 678 - imode - the insert mode 679 680 Level: beginner 681 682 Notes: 683 The values must follow the order of the indices prescribed with `MatSetPreallocationCOO()` or `MatSetPreallocationCOOLocal()`. 684 685 When repeated entries are specified in the COO indices the coo_v values are first properly summed, regardless of the value of imode. 686 The imode flag indicates if coo_v must be added to the current values of the matrix (`ADD_VALUES`) or overwritten (`INSERT_VALUES`). 687 688 `MatAssemblyBegin()` and `MatAssemblyEnd()` do not need to be called after this routine. It automatically handles the assembly process. 
.seealso: `MatSetPreallocationCOO()`, `MatSetPreallocationCOOLocal()`, `InsertMode`, `INSERT_VALUES`, `ADD_VALUES`
@*/
PetscErrorCode MatSetValuesCOO(Mat A, const PetscScalar coo_v[], InsertMode imode)
{
  PetscErrorCode (*f)(Mat, const PetscScalar[], InsertMode) = NULL;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidType(A, 1);
  MatCheckPreallocated(A, 1);
  PetscValidLogicalCollectiveEnum(A, imode, 3);
  /* prefer a type-specific implementation when the matrix type provides one */
  PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSetValuesCOO_C", &f));
  PetscCall(PetscLogEventBegin(MAT_SetVCOO, A, 0, 0, 0));
  if (f) {
    PetscCall((*f)(A, coo_v, imode));
  } else { /* allow fallback */
    PetscCall(MatSetValuesCOO_Basic(A, coo_v, imode));
  }
  PetscCall(PetscLogEventEnd(MAT_SetVCOO, A, 0, 0, 0));
  /* assemble on behalf of the caller, as promised in the manual page above */
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
  PetscFunctionReturn(0);
}

/*@
  MatSetBindingPropagates - Sets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects

  Input Parameters:
+ A - the matrix
- flg - flag indicating whether the boundtocpu flag should be propagated

  Level: developer

  Notes:
  If the value of flg is set to true, the following will occur:

  `MatCreateSubMatrices()` and `MatCreateRedundantMatrix()` will bind created matrices to CPU if the input matrix is bound to the CPU.

  `MatCreateVecs()` will bind created vectors to CPU if the input matrix is bound to the CPU.
  The bindingpropagates flag itself is also propagated by the above routines.

  Developer Note:
  If the fine-scale `DMDA` has the -dm_bind_below option set to true, then `DMCreateInterpolationScale()` calls `MatSetBindingPropagates()`
  on the restriction/interpolation operator to set the bindingpropagates flag to true.

.seealso: `VecSetBindingPropagates()`, `MatGetBindingPropagates()`
@*/
PetscErrorCode MatSetBindingPropagates(Mat A, PetscBool flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
  /* the flag only exists in GPU-enabled builds; otherwise this call is a no-op */
  A->bindingpropagates = flg;
#endif
  PetscFunctionReturn(0);
}

/*@
  MatGetBindingPropagates - Gets whether the state of being bound to the CPU for a GPU matrix type propagates to child and some other associated objects

  Input Parameter:
. A - the matrix

  Output Parameter:
. flg - flag indicating whether the boundtocpu flag will be propagated

  Level: developer

.seealso: `MatSetBindingPropagates()`
@*/
PetscErrorCode MatGetBindingPropagates(Mat A, PetscBool *flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidBoolPointer(flg, 2);
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
  *flg = A->bindingpropagates;
#else
  /* without GPU support the binding state never propagates */
  *flg = PETSC_FALSE;
#endif
  PetscFunctionReturn(0);
}