#define PETSC_DESIRE_FEATURE_TEST_MACROS
/*
   This file defines the initialization of PETSc, including PetscInitialize()
*/
#include <petsc/private/petscimpl.h> /*I "petscsys.h" I*/
#include <petsc/private/logimpl.h>
#include <petscviewer.h>
#include <petsc/private/garbagecollector.h>

#if !defined(PETSC_HAVE_WINDOWS_COMPILERS)
#include <petsc/private/valgrind/valgrind.h>
#endif

#if defined(PETSC_USE_FORTRAN_BINDINGS)
#include <petsc/private/ftnimpl.h>
#endif

/* In coverage builds expose the gcov flush entry point; newer GCC renamed __gcov_flush() to __gcov_dump() */
#if PetscDefined(USE_COVERAGE)
EXTERN_C_BEGIN
#if defined(PETSC_HAVE___GCOV_DUMP)
#define __gcov_flush(x) __gcov_dump(x)
#endif
void __gcov_flush(void);
EXTERN_C_END
#endif

#if defined(PETSC_SERIALIZE_FUNCTIONS)
PETSC_INTERN PetscFPT PetscFPTData;
PetscFPT PetscFPTData = 0;
#endif

#if PetscDefined(HAVE_SAWS)
#include <petscviewersaws.h>
#endif

PETSC_INTERN FILE *petsc_history;

PETSC_INTERN PetscErrorCode PetscInitialize_DynamicLibraries(void);
PETSC_INTERN PetscErrorCode PetscFinalize_DynamicLibraries(void);
PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm, int);
PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm, int);
PETSC_INTERN PetscErrorCode PetscCloseHistoryFile(FILE **);

/* user may set these BEFORE calling PetscInitialize() */
MPI_Comm PETSC_COMM_WORLD = MPI_COMM_NULL;
#if PetscDefined(HAVE_MPI_INIT_THREAD)
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = PETSC_DECIDE;
#else
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_SINGLE;
#endif

/* MPI attribute keyvals used to attach PETSc bookkeeping (tag/name counters, inner/outer comm links,
   shared-memory comms, garbage-collection maps) to communicators; created during PetscInitialize() */
PetscMPIInt Petsc_Counter_keyval      = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_InnerComm_keyval    = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_OuterComm_keyval    = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_ShmComm_keyval      = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_CreationIdx_keyval  = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_Garbage_HMap_keyval = MPI_KEYVAL_INVALID;

PetscMPIInt Petsc_SharedWD_keyval  = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_SharedTmp_keyval = MPI_KEYVAL_INVALID;

/*
     Declare and set all the string names of the PETSc enums
*/
const char *const PetscBools[]     = {"FALSE", "TRUE", "PetscBool", "PETSC_", NULL};
const char *const PetscBool3s[]    = {"FALSE", "TRUE", "UNKNOWN", "PetscBool3", "PETSC_", NULL};
const char *const PetscCopyModes[] = {"COPY_VALUES", "OWN_POINTER", "USE_POINTER", "PetscCopyMode", "PETSC_", NULL};

PetscBool PetscPreLoadingUsed = PETSC_FALSE;
PetscBool PetscPreLoadingOn   = PETSC_FALSE;

PetscInt PetscHotRegionDepth;

PetscBool PETSC_RUNNING_ON_VALGRIND = PETSC_FALSE;

#if defined(PETSC_HAVE_THREADSAFETY)
PetscSpinlock PetscViewerASCIISpinLockOpen;
PetscSpinlock PetscViewerASCIISpinLockStdout;
PetscSpinlock PetscViewerASCIISpinLockStderr;
PetscSpinlock PetscCommSpinLock;
#endif

extern PetscInt PetscNumBLASThreads;

/*@C
  PetscInitializeNoPointers - Calls PetscInitialize() from C/C++ without the pointers to argc and args

  Collective, No Fortran Support

  Input Parameters:
+ argc     - number of args
. args     - array of command line arguments
. filename - optional name of the program file, pass `NULL` to ignore
- help     - optional help, pass `NULL` to ignore

  Level: advanced

  Notes:
  this is called only by the PETSc Julia interface. Even though it might start MPI it sets the flag to
  indicate that it did NOT start MPI so that the `PetscFinalize()` does not end MPI, thus allowing `PetscInitialize()` to
  be called multiple times from Julia without the problem of trying to initialize MPI more than once.

  Developer Notes:
  Turns off PETSc signal handling to allow Julia to manage signals

.seealso: `PetscInitialize()`, `PetscInitializeFortran()`, `PetscInitializeNoArguments()`
@*/
PetscErrorCode PetscInitializeNoPointers(int argc, char **args, const char *filename, const char *help)
{
  int    myargc = argc;
  char **myargs = args;

  PetscFunctionBegin;
  PetscCall(PetscInitialize(&myargc, &myargs, filename, help));
  PetscCall(PetscPopSignalHandler());
  /* pretend PETSc did not start MPI so PetscFinalize() leaves MPI running (see Notes above) */
  PetscBeganMPI = PETSC_FALSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscInitializeNoArguments - Calls `PetscInitialize()` from C/C++ without
  the command line arguments.

  Collective

  Level: advanced

.seealso: `PetscInitialize()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscInitializeNoArguments(void) PeNS
{
  int    argc = 0;
  char **args = NULL;

  PetscFunctionBegin;
  PetscCall(PetscInitialize(&argc, &args, NULL, NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PetscInitialized - Determine whether PETSc is initialized.

  Output Parameter:
. isInitialized - `PETSC_TRUE` if PETSc is initialized, `PETSC_FALSE` otherwise

  Level: beginner

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscInitialized(PetscBool *isInitialized)
{
  PetscFunctionBegin;
  /* the pointer check only runs once PETSc is initialized, since the error-reporting machinery
     it relies on may not be usable before PetscInitialize() */
  if (PetscInitializeCalled) PetscAssertPointer(isInitialized, 1);
  *isInitialized = PetscInitializeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PetscFinalized - Determine whether `PetscFinalize()` has been called yet

  Output Parameter:
. isFinalized - `PETSC_TRUE` if PETSc is finalized, `PETSC_FALSE` otherwise

  Level: developer

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscFinalized(PetscBool *isFinalized)
{
  PetscFunctionBegin;
  /* mirror of PetscInitialized(): after PetscFinalize() the error machinery may be torn down,
     so the pointer is only checked before finalization */
  if (!PetscFinalizeCalled) PetscAssertPointer(isFinalized, 1);
  *isFinalized = PetscFinalizeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscOptionsCheckInitial_Private(const char[]);

/*
       This function is the MPI reduction operation used to compute the sum of the
   first half of the datatype and the max of the second half.
*/
MPI_Op MPIU_MAXSUM_OP               = 0;
MPI_Op Petsc_Garbage_SetIntersectOp = 0;

/*
   User-defined MPI reduction body for MPIU_MAXSUM_OP: for each pair of entries it keeps
   the max of the first and the sum of the second. Supports both the (PetscInt, PetscMPIInt)
   struct layout used with 64-bit indices and plain 2*PetscInt pairs.
*/
PETSC_INTERN void MPIAPI MPIU_MaxSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscFunctionBegin;
  if (*datatype == MPIU_INT_MPIINT && PetscDefined(USE_64BIT_INDICES)) {
#if defined(PETSC_USE_64BIT_INDICES)
    struct petsc_mpiu_int_mpiint *xin = (struct petsc_mpiu_int_mpiint *)in, *xout = (struct petsc_mpiu_int_mpiint *)out;
    PetscMPIInt                   count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[i].a = PetscMax(xout[i].a, xin[i].a);
      xout[i].b += xin[i].b;
    }
#endif
  } else if (*datatype == MPIU_2INT || *datatype == MPIU_INT_MPIINT) {
    /* with 32-bit indices MPIU_INT_MPIINT degenerates to a pair of PetscInt, same as MPIU_2INT */
    PetscInt   *xin = (PetscInt *)in, *xout = (PetscInt *)out;
    PetscMPIInt count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[2 * i] = PetscMax(xout[2 * i], xin[2 * i]);
      xout[2 * i + 1] += xin[2 * i + 1];
    }
  } else {
    PetscErrorCode ierr = (*PetscErrorPrintf)("Can only handle MPIU_2INT and MPIU_INT_MPIINT data types");
    (void)ierr;
    /* cannot return an error code from an MPI reduction callback, so abort */
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}

/*@
  PetscMaxSum - Returns the max of the first entry over all MPI processes and the sum of the second entry.

  Collective

  Input Parameters:
+ comm  - the communicator
- array - an array of length 2 times `size`, the number of MPI processes

  Output Parameters:
+ max - the maximum of `array[2*rank]` over all MPI processes
- sum - the sum of the `array[2*rank + 1]` over all MPI processes

  Level: developer

.seealso: `PetscInitialize()`
@*/
PetscErrorCode PetscMaxSum(MPI_Comm comm, const PetscInt array[], PetscInt *max, PetscInt *sum)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
  {
    struct {
      PetscInt max, sum;
    } work;
    PetscCallMPI(MPI_Reduce_scatter_block((void *)array, &work, 1, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work.max;
    *sum = work.sum;
  }
#else
  {
    /* fallback without MPI_Reduce_scatter_block(): allreduce the full array, then pick this rank's pair */
    PetscMPIInt size, rank;
    struct {
      PetscInt max, sum;
    } *work;
    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscMalloc1(size, &work));
    PetscCallMPI(MPIU_Allreduce((void *)array, work, size, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work[rank].max;
    *sum = work[rank].sum;
    PetscCall(PetscFree(work));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
#if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
#include <quadmath.h>
#endif
MPI_Op MPIU_SUM___FP16___FLOAT128 = 0;
#if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
MPI_Op MPIU_SUM = 0;
#endif

/*
   User-defined MPI summation for __float128/__fp16 (and __complex128) values, which the
   vendor MPI_SUM does not understand. Registered as MPIU_SUM___FP16___FLOAT128 (and as
   MPIU_SUM when PETSc's real type is one of these extended types).
*/
PETSC_EXTERN void MPIAPI PetscSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscMPIInt i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype == MPIU_REAL) {
    PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
#if defined(PETSC_HAVE_COMPLEX)
  else if (*datatype == MPIU_COMPLEX) {
    PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
#endif
#if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  else if (*datatype == MPIU___FLOAT128) {
    __float128 *xin = (__float128 *)in, *xout = (__float128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
#if defined(PETSC_HAVE_COMPLEX)
  } else if (*datatype == MPIU___COMPLEX128) {
    __complex128 *xin = (__complex128 *)in, *xout = (__complex128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
#endif
  }
#endif
#if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  else if (*datatype == MPIU___FP16) {
    __fp16 *xin = (__fp16 *)in, *xout = (__fp16 *)out;
    /* arithmetic promotes to float; cast back explicitly to silence conversion warnings */
    for (i = 0; i < count; i++) xout[i] = (__fp16)(xin[i] + xout[i]);
  }
#endif
  else {
    /* pick the error message matching the set of types compiled in */
#if (!defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)) && (!defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16))
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
#elif !defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, or MPIU___COMPLEX128 data types"));
#elif !defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, or MPIU___FP16 data types"));
#else
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, MPIU___COMPLEX128, or MPIU___FP16 data types"));
#endif
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
#endif

#if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
MPI_Op MPIU_MAX = 0;
MPI_Op MPIU_MIN = 0;

/*
   User-defined MPI max reduction for PETSc's extended-precision real/complex types;
   for complex the comparison is on the real part only
*/
PETSC_EXTERN void MPIAPI PetscMax_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscInt i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype == MPIU_REAL) {
    PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
    for (i = 0; i < count; i++) xout[i] = PetscMax(xout[i], xin[i]);
  }
#if defined(PETSC_HAVE_COMPLEX)
  else if (*datatype == MPIU_COMPLEX) {
    PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
    for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) < PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
  }
#endif
  else {
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}

/*
   User-defined MPI min reduction, mirror image of PetscMax_Local() above
*/
PETSC_EXTERN void MPIAPI PetscMin_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscInt i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype == MPIU_REAL) {
    PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
    for (i = 0; i < count; i++) xout[i] = PetscMin(xout[i], xin[i]);
  }
#if defined(PETSC_HAVE_COMPLEX)
  else if (*datatype == MPIU_COMPLEX) {
    PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
    for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) > PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
  }
#endif
  else {
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_SCALAR data (i.e. double or complex) types"));
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
#endif

/*
   Private routine to delete internal tag/name counter storage when a communicator is freed.

   This is called by MPI, not by users. This is called by MPI_Comm_free() when the communicator that has this data as an attribute is freed.

   Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_Counter_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *count_val, void *extra_state)
{
  PetscCommCounter      *counter = (PetscCommCounter *)count_val;
  struct PetscCommStash *comms   = counter->comms, *pcomm;

  PetscFunctionBegin;
  PetscCallReturnMPI(PetscInfo(NULL, "Deleting counter data in an MPI_Comm %ld\n", (long)comm));
  PetscCallReturnMPI(PetscFree(counter->iflags));
  /* free the stash of communicators created on top of this one, then the counter itself */
  while (comms) {
    PetscCallMPIReturnMPI(MPI_Comm_free(&comms->comm));
    pcomm = comms;
    comms = comms->next;
    PetscCallReturnMPI(PetscFree(pcomm));
  }
  PetscCallReturnMPI(PetscFree(counter));
  PetscFunctionReturn(MPI_SUCCESS);
}

/*
  This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Comm_delete_attr) or when the user
  calls MPI_Comm_free().

  This is the only entry point for breaking the links between inner and outer comms.

  This is called by MPI, not by users. This is called when MPI_Comm_free() is called on the communicator.

  Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_InnerComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  /* union punning because MPI attribute values are void* while MPI_Comm need not be pointer-sized */
  union
  {
    MPI_Comm comm;
    void    *ptr;
  } icomm;

  PetscFunctionBegin;
  PetscCheckReturnMPI(keyval == Petsc_InnerComm_keyval, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Unexpected keyval");
  icomm.ptr = attr_val;
  if (PetscDefined(USE_DEBUG)) {
    /* Error out if the inner/outer comms are not correctly linked through their Outer/InnerComm attributes */
    PetscMPIInt flg;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ocomm;
    PetscCallMPIReturnMPI(MPI_Comm_get_attr(icomm.comm, Petsc_OuterComm_keyval, &ocomm, &flg));
    PetscCheckReturnMPI(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm does not have OuterComm attribute");
    PetscCheckReturnMPI(ocomm.comm == comm, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm's OuterComm attribute does not point to outer PETSc comm");
  }
  /* deleting the attribute on the inner comm triggers Petsc_OuterComm_Attr_DeleteFn() below */
  PetscCallMPIReturnMPI(MPI_Comm_delete_attr(icomm.comm, Petsc_OuterComm_keyval));
  PetscCallReturnMPI(PetscInfo(NULL, "User MPI_Comm %ld is being unlinked from inner PETSc comm %ld\n", (long)comm, (long)icomm.comm));
  PetscFunctionReturn(MPI_SUCCESS);
}

/*
 * This is invoked on the inner comm when Petsc_InnerComm_Attr_DeleteFn calls MPI_Comm_delete_attr(). It should not be reached any other way.
 */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_OuterComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  PetscFunctionBegin;
  PetscCallReturnMPI(PetscInfo(NULL, "Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n", (long)comm));
  PetscFunctionReturn(MPI_SUCCESS);
}

PETSC_EXTERN PetscMPIInt MPIAPI Petsc_ShmComm_Attr_DeleteFn(MPI_Comm, PetscMPIInt, void *, void *);

#if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype, MPI_Aint *, void *);
PETSC_EXTERN PetscMPIInt PetscDataRep_read_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
PETSC_EXTERN PetscMPIInt PetscDataRep_write_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
#endif

PetscMPIInt PETSC_MPI_ERROR_CLASS = MPI_ERR_LASTCODE, PETSC_MPI_ERROR_CODE;

/* copies of argc/argv saved by PetscInitialize() for later retrieval via PetscGetArgs()/PetscGetArguments() */
PETSC_INTERN int    PetscGlobalArgc;
PETSC_INTERN char **PetscGlobalArgs, **PetscGlobalArgsFortran;
int            PetscGlobalArgc        = 0;
char         **PetscGlobalArgs        = NULL;
char         **PetscGlobalArgsFortran = NULL;
PetscSegBuffer PetscCitationsList;

/*
   Creates the citation buffer and registers the base PETSc citations; called from PetscInitialize()
*/
PetscErrorCode PetscCitationsInitialize(void)
{
  PetscFunctionBegin;
  PetscCall(PetscSegBufferCreate(1, 10000, &PetscCitationsList));

  PetscCall(PetscCitationsRegister("@TechReport{petsc-user-ref,\n\
  Author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Steven Benson and Jed Brown\n\
    and Peter Brune and Kris Buschelman and Emil Constantinescu and Lisandro Dalcin and Alp Dener\n\
    and Victor Eijkhout and Jacob Faibussowitsch and William~D. Gropp and V\'{a}clav Hapla and Tobin Isaac and Pierre Jolivet\n\
    and Dmitry Karpeev and Dinesh Kaushik and Matthew~G. Knepley and Fande Kong and Scott Kruger\n\
    and Dave~A. May and Lois Curfman McInnes and Richard Tran Mills and Lawrence Mitchell and Todd Munson\n\
    and Jose~E. Roman and Karl Rupp and Patrick Sanan and Jason Sarich and Barry~F. Smith and Hansol Suh\n\
    and Stefano Zampini and Hong Zhang and Hong Zhang and Junchao Zhang},\n\
  Title = {{PETSc/TAO} Users Manual},\n\
  Number = {ANL-21/39 - Revision 3.23},\n\
  Doi = {10.2172/2565610},\n\
  Institution = {Argonne National Laboratory},\n\
  Year = {2025}\n}\n",
                                   NULL));

  PetscCall(PetscCitationsRegister("@InProceedings{petsc-efficient,\n\
  Author = {Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith},\n\
  Title = {Efficient Management of Parallelism in Object Oriented Numerical Software Libraries},\n\
  Booktitle = {Modern Software Tools in Scientific Computing},\n\
  Editor = {E. Arge and A. M. Bruaset and H. P. Langtangen},\n\
  Pages = {163--202},\n\
  Publisher = {Birkh{\\\"{a}}user Press},\n\
  Year = {1997}\n}\n",
                                   NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static char programname[PETSC_MAX_PATH_LEN] = ""; /* HP includes entire path in name */

/* records the executable name (set during PetscInitialize()); retrieved with PetscGetProgramName() */
PetscErrorCode PetscSetProgramName(const char name[])
{
  PetscFunctionBegin;
  PetscCall(PetscStrncpy(programname, name, sizeof(programname)));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscGetProgramName - Gets the name of the running program.

  Not Collective

  Input Parameter:
. len - length of the string name

  Output Parameter:
. name - the name of the running program, provide a string of length `PETSC_MAX_PATH_LEN`

  Level: advanced

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetProgramName(char name[], size_t len)
{
  PetscFunctionBegin;
  PetscCall(PetscStrncpy(name, programname, len));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscGetArgs - Allows you to access the raw command line arguments anywhere
  after `PetscInitialize()` is called but before `PetscFinalize()`.

  Not Collective, No Fortran Support

  Output Parameters:
+ argc - count of the number of command line arguments
- args - the command line arguments

  Level: intermediate

  Notes:
  This is usually used to pass the command line arguments into other libraries
  that are called internally deep in PETSc or the application.

  The first argument contains the program name as is normal for C programs.

  See `PetscGetArguments()` for a variant of this routine.

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetArgs(int *argc, char ***args)
{
  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  *argc = PetscGlobalArgc;
  *args = PetscGlobalArgs;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscGetArguments - Allows you to access the command line arguments anywhere
  after `PetscInitialize()` is called but before `PetscFinalize()`.

  Not Collective, No Fortran Support

  Output Parameter:
. args - the command line arguments

  Level: intermediate

  Note:
  This does NOT start with the program name and IS `NULL` terminated (the final argument is void)

  Use `PetscFreeArguments()` to return the memory used by the arguments.

  This makes a copy of the arguments and the array of arguments, while `PetscGetArgs()` does not make a copy,
  it returns the array of arguments that was passed into the main program.

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscFreeArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetArguments(char ***args)
{
  PetscInt i, argc = PetscGlobalArgc;

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  if (!argc) {
    *args = NULL;
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  /* copy argv[1..argc-1] (skipping the program name) and NULL-terminate the array */
  PetscCall(PetscMalloc1(argc, args));
  for (i = 0; i < argc - 1; i++) PetscCall(PetscStrallocpy(PetscGlobalArgs[i + 1], &(*args)[i]));
  (*args)[argc - 1] = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscFreeArguments - Frees the memory obtained with `PetscGetArguments()`

  Not Collective, No Fortran Support

  Output Parameter:
. args - the command line arguments

  Level: intermediate

  Developer Note:
  This should be PetscRestoreArguments()

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscGetArguments()`
@*/
PetscErrorCode PetscFreeArguments(char **args)
{
  PetscFunctionBegin;
  if (args) {
    PetscInt i = 0;

    /* the array is NULL-terminated, see PetscGetArguments() */
    while (args[i]) PetscCall(PetscFree(args[i++]));
    PetscCall(PetscFree(args));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PetscDefined(HAVE_SAWS)
#include <petscconfiginfo.h>

/*
   Configures and starts the SAWs web server on rank 0, honoring the -saws_* options,
   and publishes the default dashboard page; called from PetscInitialize()
*/
PETSC_INTERN PetscErrorCode PetscInitializeSAWs(const char help[])
{
  PetscFunctionBegin;
  if (!PetscGlobalRank) {
    char      cert[PETSC_MAX_PATH_LEN], root[PETSC_MAX_PATH_LEN], *intro, programname[64], *appline, *options, version[64];
    int       port;
    PetscBool flg, rootlocal = PETSC_FALSE, flg2, selectport = PETSC_FALSE;
    size_t    applinelen, introlen;
    char      sawsurl[256];

    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_log", &flg));
    if (flg) {
      char sawslog[PETSC_MAX_PATH_LEN];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_log", sawslog, sizeof(sawslog), NULL));
      if (sawslog[0]) {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (sawslog));
      } else {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (NULL));
      }
    }
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_https", cert, sizeof(cert), &flg));
    if (flg) PetscCallSAWs(SAWs_Set_Use_HTTPS, (cert));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select", &selectport, NULL));
    if (selectport) {
      PetscCallSAWs(SAWs_Get_Available_Port, (&port));
      PetscCallSAWs(SAWs_Set_Port, (port));
    } else {
      PetscCall(PetscOptionsGetInt(NULL, NULL, "-saws_port", &port, &flg));
      if (flg) PetscCallSAWs(SAWs_Set_Port, (port));
    }
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_root", root, sizeof(root), &flg));
    if (flg) {
      PetscCallSAWs(SAWs_Set_Document_Root, (root));
      PetscCall(PetscStrcmp(root, ".", &rootlocal));
    } else {
      PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_options", &flg));
      if (flg) {
        PetscCall(PetscStrreplace(PETSC_COMM_WORLD, "${PETSC_DIR}/share/petsc/saws", root, sizeof(root)));
        PetscCallSAWs(SAWs_Set_Document_Root, (root));
      }
    }
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_local", &flg2));
    if (flg2) {
      char jsdir[PETSC_MAX_PATH_LEN];
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_SUP, "-saws_local option requires -saws_root option");
      PetscCall(PetscSNPrintf(jsdir, sizeof(jsdir), "%s/js", root));
      PetscCall(PetscTestDirectory(jsdir, 'r', &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "-saws_local option requires js directory in root directory");
      PetscCallSAWs(SAWs_Push_Local_Header, ());
    }
    PetscCall(PetscGetProgramName(programname, sizeof(programname)));
    PetscCall(PetscStrlen(help, &applinelen));
    introlen = 4096 + applinelen;
    applinelen += 1024;
    PetscCall(PetscMalloc(applinelen, &appline));
    PetscCall(PetscMalloc(introlen, &intro));

    if (rootlocal) {
      /* when serving from the current directory, link the source listing if it exists */
      PetscCall(PetscSNPrintf(appline, applinelen, "%s.c.html", programname));
      PetscCall(PetscTestFile(appline, 'r', &rootlocal));
    }
    PetscCall(PetscOptionsGetAll(NULL, &options));
    if (rootlocal && help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running <a href=\"%s.c.html\">%s</a> %s</center><br><center><pre>%s</pre></center><br>\n", programname, programname, options, help));
    } else if (help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center>Running %s %s</center><br><center><pre>%s</pre></center><br>", programname, options, help));
    } else {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running %s %s</center><br>\n", programname, options));
    }
    PetscCall(PetscFree(options));
    PetscCall(PetscGetVersion(version, sizeof(version)));
    PetscCall(PetscSNPrintf(intro, introlen,
                            "<body>\n"
                            "<center><h2> <a href=\"https://petsc.org/\">PETSc</a> Application Web server powered by <a href=\"https://bitbucket.org/saws/saws\">SAWs</a> </h2></center>\n"
                            "<center>This is the default PETSc application dashboard, from it you can access any published PETSc objects or logging data</center><br><center>%s configured with %s</center><br>\n"
                            "%s",
                            version, petscconfigureoptions, appline));
    PetscCallSAWs(SAWs_Push_Body, ("index.html", 0, intro));
    PetscCall(PetscFree(intro));
    PetscCall(PetscFree(appline));
    if (selectport) {
      PetscBool silent;

      /* another process may have grabbed the port so keep trying */
      while (SAWs_Initialize()) {
        PetscCallSAWs(SAWs_Get_Available_Port, (&port));
        PetscCallSAWs(SAWs_Set_Port, (port));
      }

      PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select_silent", &silent, NULL));
      if (!silent) {
        PetscCallSAWs(SAWs_Get_FullURL, (sizeof(sawsurl), sawsurl));
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Point your browser to %s for SAWs\n", sawsurl));
      }
    } else {
      PetscCallSAWs(SAWs_Initialize, ());
    }
    PetscCall(PetscCitationsRegister("@TechReport{ saws,\n"
                                     "  Author = {Matt Otten and Jed Brown and Barry Smith},\n"
                                     "  Title = {Scientific Application Web Server (SAWs) Users Manual},\n"
                                     "  Institution = {Argonne National Laboratory},\n"
                                     "  Year = 2013\n}\n",
                                     NULL));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Things must be done before MPI_Init() when MPI is not yet initialized, and can be shared between C init and Fortran init */
PETSC_INTERN PetscErrorCode PetscPreMPIInit_Private(void)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_HWLOC_SOLARIS_BUG)
  /* see MPI.py for details on this bug */
  (void)setenv("HWLOC_COMPONENTS", "-x86", 1);
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PetscDefined(HAVE_ADIOS)
#include <adios.h>
#include <adios_read.h>
int64_t Petsc_adios_group; 744 #endif 745 #if PetscDefined(HAVE_OPENMP) 746 #include <omp.h> 747 PetscInt PetscNumOMPThreads; 748 #endif 749 750 #include <petsc/private/deviceimpl.h> 751 #if PetscDefined(HAVE_CUDA) 752 #include <petscdevice_cuda.h> 753 // REMOVE ME 754 cudaStream_t PetscDefaultCudaStream = NULL; 755 #endif 756 #if PetscDefined(HAVE_HIP) 757 #include <petscdevice_hip.h> 758 // REMOVE ME 759 hipStream_t PetscDefaultHipStream = NULL; 760 #endif 761 762 #if PetscDefined(HAVE_DLFCN_H) 763 #include <dlfcn.h> 764 #endif 765 PETSC_INTERN PetscErrorCode PetscLogInitialize(void); 766 #if PetscDefined(HAVE_VIENNACL) 767 PETSC_EXTERN PetscErrorCode PetscViennaCLInit(void); 768 PetscBool PetscViennaCLSynchronize = PETSC_FALSE; 769 #endif 770 771 PetscBool PetscCIEnabled = PETSC_FALSE, PetscCIEnabledPortableErrorOutput = PETSC_FALSE; 772 773 /* 774 PetscInitialize_Common - shared code between C and Fortran initialization 775 776 prog: program name 777 file: optional PETSc database file name. Might be in Fortran string format when 'ftn' is true 778 help: program help message 779 ftn: is it called from Fortran initialization (petscinitializef_)? 
780 len: length of file string, used when Fortran is true 781 */ 782 PETSC_INTERN PetscErrorCode PetscInitialize_Common(const char *prog, const char *file, const char *help, PetscBool ftn, PetscInt len) 783 { 784 PetscMPIInt size; 785 PetscBool flg = PETSC_TRUE; 786 char hostname[256]; 787 PetscBool blas_view_flag = PETSC_FALSE; 788 789 PetscFunctionBegin; 790 if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS); 791 /* these must be initialized in a routine, not as a constant declaration */ 792 PETSC_STDOUT = stdout; 793 PETSC_STDERR = stderr; 794 795 /* PetscCall can be used from now */ 796 PetscErrorHandlingInitialized = PETSC_TRUE; 797 798 /* 799 The checking over compatible runtime libraries is complicated by the MPI ABI initiative 800 https://wiki.mpich.org/mpich/index.php/ABI_Compatibility_Initiative which started with 801 MPICH v3.1 (Released February 2014) 802 IBM MPI v2.1 (December 2014) 803 Intel MPI Library v5.0 (2014) 804 Cray MPT v7.0.0 (June 2014) 805 As of July 31, 2017 the ABI number still appears to be 12, that is all of the versions 806 listed above and since that time are compatible. 807 808 Unfortunately the MPI ABI initiative has not defined a way to determine the ABI number 809 at compile time or runtime. Thus we will need to systematically track the allowed versions 810 and how they are represented in the mpi.h and MPI_Get_library_version() output in order 811 to perform the checking. 812 813 Currently we only check for pre MPI ABI versions (and packages that do not follow the MPI ABI). 814 815 Questions: 816 817 Should the checks for ABI incompatibility be only on the major version number below? 818 Presumably the output to stderr will be removed before a release. 
819 */ 820 821 #if defined(PETSC_HAVE_MPI_GET_LIBRARY_VERSION) 822 { 823 char mpilibraryversion[MPI_MAX_LIBRARY_VERSION_STRING]; 824 PetscMPIInt mpilibraryversionlength; 825 826 PetscCallMPI(MPI_Get_library_version(mpilibraryversion, &mpilibraryversionlength)); 827 /* check for MPICH versions before MPI ABI initiative */ 828 #if defined(MPICH_VERSION) 829 #if MPICH_NUMVERSION < 30100000 830 { 831 char *ver, *lf; 832 PetscBool flg = PETSC_FALSE; 833 834 PetscCall(PetscStrstr(mpilibraryversion, "MPICH Version:", &ver)); 835 if (ver) { 836 PetscCall(PetscStrchr(ver, '\n', &lf)); 837 if (lf) { 838 *lf = 0; 839 PetscCall(PetscStrendswith(ver, MPICH_VERSION, &flg)); 840 } 841 } 842 if (!flg) { 843 PetscCall(PetscInfo(NULL, "PETSc warning --- MPICH library version \n%s does not match what PETSc was compiled with %s.\n", mpilibraryversion, MPICH_VERSION)); 844 flg = PETSC_TRUE; 845 } 846 } 847 #endif 848 /* check for Open MPI version, it is not part of the MPI ABI initiative (is it part of another initiative that needs to be handled?) 
*/ 849 #elif defined(PETSC_HAVE_OPENMPI) 850 { 851 char *ver, bs[MPI_MAX_LIBRARY_VERSION_STRING], *bsf; 852 PetscBool flg = PETSC_FALSE; 853 #define PSTRSZ 2 854 char ompistr1[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"Open MPI", "FUJITSU MPI"}; 855 char ompistr2[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"v", "Library "}; 856 int i; 857 for (i = 0; i < PSTRSZ; i++) { 858 PetscCall(PetscStrstr(mpilibraryversion, ompistr1[i], &ver)); 859 if (ver) { 860 PetscCall(PetscSNPrintf(bs, MPI_MAX_LIBRARY_VERSION_STRING, "%s%d.%d", ompistr2[i], PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR)); 861 PetscCall(PetscStrstr(ver, bs, &bsf)); 862 if (bsf) flg = PETSC_TRUE; 863 break; 864 } 865 } 866 if (!flg) { 867 PetscCall(PetscInfo(NULL, "PETSc warning --- Open MPI library version \n%s does not match what PETSc was compiled with %d.%d.\n", mpilibraryversion, PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR)); 868 flg = PETSC_TRUE; 869 } 870 } 871 #endif 872 } 873 #endif 874 875 #if defined(PETSC_HAVE_DLADDR) && !(defined(__cray__) && defined(__clang__)) 876 /* These symbols are currently in the Open MPI and MPICH libraries; they may not always be, in that case the test will simply not detect the problem */ 877 PetscCheck(!dlsym(RTLD_DEFAULT, "ompi_mpi_init") || !dlsym(RTLD_DEFAULT, "MPID_Abort"), PETSC_COMM_SELF, PETSC_ERR_MPI_LIB_INCOMP, "Application was linked against both Open MPI and MPICH based MPI libraries and will not run correctly"); 878 #endif 879 880 PetscCall(PetscOptionsCreateDefault()); 881 882 PetscFinalizeCalled = PETSC_FALSE; 883 884 PetscCall(PetscSetProgramName(prog)); 885 PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockOpen)); 886 PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStdout)); 887 PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStderr)); 888 PetscCall(PetscSpinlockCreate(&PetscCommSpinLock)); 889 890 if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD; 891 
PetscCallMPI(MPI_Comm_set_errhandler(PETSC_COMM_WORLD, MPI_ERRORS_RETURN)); 892 893 if (PETSC_MPI_ERROR_CLASS == MPI_ERR_LASTCODE) { 894 PetscCallMPI(MPI_Add_error_class(&PETSC_MPI_ERROR_CLASS)); 895 PetscCallMPI(MPI_Add_error_code(PETSC_MPI_ERROR_CLASS, &PETSC_MPI_ERROR_CODE)); 896 } 897 898 /* Done after init due to a bug in MPICH-GM? */ 899 PetscCall(PetscErrorPrintfInitialize()); 900 901 PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PetscGlobalRank)); 902 PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PetscGlobalSize)); 903 904 MPIU_BOOL = MPI_C_BOOL; 905 MPIU_ENUM = MPI_INT; 906 MPIU_FORTRANADDR = (sizeof(void *) == sizeof(int)) ? MPI_INT : MPIU_INT64; 907 if (sizeof(size_t) == sizeof(unsigned)) MPIU_SIZE_T = MPI_UNSIGNED; 908 else if (sizeof(size_t) == sizeof(unsigned long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG; 909 #if defined(PETSC_SIZEOF_LONG_LONG) 910 else if (sizeof(size_t) == sizeof(unsigned long long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG_LONG; 911 #endif 912 else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_SUP_SYS, "Could not find MPI type for size_t"); 913 914 /* 915 Initialized the global complex variable; this is because with 916 shared libraries the constructors for global variables 917 are not called; at least on IRIX. 918 */ 919 #if defined(PETSC_HAVE_COMPLEX) 920 { 921 #if defined(PETSC_CLANGUAGE_CXX) && !defined(PETSC_USE_REAL___FLOAT128) 922 PetscComplex ic(0.0, 1.0); 923 PETSC_i = ic; 924 #else 925 PETSC_i = _Complex_I; 926 #endif 927 } 928 #endif /* PETSC_HAVE_COMPLEX */ 929 930 /* 931 Create the PETSc MPI reduction operator that sums of the first 932 half of the entries and maxes the second half. 
933 */ 934 PetscCallMPI(MPI_Op_create(MPIU_MaxSum_Local, 1, &MPIU_MAXSUM_OP)); 935 936 #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128) 937 PetscCallMPI(MPI_Type_contiguous(2, MPI_DOUBLE, &MPIU___FLOAT128)); 938 PetscCallMPI(MPI_Type_commit(&MPIU___FLOAT128)); 939 #if defined(PETSC_HAVE_COMPLEX) 940 PetscCallMPI(MPI_Type_contiguous(4, MPI_DOUBLE, &MPIU___COMPLEX128)); 941 PetscCallMPI(MPI_Type_commit(&MPIU___COMPLEX128)); 942 #endif 943 #endif 944 #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16) 945 PetscCallMPI(MPI_Type_contiguous(2, MPI_CHAR, &MPIU___FP16)); 946 PetscCallMPI(MPI_Type_commit(&MPIU___FP16)); 947 #endif 948 949 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 950 PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM)); 951 PetscCallMPI(MPI_Op_create(PetscMax_Local, 1, &MPIU_MAX)); 952 PetscCallMPI(MPI_Op_create(PetscMin_Local, 1, &MPIU_MIN)); 953 #elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)) 954 PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM___FP16___FLOAT128)); 955 #endif 956 957 PetscCallMPI(MPI_Type_contiguous(2, MPIU_SCALAR, &MPIU_2SCALAR)); 958 PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp)); 959 PetscCallMPI(MPI_Type_commit(&MPIU_2SCALAR)); 960 961 /* create datatypes used by MPIU_MAXLOC, MPIU_MINLOC and PetscSplitReduction_Op */ 962 #if !defined(PETSC_HAVE_MPIUNI) 963 { 964 PetscMPIInt blockSizes[2] = {1, 1}; 965 MPI_Aint blockOffsets[2] = {offsetof(struct petsc_mpiu_real_int, v), offsetof(struct petsc_mpiu_real_int, i)}; 966 MPI_Datatype blockTypes[2] = {MPIU_REAL, MPIU_INT}, tmpStruct; 967 968 PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct)); 969 PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_real_int), &MPIU_REAL_INT)); 970 
PetscCallMPI(MPI_Type_free(&tmpStruct)); 971 PetscCallMPI(MPI_Type_commit(&MPIU_REAL_INT)); 972 } 973 { 974 PetscMPIInt blockSizes[2] = {1, 1}; 975 MPI_Aint blockOffsets[2] = {offsetof(struct petsc_mpiu_scalar_int, v), offsetof(struct petsc_mpiu_scalar_int, i)}; 976 MPI_Datatype blockTypes[2] = {MPIU_SCALAR, MPIU_INT}, tmpStruct; 977 978 PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct)); 979 PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_scalar_int), &MPIU_SCALAR_INT)); 980 PetscCallMPI(MPI_Type_free(&tmpStruct)); 981 PetscCallMPI(MPI_Type_commit(&MPIU_SCALAR_INT)); 982 } 983 #endif 984 985 #if defined(PETSC_USE_64BIT_INDICES) 986 PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT)); 987 PetscCallMPI(MPI_Type_commit(&MPIU_2INT)); 988 989 #if !defined(PETSC_HAVE_MPIUNI) 990 { 991 int blockSizes[] = {1, 1}; 992 MPI_Aint blockOffsets[] = {offsetof(struct petsc_mpiu_int_mpiint, a), offsetof(struct petsc_mpiu_int_mpiint, b)}; 993 MPI_Datatype blockTypes[] = {MPIU_INT, MPI_INT}, tmpStruct; 994 995 PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct)); 996 PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_int_mpiint), &MPIU_INT_MPIINT)); 997 PetscCallMPI(MPI_Type_free(&tmpStruct)); 998 PetscCallMPI(MPI_Type_commit(&MPIU_INT_MPIINT)); 999 } 1000 #endif 1001 #endif 1002 PetscCallMPI(MPI_Type_contiguous(4, MPI_INT, &MPI_4INT)); 1003 PetscCallMPI(MPI_Type_commit(&MPI_4INT)); 1004 PetscCallMPI(MPI_Type_contiguous(4, MPIU_INT, &MPIU_4INT)); 1005 PetscCallMPI(MPI_Type_commit(&MPIU_4INT)); 1006 1007 /* 1008 Attributes to be set on PETSc communicators 1009 */ 1010 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &Petsc_Counter_keyval, NULL)); 1011 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_InnerComm_Attr_DeleteFn, &Petsc_InnerComm_keyval, NULL)); 1012 
PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_OuterComm_Attr_DeleteFn, &Petsc_OuterComm_keyval, NULL)); 1013 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_ShmComm_Attr_DeleteFn, &Petsc_ShmComm_keyval, NULL)); 1014 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_CreationIdx_keyval, NULL)); 1015 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Garbage_HMap_keyval, NULL)); 1016 1017 #if defined(PETSC_USE_FORTRAN_BINDINGS) 1018 if (ftn) PetscCall(PetscInitFortran_Private(file, len)); 1019 else 1020 #endif 1021 PetscCall(PetscOptionsInsert(NULL, &PetscGlobalArgc, &PetscGlobalArgs, file)); 1022 1023 if (PetscDefined(HAVE_MPIUNI)) { 1024 const char *mpienv = getenv("PMI_SIZE"); 1025 if (!mpienv) mpienv = getenv("OMPI_COMM_WORLD_SIZE"); 1026 if (mpienv) { 1027 PetscInt isize; 1028 PetscBool mflag = PETSC_FALSE; 1029 1030 PetscCall(PetscOptionsStringToInt(mpienv, &isize)); 1031 PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpiuni-allow-multiprocess-launch", &mflag, NULL)); 1032 PetscCheck(isize == 1 || mflag, MPI_COMM_SELF, PETSC_ERR_MPI, "You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc. 
Or run with -mpiuni-allow-multiprocess-launch to allow multiple independent MPI-uni jobs."); 1033 } 1034 } 1035 1036 /* call a second time so it can look in the options database */ 1037 PetscCall(PetscErrorPrintfInitialize()); 1038 1039 /* 1040 Check system options and print help 1041 */ 1042 PetscCall(PetscOptionsCheckInitial_Private(help)); 1043 1044 /* 1045 Creates the logging data structures; this is enabled even if logging is not turned on 1046 This is the last thing we do before returning to the user code to prevent having the 1047 logging numbers contaminated by any startup time associated with MPI 1048 */ 1049 PetscCall(PetscLogInitialize()); 1050 1051 /* 1052 Initialize PetscDevice and PetscDeviceContext 1053 1054 Note to any future devs thinking of moving this, proper initialization requires: 1055 1. MPI initialized 1056 2. Options DB initialized 1057 3. PETSc error handling initialized, specifically signal handlers. This expects to set up 1058 its own SIGSEV handler via the push/pop interface. 1059 4. Logging initialized 1060 */ 1061 PetscCall(PetscDeviceInitializeFromOptions_Internal(PETSC_COMM_WORLD)); 1062 1063 #if PetscDefined(HAVE_VIENNACL) 1064 flg = PETSC_FALSE; 1065 PetscCall(PetscOptionsHasName(NULL, NULL, "-log_view", &flg)); 1066 if (!flg) PetscCall(PetscOptionsGetBool(NULL, NULL, "-viennacl_synchronize", &flg, NULL)); 1067 PetscViennaCLSynchronize = flg; 1068 PetscCall(PetscViennaCLInit()); 1069 #endif 1070 1071 PetscCall(PetscCitationsInitialize()); 1072 1073 #if defined(PETSC_HAVE_SAWS) 1074 PetscCall(PetscInitializeSAWs(ftn ? NULL : help)); 1075 flg = PETSC_FALSE; 1076 PetscCall(PetscOptionsHasName(NULL, NULL, "-stack_view", &flg)); 1077 if (flg) PetscCall(PetscStackViewSAWs()); 1078 #endif 1079 1080 /* 1081 Load the dynamic libraries (on machines that support them), this registers all 1082 the solvers etc. 
(On non-dynamic machines this initializes the PetscDraw and PetscViewer classes) 1083 */ 1084 PetscCall(PetscInitialize_DynamicLibraries()); 1085 1086 PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size)); 1087 PetscCall(PetscInfo(NULL, "PETSc successfully started: number of processors = %d\n", size)); 1088 PetscCall(PetscGetHostName(hostname, sizeof(hostname))); 1089 PetscCall(PetscInfo(NULL, "Running on machine: %s\n", hostname)); 1090 #if defined(PETSC_HAVE_OPENMP) 1091 { 1092 PetscBool omp_view_flag; 1093 char *threads = getenv("OMP_NUM_THREADS"); 1094 1095 if (threads) { 1096 PetscCall(PetscInfo(NULL, "Number of OpenMP threads %s (as given by OMP_NUM_THREADS)\n", threads)); 1097 (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumOMPThreads); 1098 } else { 1099 PetscNumOMPThreads = (PetscInt)omp_get_max_threads(); 1100 PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (as given by omp_get_max_threads())\n", PetscNumOMPThreads)); 1101 } 1102 PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "OpenMP options", "Sys"); 1103 PetscCall(PetscOptionsInt("-omp_num_threads", "Number of OpenMP threads to use (can also use environmental variable OMP_NUM_THREADS", "None", PetscNumOMPThreads, &PetscNumOMPThreads, &flg)); 1104 PetscCall(PetscOptionsName("-omp_view", "Display OpenMP number of threads", NULL, &omp_view_flag)); 1105 PetscOptionsEnd(); 1106 if (flg) { 1107 PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (given by -omp_num_threads)\n", PetscNumOMPThreads)); 1108 omp_set_num_threads((int)PetscNumOMPThreads); 1109 } 1110 if (omp_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "OpenMP: number of threads %" PetscInt_FMT "\n", PetscNumOMPThreads)); 1111 } 1112 #endif 1113 1114 PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "BLAS options", "Sys"); 1115 PetscCall(PetscOptionsName("-blas_view", "Display number of threads to use for BLAS operations", NULL, &blas_view_flag)); 1116 #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) || 
defined(PETSC_HAVE_MKL_SET_NUM_THREADS) || defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS) 1117 { 1118 char *threads = NULL; 1119 1120 /* determine any default number of threads requested in the environment; TODO: Apple libraries? */ 1121 #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) 1122 threads = getenv("BLIS_NUM_THREADS"); 1123 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by BLIS_NUM_THREADS\n", threads)); 1124 if (!threads) { 1125 threads = getenv("OMP_NUM_THREADS"); 1126 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by OMP_NUM_THREADS\n", threads)); 1127 } 1128 #elif defined(PETSC_HAVE_MKL_SET_NUM_THREADS) 1129 threads = getenv("MKL_NUM_THREADS"); 1130 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by MKL_NUM_THREADS\n", threads)); 1131 if (!threads) { 1132 threads = getenv("OMP_NUM_THREADS"); 1133 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by OMP_NUM_THREADS\n", threads)); 1134 } 1135 #elif defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS) 1136 threads = getenv("OPENBLAS_NUM_THREADS"); 1137 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OPENBLAS_NUM_THREADS\n", threads)); 1138 if (!threads) { 1139 threads = getenv("OMP_NUM_THREADS"); 1140 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OMP_NUM_THREADS\n", threads)); 1141 } 1142 #endif 1143 if (threads) (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumBLASThreads); 1144 PetscCall(PetscOptionsInt("-blas_num_threads", "Number of threads to use for BLAS operations", "None", PetscNumBLASThreads, &PetscNumBLASThreads, &flg)); 1145 if (flg) PetscCall(PetscInfo(NULL, "BLAS: Command line number of BLAS thread %" PetscInt_FMT "given by -blas_num_threads\n", PetscNumBLASThreads)); 1146 if (flg || threads) { 1147 
PetscCall(PetscBLASSetNumThreads(PetscNumBLASThreads)); 1148 if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: number of threads %" PetscInt_FMT "\n", PetscNumBLASThreads)); 1149 } 1150 } 1151 #elif defined(PETSC_HAVE_APPLE_ACCELERATE) 1152 PetscCall(PetscInfo(NULL, "BLAS: Apple Accelerate library, thread support with no user control\n")); 1153 if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: Apple Accelerate library, thread support with no user control\n")); 1154 #else 1155 if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: no thread support\n")); 1156 #endif 1157 PetscOptionsEnd(); 1158 1159 #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32) 1160 /* 1161 Tell MPI about our own data representation converter, this would/should be used if extern32 is not supported by the MPI 1162 1163 Currently not used because it is not supported by MPICH. 1164 */ 1165 if (!PetscBinaryBigEndian()) PetscCallMPI(MPI_Register_datarep((char *)"petsc", PetscDataRep_read_conv_fn, PetscDataRep_write_conv_fn, PetscDataRep_extent_fn, NULL)); 1166 #endif 1167 1168 #if defined(PETSC_SERIALIZE_FUNCTIONS) 1169 PetscCall(PetscFPTCreate(10000)); 1170 #endif 1171 1172 #if defined(PETSC_HAVE_HWLOC) 1173 { 1174 PetscViewer viewer; 1175 PetscCall(PetscOptionsCreateViewer(PETSC_COMM_WORLD, NULL, NULL, "-process_view", &viewer, NULL, &flg)); 1176 if (flg) { 1177 PetscCall(PetscProcessPlacementView(viewer)); 1178 PetscCall(PetscViewerDestroy(&viewer)); 1179 } 1180 } 1181 #endif 1182 1183 flg = PETSC_TRUE; 1184 PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewfromoptions", &flg, NULL)); 1185 if (!flg) PetscCall(PetscOptionsPushCreateViewerOff(PETSC_TRUE)); 1186 1187 #if defined(PETSC_HAVE_ADIOS) 1188 PetscCallExternal(adios_init_noxml, PETSC_COMM_WORLD); 1189 PetscCallExternal(adios_declare_group, &Petsc_adios_group, "PETSc", "", adios_stat_default); 1190 PetscCallExternal(adios_select_method, Petsc_adios_group, "MPI", "", ""); 1191 
PetscCallExternal(adios_read_init_method, ADIOS_READ_METHOD_BP, PETSC_COMM_WORLD, ""); 1192 #endif 1193 1194 #if defined(__VALGRIND_H) 1195 PETSC_RUNNING_ON_VALGRIND = RUNNING_ON_VALGRIND ? PETSC_TRUE : PETSC_FALSE; 1196 #if defined(PETSC_USING_DARWIN) && defined(PETSC_BLASLAPACK_SDOT_RETURNS_DOUBLE) 1197 if (PETSC_RUNNING_ON_VALGRIND) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING: Running valgrind with the macOS native BLAS and LAPACK can fail. If it fails, try configuring with --download-fblaslapack or --download-f2cblaslapack")); 1198 #endif 1199 #endif 1200 /* 1201 Set flag that we are completely initialized 1202 */ 1203 PetscInitializeCalled = PETSC_TRUE; 1204 1205 PetscCall(PetscOptionsHasName(NULL, NULL, "-python", &flg)); 1206 if (flg) PetscCall(PetscPythonInitialize(NULL, NULL)); 1207 1208 PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg)); 1209 if (flg) PetscCall(PetscInfo(NULL, "Running MPI Linear Solver Server\n")); 1210 if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerBegin()); 1211 else PetscCheck(!flg, PETSC_COMM_WORLD, PETSC_ERR_SUP, "PETSc configured using -with-single-library=0; -mpi_linear_solver_server not supported in that case"); 1212 PetscFunctionReturn(PETSC_SUCCESS); 1213 } 1214 1215 // "Unknown section 'Environmental Variables'" 1216 // PetscClangLinter pragma disable: -fdoc-section-header-unknown 1217 /*@C 1218 PetscInitialize - Initializes the PETSc database and MPI. 1219 `PetscInitialize()` calls MPI_Init() if that has yet to be called, 1220 so this routine should always be called near the beginning of 1221 your program -- usually the very first line! 1222 1223 Collective on `MPI_COMM_WORLD` or `PETSC_COMM_WORLD` if it has been set 1224 1225 Input Parameters: 1226 + argc - count of number of command line arguments 1227 . args - the command line arguments 1228 . file - [optional] PETSc database file, append ":yaml" to filename to specify YAML options format. 
1229 Use `NULL` or empty string to not check for code specific file. 1230 Also checks `~/.petscrc`, `.petscrc` and `petscrc`. 1231 Use `-skip_petscrc` in the code specific file (or command line) to skip `~/.petscrc`, `.petscrc` and `petscrc` files. 1232 - help - [optional] Help message to print, use `NULL` for no message 1233 1234 If you wish PETSc code to run ONLY on a subcommunicator of `MPI_COMM_WORLD`, create that 1235 communicator first and assign it to `PETSC_COMM_WORLD` BEFORE calling `PetscInitialize()`. 1236 then do this. If ALL processes in the job are using `PetscInitialize()` and `PetscFinalize()` then you don't need to do this, even 1237 if different subcommunicators of the job are doing different things with PETSc. 1238 1239 Options Database Keys: 1240 + -help [intro] - prints help method for each option; if `intro` is given the program stops after printing the introductory help message 1241 . -start_in_debugger [noxterm,dbx,xdb,gdb,...] - Starts program in debugger 1242 . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected 1243 . -on_error_emacs <machinename> - causes `emacsclient` to jump to error file if an error is detected 1244 . -on_error_abort - calls `abort()` when error detected (no traceback) 1245 . -on_error_mpiabort - calls `MPI_abort()` when error detected 1246 . -error_output_stdout - prints PETSc error messages to `stdout` instead of the default `stderr` 1247 . -error_output_none - does not print the error messages (but handles errors in the same way as if this was not called) 1248 . -debugger_ranks [rank1,rank2,...] - Indicates MPI ranks to start in debugger 1249 . -debugger_pause [sleeptime] (in seconds) - Pauses debugger, use if it takes a long time for the debugger to start up on your system 1250 . -stop_for_debugger - Print message on how to attach debugger manually to 1251 process and wait (`-debugger_pause`) seconds for attachment 1252 . 
-malloc_dump - prints a list of all unfreed memory at the end of the run 1253 . -malloc_test - like `-malloc_dump` `-malloc_debug`, only active for debugging build, ignored in optimized build. Often set in `PETSC_OPTIONS` environmental variable 1254 . -malloc_view - show a list of all allocated memory during `PetscFinalize()` 1255 . -malloc_view_threshold <t> - only list memory allocations of size greater than t with `-malloc_view` 1256 . -malloc_requested_size - malloc logging will record the requested size rather than (possibly large) size after alignment 1257 . -fp_trap - Stops on floating point exceptions 1258 . -no_signal_handler - Indicates not to trap error signals 1259 . -shared_tmp - indicates `/tmp` directory is known to be shared by all processors 1260 . -not_shared_tmp - indicates each processor has own `/tmp` 1261 . -tmp - alternative directory to use instead of `/tmp` 1262 . -python <exe> - Initializes Python, and optionally takes a Python executable name 1263 - -mpiuni-allow-multiprocess-launch - allow `mpiexec` to launch multiple independent MPI-Uni jobs, otherwise a sanity check error is invoked to prevent misuse of MPI-Uni 1264 1265 Options Database Keys for Option Database: 1266 + -skip_petscrc - skip the default option files `~/.petscrc`, `.petscrc`, `petscrc` 1267 . -options_monitor - monitor all set options to standard output for the whole program run 1268 - -options_monitor_cancel - cancel options monitoring hard-wired using `PetscOptionsMonitorSet()` 1269 1270 Options -options_monitor_{all,cancel} are 1271 position-independent and apply to all options set since the PETSc start. 1272 They can be used also in option files. 1273 1274 See `PetscOptionsMonitorSet()` to do monitoring programmatically. 1275 1276 Options Database Keys for Profiling: 1277 See Users-Manual: ch_profiling for details. 1278 + -info [filename][:[~]<list,of,classnames>[:[~]self]] - Prints verbose information. See `PetscInfo()`. 1279 . 
-log_sync - Enable barrier synchronization for all events. This option is useful to debug imbalance within each event,
  however it slows things down and gives a distorted view of the overall runtime.
. -log_trace [filename] - Print traces of all PETSc calls to the screen (useful to determine where a program
  hangs without running in the debugger). See `PetscLogTraceBegin()`.
. -log_view [:filename:format][,[:filename:format]...] - Prints summary of flop and timing information to screen or file, see `PetscLogView()` (up to 4 viewers)
. -log_view_memory - Includes in the summary from -log_view the memory used in each event, see `PetscLogView()`.
. -log_view_gpu_time - Includes in the summary from -log_view the time used in each GPU kernel, see `PetscLogView()`.
. -log_exclude: <vec,mat,pc,ksp,snes> - excludes subset of object classes from logging
. -log [filename] - Logs profiling information in a dump file, see `PetscLogDump()`.
. -log_all [filename] - Same as `-log`.
. -log_mpe [filename] - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution)
. -log_perfstubs - Starts a log handler with the perfstubs interface (which is used by TAU)
. -log_nvtx - Starts an nvtx log handler for use with Nsight
. -log_roctx - Starts an roctx log handler for use with rocprof on AMD GPUs
. -viewfromoptions on,off - Enable or disable `XXXSetFromOptions()` calls, for applications with many small solves turn this off
. -get_total_flops - Returns total flops done by all processors
. -memory_view - Print memory usage at end of run
- -check_pointer_intensity 0,1,2 - if pointers are checked for validity (debug version only), using 0 will result in faster code

   Options Database Keys for SAWs:
+ -saws_port <portnumber> - port number to publish SAWs data, default is 8080
.
-saws_port_auto_select - have SAWs select a new unique port number where it publishes the data, the URL is printed to the screen 1301 this is useful when you are running many jobs that utilize SAWs at the same time 1302 . -saws_log <filename> - save a log of all SAWs communication 1303 . -saws_https <certificate file> - have SAWs use HTTPS instead of HTTP 1304 - -saws_root <directory> - allow SAWs to have access to the given directory to search for requested resources and files 1305 1306 Environmental Variables: 1307 + `PETSC_TMP` - alternative directory to use instead of `/tmp` 1308 . `PETSC_SHARED_TMP` - `/tmp` is shared by all processes 1309 . `PETSC_NOT_SHARED_TMP` - each process has its own private `/tmp` 1310 . `PETSC_OPTIONS` - a string containing additional options for PETSc in the form of command line "-key value" pairs 1311 . `PETSC_OPTIONS_YAML` - (requires configuring PETSc to use libyaml with `--download-yaml`) a string containing additional options for PETSc in the form of a YAML document 1312 . `PETSC_VIEWER_SOCKET_PORT` - socket number to use for socket viewer 1313 - `PETSC_VIEWER_SOCKET_MACHINE` - machine to use for socket viewer to connect to 1314 1315 Level: beginner 1316 1317 Note: 1318 If for some reason you must call `MPI_Init()` separately from `PetscInitialize()`, call 1319 it before `PetscInitialize()`. 1320 1321 Fortran Notes: 1322 In Fortran this routine can be called with 1323 .vb 1324 call PetscInitialize(ierr) 1325 call PetscInitialize(file,ierr) or 1326 call PetscInitialize(file,help,ierr) 1327 .ve 1328 1329 If your main program is C but you call Fortran code that also uses PETSc you need to call `PetscInitializeFortran()` soon after 1330 calling `PetscInitialize()`. 1331 1332 Options Database Key for Developers: 1333 . -checkfunctionlist - automatically checks that function lists associated with objects are correctly cleaned up. Produces messages of the form: 1334 "function name: MatInodeGetInodeSizes_C" if they are not cleaned up. 
This flag is always set for the test harness (in framework.py) 1335 1336 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscInitializeNoArguments()`, `PetscLogGpuTime()` 1337 @*/ 1338 PetscErrorCode PetscInitialize(int *argc, char ***args, const char file[], const char help[]) 1339 { 1340 PetscMPIInt flag; 1341 const char *prog = "Unknown Name"; 1342 1343 PetscFunctionBegin; 1344 if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS); 1345 PetscCallMPI(MPI_Initialized(&flag)); 1346 if (!flag) { 1347 PetscCheck(PETSC_COMM_WORLD == MPI_COMM_NULL, PETSC_COMM_SELF, PETSC_ERR_SUP, "You cannot set PETSC_COMM_WORLD if you have not initialized MPI first"); 1348 PetscCall(PetscPreMPIInit_Private()); 1349 #if defined(PETSC_HAVE_MPI_INIT_THREAD) 1350 { 1351 PetscMPIInt provided; 1352 PetscCallMPI(MPI_Init_thread(argc, args, PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE ? MPI_THREAD_FUNNELED : PETSC_MPI_THREAD_REQUIRED, &provided)); 1353 PetscCheck(PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE || provided >= PETSC_MPI_THREAD_REQUIRED, PETSC_COMM_SELF, PETSC_ERR_MPI, "The MPI implementation's provided thread level is less than what you required"); 1354 if (PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE) PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_FUNNELED; // assign it a valid value after check-up 1355 } 1356 #else 1357 PetscCallMPI(MPI_Init(argc, args)); 1358 #endif 1359 PetscBeganMPI = PETSC_TRUE; 1360 } 1361 1362 if (argc && *argc) prog = **args; 1363 if (argc && args) { 1364 PetscGlobalArgc = *argc; 1365 PetscGlobalArgs = *args; 1366 } 1367 PetscCall(PetscInitialize_Common(prog, file, help, PETSC_FALSE, 0)); 1368 PetscFunctionReturn(PETSC_SUCCESS); 1369 } 1370 1371 PETSC_INTERN PetscObject *PetscObjects; 1372 PETSC_INTERN PetscInt PetscObjectsCounts; 1373 PETSC_INTERN PetscInt PetscObjectsMaxCounts; 1374 PETSC_INTERN PetscBool PetscObjectsLog; 1375 1376 /* 1377 Frees all the MPI types and operations that PETSc may have created 1378 */ 1379 PetscErrorCode 
PetscFreeMPIResources(void) 1380 { 1381 PetscFunctionBegin; 1382 #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128) 1383 PetscCallMPI(MPI_Type_free(&MPIU___FLOAT128)); 1384 #if defined(PETSC_HAVE_COMPLEX) 1385 PetscCallMPI(MPI_Type_free(&MPIU___COMPLEX128)); 1386 #endif 1387 #endif 1388 #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16) 1389 PetscCallMPI(MPI_Type_free(&MPIU___FP16)); 1390 #endif 1391 1392 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 1393 PetscCallMPI(MPI_Op_free(&MPIU_SUM)); 1394 PetscCallMPI(MPI_Op_free(&MPIU_MAX)); 1395 PetscCallMPI(MPI_Op_free(&MPIU_MIN)); 1396 #elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)) 1397 PetscCallMPI(MPI_Op_free(&MPIU_SUM___FP16___FLOAT128)); 1398 #endif 1399 1400 PetscCallMPI(MPI_Type_free(&MPIU_2SCALAR)); 1401 PetscCallMPI(MPI_Type_free(&MPIU_REAL_INT)); 1402 PetscCallMPI(MPI_Type_free(&MPIU_SCALAR_INT)); 1403 #if defined(PETSC_USE_64BIT_INDICES) 1404 PetscCallMPI(MPI_Type_free(&MPIU_2INT)); 1405 PetscCallMPI(MPI_Type_free(&MPIU_INT_MPIINT)); 1406 #endif 1407 PetscCallMPI(MPI_Type_free(&MPI_4INT)); 1408 PetscCallMPI(MPI_Type_free(&MPIU_4INT)); 1409 PetscCallMPI(MPI_Op_free(&MPIU_MAXSUM_OP)); 1410 PetscCallMPI(MPI_Op_free(&Petsc_Garbage_SetIntersectOp)); 1411 PetscFunctionReturn(PETSC_SUCCESS); 1412 } 1413 1414 PETSC_INTERN PetscErrorCode PetscLogFinalize(void); 1415 PETSC_EXTERN PetscErrorCode PetscFreeAlign(void *, int, const char[], const char[]); 1416 1417 /*@ 1418 PetscFinalize - Checks for options to be called at the conclusion of a PETSc program and frees any remaining PETSc objects and data structures. 1419 of the program. Automatically calls `MPI_Finalize()` if the user had not called `MPI_Init()` before calling `PetscInitialize()`. 

   Collective on `PETSC_COMM_WORLD`

   Options Database Keys:
+  -options_view - Calls `PetscOptionsView()`
.  -options_left - Prints unused options that remain in the database
.  -objects_dump [all] - Prints list of objects allocated by the user that have not been freed, the option `all` causes all outstanding objects to be listed
.  -mpidump - Calls `PetscMPIDump()`
.  -malloc_dump <optional filename> - Calls `PetscMallocDump()`, displays all memory allocated that has not been freed
.  -memory_view - Prints total memory usage
-  -malloc_view <optional filename> - Prints list of all memory allocated and in what functions

   Level: beginner

   Note:
   See `PetscInitialize()` for other runtime options.

   You can call `PetscInitialize()` after `PetscFinalize()` but only with MPI-Uni or if you called `MPI_Init()` before ever calling `PetscInitialize()`.

.seealso: `PetscInitialize()`, `PetscOptionsView()`, `PetscMallocDump()`, `PetscMPIDump()`, `PetscEnd()`
@*/
PetscErrorCode PetscFinalize(void)
{
  PetscMPIInt rank;
  PetscInt    nopt;
  PetscBool   flg1 = PETSC_FALSE, flg2 = PETSC_FALSE, flg3 = PETSC_FALSE;
  PetscBool   flg;
  char        mname[PETSC_MAX_PATH_LEN];

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "PetscInitialize() must be called before PetscFinalize()");
  PetscCall(PetscInfo(NULL, "PetscFinalize() called\n"));

  /* Stop the MPI linear solver server (if it was started) before any teardown begins */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
  if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerEnd());

  /* Release the saved command-line argument storage */
  PetscCall(PetscFreeAlign(PetscGlobalArgsFortran, 0, NULL, NULL));
  PetscGlobalArgc = 0;
  PetscGlobalArgs = NULL;

  /* Clean up Garbage automatically on COMM_SELF and COMM_WORLD at finalize */
  {
    /* MPI attributes are stored as void *; the union converts to MPI_Comm without casts */
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscMPIInt flg; /* intentionally shadows the outer flg; local to the attribute queries */
    void       *tmp;

    /* only run the cleanup if the inner communicator exists AND it carries a garbage hash-map attribute */
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_SELF));
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD));
  }

  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
#if defined(PETSC_HAVE_ADIOS)
  PetscCallExternal(adios_read_finalize_method, ADIOS_READ_METHOD_BP_AGGREGATE);
  PetscCallExternal(adios_finalize, rank);
#endif

  /* Write any registered citations to stdout or the file given to -citations */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-citations", &flg));
  if (flg) {
    char *cits, filename[PETSC_MAX_PATH_LEN];
    FILE *fd = PETSC_STDOUT;

    PetscCall(PetscOptionsGetString(NULL, NULL, "-citations", filename, sizeof(filename), NULL));
    if (filename[0]) PetscCall(PetscFOpen(PETSC_COMM_WORLD, filename, "w", &fd));
    /* ensure the segmented buffer is NULL-terminated before extracting it as one string */
    PetscCall(PetscSegBufferGet(PetscCitationsList, 1, &cits));
    cits[0] = 0;
    PetscCall(PetscSegBufferExtractAlloc(PetscCitationsList, &cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "If you publish results based on this computation please cite the following:\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "%s", cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFClose(PETSC_COMM_WORLD, fd));
    PetscCall(PetscFree(cits));
  }
  PetscCall(PetscSegBufferDestroy(&PetscCitationsList));

#if defined(PETSC_SERIALIZE_FUNCTIONS)
  PetscCall(PetscFPTDestroy());
#endif

#if defined(PETSC_HAVE_X)
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-x_virtual", &flg1, NULL));
  if (flg1) {
    /* this is a crude hack, but better than nothing */
    PetscCall(PetscPOpen(PETSC_COMM_WORLD, NULL, "pkill -15 Xvfb", "r", NULL));
  }
#endif

#if !defined(PETSC_HAVE_THREADSAFETY)
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-memory_view", &flg2, NULL));
  if (flg2) PetscCall(PetscMemoryView(PETSC_VIEWER_STDOUT_WORLD, "Summary of Memory Usage in PETSc\n"));
#endif

  if (PetscDefined(USE_LOG)) {
    flg1 = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-get_total_flops", &flg1, NULL));
    if (flg1) {
      PetscLogDouble flops = 0;
      PetscCallMPI(MPI_Reduce(&petsc_TotalFlops, &flops, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Total flops over all processors %g\n", flops));
    }
  }

  if (PetscDefined(USE_LOG) && PetscDefined(HAVE_MPE)) {
    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_mpe", mname, sizeof(mname), &flg1));
    if (flg1) PetscCall(PetscLogMPEDump(mname[0] ? mname : NULL));
  }

#if defined(PETSC_HAVE_KOKKOS)
  // Free PETSc/kokkos stuff before the potentially non-null PETSc default gpu stream is destroyed by PetscObjectRegisterDestroyAll
  if (PetscKokkosInitialized) {
    PetscCall(PetscKokkosFinalize_Private());
    PetscKokkosInitialized = PETSC_FALSE;
  }
#endif

  // Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_().
  PetscCall(PetscObjectRegisterDestroyAll());

  if (PetscDefined(USE_LOG)) {
    PetscCall(PetscOptionsPushCreateViewerOff(PETSC_FALSE));
    PetscCall(PetscLogViewFromOptions());
    PetscCall(PetscOptionsPopCreateViewerOff());
    // It should be turned on with PetscLogGpuTime() and never turned off except in this place
    PetscLogGpuTimeFlag = PETSC_FALSE;

    // Free any objects created by the last block of code.
    PetscCall(PetscObjectRegisterDestroyAll());

    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_all", mname, sizeof(mname), &flg1));
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log", mname, sizeof(mname), &flg2));
    if (flg1 || flg2) PetscCall(PetscLogDump(mname));
  }

  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-no_signal_handler", &flg1, NULL));
  if (!flg1) PetscCall(PetscPopSignalHandler());
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpidump", &flg1, NULL));
  if (flg1) PetscCall(PetscMPIDump(stdout));
  flg1 = PETSC_FALSE;
  flg2 = PETSC_FALSE;
  /* preemptive call to avoid listing this option in options table as unused */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-malloc_dump", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_view", &flg2, NULL));

  if (flg2) PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD));

  /* to prevent PETSc -options_left from warning */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox_warning", &flg1));

  flg3 = PETSC_FALSE; /* default value is required */
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_left", &flg3, &flg1));
  /* when -options_left was not given explicitly (flg1 unset), report unused options by default */
  if (!flg1) flg3 = PETSC_TRUE;
  if (flg3) {
    if (!flg2 && flg1) { /* have not yet printed the options */
      PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD));
    }
    PetscCall(PetscOptionsAllUsed(NULL, &nopt));
    if (nopt) {
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! There are options you set that were not used!\n"));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! could be spelling mistake, etc!\n"));
      if (nopt == 1) {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There is one unused database option. It is:\n"));
      } else {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are %" PetscInt_FMT " unused database options. They are:\n", nopt));
      }
    } else if (flg3 && flg1) { /* user explicitly asked for -options_left and nothing was unused */
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are no unused options.\n"));
    }
    PetscCall(PetscOptionsLeft(NULL));
  }

#if defined(PETSC_HAVE_SAWS)
  if (!PetscGlobalRank) {
    PetscCall(PetscStackSAWsViewOff());
    PetscCallSAWs(SAWs_Finalize, ());
  }
#endif

  /*
     List all objects the user may have forgot to free
  */
  if (PetscDefined(USE_LOG) && PetscObjectsLog) {
    PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
    if (flg1) {
      MPI_Comm local_comm;
      char     string[64];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-objects_dump", string, sizeof(string), NULL));
      /* dup the communicator so the sequential-phase handshake cannot interfere with other traffic */
      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      /* "-objects_dump all" (any argument starting with 'a') lists every outstanding object */
      PetscCall(PetscObjectsDump(stdout, (string[0] == 'a') ? PETSC_TRUE : PETSC_FALSE));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }

  PetscObjectsCounts    = 0;
  PetscObjectsMaxCounts = 0;
  PetscCall(PetscFree(PetscObjects));

  /*
     Destroy any packages that registered a finalize
  */
  PetscCall(PetscRegisterFinalizeAll());

  PetscCall(PetscLogFinalize());

  /*
     Print PetscFunctionLists that have not been properly freed
  */
  if (PetscPrintFunctionList) PetscCall(PetscFunctionListPrintAll());

  if (petsc_history) {
    PetscCall(PetscCloseHistoryFile(&petsc_history));
    petsc_history = NULL;
  }
  PetscCall(PetscOptionsHelpPrintedDestroy(&PetscOptionsHelpPrintedSingleton));
  PetscCall(PetscInfoDestroy());

#if !defined(PETSC_HAVE_THREADSAFETY)
  /* skip the malloc reports entirely under valgrind; valgrind does its own leak tracking */
  if (!(PETSC_RUNNING_ON_VALGRIND)) {
    char  fname[PETSC_MAX_PATH_LEN];
    char  sname[PETSC_MAX_PATH_LEN];
    FILE *fd;
    int   err;

    flg2 = PETSC_FALSE;
    flg3 = PETSC_FALSE;
    if (PetscDefined(USE_DEBUG)) PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_test", &flg2, NULL));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_debug", &flg3, NULL));
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_dump", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      /* a filename was given: each rank writes its own <fname>_<rank> file */
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocDump(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1 || flg2 || flg3) {
      /* no filename: ranks take turns dumping to stdout to keep the output readable */
      MPI_Comm local_comm;

      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocDump(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_view", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocView(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1) {
      MPI_Comm local_comm;

      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocView(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }
#endif

  /*
     Close any open dynamic libraries
  */
  PetscCall(PetscFinalize_DynamicLibraries());

  /* Can be destroyed only after all the options are used */
  PetscCall(PetscOptionsDestroyDefault());

#if defined(PETSC_HAVE_NVSHMEM)
  if (PetscBeganNvshmem) {
    PetscCall(PetscNvshmemFinalize());
    PetscBeganNvshmem = PETSC_FALSE;
  }
#endif

  PetscCall(PetscFreeMPIResources());

  /*
     Destroy any known inner MPI_Comm's and attributes pointing to them
     Note this will not destroy any new communicators the user has created.

     If all PETSc objects were not destroyed those left over objects will have hanging references to
     the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again
  */
  {
    PetscCommCounter *counter;
    PetscMPIInt       flg;
    MPI_Comm          icomm;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      /* delete the attributes first so their delete callbacks do not fire on the freed comm */
      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
  }

  /* These keyvals were created unconditionally by PetscInitialize() */
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Counter_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_InnerComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_OuterComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_ShmComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_CreationIdx_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Garbage_HMap_keyval));

  // Free keyvals which may be silently created by some routines
  if (Petsc_SharedWD_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedWD_keyval));
  if (Petsc_SharedTmp_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedTmp_keyval));

  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockOpen));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStdout));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStderr));
  PetscCall(PetscSpinlockDestroy(&PetscCommSpinLock));

  /* Only finalize MPI if PetscInitialize() was the one that started it */
  if (PetscBeganMPI) {
    PetscMPIInt flag;
    PetscCallMPI(MPI_Finalized(&flag));
    PetscCheck(!flag, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()");
    /* wait until the very last moment to disable error handling */
    PetscErrorHandlingInitialized = PETSC_FALSE;
    PetscCallMPI(MPI_Finalize());
  } else PetscErrorHandlingInitialized = PETSC_FALSE;

  /*
     Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because
     the communicator has some outstanding requests on it. Specifically if the
     flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See
     src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate()
     is never freed as it should be. Thus one may obtain messages of the form
     [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the
     memory was not freed.

  */
  PetscCall(PetscMallocClear());
  PetscCall(PetscStackReset());

  PetscInitializeCalled = PETSC_FALSE;
  PetscFinalizeCalled   = PETSC_TRUE;
#if defined(PETSC_USE_COVERAGE)
  /*
     flush gcov, otherwise during CI the flushing continues into the next pipeline resulting in git not being able to delete directories since the
     gcov files are still being added to the directories as git tries to remove the directories.
  */
  __gcov_flush();
#endif
  /* To match PetscFunctionBegin() at the beginning of this function */
  PetscStackClearTop;
  return PETSC_SUCCESS;
}

#if defined(PETSC_MISSING_LAPACK_lsame_)
/*
   Fallback for the Fortran LAPACK helper lsame_(): returns 1 when the two single
   characters match ignoring case, 0 otherwise. The +/-32 arithmetic maps between
   upper- and lower-case -- this assumes ASCII letters (LSAME's documented use);
   non-letter inputs can compare equal spuriously.
*/
PETSC_EXTERN int lsame_(char *a, char *b)
{
  if (*a == *b) return 1;
  if (*a + 32 == *b) return 1;
  if (*a - 32 == *b) return 1;
  return 0;
}
#endif

#if defined(PETSC_MISSING_LAPACK_lsame)
/* Same fallback as lsame_() above, for LAPACK builds whose symbols have no trailing underscore */
PETSC_EXTERN int lsame(char *a, char *b)
{
  if (*a == *b) return 1;
  if (*a + 32 == *b) return 1;
  if (*a - 32 == *b) return 1;
  return 0;
}
#endif

/*
   Dispatch an MPI_Allreduce() whose count is an MPIU_Count: with MPI large-count
   support use MPI_Allreduce_c() directly, otherwise narrow the count to PetscMPIInt
   and call the classic MPI_Allreduce(). Returns the raw MPI error code (not a
   PetscErrorCode), so callers can layer their own error handling.
*/
static inline PetscMPIInt MPIU_Allreduce_Count(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
{
  PetscMPIInt err;
#if !defined(PETSC_HAVE_MPI_LARGE_COUNT)
  PetscMPIInt count2;

  PetscMPIIntCast_Internal(count, &count2);
  err = MPI_Allreduce((void *)inbuf, outbuf, count2, dtype, op, comm);
#else
  err = MPI_Allreduce_c((void *)inbuf, outbuf, count, dtype, op, comm);
#endif
  return err;
}

/*
   When count is 1 and dtype == MPIU_INT performs the reduction in PetscInt64 to check for integer overflow

   Only MPI_SUM and MPI_PROD can overflow, and only in 32-bit PetscInt builds, hence the
   guard below; every other case falls straight through to MPIU_Allreduce_Count().
*/
PetscMPIInt MPIU_Allreduce_Private(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
{
  PetscMPIInt err;
  if (!PetscDefined(USE_64BIT_INDICES) && count == 1 && dtype == MPIU_INT && (op == MPI_SUM || op == MPI_PROD)) {
    PetscInt64 incnt, outcnt;
    void      *inbufd, *outbufd;

    /* widen the single 32-bit value to 64 bits, reduce, then narrow back */
    if (inbuf != MPI_IN_PLACE) {
      incnt   = *(PetscInt32 *)inbuf;
      inbufd  = &incnt;
      outbufd = &outcnt;
      err     = MPIU_Allreduce_Count(inbufd, outbufd, count, MPIU_INT64, op, comm);
    } else {
      outcnt  = *(PetscInt32 *)outbuf;
      outbufd = &outcnt;
      err     = MPIU_Allreduce_Count(MPI_IN_PLACE, outbufd, count, MPIU_INT64, op, comm);
    }
    /* a 64-bit result that does not fit in PetscInt means the 32-bit reduction would have overflowed */
    if (!err && outcnt > PETSC_INT_MAX) err = MPI_ERR_OTHER;
    *(PetscInt32 *)outbuf = (PetscInt32)outcnt;
  } else {
    err = MPIU_Allreduce_Count(inbuf, outbuf, count, dtype, op, comm);
  }
  return err;
}

// Check if MPIU_Allreduce() is called on the same filename:lineno and with the same data count across all processes. Error out if otherwise.
PetscErrorCode PetscCheckAllreduceSameLineAndCount_Private(MPI_Comm comm, const char *filename, PetscMPIInt lineno, PetscMPIInt count)
{
  PetscMPIInt rbuf[4];

  PetscFunctionBegin;
  /* Reduce {lineno, -lineno, count, -count} with MPI_MAX: afterwards rbuf[0] is the
     maximum and -rbuf[1] the minimum, so all ranks agree iff rbuf[0] == -rbuf[1]
     (same trick for the count in rbuf[2]/rbuf[3]) -- one reduction instead of two */
  rbuf[0] = lineno;
  rbuf[1] = -rbuf[0];
  rbuf[2] = count;
  rbuf[3] = -rbuf[2];
  PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, rbuf, 4, MPI_INT, MPI_MAX, comm));

  if (rbuf[0] != -rbuf[1]) {
    /* mismatch: gather every rank's "process, file:line" string onto rank 0 for the error message */
    size_t      len;
    PetscMPIInt size, rank, ilen, *recvcounts = NULL, *displs = NULL;
    char       *str = NULL, *str0 = NULL;

    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscStrlen(filename, &len));
    len += 128; /* add enough space for the leading and trailing chars in PetscSNPrintf around __FILE__ */
    PetscCall(PetscMalloc1(len, &str));
    PetscCall(PetscSNPrintf(str, len, " On process %d, %s:%d\n", rank, filename, lineno));
    PetscCall(PetscStrlen(str, &len)); /* string length exclusive of the NULL terminator */
    ilen = (PetscMPIInt)len;
    if (rank == 0) PetscCall(PetscMalloc2(size, &recvcounts, size + 1, &displs));
    PetscCallMPI(MPI_Gather(&ilen, 1, MPI_INT, recvcounts, 1, MPI_INT, 0, comm));
    if (rank == 0) {
      displs[0] = 0;
      for (PetscMPIInt i = 0; i < size; i++) displs[i + 1] = displs[i] + recvcounts[i];
      PetscCall(PetscMalloc1(displs[size], &str0));
    }
    PetscCallMPI(MPI_Gatherv(str, ilen, MPI_CHAR, str0, recvcounts, displs, MPI_CHAR, 0, comm));
    if (rank == 0) str0[displs[size] - 1] = 0; /* replace the ending \n with NULL */
    PetscCall(PetscFree(str));
    if (rank == 0) PetscCall(PetscFree2(recvcounts, displs));
    SETERRQ(comm, PETSC_ERR_PLIB, "MPIU_Allreduce() called in different locations on different processes:\n%s", str0);
  }
  PetscCheck(rbuf[2] == -rbuf[3], comm, PETSC_ERR_PLIB, "MPIU_Allreduce() called with different counts %d on different processes", count);
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscCtxDestroyDefault - An implementation of a `PetscCtxDestroyFn` that uses `PetscFree()` to free the context

  Input Parameter:
. ctx - the context to be destroyed

  Level: intermediate

  Note:
  This is not called directly, rather it is passed to `DMSetApplicationContextDestroy()`, `PetscContainerSetDestroy()`,
  `PetscObjectContainterCreate()` and similar routines and then called by the destructor of the associated object.

.seealso: `PetscObject`, `PetscCtxDestroyFn`, `PetscObjectDestroy()`, `DMSetApplicationContextDestroy()`, `PetscContainerSetDestroy()`,
          `PetscObjectContainterCreate()`
@*/
PETSC_EXTERN PetscErrorCode PetscCtxDestroyDefault(void **ctx)
{
  PetscFunctionBegin;
  PetscCall(PetscFree(*ctx));
  PetscFunctionReturn(PETSC_SUCCESS);
}