#define PETSC_DESIRE_FEATURE_TEST_MACROS
/*
   This file defines the initialization of PETSc, including PetscInitialize()
*/
#include <petsc/private/petscimpl.h> /*I "petscsys.h" I*/
#include <petsc/private/logimpl.h>
#include <petscviewer.h>
#include <petsc/private/garbagecollector.h>

#if !defined(PETSC_HAVE_WINDOWS_COMPILERS)
#include <petsc/private/valgrind/valgrind.h>
#endif

#if defined(PETSC_USE_FORTRAN_BINDINGS)
#include <petsc/private/fortranimpl.h>
#endif

#if PetscDefined(USE_COVERAGE)
EXTERN_C_BEGIN
/* newer GCC renamed __gcov_flush() to __gcov_dump(); map the old name onto the new one */
#if defined(PETSC_HAVE___GCOV_DUMP)
#define __gcov_flush(x) __gcov_dump(x)
#endif
void __gcov_flush(void);
EXTERN_C_END
#endif

#if defined(PETSC_SERIALIZE_FUNCTIONS)
PETSC_INTERN PetscFPT PetscFPTData;
PetscFPT PetscFPTData = 0;
#endif

#if PetscDefined(HAVE_SAWS)
#include <petscviewersaws.h>
#endif

/* history file opened by -history; closed in PetscCloseHistoryFile() during finalize */
PETSC_INTERN FILE *petsc_history;

PETSC_INTERN PetscErrorCode PetscInitialize_DynamicLibraries(void);
PETSC_INTERN PetscErrorCode PetscFinalize_DynamicLibraries(void);
PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm, int);
PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm, int);
PETSC_INTERN PetscErrorCode PetscCloseHistoryFile(FILE **);

/* user may set these BEFORE calling PetscInitialize() */
MPI_Comm PETSC_COMM_WORLD = MPI_COMM_NULL;
#if PetscDefined(HAVE_MPI_INIT_THREAD)
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = PETSC_DECIDE;
#else
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_SINGLE;
#endif

/* MPI attribute keyvals used by PETSc to cache data on communicators; created during PetscInitialize() */
PetscMPIInt Petsc_Counter_keyval      = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_InnerComm_keyval    = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_OuterComm_keyval    = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_ShmComm_keyval      = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_CreationIdx_keyval  = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_Garbage_HMap_keyval = MPI_KEYVAL_INVALID;

PetscMPIInt Petsc_SharedWD_keyval  = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_SharedTmp_keyval = MPI_KEYVAL_INVALID;

/*
       Declare and set all the string names of the PETSc enums
*/
const char *const PetscBools[]     = {"FALSE", "TRUE", "PetscBool", "PETSC_", NULL};
const char *const PetscCopyModes[] = {"COPY_VALUES", "OWN_POINTER", "USE_POINTER", "PetscCopyMode", "PETSC_", NULL};

PetscBool PetscPreLoadingUsed = PETSC_FALSE;
PetscBool PetscPreLoadingOn   = PETSC_FALSE;

PetscInt PetscHotRegionDepth;

/* set at startup when the process is detected to be running under Valgrind */
PetscBool PETSC_RUNNING_ON_VALGRIND = PETSC_FALSE;

#if defined(PETSC_HAVE_THREADSAFETY)
PetscSpinlock PetscViewerASCIISpinLockOpen;
PetscSpinlock PetscViewerASCIISpinLockStdout;
PetscSpinlock PetscViewerASCIISpinLockStderr;
PetscSpinlock PetscCommSpinLock;
#endif

extern PetscInt PetscNumBLASThreads;

/*@C
  PetscInitializeNoPointers - Calls PetscInitialize() from C/C++ without the pointers to argc and args

  Collective, No Fortran Support

  Input Parameters:
+ argc     - number of args
. args     - array of command line arguments
. filename - optional name of the program file, pass `NULL` to ignore
- help     - optional help, pass `NULL` to ignore

  Level: advanced

  Notes:
  this is called only by the PETSc Julia interface. Even though it might start MPI it sets the flag to
  indicate that it did NOT start MPI so that the `PetscFinalize()` does not end MPI, thus allowing `PetscInitialize()` to
  be called multiple times from Julia without the problem of trying to initialize MPI more than once.

  Developer Notes:
  Turns off PETSc signal handling to allow Julia to manage signals

.seealso: `PetscInitialize()`, `PetscInitializeFortran()`, `PetscInitializeNoArguments()`
@*/
PetscErrorCode PetscInitializeNoPointers(int argc, char **args, const char *filename, const char *help)
{
  /* local copies so PetscInitialize() can take their addresses without touching the caller's variables */
  int    myargc = argc;
  char **myargs = args;

  PetscFunctionBegin;
  PetscCall(PetscInitialize(&myargc, &myargs, filename, help));
  /* Julia manages its own signals; remove the handler PetscInitialize() installed */
  PetscCall(PetscPopSignalHandler());
  /* pretend PETSc did not start MPI so PetscFinalize() leaves MPI running (see Notes above) */
  PetscBeganMPI = PETSC_FALSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscInitializeNoArguments - Calls `PetscInitialize()` from C/C++ without
  the command line arguments.

  Collective

  Level: advanced

.seealso: `PetscInitialize()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscInitializeNoArguments(void)
{
  int    argc = 0;
  char **args = NULL;

  PetscFunctionBegin;
  PetscCall(PetscInitialize(&argc, &args, NULL, NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PetscInitialized - Determine whether PETSc is initialized.

  Output Parameter:
. isInitialized - `PETSC_TRUE` if PETSc is initialized, `PETSC_FALSE` otherwise

  Level: beginner

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscInitialized(PetscBool *isInitialized)
{
  PetscFunctionBegin;
  /* pointer check only once initialized: PETSc error reporting is not usable before PetscInitialize() */
  if (PetscInitializeCalled) PetscAssertPointer(isInitialized, 1);
  *isInitialized = PetscInitializeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PetscFinalized - Determine whether `PetscFinalize()` has been called yet

  Output Parameter:
.
 isFinalized - `PETSC_TRUE` if PETSc is finalized, `PETSC_FALSE` otherwise

  Level: developer

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscFinalized(PetscBool *isFinalized)
{
  PetscFunctionBegin;
  /* pointer check skipped after finalize: PETSc error reporting is no longer usable then */
  if (!PetscFinalizeCalled) PetscAssertPointer(isFinalized, 1);
  *isFinalized = PetscFinalizeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscOptionsCheckInitial_Private(const char[]);

/*
   This function is the MPI reduction operation used to compute the sum of the
   first half of the datatype and the max of the second half.
*/
MPI_Op MPIU_MAXSUM_OP               = 0;
MPI_Op Petsc_Garbage_SetIntersectOp = 0;

/* user-defined MPI reduction: for each pair, entry 0 takes the max and entry 1 accumulates the sum */
PETSC_INTERN void MPIAPI MPIU_MaxSum_Local(void *in, void *out, int *cnt, MPI_Datatype *datatype)
{
  PetscInt *xin = (PetscInt *)in, *xout = (PetscInt *)out, i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype != MPIU_2INT) {
    /* cannot return an error from an MPI reduction callback, so print and abort */
    PetscErrorCode ierr = (*PetscErrorPrintf)("Can only handle MPIU_2INT data types");
    (void)ierr;
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }

  for (i = 0; i < count; i++) {
    xout[2 * i] = PetscMax(xout[2 * i], xin[2 * i]);
    xout[2 * i + 1] += xin[2 * i + 1];
  }
  PetscFunctionReturnVoid();
}

/*
   Returns the max of the first entry owned by this processor and the
   sum of the second entry.

   The reason sizes[2*i] contains lengths sizes[2*i+1] contains flag of 1 if length is nonzero
   is so that the MPIU_MAXSUM_OP() can set TWO values, if we passed in only sizes[i] with lengths
   there would be no place to store the both needed results.
209 */ 210 PetscErrorCode PetscMaxSum(MPI_Comm comm, const PetscInt sizes[], PetscInt *max, PetscInt *sum) 211 { 212 PetscFunctionBegin; 213 #if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK) 214 { 215 struct { 216 PetscInt max, sum; 217 } work; 218 PetscCallMPI(MPI_Reduce_scatter_block((void *)sizes, &work, 1, MPIU_2INT, MPIU_MAXSUM_OP, comm)); 219 *max = work.max; 220 *sum = work.sum; 221 } 222 #else 223 { 224 PetscMPIInt size, rank; 225 struct { 226 PetscInt max, sum; 227 } *work; 228 PetscCallMPI(MPI_Comm_size(comm, &size)); 229 PetscCallMPI(MPI_Comm_rank(comm, &rank)); 230 PetscCall(PetscMalloc1(size, &work)); 231 PetscCall(MPIU_Allreduce((void *)sizes, work, size, MPIU_2INT, MPIU_MAXSUM_OP, comm)); 232 *max = work[rank].max; 233 *sum = work[rank].sum; 234 PetscCall(PetscFree(work)); 235 } 236 #endif 237 PetscFunctionReturn(PETSC_SUCCESS); 238 } 239 240 #if defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_HAVE_REAL___FP16) 241 #if defined(PETSC_HAVE_REAL___FLOAT128) 242 #include <quadmath.h> 243 #endif 244 MPI_Op MPIU_SUM___FP16___FLOAT128 = 0; 245 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 246 MPI_Op MPIU_SUM = 0; 247 #endif 248 249 PETSC_EXTERN void MPIAPI PetscSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype) 250 { 251 PetscInt i, count = *cnt; 252 253 PetscFunctionBegin; 254 if (*datatype == MPIU_REAL) { 255 PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out; 256 for (i = 0; i < count; i++) xout[i] += xin[i]; 257 } 258 #if defined(PETSC_HAVE_COMPLEX) 259 else if (*datatype == MPIU_COMPLEX) { 260 PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out; 261 for (i = 0; i < count; i++) xout[i] += xin[i]; 262 } 263 #endif 264 #if defined(PETSC_HAVE_REAL___FLOAT128) 265 else if (*datatype == MPIU___FLOAT128) { 266 __float128 *xin = (__float128 *)in, *xout = (__float128 *)out; 267 for (i = 0; i < count; i++) xout[i] += xin[i]; 268 #if defined(PETSC_HAVE_COMPLEX) 269 } else if (*datatype 
== MPIU___COMPLEX128) { 270 __complex128 *xin = (__complex128 *)in, *xout = (__complex128 *)out; 271 for (i = 0; i < count; i++) xout[i] += xin[i]; 272 #endif 273 } 274 #endif 275 #if defined(PETSC_HAVE_REAL___FP16) 276 else if (*datatype == MPIU___FP16) { 277 __fp16 *xin = (__fp16 *)in, *xout = (__fp16 *)out; 278 for (i = 0; i < count; i++) xout[i] += xin[i]; 279 } 280 #endif 281 else { 282 #if !defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_HAVE_REAL___FP16) 283 PetscCallAbort(MPI_COMM_SElF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types")); 284 #elif !defined(PETSC_HAVE_REAL___FP16) 285 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, or MPIU___COMPLEX128 data types")); 286 #elif !defined(PETSC_HAVE_REAL___FLOAT128) 287 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, or MPIU___FP16 data types")); 288 #else 289 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, MPIU___COMPLEX128, or MPIU___FP16 data types")); 290 #endif 291 PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG); 292 } 293 PetscFunctionReturnVoid(); 294 } 295 #endif 296 297 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 298 MPI_Op MPIU_MAX = 0; 299 MPI_Op MPIU_MIN = 0; 300 301 PETSC_EXTERN void MPIAPI PetscMax_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype) 302 { 303 PetscInt i, count = *cnt; 304 305 PetscFunctionBegin; 306 if (*datatype == MPIU_REAL) { 307 PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out; 308 for (i = 0; i < count; i++) xout[i] = PetscMax(xout[i], xin[i]); 309 } 310 #if defined(PETSC_HAVE_COMPLEX) 311 else if (*datatype == MPIU_COMPLEX) { 312 PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out; 313 for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) < PetscRealPartComplex(xin[i]) ? 
xin[i] : xout[i]; 314 } 315 #endif 316 else { 317 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types")); 318 PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG); 319 } 320 PetscFunctionReturnVoid(); 321 } 322 323 PETSC_EXTERN void MPIAPI PetscMin_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype) 324 { 325 PetscInt i, count = *cnt; 326 327 PetscFunctionBegin; 328 if (*datatype == MPIU_REAL) { 329 PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out; 330 for (i = 0; i < count; i++) xout[i] = PetscMin(xout[i], xin[i]); 331 } 332 #if defined(PETSC_HAVE_COMPLEX) 333 else if (*datatype == MPIU_COMPLEX) { 334 PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out; 335 for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) > PetscRealPartComplex(xin[i]) ? xin[i] : xout[i]; 336 } 337 #endif 338 else { 339 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_SCALAR data (i.e. double or complex) types")); 340 PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG); 341 } 342 PetscFunctionReturnVoid(); 343 } 344 #endif 345 346 /* 347 Private routine to delete internal tag/name counter storage when a communicator is freed. 348 349 This is called by MPI, not by users. This is called by MPI_Comm_free() when the communicator that has this data as an attribute is freed. 

  Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_Counter_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *count_val, void *extra_state)
{
  PetscCommCounter      *counter = (PetscCommCounter *)count_val;
  struct PetscCommStash *comms   = counter->comms, *pcomm;

  PetscFunctionBegin;
  PetscCallMPI(PetscInfo(NULL, "Deleting counter data in an MPI_Comm %ld\n", (long)comm));
  PetscCallMPI(PetscFree(counter->iflags));
  /* free every stashed communicator hanging off this counter */
  while (comms) {
    PetscCallMPI(MPI_Comm_free(&comms->comm));
    pcomm = comms;
    comms = comms->next;
    PetscCall(PetscFree(pcomm));
  }
  PetscCallMPI(PetscFree(counter));
  PetscFunctionReturn(MPI_SUCCESS);
}

/*
  This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Comm_delete_attr) or when the user
  calls MPI_Comm_free().

  This is the only entry point for breaking the links between inner and outer comms.

  This is called by MPI, not by users. This is called when MPI_Comm_free() is called on the communicator.

  Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_InnerComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  /* union gives a portable MPI_Comm <-> void* conversion for the attribute value */
  union
  {
    MPI_Comm comm;
    void    *ptr;
  } icomm;

  PetscFunctionBegin;
  if (keyval != Petsc_InnerComm_keyval) SETERRMPI(PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Unexpected keyval");
  icomm.ptr = attr_val;
  if (PetscDefined(USE_DEBUG)) {
    /* Error out if the inner/outer comms are not correctly linked through their Outer/InnerComm attributes */
    PetscMPIInt flg;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ocomm;
    PetscCallMPI(MPI_Comm_get_attr(icomm.comm, Petsc_OuterComm_keyval, &ocomm, &flg));
    if (!flg) SETERRMPI(PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm does not have OuterComm attribute");
    if (ocomm.comm != comm) SETERRMPI(PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm's OuterComm attribute does not point to outer PETSc comm");
  }
  /* break the back-link; this triggers Petsc_OuterComm_Attr_DeleteFn on the inner comm */
  PetscCallMPI(MPI_Comm_delete_attr(icomm.comm, Petsc_OuterComm_keyval));
  PetscCallMPI(PetscInfo(NULL, "User MPI_Comm %ld is being unlinked from inner PETSc comm %ld\n", (long)comm, (long)icomm.comm));
  PetscFunctionReturn(MPI_SUCCESS);
}

/*
 * This is invoked on the inner comm when Petsc_InnerComm_Attr_DeleteFn calls MPI_Comm_delete_attr(). It should not be reached any other way.
 */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_OuterComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  PetscFunctionBegin;
  /* nothing to free here; the attribute only back-references the outer comm */
  PetscCallMPI(PetscInfo(NULL, "Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n", (long)comm));
  PetscFunctionReturn(MPI_SUCCESS);
}

PETSC_EXTERN PetscMPIInt MPIAPI Petsc_ShmComm_Attr_DeleteFn(MPI_Comm, PetscMPIInt, void *, void *);

#if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype, MPI_Aint *, void *);
PETSC_EXTERN PetscMPIInt PetscDataRep_read_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
PETSC_EXTERN PetscMPIInt PetscDataRep_write_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
#endif

/* PETSc-owned MPI error class/code, registered during PetscInitialize() */
PetscMPIInt PETSC_MPI_ERROR_CLASS = MPI_ERR_LASTCODE, PETSC_MPI_ERROR_CODE;

PETSC_INTERN int    PetscGlobalArgc;
PETSC_INTERN char **PetscGlobalArgs, **PetscGlobalArgsFortran;
int            PetscGlobalArgc        = 0;
char         **PetscGlobalArgs        = NULL;
char         **PetscGlobalArgsFortran = NULL;
PetscSegBuffer PetscCitationsList;

/* Creates the citation buffer and registers the standard PETSc citations */
PetscErrorCode PetscCitationsInitialize(void)
{
  PetscFunctionBegin;
  PetscCall(PetscSegBufferCreate(1, 10000, &PetscCitationsList));

  PetscCall(PetscCitationsRegister("@TechReport{petsc-user-ref,\n\
  Author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Steven Benson and Jed Brown\n\
  and Peter Brune and Kris Buschelman and Emil Constantinescu and Lisandro Dalcin and Alp Dener\n\
  and Victor Eijkhout and Jacob Faibussowitsch and William~D. Gropp and V\'{a}clav Hapla and Tobin Isaac and Pierre Jolivet\n\
  and Dmitry Karpeev and Dinesh Kaushik and Matthew~G. Knepley and Fande Kong and Scott Kruger\n\
  and Dave~A. May and Lois Curfman McInnes and Richard Tran Mills and Lawrence Mitchell and Todd Munson\n\
  and Jose~E. Roman and Karl Rupp and Patrick Sanan and Jason Sarich and Barry~F. Smith\n\
  and Stefano Zampini and Hong Zhang and Hong Zhang and Junchao Zhang},\n\
  Title = {{PETSc/TAO} Users Manual},\n\
  Number = {ANL-21/39 - Revision 3.21},\n\
  Doi = {10.2172/2205494},\n\
  Institution = {Argonne National Laboratory},\n\
  Year = {2024}\n}\n",
                                   NULL));

  PetscCall(PetscCitationsRegister("@InProceedings{petsc-efficient,\n\
  Author = {Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith},\n\
  Title = {Efficient Management of Parallelism in Object Oriented Numerical Software Libraries},\n\
  Booktitle = {Modern Software Tools in Scientific Computing},\n\
  Editor = {E. Arge and A. M. Bruaset and H. P. Langtangen},\n\
  Pages = {163--202},\n\
  Publisher = {Birkh{\\\"{a}}user Press},\n\
  Year = {1997}\n}\n",
                                   NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static char programname[PETSC_MAX_PATH_LEN] = ""; /* HP includes entire path in name */

/* Records the program name (typically argv[0]) for later retrieval by PetscGetProgramName() */
PetscErrorCode PetscSetProgramName(const char name[])
{
  PetscFunctionBegin;
  PetscCall(PetscStrncpy(programname, name, sizeof(programname)));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscGetProgramName - Gets the name of the running program.

  Not Collective

  Input Parameter:
. len - length of the string name

  Output Parameter:
.
 name - the name of the running program, provide a string of length `PETSC_MAX_PATH_LEN`

  Level: advanced

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetProgramName(char name[], size_t len)
{
  PetscFunctionBegin;
  PetscCall(PetscStrncpy(name, programname, len));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscGetArgs - Allows you to access the raw command line arguments anywhere
  after PetscInitialize() is called but before `PetscFinalize()`.

  Not Collective, No Fortran Support

  Output Parameters:
+ argc - count of number of command line arguments
- args - the command line arguments

  Level: intermediate

  Notes:
  This is usually used to pass the command line arguments into other libraries
  that are called internally deep in PETSc or the application.

  The first argument contains the program name as is normal for C programs.

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetArgs(int *argc, char ***args)
{
  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  /* returns the globals stashed by PetscInitialize(); no copy, caller must not free */
  *argc = PetscGlobalArgc;
  *args = PetscGlobalArgs;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscGetArguments - Allows you to access the command line arguments anywhere
  after `PetscInitialize()` is called but before `PetscFinalize()`.

  Not Collective, No Fortran Support

  Output Parameter:
. args - the command line arguments

  Level: intermediate

  Note:
  This does NOT start with the program name and IS `NULL` terminated (final arg is void)

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscFreeArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetArguments(char ***args)
{
  PetscInt i, argc = PetscGlobalArgc;

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  if (!argc) {
    *args = NULL;
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  /* deep-copy argv[1..argc-1] into a NULL-terminated array; free with PetscFreeArguments() */
  PetscCall(PetscMalloc1(argc, args));
  for (i = 0; i < argc - 1; i++) PetscCall(PetscStrallocpy(PetscGlobalArgs[i + 1], &(*args)[i]));
  (*args)[argc - 1] = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscFreeArguments - Frees the memory obtained with `PetscGetArguments()`

  Not Collective, No Fortran Support

  Output Parameter:
.
 args - the command line arguments

  Level: intermediate

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscGetArguments()`
@*/
PetscErrorCode PetscFreeArguments(char **args)
{
  PetscFunctionBegin;
  if (args) {
    PetscInt i = 0;

    /* array is NULL-terminated (see PetscGetArguments()) */
    while (args[i]) PetscCall(PetscFree(args[i++]));
    PetscCall(PetscFree(args));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PetscDefined(HAVE_SAWS)
#include <petscconfiginfo.h>

/* Starts the SAWs web server on rank 0 and publishes the default dashboard, driven by the -saws_* options */
PETSC_INTERN PetscErrorCode PetscInitializeSAWs(const char help[])
{
  PetscFunctionBegin;
  /* only rank 0 runs the web server */
  if (!PetscGlobalRank) {
    char      cert[PETSC_MAX_PATH_LEN], root[PETSC_MAX_PATH_LEN], *intro, programname[64], *appline, *options, version[64];
    int       port;
    PetscBool flg, rootlocal = PETSC_FALSE, flg2, selectport = PETSC_FALSE;
    size_t    applinelen, introlen;
    char      sawsurl[256];

    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_log", &flg));
    if (flg) {
      char sawslog[PETSC_MAX_PATH_LEN];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_log", sawslog, sizeof(sawslog), NULL));
      if (sawslog[0]) {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (sawslog));
      } else {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (NULL));
      }
    }
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_https", cert, sizeof(cert), &flg));
    if (flg) PetscCallSAWs(SAWs_Set_Use_HTTPS, (cert));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select", &selectport, NULL));
    if (selectport) {
      PetscCallSAWs(SAWs_Get_Available_Port, (&port));
      PetscCallSAWs(SAWs_Set_Port, (port));
    } else {
      PetscCall(PetscOptionsGetInt(NULL, NULL, "-saws_port", &port, &flg));
      if (flg) PetscCallSAWs(SAWs_Set_Port, (port));
    }
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_root", root, sizeof(root), &flg));
    if (flg) {
      PetscCallSAWs(SAWs_Set_Document_Root, (root));
      /* rootlocal: serving from the current directory, so the source file may be linkable below */
      PetscCall(PetscStrcmp(root, ".", &rootlocal));
    } else {
      PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_options", &flg));
      if (flg) {
        PetscCall(PetscStrreplace(PETSC_COMM_WORLD, "${PETSC_DIR}/share/petsc/saws", root, sizeof(root)));
        PetscCallSAWs(SAWs_Set_Document_Root, (root));
      }
    }
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_local", &flg2));
    if (flg2) {
      char jsdir[PETSC_MAX_PATH_LEN];
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_SUP, "-saws_local option requires -saws_root option");
      PetscCall(PetscSNPrintf(jsdir, sizeof(jsdir), "%s/js", root));
      PetscCall(PetscTestDirectory(jsdir, 'r', &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "-saws_local option requires js directory in root directory");
      PetscCallSAWs(SAWs_Push_Local_Header, ());
    }
    PetscCall(PetscGetProgramName(programname, sizeof(programname)));
    PetscCall(PetscStrlen(help, &applinelen));
    introlen = 4096 + applinelen;
    applinelen += 1024;
    PetscCall(PetscMalloc(applinelen, &appline));
    PetscCall(PetscMalloc(introlen, &intro));

    if (rootlocal) {
      /* link to the program source only if <programname>.c.html actually exists locally */
      PetscCall(PetscSNPrintf(appline, applinelen, "%s.c.html", programname));
      PetscCall(PetscTestFile(appline, 'r', &rootlocal));
    }
    PetscCall(PetscOptionsGetAll(NULL, &options));
    if (rootlocal && help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running <a href=\"%s.c.html\">%s</a> %s</center><br><center><pre>%s</pre></center><br>\n", programname, programname, options, help));
    } else if (help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center>Running %s %s</center><br><center><pre>%s</pre></center><br>", programname, options, help));
    } else {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running %s %s</center><br>\n", programname, options));
    }
    PetscCall(PetscFree(options));
    PetscCall(PetscGetVersion(version, sizeof(version)));
    PetscCall(PetscSNPrintf(intro, introlen,
                            "<body>\n"
                            "<center><h2> <a href=\"https://petsc.org/\">PETSc</a> Application Web server powered by <a href=\"https://bitbucket.org/saws/saws\">SAWs</a> </h2></center>\n"
                            "<center>This is the default PETSc application dashboard, from it you can access any published PETSc objects or logging data</center><br><center>%s configured with %s</center><br>\n"
                            "%s",
                            version, petscconfigureoptions, appline));
    PetscCallSAWs(SAWs_Push_Body, ("index.html", 0, intro));
    PetscCall(PetscFree(intro));
    PetscCall(PetscFree(appline));
    if (selectport) {
      PetscBool silent;

      /* another process may have grabbed the port so keep trying */
      while (SAWs_Initialize()) {
        PetscCallSAWs(SAWs_Get_Available_Port, (&port));
        PetscCallSAWs(SAWs_Set_Port, (port));
      }

      PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select_silent", &silent, NULL));
      if (!silent) {
        PetscCallSAWs(SAWs_Get_FullURL, (sizeof(sawsurl), sawsurl));
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Point your browser to %s for SAWs\n", sawsurl));
      }
    } else {
      PetscCallSAWs(SAWs_Initialize, ());
    }
    PetscCall(PetscCitationsRegister("@TechReport{ saws,\n"
                                     "  Author = {Matt Otten and Jed Brown and Barry Smith},\n"
                                     "  Title = {Scientific Application Web Server (SAWs) Users Manual},\n"
                                     "  Institution = {Argonne National Laboratory},\n"
                                     "  Year = 2013\n}\n",
                                     NULL));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Things must be done before MPI_Init() when MPI is not yet initialized, and can be shared between C init and Fortran init */
PETSC_INTERN PetscErrorCode PetscPreMPIInit_Private(void)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_HWLOC_SOLARIS_BUG)
  /* see MPI.py for details on this bug */
  (void)setenv("HWLOC_COMPONENTS", "-x86", 1);
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PetscDefined(HAVE_ADIOS)
#include <adios.h>
#include <adios_read.h>
int64_t Petsc_adios_group;
#endif
#if PetscDefined(HAVE_OPENMP)
#include <omp.h>
PetscInt PetscNumOMPThreads;
#endif

#include <petsc/private/deviceimpl.h>
#if PetscDefined(HAVE_CUDA)
#include <petscdevice_cuda.h>
// REMOVE ME
cudaStream_t PetscDefaultCudaStream = NULL;
#endif
#if PetscDefined(HAVE_HIP)
#include <petscdevice_hip.h>
// REMOVE ME
hipStream_t PetscDefaultHipStream = NULL;
#endif

#if PetscDefined(HAVE_DLFCN_H)
#include <dlfcn.h>
#endif
PETSC_INTERN PetscErrorCode PetscLogInitialize(void);
#if PetscDefined(HAVE_VIENNACL)
PETSC_EXTERN PetscErrorCode PetscViennaCLInit(void);
PetscBool                   PetscViennaCLSynchronize = PETSC_FALSE;
#endif

PetscBool PetscCIEnabled = PETSC_FALSE, PetscCIEnabledPortableErrorOutput = PETSC_FALSE;

/*
  PetscInitialize_Common - shared code between C and Fortran initialization

  prog:     program name
  file:     optional PETSc database file name. Might be in Fortran string format when 'ftn' is true
  help:     program help message
  ftn:      is it called from Fortran initialization (petscinitializef_)?
749 readarguments,len: used when fortran is true 750 */ 751 PETSC_INTERN PetscErrorCode PetscInitialize_Common(const char *prog, const char *file, const char *help, PetscBool ftn, PetscBool readarguments, PetscInt len) 752 { 753 PetscMPIInt size; 754 PetscBool flg = PETSC_TRUE; 755 char hostname[256]; 756 PetscBool blas_view_flag = PETSC_FALSE; 757 758 PetscFunctionBegin; 759 if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS); 760 /* these must be initialized in a routine, not as a constant declaration */ 761 PETSC_STDOUT = stdout; 762 PETSC_STDERR = stderr; 763 764 /* PetscCall can be used from now */ 765 PetscErrorHandlingInitialized = PETSC_TRUE; 766 767 /* 768 The checking over compatible runtime libraries is complicated by the MPI ABI initiative 769 https://wiki.mpich.org/mpich/index.php/ABI_Compatibility_Initiative which started with 770 MPICH v3.1 (Released February 2014) 771 IBM MPI v2.1 (December 2014) 772 Intel MPI Library v5.0 (2014) 773 Cray MPT v7.0.0 (June 2014) 774 As of July 31, 2017 the ABI number still appears to be 12, that is all of the versions 775 listed above and since that time are compatible. 776 777 Unfortunately the MPI ABI initiative has not defined a way to determine the ABI number 778 at compile time or runtime. Thus we will need to systematically track the allowed versions 779 and how they are represented in the mpi.h and MPI_Get_library_version() output in order 780 to perform the checking. 781 782 Currently we only check for pre MPI ABI versions (and packages that do not follow the MPI ABI). 783 784 Questions: 785 786 Should the checks for ABI incompatibility be only on the major version number below? 787 Presumably the output to stderr will be removed before a release. 
788 */ 789 790 #if defined(PETSC_HAVE_MPI_GET_LIBRARY_VERSION) 791 { 792 char mpilibraryversion[MPI_MAX_LIBRARY_VERSION_STRING]; 793 PetscMPIInt mpilibraryversionlength; 794 795 PetscCallMPI(MPI_Get_library_version(mpilibraryversion, &mpilibraryversionlength)); 796 /* check for MPICH versions before MPI ABI initiative */ 797 #if defined(MPICH_VERSION) 798 #if MPICH_NUMVERSION < 30100000 799 { 800 char *ver, *lf; 801 PetscBool flg = PETSC_FALSE; 802 803 PetscCall(PetscStrstr(mpilibraryversion, "MPICH Version:", &ver)); 804 if (ver) { 805 PetscCall(PetscStrchr(ver, '\n', &lf)); 806 if (lf) { 807 *lf = 0; 808 PetscCall(PetscStrendswith(ver, MPICH_VERSION, &flg)); 809 } 810 } 811 if (!flg) { 812 PetscCall(PetscInfo(NULL, "PETSc warning --- MPICH library version \n%s does not match what PETSc was compiled with %s.\n", mpilibraryversion, MPICH_VERSION)); 813 flg = PETSC_TRUE; 814 } 815 } 816 #endif 817 /* check for Open MPI version, it is not part of the MPI ABI initiative (is it part of another initiative that needs to be handled?) 
*/ 818 #elif defined(PETSC_HAVE_OPENMPI) 819 { 820 char *ver, bs[MPI_MAX_LIBRARY_VERSION_STRING], *bsf; 821 PetscBool flg = PETSC_FALSE; 822 #define PSTRSZ 2 823 char ompistr1[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"Open MPI", "FUJITSU MPI"}; 824 char ompistr2[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"v", "Library "}; 825 int i; 826 for (i = 0; i < PSTRSZ; i++) { 827 PetscCall(PetscStrstr(mpilibraryversion, ompistr1[i], &ver)); 828 if (ver) { 829 PetscCall(PetscSNPrintf(bs, MPI_MAX_LIBRARY_VERSION_STRING, "%s%d.%d", ompistr2[i], PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR)); 830 PetscCall(PetscStrstr(ver, bs, &bsf)); 831 if (bsf) flg = PETSC_TRUE; 832 break; 833 } 834 } 835 if (!flg) { 836 PetscCall(PetscInfo(NULL, "PETSc warning --- Open MPI library version \n%s does not match what PETSc was compiled with %d.%d.\n", mpilibraryversion, PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR)); 837 flg = PETSC_TRUE; 838 } 839 } 840 #endif 841 } 842 #endif 843 844 #if defined(PETSC_HAVE_DLADDR) && !(defined(__cray__) && defined(__clang__)) 845 /* These symbols are currently in the Open MPI and MPICH libraries; they may not always be, in that case the test will simply not detect the problem */ 846 PetscCheck(!dlsym(RTLD_DEFAULT, "ompi_mpi_init") || !dlsym(RTLD_DEFAULT, "MPID_Abort"), PETSC_COMM_SELF, PETSC_ERR_MPI_LIB_INCOMP, "Application was linked against both Open MPI and MPICH based MPI libraries and will not run correctly"); 847 #endif 848 849 /* on Windows - set printf to default to printing 2 digit exponents */ 850 #if defined(PETSC_HAVE__SET_OUTPUT_FORMAT) 851 _set_output_format(_TWO_DIGIT_EXPONENT); 852 #endif 853 854 PetscCall(PetscOptionsCreateDefault()); 855 856 PetscFinalizeCalled = PETSC_FALSE; 857 858 PetscCall(PetscSetProgramName(prog)); 859 PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockOpen)); 860 PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStdout)); 861 
PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStderr)); 862 PetscCall(PetscSpinlockCreate(&PetscCommSpinLock)); 863 864 if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD; 865 PetscCallMPI(MPI_Comm_set_errhandler(PETSC_COMM_WORLD, MPI_ERRORS_RETURN)); 866 867 if (PETSC_MPI_ERROR_CLASS == MPI_ERR_LASTCODE) { 868 PetscCallMPI(MPI_Add_error_class(&PETSC_MPI_ERROR_CLASS)); 869 PetscCallMPI(MPI_Add_error_code(PETSC_MPI_ERROR_CLASS, &PETSC_MPI_ERROR_CODE)); 870 } 871 872 /* Done after init due to a bug in MPICH-GM? */ 873 PetscCall(PetscErrorPrintfInitialize()); 874 875 PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PetscGlobalRank)); 876 PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PetscGlobalSize)); 877 878 MPIU_BOOL = MPI_INT; 879 MPIU_ENUM = MPI_INT; 880 MPIU_FORTRANADDR = (sizeof(void *) == sizeof(int)) ? MPI_INT : MPIU_INT64; 881 if (sizeof(size_t) == sizeof(unsigned)) MPIU_SIZE_T = MPI_UNSIGNED; 882 else if (sizeof(size_t) == sizeof(unsigned long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG; 883 #if defined(PETSC_SIZEOF_LONG_LONG) 884 else if (sizeof(size_t) == sizeof(unsigned long long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG_LONG; 885 #endif 886 else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_SUP_SYS, "Could not find MPI type for size_t"); 887 888 /* 889 Initialized the global complex variable; this is because with 890 shared libraries the constructors for global variables 891 are not called; at least on IRIX. 892 */ 893 #if defined(PETSC_HAVE_COMPLEX) 894 { 895 #if defined(PETSC_CLANGUAGE_CXX) && !defined(PETSC_USE_REAL___FLOAT128) 896 PetscComplex ic(0.0, 1.0); 897 PETSC_i = ic; 898 #else 899 PETSC_i = _Complex_I; 900 #endif 901 } 902 #endif /* PETSC_HAVE_COMPLEX */ 903 904 /* 905 Create the PETSc MPI reduction operator that sums of the first 906 half of the entries and maxes the second half. 
907 */ 908 PetscCallMPI(MPI_Op_create(MPIU_MaxSum_Local, 1, &MPIU_MAXSUM_OP)); 909 910 #if defined(PETSC_HAVE_REAL___FLOAT128) 911 PetscCallMPI(MPI_Type_contiguous(2, MPI_DOUBLE, &MPIU___FLOAT128)); 912 PetscCallMPI(MPI_Type_commit(&MPIU___FLOAT128)); 913 #if defined(PETSC_HAVE_COMPLEX) 914 PetscCallMPI(MPI_Type_contiguous(4, MPI_DOUBLE, &MPIU___COMPLEX128)); 915 PetscCallMPI(MPI_Type_commit(&MPIU___COMPLEX128)); 916 #endif 917 #endif 918 #if defined(PETSC_HAVE_REAL___FP16) 919 PetscCallMPI(MPI_Type_contiguous(2, MPI_CHAR, &MPIU___FP16)); 920 PetscCallMPI(MPI_Type_commit(&MPIU___FP16)); 921 #endif 922 923 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 924 PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM)); 925 PetscCallMPI(MPI_Op_create(PetscMax_Local, 1, &MPIU_MAX)); 926 PetscCallMPI(MPI_Op_create(PetscMin_Local, 1, &MPIU_MIN)); 927 #elif defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_HAVE_REAL___FP16) 928 PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM___FP16___FLOAT128)); 929 #endif 930 931 PetscCallMPI(MPI_Type_contiguous(2, MPIU_SCALAR, &MPIU_2SCALAR)); 932 PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp)); 933 PetscCallMPI(MPI_Type_commit(&MPIU_2SCALAR)); 934 935 /* create datatypes used by MPIU_MAXLOC, MPIU_MINLOC and PetscSplitReduction_Op */ 936 #if !defined(PETSC_HAVE_MPIUNI) 937 { 938 PetscMPIInt blockSizes[2] = {1, 1}; 939 MPI_Aint blockOffsets[2] = {offsetof(struct petsc_mpiu_real_int, v), offsetof(struct petsc_mpiu_real_int, i)}; 940 MPI_Datatype blockTypes[2] = {MPIU_REAL, MPIU_INT}, tmpStruct; 941 942 PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct)); 943 PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_real_int), &MPIU_REAL_INT)); 944 PetscCallMPI(MPI_Type_free(&tmpStruct)); 945 PetscCallMPI(MPI_Type_commit(&MPIU_REAL_INT)); 946 } 947 { 948 PetscMPIInt blockSizes[2] = {1, 1}; 949 MPI_Aint 
blockOffsets[2] = {offsetof(struct petsc_mpiu_scalar_int, v), offsetof(struct petsc_mpiu_scalar_int, i)}; 950 MPI_Datatype blockTypes[2] = {MPIU_SCALAR, MPIU_INT}, tmpStruct; 951 952 PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct)); 953 PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_scalar_int), &MPIU_SCALAR_INT)); 954 PetscCallMPI(MPI_Type_free(&tmpStruct)); 955 PetscCallMPI(MPI_Type_commit(&MPIU_SCALAR_INT)); 956 } 957 #endif 958 959 #if defined(PETSC_USE_64BIT_INDICES) 960 PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT)); 961 PetscCallMPI(MPI_Type_commit(&MPIU_2INT)); 962 #endif 963 PetscCallMPI(MPI_Type_contiguous(4, MPI_INT, &MPI_4INT)); 964 PetscCallMPI(MPI_Type_commit(&MPI_4INT)); 965 PetscCallMPI(MPI_Type_contiguous(4, MPIU_INT, &MPIU_4INT)); 966 PetscCallMPI(MPI_Type_commit(&MPIU_4INT)); 967 968 /* 969 Attributes to be set on PETSc communicators 970 */ 971 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &Petsc_Counter_keyval, (void *)0)); 972 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_InnerComm_Attr_DeleteFn, &Petsc_InnerComm_keyval, (void *)0)); 973 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_OuterComm_Attr_DeleteFn, &Petsc_OuterComm_keyval, (void *)0)); 974 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_ShmComm_Attr_DeleteFn, &Petsc_ShmComm_keyval, (void *)0)); 975 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_CreationIdx_keyval, (void *)0)); 976 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Garbage_HMap_keyval, (void *)0)); 977 978 #if defined(PETSC_USE_FORTRAN_BINDINGS) 979 if (ftn) PetscCall(PetscInitFortran_Private(readarguments, file, len)); 980 else 981 #endif 982 PetscCall(PetscOptionsInsert(NULL, &PetscGlobalArgc, &PetscGlobalArgs, file)); 983 984 /* call a second time so it can 
look in the options database */ 985 PetscCall(PetscErrorPrintfInitialize()); 986 987 /* 988 Check system options and print help 989 */ 990 PetscCall(PetscOptionsCheckInitial_Private(help)); 991 992 /* 993 Creates the logging data structures; this is enabled even if logging is not turned on 994 This is the last thing we do before returning to the user code to prevent having the 995 logging numbers contaminated by any startup time associated with MPI 996 */ 997 PetscCall(PetscLogInitialize()); 998 999 /* 1000 Initialize PetscDevice and PetscDeviceContext 1001 1002 Note to any future devs thinking of moving this, proper initialization requires: 1003 1. MPI initialized 1004 2. Options DB initialized 1005 3. Petsc error handling initialized, specifically signal handlers. This expects to set up 1006 its own SIGSEV handler via the push/pop interface. 1007 4. Logging initialized 1008 */ 1009 PetscCall(PetscDeviceInitializeFromOptions_Internal(PETSC_COMM_WORLD)); 1010 1011 #if PetscDefined(HAVE_VIENNACL) 1012 flg = PETSC_FALSE; 1013 PetscCall(PetscOptionsHasName(NULL, NULL, "-log_view", &flg)); 1014 if (!flg) PetscCall(PetscOptionsGetBool(NULL, NULL, "-viennacl_synchronize", &flg, NULL)); 1015 PetscViennaCLSynchronize = flg; 1016 PetscCall(PetscViennaCLInit()); 1017 #endif 1018 1019 PetscCall(PetscCitationsInitialize()); 1020 1021 #if defined(PETSC_HAVE_SAWS) 1022 PetscCall(PetscInitializeSAWs(ftn ? NULL : help)); 1023 flg = PETSC_FALSE; 1024 PetscCall(PetscOptionsHasName(NULL, NULL, "-stack_view", &flg)); 1025 if (flg) PetscCall(PetscStackViewSAWs()); 1026 #endif 1027 1028 /* 1029 Load the dynamic libraries (on machines that support them), this registers all 1030 the solvers etc. 
(On non-dynamic machines this initializes the PetscDraw and PetscViewer classes) 1031 */ 1032 PetscCall(PetscInitialize_DynamicLibraries()); 1033 1034 PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size)); 1035 PetscCall(PetscInfo(NULL, "PETSc successfully started: number of processors = %d\n", size)); 1036 PetscCall(PetscGetHostName(hostname, sizeof(hostname))); 1037 PetscCall(PetscInfo(NULL, "Running on machine: %s\n", hostname)); 1038 #if defined(PETSC_HAVE_OPENMP) 1039 { 1040 PetscBool omp_view_flag; 1041 char *threads = getenv("OMP_NUM_THREADS"); 1042 1043 if (threads) { 1044 PetscCall(PetscInfo(NULL, "Number of OpenMP threads %s (as given by OMP_NUM_THREADS)\n", threads)); 1045 (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumOMPThreads); 1046 } else { 1047 PetscNumOMPThreads = (PetscInt)omp_get_max_threads(); 1048 PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (as given by omp_get_max_threads())\n", PetscNumOMPThreads)); 1049 } 1050 PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "OpenMP options", "Sys"); 1051 PetscCall(PetscOptionsInt("-omp_num_threads", "Number of OpenMP threads to use (can also use environmental variable OMP_NUM_THREADS", "None", PetscNumOMPThreads, &PetscNumOMPThreads, &flg)); 1052 PetscCall(PetscOptionsName("-omp_view", "Display OpenMP number of threads", NULL, &omp_view_flag)); 1053 PetscOptionsEnd(); 1054 if (flg) { 1055 PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (given by -omp_num_threads)\n", PetscNumOMPThreads)); 1056 omp_set_num_threads((int)PetscNumOMPThreads); 1057 } 1058 if (omp_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "OpenMP: number of threads %" PetscInt_FMT "\n", PetscNumOMPThreads)); 1059 } 1060 #endif 1061 1062 PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "BLAS options", "Sys"); 1063 PetscCall(PetscOptionsName("-blas_view", "Display number of threads to use for BLAS operations", NULL, &blas_view_flag)); 1064 #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) || 
defined(PETSC_HAVE_MKL_SET_NUM_THREADS) || defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS) 1065 { 1066 char *threads = NULL; 1067 1068 /* determine any default number of threads requested in the environment; TODO: Apple libraries? */ 1069 #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) 1070 threads = getenv("BLIS_NUM_THREADS"); 1071 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by BLIS_NUM_THREADS\n", threads)); 1072 if (!threads) { 1073 threads = getenv("OMP_NUM_THREADS"); 1074 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by OMP_NUM_THREADS\n", threads)); 1075 } 1076 #elif defined(PETSC_HAVE_MKL_SET_NUM_THREADS) 1077 threads = getenv("MKL_NUM_THREADS"); 1078 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by MKL_NUM_THREADS\n", threads)); 1079 if (!threads) { 1080 threads = getenv("OMP_NUM_THREADS"); 1081 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by OMP_NUM_THREADS\n", threads)); 1082 } 1083 #elif defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS) 1084 threads = getenv("OPENBLAS_NUM_THREADS"); 1085 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OPENBLAS_NUM_THREADS\n", threads)); 1086 if (!threads) { 1087 threads = getenv("OMP_NUM_THREADS"); 1088 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OMP_NUM_THREADS\n", threads)); 1089 } 1090 #endif 1091 if (threads) (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumBLASThreads); 1092 PetscCall(PetscOptionsInt("-blas_num_threads", "Number of threads to use for BLAS operations", "None", PetscNumBLASThreads, &PetscNumBLASThreads, &flg)); 1093 if (flg) PetscCall(PetscInfo(NULL, "BLAS: Command line number of BLAS thread %" PetscInt_FMT "given by -blas_num_threads\n", PetscNumBLASThreads)); 1094 if (flg || threads) { 1095 
PetscCall(PetscBLASSetNumThreads(PetscNumBLASThreads)); 1096 if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: number of threads %" PetscInt_FMT "\n", PetscNumBLASThreads)); 1097 } 1098 } 1099 #elif defined(PETSC_HAVE_APPLE_ACCELERATE) 1100 PetscCall(PetscInfo(NULL, "BLAS: Apple Accelerate library, thread support with no user control\n")); 1101 if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: Apple Accelerate library, thread support with no user control\n")); 1102 #else 1103 if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: no thread support\n")); 1104 #endif 1105 PetscOptionsEnd(); 1106 1107 #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32) 1108 /* 1109 Tell MPI about our own data representation converter, this would/should be used if extern32 is not supported by the MPI 1110 1111 Currently not used because it is not supported by MPICH. 1112 */ 1113 if (!PetscBinaryBigEndian()) PetscCallMPI(MPI_Register_datarep((char *)"petsc", PetscDataRep_read_conv_fn, PetscDataRep_write_conv_fn, PetscDataRep_extent_fn, NULL)); 1114 #endif 1115 1116 #if defined(PETSC_SERIALIZE_FUNCTIONS) 1117 PetscCall(PetscFPTCreate(10000)); 1118 #endif 1119 1120 #if defined(PETSC_HAVE_HWLOC) 1121 { 1122 PetscViewer viewer; 1123 PetscCall(PetscOptionsCreateViewer(PETSC_COMM_WORLD, NULL, NULL, "-process_view", &viewer, NULL, &flg)); 1124 if (flg) { 1125 PetscCall(PetscProcessPlacementView(viewer)); 1126 PetscCall(PetscViewerDestroy(&viewer)); 1127 } 1128 } 1129 #endif 1130 1131 flg = PETSC_TRUE; 1132 PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewfromoptions", &flg, NULL)); 1133 if (!flg) PetscCall(PetscOptionsPushCreateViewerOff(PETSC_TRUE)); 1134 1135 #if defined(PETSC_HAVE_ADIOS) 1136 PetscCallExternal(adios_init_noxml, PETSC_COMM_WORLD); 1137 PetscCallExternal(adios_declare_group, &Petsc_adios_group, "PETSc", "", adios_stat_default); 1138 PetscCallExternal(adios_select_method, Petsc_adios_group, "MPI", "", ""); 1139 
PetscCallExternal(adios_read_init_method, ADIOS_READ_METHOD_BP, PETSC_COMM_WORLD, ""); 1140 #endif 1141 1142 #if defined(__VALGRIND_H) 1143 PETSC_RUNNING_ON_VALGRIND = RUNNING_ON_VALGRIND ? PETSC_TRUE : PETSC_FALSE; 1144 #if defined(PETSC_USING_DARWIN) && defined(PETSC_BLASLAPACK_SDOT_RETURNS_DOUBLE) 1145 if (PETSC_RUNNING_ON_VALGRIND) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING: Running valgrind with the macOS native BLAS and LAPACK can fail. If it fails, try configuring with --download-fblaslapack or --download-f2cblaslapack")); 1146 #endif 1147 #endif 1148 /* 1149 Set flag that we are completely initialized 1150 */ 1151 PetscInitializeCalled = PETSC_TRUE; 1152 1153 PetscCall(PetscOptionsHasName(NULL, NULL, "-python", &flg)); 1154 if (flg) PetscCall(PetscPythonInitialize(NULL, NULL)); 1155 1156 PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg)); 1157 if (flg) PetscCall(PetscInfo(NULL, "Running MPI Linear Solver Server\n")); 1158 if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerBegin()); 1159 else PetscCheck(!flg, PETSC_COMM_WORLD, PETSC_ERR_SUP, "PETSc configured using -with-single-library=0; -mpi_linear_solver_server not supported in that case"); 1160 PetscFunctionReturn(PETSC_SUCCESS); 1161 } 1162 1163 // "Unknown section 'Environmental Variables'" 1164 // PetscClangLinter pragma disable: -fdoc-section-header-unknown 1165 /*@C 1166 PetscInitialize - Initializes the PETSc database and MPI. 1167 `PetscInitialize()` calls MPI_Init() if that has yet to be called, 1168 so this routine should always be called near the beginning of 1169 your program -- usually the very first line! 1170 1171 Collective on `MPI_COMM_WORLD` or `PETSC_COMM_WORLD` if it has been set 1172 1173 Input Parameters: 1174 + argc - count of number of command line arguments 1175 . args - the command line arguments 1176 . file - [optional] PETSc database file, append ":yaml" to filename to specify YAML options format. 
1177 Use NULL or empty string to not check for code specific file. 1178 Also checks ~/.petscrc, .petscrc and petscrc. 1179 Use -skip_petscrc in the code specific file (or command line) to skip ~/.petscrc, .petscrc and petscrc files. 1180 - help - [optional] Help message to print, use NULL for no message 1181 1182 If you wish PETSc code to run ONLY on a subcommunicator of `MPI_COMM_WORLD`, create that 1183 communicator first and assign it to `PETSC_COMM_WORLD` BEFORE calling `PetscInitialize()`. Thus if you are running a 1184 four process job and two processes will run PETSc and have `PetscInitialize()` and `PetscFinalize()` and two processes will not, 1185 then do this. If ALL processes in the job are using `PetscInitialize()` and `PetscFinalize()` then you don't need to do this, even 1186 if different subcommunicators of the job are doing different things with PETSc. 1187 1188 Options Database Keys: 1189 + -help [intro] - prints help method for each option; if intro is given the program stops after printing the introductory help message 1190 . -start_in_debugger [noxterm,dbx,xdb,gdb,...] - Starts program in debugger 1191 . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected 1192 . -on_error_emacs <machinename> - causes emacsclient to jump to error file 1193 . -on_error_abort - calls `abort()` when error detected (no traceback) 1194 . -on_error_mpiabort - calls `MPI_Abort()` when error detected 1195 . -error_output_stdout - prints PETSc error messages to stdout instead of the default stderr 1196 . -error_output_none - does not print the error messages (but handles errors in the same way as if this was not called) 1197 . -debugger_ranks [rank1,rank2,...] - Indicates ranks to start in debugger 1198 . -debugger_pause [sleeptime] (in seconds) - Pauses debugger 1199 . -stop_for_debugger - Print message on how to attach debugger manually to 1200 process and wait (-debugger_pause) seconds for attachment 1201 . 
-malloc_dump - prints a list of all unfreed memory at the end of the run 1202 . -malloc_test - like -malloc_dump -malloc_debug, but only active for debugging builds, ignored in optimized build. May want to set in PETSC_OPTIONS environmental variable 1203 . -malloc_view - show a list of all allocated memory during `PetscFinalize()` 1204 . -malloc_view_threshold <t> - only list memory allocations of size greater than t with -malloc_view 1205 . -malloc_requested_size - malloc logging will record the requested size rather than size after alignment 1206 . -fp_trap - Stops on floating point exceptions 1207 . -no_signal_handler - Indicates not to trap error signals 1208 . -shared_tmp - indicates /tmp directory is shared by all processors 1209 . -not_shared_tmp - each processor has own /tmp 1210 . -tmp - alternative name of /tmp directory 1211 . -get_total_flops - returns total flops done by all processors 1212 - -memory_view - Print memory usage at end of run 1213 1214 Options Database Keys for Option Database: 1215 + -skip_petscrc - skip the default option files ~/.petscrc, .petscrc, petscrc 1216 . -options_monitor - monitor all set options to standard output for the whole program run 1217 - -options_monitor_cancel - cancel options monitoring hard-wired using `PetscOptionsMonitorSet()` 1218 1219 Options -options_monitor_{all,cancel} are 1220 position-independent and apply to all options set since the PETSc start. 1221 They can be used also in option files. 1222 1223 See `PetscOptionsMonitorSet()` to do monitoring programmatically. 1224 1225 Options Database Keys for Profiling: 1226 See Users-Manual: ch_profiling for details. 1227 + -info [filename][:[~]<list,of,classnames>[:[~]self]] - Prints verbose information. See `PetscInfo()`. 1228 . -log_sync - Enable barrier synchronization for all events. This option is useful to debug imbalance within each event, 1229 however it slows things down and gives a distorted view of the overall runtime. 1230 . 
-log_trace [filename] - Print traces of all PETSc calls to the screen (useful to determine where a program 1231 hangs without running in the debugger). See `PetscLogTraceBegin()`. 1232 . -log_view [:filename:format][,[:filename:format]...] - Prints summary of flop and timing information to screen or file, see `PetscLogView()` (up to 4 viewers) 1233 . -log_view_memory - Includes in the summary from -log_view the memory used in each event, see `PetscLogView()`. 1234 . -log_view_gpu_time - Includes in the summary from -log_view the time used in each GPU kernel, see `PetscLogView()`. 1235 . -log_exclude: <vec,mat,pc,ksp,snes> - excludes subset of object classes from logging 1236 . -log [filename] - Logs profiling information in a dump file, see `PetscLogDump()`. 1237 . -log_all [filename] - Same as `-log`. 1238 . -log_mpe [filename] - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution) 1239 . -log_perfstubs - Starts a log handler with the perfstubs interface (which is used by TAU) 1240 . -log_nvtx - Starts an nvtx log handler for use with Nsight 1241 . -viewfromoptions on,off - Enable or disable `XXXSetFromOptions()` calls, for applications with many small solves turn this off 1242 - -check_pointer_intensity 0,1,2 - if pointers are checked for validity (debug version only), using 0 will result in faster code 1243 1244 Options Database Keys for SAWs: 1245 + -saws_port <portnumber> - port number to publish SAWs data, default is 8080 1246 . -saws_port_auto_select - have SAWs select a new unique port number where it publishes the data, the URL is printed to the screen 1247 this is useful when you are running many jobs that utilize SAWs at the same time 1248 . -saws_log <filename> - save a log of all SAWs communication 1249 . 
-saws_https <certificate file> - have SAWs use HTTPS instead of HTTP 1250 - -saws_root <directory> - allow SAWs to have access to the given directory to search for requested resources and files 1251 1252 Environmental Variables: 1253 + `PETSC_TMP` - alternative tmp directory 1254 . `PETSC_SHARED_TMP` - tmp is shared by all processes 1255 . `PETSC_NOT_SHARED_TMP` - each process has its own private tmp 1256 . `PETSC_OPTIONS` - a string containing additional options for petsc in the form of command line "-key value" pairs 1257 . `PETSC_OPTIONS_YAML` - (requires configuring PETSc to use libyaml) a string containing additional options for petsc in the form of a YAML document 1258 . `PETSC_VIEWER_SOCKET_PORT` - socket number to use for socket viewer 1259 - `PETSC_VIEWER_SOCKET_MACHINE` - machine to use for socket viewer to connect to 1260 1261 Level: beginner 1262 1263 Note: 1264 If for some reason you must call `MPI_Init()` separately, call 1265 it before `PetscInitialize()`. 1266 1267 Fortran Notes: 1268 In Fortran this routine can be called with 1269 .vb 1270 call PetscInitialize(ierr) 1271 call PetscInitialize(file,ierr) or 1272 call PetscInitialize(file,help,ierr) 1273 .ve 1274 1275 If your main program is C but you call Fortran code that also uses PETSc you need to call `PetscInitializeFortran()` soon after 1276 calling `PetscInitialize()`. 1277 1278 Options Database Key for Developers: 1279 . -checkfunctionlist - automatically checks that function lists associated with objects are correctly cleaned up. Produces messages of the form: 1280 "function name: MatInodeGetInodeSizes_C" if they are not cleaned up. 
This flag is always set for the test harness (in framework.py) 1281 1282 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscInitializeNoArguments()`, `PetscLogGpuTime()` 1283 @*/ 1284 PetscErrorCode PetscInitialize(int *argc, char ***args, const char file[], const char help[]) 1285 { 1286 PetscMPIInt flag; 1287 const char *prog = "Unknown Name", *mpienv; 1288 1289 PetscFunctionBegin; 1290 if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS); 1291 PetscCallMPI(MPI_Initialized(&flag)); 1292 if (!flag) { 1293 PetscCheck(PETSC_COMM_WORLD == MPI_COMM_NULL, PETSC_COMM_SELF, PETSC_ERR_SUP, "You cannot set PETSC_COMM_WORLD if you have not initialized MPI first"); 1294 PetscCall(PetscPreMPIInit_Private()); 1295 #if defined(PETSC_HAVE_MPI_INIT_THREAD) 1296 { 1297 PetscMPIInt provided; 1298 PetscCallMPI(MPI_Init_thread(argc, args, PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE ? MPI_THREAD_FUNNELED : PETSC_MPI_THREAD_REQUIRED, &provided)); 1299 PetscCheck(PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE || provided >= PETSC_MPI_THREAD_REQUIRED, PETSC_COMM_SELF, PETSC_ERR_MPI, "The MPI implementation's provided thread level is less than what you required"); 1300 if (PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE) PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_FUNNELED; // assign it a valid value after check-up 1301 } 1302 #else 1303 PetscCallMPI(MPI_Init(argc, args)); 1304 #endif 1305 if (PetscDefined(HAVE_MPIUNI)) { 1306 mpienv = getenv("PMI_SIZE"); 1307 if (!mpienv) mpienv = getenv("OMPI_COMM_WORLD_SIZE"); 1308 if (mpienv) { 1309 PetscInt isize; 1310 PetscCall(PetscOptionsStringToInt(mpienv, &isize)); 1311 if (isize != 1) printf("You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc\n"); 1312 PetscCheck(isize == 1, MPI_COMM_SELF, PETSC_ERR_MPI, "You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc"); 1313 } 1314 } 1315 
PetscBeganMPI = PETSC_TRUE; 1316 } 1317 1318 if (argc && *argc) prog = **args; 1319 if (argc && args) { 1320 PetscGlobalArgc = *argc; 1321 PetscGlobalArgs = *args; 1322 } 1323 PetscCall(PetscInitialize_Common(prog, file, help, PETSC_FALSE, PETSC_FALSE, 0)); 1324 PetscFunctionReturn(PETSC_SUCCESS); 1325 } 1326 1327 PETSC_INTERN PetscObject *PetscObjects; 1328 PETSC_INTERN PetscInt PetscObjectsCounts; 1329 PETSC_INTERN PetscInt PetscObjectsMaxCounts; 1330 PETSC_INTERN PetscBool PetscObjectsLog; 1331 1332 /* 1333 Frees all the MPI types and operations that PETSc may have created 1334 */ 1335 PetscErrorCode PetscFreeMPIResources(void) 1336 { 1337 PetscFunctionBegin; 1338 #if defined(PETSC_HAVE_REAL___FLOAT128) 1339 PetscCallMPI(MPI_Type_free(&MPIU___FLOAT128)); 1340 #if defined(PETSC_HAVE_COMPLEX) 1341 PetscCallMPI(MPI_Type_free(&MPIU___COMPLEX128)); 1342 #endif 1343 #endif 1344 #if defined(PETSC_HAVE_REAL___FP16) 1345 PetscCallMPI(MPI_Type_free(&MPIU___FP16)); 1346 #endif 1347 1348 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 1349 PetscCallMPI(MPI_Op_free(&MPIU_SUM)); 1350 PetscCallMPI(MPI_Op_free(&MPIU_MAX)); 1351 PetscCallMPI(MPI_Op_free(&MPIU_MIN)); 1352 #elif defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_HAVE_REAL___FP16) 1353 PetscCallMPI(MPI_Op_free(&MPIU_SUM___FP16___FLOAT128)); 1354 #endif 1355 1356 PetscCallMPI(MPI_Type_free(&MPIU_2SCALAR)); 1357 PetscCallMPI(MPI_Type_free(&MPIU_REAL_INT)); 1358 PetscCallMPI(MPI_Type_free(&MPIU_SCALAR_INT)); 1359 #if defined(PETSC_USE_64BIT_INDICES) 1360 PetscCallMPI(MPI_Type_free(&MPIU_2INT)); 1361 #endif 1362 PetscCallMPI(MPI_Type_free(&MPI_4INT)); 1363 PetscCallMPI(MPI_Type_free(&MPIU_4INT)); 1364 PetscCallMPI(MPI_Op_free(&MPIU_MAXSUM_OP)); 1365 PetscCallMPI(MPI_Op_free(&Petsc_Garbage_SetIntersectOp)); 1366 PetscFunctionReturn(PETSC_SUCCESS); 1367 } 1368 1369 PETSC_INTERN PetscErrorCode PetscLogFinalize(void); 1370 PETSC_EXTERN PetscErrorCode PetscFreeAlign(void *, int, const char[], 
const char[]); 1371 1372 /*@ 1373 PetscFinalize - Checks for options to be called at the conclusion 1374 of the program. `MPI_Finalize()` is called only if the user had not 1375 called `MPI_Init()` before calling `PetscInitialize()`. 1376 1377 Collective on `PETSC_COMM_WORLD` 1378 1379 Options Database Keys: 1380 + -options_view - Calls `PetscOptionsView()` 1381 . -options_left - Prints unused options that remain in the database 1382 . -objects_dump [all] - Prints list of objects allocated by the user that have not been freed, the option all cause all outstanding objects to be listed 1383 . -mpidump - Calls PetscMPIDump() 1384 . -malloc_dump <optional filename> - Calls `PetscMallocDump()`, displays all memory allocated that has not been freed 1385 . -memory_view - Prints total memory usage 1386 - -malloc_view <optional filename> - Prints list of all memory allocated and in what functions 1387 1388 Level: beginner 1389 1390 Note: 1391 See `PetscInitialize()` for other runtime options. 1392 1393 .seealso: `PetscInitialize()`, `PetscOptionsView()`, `PetscMallocDump()`, `PetscMPIDump()`, `PetscEnd()` 1394 @*/ 1395 PetscErrorCode PetscFinalize(void) 1396 { 1397 PetscMPIInt rank; 1398 PetscInt nopt; 1399 PetscBool flg1 = PETSC_FALSE, flg2 = PETSC_FALSE, flg3 = PETSC_FALSE; 1400 PetscBool flg; 1401 char mname[PETSC_MAX_PATH_LEN]; 1402 1403 PetscFunctionBegin; 1404 PetscCheck(PetscInitializeCalled, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "PetscInitialize() must be called before PetscFinalize()"); 1405 PetscCall(PetscInfo(NULL, "PetscFinalize() called\n")); 1406 1407 PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg)); 1408 if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerEnd()); 1409 1410 PetscCall(PetscFreeAlign(PetscGlobalArgsFortran, 0, NULL, NULL)); 1411 PetscGlobalArgc = 0; 1412 PetscGlobalArgs = NULL; 1413 1414 /* Clean up Garbage automatically on COMM_SELF and COMM_WORLD at finalize */ 1415 { 1416 union 1417 { 1418 
MPI_Comm comm; 1419 void *ptr; 1420 } ucomm; 1421 PetscMPIInt flg; 1422 void *tmp; 1423 1424 PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg)); 1425 if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg)); 1426 if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_SELF)); 1427 PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg)); 1428 if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg)); 1429 if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD)); 1430 } 1431 1432 PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank)); 1433 #if defined(PETSC_HAVE_ADIOS) 1434 PetscCallExternal(adios_read_finalize_method, ADIOS_READ_METHOD_BP_AGGREGATE); 1435 PetscCallExternal(adios_finalize, rank); 1436 #endif 1437 PetscCall(PetscOptionsHasName(NULL, NULL, "-citations", &flg)); 1438 if (flg) { 1439 char *cits, filename[PETSC_MAX_PATH_LEN]; 1440 FILE *fd = PETSC_STDOUT; 1441 1442 PetscCall(PetscOptionsGetString(NULL, NULL, "-citations", filename, sizeof(filename), NULL)); 1443 if (filename[0]) PetscCall(PetscFOpen(PETSC_COMM_WORLD, filename, "w", &fd)); 1444 PetscCall(PetscSegBufferGet(PetscCitationsList, 1, &cits)); 1445 cits[0] = 0; 1446 PetscCall(PetscSegBufferExtractAlloc(PetscCitationsList, &cits)); 1447 PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "If you publish results based on this computation please cite the following:\n")); 1448 PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n")); 1449 PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "%s", cits)); 1450 PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n")); 1451 PetscCall(PetscFClose(PETSC_COMM_WORLD, fd)); 1452 PetscCall(PetscFree(cits)); 1453 } 1454 PetscCall(PetscSegBufferDestroy(&PetscCitationsList)); 1455 1456 #if 
defined(PETSC_SERIALIZE_FUNCTIONS) 1457 PetscCall(PetscFPTDestroy()); 1458 #endif 1459 1460 #if defined(PETSC_HAVE_SAWS) 1461 flg = PETSC_FALSE; 1462 PetscCall(PetscOptionsGetBool(NULL, NULL, "-saw_options", &flg, NULL)); 1463 if (flg) PetscCall(PetscOptionsSAWsDestroy()); 1464 #endif 1465 1466 #if defined(PETSC_HAVE_X) 1467 flg1 = PETSC_FALSE; 1468 PetscCall(PetscOptionsGetBool(NULL, NULL, "-x_virtual", &flg1, NULL)); 1469 if (flg1) { 1470 /* this is a crude hack, but better than nothing */ 1471 PetscCall(PetscPOpen(PETSC_COMM_WORLD, NULL, "pkill -15 Xvfb", "r", NULL)); 1472 } 1473 #endif 1474 1475 #if !defined(PETSC_HAVE_THREADSAFETY) 1476 PetscCall(PetscOptionsGetBool(NULL, NULL, "-memory_view", &flg2, NULL)); 1477 if (flg2) PetscCall(PetscMemoryView(PETSC_VIEWER_STDOUT_WORLD, "Summary of Memory Usage in PETSc\n")); 1478 #endif 1479 1480 if (PetscDefined(USE_LOG)) { 1481 flg1 = PETSC_FALSE; 1482 PetscCall(PetscOptionsGetBool(NULL, NULL, "-get_total_flops", &flg1, NULL)); 1483 if (flg1) { 1484 PetscLogDouble flops = 0; 1485 PetscCallMPI(MPI_Reduce(&petsc_TotalFlops, &flops, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD)); 1486 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Total flops over all processors %g\n", flops)); 1487 } 1488 } 1489 1490 if (PetscDefined(USE_LOG) && PetscDefined(HAVE_MPE)) { 1491 mname[0] = 0; 1492 PetscCall(PetscOptionsGetString(NULL, NULL, "-log_mpe", mname, sizeof(mname), &flg1)); 1493 if (flg1) PetscCall(PetscLogMPEDump(mname[0] ? mname : NULL)); 1494 } 1495 1496 #if defined(PETSC_HAVE_KOKKOS) 1497 // Free petsc/kokkos stuff before the potentially non-null petsc default gpu stream is destroyed by PetscObjectRegisterDestroyAll 1498 if (PetscKokkosInitialized) { 1499 PetscCall(PetscKokkosFinalize_Private()); 1500 PetscKokkosInitialized = PETSC_FALSE; 1501 } 1502 #endif 1503 1504 // Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_(). 
1505 PetscCall(PetscObjectRegisterDestroyAll()); 1506 1507 if (PetscDefined(USE_LOG)) { 1508 PetscCall(PetscOptionsPushCreateViewerOff(PETSC_FALSE)); 1509 PetscCall(PetscLogViewFromOptions()); 1510 PetscCall(PetscOptionsPopCreateViewerOff()); 1511 // It should be turned on with PetscLogGpuTime() and never turned off except in this place 1512 PetscLogGpuTimeFlag = PETSC_FALSE; 1513 1514 // Free any objects created by the last block of code. 1515 PetscCall(PetscObjectRegisterDestroyAll()); 1516 1517 mname[0] = 0; 1518 PetscCall(PetscOptionsGetString(NULL, NULL, "-log_all", mname, sizeof(mname), &flg1)); 1519 PetscCall(PetscOptionsGetString(NULL, NULL, "-log", mname, sizeof(mname), &flg2)); 1520 if (flg1 || flg2) PetscCall(PetscLogDump(mname)); 1521 } 1522 1523 flg1 = PETSC_FALSE; 1524 PetscCall(PetscOptionsGetBool(NULL, NULL, "-no_signal_handler", &flg1, NULL)); 1525 if (!flg1) PetscCall(PetscPopSignalHandler()); 1526 flg1 = PETSC_FALSE; 1527 PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpidump", &flg1, NULL)); 1528 if (flg1) PetscCall(PetscMPIDump(stdout)); 1529 flg1 = PETSC_FALSE; 1530 flg2 = PETSC_FALSE; 1531 /* preemptive call to avoid listing this option in options table as unused */ 1532 PetscCall(PetscOptionsHasName(NULL, NULL, "-malloc_dump", &flg1)); 1533 PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1)); 1534 PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_view", &flg2, NULL)); 1535 1536 if (flg2) { PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD)); } 1537 1538 /* to prevent PETSc -options_left from warning */ 1539 PetscCall(PetscOptionsHasName(NULL, NULL, "-nox", &flg1)); 1540 PetscCall(PetscOptionsHasName(NULL, NULL, "-nox_warning", &flg1)); 1541 1542 flg3 = PETSC_FALSE; /* default value is required */ 1543 PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_left", &flg3, &flg1)); 1544 if (!flg1) flg3 = PETSC_TRUE; 1545 if (flg3) { 1546 if (!flg2 && flg1) { /* have not yet printed the options */ 1547 
PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD)); 1548 } 1549 PetscCall(PetscOptionsAllUsed(NULL, &nopt)); 1550 if (nopt) { 1551 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! There are options you set that were not used!\n")); 1552 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! could be spelling mistake, etc!\n")); 1553 if (nopt == 1) { 1554 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There is one unused database option. It is:\n")); 1555 } else { 1556 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are %" PetscInt_FMT " unused database options. They are:\n", nopt)); 1557 } 1558 } else if (flg3 && flg1) { 1559 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are no unused options.\n")); 1560 } 1561 PetscCall(PetscOptionsLeft(NULL)); 1562 } 1563 1564 #if defined(PETSC_HAVE_SAWS) 1565 if (!PetscGlobalRank) { 1566 PetscCall(PetscStackSAWsViewOff()); 1567 PetscCallSAWs(SAWs_Finalize, ()); 1568 } 1569 #endif 1570 1571 /* 1572 List all objects the user may have forgot to free 1573 */ 1574 if (PetscDefined(USE_LOG) && PetscObjectsLog) { 1575 PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1)); 1576 if (flg1) { 1577 MPI_Comm local_comm; 1578 char string[64]; 1579 1580 PetscCall(PetscOptionsGetString(NULL, NULL, "-objects_dump", string, sizeof(string), NULL)); 1581 PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm)); 1582 PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1)); 1583 PetscCall(PetscObjectsDump(stdout, (string[0] == 'a') ? 
PETSC_TRUE : PETSC_FALSE)); 1584 PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1)); 1585 PetscCallMPI(MPI_Comm_free(&local_comm)); 1586 } 1587 } 1588 1589 PetscObjectsCounts = 0; 1590 PetscObjectsMaxCounts = 0; 1591 PetscCall(PetscFree(PetscObjects)); 1592 1593 /* 1594 Destroy any packages that registered a finalize 1595 */ 1596 PetscCall(PetscRegisterFinalizeAll()); 1597 1598 PetscCall(PetscLogFinalize()); 1599 1600 /* 1601 Print PetscFunctionLists that have not been properly freed 1602 */ 1603 if (PetscPrintFunctionList) PetscCall(PetscFunctionListPrintAll()); 1604 1605 if (petsc_history) { 1606 PetscCall(PetscCloseHistoryFile(&petsc_history)); 1607 petsc_history = NULL; 1608 } 1609 PetscCall(PetscOptionsHelpPrintedDestroy(&PetscOptionsHelpPrintedSingleton)); 1610 PetscCall(PetscInfoDestroy()); 1611 1612 #if !defined(PETSC_HAVE_THREADSAFETY) 1613 if (!(PETSC_RUNNING_ON_VALGRIND)) { 1614 char fname[PETSC_MAX_PATH_LEN]; 1615 char sname[PETSC_MAX_PATH_LEN]; 1616 FILE *fd; 1617 int err; 1618 1619 flg2 = PETSC_FALSE; 1620 flg3 = PETSC_FALSE; 1621 if (PetscDefined(USE_DEBUG)) PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_test", &flg2, NULL)); 1622 PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_debug", &flg3, NULL)); 1623 fname[0] = 0; 1624 PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_dump", fname, sizeof(fname), &flg1)); 1625 if (flg1 && fname[0]) { 1626 PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank)); 1627 fd = fopen(sname, "w"); 1628 PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname); 1629 PetscCall(PetscMallocDump(fd)); 1630 err = fclose(fd); 1631 PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file"); 1632 } else if (flg1 || flg2 || flg3) { 1633 MPI_Comm local_comm; 1634 1635 PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm)); 1636 PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1)); 1637 PetscCall(PetscMallocDump(stdout)); 1638 
PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1)); 1639 PetscCallMPI(MPI_Comm_free(&local_comm)); 1640 } 1641 fname[0] = 0; 1642 PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_view", fname, sizeof(fname), &flg1)); 1643 if (flg1 && fname[0]) { 1644 PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank)); 1645 fd = fopen(sname, "w"); 1646 PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname); 1647 PetscCall(PetscMallocView(fd)); 1648 err = fclose(fd); 1649 PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file"); 1650 } else if (flg1) { 1651 MPI_Comm local_comm; 1652 1653 PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm)); 1654 PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1)); 1655 PetscCall(PetscMallocView(stdout)); 1656 PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1)); 1657 PetscCallMPI(MPI_Comm_free(&local_comm)); 1658 } 1659 } 1660 #endif 1661 1662 /* 1663 Close any open dynamic libraries 1664 */ 1665 PetscCall(PetscFinalize_DynamicLibraries()); 1666 1667 /* Can be destroyed only after all the options are used */ 1668 PetscCall(PetscOptionsDestroyDefault()); 1669 1670 #if defined(PETSC_HAVE_NVSHMEM) 1671 if (PetscBeganNvshmem) { 1672 PetscCall(PetscNvshmemFinalize()); 1673 PetscBeganNvshmem = PETSC_FALSE; 1674 } 1675 #endif 1676 1677 PetscCall(PetscFreeMPIResources()); 1678 1679 /* 1680 Destroy any known inner MPI_Comm's and attributes pointing to them 1681 Note this will not destroy any new communicators the user has created. 
1682 1683 If all PETSc objects were not destroyed those left over objects will have hanging references to 1684 the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again 1685 */ 1686 { 1687 PetscCommCounter *counter; 1688 PetscMPIInt flg; 1689 MPI_Comm icomm; 1690 union 1691 { 1692 MPI_Comm comm; 1693 void *ptr; 1694 } ucomm; 1695 PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg)); 1696 if (flg) { 1697 icomm = ucomm.comm; 1698 PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg)); 1699 PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory"); 1700 1701 PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval)); 1702 PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval)); 1703 PetscCallMPI(MPI_Comm_free(&icomm)); 1704 } 1705 PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg)); 1706 if (flg) { 1707 icomm = ucomm.comm; 1708 PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg)); 1709 PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory"); 1710 1711 PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval)); 1712 PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval)); 1713 PetscCallMPI(MPI_Comm_free(&icomm)); 1714 } 1715 } 1716 1717 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Counter_keyval)); 1718 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_InnerComm_keyval)); 1719 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_OuterComm_keyval)); 1720 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_ShmComm_keyval)); 1721 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_CreationIdx_keyval)); 1722 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Garbage_HMap_keyval)); 1723 1724 // Free keyvals which may be silently created by some 
routines 1725 if (Petsc_SharedWD_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedWD_keyval)); 1726 if (Petsc_SharedTmp_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedTmp_keyval)); 1727 1728 PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockOpen)); 1729 PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStdout)); 1730 PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStderr)); 1731 PetscCall(PetscSpinlockDestroy(&PetscCommSpinLock)); 1732 1733 if (PetscBeganMPI) { 1734 PetscMPIInt flag; 1735 PetscCallMPI(MPI_Finalized(&flag)); 1736 PetscCheck(!flag, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()"); 1737 /* wait until the very last moment to disable error handling */ 1738 PetscErrorHandlingInitialized = PETSC_FALSE; 1739 PetscCallMPI(MPI_Finalize()); 1740 } else PetscErrorHandlingInitialized = PETSC_FALSE; 1741 1742 /* 1743 1744 Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because 1745 the communicator has some outstanding requests on it. Specifically if the 1746 flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See 1747 src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate() 1748 is never freed as it should be. Thus one may obtain messages of the form 1749 [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the 1750 memory was not freed. 1751 1752 */ 1753 PetscCall(PetscMallocClear()); 1754 PetscCall(PetscStackReset()); 1755 1756 PetscInitializeCalled = PETSC_FALSE; 1757 PetscFinalizeCalled = PETSC_TRUE; 1758 #if defined(PETSC_USE_COVERAGE) 1759 /* 1760 flush gcov, otherwise during CI the flushing continues into the next pipeline resulting in git not being able to delete directories since the 1761 gcov files are still being added to the directories as git tries to remove the directories. 
1762 */ 1763 __gcov_flush(); 1764 #endif 1765 /* To match PetscFunctionBegin() at the beginning of this function */ 1766 PetscStackClearTop; 1767 return PETSC_SUCCESS; 1768 } 1769 1770 #if defined(PETSC_MISSING_LAPACK_lsame_) 1771 PETSC_EXTERN int lsame_(char *a, char *b) 1772 { 1773 if (*a == *b) return 1; 1774 if (*a + 32 == *b) return 1; 1775 if (*a - 32 == *b) return 1; 1776 return 0; 1777 } 1778 #endif 1779 1780 #if defined(PETSC_MISSING_LAPACK_lsame) 1781 PETSC_EXTERN int lsame(char *a, char *b) 1782 { 1783 if (*a == *b) return 1; 1784 if (*a + 32 == *b) return 1; 1785 if (*a - 32 == *b) return 1; 1786 return 0; 1787 } 1788 #endif 1789