#define PETSC_DESIRE_FEATURE_TEST_MACROS
/*
   This file defines the initialization of PETSc, including PetscInitialize()
*/
#include <petsc/private/petscimpl.h> /*I  "petscsys.h"   I*/
#include <petsc/private/logimpl.h>
#include <petscviewer.h>
#include <petsc/private/garbagecollector.h>

#if !defined(PETSC_HAVE_WINDOWS_COMPILERS)
  #include <petsc/private/valgrind/valgrind.h>
#endif

#if defined(PETSC_USE_FORTRAN_BINDINGS)
  #include <petsc/private/fortranimpl.h>
#endif

#if PetscDefined(USE_COVERAGE)
EXTERN_C_BEGIN
  /* newer GCCs renamed __gcov_flush() to __gcov_dump(); map the old name onto the new one */
  #if defined(PETSC_HAVE___GCOV_DUMP)
    #define __gcov_flush(x) __gcov_dump(x)
  #endif
void __gcov_flush(void);
EXTERN_C_END
#endif

#if defined(PETSC_SERIALIZE_FUNCTIONS)
PETSC_INTERN PetscFPT PetscFPTData;
PetscFPT              PetscFPTData = 0;
#endif

#if PetscDefined(HAVE_SAWS)
  #include <petscviewersaws.h>
#endif

PETSC_INTERN FILE *petsc_history;

PETSC_INTERN PetscErrorCode PetscInitialize_DynamicLibraries(void);
PETSC_INTERN PetscErrorCode PetscFinalize_DynamicLibraries(void);
PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm, int);
PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm, int);
PETSC_INTERN PetscErrorCode PetscCloseHistoryFile(FILE **);

/* user may set these BEFORE calling PetscInitialize() */
MPI_Comm PETSC_COMM_WORLD = MPI_COMM_NULL;
#if PetscDefined(HAVE_MPI_INIT_THREAD)
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = PETSC_DECIDE;
#else
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_SINGLE;
#endif

/* MPI attribute keyvals PETSc uses to stash its data on communicators; created lazily, invalid until then */
PetscMPIInt Petsc_Counter_keyval      = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_InnerComm_keyval    = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_OuterComm_keyval    = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_ShmComm_keyval      = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_CreationIdx_keyval  = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_Garbage_HMap_keyval = MPI_KEYVAL_INVALID;

PetscMPIInt Petsc_SharedWD_keyval  = MPI_KEYVAL_INVALID;
PetscMPIInt Petsc_SharedTmp_keyval = MPI_KEYVAL_INVALID;

/*
     Declare and set all the string names of the PETSc enums
*/
const char *const PetscBools[]     = {"FALSE", "TRUE", "PetscBool", "PETSC_", NULL};
const char *const PetscCopyModes[] = {"COPY_VALUES", "OWN_POINTER", "USE_POINTER", "PetscCopyMode", "PETSC_", NULL};

PetscBool PetscPreLoadingUsed = PETSC_FALSE;
PetscBool PetscPreLoadingOn   = PETSC_FALSE;

PetscInt PetscHotRegionDepth;

PetscBool PETSC_RUNNING_ON_VALGRIND = PETSC_FALSE;

#if defined(PETSC_HAVE_THREADSAFETY)
PetscSpinlock PetscViewerASCIISpinLockOpen;
PetscSpinlock PetscViewerASCIISpinLockStdout;
PetscSpinlock PetscViewerASCIISpinLockStderr;
PetscSpinlock PetscCommSpinLock;
#endif

extern PetscInt PetscNumBLASThreads;

/*@C
   PetscInitializeNoPointers - Calls PetscInitialize() from C/C++ without the pointers to argc and args

   Collective, No Fortran Support

   Input Parameters:
+  argc - number of args
.  args - array of command line arguments
.  filename - optional name of the program file, pass `NULL` to ignore
-  help - optional help, pass `NULL` to ignore

   Level: advanced

   Notes:
   this is called only by the PETSc Julia interface. Even though it might start MPI it sets the flag to
   indicate that it did NOT start MPI so that the `PetscFinalize()` does not end MPI, thus allowing `PetscInitialize()` to
   be called multiple times from Julia without the problem of trying to initialize MPI more than once.

   Developer Notes:
   Turns off PETSc signal handling to allow Julia to manage signals

.seealso: `PetscInitialize()`, `PetscInitializeFortran()`, `PetscInitializeNoArguments()`
*/
PetscErrorCode PetscInitializeNoPointers(int argc, char **args, const char *filename, const char *help)
{
  /* local copies because PetscInitialize() takes pointers and may modify its arguments */
  int    myargc = argc;
  char **myargs = args;

  PetscFunctionBegin;
  PetscCall(PetscInitialize(&myargc, &myargs, filename, help));
  PetscCall(PetscPopSignalHandler());
  /* pretend MPI was not started here so PetscFinalize() leaves MPI running (see Notes above) */
  PetscBeganMPI = PETSC_FALSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
   PetscInitializeNoArguments - Calls `PetscInitialize()` from C/C++ without
   the command line arguments.

   Collective

   Level: advanced

.seealso: `PetscInitialize()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscInitializeNoArguments(void)
{
  int    argc = 0;
  char **args = NULL;

  PetscFunctionBegin;
  PetscCall(PetscInitialize(&argc, &args, NULL, NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
   PetscInitialized - Determine whether PETSc is initialized.

   Output Parameter:
.  isInitialized - `PETSC_TRUE` if PETSc is initialized, `PETSC_FALSE` otherwise

   Level: beginner

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscInitialized(PetscBool *isInitialized)
{
  PetscFunctionBegin;
  /* only validate the pointer once the error machinery is up (i.e. after PetscInitialize()) */
  if (PetscInitializeCalled) PetscAssertPointer(isInitialized, 1);
  *isInitialized = PetscInitializeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
   PetscFinalized - Determine whether `PetscFinalize()` has been called yet

   Output Parameter:
.  isFinalized - `PETSC_TRUE` if PETSc is finalized, `PETSC_FALSE` otherwise

   Level: developer

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscFinalized(PetscBool *isFinalized)
{
  PetscFunctionBegin;
  /* only validate the pointer while the error machinery is still up (i.e. before PetscFinalize()) */
  if (!PetscFinalizeCalled) PetscAssertPointer(isFinalized, 1);
  *isFinalized = PetscFinalizeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscOptionsCheckInitial_Private(const char[]);

/*
   This function is the MPI reduction operation used to compute the sum of the
   first half of the datatype and the max of the second half.
*/
MPI_Op MPIU_MAXSUM_OP               = 0;
MPI_Op Petsc_Garbage_SetIntersectOp = 0;

/*
   User-defined MPI reduction: entries come in (max-part, sum-part) pairs; the first member of each
   pair is max-reduced, the second is summed. Signature matches MPI_User_function. See PetscMaxSum().
*/
PETSC_INTERN void MPIAPI MPIU_MaxSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscFunctionBegin;
  if (*datatype == MPIU_INT_MPIINT && PetscDefined(USE_64BIT_INDICES)) {
#if defined(PETSC_USE_64BIT_INDICES)
    /* mixed-width pair: .a is PetscInt (max-reduced), .b is PetscMPIInt (summed) */
    struct petsc_mpiu_int_mpiint *xin = (struct petsc_mpiu_int_mpiint *)in, *xout = (struct petsc_mpiu_int_mpiint *)out;
    PetscMPIInt                   count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[i].a = PetscMax(xout[i].a, xin[i].a);
      xout[i].b += xin[i].b;
    }
#endif
  } else if (*datatype == MPIU_2INT || *datatype == MPIU_INT_MPIINT) {
    /* homogeneous pairs of PetscInt: even index max-reduced, odd index summed */
    PetscInt   *xin = (PetscInt *)in, *xout = (PetscInt *)out;
    PetscMPIInt count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[2 * i] = PetscMax(xout[2 * i], xin[2 * i]);
      xout[2 * i + 1] += xin[2 * i + 1];
    }
  } else {
    /* cannot use PetscCall machinery inside an MPI callback that must not return an error code */
    PetscErrorCode ierr = (*PetscErrorPrintf)("Can only handle MPIU_2INT and MPIU_INT_MPIINT data types");
    (void)ierr;
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}

/*@
   PetscMaxSum - Returns the max of the first entry over all MPI processes and the sum of the second entry.
215 216 Collective 217 218 Input Parameters: 219 + comm - the communicator 220 - array - an arry of length 2 times `size`, the number of MPI processes 221 222 Output Parameters: 223 + max - the maximum of `array[2*rank]` over all MPI processes 224 - sum - the sum of the `array[2*rank + 1]` over all MPI processes 225 226 Level: developer 227 228 .seealso: `PetscInitialize()` 229 @*/ 230 PetscErrorCode PetscMaxSum(MPI_Comm comm, const PetscInt array[], PetscInt *max, PetscInt *sum) 231 { 232 PetscFunctionBegin; 233 #if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK) 234 { 235 struct { 236 PetscInt max, sum; 237 } work; 238 PetscCallMPI(MPI_Reduce_scatter_block((void *)array, &work, 1, MPIU_2INT, MPIU_MAXSUM_OP, comm)); 239 *max = work.max; 240 *sum = work.sum; 241 } 242 #else 243 { 244 PetscMPIInt size, rank; 245 struct { 246 PetscInt max, sum; 247 } *work; 248 PetscCallMPI(MPI_Comm_size(comm, &size)); 249 PetscCallMPI(MPI_Comm_rank(comm, &rank)); 250 PetscCall(PetscMalloc1(size, &work)); 251 PetscCallMPI(MPIU_Allreduce((void *)array, work, size, MPIU_2INT, MPIU_MAXSUM_OP, comm)); 252 *max = work[rank].max; 253 *sum = work[rank].sum; 254 PetscCall(PetscFree(work)); 255 } 256 #endif 257 PetscFunctionReturn(PETSC_SUCCESS); 258 } 259 260 #if defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_HAVE_REAL___FP16) 261 #if defined(PETSC_HAVE_REAL___FLOAT128) 262 #include <quadmath.h> 263 #endif 264 MPI_Op MPIU_SUM___FP16___FLOAT128 = 0; 265 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 266 MPI_Op MPIU_SUM = 0; 267 #endif 268 269 PETSC_EXTERN void MPIAPI PetscSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype) 270 { 271 PetscMPIInt i, count = *cnt; 272 273 PetscFunctionBegin; 274 if (*datatype == MPIU_REAL) { 275 PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out; 276 for (i = 0; i < count; i++) xout[i] += xin[i]; 277 } 278 #if defined(PETSC_HAVE_COMPLEX) 279 else if (*datatype == MPIU_COMPLEX) { 280 PetscComplex *xin = 
(PetscComplex *)in, *xout = (PetscComplex *)out; 281 for (i = 0; i < count; i++) xout[i] += xin[i]; 282 } 283 #endif 284 #if defined(PETSC_HAVE_REAL___FLOAT128) 285 else if (*datatype == MPIU___FLOAT128) { 286 __float128 *xin = (__float128 *)in, *xout = (__float128 *)out; 287 for (i = 0; i < count; i++) xout[i] += xin[i]; 288 #if defined(PETSC_HAVE_COMPLEX) 289 } else if (*datatype == MPIU___COMPLEX128) { 290 __complex128 *xin = (__complex128 *)in, *xout = (__complex128 *)out; 291 for (i = 0; i < count; i++) xout[i] += xin[i]; 292 #endif 293 } 294 #endif 295 #if defined(PETSC_HAVE_REAL___FP16) 296 else if (*datatype == MPIU___FP16) { 297 __fp16 *xin = (__fp16 *)in, *xout = (__fp16 *)out; 298 for (i = 0; i < count; i++) xout[i] = (__fp16)(xin[i] + xout[i]); 299 } 300 #endif 301 else { 302 #if !defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_HAVE_REAL___FP16) 303 PetscCallAbort(MPI_COMM_SElF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types")); 304 #elif !defined(PETSC_HAVE_REAL___FP16) 305 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, or MPIU___COMPLEX128 data types")); 306 #elif !defined(PETSC_HAVE_REAL___FLOAT128) 307 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, or MPIU___FP16 data types")); 308 #else 309 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, MPIU___COMPLEX128, or MPIU___FP16 data types")); 310 #endif 311 PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG); 312 } 313 PetscFunctionReturnVoid(); 314 } 315 #endif 316 317 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 318 MPI_Op MPIU_MAX = 0; 319 MPI_Op MPIU_MIN = 0; 320 321 PETSC_EXTERN void MPIAPI PetscMax_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype) 322 { 323 PetscInt i, count = *cnt; 324 325 PetscFunctionBegin; 326 if (*datatype == MPIU_REAL) { 327 PetscReal 
*xin = (PetscReal *)in, *xout = (PetscReal *)out; 328 for (i = 0; i < count; i++) xout[i] = PetscMax(xout[i], xin[i]); 329 } 330 #if defined(PETSC_HAVE_COMPLEX) 331 else if (*datatype == MPIU_COMPLEX) { 332 PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out; 333 for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) < PetscRealPartComplex(xin[i]) ? xin[i] : xout[i]; 334 } 335 #endif 336 else { 337 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types")); 338 PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG); 339 } 340 PetscFunctionReturnVoid(); 341 } 342 343 PETSC_EXTERN void MPIAPI PetscMin_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype) 344 { 345 PetscInt i, count = *cnt; 346 347 PetscFunctionBegin; 348 if (*datatype == MPIU_REAL) { 349 PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out; 350 for (i = 0; i < count; i++) xout[i] = PetscMin(xout[i], xin[i]); 351 } 352 #if defined(PETSC_HAVE_COMPLEX) 353 else if (*datatype == MPIU_COMPLEX) { 354 PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out; 355 for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) > PetscRealPartComplex(xin[i]) ? xin[i] : xout[i]; 356 } 357 #endif 358 else { 359 PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_SCALAR data (i.e. double or complex) types")); 360 PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG); 361 } 362 PetscFunctionReturnVoid(); 363 } 364 #endif 365 366 /* 367 Private routine to delete internal tag/name counter storage when a communicator is freed. 368 369 This is called by MPI, not by users. This is called by MPI_Comm_free() when the communicator that has this data as an attribute is freed. 

   Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_Counter_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *count_val, void *extra_state)
{
  PetscCommCounter      *counter = (PetscCommCounter *)count_val;
  struct PetscCommStash *comms   = counter->comms, *pcomm;

  PetscFunctionBegin;
  PetscCallReturnMPI(PetscInfo(NULL, "Deleting counter data in an MPI_Comm %ld\n", (long)comm));
  PetscCallReturnMPI(PetscFree(counter->iflags));
  /* free the stashed list of derived communicators, releasing each MPI_Comm and its list node */
  while (comms) {
    PetscCallMPIReturnMPI(MPI_Comm_free(&comms->comm));
    pcomm = comms;
    comms = comms->next;
    PetscCallReturnMPI(PetscFree(pcomm));
  }
  PetscCallReturnMPI(PetscFree(counter));
  PetscFunctionReturn(MPI_SUCCESS);
}

/*
  This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Comm_delete_attr) or when the user
  calls MPI_Comm_free().

  This is the only entry point for breaking the links between inner and outer comms.

  This is called by MPI, not by users. This is called when MPI_Comm_free() is called on the communicator.

  Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_InnerComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  /* union lets an MPI_Comm travel through the void* attribute value without aliasing casts */
  union
  {
    MPI_Comm comm;
    void    *ptr;
  } icomm;

  PetscFunctionBegin;
  PetscCheckReturnMPI(keyval == Petsc_InnerComm_keyval, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Unexpected keyval");
  icomm.ptr = attr_val;
  if (PetscDefined(USE_DEBUG)) {
    /* Error out if the inner/outer comms are not correctly linked through their Outer/InnerComm attributes */
    PetscMPIInt flg;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ocomm;
    PetscCallMPIReturnMPI(MPI_Comm_get_attr(icomm.comm, Petsc_OuterComm_keyval, &ocomm, &flg));
    PetscCheckReturnMPI(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm does not have OuterComm attribute");
    PetscCheckReturnMPI(ocomm.comm == comm, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm's OuterComm attribute does not point to outer PETSc comm");
  }
  /* break the back-link; this triggers Petsc_OuterComm_Attr_DeleteFn on the inner comm */
  PetscCallMPIReturnMPI(MPI_Comm_delete_attr(icomm.comm, Petsc_OuterComm_keyval));
  PetscCallReturnMPI(PetscInfo(NULL, "User MPI_Comm %ld is being unlinked from inner PETSc comm %ld\n", (long)comm, (long)icomm.comm));
  PetscFunctionReturn(MPI_SUCCESS);
}

/*
 * This is invoked on the inner comm when Petsc_InnerComm_Attr_DeleteFn calls MPI_Comm_delete_attr(). It should not be reached any other way.
 */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_OuterComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  PetscFunctionBegin;
  /* nothing to free here; the attribute only back-references the outer comm */
  PetscCallReturnMPI(PetscInfo(NULL, "Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n", (long)comm));
  PetscFunctionReturn(MPI_SUCCESS);
}

PETSC_EXTERN PetscMPIInt MPIAPI Petsc_ShmComm_Attr_DeleteFn(MPI_Comm, PetscMPIInt, void *, void *);

#if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype, MPI_Aint *, void *);
PETSC_EXTERN PetscMPIInt PetscDataRep_read_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
PETSC_EXTERN PetscMPIInt PetscDataRep_write_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
#endif

/* PETSc-specific MPI error class/code; registered with MPI during initialization */
PetscMPIInt PETSC_MPI_ERROR_CLASS = MPI_ERR_LASTCODE, PETSC_MPI_ERROR_CODE;

PETSC_INTERN int    PetscGlobalArgc;
PETSC_INTERN char **PetscGlobalArgs, **PetscGlobalArgsFortran;
int             PetscGlobalArgc         = 0;
char          **PetscGlobalArgs         = NULL;
char          **PetscGlobalArgsFortran  = NULL;
PetscSegBuffer  PetscCitationsList;

/* Creates the citation buffer and registers the standard PETSc citations; called during PetscInitialize() */
PetscErrorCode PetscCitationsInitialize(void)
{
  PetscFunctionBegin;
  PetscCall(PetscSegBufferCreate(1, 10000, &PetscCitationsList));

  PetscCall(PetscCitationsRegister("@TechReport{petsc-user-ref,\n\
Author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Steven Benson and Jed Brown\n\
and Peter Brune and Kris Buschelman and Emil Constantinescu and Lisandro Dalcin and Alp Dener\n\
and Victor Eijkhout and Jacob Faibussowitsch and William~D. Gropp and V\'{a}clav Hapla and Tobin Isaac and Pierre Jolivet\n\
and Dmitry Karpeev and Dinesh Kaushik and Matthew~G. Knepley and Fande Kong and Scott Kruger\n\
and Dave~A. May and Lois Curfman McInnes and Richard Tran Mills and Lawrence Mitchell and Todd Munson\n\
and Jose~E. Roman and Karl Rupp and Patrick Sanan and Jason Sarich and Barry~F. Smith\n\
and Stefano Zampini and Hong Zhang and Hong Zhang and Junchao Zhang},\n\
Title = {{PETSc/TAO} Users Manual},\n\
Number = {ANL-21/39 - Revision 3.21},\n\
Doi = {10.2172/2205494},\n\
Institution = {Argonne National Laboratory},\n\
Year = {2024}\n}\n",
                                   NULL));

  PetscCall(PetscCitationsRegister("@InProceedings{petsc-efficient,\n\
Author = {Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith},\n\
Title = {Efficient Management of Parallelism in Object Oriented Numerical Software Libraries},\n\
Booktitle = {Modern Software Tools in Scientific Computing},\n\
Editor = {E. Arge and A. M. Bruaset and H. P. Langtangen},\n\
Pages = {163--202},\n\
Publisher = {Birkh{\\\"{a}}user Press},\n\
Year = {1997}\n}\n",
                                   NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static char programname[PETSC_MAX_PATH_LEN] = ""; /* HP includes entire path in name */

/* Stores (a truncated copy of) the program name; retrieved with PetscGetProgramName() */
PetscErrorCode PetscSetProgramName(const char name[])
{
  PetscFunctionBegin;
  PetscCall(PetscStrncpy(programname, name, sizeof(programname)));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
   PetscGetProgramName - Gets the name of the running program.

   Not Collective

   Input Parameter:
.  len - length of the string name

   Output Parameter:
.
  name - the name of the running program, provide a string of length `PETSC_MAX_PATH_LEN`

   Level: advanced

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetProgramName(char name[], size_t len)
{
  PetscFunctionBegin;
  /* truncates (with NUL termination) if the stored name is longer than len */
  PetscCall(PetscStrncpy(name, programname, len));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
   PetscGetArgs - Allows you to access the raw command line arguments anywhere
   after PetscInitialize() is called but before `PetscFinalize()`.

   Not Collective, No Fortran Support

   Output Parameters:
+  argc - count of number of command line arguments
-  args - the command line arguments

   Level: intermediate

   Notes:
   This is usually used to pass the command line arguments into other libraries
   that are called internally deep in PETSc or the application.

   The first argument contains the program name as is normal for C programs.

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetArgs(int *argc, char ***args)
{
  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  /* returns pointers to the argv saved at PetscInitialize(); caller must NOT free them */
  *argc = PetscGlobalArgc;
  *args = PetscGlobalArgs;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
   PetscGetArguments - Allows you to access the command line arguments anywhere
   after `PetscInitialize()` is called but before `PetscFinalize()`.

   Not Collective, No Fortran Support

   Output Parameter:
.
args - the command line arguments 558 559 Level: intermediate 560 561 Note: 562 This does NOT start with the program name and IS `NULL` terminated (final arg is void) 563 564 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscFreeArguments()`, `PetscInitialize()` 565 @*/ 566 PetscErrorCode PetscGetArguments(char ***args) 567 { 568 PetscInt i, argc = PetscGlobalArgc; 569 570 PetscFunctionBegin; 571 PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()"); 572 if (!argc) { 573 *args = NULL; 574 PetscFunctionReturn(PETSC_SUCCESS); 575 } 576 PetscCall(PetscMalloc1(argc, args)); 577 for (i = 0; i < argc - 1; i++) PetscCall(PetscStrallocpy(PetscGlobalArgs[i + 1], &(*args)[i])); 578 (*args)[argc - 1] = NULL; 579 PetscFunctionReturn(PETSC_SUCCESS); 580 } 581 582 /*@C 583 PetscFreeArguments - Frees the memory obtained with `PetscGetArguments()` 584 585 Not Collective, No Fortran Support 586 587 Output Parameter: 588 . 
  args - the command line arguments

   Level: intermediate

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscGetArguments()`
@*/
PetscErrorCode PetscFreeArguments(char **args)
{
  PetscFunctionBegin;
  if (args) {
    PetscInt i = 0;

    /* the array is NULL-terminated: free each string, then the array itself */
    while (args[i]) PetscCall(PetscFree(args[i++]));
    PetscCall(PetscFree(args));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PetscDefined(HAVE_SAWS)
  #include <petscconfiginfo.h>

/*
   Configures and starts the SAWs web server on rank 0, driven by the -saws_* options,
   and publishes an introductory dashboard page. Called during PetscInitialize().
*/
PETSC_INTERN PetscErrorCode PetscInitializeSAWs(const char help[])
{
  PetscFunctionBegin;
  if (!PetscGlobalRank) {
    char      cert[PETSC_MAX_PATH_LEN], root[PETSC_MAX_PATH_LEN], *intro, programname[64], *appline, *options, version[64];
    int       port;
    PetscBool flg, rootlocal = PETSC_FALSE, flg2, selectport = PETSC_FALSE;
    size_t    applinelen, introlen;
    char      sawsurl[256];

    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_log", &flg));
    if (flg) {
      char sawslog[PETSC_MAX_PATH_LEN];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_log", sawslog, sizeof(sawslog), NULL));
      /* -saws_log with no value means "log to the default file" */
      if (sawslog[0]) {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (sawslog));
      } else {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (NULL));
      }
    }
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_https", cert, sizeof(cert), &flg));
    if (flg) PetscCallSAWs(SAWs_Set_Use_HTTPS, (cert));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select", &selectport, NULL));
    if (selectport) {
      PetscCallSAWs(SAWs_Get_Available_Port, (&port));
      PetscCallSAWs(SAWs_Set_Port, (port));
    } else {
      PetscCall(PetscOptionsGetInt(NULL, NULL, "-saws_port", &port, &flg));
      if (flg) PetscCallSAWs(SAWs_Set_Port, (port));
    }
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_root", root, sizeof(root), &flg));
    if (flg) {
      PetscCallSAWs(SAWs_Set_Document_Root, (root));
      /* a root of "." means the source for the running example may be served from the current directory */
      PetscCall(PetscStrcmp(root, ".", &rootlocal));
    } else {
      PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_options", &flg));
      if (flg) {
        PetscCall(PetscStrreplace(PETSC_COMM_WORLD, "${PETSC_DIR}/share/petsc/saws", root, sizeof(root)));
        PetscCallSAWs(SAWs_Set_Document_Root, (root));
      }
    }
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_local", &flg2));
    if (flg2) {
      char jsdir[PETSC_MAX_PATH_LEN];
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_SUP, "-saws_local option requires -saws_root option");
      PetscCall(PetscSNPrintf(jsdir, sizeof(jsdir), "%s/js", root));
      PetscCall(PetscTestDirectory(jsdir, 'r', &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "-saws_local option requires js directory in root directory");
      PetscCallSAWs(SAWs_Push_Local_Header, ());
    }
    PetscCall(PetscGetProgramName(programname, sizeof(programname)));
    PetscCall(PetscStrlen(help, &applinelen));
    /* pad the buffers: intro holds boilerplate + appline; appline holds the help text + markup */
    introlen = 4096 + applinelen;
    applinelen += 1024;
    PetscCall(PetscMalloc(applinelen, &appline));
    PetscCall(PetscMalloc(introlen, &intro));

    if (rootlocal) {
      /* only link to the source when <prog>.c.html actually exists in the served directory */
      PetscCall(PetscSNPrintf(appline, applinelen, "%s.c.html", programname));
      PetscCall(PetscTestFile(appline, 'r', &rootlocal));
    }
    PetscCall(PetscOptionsGetAll(NULL, &options));
    if (rootlocal && help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running <a href=\"%s.c.html\">%s</a> %s</center><br><center><pre>%s</pre></center><br>\n", programname, programname, options, help));
    } else if (help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center>Running %s %s</center><br><center><pre>%s</pre></center><br>", programname, options, help));
    } else {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running %s %s</center><br>\n", programname, options));
    }
    PetscCall(PetscFree(options));
    PetscCall(PetscGetVersion(version, sizeof(version)));
    PetscCall(PetscSNPrintf(intro, introlen,
                            "<body>\n"
                            "<center><h2> <a href=\"https://petsc.org/\">PETSc</a> Application Web server powered by <a href=\"https://bitbucket.org/saws/saws\">SAWs</a> </h2></center>\n"
                            "<center>This is the default PETSc application dashboard, from it you can access any published PETSc objects or logging data</center><br><center>%s configured with %s</center><br>\n"
                            "%s",
                            version, petscconfigureoptions, appline));
    PetscCallSAWs(SAWs_Push_Body, ("index.html", 0, intro));
    PetscCall(PetscFree(intro));
    PetscCall(PetscFree(appline));
    if (selectport) {
      PetscBool silent;

      /* another process may have grabbed the port so keep trying */
      while (SAWs_Initialize()) {
        PetscCallSAWs(SAWs_Get_Available_Port, (&port));
        PetscCallSAWs(SAWs_Set_Port, (port));
      }

      PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select_silent", &silent, NULL));
      if (!silent) {
        PetscCallSAWs(SAWs_Get_FullURL, (sizeof(sawsurl), sawsurl));
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Point your browser to %s for SAWs\n", sawsurl));
      }
    } else {
      PetscCallSAWs(SAWs_Initialize, ());
    }
    PetscCall(PetscCitationsRegister("@TechReport{ saws,\n"
                                     " Author = {Matt Otten and Jed Brown and Barry Smith},\n"
                                     " Title = {Scientific Application Web Server (SAWs) Users Manual},\n"
                                     " Institution = {Argonne National Laboratory},\n"
                                     " Year = 2013\n}\n",
                                     NULL));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Things must be done before MPI_Init() when MPI is not yet initialized, and can be shared between C init and Fortran init */
PETSC_INTERN PetscErrorCode PetscPreMPIInit_Private(void)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_HWLOC_SOLARIS_BUG)
  /* see MPI.py for details on this bug */
  (void)setenv("HWLOC_COMPONENTS", "-x86", 1);
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PetscDefined(HAVE_ADIOS)
  #include <adios.h>
  #include <adios_read.h>
int64_t Petsc_adios_group;
#endif
#if PetscDefined(HAVE_OPENMP)
  #include <omp.h>
PetscInt PetscNumOMPThreads;
#endif

#include <petsc/private/deviceimpl.h>
#if PetscDefined(HAVE_CUDA)
  #include <petscdevice_cuda.h>
// REMOVE ME
cudaStream_t PetscDefaultCudaStream = NULL;
#endif
#if PetscDefined(HAVE_HIP)
  #include <petscdevice_hip.h>
// REMOVE ME
hipStream_t PetscDefaultHipStream = NULL;
#endif

#if PetscDefined(HAVE_DLFCN_H)
  #include <dlfcn.h>
#endif
PETSC_INTERN PetscErrorCode PetscLogInitialize(void);
#if PetscDefined(HAVE_VIENNACL)
PETSC_EXTERN PetscErrorCode PetscViennaCLInit(void);
PetscBool                   PetscViennaCLSynchronize = PETSC_FALSE;
#endif

PetscBool PetscCIEnabled = PETSC_FALSE, PetscCIEnabledPortableErrorOutput = PETSC_FALSE;

/*
  PetscInitialize_Common - shared code between C and Fortran initialization

  prog:     program name
  file:     optional PETSc database file name. Might be in Fortran string format when 'ftn' is true
  help:     program help message
  ftn:      is it called from Fortran initialization (petscinitializef_)?
769 readarguments,len: used when fortran is true 770 */ 771 PETSC_INTERN PetscErrorCode PetscInitialize_Common(const char *prog, const char *file, const char *help, PetscBool ftn, PetscBool readarguments, PetscInt len) 772 { 773 PetscMPIInt size; 774 PetscBool flg = PETSC_TRUE; 775 char hostname[256]; 776 PetscBool blas_view_flag = PETSC_FALSE; 777 778 PetscFunctionBegin; 779 if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS); 780 /* these must be initialized in a routine, not as a constant declaration */ 781 PETSC_STDOUT = stdout; 782 PETSC_STDERR = stderr; 783 784 /* PetscCall can be used from now */ 785 PetscErrorHandlingInitialized = PETSC_TRUE; 786 787 /* 788 The checking over compatible runtime libraries is complicated by the MPI ABI initiative 789 https://wiki.mpich.org/mpich/index.php/ABI_Compatibility_Initiative which started with 790 MPICH v3.1 (Released February 2014) 791 IBM MPI v2.1 (December 2014) 792 Intel MPI Library v5.0 (2014) 793 Cray MPT v7.0.0 (June 2014) 794 As of July 31, 2017 the ABI number still appears to be 12, that is all of the versions 795 listed above and since that time are compatible. 796 797 Unfortunately the MPI ABI initiative has not defined a way to determine the ABI number 798 at compile time or runtime. Thus we will need to systematically track the allowed versions 799 and how they are represented in the mpi.h and MPI_Get_library_version() output in order 800 to perform the checking. 801 802 Currently we only check for pre MPI ABI versions (and packages that do not follow the MPI ABI). 803 804 Questions: 805 806 Should the checks for ABI incompatibility be only on the major version number below? 807 Presumably the output to stderr will be removed before a release. 
808 */ 809 810 #if defined(PETSC_HAVE_MPI_GET_LIBRARY_VERSION) 811 { 812 char mpilibraryversion[MPI_MAX_LIBRARY_VERSION_STRING]; 813 PetscMPIInt mpilibraryversionlength; 814 815 PetscCallMPI(MPI_Get_library_version(mpilibraryversion, &mpilibraryversionlength)); 816 /* check for MPICH versions before MPI ABI initiative */ 817 #if defined(MPICH_VERSION) 818 #if MPICH_NUMVERSION < 30100000 819 { 820 char *ver, *lf; 821 PetscBool flg = PETSC_FALSE; 822 823 PetscCall(PetscStrstr(mpilibraryversion, "MPICH Version:", &ver)); 824 if (ver) { 825 PetscCall(PetscStrchr(ver, '\n', &lf)); 826 if (lf) { 827 *lf = 0; 828 PetscCall(PetscStrendswith(ver, MPICH_VERSION, &flg)); 829 } 830 } 831 if (!flg) { 832 PetscCall(PetscInfo(NULL, "PETSc warning --- MPICH library version \n%s does not match what PETSc was compiled with %s.\n", mpilibraryversion, MPICH_VERSION)); 833 flg = PETSC_TRUE; 834 } 835 } 836 #endif 837 /* check for Open MPI version, it is not part of the MPI ABI initiative (is it part of another initiative that needs to be handled?) 
*/ 838 #elif defined(PETSC_HAVE_OPENMPI) 839 { 840 char *ver, bs[MPI_MAX_LIBRARY_VERSION_STRING], *bsf; 841 PetscBool flg = PETSC_FALSE; 842 #define PSTRSZ 2 843 char ompistr1[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"Open MPI", "FUJITSU MPI"}; 844 char ompistr2[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"v", "Library "}; 845 int i; 846 for (i = 0; i < PSTRSZ; i++) { 847 PetscCall(PetscStrstr(mpilibraryversion, ompistr1[i], &ver)); 848 if (ver) { 849 PetscCall(PetscSNPrintf(bs, MPI_MAX_LIBRARY_VERSION_STRING, "%s%d.%d", ompistr2[i], PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR)); 850 PetscCall(PetscStrstr(ver, bs, &bsf)); 851 if (bsf) flg = PETSC_TRUE; 852 break; 853 } 854 } 855 if (!flg) { 856 PetscCall(PetscInfo(NULL, "PETSc warning --- Open MPI library version \n%s does not match what PETSc was compiled with %d.%d.\n", mpilibraryversion, PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR)); 857 flg = PETSC_TRUE; 858 } 859 } 860 #endif 861 } 862 #endif 863 864 #if defined(PETSC_HAVE_DLADDR) && !(defined(__cray__) && defined(__clang__)) 865 /* These symbols are currently in the Open MPI and MPICH libraries; they may not always be, in that case the test will simply not detect the problem */ 866 PetscCheck(!dlsym(RTLD_DEFAULT, "ompi_mpi_init") || !dlsym(RTLD_DEFAULT, "MPID_Abort"), PETSC_COMM_SELF, PETSC_ERR_MPI_LIB_INCOMP, "Application was linked against both Open MPI and MPICH based MPI libraries and will not run correctly"); 867 #endif 868 869 /* on Windows - set printf to default to printing 2 digit exponents */ 870 #if defined(PETSC_HAVE__SET_OUTPUT_FORMAT) 871 _set_output_format(_TWO_DIGIT_EXPONENT); 872 #endif 873 874 PetscCall(PetscOptionsCreateDefault()); 875 876 PetscFinalizeCalled = PETSC_FALSE; 877 878 PetscCall(PetscSetProgramName(prog)); 879 PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockOpen)); 880 PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStdout)); 881 
PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStderr)); 882 PetscCall(PetscSpinlockCreate(&PetscCommSpinLock)); 883 884 if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD; 885 PetscCallMPI(MPI_Comm_set_errhandler(PETSC_COMM_WORLD, MPI_ERRORS_RETURN)); 886 887 if (PETSC_MPI_ERROR_CLASS == MPI_ERR_LASTCODE) { 888 PetscCallMPI(MPI_Add_error_class(&PETSC_MPI_ERROR_CLASS)); 889 PetscCallMPI(MPI_Add_error_code(PETSC_MPI_ERROR_CLASS, &PETSC_MPI_ERROR_CODE)); 890 } 891 892 /* Done after init due to a bug in MPICH-GM? */ 893 PetscCall(PetscErrorPrintfInitialize()); 894 895 PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PetscGlobalRank)); 896 PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PetscGlobalSize)); 897 898 MPIU_BOOL = MPI_INT; 899 MPIU_ENUM = MPI_INT; 900 MPIU_FORTRANADDR = (sizeof(void *) == sizeof(int)) ? MPI_INT : MPIU_INT64; 901 if (sizeof(size_t) == sizeof(unsigned)) MPIU_SIZE_T = MPI_UNSIGNED; 902 else if (sizeof(size_t) == sizeof(unsigned long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG; 903 #if defined(PETSC_SIZEOF_LONG_LONG) 904 else if (sizeof(size_t) == sizeof(unsigned long long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG_LONG; 905 #endif 906 else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_SUP_SYS, "Could not find MPI type for size_t"); 907 908 /* 909 Initialized the global complex variable; this is because with 910 shared libraries the constructors for global variables 911 are not called; at least on IRIX. 912 */ 913 #if defined(PETSC_HAVE_COMPLEX) 914 { 915 #if defined(PETSC_CLANGUAGE_CXX) && !defined(PETSC_USE_REAL___FLOAT128) 916 PetscComplex ic(0.0, 1.0); 917 PETSC_i = ic; 918 #else 919 PETSC_i = _Complex_I; 920 #endif 921 } 922 #endif /* PETSC_HAVE_COMPLEX */ 923 924 /* 925 Create the PETSc MPI reduction operator that sums of the first 926 half of the entries and maxes the second half. 
927 */ 928 PetscCallMPI(MPI_Op_create(MPIU_MaxSum_Local, 1, &MPIU_MAXSUM_OP)); 929 930 #if defined(PETSC_HAVE_REAL___FLOAT128) 931 PetscCallMPI(MPI_Type_contiguous(2, MPI_DOUBLE, &MPIU___FLOAT128)); 932 PetscCallMPI(MPI_Type_commit(&MPIU___FLOAT128)); 933 #if defined(PETSC_HAVE_COMPLEX) 934 PetscCallMPI(MPI_Type_contiguous(4, MPI_DOUBLE, &MPIU___COMPLEX128)); 935 PetscCallMPI(MPI_Type_commit(&MPIU___COMPLEX128)); 936 #endif 937 #endif 938 #if defined(PETSC_HAVE_REAL___FP16) 939 PetscCallMPI(MPI_Type_contiguous(2, MPI_CHAR, &MPIU___FP16)); 940 PetscCallMPI(MPI_Type_commit(&MPIU___FP16)); 941 #endif 942 943 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 944 PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM)); 945 PetscCallMPI(MPI_Op_create(PetscMax_Local, 1, &MPIU_MAX)); 946 PetscCallMPI(MPI_Op_create(PetscMin_Local, 1, &MPIU_MIN)); 947 #elif defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_HAVE_REAL___FP16) 948 PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM___FP16___FLOAT128)); 949 #endif 950 951 PetscCallMPI(MPI_Type_contiguous(2, MPIU_SCALAR, &MPIU_2SCALAR)); 952 PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp)); 953 PetscCallMPI(MPI_Type_commit(&MPIU_2SCALAR)); 954 955 #if defined(PETSC_USE_64BIT_INDICES) 956 PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT)); 957 PetscCallMPI(MPI_Type_commit(&MPIU_2INT)); 958 PetscCallMPI(MPI_Type_commit(&MPIU_2INT)); 959 #endif 960 961 /* create datatypes used by MPIU_MAXLOC, MPIU_MINLOC and PetscSplitReduction_Op */ 962 #if !defined(PETSC_HAVE_MPIUNI) 963 { 964 PetscMPIInt blockSizes[2] = {1, 1}; 965 MPI_Aint blockOffsets[2] = {offsetof(struct petsc_mpiu_real_int, v), offsetof(struct petsc_mpiu_real_int, i)}; 966 MPI_Datatype blockTypes[2] = {MPIU_REAL, MPIU_INT}, tmpStruct; 967 968 PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct)); 969 PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, 
sizeof(struct petsc_mpiu_real_int), &MPIU_REAL_INT)); 970 PetscCallMPI(MPI_Type_free(&tmpStruct)); 971 PetscCallMPI(MPI_Type_commit(&MPIU_REAL_INT)); 972 } 973 { 974 PetscMPIInt blockSizes[2] = {1, 1}; 975 MPI_Aint blockOffsets[2] = {offsetof(struct petsc_mpiu_scalar_int, v), offsetof(struct petsc_mpiu_scalar_int, i)}; 976 MPI_Datatype blockTypes[2] = {MPIU_SCALAR, MPIU_INT}, tmpStruct; 977 978 PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct)); 979 PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_scalar_int), &MPIU_SCALAR_INT)); 980 PetscCallMPI(MPI_Type_free(&tmpStruct)); 981 PetscCallMPI(MPI_Type_commit(&MPIU_SCALAR_INT)); 982 } 983 #endif 984 985 #if defined(PETSC_USE_64BIT_INDICES) 986 PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT)); 987 PetscCallMPI(MPI_Type_commit(&MPIU_2INT)); 988 989 #if !defined(PETSC_HAVE_MPIUNI) 990 { 991 int blockSizes[] = {1, 1}; 992 MPI_Aint blockOffsets[] = {offsetof(struct petsc_mpiu_int_mpiint, a), offsetof(struct petsc_mpiu_int_mpiint, b)}; 993 MPI_Datatype blockTypes[] = {MPIU_INT, MPI_INT}, tmpStruct; 994 995 PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct)); 996 PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_int_mpiint), &MPIU_INT_MPIINT)); 997 PetscCallMPI(MPI_Type_free(&tmpStruct)); 998 PetscCallMPI(MPI_Type_commit(&MPIU_INT_MPIINT)); 999 } 1000 #endif 1001 #endif 1002 PetscCallMPI(MPI_Type_contiguous(4, MPI_INT, &MPI_4INT)); 1003 PetscCallMPI(MPI_Type_commit(&MPI_4INT)); 1004 PetscCallMPI(MPI_Type_contiguous(4, MPIU_INT, &MPIU_4INT)); 1005 PetscCallMPI(MPI_Type_commit(&MPIU_4INT)); 1006 1007 /* 1008 Attributes to be set on PETSc communicators 1009 */ 1010 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &Petsc_Counter_keyval, (void *)0)); 1011 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_InnerComm_Attr_DeleteFn, 
&Petsc_InnerComm_keyval, (void *)0)); 1012 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_OuterComm_Attr_DeleteFn, &Petsc_OuterComm_keyval, (void *)0)); 1013 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_ShmComm_Attr_DeleteFn, &Petsc_ShmComm_keyval, (void *)0)); 1014 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_CreationIdx_keyval, (void *)0)); 1015 PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Garbage_HMap_keyval, (void *)0)); 1016 1017 #if defined(PETSC_USE_FORTRAN_BINDINGS) 1018 if (ftn) PetscCall(PetscInitFortran_Private(readarguments, file, len)); 1019 else 1020 #endif 1021 PetscCall(PetscOptionsInsert(NULL, &PetscGlobalArgc, &PetscGlobalArgs, file)); 1022 1023 /* call a second time so it can look in the options database */ 1024 PetscCall(PetscErrorPrintfInitialize()); 1025 1026 /* 1027 Check system options and print help 1028 */ 1029 PetscCall(PetscOptionsCheckInitial_Private(help)); 1030 1031 /* 1032 Creates the logging data structures; this is enabled even if logging is not turned on 1033 This is the last thing we do before returning to the user code to prevent having the 1034 logging numbers contaminated by any startup time associated with MPI 1035 */ 1036 PetscCall(PetscLogInitialize()); 1037 1038 /* 1039 Initialize PetscDevice and PetscDeviceContext 1040 1041 Note to any future devs thinking of moving this, proper initialization requires: 1042 1. MPI initialized 1043 2. Options DB initialized 1044 3. Petsc error handling initialized, specifically signal handlers. This expects to set up 1045 its own SIGSEV handler via the push/pop interface. 1046 4. 
Logging initialized 1047 */ 1048 PetscCall(PetscDeviceInitializeFromOptions_Internal(PETSC_COMM_WORLD)); 1049 1050 #if PetscDefined(HAVE_VIENNACL) 1051 flg = PETSC_FALSE; 1052 PetscCall(PetscOptionsHasName(NULL, NULL, "-log_view", &flg)); 1053 if (!flg) PetscCall(PetscOptionsGetBool(NULL, NULL, "-viennacl_synchronize", &flg, NULL)); 1054 PetscViennaCLSynchronize = flg; 1055 PetscCall(PetscViennaCLInit()); 1056 #endif 1057 1058 PetscCall(PetscCitationsInitialize()); 1059 1060 #if defined(PETSC_HAVE_SAWS) 1061 PetscCall(PetscInitializeSAWs(ftn ? NULL : help)); 1062 flg = PETSC_FALSE; 1063 PetscCall(PetscOptionsHasName(NULL, NULL, "-stack_view", &flg)); 1064 if (flg) PetscCall(PetscStackViewSAWs()); 1065 #endif 1066 1067 /* 1068 Load the dynamic libraries (on machines that support them), this registers all 1069 the solvers etc. (On non-dynamic machines this initializes the PetscDraw and PetscViewer classes) 1070 */ 1071 PetscCall(PetscInitialize_DynamicLibraries()); 1072 1073 PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size)); 1074 PetscCall(PetscInfo(NULL, "PETSc successfully started: number of processors = %d\n", size)); 1075 PetscCall(PetscGetHostName(hostname, sizeof(hostname))); 1076 PetscCall(PetscInfo(NULL, "Running on machine: %s\n", hostname)); 1077 #if defined(PETSC_HAVE_OPENMP) 1078 { 1079 PetscBool omp_view_flag; 1080 char *threads = getenv("OMP_NUM_THREADS"); 1081 1082 if (threads) { 1083 PetscCall(PetscInfo(NULL, "Number of OpenMP threads %s (as given by OMP_NUM_THREADS)\n", threads)); 1084 (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumOMPThreads); 1085 } else { 1086 PetscNumOMPThreads = (PetscInt)omp_get_max_threads(); 1087 PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (as given by omp_get_max_threads())\n", PetscNumOMPThreads)); 1088 } 1089 PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "OpenMP options", "Sys"); 1090 PetscCall(PetscOptionsInt("-omp_num_threads", "Number of OpenMP threads to use (can also use environmental 
variable OMP_NUM_THREADS", "None", PetscNumOMPThreads, &PetscNumOMPThreads, &flg)); 1091 PetscCall(PetscOptionsName("-omp_view", "Display OpenMP number of threads", NULL, &omp_view_flag)); 1092 PetscOptionsEnd(); 1093 if (flg) { 1094 PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (given by -omp_num_threads)\n", PetscNumOMPThreads)); 1095 omp_set_num_threads((int)PetscNumOMPThreads); 1096 } 1097 if (omp_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "OpenMP: number of threads %" PetscInt_FMT "\n", PetscNumOMPThreads)); 1098 } 1099 #endif 1100 1101 PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "BLAS options", "Sys"); 1102 PetscCall(PetscOptionsName("-blas_view", "Display number of threads to use for BLAS operations", NULL, &blas_view_flag)); 1103 #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) || defined(PETSC_HAVE_MKL_SET_NUM_THREADS) || defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS) 1104 { 1105 char *threads = NULL; 1106 1107 /* determine any default number of threads requested in the environment; TODO: Apple libraries? 
*/ 1108 #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) 1109 threads = getenv("BLIS_NUM_THREADS"); 1110 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by BLIS_NUM_THREADS\n", threads)); 1111 if (!threads) { 1112 threads = getenv("OMP_NUM_THREADS"); 1113 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by OMP_NUM_THREADS\n", threads)); 1114 } 1115 #elif defined(PETSC_HAVE_MKL_SET_NUM_THREADS) 1116 threads = getenv("MKL_NUM_THREADS"); 1117 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by MKL_NUM_THREADS\n", threads)); 1118 if (!threads) { 1119 threads = getenv("OMP_NUM_THREADS"); 1120 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by OMP_NUM_THREADS\n", threads)); 1121 } 1122 #elif defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS) 1123 threads = getenv("OPENBLAS_NUM_THREADS"); 1124 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OPENBLAS_NUM_THREADS\n", threads)); 1125 if (!threads) { 1126 threads = getenv("OMP_NUM_THREADS"); 1127 if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OMP_NUM_THREADS\n", threads)); 1128 } 1129 #endif 1130 if (threads) (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumBLASThreads); 1131 PetscCall(PetscOptionsInt("-blas_num_threads", "Number of threads to use for BLAS operations", "None", PetscNumBLASThreads, &PetscNumBLASThreads, &flg)); 1132 if (flg) PetscCall(PetscInfo(NULL, "BLAS: Command line number of BLAS thread %" PetscInt_FMT "given by -blas_num_threads\n", PetscNumBLASThreads)); 1133 if (flg || threads) { 1134 PetscCall(PetscBLASSetNumThreads(PetscNumBLASThreads)); 1135 if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: number of threads %" PetscInt_FMT "\n", PetscNumBLASThreads)); 1136 } 1137 } 1138 #elif defined(PETSC_HAVE_APPLE_ACCELERATE) 1139 
PetscCall(PetscInfo(NULL, "BLAS: Apple Accelerate library, thread support with no user control\n")); 1140 if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: Apple Accelerate library, thread support with no user control\n")); 1141 #else 1142 if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: no thread support\n")); 1143 #endif 1144 PetscOptionsEnd(); 1145 1146 #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32) 1147 /* 1148 Tell MPI about our own data representation converter, this would/should be used if extern32 is not supported by the MPI 1149 1150 Currently not used because it is not supported by MPICH. 1151 */ 1152 if (!PetscBinaryBigEndian()) PetscCallMPI(MPI_Register_datarep((char *)"petsc", PetscDataRep_read_conv_fn, PetscDataRep_write_conv_fn, PetscDataRep_extent_fn, NULL)); 1153 #endif 1154 1155 #if defined(PETSC_SERIALIZE_FUNCTIONS) 1156 PetscCall(PetscFPTCreate(10000)); 1157 #endif 1158 1159 #if defined(PETSC_HAVE_HWLOC) 1160 { 1161 PetscViewer viewer; 1162 PetscCall(PetscOptionsCreateViewer(PETSC_COMM_WORLD, NULL, NULL, "-process_view", &viewer, NULL, &flg)); 1163 if (flg) { 1164 PetscCall(PetscProcessPlacementView(viewer)); 1165 PetscCall(PetscViewerDestroy(&viewer)); 1166 } 1167 } 1168 #endif 1169 1170 flg = PETSC_TRUE; 1171 PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewfromoptions", &flg, NULL)); 1172 if (!flg) PetscCall(PetscOptionsPushCreateViewerOff(PETSC_TRUE)); 1173 1174 #if defined(PETSC_HAVE_ADIOS) 1175 PetscCallExternal(adios_init_noxml, PETSC_COMM_WORLD); 1176 PetscCallExternal(adios_declare_group, &Petsc_adios_group, "PETSc", "", adios_stat_default); 1177 PetscCallExternal(adios_select_method, Petsc_adios_group, "MPI", "", ""); 1178 PetscCallExternal(adios_read_init_method, ADIOS_READ_METHOD_BP, PETSC_COMM_WORLD, ""); 1179 #endif 1180 1181 #if defined(__VALGRIND_H) 1182 PETSC_RUNNING_ON_VALGRIND = RUNNING_ON_VALGRIND ? 
PETSC_TRUE : PETSC_FALSE; 1183 #if defined(PETSC_USING_DARWIN) && defined(PETSC_BLASLAPACK_SDOT_RETURNS_DOUBLE) 1184 if (PETSC_RUNNING_ON_VALGRIND) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING: Running valgrind with the macOS native BLAS and LAPACK can fail. If it fails, try configuring with --download-fblaslapack or --download-f2cblaslapack")); 1185 #endif 1186 #endif 1187 /* 1188 Set flag that we are completely initialized 1189 */ 1190 PetscInitializeCalled = PETSC_TRUE; 1191 1192 PetscCall(PetscOptionsHasName(NULL, NULL, "-python", &flg)); 1193 if (flg) PetscCall(PetscPythonInitialize(NULL, NULL)); 1194 1195 PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg)); 1196 if (flg) PetscCall(PetscInfo(NULL, "Running MPI Linear Solver Server\n")); 1197 if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerBegin()); 1198 else PetscCheck(!flg, PETSC_COMM_WORLD, PETSC_ERR_SUP, "PETSc configured using -with-single-library=0; -mpi_linear_solver_server not supported in that case"); 1199 PetscFunctionReturn(PETSC_SUCCESS); 1200 } 1201 1202 // "Unknown section 'Environmental Variables'" 1203 // PetscClangLinter pragma disable: -fdoc-section-header-unknown 1204 /*@C 1205 PetscInitialize - Initializes the PETSc database and MPI. 1206 `PetscInitialize()` calls MPI_Init() if that has yet to be called, 1207 so this routine should always be called near the beginning of 1208 your program -- usually the very first line! 1209 1210 Collective on `MPI_COMM_WORLD` or `PETSC_COMM_WORLD` if it has been set 1211 1212 Input Parameters: 1213 + argc - count of number of command line arguments 1214 . args - the command line arguments 1215 . file - [optional] PETSc database file, append ":yaml" to filename to specify YAML options format. 1216 Use NULL or empty string to not check for code specific file. 1217 Also checks ~/.petscrc, .petscrc and petscrc. 
1218 Use -skip_petscrc in the code specific file (or command line) to skip ~/.petscrc, .petscrc and petscrc files. 1219 - help - [optional] Help message to print, use NULL for no message 1220 1221 If you wish PETSc code to run ONLY on a subcommunicator of `MPI_COMM_WORLD`, create that 1222 communicator first and assign it to `PETSC_COMM_WORLD` BEFORE calling `PetscInitialize()`. Thus if you are running a 1223 four process job and two processes will run PETSc and have `PetscInitialize()` and PetscFinalize() and two process will not, 1224 then do this. If ALL processes in the job are using `PetscInitialize()` and `PetscFinalize()` then you don't need to do this, even 1225 if different subcommunicators of the job are doing different things with PETSc. 1226 1227 Options Database Keys: 1228 + -help [intro] - prints help method for each option; if intro is given the program stops after printing the introductory help message 1229 . -start_in_debugger [noxterm,dbx,xdb,gdb,...] - Starts program in debugger 1230 . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected 1231 . -on_error_emacs <machinename> - causes emacsclient to jump to error file 1232 . -on_error_abort - calls `abort()` when error detected (no traceback) 1233 . -on_error_mpiabort - calls `MPI_abort()` when error detected 1234 . -error_output_stdout - prints PETSc error messages to stdout instead of the default stderr 1235 . -error_output_none - does not print the error messages (but handles errors in the same way as if this was not called) 1236 . -debugger_ranks [rank1,rank2,...] - Indicates ranks to start in debugger 1237 . -debugger_pause [sleeptime] (in seconds) - Pauses debugger 1238 . -stop_for_debugger - Print message on how to attach debugger manually to 1239 process and wait (-debugger_pause) seconds for attachment 1240 . -malloc_dump - prints a list of all unfreed memory at the end of the run 1241 . 
-malloc_test - like -malloc_dump -malloc_debug, but only active for debugging builds, ignored in optimized build. May want to set in PETSC_OPTIONS environmental variable 1242 . -malloc_view - show a list of all allocated memory during `PetscFinalize()` 1243 . -malloc_view_threshold <t> - only list memory allocations of size greater than t with -malloc_view 1244 . -malloc_requested_size - malloc logging will record the requested size rather than size after alignment 1245 . -fp_trap - Stops on floating point exceptions 1246 . -no_signal_handler - Indicates not to trap error signals 1247 . -shared_tmp - indicates /tmp directory is shared by all processors 1248 . -not_shared_tmp - each processor has own /tmp 1249 . -tmp - alternative name of /tmp directory 1250 . -get_total_flops - returns total flops done by all processors 1251 - -memory_view - Print memory usage at end of run 1252 1253 Options Database Keys for Option Database: 1254 + -skip_petscrc - skip the default option files ~/.petscrc, .petscrc, petscrc 1255 . -options_monitor - monitor all set options to standard output for the whole program run 1256 - -options_monitor_cancel - cancel options monitoring hard-wired using `PetscOptionsMonitorSet()` 1257 1258 Options -options_monitor_{all,cancel} are 1259 position-independent and apply to all options set since the PETSc start. 1260 They can be used also in option files. 1261 1262 See `PetscOptionsMonitorSet()` to do monitoring programmatically. 1263 1264 Options Database Keys for Profiling: 1265 See Users-Manual: ch_profiling for details. 1266 + -info [filename][:[~]<list,of,classnames>[:[~]self]] - Prints verbose information. See `PetscInfo()`. 1267 . -log_sync - Enable barrier synchronization for all events. This option is useful to debug imbalance within each event, 1268 however it slows things down and gives a distorted view of the overall runtime. 1269 . 
-log_trace [filename] - Print traces of all PETSc calls to the screen (useful to determine where a program
                         hangs without running in the debugger). See `PetscLogTraceBegin()`.
. -log_view [:filename:format][,[:filename:format]...] - Prints summary of flop and timing information to screen or file, see `PetscLogView()` (up to 4 viewers)
. -log_view_memory - Includes in the summary from -log_view the memory used in each event, see `PetscLogView()`.
. -log_view_gpu_time - Includes in the summary from -log_view the time used in each GPU kernel, see `PetscLogView()`.
. -log_exclude: <vec,mat,pc,ksp,snes> - excludes subset of object classes from logging
. -log [filename] - Logs profiling information in a dump file, see `PetscLogDump()`.
. -log_all [filename] - Same as `-log`.
. -log_mpe [filename] - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution)
. -log_perfstubs - Starts a log handler with the perfstubs interface (which is used by TAU)
. -log_nvtx - Starts an nvtx log handler for use with Nsight
. -viewfromoptions on,off - Enable or disable `XXXSetFromOptions()` calls, for applications with many small solves turn this off
- -check_pointer_intensity 0,1,2 - if pointers are checked for validity (debug version only), using 0 will result in faster code

   Options Database Keys for SAWs:
+ -saws_port <portnumber> - port number to publish SAWs data, default is 8080
. -saws_port_auto_select - have SAWs select a new unique port number where it publishes the data, the URL is printed to the screen
                           this is useful when you are running many jobs that utilize SAWs at the same time
. -saws_log <filename> - save a log of all SAWs communication
.
-saws_https <certificate file> - have SAWs use HTTPS instead of HTTP 1289 - -saws_root <directory> - allow SAWs to have access to the given directory to search for requested resources and files 1290 1291 Environmental Variables: 1292 + `PETSC_TMP` - alternative tmp directory 1293 . `PETSC_SHARED_TMP` - tmp is shared by all processes 1294 . `PETSC_NOT_SHARED_TMP` - each process has its own private tmp 1295 . `PETSC_OPTIONS` - a string containing additional options for petsc in the form of command line "-key value" pairs 1296 . `PETSC_OPTIONS_YAML` - (requires configuring PETSc to use libyaml) a string containing additional options for petsc in the form of a YAML document 1297 . `PETSC_VIEWER_SOCKET_PORT` - socket number to use for socket viewer 1298 - `PETSC_VIEWER_SOCKET_MACHINE` - machine to use for socket viewer to connect to 1299 1300 Level: beginner 1301 1302 Note: 1303 If for some reason you must call `MPI_Init()` separately, call 1304 it before `PetscInitialize()`. 1305 1306 Fortran Notes: 1307 In Fortran this routine can be called with 1308 .vb 1309 call PetscInitialize(ierr) 1310 call PetscInitialize(file,ierr) or 1311 call PetscInitialize(file,help,ierr) 1312 .ve 1313 1314 If your main program is C but you call Fortran code that also uses PETSc you need to call `PetscInitializeFortran()` soon after 1315 calling `PetscInitialize()`. 1316 1317 Options Database Key for Developers: 1318 . -checkfunctionlist - automatically checks that function lists associated with objects are correctly cleaned up. Produces messages of the form: 1319 "function name: MatInodeGetInodeSizes_C" if they are not cleaned up. 
This flag is always set for the test harness (in framework.py) 1320 1321 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscInitializeNoArguments()`, `PetscLogGpuTime()` 1322 @*/ 1323 PetscErrorCode PetscInitialize(int *argc, char ***args, const char file[], const char help[]) 1324 { 1325 PetscMPIInt flag; 1326 const char *prog = "Unknown Name", *mpienv; 1327 1328 PetscFunctionBegin; 1329 if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS); 1330 PetscCallMPI(MPI_Initialized(&flag)); 1331 if (!flag) { 1332 PetscCheck(PETSC_COMM_WORLD == MPI_COMM_NULL, PETSC_COMM_SELF, PETSC_ERR_SUP, "You cannot set PETSC_COMM_WORLD if you have not initialized MPI first"); 1333 PetscCall(PetscPreMPIInit_Private()); 1334 #if defined(PETSC_HAVE_MPI_INIT_THREAD) 1335 { 1336 PetscMPIInt provided; 1337 PetscCallMPI(MPI_Init_thread(argc, args, PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE ? MPI_THREAD_FUNNELED : PETSC_MPI_THREAD_REQUIRED, &provided)); 1338 PetscCheck(PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE || provided >= PETSC_MPI_THREAD_REQUIRED, PETSC_COMM_SELF, PETSC_ERR_MPI, "The MPI implementation's provided thread level is less than what you required"); 1339 if (PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE) PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_FUNNELED; // assign it a valid value after check-up 1340 } 1341 #else 1342 PetscCallMPI(MPI_Init(argc, args)); 1343 #endif 1344 if (PetscDefined(HAVE_MPIUNI)) { 1345 mpienv = getenv("PMI_SIZE"); 1346 if (!mpienv) mpienv = getenv("OMPI_COMM_WORLD_SIZE"); 1347 if (mpienv) { 1348 PetscInt isize; 1349 PetscCall(PetscOptionsStringToInt(mpienv, &isize)); 1350 if (isize != 1) printf("You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc\n"); 1351 PetscCheck(isize == 1, MPI_COMM_SELF, PETSC_ERR_MPI, "You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc"); 1352 } 1353 } 1354 
PetscBeganMPI = PETSC_TRUE; 1355 } 1356 1357 if (argc && *argc) prog = **args; 1358 if (argc && args) { 1359 PetscGlobalArgc = *argc; 1360 PetscGlobalArgs = *args; 1361 } 1362 PetscCall(PetscInitialize_Common(prog, file, help, PETSC_FALSE, PETSC_FALSE, 0)); 1363 PetscFunctionReturn(PETSC_SUCCESS); 1364 } 1365 1366 PETSC_INTERN PetscObject *PetscObjects; 1367 PETSC_INTERN PetscInt PetscObjectsCounts; 1368 PETSC_INTERN PetscInt PetscObjectsMaxCounts; 1369 PETSC_INTERN PetscBool PetscObjectsLog; 1370 1371 /* 1372 Frees all the MPI types and operations that PETSc may have created 1373 */ 1374 PetscErrorCode PetscFreeMPIResources(void) 1375 { 1376 PetscFunctionBegin; 1377 #if defined(PETSC_HAVE_REAL___FLOAT128) 1378 PetscCallMPI(MPI_Type_free(&MPIU___FLOAT128)); 1379 #if defined(PETSC_HAVE_COMPLEX) 1380 PetscCallMPI(MPI_Type_free(&MPIU___COMPLEX128)); 1381 #endif 1382 #endif 1383 #if defined(PETSC_HAVE_REAL___FP16) 1384 PetscCallMPI(MPI_Type_free(&MPIU___FP16)); 1385 #endif 1386 1387 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16) 1388 PetscCallMPI(MPI_Op_free(&MPIU_SUM)); 1389 PetscCallMPI(MPI_Op_free(&MPIU_MAX)); 1390 PetscCallMPI(MPI_Op_free(&MPIU_MIN)); 1391 #elif defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_HAVE_REAL___FP16) 1392 PetscCallMPI(MPI_Op_free(&MPIU_SUM___FP16___FLOAT128)); 1393 #endif 1394 1395 PetscCallMPI(MPI_Type_free(&MPIU_2SCALAR)); 1396 PetscCallMPI(MPI_Type_free(&MPIU_REAL_INT)); 1397 PetscCallMPI(MPI_Type_free(&MPIU_SCALAR_INT)); 1398 #if defined(PETSC_USE_64BIT_INDICES) 1399 PetscCallMPI(MPI_Type_free(&MPIU_2INT)); 1400 PetscCallMPI(MPI_Type_free(&MPIU_INT_MPIINT)); 1401 #endif 1402 PetscCallMPI(MPI_Type_free(&MPI_4INT)); 1403 PetscCallMPI(MPI_Type_free(&MPIU_4INT)); 1404 PetscCallMPI(MPI_Op_free(&MPIU_MAXSUM_OP)); 1405 PetscCallMPI(MPI_Op_free(&Petsc_Garbage_SetIntersectOp)); 1406 PetscFunctionReturn(PETSC_SUCCESS); 1407 } 1408 1409 PETSC_INTERN PetscErrorCode PetscLogFinalize(void); 1410 PETSC_EXTERN 
PetscErrorCode PetscFreeAlign(void *, int, const char[], const char[]); 1411 1412 /*@ 1413 PetscFinalize - Checks for options to be called at the conclusion 1414 of the program. `MPI_Finalize()` is called only if the user had not 1415 called `MPI_Init()` before calling `PetscInitialize()`. 1416 1417 Collective on `PETSC_COMM_WORLD` 1418 1419 Options Database Keys: 1420 + -options_view - Calls `PetscOptionsView()` 1421 . -options_left - Prints unused options that remain in the database 1422 . -objects_dump [all] - Prints list of objects allocated by the user that have not been freed, the option all cause all outstanding objects to be listed 1423 . -mpidump - Calls PetscMPIDump() 1424 . -malloc_dump <optional filename> - Calls `PetscMallocDump()`, displays all memory allocated that has not been freed 1425 . -memory_view - Prints total memory usage 1426 - -malloc_view <optional filename> - Prints list of all memory allocated and in what functions 1427 1428 Level: beginner 1429 1430 Note: 1431 See `PetscInitialize()` for other runtime options. 
1432 1433 .seealso: `PetscInitialize()`, `PetscOptionsView()`, `PetscMallocDump()`, `PetscMPIDump()`, `PetscEnd()` 1434 @*/ 1435 PetscErrorCode PetscFinalize(void) 1436 { 1437 PetscMPIInt rank; 1438 PetscInt nopt; 1439 PetscBool flg1 = PETSC_FALSE, flg2 = PETSC_FALSE, flg3 = PETSC_FALSE; 1440 PetscBool flg; 1441 char mname[PETSC_MAX_PATH_LEN]; 1442 1443 PetscFunctionBegin; 1444 PetscCheck(PetscInitializeCalled, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "PetscInitialize() must be called before PetscFinalize()"); 1445 PetscCall(PetscInfo(NULL, "PetscFinalize() called\n")); 1446 1447 PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg)); 1448 if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerEnd()); 1449 1450 PetscCall(PetscFreeAlign(PetscGlobalArgsFortran, 0, NULL, NULL)); 1451 PetscGlobalArgc = 0; 1452 PetscGlobalArgs = NULL; 1453 1454 /* Clean up Garbage automatically on COMM_SELF and COMM_WORLD at finalize */ 1455 { 1456 union 1457 { 1458 MPI_Comm comm; 1459 void *ptr; 1460 } ucomm; 1461 PetscMPIInt flg; 1462 void *tmp; 1463 1464 PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg)); 1465 if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg)); 1466 if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_SELF)); 1467 PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg)); 1468 if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg)); 1469 if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD)); 1470 } 1471 1472 PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank)); 1473 #if defined(PETSC_HAVE_ADIOS) 1474 PetscCallExternal(adios_read_finalize_method, ADIOS_READ_METHOD_BP_AGGREGATE); 1475 PetscCallExternal(adios_finalize, rank); 1476 #endif 1477 PetscCall(PetscOptionsHasName(NULL, NULL, "-citations", &flg)); 1478 if (flg) { 1479 char *cits, filename[PETSC_MAX_PATH_LEN]; 1480 FILE *fd = 
PETSC_STDOUT; 1481 1482 PetscCall(PetscOptionsGetString(NULL, NULL, "-citations", filename, sizeof(filename), NULL)); 1483 if (filename[0]) PetscCall(PetscFOpen(PETSC_COMM_WORLD, filename, "w", &fd)); 1484 PetscCall(PetscSegBufferGet(PetscCitationsList, 1, &cits)); 1485 cits[0] = 0; 1486 PetscCall(PetscSegBufferExtractAlloc(PetscCitationsList, &cits)); 1487 PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "If you publish results based on this computation please cite the following:\n")); 1488 PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n")); 1489 PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "%s", cits)); 1490 PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n")); 1491 PetscCall(PetscFClose(PETSC_COMM_WORLD, fd)); 1492 PetscCall(PetscFree(cits)); 1493 } 1494 PetscCall(PetscSegBufferDestroy(&PetscCitationsList)); 1495 1496 #if defined(PETSC_SERIALIZE_FUNCTIONS) 1497 PetscCall(PetscFPTDestroy()); 1498 #endif 1499 1500 #if defined(PETSC_HAVE_SAWS) 1501 flg = PETSC_FALSE; 1502 PetscCall(PetscOptionsGetBool(NULL, NULL, "-saw_options", &flg, NULL)); 1503 if (flg) PetscCall(PetscOptionsSAWsDestroy()); 1504 #endif 1505 1506 #if defined(PETSC_HAVE_X) 1507 flg1 = PETSC_FALSE; 1508 PetscCall(PetscOptionsGetBool(NULL, NULL, "-x_virtual", &flg1, NULL)); 1509 if (flg1) { 1510 /* this is a crude hack, but better than nothing */ 1511 PetscCall(PetscPOpen(PETSC_COMM_WORLD, NULL, "pkill -15 Xvfb", "r", NULL)); 1512 } 1513 #endif 1514 1515 #if !defined(PETSC_HAVE_THREADSAFETY) 1516 PetscCall(PetscOptionsGetBool(NULL, NULL, "-memory_view", &flg2, NULL)); 1517 if (flg2) PetscCall(PetscMemoryView(PETSC_VIEWER_STDOUT_WORLD, "Summary of Memory Usage in PETSc\n")); 1518 #endif 1519 1520 if (PetscDefined(USE_LOG)) { 1521 flg1 = PETSC_FALSE; 1522 PetscCall(PetscOptionsGetBool(NULL, NULL, "-get_total_flops", &flg1, NULL)); 1523 if (flg1) { 1524 
PetscLogDouble flops = 0; 1525 PetscCallMPI(MPI_Reduce(&petsc_TotalFlops, &flops, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD)); 1526 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Total flops over all processors %g\n", flops)); 1527 } 1528 } 1529 1530 if (PetscDefined(USE_LOG) && PetscDefined(HAVE_MPE)) { 1531 mname[0] = 0; 1532 PetscCall(PetscOptionsGetString(NULL, NULL, "-log_mpe", mname, sizeof(mname), &flg1)); 1533 if (flg1) PetscCall(PetscLogMPEDump(mname[0] ? mname : NULL)); 1534 } 1535 1536 #if defined(PETSC_HAVE_KOKKOS) 1537 // Free petsc/kokkos stuff before the potentially non-null petsc default gpu stream is destroyed by PetscObjectRegisterDestroyAll 1538 if (PetscKokkosInitialized) { 1539 PetscCall(PetscKokkosFinalize_Private()); 1540 PetscKokkosInitialized = PETSC_FALSE; 1541 } 1542 #endif 1543 1544 // Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_(). 1545 PetscCall(PetscObjectRegisterDestroyAll()); 1546 1547 if (PetscDefined(USE_LOG)) { 1548 PetscCall(PetscOptionsPushCreateViewerOff(PETSC_FALSE)); 1549 PetscCall(PetscLogViewFromOptions()); 1550 PetscCall(PetscOptionsPopCreateViewerOff()); 1551 // It should be turned on with PetscLogGpuTime() and never turned off except in this place 1552 PetscLogGpuTimeFlag = PETSC_FALSE; 1553 1554 // Free any objects created by the last block of code. 
1555 PetscCall(PetscObjectRegisterDestroyAll()); 1556 1557 mname[0] = 0; 1558 PetscCall(PetscOptionsGetString(NULL, NULL, "-log_all", mname, sizeof(mname), &flg1)); 1559 PetscCall(PetscOptionsGetString(NULL, NULL, "-log", mname, sizeof(mname), &flg2)); 1560 if (flg1 || flg2) PetscCall(PetscLogDump(mname)); 1561 } 1562 1563 flg1 = PETSC_FALSE; 1564 PetscCall(PetscOptionsGetBool(NULL, NULL, "-no_signal_handler", &flg1, NULL)); 1565 if (!flg1) PetscCall(PetscPopSignalHandler()); 1566 flg1 = PETSC_FALSE; 1567 PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpidump", &flg1, NULL)); 1568 if (flg1) PetscCall(PetscMPIDump(stdout)); 1569 flg1 = PETSC_FALSE; 1570 flg2 = PETSC_FALSE; 1571 /* preemptive call to avoid listing this option in options table as unused */ 1572 PetscCall(PetscOptionsHasName(NULL, NULL, "-malloc_dump", &flg1)); 1573 PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1)); 1574 PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_view", &flg2, NULL)); 1575 1576 if (flg2) { PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD)); } 1577 1578 /* to prevent PETSc -options_left from warning */ 1579 PetscCall(PetscOptionsHasName(NULL, NULL, "-nox", &flg1)); 1580 PetscCall(PetscOptionsHasName(NULL, NULL, "-nox_warning", &flg1)); 1581 1582 flg3 = PETSC_FALSE; /* default value is required */ 1583 PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_left", &flg3, &flg1)); 1584 if (!flg1) flg3 = PETSC_TRUE; 1585 if (flg3) { 1586 if (!flg2 && flg1) { /* have not yet printed the options */ 1587 PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD)); 1588 } 1589 PetscCall(PetscOptionsAllUsed(NULL, &nopt)); 1590 if (nopt) { 1591 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! There are options you set that were not used!\n")); 1592 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! could be spelling mistake, etc!\n")); 1593 if (nopt == 1) { 1594 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There is one unused database option. 
It is:\n")); 1595 } else { 1596 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are %" PetscInt_FMT " unused database options. They are:\n", nopt)); 1597 } 1598 } else if (flg3 && flg1) { 1599 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are no unused options.\n")); 1600 } 1601 PetscCall(PetscOptionsLeft(NULL)); 1602 } 1603 1604 #if defined(PETSC_HAVE_SAWS) 1605 if (!PetscGlobalRank) { 1606 PetscCall(PetscStackSAWsViewOff()); 1607 PetscCallSAWs(SAWs_Finalize, ()); 1608 } 1609 #endif 1610 1611 /* 1612 List all objects the user may have forgot to free 1613 */ 1614 if (PetscDefined(USE_LOG) && PetscObjectsLog) { 1615 PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1)); 1616 if (flg1) { 1617 MPI_Comm local_comm; 1618 char string[64]; 1619 1620 PetscCall(PetscOptionsGetString(NULL, NULL, "-objects_dump", string, sizeof(string), NULL)); 1621 PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm)); 1622 PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1)); 1623 PetscCall(PetscObjectsDump(stdout, (string[0] == 'a') ? 
PETSC_TRUE : PETSC_FALSE)); 1624 PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1)); 1625 PetscCallMPI(MPI_Comm_free(&local_comm)); 1626 } 1627 } 1628 1629 PetscObjectsCounts = 0; 1630 PetscObjectsMaxCounts = 0; 1631 PetscCall(PetscFree(PetscObjects)); 1632 1633 /* 1634 Destroy any packages that registered a finalize 1635 */ 1636 PetscCall(PetscRegisterFinalizeAll()); 1637 1638 PetscCall(PetscLogFinalize()); 1639 1640 /* 1641 Print PetscFunctionLists that have not been properly freed 1642 */ 1643 if (PetscPrintFunctionList) PetscCall(PetscFunctionListPrintAll()); 1644 1645 if (petsc_history) { 1646 PetscCall(PetscCloseHistoryFile(&petsc_history)); 1647 petsc_history = NULL; 1648 } 1649 PetscCall(PetscOptionsHelpPrintedDestroy(&PetscOptionsHelpPrintedSingleton)); 1650 PetscCall(PetscInfoDestroy()); 1651 1652 #if !defined(PETSC_HAVE_THREADSAFETY) 1653 if (!(PETSC_RUNNING_ON_VALGRIND)) { 1654 char fname[PETSC_MAX_PATH_LEN]; 1655 char sname[PETSC_MAX_PATH_LEN]; 1656 FILE *fd; 1657 int err; 1658 1659 flg2 = PETSC_FALSE; 1660 flg3 = PETSC_FALSE; 1661 if (PetscDefined(USE_DEBUG)) PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_test", &flg2, NULL)); 1662 PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_debug", &flg3, NULL)); 1663 fname[0] = 0; 1664 PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_dump", fname, sizeof(fname), &flg1)); 1665 if (flg1 && fname[0]) { 1666 PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank)); 1667 fd = fopen(sname, "w"); 1668 PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname); 1669 PetscCall(PetscMallocDump(fd)); 1670 err = fclose(fd); 1671 PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file"); 1672 } else if (flg1 || flg2 || flg3) { 1673 MPI_Comm local_comm; 1674 1675 PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm)); 1676 PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1)); 1677 PetscCall(PetscMallocDump(stdout)); 1678 
PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1)); 1679 PetscCallMPI(MPI_Comm_free(&local_comm)); 1680 } 1681 fname[0] = 0; 1682 PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_view", fname, sizeof(fname), &flg1)); 1683 if (flg1 && fname[0]) { 1684 PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank)); 1685 fd = fopen(sname, "w"); 1686 PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname); 1687 PetscCall(PetscMallocView(fd)); 1688 err = fclose(fd); 1689 PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file"); 1690 } else if (flg1) { 1691 MPI_Comm local_comm; 1692 1693 PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm)); 1694 PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1)); 1695 PetscCall(PetscMallocView(stdout)); 1696 PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1)); 1697 PetscCallMPI(MPI_Comm_free(&local_comm)); 1698 } 1699 } 1700 #endif 1701 1702 /* 1703 Close any open dynamic libraries 1704 */ 1705 PetscCall(PetscFinalize_DynamicLibraries()); 1706 1707 /* Can be destroyed only after all the options are used */ 1708 PetscCall(PetscOptionsDestroyDefault()); 1709 1710 #if defined(PETSC_HAVE_NVSHMEM) 1711 if (PetscBeganNvshmem) { 1712 PetscCall(PetscNvshmemFinalize()); 1713 PetscBeganNvshmem = PETSC_FALSE; 1714 } 1715 #endif 1716 1717 PetscCall(PetscFreeMPIResources()); 1718 1719 /* 1720 Destroy any known inner MPI_Comm's and attributes pointing to them 1721 Note this will not destroy any new communicators the user has created. 
1722 1723 If all PETSc objects were not destroyed those left over objects will have hanging references to 1724 the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again 1725 */ 1726 { 1727 PetscCommCounter *counter; 1728 PetscMPIInt flg; 1729 MPI_Comm icomm; 1730 union 1731 { 1732 MPI_Comm comm; 1733 void *ptr; 1734 } ucomm; 1735 PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg)); 1736 if (flg) { 1737 icomm = ucomm.comm; 1738 PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg)); 1739 PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory"); 1740 1741 PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval)); 1742 PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval)); 1743 PetscCallMPI(MPI_Comm_free(&icomm)); 1744 } 1745 PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg)); 1746 if (flg) { 1747 icomm = ucomm.comm; 1748 PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg)); 1749 PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory"); 1750 1751 PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval)); 1752 PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval)); 1753 PetscCallMPI(MPI_Comm_free(&icomm)); 1754 } 1755 } 1756 1757 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Counter_keyval)); 1758 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_InnerComm_keyval)); 1759 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_OuterComm_keyval)); 1760 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_ShmComm_keyval)); 1761 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_CreationIdx_keyval)); 1762 PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Garbage_HMap_keyval)); 1763 1764 // Free keyvals which may be silently created by some 
routines 1765 if (Petsc_SharedWD_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedWD_keyval)); 1766 if (Petsc_SharedTmp_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedTmp_keyval)); 1767 1768 PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockOpen)); 1769 PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStdout)); 1770 PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStderr)); 1771 PetscCall(PetscSpinlockDestroy(&PetscCommSpinLock)); 1772 1773 if (PetscBeganMPI) { 1774 PetscMPIInt flag; 1775 PetscCallMPI(MPI_Finalized(&flag)); 1776 PetscCheck(!flag, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()"); 1777 /* wait until the very last moment to disable error handling */ 1778 PetscErrorHandlingInitialized = PETSC_FALSE; 1779 PetscCallMPI(MPI_Finalize()); 1780 } else PetscErrorHandlingInitialized = PETSC_FALSE; 1781 1782 /* 1783 1784 Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because 1785 the communicator has some outstanding requests on it. Specifically if the 1786 flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See 1787 src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate() 1788 is never freed as it should be. Thus one may obtain messages of the form 1789 [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the 1790 memory was not freed. 1791 1792 */ 1793 PetscCall(PetscMallocClear()); 1794 PetscCall(PetscStackReset()); 1795 1796 PetscInitializeCalled = PETSC_FALSE; 1797 PetscFinalizeCalled = PETSC_TRUE; 1798 #if defined(PETSC_USE_COVERAGE) 1799 /* 1800 flush gcov, otherwise during CI the flushing continues into the next pipeline resulting in git not being able to delete directories since the 1801 gcov files are still being added to the directories as git tries to remove the directories. 
1802 */ 1803 __gcov_flush(); 1804 #endif 1805 /* To match PetscFunctionBegin() at the beginning of this function */ 1806 PetscStackClearTop; 1807 return PETSC_SUCCESS; 1808 } 1809 1810 #if defined(PETSC_MISSING_LAPACK_lsame_) 1811 PETSC_EXTERN int lsame_(char *a, char *b) 1812 { 1813 if (*a == *b) return 1; 1814 if (*a + 32 == *b) return 1; 1815 if (*a - 32 == *b) return 1; 1816 return 0; 1817 } 1818 #endif 1819 1820 #if defined(PETSC_MISSING_LAPACK_lsame) 1821 PETSC_EXTERN int lsame(char *a, char *b) 1822 { 1823 if (*a == *b) return 1; 1824 if (*a + 32 == *b) return 1; 1825 if (*a - 32 == *b) return 1; 1826 return 0; 1827 } 1828 #endif 1829 1830 static inline PetscMPIInt MPIU_Allreduce_Count(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) 1831 { 1832 PetscMPIInt err; 1833 #if !defined(PETSC_HAVE_MPI_LARGE_COUNT) 1834 PetscMPIInt count2; 1835 1836 PetscMPIIntCast_Internal(count, &count2); 1837 err = MPI_Allreduce((void *)inbuf, outbuf, count2, dtype, op, comm); 1838 #else 1839 err = MPI_Allreduce_c((void *)inbuf, outbuf, count, dtype, op, comm); 1840 #endif 1841 return err; 1842 } 1843 1844 /* 1845 When count is 1 and dtype == MPIU_INT performs the reduction in PetscInt64 to check for integer overflow 1846 */ 1847 PetscMPIInt MPIU_Allreduce_Private(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) 1848 { 1849 PetscMPIInt err; 1850 if (!PetscDefined(USE_64BIT_INDICES) && count == 1 && dtype == MPIU_INT) { 1851 PetscInt64 incnt, outcnt; 1852 void *inbufd, *outbufd; 1853 1854 if (inbuf != MPI_IN_PLACE) { 1855 incnt = *(PetscInt32 *)inbuf; 1856 inbufd = &incnt; 1857 } else { 1858 outcnt = *(PetscInt32 *)outbuf; 1859 inbufd = (void *)MPI_IN_PLACE; 1860 } 1861 outbufd = &outcnt; 1862 err = MPIU_Allreduce_Count(inbufd, outbufd, count, MPIU_INT64, op, comm); 1863 if (!err && outcnt > PETSC_INT_MAX) err = MPI_ERR_OTHER; 1864 *(PetscInt32 *)outbuf = (PetscInt32)outcnt; 1865 } else 
{ 1866 err = MPIU_Allreduce_Count(inbuf, outbuf, count, dtype, op, comm); 1867 } 1868 return err; 1869 } 1870