xref: /petsc/src/sys/objects/pinit.c (revision bd1587442888ecfa2176fd832616cd1e28a092f3)
1 #define PETSC_DESIRE_FEATURE_TEST_MACROS
2 /*
3    This file defines the initialization of PETSc, including PetscInitialize()
4 */
5 #include <petsc/private/petscimpl.h> /*I  "petscsys.h"   I*/
6 #include <petsc/private/logimpl.h>
7 #include <petscviewer.h>
8 #include <petsc/private/garbagecollector.h>
9 
10 #if !defined(PETSC_HAVE_WINDOWS_COMPILERS)
11   #include <petsc/private/valgrind/valgrind.h>
12 #endif
13 
14 #if defined(PETSC_USE_FORTRAN_BINDINGS)
15   #include <petsc/private/fortranimpl.h>
16 #endif
17 
18 #if PetscDefined(USE_COVERAGE)
19 EXTERN_C_BEGIN
20   #if defined(PETSC_HAVE___GCOV_DUMP)
21     #define __gcov_flush(x) __gcov_dump(x)
22   #endif
23 void __gcov_flush(void);
24 EXTERN_C_END
25 #endif
26 
27 #if defined(PETSC_SERIALIZE_FUNCTIONS)
28 PETSC_INTERN PetscFPT PetscFPTData;
29 PetscFPT              PetscFPTData = 0;
30 #endif
31 
32 #if PetscDefined(HAVE_SAWS)
33   #include <petscviewersaws.h>
34 #endif
35 
36 PETSC_INTERN FILE *petsc_history;
37 
38 PETSC_INTERN PetscErrorCode PetscInitialize_DynamicLibraries(void);
39 PETSC_INTERN PetscErrorCode PetscFinalize_DynamicLibraries(void);
40 PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm, int);
41 PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm, int);
42 PETSC_INTERN PetscErrorCode PetscCloseHistoryFile(FILE **);
43 
/* user may set these BEFORE calling PetscInitialize() */
MPI_Comm PETSC_COMM_WORLD = MPI_COMM_NULL;
#if PetscDefined(HAVE_MPI_INIT_THREAD)
/* requested MPI threading level; PETSC_DECIDE lets PETSc choose during initialization */
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = PETSC_DECIDE;
#else
PetscMPIInt PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_SINGLE;
#endif

/* MPI attribute keyvals PETSc uses to cache data on communicators; MPI_KEYVAL_INVALID
   means "not yet created" -- they are expected to be created during initialization */
PetscMPIInt Petsc_Counter_keyval      = MPI_KEYVAL_INVALID; /* tag/name counter, freed by Petsc_Counter_Attr_DeleteFn() below */
PetscMPIInt Petsc_InnerComm_keyval    = MPI_KEYVAL_INVALID; /* link: user (outer) comm -> PETSc (inner) comm, see Petsc_InnerComm_Attr_DeleteFn() */
PetscMPIInt Petsc_OuterComm_keyval    = MPI_KEYVAL_INVALID; /* back link: inner comm -> outer comm */
PetscMPIInt Petsc_ShmComm_keyval      = MPI_KEYVAL_INVALID; /* shared-memory comm attribute, delete fn declared below -- usage defined elsewhere */
PetscMPIInt Petsc_CreationIdx_keyval  = MPI_KEYVAL_INVALID; /* NOTE(review): purpose not visible in this file -- confirm against creator */
PetscMPIInt Petsc_Garbage_HMap_keyval = MPI_KEYVAL_INVALID; /* garbage-collector hash map attribute (see garbagecollector.h include above) */

PetscMPIInt Petsc_SharedWD_keyval  = MPI_KEYVAL_INVALID; /* presumably caches "ranks share working dir?" -- confirm at point of use */
PetscMPIInt Petsc_SharedTmp_keyval = MPI_KEYVAL_INVALID; /* presumably caches "ranks share /tmp?" -- confirm at point of use */
61 
/*
     Declare and set all the string names of the PETSc enums
     (last three entries are the type name, option prefix, and a NULL terminator)
*/
const char *const PetscBools[]     = {"FALSE", "TRUE", "PetscBool", "PETSC_", NULL};
const char *const PetscCopyModes[] = {"COPY_VALUES", "OWN_POINTER", "USE_POINTER", "PetscCopyMode", "PETSC_", NULL};

/* preloading state flags -- set/read by the PetscPreLoad machinery elsewhere */
PetscBool PetscPreLoadingUsed = PETSC_FALSE;
PetscBool PetscPreLoadingOn   = PETSC_FALSE;

PetscInt PetscHotRegionDepth;

/* NOTE(review): presumably set during initialization via the valgrind header included above -- confirm */
PetscBool PETSC_RUNNING_ON_VALGRIND = PETSC_FALSE;

#if defined(PETSC_HAVE_THREADSAFETY)
/* spinlocks used in thread-safe builds to serialize ASCII viewer and comm operations */
PetscSpinlock PetscViewerASCIISpinLockOpen;
PetscSpinlock PetscViewerASCIISpinLockStdout;
PetscSpinlock PetscViewerASCIISpinLockStderr;
PetscSpinlock PetscCommSpinLock;
#endif

extern PetscInt PetscNumBLASThreads;
83 
84 /*@C
85   PetscInitializeNoPointers - Calls PetscInitialize() from C/C++ without the pointers to argc and args
86 
87   Collective, No Fortran Support
88 
89   Input Parameters:
90 + argc     - number of args
91 . args     - array of command line arguments
92 . filename - optional name of the program file, pass `NULL` to ignore
93 - help     - optional help, pass `NULL` to ignore
94 
95   Level: advanced
96 
97   Notes:
98   this is called only by the PETSc Julia interface. Even though it might start MPI it sets the flag to
99   indicate that it did NOT start MPI so that the `PetscFinalize()` does not end MPI, thus allowing `PetscInitialize()` to
100   be called multiple times from Julia without the problem of trying to initialize MPI more than once.
101 
102   Developer Notes:
103   Turns off PETSc signal handling to allow Julia to manage signals
104 
105 .seealso: `PetscInitialize()`, `PetscInitializeFortran()`, `PetscInitializeNoArguments()`
106 @*/
107 PetscErrorCode PetscInitializeNoPointers(int argc, char **args, const char *filename, const char *help)
108 {
109   int    myargc = argc;
110   char **myargs = args;
111 
112   PetscFunctionBegin;
113   PetscCall(PetscInitialize(&myargc, &myargs, filename, help));
114   PetscCall(PetscPopSignalHandler());
115   PetscBeganMPI = PETSC_FALSE;
116   PetscFunctionReturn(PETSC_SUCCESS);
117 }
118 
119 /*@C
120   PetscInitializeNoArguments - Calls `PetscInitialize()` from C/C++ without
121   the command line arguments.
122 
123   Collective
124 
125   Level: advanced
126 
127 .seealso: `PetscInitialize()`, `PetscInitializeFortran()`
128 @*/
129 PetscErrorCode PetscInitializeNoArguments(void)
130 {
131   int    argc = 0;
132   char **args = NULL;
133 
134   PetscFunctionBegin;
135   PetscCall(PetscInitialize(&argc, &args, NULL, NULL));
136   PetscFunctionReturn(PETSC_SUCCESS);
137 }
138 
139 /*@
140   PetscInitialized - Determine whether PETSc is initialized.
141 
142   Output Parameter:
143 . isInitialized - `PETSC_TRUE` if PETSc is initialized, `PETSC_FALSE` otherwise
144 
145   Level: beginner
146 
147 .seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
148 @*/
PetscErrorCode PetscInitialized(PetscBool *isInitialized)
{
  PetscFunctionBegin;
  /* Validate the pointer only once PETSc is up: before PetscInitialize() the
     error machinery behind PetscAssertPointer() may not be usable.
     NOTE(review): a NULL isInitialized before initialization would still be
     dereferenced below -- presumably callers always pass a valid pointer; confirm. */
  if (PetscInitializeCalled) PetscAssertPointer(isInitialized, 1);
  *isInitialized = PetscInitializeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}
156 
157 /*@
158   PetscFinalized - Determine whether `PetscFinalize()` has been called yet
159 
160   Output Parameter:
161 . isFinalized - `PETSC_TRUE` if PETSc is finalized, `PETSC_FALSE` otherwise
162 
163   Level: developer
164 
165 .seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
166 @*/
PetscErrorCode PetscFinalized(PetscBool *isFinalized)
{
  PetscFunctionBegin;
  /* Validate the pointer only while PETSc is still up: after PetscFinalize()
     the error machinery behind PetscAssertPointer() may no longer be usable */
  if (!PetscFinalizeCalled) PetscAssertPointer(isFinalized, 1);
  *isFinalized = PetscFinalizeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}
174 
PETSC_INTERN PetscErrorCode PetscOptionsCheckInitial_Private(const char[]);

/*
       MPIU_MAXSUM_OP is the MPI reduction operation (implemented by
   MPIU_MaxSum_Local() below) that computes, for each pair of entries, the
   max of the first entry and the sum of the second entry.
   (The previous wording had the two halves swapped.)
*/
MPI_Op MPIU_MAXSUM_OP               = 0;
MPI_Op Petsc_Garbage_SetIntersectOp = 0; /* used by the garbage collector -- implementation not in this file */
183 
/*
  MPIU_MaxSum_Local - MPI user-defined reduction combining pairs: max of the
  first member, sum of the second. Handles MPIU_2INT and (with 64-bit indices)
  MPIU_INT_MPIINT pairs; aborts on any other datatype since an MPI reduction
  function cannot return an error code.
*/
PETSC_INTERN void MPIAPI MPIU_MaxSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscFunctionBegin;
  if (*datatype == MPIU_INT_MPIINT && PetscDefined(USE_64BIT_INDICES)) {
#if defined(PETSC_USE_64BIT_INDICES)
    struct petsc_mpiu_int_mpiint *xin = (struct petsc_mpiu_int_mpiint *)in, *xout = (struct petsc_mpiu_int_mpiint *)out;
    PetscMPIInt                   count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[i].a = PetscMax(xout[i].a, xin[i].a); /* first member: max */
      xout[i].b += xin[i].b;                     /* second member: sum */
    }
#endif
  } else if (*datatype == MPIU_2INT || *datatype == MPIU_INT_MPIINT) {
    PetscInt   *xin = (PetscInt *)in, *xout = (PetscInt *)out;
    PetscMPIInt count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[2 * i] = PetscMax(xout[2 * i], xin[2 * i]); /* even entries: max */
      xout[2 * i + 1] += xin[2 * i + 1];               /* odd entries: sum */
    }
  } else {
    /* cannot use PetscCall() here: an MPI_User_function returns void */
    PetscErrorCode ierr = (*PetscErrorPrintf)("Can only handle MPIU_2INT and MPIU_INT_MPIINT data types");
    (void)ierr;
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
212 
213 /*@
214   PetscMaxSum - Returns the max of the first entry over all MPI processes and the sum of the second entry.
215 
216   Collective
217 
218   Input Parameters:
219 + comm  - the communicator
220 - array - an array of length 2 times `size`, the number of MPI processes
221 
222   Output Parameters:
223 + max - the maximum of `array[2*rank]` over all MPI processes
224 - sum - the sum of the `array[2*rank + 1]` over all MPI processes
225 
226   Level: developer
227 
228 .seealso: `PetscInitialize()`
229 @*/
PetscErrorCode PetscMaxSum(MPI_Comm comm, const PetscInt array[], PetscInt *max, PetscInt *sum)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
  {
    struct {
      PetscInt max, sum;
    } work;
    /* single combined reduce-scatter: each rank receives its own (max,sum) pair */
    PetscCallMPI(MPI_Reduce_scatter_block((void *)array, &work, 1, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work.max;
    *sum = work.sum;
  }
#else
  {
    /* fallback: allreduce all pairs, then pick out this rank's pair */
    PetscMPIInt size, rank;
    struct {
      PetscInt max, sum;
    } *work;
    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscMalloc1(size, &work));
    PetscCallMPI(MPIU_Allreduce((void *)array, work, size, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work[rank].max;
    *sum = work[rank].sum;
    PetscCall(PetscFree(work));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
259 
260 #if (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
261   #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
262     #include <quadmath.h>
263   #endif
264 MPI_Op MPIU_SUM___FP16___FLOAT128 = 0;
265   #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
266 MPI_Op MPIU_SUM = 0;
267   #endif
268 
/*
  PetscSum_Local - MPI user-defined reduction computing the entry-wise sum for
  datatypes (__float128, __fp16 and their complex forms) that the built-in
  MPI_SUM cannot handle. Aborts on unsupported datatypes since an MPI
  reduction function cannot return an error code.
*/
PETSC_EXTERN void MPIAPI PetscSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscMPIInt i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype == MPIU_REAL) {
    PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
  #if defined(PETSC_HAVE_COMPLEX)
  else if (*datatype == MPIU_COMPLEX) {
    PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
  #endif
  #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  else if (*datatype == MPIU___FLOAT128) {
    __float128 *xin = (__float128 *)in, *xout = (__float128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
    #if defined(PETSC_HAVE_COMPLEX)
  } else if (*datatype == MPIU___COMPLEX128) {
    __complex128 *xin = (__complex128 *)in, *xout = (__complex128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
    #endif
  }
  #endif
  #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  else if (*datatype == MPIU___FP16) {
    __fp16 *xin = (__fp16 *)in, *xout = (__fp16 *)out;
    for (i = 0; i < count; i++) xout[i] = (__fp16)(xin[i] + xout[i]);
  }
  #endif
  else {
  /* error message lists only the datatypes compiled in for this configuration */
  #if (!defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)) && (!defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16))
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
  #elif !defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, or MPIU___COMPLEX128 data types"));
  #elif !defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, or MPIU___FP16 data types"));
  #else
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, MPIU___COMPLEX128, or MPIU___FP16 data types"));
  #endif
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
315 #endif
316 
317 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
318 MPI_Op MPIU_MAX = 0;
319 MPI_Op MPIU_MIN = 0;
320 
/*
  PetscMax_Local - MPI user-defined reduction computing the entry-wise maximum;
  for complex values the comparison is on the real part. Compiled only for
  __float128/__fp16 real builds (see enclosing #if) -- presumably registered as
  MPIU_MAX during initialization; confirm at registration site.
*/
PETSC_EXTERN void MPIAPI PetscMax_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscInt i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype == MPIU_REAL) {
    PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
    for (i = 0; i < count; i++) xout[i] = PetscMax(xout[i], xin[i]);
  }
  #if defined(PETSC_HAVE_COMPLEX)
  else if (*datatype == MPIU_COMPLEX) {
    /* keep whichever operand has the larger real part */
    PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
    for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) < PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
  }
  #endif
  else {
    /* an MPI_User_function cannot return an error code, so print and abort */
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
342 
343 PETSC_EXTERN void MPIAPI PetscMin_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
344 {
345   PetscInt i, count = *cnt;
346 
347   PetscFunctionBegin;
348   if (*datatype == MPIU_REAL) {
349     PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
350     for (i = 0; i < count; i++) xout[i] = PetscMin(xout[i], xin[i]);
351   }
352   #if defined(PETSC_HAVE_COMPLEX)
353   else if (*datatype == MPIU_COMPLEX) {
354     PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
355     for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) > PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
356   }
357   #endif
358   else {
359     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_SCALAR data (i.e. double or complex) types"));
360     PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
361   }
362   PetscFunctionReturnVoid();
363 }
364 #endif
365 
366 /*
367    Private routine to delete internal tag/name counter storage when a communicator is freed.
368 
369    This is called by MPI, not by users. This is called by MPI_Comm_free() when the communicator that has this  data as an attribute is freed.
370 
371    Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()
372 
373 */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_Counter_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *count_val, void *extra_state)
{
  PetscCommCounter      *counter = (PetscCommCounter *)count_val;
  struct PetscCommStash *comms   = counter->comms, *pcomm;

  PetscFunctionBegin;
  PetscCallReturnMPI(PetscInfo(NULL, "Deleting counter data in an MPI_Comm %ld\n", (long)comm));
  PetscCallReturnMPI(PetscFree(counter->iflags));
  /* free the linked list of stashed communicators hanging off this counter */
  while (comms) {
    PetscCallMPIReturnMPI(MPI_Comm_free(&comms->comm));
    pcomm = comms;
    comms = comms->next;
    PetscCallReturnMPI(PetscFree(pcomm));
  }
  PetscCallReturnMPI(PetscFree(counter));
  PetscFunctionReturn(MPI_SUCCESS);
}
391 
392 /*
393   This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Comm_delete_attr) or when the user
394   calls MPI_Comm_free().
395 
396   This is the only entry point for breaking the links between inner and outer comms.
397 
398   This is called by MPI, not by users. This is called when MPI_Comm_free() is called on the communicator.
399 
400   Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()
401 
402 */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_InnerComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  /* union converts between the attribute's void* representation and MPI_Comm */
  union
  {
    MPI_Comm comm;
    void    *ptr;
  } icomm;

  PetscFunctionBegin;
  PetscCheckReturnMPI(keyval == Petsc_InnerComm_keyval, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Unexpected keyval");
  icomm.ptr = attr_val;
  if (PetscDefined(USE_DEBUG)) {
    /* Error out if the inner/outer comms are not correctly linked through their Outer/InnerComm attributes */
    PetscMPIInt flg;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ocomm;
    PetscCallMPIReturnMPI(MPI_Comm_get_attr(icomm.comm, Petsc_OuterComm_keyval, &ocomm, &flg));
    PetscCheckReturnMPI(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm does not have OuterComm attribute");
    PetscCheckReturnMPI(ocomm.comm == comm, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm's OuterComm attribute does not point to outer PETSc comm");
  }
  /* deleting the attribute on the inner comm triggers Petsc_OuterComm_Attr_DeleteFn(), severing the back link */
  PetscCallMPIReturnMPI(MPI_Comm_delete_attr(icomm.comm, Petsc_OuterComm_keyval));
  PetscCallReturnMPI(PetscInfo(NULL, "User MPI_Comm %ld is being unlinked from inner PETSc comm %ld\n", (long)comm, (long)icomm.comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
430 
431 /*
432  * This is invoked on the inner comm when Petsc_InnerComm_Attr_DeleteFn calls MPI_Comm_delete_attr().  It should not be reached any other way.
433  */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_OuterComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  PetscFunctionBegin;
  /* nothing to free here: the attribute is only a back pointer; just log the unlink */
  PetscCallReturnMPI(PetscInfo(NULL, "Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n", (long)comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
440 
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_ShmComm_Attr_DeleteFn(MPI_Comm, PetscMPIInt, void *, void *);

#if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
/* MPI "external32" data representation hooks -- defined/registered elsewhere; confirm at registration site */
PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype, MPI_Aint *, void *);
PETSC_EXTERN PetscMPIInt PetscDataRep_read_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
PETSC_EXTERN PetscMPIInt PetscDataRep_write_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
#endif

PetscMPIInt PETSC_MPI_ERROR_CLASS = MPI_ERR_LASTCODE, PETSC_MPI_ERROR_CODE;

/* saved copies of the command line, served back by PetscGetArgs()/PetscGetArguments() below */
PETSC_INTERN int    PetscGlobalArgc;
PETSC_INTERN char **PetscGlobalArgs, **PetscGlobalArgsFortran;
int                 PetscGlobalArgc        = 0;
char              **PetscGlobalArgs        = NULL;
char              **PetscGlobalArgsFortran = NULL;
PetscSegBuffer      PetscCitationsList; /* holds citations registered via PetscCitationsRegister(), created in PetscCitationsInitialize() */
457 
/*
  PetscCitationsInitialize - creates the citation segmented buffer and
  pre-registers the two standard PETSc citations (users manual and the
  "efficient management of parallelism" paper).
*/
PetscErrorCode PetscCitationsInitialize(void)
{
  PetscFunctionBegin;
  PetscCall(PetscSegBufferCreate(1, 10000, &PetscCitationsList));

  PetscCall(PetscCitationsRegister("@TechReport{petsc-user-ref,\n\
  Author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Steven Benson and Jed Brown\n\
    and Peter Brune and Kris Buschelman and Emil Constantinescu and Lisandro Dalcin and Alp Dener\n\
    and Victor Eijkhout and Jacob Faibussowitsch and William~D. Gropp and V\'{a}clav Hapla and Tobin Isaac and Pierre Jolivet\n\
    and Dmitry Karpeev and Dinesh Kaushik and Matthew~G. Knepley and Fande Kong and Scott Kruger\n\
    and Dave~A. May and Lois Curfman McInnes and Richard Tran Mills and Lawrence Mitchell and Todd Munson\n\
    and Jose~E. Roman and Karl Rupp and Patrick Sanan and Jason Sarich and Barry~F. Smith\n\
    and Stefano Zampini and Hong Zhang and Hong Zhang and Junchao Zhang},\n\
  Title = {{PETSc/TAO} Users Manual},\n\
  Number = {ANL-21/39 - Revision 3.21},\n\
  Doi = {10.2172/2205494},\n\
  Institution = {Argonne National Laboratory},\n\
  Year = {2024}\n}\n",
                                   NULL));

  PetscCall(PetscCitationsRegister("@InProceedings{petsc-efficient,\n\
  Author = {Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith},\n\
  Title = {Efficient Management of Parallelism in Object Oriented Numerical Software Libraries},\n\
  Booktitle = {Modern Software Tools in Scientific Computing},\n\
  Editor = {E. Arge and A. M. Bruaset and H. P. Langtangen},\n\
  Pages = {163--202},\n\
  Publisher = {Birkh{\\\"{a}}user Press},\n\
  Year = {1997}\n}\n",
                                   NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}
489 
static char programname[PETSC_MAX_PATH_LEN] = ""; /* HP includes entire path in name */

/*
  PetscSetProgramName - stores a (possibly truncated) copy of name in the
  static programname buffer for later retrieval by PetscGetProgramName().
*/
PetscErrorCode PetscSetProgramName(const char name[])
{
  PetscFunctionBegin;
  PetscCall(PetscStrncpy(programname, name, sizeof(programname)));
  PetscFunctionReturn(PETSC_SUCCESS);
}
498 
499 /*@C
500   PetscGetProgramName - Gets the name of the running program.
501 
502   Not Collective
503 
504   Input Parameter:
505 . len - length of the string name
506 
507   Output Parameter:
508 . name - the name of the running program, provide a string of length `PETSC_MAX_PATH_LEN`
509 
510   Level: advanced
511 
512 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
513 @*/
PetscErrorCode PetscGetProgramName(char name[], size_t len)
{
  PetscFunctionBegin;
  /* copies at most len bytes of the stored program name; truncates if the buffer is smaller */
  PetscCall(PetscStrncpy(name, programname, len));
  PetscFunctionReturn(PETSC_SUCCESS);
}
520 
521 /*@C
522   PetscGetArgs - Allows you to access the raw command line arguments anywhere
523   after PetscInitialize() is called but before `PetscFinalize()`.
524 
525   Not Collective, No Fortran Support
526 
527   Output Parameters:
528 + argc - count of number of command line arguments
529 - args - the command line arguments
530 
531   Level: intermediate
532 
533   Notes:
534   This is usually used to pass the command line arguments into other libraries
535   that are called internally deep in PETSc or the application.
536 
537   The first argument contains the program name as is normal for C programs.
538 
539 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
540 @*/
PetscErrorCode PetscGetArgs(int *argc, char ***args)
{
  PetscFunctionBegin;
  /* NOTE(review): this guard also passes when PETSc was never initialized (both
     flags false) even though the message says otherwise -- presumably only the
     "called after PetscFinalize()" ordering is meant to be caught; confirm intent */
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  /* return pointers to the saved argv; no copy is made, caller must not free */
  *argc = PetscGlobalArgc;
  *args = PetscGlobalArgs;
  PetscFunctionReturn(PETSC_SUCCESS);
}
549 
550 /*@C
551   PetscGetArguments - Allows you to access the  command line arguments anywhere
552   after `PetscInitialize()` is called but before `PetscFinalize()`.
553 
554   Not Collective, No Fortran Support
555 
556   Output Parameter:
557 . args - the command line arguments
558 
559   Level: intermediate
560 
561   Note:
562   This does NOT start with the program name and IS `NULL` terminated (final arg is void)
563 
564 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscFreeArguments()`, `PetscInitialize()`
565 @*/
PetscErrorCode PetscGetArguments(char ***args)
{
  PetscInt i, argc = PetscGlobalArgc;

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  if (!argc) {
    *args = NULL;
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  /* argc slots: argc-1 deep-copied arguments (program name at index 0 is skipped)
     plus a trailing NULL terminator; free with PetscFreeArguments() */
  PetscCall(PetscMalloc1(argc, args));
  for (i = 0; i < argc - 1; i++) PetscCall(PetscStrallocpy(PetscGlobalArgs[i + 1], &(*args)[i]));
  (*args)[argc - 1] = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}
581 
582 /*@C
583   PetscFreeArguments - Frees the memory obtained with `PetscGetArguments()`
584 
585   Not Collective, No Fortran Support
586 
587   Output Parameter:
588 . args - the command line arguments
589 
590   Level: intermediate
591 
592 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscGetArguments()`
593 @*/
594 PetscErrorCode PetscFreeArguments(char **args)
595 {
596   PetscFunctionBegin;
597   if (args) {
598     PetscInt i = 0;
599 
600     while (args[i]) PetscCall(PetscFree(args[i++]));
601     PetscCall(PetscFree(args));
602   }
603   PetscFunctionReturn(PETSC_SUCCESS);
604 }
605 
606 #if PetscDefined(HAVE_SAWS)
607   #include <petscconfiginfo.h>
608 
/*
  PetscInitializeSAWs - processes the -saws_* command line options and starts
  the SAWs web server (on MPI rank 0 only), publishing an introductory
  index.html body that describes the running program.
*/
PETSC_INTERN PetscErrorCode PetscInitializeSAWs(const char help[])
{
  PetscFunctionBegin;
  if (!PetscGlobalRank) {
    char      cert[PETSC_MAX_PATH_LEN], root[PETSC_MAX_PATH_LEN], *intro, programname[64], *appline, *options, version[64];
    int       port;
    PetscBool flg, rootlocal = PETSC_FALSE, flg2, selectport = PETSC_FALSE;
    size_t    applinelen, introlen;
    char      sawsurl[256];

    /* -saws_log [file]: log SAWs activity, to a file if one is given */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_log", &flg));
    if (flg) {
      char sawslog[PETSC_MAX_PATH_LEN];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_log", sawslog, sizeof(sawslog), NULL));
      if (sawslog[0]) {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (sawslog));
      } else {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (NULL));
      }
    }
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_https", cert, sizeof(cert), &flg));
    if (flg) PetscCallSAWs(SAWs_Set_Use_HTTPS, (cert));
    /* choose the port: either auto-select a free one or honor -saws_port */
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select", &selectport, NULL));
    if (selectport) {
      PetscCallSAWs(SAWs_Get_Available_Port, (&port));
      PetscCallSAWs(SAWs_Set_Port, (port));
    } else {
      PetscCall(PetscOptionsGetInt(NULL, NULL, "-saws_port", &port, &flg));
      if (flg) PetscCallSAWs(SAWs_Set_Port, (port));
    }
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_root", root, sizeof(root), &flg));
    if (flg) {
      PetscCallSAWs(SAWs_Set_Document_Root, (root));
      /* rootlocal: serving from the current directory, so source links may be generated below */
      PetscCall(PetscStrcmp(root, ".", &rootlocal));
    } else {
      PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_options", &flg));
      if (flg) {
        PetscCall(PetscStrreplace(PETSC_COMM_WORLD, "${PETSC_DIR}/share/petsc/saws", root, sizeof(root)));
        PetscCallSAWs(SAWs_Set_Document_Root, (root));
      }
    }
    /* -saws_local requires -saws_root and a js/ subdirectory under it */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_local", &flg2));
    if (flg2) {
      char jsdir[PETSC_MAX_PATH_LEN];
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_SUP, "-saws_local option requires -saws_root option");
      PetscCall(PetscSNPrintf(jsdir, sizeof(jsdir), "%s/js", root));
      PetscCall(PetscTestDirectory(jsdir, 'r', &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "-saws_local option requires js directory in root directory");
      PetscCallSAWs(SAWs_Push_Local_Header, ());
    }
    PetscCall(PetscGetProgramName(programname, sizeof(programname)));
    PetscCall(PetscStrlen(help, &applinelen));
    introlen = 4096 + applinelen;
    applinelen += 1024;
    PetscCall(PetscMalloc(applinelen, &appline));
    PetscCall(PetscMalloc(introlen, &intro));

    if (rootlocal) {
      /* only link to the source if <programname>.c.html actually exists locally */
      PetscCall(PetscSNPrintf(appline, applinelen, "%s.c.html", programname));
      PetscCall(PetscTestFile(appline, 'r', &rootlocal));
    }
    PetscCall(PetscOptionsGetAll(NULL, &options));
    if (rootlocal && help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running <a href=\"%s.c.html\">%s</a> %s</center><br><center><pre>%s</pre></center><br>\n", programname, programname, options, help));
    } else if (help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center>Running %s %s</center><br><center><pre>%s</pre></center><br>", programname, options, help));
    } else {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running %s %s</center><br>\n", programname, options));
    }
    PetscCall(PetscFree(options));
    PetscCall(PetscGetVersion(version, sizeof(version)));
    PetscCall(PetscSNPrintf(intro, introlen,
                            "<body>\n"
                            "<center><h2> <a href=\"https://petsc.org/\">PETSc</a> Application Web server powered by <a href=\"https://bitbucket.org/saws/saws\">SAWs</a> </h2></center>\n"
                            "<center>This is the default PETSc application dashboard, from it you can access any published PETSc objects or logging data</center><br><center>%s configured with %s</center><br>\n"
                            "%s",
                            version, petscconfigureoptions, appline));
    PetscCallSAWs(SAWs_Push_Body, ("index.html", 0, intro));
    PetscCall(PetscFree(intro));
    PetscCall(PetscFree(appline));
    if (selectport) {
      PetscBool silent;

      /* another process may have grabbed the port so keep trying */
      while (SAWs_Initialize()) {
        PetscCallSAWs(SAWs_Get_Available_Port, (&port));
        PetscCallSAWs(SAWs_Set_Port, (port));
      }

      PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select_silent", &silent, NULL));
      if (!silent) {
        PetscCallSAWs(SAWs_Get_FullURL, (sizeof(sawsurl), sawsurl));
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Point your browser to %s for SAWs\n", sawsurl));
      }
    } else {
      PetscCallSAWs(SAWs_Initialize, ());
    }
    PetscCall(PetscCitationsRegister("@TechReport{ saws,\n"
                                     "  Author = {Matt Otten and Jed Brown and Barry Smith},\n"
                                     "  Title  = {Scientific Application Web Server (SAWs) Users Manual},\n"
                                     "  Institution = {Argonne National Laboratory},\n"
                                     "  Year   = 2013\n}\n",
                                     NULL));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
716 #endif
717 
718 /* Things must be done before MPI_Init() when MPI is not yet initialized, and can be shared between C init and Fortran init */
PETSC_INTERN PetscErrorCode PetscPreMPIInit_Private(void)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_HWLOC_SOLARIS_BUG)
  /* see MPI.py for details on this bug */
  (void)setenv("HWLOC_COMPONENTS", "-x86", 1); /* disable hwloc's x86 component before MPI initializes hwloc */
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
728 
729 #if PetscDefined(HAVE_ADIOS)
730   #include <adios.h>
731   #include <adios_read.h>
732 int64_t Petsc_adios_group;
733 #endif
734 #if PetscDefined(HAVE_OPENMP)
735   #include <omp.h>
736 PetscInt PetscNumOMPThreads;
737 #endif
738 
739 #include <petsc/private/deviceimpl.h>
740 #if PetscDefined(HAVE_CUDA)
741   #include <petscdevice_cuda.h>
742 // REMOVE ME
743 cudaStream_t PetscDefaultCudaStream = NULL;
744 #endif
745 #if PetscDefined(HAVE_HIP)
746   #include <petscdevice_hip.h>
747 // REMOVE ME
748 hipStream_t PetscDefaultHipStream = NULL;
749 #endif
750 
751 #if PetscDefined(HAVE_DLFCN_H)
752   #include <dlfcn.h>
753 #endif
754 PETSC_INTERN PetscErrorCode PetscLogInitialize(void);
755 #if PetscDefined(HAVE_VIENNACL)
756 PETSC_EXTERN PetscErrorCode PetscViennaCLInit(void);
757 PetscBool                   PetscViennaCLSynchronize = PETSC_FALSE;
758 #endif
759 
760 PetscBool PetscCIEnabled = PETSC_FALSE, PetscCIEnabledPortableErrorOutput = PETSC_FALSE;
761 
762 /*
763   PetscInitialize_Common  - shared code between C and Fortran initialization
764 
765   prog:     program name
766   file:     optional PETSc database file name. Might be in Fortran string format when 'ftn' is true
767   help:     program help message
768   ftn:      is it called from Fortran initialization (petscinitializef_)?
769   readarguments,len: used when fortran is true
770 */
771 PETSC_INTERN PetscErrorCode PetscInitialize_Common(const char *prog, const char *file, const char *help, PetscBool ftn, PetscBool readarguments, PetscInt len)
772 {
773   PetscMPIInt size;
774   PetscBool   flg = PETSC_TRUE;
775   char        hostname[256];
776   PetscBool   blas_view_flag = PETSC_FALSE;
777 
778   PetscFunctionBegin;
779   if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
780   /* these must be initialized in a routine, not as a constant declaration */
781   PETSC_STDOUT = stdout;
782   PETSC_STDERR = stderr;
783 
784   /* PetscCall can be used from now */
785   PetscErrorHandlingInitialized = PETSC_TRUE;
786 
787   /*
788       The checking over compatible runtime libraries is complicated by the MPI ABI initiative
789       https://wiki.mpich.org/mpich/index.php/ABI_Compatibility_Initiative which started with
790         MPICH v3.1 (Released February 2014)
791         IBM MPI v2.1 (December 2014)
792         Intel MPI Library v5.0 (2014)
793         Cray MPT v7.0.0 (June 2014)
794       As of July 31, 2017 the ABI number still appears to be 12, that is all of the versions
795       listed above and since that time are compatible.
796 
797       Unfortunately the MPI ABI initiative has not defined a way to determine the ABI number
798       at compile time or runtime. Thus we will need to systematically track the allowed versions
799       and how they are represented in the mpi.h and MPI_Get_library_version() output in order
800       to perform the checking.
801 
802       Currently we only check for pre MPI ABI versions (and packages that do not follow the MPI ABI).
803 
804       Questions:
805 
806         Should the checks for ABI incompatibility be only on the major version number below?
807         Presumably the output to stderr will be removed before a release.
808   */
809 
810 #if defined(PETSC_HAVE_MPI_GET_LIBRARY_VERSION)
811   {
812     char        mpilibraryversion[MPI_MAX_LIBRARY_VERSION_STRING];
813     PetscMPIInt mpilibraryversionlength;
814 
815     PetscCallMPI(MPI_Get_library_version(mpilibraryversion, &mpilibraryversionlength));
816     /* check for MPICH versions before MPI ABI initiative */
817   #if defined(MPICH_VERSION)
818     #if MPICH_NUMVERSION < 30100000
819     {
820       char     *ver, *lf;
821       PetscBool flg = PETSC_FALSE;
822 
823       PetscCall(PetscStrstr(mpilibraryversion, "MPICH Version:", &ver));
824       if (ver) {
825         PetscCall(PetscStrchr(ver, '\n', &lf));
826         if (lf) {
827           *lf = 0;
828           PetscCall(PetscStrendswith(ver, MPICH_VERSION, &flg));
829         }
830       }
831       if (!flg) {
832         PetscCall(PetscInfo(NULL, "PETSc warning --- MPICH library version \n%s does not match what PETSc was compiled with %s.\n", mpilibraryversion, MPICH_VERSION));
833         flg = PETSC_TRUE;
834       }
835     }
836     #endif
837       /* check for Open MPI version, it is not part of the MPI ABI initiative (is it part of another initiative that needs to be handled?) */
838   #elif defined(PETSC_HAVE_OPENMPI)
839     {
840       char     *ver, bs[MPI_MAX_LIBRARY_VERSION_STRING], *bsf;
841       PetscBool flg                                              = PETSC_FALSE;
842     #define PSTRSZ 2
843       char      ompistr1[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"Open MPI", "FUJITSU MPI"};
844       char      ompistr2[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"v", "Library "};
845       int       i;
846       for (i = 0; i < PSTRSZ; i++) {
847         PetscCall(PetscStrstr(mpilibraryversion, ompistr1[i], &ver));
848         if (ver) {
849           PetscCall(PetscSNPrintf(bs, MPI_MAX_LIBRARY_VERSION_STRING, "%s%d.%d", ompistr2[i], PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
850           PetscCall(PetscStrstr(ver, bs, &bsf));
851           if (bsf) flg = PETSC_TRUE;
852           break;
853         }
854       }
855       if (!flg) {
856         PetscCall(PetscInfo(NULL, "PETSc warning --- Open MPI library version \n%s does not match what PETSc was compiled with %d.%d.\n", mpilibraryversion, PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
857         flg = PETSC_TRUE;
858       }
859     }
860   #endif
861   }
862 #endif
863 
864 #if defined(PETSC_HAVE_DLADDR) && !(defined(__cray__) && defined(__clang__))
865   /* These symbols are currently in the Open MPI and MPICH libraries; they may not always be, in that case the test will simply not detect the problem */
866   PetscCheck(!dlsym(RTLD_DEFAULT, "ompi_mpi_init") || !dlsym(RTLD_DEFAULT, "MPID_Abort"), PETSC_COMM_SELF, PETSC_ERR_MPI_LIB_INCOMP, "Application was linked against both Open MPI and MPICH based MPI libraries and will not run correctly");
867 #endif
868 
869   /* on Windows - set printf to default to printing 2 digit exponents */
870 #if defined(PETSC_HAVE__SET_OUTPUT_FORMAT)
871   _set_output_format(_TWO_DIGIT_EXPONENT);
872 #endif
873 
874   PetscCall(PetscOptionsCreateDefault());
875 
876   PetscFinalizeCalled = PETSC_FALSE;
877 
878   PetscCall(PetscSetProgramName(prog));
879   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockOpen));
880   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStdout));
881   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStderr));
882   PetscCall(PetscSpinlockCreate(&PetscCommSpinLock));
883 
884   if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD;
885   PetscCallMPI(MPI_Comm_set_errhandler(PETSC_COMM_WORLD, MPI_ERRORS_RETURN));
886 
887   if (PETSC_MPI_ERROR_CLASS == MPI_ERR_LASTCODE) {
888     PetscCallMPI(MPI_Add_error_class(&PETSC_MPI_ERROR_CLASS));
889     PetscCallMPI(MPI_Add_error_code(PETSC_MPI_ERROR_CLASS, &PETSC_MPI_ERROR_CODE));
890   }
891 
892   /* Done after init due to a bug in MPICH-GM? */
893   PetscCall(PetscErrorPrintfInitialize());
894 
895   PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PetscGlobalRank));
896   PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PetscGlobalSize));
897 
898   MPIU_BOOL        = MPI_INT;
899   MPIU_ENUM        = MPI_INT;
900   MPIU_FORTRANADDR = (sizeof(void *) == sizeof(int)) ? MPI_INT : MPIU_INT64;
901   if (sizeof(size_t) == sizeof(unsigned)) MPIU_SIZE_T = MPI_UNSIGNED;
902   else if (sizeof(size_t) == sizeof(unsigned long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG;
903 #if defined(PETSC_SIZEOF_LONG_LONG)
904   else if (sizeof(size_t) == sizeof(unsigned long long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG_LONG;
905 #endif
906   else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_SUP_SYS, "Could not find MPI type for size_t");
907 
908   /*
909      Initialized the global complex variable; this is because with
910      shared libraries the constructors for global variables
911      are not called; at least on IRIX.
912   */
913 #if defined(PETSC_HAVE_COMPLEX)
914   {
915   #if defined(PETSC_CLANGUAGE_CXX) && !defined(PETSC_USE_REAL___FLOAT128)
916     PetscComplex ic(0.0, 1.0);
917     PETSC_i = ic;
918   #else
919     PETSC_i = _Complex_I;
920   #endif
921   }
922 #endif /* PETSC_HAVE_COMPLEX */
923 
924   /*
925      Create the PETSc MPI reduction operator that sums of the first
926      half of the entries and maxes the second half.
927   */
928   PetscCallMPI(MPI_Op_create(MPIU_MaxSum_Local, 1, &MPIU_MAXSUM_OP));
929 
930 #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
931   PetscCallMPI(MPI_Type_contiguous(2, MPI_DOUBLE, &MPIU___FLOAT128));
932   PetscCallMPI(MPI_Type_commit(&MPIU___FLOAT128));
933   #if defined(PETSC_HAVE_COMPLEX)
934   PetscCallMPI(MPI_Type_contiguous(4, MPI_DOUBLE, &MPIU___COMPLEX128));
935   PetscCallMPI(MPI_Type_commit(&MPIU___COMPLEX128));
936   #endif
937 #endif
938 #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
939   PetscCallMPI(MPI_Type_contiguous(2, MPI_CHAR, &MPIU___FP16));
940   PetscCallMPI(MPI_Type_commit(&MPIU___FP16));
941 #endif
942 
943 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
944   PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM));
945   PetscCallMPI(MPI_Op_create(PetscMax_Local, 1, &MPIU_MAX));
946   PetscCallMPI(MPI_Op_create(PetscMin_Local, 1, &MPIU_MIN));
947 #elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
948   PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM___FP16___FLOAT128));
949 #endif
950 
951   PetscCallMPI(MPI_Type_contiguous(2, MPIU_SCALAR, &MPIU_2SCALAR));
952   PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp));
953   PetscCallMPI(MPI_Type_commit(&MPIU_2SCALAR));
954 
955   /* create datatypes used by MPIU_MAXLOC, MPIU_MINLOC and PetscSplitReduction_Op */
956 #if !defined(PETSC_HAVE_MPIUNI)
957   {
958     PetscMPIInt  blockSizes[2]   = {1, 1};
959     MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_real_int, v), offsetof(struct petsc_mpiu_real_int, i)};
960     MPI_Datatype blockTypes[2]   = {MPIU_REAL, MPIU_INT}, tmpStruct;
961 
962     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
963     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_real_int), &MPIU_REAL_INT));
964     PetscCallMPI(MPI_Type_free(&tmpStruct));
965     PetscCallMPI(MPI_Type_commit(&MPIU_REAL_INT));
966   }
967   {
968     PetscMPIInt  blockSizes[2]   = {1, 1};
969     MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_scalar_int, v), offsetof(struct petsc_mpiu_scalar_int, i)};
970     MPI_Datatype blockTypes[2]   = {MPIU_SCALAR, MPIU_INT}, tmpStruct;
971 
972     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
973     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_scalar_int), &MPIU_SCALAR_INT));
974     PetscCallMPI(MPI_Type_free(&tmpStruct));
975     PetscCallMPI(MPI_Type_commit(&MPIU_SCALAR_INT));
976   }
977 #endif
978 
979 #if defined(PETSC_USE_64BIT_INDICES)
980   PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT));
981   PetscCallMPI(MPI_Type_commit(&MPIU_2INT));
982 
983   #if !defined(PETSC_HAVE_MPIUNI)
984   {
985     int          blockSizes[]   = {1, 1};
986     MPI_Aint     blockOffsets[] = {offsetof(struct petsc_mpiu_int_mpiint, a), offsetof(struct petsc_mpiu_int_mpiint, b)};
987     MPI_Datatype blockTypes[]   = {MPIU_INT, MPI_INT}, tmpStruct;
988 
989     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
990     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_int_mpiint), &MPIU_INT_MPIINT));
991     PetscCallMPI(MPI_Type_free(&tmpStruct));
992     PetscCallMPI(MPI_Type_commit(&MPIU_INT_MPIINT));
993   }
994   #endif
995 #endif
996   PetscCallMPI(MPI_Type_contiguous(4, MPI_INT, &MPI_4INT));
997   PetscCallMPI(MPI_Type_commit(&MPI_4INT));
998   PetscCallMPI(MPI_Type_contiguous(4, MPIU_INT, &MPIU_4INT));
999   PetscCallMPI(MPI_Type_commit(&MPIU_4INT));
1000 
1001   /*
1002      Attributes to be set on PETSc communicators
1003   */
1004   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &Petsc_Counter_keyval, NULL));
1005   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_InnerComm_Attr_DeleteFn, &Petsc_InnerComm_keyval, NULL));
1006   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_OuterComm_Attr_DeleteFn, &Petsc_OuterComm_keyval, NULL));
1007   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_ShmComm_Attr_DeleteFn, &Petsc_ShmComm_keyval, NULL));
1008   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_CreationIdx_keyval, NULL));
1009   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Garbage_HMap_keyval, NULL));
1010 
1011 #if defined(PETSC_USE_FORTRAN_BINDINGS)
1012   if (ftn) PetscCall(PetscInitFortran_Private(readarguments, file, len));
1013   else
1014 #endif
1015     PetscCall(PetscOptionsInsert(NULL, &PetscGlobalArgc, &PetscGlobalArgs, file));
1016 
1017   /* call a second time so it can look in the options database */
1018   PetscCall(PetscErrorPrintfInitialize());
1019 
1020   /*
1021      Check system options and print help
1022   */
1023   PetscCall(PetscOptionsCheckInitial_Private(help));
1024 
1025   /*
1026     Creates the logging data structures; this is enabled even if logging is not turned on
1027     This is the last thing we do before returning to the user code to prevent having the
1028     logging numbers contaminated by any startup time associated with MPI
1029   */
1030   PetscCall(PetscLogInitialize());
1031 
1032   /*
1033    Initialize PetscDevice and PetscDeviceContext
1034 
1035    Note to any future devs thinking of moving this, proper initialization requires:
1036    1. MPI initialized
1037    2. Options DB initialized
1038    3. Petsc error handling initialized, specifically signal handlers. This expects to set up
1039       its own SIGSEV handler via the push/pop interface.
1040    4. Logging initialized
1041   */
1042   PetscCall(PetscDeviceInitializeFromOptions_Internal(PETSC_COMM_WORLD));
1043 
1044 #if PetscDefined(HAVE_VIENNACL)
1045   flg = PETSC_FALSE;
1046   PetscCall(PetscOptionsHasName(NULL, NULL, "-log_view", &flg));
1047   if (!flg) PetscCall(PetscOptionsGetBool(NULL, NULL, "-viennacl_synchronize", &flg, NULL));
1048   PetscViennaCLSynchronize = flg;
1049   PetscCall(PetscViennaCLInit());
1050 #endif
1051 
1052   PetscCall(PetscCitationsInitialize());
1053 
1054 #if defined(PETSC_HAVE_SAWS)
1055   PetscCall(PetscInitializeSAWs(ftn ? NULL : help));
1056   flg = PETSC_FALSE;
1057   PetscCall(PetscOptionsHasName(NULL, NULL, "-stack_view", &flg));
1058   if (flg) PetscCall(PetscStackViewSAWs());
1059 #endif
1060 
1061   /*
1062      Load the dynamic libraries (on machines that support them), this registers all
1063      the solvers etc. (On non-dynamic machines this initializes the PetscDraw and PetscViewer classes)
1064   */
1065   PetscCall(PetscInitialize_DynamicLibraries());
1066 
1067   PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
1068   PetscCall(PetscInfo(NULL, "PETSc successfully started: number of processors = %d\n", size));
1069   PetscCall(PetscGetHostName(hostname, sizeof(hostname)));
1070   PetscCall(PetscInfo(NULL, "Running on machine: %s\n", hostname));
1071 #if defined(PETSC_HAVE_OPENMP)
1072   {
1073     PetscBool omp_view_flag;
1074     char     *threads = getenv("OMP_NUM_THREADS");
1075 
1076     if (threads) {
1077       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %s (as given by OMP_NUM_THREADS)\n", threads));
1078       (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumOMPThreads);
1079     } else {
1080       PetscNumOMPThreads = (PetscInt)omp_get_max_threads();
1081       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (as given by omp_get_max_threads())\n", PetscNumOMPThreads));
1082     }
1083     PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "OpenMP options", "Sys");
1084     PetscCall(PetscOptionsInt("-omp_num_threads", "Number of OpenMP threads to use (can also use environmental variable OMP_NUM_THREADS", "None", PetscNumOMPThreads, &PetscNumOMPThreads, &flg));
1085     PetscCall(PetscOptionsName("-omp_view", "Display OpenMP number of threads", NULL, &omp_view_flag));
1086     PetscOptionsEnd();
1087     if (flg) {
1088       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (given by -omp_num_threads)\n", PetscNumOMPThreads));
1089       omp_set_num_threads((int)PetscNumOMPThreads);
1090     }
1091     if (omp_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "OpenMP: number of threads %" PetscInt_FMT "\n", PetscNumOMPThreads));
1092   }
1093 #endif
1094 
1095   PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "BLAS options", "Sys");
1096   PetscCall(PetscOptionsName("-blas_view", "Display number of threads to use for BLAS operations", NULL, &blas_view_flag));
1097 #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) || defined(PETSC_HAVE_MKL_SET_NUM_THREADS) || defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1098   {
1099     char *threads = NULL;
1100 
1101     /* determine any default number of threads requested in the environment; TODO: Apple libraries? */
1102   #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS)
1103     threads = getenv("BLIS_NUM_THREADS");
1104     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by BLIS_NUM_THREADS\n", threads));
1105     if (!threads) {
1106       threads = getenv("OMP_NUM_THREADS");
1107       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by OMP_NUM_THREADS\n", threads));
1108     }
1109   #elif defined(PETSC_HAVE_MKL_SET_NUM_THREADS)
1110     threads = getenv("MKL_NUM_THREADS");
1111     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by MKL_NUM_THREADS\n", threads));
1112     if (!threads) {
1113       threads = getenv("OMP_NUM_THREADS");
1114       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by OMP_NUM_THREADS\n", threads));
1115     }
1116   #elif defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1117     threads = getenv("OPENBLAS_NUM_THREADS");
1118     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OPENBLAS_NUM_THREADS\n", threads));
1119     if (!threads) {
1120       threads = getenv("OMP_NUM_THREADS");
1121       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OMP_NUM_THREADS\n", threads));
1122     }
1123   #endif
1124     if (threads) (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumBLASThreads);
1125     PetscCall(PetscOptionsInt("-blas_num_threads", "Number of threads to use for BLAS operations", "None", PetscNumBLASThreads, &PetscNumBLASThreads, &flg));
1126     if (flg) PetscCall(PetscInfo(NULL, "BLAS: Command line number of BLAS thread %" PetscInt_FMT "given by -blas_num_threads\n", PetscNumBLASThreads));
1127     if (flg || threads) {
1128       PetscCall(PetscBLASSetNumThreads(PetscNumBLASThreads));
1129       if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: number of threads %" PetscInt_FMT "\n", PetscNumBLASThreads));
1130     }
1131   }
1132 #elif defined(PETSC_HAVE_APPLE_ACCELERATE)
1133   PetscCall(PetscInfo(NULL, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1134   if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1135 #else
1136   if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: no thread support\n"));
1137 #endif
1138   PetscOptionsEnd();
1139 
1140 #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
1141   /*
1142       Tell MPI about our own data representation converter, this would/should be used if extern32 is not supported by the MPI
1143 
1144       Currently not used because it is not supported by MPICH.
1145   */
1146   if (!PetscBinaryBigEndian()) PetscCallMPI(MPI_Register_datarep((char *)"petsc", PetscDataRep_read_conv_fn, PetscDataRep_write_conv_fn, PetscDataRep_extent_fn, NULL));
1147 #endif
1148 
1149 #if defined(PETSC_SERIALIZE_FUNCTIONS)
1150   PetscCall(PetscFPTCreate(10000));
1151 #endif
1152 
1153 #if defined(PETSC_HAVE_HWLOC)
1154   {
1155     PetscViewer viewer;
1156     PetscCall(PetscOptionsCreateViewer(PETSC_COMM_WORLD, NULL, NULL, "-process_view", &viewer, NULL, &flg));
1157     if (flg) {
1158       PetscCall(PetscProcessPlacementView(viewer));
1159       PetscCall(PetscViewerDestroy(&viewer));
1160     }
1161   }
1162 #endif
1163 
1164   flg = PETSC_TRUE;
1165   PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewfromoptions", &flg, NULL));
1166   if (!flg) PetscCall(PetscOptionsPushCreateViewerOff(PETSC_TRUE));
1167 
1168 #if defined(PETSC_HAVE_ADIOS)
1169   PetscCallExternal(adios_init_noxml, PETSC_COMM_WORLD);
1170   PetscCallExternal(adios_declare_group, &Petsc_adios_group, "PETSc", "", adios_stat_default);
1171   PetscCallExternal(adios_select_method, Petsc_adios_group, "MPI", "", "");
1172   PetscCallExternal(adios_read_init_method, ADIOS_READ_METHOD_BP, PETSC_COMM_WORLD, "");
1173 #endif
1174 
1175 #if defined(__VALGRIND_H)
1176   PETSC_RUNNING_ON_VALGRIND = RUNNING_ON_VALGRIND ? PETSC_TRUE : PETSC_FALSE;
1177   #if defined(PETSC_USING_DARWIN) && defined(PETSC_BLASLAPACK_SDOT_RETURNS_DOUBLE)
1178   if (PETSC_RUNNING_ON_VALGRIND) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING: Running valgrind with the macOS native BLAS and LAPACK can fail. If it fails, try configuring with --download-fblaslapack or --download-f2cblaslapack"));
1179   #endif
1180 #endif
1181   /*
1182       Set flag that we are completely initialized
1183   */
1184   PetscInitializeCalled = PETSC_TRUE;
1185 
1186   PetscCall(PetscOptionsHasName(NULL, NULL, "-python", &flg));
1187   if (flg) PetscCall(PetscPythonInitialize(NULL, NULL));
1188 
1189   PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
1190   if (flg) PetscCall(PetscInfo(NULL, "Running MPI Linear Solver Server\n"));
1191   if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerBegin());
1192   else PetscCheck(!flg, PETSC_COMM_WORLD, PETSC_ERR_SUP, "PETSc configured using -with-single-library=0; -mpi_linear_solver_server not supported in that case");
1193   PetscFunctionReturn(PETSC_SUCCESS);
1194 }
1195 
1196 // "Unknown section 'Environmental Variables'"
1197 // PetscClangLinter pragma disable: -fdoc-section-header-unknown
1198 /*@C
1199   PetscInitialize - Initializes the PETSc database and MPI.
1200   `PetscInitialize()` calls MPI_Init() if that has yet to be called,
1201   so this routine should always be called near the beginning of
1202   your program -- usually the very first line!
1203 
1204   Collective on `MPI_COMM_WORLD` or `PETSC_COMM_WORLD` if it has been set
1205 
1206   Input Parameters:
1207 + argc - count of number of command line arguments
1208 . args - the command line arguments
1209 . file - [optional] PETSc database file, append ":yaml" to filename to specify YAML options format.
1210           Use NULL or empty string to not check for code specific file.
1211           Also checks ~/.petscrc, .petscrc and petscrc.
1212           Use -skip_petscrc in the code specific file (or command line) to skip ~/.petscrc, .petscrc and petscrc files.
1213 - help - [optional] Help message to print, use NULL for no message
1214 
1215    If you wish PETSc code to run ONLY on a subcommunicator of `MPI_COMM_WORLD`, create that
1216    communicator first and assign it to `PETSC_COMM_WORLD` BEFORE calling `PetscInitialize()`. Thus if you are running a
1217    four process job and two processes will run PETSc and have `PetscInitialize()` and PetscFinalize() and two process will not,
1218    then do this. If ALL processes in the job are using `PetscInitialize()` and `PetscFinalize()` then you don't need to do this, even
1219    if different subcommunicators of the job are doing different things with PETSc.
1220 
1221   Options Database Keys:
1222 + -help [intro]                                       - prints help method for each option; if intro is given the program stops after printing the introductory help message
1223 . -start_in_debugger [noxterm,dbx,xdb,gdb,...]        - Starts program in debugger
1224 . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected
1225 . -on_error_emacs <machinename>                       - causes emacsclient to jump to error file
1226 . -on_error_abort                                     - calls `abort()` when error detected (no traceback)
. -on_error_mpiabort                                  - calls `MPI_Abort()` when error detected
1228 . -error_output_stdout                                - prints PETSc error messages to stdout instead of the default stderr
1229 . -error_output_none                                  - does not print the error messages (but handles errors in the same way as if this was not called)
1230 . -debugger_ranks [rank1,rank2,...]                   - Indicates ranks to start in debugger
1231 . -debugger_pause [sleeptime] (in seconds)            - Pauses debugger
1232 . -stop_for_debugger                                  - Print message on how to attach debugger manually to
1233                         process and wait (-debugger_pause) seconds for attachment
1234 . -malloc_dump                                        - prints a list of all unfreed memory at the end of the run
1235 . -malloc_test                                        - like -malloc_dump -malloc_debug, but only active for debugging builds, ignored in optimized build. May want to set in PETSC_OPTIONS environmental variable
1236 . -malloc_view                                        - show a list of all allocated memory during `PetscFinalize()`
1237 . -malloc_view_threshold <t>                          - only list memory allocations of size greater than t with -malloc_view
1238 . -malloc_requested_size                              - malloc logging will record the requested size rather than size after alignment
1239 . -fp_trap                                            - Stops on floating point exceptions
1240 . -no_signal_handler                                  - Indicates not to trap error signals
1241 . -shared_tmp                                         - indicates /tmp directory is shared by all processors
1242 . -not_shared_tmp                                     - each processor has own /tmp
1243 . -tmp                                                - alternative name of /tmp directory
1244 . -get_total_flops                                    - returns total flops done by all processors
1245 - -memory_view                                        - Print memory usage at end of run
1246 
1247   Options Database Keys for Option Database:
1248 + -skip_petscrc           - skip the default option files ~/.petscrc, .petscrc, petscrc
1249 . -options_monitor        - monitor all set options to standard output for the whole program run
1250 - -options_monitor_cancel - cancel options monitoring hard-wired using `PetscOptionsMonitorSet()`
1251 
1252    Options -options_monitor_{all,cancel} are
1253    position-independent and apply to all options set since the PETSc start.
1254    They can be used also in option files.
1255 
1256    See `PetscOptionsMonitorSet()` to do monitoring programmatically.
1257 
1258   Options Database Keys for Profiling:
1259    See Users-Manual: ch_profiling for details.
1260 + -info [filename][:[~]<list,of,classnames>[:[~]self]] - Prints verbose information. See `PetscInfo()`.
1261 . -log_sync                                            - Enable barrier synchronization for all events. This option is useful to debug imbalance within each event,
1262         however it slows things down and gives a distorted view of the overall runtime.
1263 . -log_trace [filename]                                - Print traces of all PETSc calls to the screen (useful to determine where a program
1264         hangs without running in the debugger).  See `PetscLogTraceBegin()`.
1265 . -log_view [:filename:format][,[:filename:format]...] - Prints summary of flop and timing information to screen or file, see `PetscLogView()` (up to 4 viewers)
1266 . -log_view_memory                                     - Includes in the summary from -log_view the memory used in each event, see `PetscLogView()`.
. -log_view_gpu_time                                   - Includes in the summary from -log_view the time used in each GPU kernel, see `PetscLogView()`.
1268 . -log_exclude: <vec,mat,pc,ksp,snes>                  - excludes subset of object classes from logging
1269 . -log [filename]                                      - Logs profiling information in a dump file, see `PetscLogDump()`.
1270 . -log_all [filename]                                  - Same as `-log`.
1271 . -log_mpe [filename]                                  - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution)
1272 . -log_perfstubs                                       - Starts a log handler with the perfstubs interface (which is used by TAU)
1273 . -log_nvtx                                            - Starts an nvtx log handler for use with Nsight
1274 . -viewfromoptions on,off                              - Enable or disable `XXXSetFromOptions()` calls, for applications with many small solves turn this off
1275 - -check_pointer_intensity 0,1,2                       - if pointers are checked for validity (debug version only), using 0 will result in faster code
1276 
1277   Options Database Keys for SAWs:
1278 + -saws_port <portnumber>        - port number to publish SAWs data, default is 8080
1279 . -saws_port_auto_select         - have SAWs select a new unique port number where it publishes the data, the URL is printed to the screen
1280                                    this is useful when you are running many jobs that utilize SAWs at the same time
1281 . -saws_log <filename>           - save a log of all SAWs communication
1282 . -saws_https <certificate file> - have SAWs use HTTPS instead of HTTP
1283 - -saws_root <directory>         - allow SAWs to have access to the given directory to search for requested resources and files
1284 
1285   Environmental Variables:
1286 +   `PETSC_TMP` - alternative tmp directory
1287 .   `PETSC_SHARED_TMP` - tmp is shared by all processes
1288 .   `PETSC_NOT_SHARED_TMP` - each process has its own private tmp
1289 .   `PETSC_OPTIONS` - a string containing additional options for petsc in the form of command line "-key value" pairs
1290 .   `PETSC_OPTIONS_YAML` - (requires configuring PETSc to use libyaml) a string containing additional options for petsc in the form of a YAML document
1291 .   `PETSC_VIEWER_SOCKET_PORT` - socket number to use for socket viewer
1292 -   `PETSC_VIEWER_SOCKET_MACHINE` - machine to use for socket viewer to connect to
1293 
1294   Level: beginner
1295 
1296   Note:
1297   If for some reason you must call `MPI_Init()` separately, call
1298   it before `PetscInitialize()`.
1299 
1300   Fortran Notes:
1301   In Fortran this routine can be called with
1302 .vb
1303        call PetscInitialize(ierr)
1304        call PetscInitialize(file,ierr) or
1305        call PetscInitialize(file,help,ierr)
1306 .ve
1307 
1308   If your main program is C but you call Fortran code that also uses PETSc you need to call `PetscInitializeFortran()` soon after
1309   calling `PetscInitialize()`.
1310 
1311   Options Database Key for Developers:
1312 . -checkfunctionlist - automatically checks that function lists associated with objects are correctly cleaned up. Produces messages of the form:
1313     "function name: MatInodeGetInodeSizes_C" if they are not cleaned up. This flag is always set for the test harness (in framework.py)
1314 
1315 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscInitializeNoArguments()`, `PetscLogGpuTime()`
1316 @*/
1317 PetscErrorCode PetscInitialize(int *argc, char ***args, const char file[], const char help[])
1318 {
1319   PetscMPIInt flag;
1320   const char *prog = "Unknown Name", *mpienv;
1321 
1322   PetscFunctionBegin;
1323   if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
1324   PetscCallMPI(MPI_Initialized(&flag));
1325   if (!flag) {
1326     PetscCheck(PETSC_COMM_WORLD == MPI_COMM_NULL, PETSC_COMM_SELF, PETSC_ERR_SUP, "You cannot set PETSC_COMM_WORLD if you have not initialized MPI first");
1327     PetscCall(PetscPreMPIInit_Private());
1328 #if defined(PETSC_HAVE_MPI_INIT_THREAD)
1329     {
1330       PetscMPIInt provided;
1331       PetscCallMPI(MPI_Init_thread(argc, args, PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE ? MPI_THREAD_FUNNELED : PETSC_MPI_THREAD_REQUIRED, &provided));
1332       PetscCheck(PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE || provided >= PETSC_MPI_THREAD_REQUIRED, PETSC_COMM_SELF, PETSC_ERR_MPI, "The MPI implementation's provided thread level is less than what you required");
1333       if (PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE) PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_FUNNELED; // assign it a valid value after check-up
1334     }
1335 #else
1336     PetscCallMPI(MPI_Init(argc, args));
1337 #endif
1338     if (PetscDefined(HAVE_MPIUNI)) {
1339       mpienv = getenv("PMI_SIZE");
1340       if (!mpienv) mpienv = getenv("OMPI_COMM_WORLD_SIZE");
1341       if (mpienv) {
1342         PetscInt isize;
1343         PetscCall(PetscOptionsStringToInt(mpienv, &isize));
1344         if (isize != 1) printf("You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc\n");
1345         PetscCheck(isize == 1, MPI_COMM_SELF, PETSC_ERR_MPI, "You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc");
1346       }
1347     }
1348     PetscBeganMPI = PETSC_TRUE;
1349   }
1350 
1351   if (argc && *argc) prog = **args;
1352   if (argc && args) {
1353     PetscGlobalArgc = *argc;
1354     PetscGlobalArgs = *args;
1355   }
1356   PetscCall(PetscInitialize_Common(prog, file, help, PETSC_FALSE, PETSC_FALSE, 0));
1357   PetscFunctionReturn(PETSC_SUCCESS);
1358 }
1359 
1360 PETSC_INTERN PetscObject *PetscObjects;
1361 PETSC_INTERN PetscInt     PetscObjectsCounts;
1362 PETSC_INTERN PetscInt     PetscObjectsMaxCounts;
1363 PETSC_INTERN PetscBool    PetscObjectsLog;
1364 
/*
    Frees all the MPI datatypes and reduction operations that PETSc may have created
    during PetscInitialize(). Must run before MPI_Finalize().
*/
PetscErrorCode PetscFreeMPIResources(void)
{
  PetscFunctionBegin;
  /* Extended-precision scalar datatypes (only registered when the compiler supports them) */
#if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  PetscCallMPI(MPI_Type_free(&MPIU___FLOAT128));
  #if defined(PETSC_HAVE_COMPLEX)
  PetscCallMPI(MPI_Type_free(&MPIU___COMPLEX128));
  #endif
#endif
#if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  PetscCallMPI(MPI_Type_free(&MPIU___FP16));
#endif

  /* Custom reduction ops: a __float128/__fp16 PetscReal build replaces SUM/MAX/MIN
     outright; otherwise only the auxiliary SUM op for those types was created */
#if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
  PetscCallMPI(MPI_Op_free(&MPIU_SUM));
  PetscCallMPI(MPI_Op_free(&MPIU_MAX));
  PetscCallMPI(MPI_Op_free(&MPIU_MIN));
#elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
  PetscCallMPI(MPI_Op_free(&MPIU_SUM___FP16___FLOAT128));
#endif

  /* Paired/struct datatypes used by PETSc collectives */
  PetscCallMPI(MPI_Type_free(&MPIU_2SCALAR));
  PetscCallMPI(MPI_Type_free(&MPIU_REAL_INT));
  PetscCallMPI(MPI_Type_free(&MPIU_SCALAR_INT));
  /* With 32-bit indices these are aliases of built-in MPI types and must not be freed */
#if defined(PETSC_USE_64BIT_INDICES)
  PetscCallMPI(MPI_Type_free(&MPIU_2INT));
  PetscCallMPI(MPI_Type_free(&MPIU_INT_MPIINT));
#endif
  PetscCallMPI(MPI_Type_free(&MPI_4INT));
  PetscCallMPI(MPI_Type_free(&MPIU_4INT));
  PetscCallMPI(MPI_Op_free(&MPIU_MAXSUM_OP));
  PetscCallMPI(MPI_Op_free(&Petsc_Garbage_SetIntersectOp));
  PetscFunctionReturn(PETSC_SUCCESS);
}
1402 
1403 PETSC_INTERN PetscErrorCode PetscLogFinalize(void);
1404 PETSC_EXTERN PetscErrorCode PetscFreeAlign(void *, int, const char[], const char[]);
1405 
1406 /*@
1407   PetscFinalize - Checks for options to be called at the conclusion
1408   of the program. `MPI_Finalize()` is called only if the user had not
1409   called `MPI_Init()` before calling `PetscInitialize()`.
1410 
1411   Collective on `PETSC_COMM_WORLD`
1412 
1413   Options Database Keys:
1414 + -options_view                    - Calls `PetscOptionsView()`
1415 . -options_left                    - Prints unused options that remain in the database
. -objects_dump [all]              - Prints list of objects allocated by the user that have not been freed; the option all causes all outstanding objects to be listed
1417 . -mpidump                         - Calls PetscMPIDump()
1418 . -malloc_dump <optional filename> - Calls `PetscMallocDump()`, displays all memory allocated that has not been freed
1419 . -memory_view                     - Prints total memory usage
1420 - -malloc_view <optional filename> - Prints list of all memory allocated and in what functions
1421 
1422   Level: beginner
1423 
1424   Note:
1425   See `PetscInitialize()` for other runtime options.
1426 
1427 .seealso: `PetscInitialize()`, `PetscOptionsView()`, `PetscMallocDump()`, `PetscMPIDump()`, `PetscEnd()`
1428 @*/
PetscErrorCode PetscFinalize(void)
{
  PetscMPIInt rank;
  PetscInt    nopt;
  PetscBool   flg1 = PETSC_FALSE, flg2 = PETSC_FALSE, flg3 = PETSC_FALSE;
  PetscBool   flg;
  char        mname[PETSC_MAX_PATH_LEN];

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "PetscInitialize() must be called before PetscFinalize()");
  PetscCall(PetscInfo(NULL, "PetscFinalize() called\n"));

  /* Stop the MPI linear solver server if the user enabled it */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
  if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerEnd());

  /* Release the copy of the command-line arguments kept for Fortran access */
  PetscCall(PetscFreeAlign(PetscGlobalArgsFortran, 0, NULL, NULL));
  PetscGlobalArgc = 0;
  PetscGlobalArgs = NULL;

  /* Clean up Garbage automatically on COMM_SELF and COMM_WORLD at finalize */
  {
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscMPIInt flg;
    void       *tmp;

    /* Only run cleanup if the inner communicator exists AND it carries a garbage hash map */
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_SELF));
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD));
  }

  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
#if defined(PETSC_HAVE_ADIOS)
  PetscCallExternal(adios_read_finalize_method, ADIOS_READ_METHOD_BP_AGGREGATE);
  PetscCallExternal(adios_finalize, rank);
#endif
  /* Dump the accumulated citation list, to a file if -citations <filename> was given */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-citations", &flg));
  if (flg) {
    char *cits, filename[PETSC_MAX_PATH_LEN];
    FILE *fd = PETSC_STDOUT;

    PetscCall(PetscOptionsGetString(NULL, NULL, "-citations", filename, sizeof(filename), NULL));
    if (filename[0]) PetscCall(PetscFOpen(PETSC_COMM_WORLD, filename, "w", &fd));
    /* Append a terminating NUL to the segmented buffer before extracting it as one string */
    PetscCall(PetscSegBufferGet(PetscCitationsList, 1, &cits));
    cits[0] = 0;
    PetscCall(PetscSegBufferExtractAlloc(PetscCitationsList, &cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "If you publish results based on this computation please cite the following:\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "%s", cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFClose(PETSC_COMM_WORLD, fd));
    PetscCall(PetscFree(cits));
  }
  PetscCall(PetscSegBufferDestroy(&PetscCitationsList));

#if defined(PETSC_SERIALIZE_FUNCTIONS)
  PetscCall(PetscFPTDestroy()));
#endif

#if defined(PETSC_HAVE_SAWS)
  flg = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-saw_options", &flg, NULL));
  if (flg) PetscCall(PetscOptionsSAWsDestroy());
#endif

#if defined(PETSC_HAVE_X)
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-x_virtual", &flg1, NULL));
  if (flg1) {
    /*  this is a crude hack, but better than nothing */
    PetscCall(PetscPOpen(PETSC_COMM_WORLD, NULL, "pkill -15 Xvfb", "r", NULL));
  }
#endif

#if !defined(PETSC_HAVE_THREADSAFETY)
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-memory_view", &flg2, NULL));
  if (flg2) PetscCall(PetscMemoryView(PETSC_VIEWER_STDOUT_WORLD, "Summary of Memory Usage in PETSc\n"));
#endif

  if (PetscDefined(USE_LOG)) {
    flg1 = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-get_total_flops", &flg1, NULL));
    if (flg1) {
      PetscLogDouble flops = 0;
      PetscCallMPI(MPI_Reduce(&petsc_TotalFlops, &flops, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Total flops over all processors %g\n", flops));
    }
  }

  if (PetscDefined(USE_LOG) && PetscDefined(HAVE_MPE)) {
    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_mpe", mname, sizeof(mname), &flg1));
    if (flg1) PetscCall(PetscLogMPEDump(mname[0] ? mname : NULL));
  }

#if defined(PETSC_HAVE_KOKKOS)
  // Free petsc/kokkos stuff before the potentially non-null petsc default gpu stream is destroyed by PetscObjectRegisterDestroyAll
  if (PetscKokkosInitialized) {
    PetscCall(PetscKokkosFinalize_Private());
    PetscKokkosInitialized = PETSC_FALSE;
  }
#endif

  // Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_().
  PetscCall(PetscObjectRegisterDestroyAll());

  if (PetscDefined(USE_LOG)) {
    PetscCall(PetscOptionsPushCreateViewerOff(PETSC_FALSE));
    PetscCall(PetscLogViewFromOptions());
    PetscCall(PetscOptionsPopCreateViewerOff());
    //  It should be turned on with PetscLogGpuTime() and never turned off except in this place
    PetscLogGpuTimeFlag = PETSC_FALSE;

    // Free any objects created by the last block of code.
    PetscCall(PetscObjectRegisterDestroyAll());

    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_all", mname, sizeof(mname), &flg1));
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log", mname, sizeof(mname), &flg2));
    if (flg1 || flg2) PetscCall(PetscLogDump(mname));
  }

  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-no_signal_handler", &flg1, NULL));
  if (!flg1) PetscCall(PetscPopSignalHandler());
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpidump", &flg1, NULL));
  if (flg1) PetscCall(PetscMPIDump(stdout));
  flg1 = PETSC_FALSE;
  flg2 = PETSC_FALSE;
  /* preemptive call to avoid listing this option in options table as unused */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-malloc_dump", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_view", &flg2, NULL));

  if (flg2) { PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD)); }

  /* to prevent PETSc -options_left from warning */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox_warning", &flg1));

  flg3 = PETSC_FALSE; /* default value is required */
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_left", &flg3, &flg1));
  /* When -options_left was not explicitly given, default to warning about unused options */
  if (!flg1) flg3 = PETSC_TRUE;
  if (flg3) {
    if (!flg2 && flg1) { /* have not yet printed the options */
      PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD));
    }
    PetscCall(PetscOptionsAllUsed(NULL, &nopt));
    if (nopt) {
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! There are options you set that were not used!\n"));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! could be spelling mistake, etc!\n"));
      if (nopt == 1) {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There is one unused database option. It is:\n"));
      } else {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are %" PetscInt_FMT " unused database options. They are:\n", nopt));
      }
    } else if (flg3 && flg1) {
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are no unused options.\n"));
    }
    PetscCall(PetscOptionsLeft(NULL));
  }

#if defined(PETSC_HAVE_SAWS)
  if (!PetscGlobalRank) {
    PetscCall(PetscStackSAWsViewOff());
    PetscCallSAWs(SAWs_Finalize, ());
  }
#endif

  /*
       List all objects the user may have forgot to free
  */
  if (PetscDefined(USE_LOG) && PetscObjectsLog) {
    PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
    if (flg1) {
      MPI_Comm local_comm;
      char     string[64];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-objects_dump", string, sizeof(string), NULL));
      /* Duplicated communicator so the rank-by-rank sequential printing does not interfere with user communications */
      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscObjectsDump(stdout, (string[0] == 'a') ? PETSC_TRUE : PETSC_FALSE));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }

  PetscObjectsCounts    = 0;
  PetscObjectsMaxCounts = 0;
  PetscCall(PetscFree(PetscObjects));

  /*
     Destroy any packages that registered a finalize
  */
  PetscCall(PetscRegisterFinalizeAll());

  PetscCall(PetscLogFinalize());

  /*
     Print PetscFunctionLists that have not been properly freed
  */
  if (PetscPrintFunctionList) PetscCall(PetscFunctionListPrintAll());

  if (petsc_history) {
    PetscCall(PetscCloseHistoryFile(&petsc_history));
    petsc_history = NULL;
  }
  PetscCall(PetscOptionsHelpPrintedDestroy(&PetscOptionsHelpPrintedSingleton));
  PetscCall(PetscInfoDestroy());

  /* Malloc dump/view reporting; skipped under valgrind (which does its own leak checking) */
#if !defined(PETSC_HAVE_THREADSAFETY)
  if (!(PETSC_RUNNING_ON_VALGRIND)) {
    char  fname[PETSC_MAX_PATH_LEN];
    char  sname[PETSC_MAX_PATH_LEN];
    FILE *fd;
    int   err;

    flg2 = PETSC_FALSE;
    flg3 = PETSC_FALSE;
    if (PetscDefined(USE_DEBUG)) PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_test", &flg2, NULL));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_debug", &flg3, NULL));
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_dump", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      /* Per-rank dump file: <fname>_<rank> */
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocDump(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1 || flg2 || flg3) {
      MPI_Comm local_comm;

      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocDump(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_view", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocView(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1) {
      MPI_Comm local_comm;

      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocView(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }
#endif

  /*
     Close any open dynamic libraries
  */
  PetscCall(PetscFinalize_DynamicLibraries());

  /* Can be destroyed only after all the options are used */
  PetscCall(PetscOptionsDestroyDefault());

#if defined(PETSC_HAVE_NVSHMEM)
  if (PetscBeganNvshmem) {
    PetscCall(PetscNvshmemFinalize());
    PetscBeganNvshmem = PETSC_FALSE;
  }
#endif

  PetscCall(PetscFreeMPIResources());

  /*
     Destroy any known inner MPI_Comm's and attributes pointing to them
     Note this will not destroy any new communicators the user has created.

     If all PETSc objects were not destroyed those left over objects will have hanging references to
     the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again
 */
  {
    PetscCommCounter *counter;
    PetscMPIInt       flg;
    MPI_Comm          icomm;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
  }

  /* Keyvals created unconditionally in PetscInitialize() */
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Counter_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_InnerComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_OuterComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_ShmComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_CreationIdx_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Garbage_HMap_keyval));

  // Free keyvals which may be silently created by some routines
  if (Petsc_SharedWD_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedWD_keyval));
  if (Petsc_SharedTmp_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedTmp_keyval));

  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockOpen));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStdout));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStderr));
  PetscCall(PetscSpinlockDestroy(&PetscCommSpinLock));

  /* Only finalize MPI if PetscInitialize() started it */
  if (PetscBeganMPI) {
    PetscMPIInt flag;
    PetscCallMPI(MPI_Finalized(&flag));
    PetscCheck(!flag, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()");
    /* wait until the very last moment to disable error handling */
    PetscErrorHandlingInitialized = PETSC_FALSE;
    PetscCallMPI(MPI_Finalize());
  } else PetscErrorHandlingInitialized = PETSC_FALSE;

  /*

     Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because
   the communicator has some outstanding requests on it. Specifically if the
   flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See
   src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate()
   is never freed as it should be. Thus one may obtain messages of the form
   [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the
   memory was not freed.

*/
  PetscCall(PetscMallocClear());
  PetscCall(PetscStackReset());

  PetscInitializeCalled = PETSC_FALSE;
  PetscFinalizeCalled   = PETSC_TRUE;
#if defined(PETSC_USE_COVERAGE)
  /*
     flush gcov, otherwise during CI the flushing continues into the next pipeline resulting in git not being able to delete directories since the
     gcov files are still being added to the directories as git tries to remove the directories.
   */
  __gcov_flush();
#endif
  /* To match PetscFunctionBegin() at the beginning of this function */
  PetscStackClearTop;
  return PETSC_SUCCESS;
}
1803 
#if defined(PETSC_MISSING_LAPACK_lsame_)
/* Fallback for the LAPACK auxiliary routine lsame_() when the BLAS/LAPACK
   library does not provide it: compares two single characters ignoring case
   (upper- and lower-case ASCII letters differ by 32). Returns 1 on match, 0 otherwise. */
PETSC_EXTERN int lsame_(char *a, char *b)
{
  const int ca = *a, cb = *b;

  return (ca == cb || ca + 32 == cb || ca - 32 == cb) ? 1 : 0;
}
#endif
1813 
#if defined(PETSC_MISSING_LAPACK_lsame)
/* No-underscore variant of the lsame_() fallback above, for Fortran name-mangling
   schemes without a trailing underscore: case-insensitive single-character
   comparison (ASCII cases differ by 32). Returns 1 on match, 0 otherwise. */
PETSC_EXTERN int lsame(char *a, char *b)
{
  const int ca = *a, cb = *b;

  return (ca == cb || ca + 32 == cb || ca - 32 == cb) ? 1 : 0;
}
#endif
1823 
/* Dispatch an allreduce to either the classic int-count MPI_Allreduce() or the
   MPI-4 large-count MPI_Allreduce_c(), depending on what the MPI library provides.
   Returns the raw MPI error code rather than a PetscErrorCode. */
static inline PetscMPIInt MPIU_Allreduce_Count(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
{
  PetscMPIInt err;
#if !defined(PETSC_HAVE_MPI_LARGE_COUNT)
  PetscMPIInt count2;

  /* No large-count API available: narrow count to an int (errors if it does not fit) */
  PetscMPIIntCast_Internal(count, &count2);
  err = MPI_Allreduce((void *)inbuf, outbuf, count2, dtype, op, comm);
#else
  err = MPI_Allreduce_c((void *)inbuf, outbuf, count, dtype, op, comm);
#endif
  return err;
}
1837 
/*
     When count is 1 and dtype == MPIU_INT performs the reduction in PetscInt64 to check for integer overflow
*/
PetscMPIInt MPIU_Allreduce_Private(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
{
  PetscMPIInt err;
  /* Overflow detection is only needed when PetscInt is 32-bit; widen the single value to 64 bits */
  if (!PetscDefined(USE_64BIT_INDICES) && count == 1 && dtype == MPIU_INT) {
    PetscInt64 incnt, outcnt;
    void      *inbufd, *outbufd;

    if (inbuf != MPI_IN_PLACE) {
      incnt  = *(PetscInt32 *)inbuf;
      inbufd = &incnt;
    } else {
      /* In-place reduction: seed the widened output buffer with the caller's current value */
      outcnt = *(PetscInt32 *)outbuf;
      inbufd = (void *)MPI_IN_PLACE;
    }
    outbufd = &outcnt;
    err     = MPIU_Allreduce_Count(inbufd, outbufd, count, MPIU_INT64, op, comm);
    /* Report overflow of the reduced value as an MPI-level error */
    if (!err && outcnt > PETSC_INT_MAX) err = MPI_ERR_OTHER;
    /* Narrow back into the caller's 32-bit buffer (truncated if overflow was flagged above) */
    *(PetscInt32 *)outbuf = (PetscInt32)outcnt;
  } else {
    err = MPIU_Allreduce_Count(inbuf, outbuf, count, dtype, op, comm);
  }
  return err;
}
1864