xref: /petsc/src/sys/objects/pinit.c (revision f7b6882e165cbb09bfa16c37290015d70307eb3b)
1 #define PETSC_DESIRE_FEATURE_TEST_MACROS
2 /*
3    This file defines the initialization of PETSc, including PetscInitialize()
4 */
5 #include <petsc/private/petscimpl.h> /*I  "petscsys.h"   I*/
6 #include <petsc/private/logimpl.h>
7 #include <petscviewer.h>
8 #include <petsc/private/garbagecollector.h>
9 
10 #if !defined(PETSC_HAVE_WINDOWS_COMPILERS)
11   #include <petsc/private/valgrind/valgrind.h>
12 #endif
13 
14 #if defined(PETSC_USE_FORTRAN_BINDINGS)
15   #include <petsc/private/ftnimpl.h>
16 #endif
17 
18 #if PetscDefined(USE_COVERAGE)
19 EXTERN_C_BEGIN
20   #if defined(PETSC_HAVE___GCOV_DUMP)
21     #define __gcov_flush(x) __gcov_dump(x)
22   #endif
23 void __gcov_flush(void);
24 EXTERN_C_END
25 #endif
26 
27 #if defined(PETSC_SERIALIZE_FUNCTIONS)
28 PETSC_INTERN PetscFPT PetscFPTData;
29 PetscFPT              PetscFPTData = 0;
30 #endif
31 
32 #if PetscDefined(HAVE_SAWS)
33   #include <petscviewersaws.h>
34 #endif
35 
36 PETSC_INTERN FILE *petsc_history;
37 
38 PETSC_INTERN PetscErrorCode PetscInitialize_DynamicLibraries(void);
39 PETSC_INTERN PetscErrorCode PetscFinalize_DynamicLibraries(void);
40 PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm, int);
41 PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm, int);
42 PETSC_INTERN PetscErrorCode PetscCloseHistoryFile(FILE **);
43 
44 /* user may set these BEFORE calling PetscInitialize() */
45 MPI_Comm PETSC_COMM_WORLD = MPI_COMM_NULL;
46 #if PetscDefined(HAVE_MPI_INIT_THREAD)
47 PetscMPIInt PETSC_MPI_THREAD_REQUIRED = PETSC_DECIDE;
48 #else
49 PetscMPIInt PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_SINGLE;
50 #endif
51 
52 PetscMPIInt Petsc_Counter_keyval      = MPI_KEYVAL_INVALID;
53 PetscMPIInt Petsc_InnerComm_keyval    = MPI_KEYVAL_INVALID;
54 PetscMPIInt Petsc_OuterComm_keyval    = MPI_KEYVAL_INVALID;
55 PetscMPIInt Petsc_ShmComm_keyval      = MPI_KEYVAL_INVALID;
56 PetscMPIInt Petsc_CreationIdx_keyval  = MPI_KEYVAL_INVALID;
57 PetscMPIInt Petsc_Garbage_HMap_keyval = MPI_KEYVAL_INVALID;
58 
59 PetscMPIInt Petsc_SharedWD_keyval  = MPI_KEYVAL_INVALID;
60 PetscMPIInt Petsc_SharedTmp_keyval = MPI_KEYVAL_INVALID;
61 
62 /*
63      Declare and set all the string names of the PETSc enums
64 */
65 const char *const PetscBools[]     = {"FALSE", "TRUE", "PetscBool", "PETSC_", NULL};
66 const char *const PetscCopyModes[] = {"COPY_VALUES", "OWN_POINTER", "USE_POINTER", "PetscCopyMode", "PETSC_", NULL};
67 
68 PetscBool PetscPreLoadingUsed = PETSC_FALSE;
69 PetscBool PetscPreLoadingOn   = PETSC_FALSE;
70 
71 PetscInt PetscHotRegionDepth;
72 
73 PetscBool PETSC_RUNNING_ON_VALGRIND = PETSC_FALSE;
74 
75 #if defined(PETSC_HAVE_THREADSAFETY)
76 PetscSpinlock PetscViewerASCIISpinLockOpen;
77 PetscSpinlock PetscViewerASCIISpinLockStdout;
78 PetscSpinlock PetscViewerASCIISpinLockStderr;
79 PetscSpinlock PetscCommSpinLock;
80 #endif
81 
82 extern PetscInt PetscNumBLASThreads;
83 
84 /*@C
85   PetscInitializeNoPointers - Calls PetscInitialize() from C/C++ without the pointers to argc and args
86 
87   Collective, No Fortran Support
88 
89   Input Parameters:
90 + argc     - number of args
91 . args     - array of command line arguments
92 . filename - optional name of the program file, pass `NULL` to ignore
93 - help     - optional help, pass `NULL` to ignore
94 
95   Level: advanced
96 
97   Notes:
98   this is called only by the PETSc Julia interface. Even though it might start MPI it sets the flag to
99   indicate that it did NOT start MPI so that the `PetscFinalize()` does not end MPI, thus allowing `PetscInitialize()` to
100   be called multiple times from Julia without the problem of trying to initialize MPI more than once.
101 
102   Developer Notes:
103   Turns off PETSc signal handling to allow Julia to manage signals
104 
105 .seealso: `PetscInitialize()`, `PetscInitializeFortran()`, `PetscInitializeNoArguments()`
106 */
107 PetscErrorCode PetscInitializeNoPointers(int argc, char **args, const char *filename, const char *help)
108 {
109   int    myargc = argc;
110   char **myargs = args;
111 
112   PetscFunctionBegin;
113   PetscCall(PetscInitialize(&myargc, &myargs, filename, help));
114   PetscCall(PetscPopSignalHandler());
115   PetscBeganMPI = PETSC_FALSE;
116   PetscFunctionReturn(PETSC_SUCCESS);
117 }
118 
119 /*@C
120   PetscInitializeNoArguments - Calls `PetscInitialize()` from C/C++ without
121   the command line arguments.
122 
123   Collective
124 
125   Level: advanced
126 
127 .seealso: `PetscInitialize()`, `PetscInitializeFortran()`
128 @*/
129 PetscErrorCode PetscInitializeNoArguments(void) PeNS
130 {
131   int    argc = 0;
132   char **args = NULL;
133 
134   PetscFunctionBegin;
135   PetscCall(PetscInitialize(&argc, &args, NULL, NULL));
136   PetscFunctionReturn(PETSC_SUCCESS);
137 }
138 
139 /*@
140   PetscInitialized - Determine whether PETSc is initialized.
141 
142   Output Parameter:
143 . isInitialized - `PETSC_TRUE` if PETSc is initialized, `PETSC_FALSE` otherwise
144 
145   Level: beginner
146 
147 .seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
148 @*/
PetscErrorCode PetscInitialized(PetscBool *isInitialized)
{
  PetscFunctionBegin;
  /* The pointer is only validated once PETSc is up; NOTE(review): presumably
     because PetscAssertPointer()'s error reporting needs PETSc's error
     machinery to be initialized -- confirm */
  if (PetscInitializeCalled) PetscAssertPointer(isInitialized, 1);
  *isInitialized = PetscInitializeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}
156 
157 /*@
158   PetscFinalized - Determine whether `PetscFinalize()` has been called yet
159 
160   Output Parameter:
161 . isFinalized - `PETSC_TRUE` if PETSc is finalized, `PETSC_FALSE` otherwise
162 
163   Level: developer
164 
165 .seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
166 @*/
PetscErrorCode PetscFinalized(PetscBool *isFinalized)
{
  PetscFunctionBegin;
  /* Skip pointer validation after finalization; NOTE(review): presumably the
     error machinery PetscAssertPointer() relies on is torn down by then --
     confirm (mirrors the conditional check in PetscInitialized()) */
  if (!PetscFinalizeCalled) PetscAssertPointer(isFinalized, 1);
  *isFinalized = PetscFinalizeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}
174 
175 PETSC_INTERN PetscErrorCode PetscOptionsCheckInitial_Private(const char[]);
176 
177 /*
178        This function is the MPI reduction operation used to compute the sum of the
179    first half of the datatype and the max of the second half.
180 */
181 MPI_Op MPIU_MAXSUM_OP               = 0;
182 MPI_Op Petsc_Garbage_SetIntersectOp = 0;
183 
/*
   MPIU_MaxSum_Local - user-defined MPI reduction body behind MPIU_MAXSUM_OP:
   for pairs packed as (max-part, sum-part) it takes the max of the first
   member and the sum of the second, accumulating into 'out'.
   Called by MPI, not by users.
*/
PETSC_INTERN void MPIAPI MPIU_MaxSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscFunctionBegin;
  if (*datatype == MPIU_INT_MPIINT && PetscDefined(USE_64BIT_INDICES)) {
#if defined(PETSC_USE_64BIT_INDICES)
    /* mixed-width pairs: .a is PetscInt (64-bit here), .b is PetscMPIInt */
    struct petsc_mpiu_int_mpiint *xin = (struct petsc_mpiu_int_mpiint *)in, *xout = (struct petsc_mpiu_int_mpiint *)out;
    PetscMPIInt                   count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[i].a = PetscMax(xout[i].a, xin[i].a);
      xout[i].b += xin[i].b;
    }
#endif
  } else if (*datatype == MPIU_2INT || *datatype == MPIU_INT_MPIINT) {
    /* homogeneous pairs laid out as PetscInt[2*i] = max part, [2*i+1] = sum part */
    PetscInt   *xin = (PetscInt *)in, *xout = (PetscInt *)out;
    PetscMPIInt count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[2 * i] = PetscMax(xout[2 * i], xin[2 * i]);
      xout[2 * i + 1] += xin[2 * i + 1];
    }
  } else {
    /* cannot return an error code from an MPI reduction callback: print and abort */
    PetscErrorCode ierr = (*PetscErrorPrintf)("Can only handle MPIU_2INT and MPIU_INT_MPIINT data types");
    (void)ierr;
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
212 
213 /*@
214   PetscMaxSum - Returns the max of the first entry over all MPI processes and the sum of the second entry.
215 
216   Collective
217 
218   Input Parameters:
219 + comm  - the communicator
- array - an array of length 2 times `size`, the number of MPI processes
221 
222   Output Parameters:
223 + max - the maximum of `array[2*rank]` over all MPI processes
224 - sum - the sum of the `array[2*rank + 1]` over all MPI processes
225 
226   Level: developer
227 
228 .seealso: `PetscInitialize()`
229 @*/
PetscErrorCode PetscMaxSum(MPI_Comm comm, const PetscInt array[], PetscInt *max, PetscInt *sum)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
  {
    /* reduce all pairs with MPIU_MAXSUM_OP and scatter one (max,sum) pair to each rank */
    struct {
      PetscInt max, sum;
    } work;
    PetscCallMPI(MPI_Reduce_scatter_block((void *)array, &work, 1, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work.max;
    *sum = work.sum;
  }
#else
  {
    /* fallback: allreduce the full array of pairs, then pick out this rank's entry */
    PetscMPIInt size, rank;
    struct {
      PetscInt max, sum;
    } *work;
    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscMalloc1(size, &work));
    PetscCallMPI(MPIU_Allreduce((void *)array, work, size, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work[rank].max;
    *sum = work[rank].sum;
    PetscCall(PetscFree(work));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
259 
260 #if (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
261   #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
262     #include <quadmath.h>
263   #endif
264 MPI_Op MPIU_SUM___FP16___FLOAT128 = 0;
265   #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
266 MPI_Op MPIU_SUM = 0;
267   #endif
268 
/*
   PetscSum_Local - user-defined MPI reduction body performing elementwise
   summation for real/complex types (including __float128/__complex128 and
   __fp16 when configured) that MPI's built-in MPI_SUM cannot handle.
   Called by MPI, not by users.
*/
PETSC_EXTERN void MPIAPI PetscSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscMPIInt i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype == MPIU_REAL) {
    PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
  #if defined(PETSC_HAVE_COMPLEX)
  else if (*datatype == MPIU_COMPLEX) {
    PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
  #endif
  #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  else if (*datatype == MPIU___FLOAT128) {
    __float128 *xin = (__float128 *)in, *xout = (__float128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
    #if defined(PETSC_HAVE_COMPLEX)
  } else if (*datatype == MPIU___COMPLEX128) {
    __complex128 *xin = (__complex128 *)in, *xout = (__complex128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
    #endif
  }
  #endif
  #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  else if (*datatype == MPIU___FP16) {
    /* sum in a wider type, then cast back to half precision */
    __fp16 *xin = (__fp16 *)in, *xout = (__fp16 *)out;
    for (i = 0; i < count; i++) xout[i] = (__fp16)(xin[i] + xout[i]);
  }
  #endif
  else {
  /* unsupported datatype: the abort message lists exactly the types compiled in above */
  #if (!defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)) && (!defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16))
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
  #elif !defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, or MPIU___COMPLEX128 data types"));
  #elif !defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, or MPIU___FP16 data types"));
  #else
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, MPIU___COMPLEX128, or MPIU___FP16 data types"));
  #endif
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
315 #endif
316 
317 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
318 MPI_Op MPIU_MAX = 0;
319 MPI_Op MPIU_MIN = 0;
320 
321 PETSC_EXTERN void MPIAPI PetscMax_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
322 {
323   PetscInt i, count = *cnt;
324 
325   PetscFunctionBegin;
326   if (*datatype == MPIU_REAL) {
327     PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
328     for (i = 0; i < count; i++) xout[i] = PetscMax(xout[i], xin[i]);
329   }
330   #if defined(PETSC_HAVE_COMPLEX)
331   else if (*datatype == MPIU_COMPLEX) {
332     PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
333     for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) < PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
334   }
335   #endif
336   else {
337     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
338     PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
339   }
340   PetscFunctionReturnVoid();
341 }
342 
343 PETSC_EXTERN void MPIAPI PetscMin_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
344 {
345   PetscInt i, count = *cnt;
346 
347   PetscFunctionBegin;
348   if (*datatype == MPIU_REAL) {
349     PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
350     for (i = 0; i < count; i++) xout[i] = PetscMin(xout[i], xin[i]);
351   }
352   #if defined(PETSC_HAVE_COMPLEX)
353   else if (*datatype == MPIU_COMPLEX) {
354     PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
355     for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) > PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
356   }
357   #endif
358   else {
359     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_SCALAR data (i.e. double or complex) types"));
360     PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
361   }
362   PetscFunctionReturnVoid();
363 }
364 #endif
365 
366 /*
367    Private routine to delete internal tag/name counter storage when a communicator is freed.
368 
   This is called by MPI, not by users. This is called by MPI_Comm_free() when the communicator that has this data as an attribute is freed.
370 
371    Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()
372 
373 */
374 PETSC_EXTERN PetscMPIInt MPIAPI Petsc_Counter_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *count_val, void *extra_state)
375 {
376   PetscCommCounter      *counter = (PetscCommCounter *)count_val;
377   struct PetscCommStash *comms   = counter->comms, *pcomm;
378 
379   PetscFunctionBegin;
380   PetscCallReturnMPI(PetscInfo(NULL, "Deleting counter data in an MPI_Comm %ld\n", (long)comm));
381   PetscCallReturnMPI(PetscFree(counter->iflags));
382   while (comms) {
383     PetscCallMPIReturnMPI(MPI_Comm_free(&comms->comm));
384     pcomm = comms;
385     comms = comms->next;
386     PetscCallReturnMPI(PetscFree(pcomm));
387   }
388   PetscCallReturnMPI(PetscFree(counter));
389   PetscFunctionReturn(MPI_SUCCESS);
390 }
391 
392 /*
393   This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Comm_delete_attr) or when the user
394   calls MPI_Comm_free().
395 
396   This is the only entry point for breaking the links between inner and outer comms.
397 
398   This is called by MPI, not by users. This is called when MPI_Comm_free() is called on the communicator.
399 
400   Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()
401 
402 */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_InnerComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  /* MPI stores attributes as void*; the union converts between void* and
     MPI_Comm without a pointer cast */
  union
  {
    MPI_Comm comm;
    void    *ptr;
  } icomm;

  PetscFunctionBegin;
  PetscCheckReturnMPI(keyval == Petsc_InnerComm_keyval, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Unexpected keyval");
  icomm.ptr = attr_val;
  if (PetscDefined(USE_DEBUG)) {
    /* Error out if the inner/outer comms are not correctly linked through their Outer/InnerComm attributes */
    PetscMPIInt flg;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ocomm;
    PetscCallMPIReturnMPI(MPI_Comm_get_attr(icomm.comm, Petsc_OuterComm_keyval, &ocomm, &flg));
    PetscCheckReturnMPI(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm does not have OuterComm attribute");
    PetscCheckReturnMPI(ocomm.comm == comm, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm's OuterComm attribute does not point to outer PETSc comm");
  }
  /* break the back-link; this triggers Petsc_OuterComm_Attr_DeleteFn on the inner comm */
  PetscCallMPIReturnMPI(MPI_Comm_delete_attr(icomm.comm, Petsc_OuterComm_keyval));
  PetscCallReturnMPI(PetscInfo(NULL, "User MPI_Comm %ld is being unlinked from inner PETSc comm %ld\n", (long)comm, (long)icomm.comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
430 
431 /*
432  * This is invoked on the inner comm when Petsc_InnerComm_Attr_DeleteFn calls MPI_Comm_delete_attr().  It should not be reached any other way.
433  */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_OuterComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  PetscFunctionBegin;
  /* nothing to free here: the attribute is just a back-pointer; log and return */
  PetscCallReturnMPI(PetscInfo(NULL, "Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n", (long)comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
440 
441 PETSC_EXTERN PetscMPIInt MPIAPI Petsc_ShmComm_Attr_DeleteFn(MPI_Comm, PetscMPIInt, void *, void *);
442 
443 #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
444 PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype, MPI_Aint *, void *);
445 PETSC_EXTERN PetscMPIInt PetscDataRep_read_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
446 PETSC_EXTERN PetscMPIInt PetscDataRep_write_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
447 #endif
448 
449 PetscMPIInt PETSC_MPI_ERROR_CLASS = MPI_ERR_LASTCODE, PETSC_MPI_ERROR_CODE;
450 
451 PETSC_INTERN int    PetscGlobalArgc;
452 PETSC_INTERN char **PetscGlobalArgs, **PetscGlobalArgsFortran;
453 int                 PetscGlobalArgc        = 0;
454 char              **PetscGlobalArgs        = NULL;
455 char              **PetscGlobalArgsFortran = NULL;
456 PetscSegBuffer      PetscCitationsList;
457 
/*
   PetscCitationsInitialize - creates the global citation buffer and seeds it
   with the PETSc users-manual and "Efficient Management of Parallelism"
   BibTeX entries. Called during PETSc initialization.
*/
PetscErrorCode PetscCitationsInitialize(void)
{
  PetscFunctionBegin;
  PetscCall(PetscSegBufferCreate(1, 10000, &PetscCitationsList));

  PetscCall(PetscCitationsRegister("@TechReport{petsc-user-ref,\n\
  Author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Steven Benson and Jed Brown\n\
    and Peter Brune and Kris Buschelman and Emil Constantinescu and Lisandro Dalcin and Alp Dener\n\
    and Victor Eijkhout and Jacob Faibussowitsch and William~D. Gropp and V\'{a}clav Hapla and Tobin Isaac and Pierre Jolivet\n\
    and Dmitry Karpeev and Dinesh Kaushik and Matthew~G. Knepley and Fande Kong and Scott Kruger\n\
    and Dave~A. May and Lois Curfman McInnes and Richard Tran Mills and Lawrence Mitchell and Todd Munson\n\
    and Jose~E. Roman and Karl Rupp and Patrick Sanan and Jason Sarich and Barry~F. Smith and Hansol Suh\n\
    and Stefano Zampini and Hong Zhang and Hong Zhang and Junchao Zhang},\n\
  Title = {{PETSc/TAO} Users Manual},\n\
  Number = {ANL-21/39 - Revision 3.23},\n\
  Doi = {10.2172/2476320},\n\
  Institution = {Argonne National Laboratory},\n\
  Year = {2025}\n}\n",
                                   NULL));

  PetscCall(PetscCitationsRegister("@InProceedings{petsc-efficient,\n\
  Author = {Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith},\n\
  Title = {Efficient Management of Parallelism in Object Oriented Numerical Software Libraries},\n\
  Booktitle = {Modern Software Tools in Scientific Computing},\n\
  Editor = {E. Arge and A. M. Bruaset and H. P. Langtangen},\n\
  Pages = {163--202},\n\
  Publisher = {Birkh{\\\"{a}}user Press},\n\
  Year = {1997}\n}\n",
                                   NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}
489 
490 static char programname[PETSC_MAX_PATH_LEN] = ""; /* HP includes entire path in name */
491 
/*
   PetscSetProgramName - records the program name in the static 'programname'
   buffer, truncating to PETSC_MAX_PATH_LEN; later returned by
   PetscGetProgramName().
*/
PetscErrorCode PetscSetProgramName(const char name[])
{
  PetscFunctionBegin;
  PetscCall(PetscStrncpy(programname, name, sizeof(programname)));
  PetscFunctionReturn(PETSC_SUCCESS);
}
498 
499 /*@C
500   PetscGetProgramName - Gets the name of the running program.
501 
502   Not Collective
503 
504   Input Parameter:
505 . len - length of the string name
506 
507   Output Parameter:
508 . name - the name of the running program, provide a string of length `PETSC_MAX_PATH_LEN`
509 
510   Level: advanced
511 
512 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
513 @*/
PetscErrorCode PetscGetProgramName(char name[], size_t len)
{
  PetscFunctionBegin;
  /* copy the stored name into the caller's buffer, truncating to len
     (assumes PetscStrncpy NUL-terminates on truncation -- see its docs) */
  PetscCall(PetscStrncpy(name, programname, len));
  PetscFunctionReturn(PETSC_SUCCESS);
}
520 
521 /*@C
522   PetscGetArgs - Allows you to access the raw command line arguments anywhere
523   after `PetscInitialize()` is called but before `PetscFinalize()`.
524 
525   Not Collective, No Fortran Support
526 
527   Output Parameters:
528 + argc - count of the number of command line arguments
529 - args - the command line arguments
530 
531   Level: intermediate
532 
533   Notes:
534   This is usually used to pass the command line arguments into other libraries
535   that are called internally deep in PETSc or the application.
536 
537   The first argument contains the program name as is normal for C programs.
538 
539   See `PetscGetArguments()` for a variant of this routine.
540 
541 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
542 @*/
543 PetscErrorCode PetscGetArgs(int *argc, char ***args)
544 {
545   PetscFunctionBegin;
546   PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
547   *argc = PetscGlobalArgc;
548   *args = PetscGlobalArgs;
549   PetscFunctionReturn(PETSC_SUCCESS);
550 }
551 
552 /*@C
553   PetscGetArguments - Allows you to access the command line arguments anywhere
554   after `PetscInitialize()` is called but before `PetscFinalize()`.
555 
556   Not Collective, No Fortran Support
557 
558   Output Parameter:
559 . args - the command line arguments
560 
561   Level: intermediate
562 
563   Note:
564   This does NOT start with the program name and IS `NULL` terminated (the final argument is void)
565 
566   Use `PetscFreeArguments()` to return the memory used by the arguments.
567 
568   This makes a copy of the arguments and the array of arguments, while `PetscGetArgs()` does not make a copy,
569   it returns the array of arguments that was passed into the main program.
570 
571 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscFreeArguments()`, `PetscInitialize()`
572 @*/
573 PetscErrorCode PetscGetArguments(char ***args)
574 {
575   PetscInt i, argc = PetscGlobalArgc;
576 
577   PetscFunctionBegin;
578   PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
579   if (!argc) {
580     *args = NULL;
581     PetscFunctionReturn(PETSC_SUCCESS);
582   }
583   PetscCall(PetscMalloc1(argc, args));
584   for (i = 0; i < argc - 1; i++) PetscCall(PetscStrallocpy(PetscGlobalArgs[i + 1], &(*args)[i]));
585   (*args)[argc - 1] = NULL;
586   PetscFunctionReturn(PETSC_SUCCESS);
587 }
588 
589 /*@C
590   PetscFreeArguments - Frees the memory obtained with `PetscGetArguments()`
591 
592   Not Collective, No Fortran Support
593 
594   Output Parameter:
595 . args - the command line arguments
596 
597   Level: intermediate
598 
599   Developer Note:
600   This should be PetscRestoreArguments()
601 
602 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscGetArguments()`
603 @*/
604 PetscErrorCode PetscFreeArguments(char **args)
605 {
606   PetscFunctionBegin;
607   if (args) {
608     PetscInt i = 0;
609 
610     while (args[i]) PetscCall(PetscFree(args[i++]));
611     PetscCall(PetscFree(args));
612   }
613   PetscFunctionReturn(PETSC_SUCCESS);
614 }
615 
616 #if PetscDefined(HAVE_SAWS)
617   #include <petscconfiginfo.h>
618 
/*
   PetscInitializeSAWs - starts the SAWs web server on rank 0, driven by the
   -saws_* command-line options: log file, HTTPS certificate, port (fixed or
   auto-selected), document root, and local-source serving. Builds the
   index.html landing page from the program name, options, help text, and
   PETSc version, then registers the SAWs citation.
*/
PETSC_INTERN PetscErrorCode PetscInitializeSAWs(const char help[])
{
  PetscFunctionBegin;
  /* only rank 0 runs the web server */
  if (!PetscGlobalRank) {
    char      cert[PETSC_MAX_PATH_LEN], root[PETSC_MAX_PATH_LEN], *intro, programname[64], *appline, *options, version[64];
    int       port;
    PetscBool flg, rootlocal = PETSC_FALSE, flg2, selectport = PETSC_FALSE;
    size_t    applinelen, introlen;
    char      sawsurl[256];

    /* optional SAWs log file; an empty value means log with SAWs' default */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_log", &flg));
    if (flg) {
      char sawslog[PETSC_MAX_PATH_LEN];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_log", sawslog, sizeof(sawslog), NULL));
      if (sawslog[0]) {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (sawslog));
      } else {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (NULL));
      }
    }
    /* optional HTTPS certificate */
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_https", cert, sizeof(cert), &flg));
    if (flg) PetscCallSAWs(SAWs_Set_Use_HTTPS, (cert));
    /* port selection: automatic, or explicit via -saws_port */
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select", &selectport, NULL));
    if (selectport) {
      PetscCallSAWs(SAWs_Get_Available_Port, (&port));
      PetscCallSAWs(SAWs_Set_Port, (port));
    } else {
      PetscCall(PetscOptionsGetInt(NULL, NULL, "-saws_port", &port, &flg));
      if (flg) PetscCallSAWs(SAWs_Set_Port, (port));
    }
    /* document root: explicit -saws_root, or the PETSc share directory when -saws_options is given */
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_root", root, sizeof(root), &flg));
    if (flg) {
      PetscCallSAWs(SAWs_Set_Document_Root, (root));
      PetscCall(PetscStrcmp(root, ".", &rootlocal));
    } else {
      PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_options", &flg));
      if (flg) {
        PetscCall(PetscStrreplace(PETSC_COMM_WORLD, "${PETSC_DIR}/share/petsc/saws", root, sizeof(root)));
        PetscCallSAWs(SAWs_Set_Document_Root, (root));
      }
    }
    /* -saws_local requires a root containing a js/ directory */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_local", &flg2));
    if (flg2) {
      char jsdir[PETSC_MAX_PATH_LEN];
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_SUP, "-saws_local option requires -saws_root option");
      PetscCall(PetscSNPrintf(jsdir, sizeof(jsdir), "%s/js", root));
      PetscCall(PetscTestDirectory(jsdir, 'r', &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "-saws_local option requires js directory in root directory");
      PetscCallSAWs(SAWs_Push_Local_Header, ());
    }
    /* assemble the landing-page HTML; buffers sized from the help text length */
    PetscCall(PetscGetProgramName(programname, sizeof(programname)));
    PetscCall(PetscStrlen(help, &applinelen));
    introlen = 4096 + applinelen;
    applinelen += 1024;
    PetscCall(PetscMalloc(applinelen, &appline));
    PetscCall(PetscMalloc(introlen, &intro));

    /* only link to the source if <program>.c.html actually exists in the local root */
    if (rootlocal) {
      PetscCall(PetscSNPrintf(appline, applinelen, "%s.c.html", programname));
      PetscCall(PetscTestFile(appline, 'r', &rootlocal));
    }
    PetscCall(PetscOptionsGetAll(NULL, &options));
    if (rootlocal && help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running <a href=\"%s.c.html\">%s</a> %s</center><br><center><pre>%s</pre></center><br>\n", programname, programname, options, help));
    } else if (help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center>Running %s %s</center><br><center><pre>%s</pre></center><br>", programname, options, help));
    } else {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running %s %s</center><br>\n", programname, options));
    }
    PetscCall(PetscFree(options));
    PetscCall(PetscGetVersion(version, sizeof(version)));
    PetscCall(PetscSNPrintf(intro, introlen,
                            "<body>\n"
                            "<center><h2> <a href=\"https://petsc.org/\">PETSc</a> Application Web server powered by <a href=\"https://bitbucket.org/saws/saws\">SAWs</a> </h2></center>\n"
                            "<center>This is the default PETSc application dashboard, from it you can access any published PETSc objects or logging data</center><br><center>%s configured with %s</center><br>\n"
                            "%s",
                            version, petscconfigureoptions, appline));
    PetscCallSAWs(SAWs_Push_Body, ("index.html", 0, intro));
    PetscCall(PetscFree(intro));
    PetscCall(PetscFree(appline));
    if (selectport) {
      PetscBool silent;

      /* another process may have grabbed the port so keep trying */
      while (SAWs_Initialize()) {
        PetscCallSAWs(SAWs_Get_Available_Port, (&port));
        PetscCallSAWs(SAWs_Set_Port, (port));
      }

      PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select_silent", &silent, NULL));
      if (!silent) {
        PetscCallSAWs(SAWs_Get_FullURL, (sizeof(sawsurl), sawsurl));
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Point your browser to %s for SAWs\n", sawsurl));
      }
    } else {
      PetscCallSAWs(SAWs_Initialize, ());
    }
    PetscCall(PetscCitationsRegister("@TechReport{ saws,\n"
                                     "  Author = {Matt Otten and Jed Brown and Barry Smith},\n"
                                     "  Title  = {Scientific Application Web Server (SAWs) Users Manual},\n"
                                     "  Institution = {Argonne National Laboratory},\n"
                                     "  Year   = 2013\n}\n",
                                     NULL));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
726 #endif
727 
728 /* Things must be done before MPI_Init() when MPI is not yet initialized, and can be shared between C init and Fortran init */
PETSC_INTERN PetscErrorCode PetscPreMPIInit_Private(void)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_HWLOC_SOLARIS_BUG)
  /* see MPI.py for details on this bug */
  /* disable hwloc's x86 component via the environment before MPI_Init() runs
     (NOTE(review): presumably hwloc parses HWLOC_COMPONENTS at load -- confirm) */
  (void)setenv("HWLOC_COMPONENTS", "-x86", 1);
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
738 
739 #if PetscDefined(HAVE_ADIOS)
740   #include <adios.h>
741   #include <adios_read.h>
742 int64_t Petsc_adios_group;
743 #endif
744 #if PetscDefined(HAVE_OPENMP)
745   #include <omp.h>
746 PetscInt PetscNumOMPThreads;
747 #endif
748 
749 #include <petsc/private/deviceimpl.h>
750 #if PetscDefined(HAVE_CUDA)
751   #include <petscdevice_cuda.h>
752 // REMOVE ME
753 cudaStream_t PetscDefaultCudaStream = NULL;
754 #endif
755 #if PetscDefined(HAVE_HIP)
756   #include <petscdevice_hip.h>
757 // REMOVE ME
758 hipStream_t PetscDefaultHipStream = NULL;
759 #endif
760 
761 #if PetscDefined(HAVE_DLFCN_H)
762   #include <dlfcn.h>
763 #endif
764 PETSC_INTERN PetscErrorCode PetscLogInitialize(void);
765 #if PetscDefined(HAVE_VIENNACL)
766 PETSC_EXTERN PetscErrorCode PetscViennaCLInit(void);
767 PetscBool                   PetscViennaCLSynchronize = PETSC_FALSE;
768 #endif
769 
770 PetscBool PetscCIEnabled = PETSC_FALSE, PetscCIEnabledPortableErrorOutput = PETSC_FALSE;
771 
772 /*
773   PetscInitialize_Common  - shared code between C and Fortran initialization
774 
775   prog:     program name
776   file:     optional PETSc database file name. Might be in Fortran string format when 'ftn' is true
777   help:     program help message
778   ftn:      is it called from Fortran initialization (petscinitializef_)?
779   len:      length of file string, used when Fortran is true
780 */
781 PETSC_INTERN PetscErrorCode PetscInitialize_Common(const char *prog, const char *file, const char *help, PetscBool ftn, PetscInt len)
782 {
783   PetscMPIInt size;
784   PetscBool   flg = PETSC_TRUE;
785   char        hostname[256];
786   PetscBool   blas_view_flag = PETSC_FALSE;
787 
788   PetscFunctionBegin;
789   if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
790   /* these must be initialized in a routine, not as a constant declaration */
791   PETSC_STDOUT = stdout;
792   PETSC_STDERR = stderr;
793 
794   /* PetscCall can be used from now */
795   PetscErrorHandlingInitialized = PETSC_TRUE;
796 
797   /*
798       The checking over compatible runtime libraries is complicated by the MPI ABI initiative
799       https://wiki.mpich.org/mpich/index.php/ABI_Compatibility_Initiative which started with
800         MPICH v3.1 (Released February 2014)
801         IBM MPI v2.1 (December 2014)
802         Intel MPI Library v5.0 (2014)
803         Cray MPT v7.0.0 (June 2014)
804       As of July 31, 2017 the ABI number still appears to be 12, that is all of the versions
805       listed above and since that time are compatible.
806 
807       Unfortunately the MPI ABI initiative has not defined a way to determine the ABI number
808       at compile time or runtime. Thus we will need to systematically track the allowed versions
809       and how they are represented in the mpi.h and MPI_Get_library_version() output in order
810       to perform the checking.
811 
812       Currently we only check for pre MPI ABI versions (and packages that do not follow the MPI ABI).
813 
814       Questions:
815 
816         Should the checks for ABI incompatibility be only on the major version number below?
817         Presumably the output to stderr will be removed before a release.
818   */
819 
820 #if defined(PETSC_HAVE_MPI_GET_LIBRARY_VERSION)
821   {
822     char        mpilibraryversion[MPI_MAX_LIBRARY_VERSION_STRING];
823     PetscMPIInt mpilibraryversionlength;
824 
825     PetscCallMPI(MPI_Get_library_version(mpilibraryversion, &mpilibraryversionlength));
826     /* check for MPICH versions before MPI ABI initiative */
827   #if defined(MPICH_VERSION)
828     #if MPICH_NUMVERSION < 30100000
829     {
830       char     *ver, *lf;
831       PetscBool flg = PETSC_FALSE;
832 
833       PetscCall(PetscStrstr(mpilibraryversion, "MPICH Version:", &ver));
834       if (ver) {
835         PetscCall(PetscStrchr(ver, '\n', &lf));
836         if (lf) {
837           *lf = 0;
838           PetscCall(PetscStrendswith(ver, MPICH_VERSION, &flg));
839         }
840       }
841       if (!flg) {
842         PetscCall(PetscInfo(NULL, "PETSc warning --- MPICH library version \n%s does not match what PETSc was compiled with %s.\n", mpilibraryversion, MPICH_VERSION));
843         flg = PETSC_TRUE;
844       }
845     }
846     #endif
847       /* check for Open MPI version, it is not part of the MPI ABI initiative (is it part of another initiative that needs to be handled?) */
848   #elif defined(PETSC_HAVE_OPENMPI)
849     {
850       char     *ver, bs[MPI_MAX_LIBRARY_VERSION_STRING], *bsf;
851       PetscBool flg                                              = PETSC_FALSE;
852     #define PSTRSZ 2
853       char      ompistr1[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"Open MPI", "FUJITSU MPI"};
854       char      ompistr2[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"v", "Library "};
855       int       i;
856       for (i = 0; i < PSTRSZ; i++) {
857         PetscCall(PetscStrstr(mpilibraryversion, ompistr1[i], &ver));
858         if (ver) {
859           PetscCall(PetscSNPrintf(bs, MPI_MAX_LIBRARY_VERSION_STRING, "%s%d.%d", ompistr2[i], PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
860           PetscCall(PetscStrstr(ver, bs, &bsf));
861           if (bsf) flg = PETSC_TRUE;
862           break;
863         }
864       }
865       if (!flg) {
866         PetscCall(PetscInfo(NULL, "PETSc warning --- Open MPI library version \n%s does not match what PETSc was compiled with %d.%d.\n", mpilibraryversion, PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
867         flg = PETSC_TRUE;
868       }
869     }
870   #endif
871   }
872 #endif
873 
874 #if defined(PETSC_HAVE_DLADDR) && !(defined(__cray__) && defined(__clang__))
875   /* These symbols are currently in the Open MPI and MPICH libraries; they may not always be, in that case the test will simply not detect the problem */
876   PetscCheck(!dlsym(RTLD_DEFAULT, "ompi_mpi_init") || !dlsym(RTLD_DEFAULT, "MPID_Abort"), PETSC_COMM_SELF, PETSC_ERR_MPI_LIB_INCOMP, "Application was linked against both Open MPI and MPICH based MPI libraries and will not run correctly");
877 #endif
878 
879   /* on Windows - set printf to default to printing 2 digit exponents */
880 #if defined(PETSC_HAVE__SET_OUTPUT_FORMAT)
881   _set_output_format(_TWO_DIGIT_EXPONENT);
882 #endif
883 
884   PetscCall(PetscOptionsCreateDefault());
885 
886   PetscFinalizeCalled = PETSC_FALSE;
887 
888   PetscCall(PetscSetProgramName(prog));
889   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockOpen));
890   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStdout));
891   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStderr));
892   PetscCall(PetscSpinlockCreate(&PetscCommSpinLock));
893 
894   if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD;
895   PetscCallMPI(MPI_Comm_set_errhandler(PETSC_COMM_WORLD, MPI_ERRORS_RETURN));
896 
897   if (PETSC_MPI_ERROR_CLASS == MPI_ERR_LASTCODE) {
898     PetscCallMPI(MPI_Add_error_class(&PETSC_MPI_ERROR_CLASS));
899     PetscCallMPI(MPI_Add_error_code(PETSC_MPI_ERROR_CLASS, &PETSC_MPI_ERROR_CODE));
900   }
901 
902   /* Done after init due to a bug in MPICH-GM? */
903   PetscCall(PetscErrorPrintfInitialize());
904 
905   PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PetscGlobalRank));
906   PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PetscGlobalSize));
907 
908   MPIU_BOOL        = MPI_INT;
909   MPIU_ENUM        = MPI_INT;
910   MPIU_FORTRANADDR = (sizeof(void *) == sizeof(int)) ? MPI_INT : MPIU_INT64;
911   if (sizeof(size_t) == sizeof(unsigned)) MPIU_SIZE_T = MPI_UNSIGNED;
912   else if (sizeof(size_t) == sizeof(unsigned long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG;
913 #if defined(PETSC_SIZEOF_LONG_LONG)
914   else if (sizeof(size_t) == sizeof(unsigned long long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG_LONG;
915 #endif
916   else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_SUP_SYS, "Could not find MPI type for size_t");
917 
918   /*
919      Initialized the global complex variable; this is because with
920      shared libraries the constructors for global variables
921      are not called; at least on IRIX.
922   */
923 #if defined(PETSC_HAVE_COMPLEX)
924   {
925   #if defined(PETSC_CLANGUAGE_CXX) && !defined(PETSC_USE_REAL___FLOAT128)
926     PetscComplex ic(0.0, 1.0);
927     PETSC_i = ic;
928   #else
929     PETSC_i = _Complex_I;
930   #endif
931   }
932 #endif /* PETSC_HAVE_COMPLEX */
933 
934   /*
935      Create the PETSc MPI reduction operator that sums of the first
936      half of the entries and maxes the second half.
937   */
938   PetscCallMPI(MPI_Op_create(MPIU_MaxSum_Local, 1, &MPIU_MAXSUM_OP));
939 
940 #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
941   PetscCallMPI(MPI_Type_contiguous(2, MPI_DOUBLE, &MPIU___FLOAT128));
942   PetscCallMPI(MPI_Type_commit(&MPIU___FLOAT128));
943   #if defined(PETSC_HAVE_COMPLEX)
944   PetscCallMPI(MPI_Type_contiguous(4, MPI_DOUBLE, &MPIU___COMPLEX128));
945   PetscCallMPI(MPI_Type_commit(&MPIU___COMPLEX128));
946   #endif
947 #endif
948 #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
949   PetscCallMPI(MPI_Type_contiguous(2, MPI_CHAR, &MPIU___FP16));
950   PetscCallMPI(MPI_Type_commit(&MPIU___FP16));
951 #endif
952 
953 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
954   PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM));
955   PetscCallMPI(MPI_Op_create(PetscMax_Local, 1, &MPIU_MAX));
956   PetscCallMPI(MPI_Op_create(PetscMin_Local, 1, &MPIU_MIN));
957 #elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
958   PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM___FP16___FLOAT128));
959 #endif
960 
961   PetscCallMPI(MPI_Type_contiguous(2, MPIU_SCALAR, &MPIU_2SCALAR));
962   PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp));
963   PetscCallMPI(MPI_Type_commit(&MPIU_2SCALAR));
964 
965   /* create datatypes used by MPIU_MAXLOC, MPIU_MINLOC and PetscSplitReduction_Op */
966 #if !defined(PETSC_HAVE_MPIUNI)
967   {
968     PetscMPIInt  blockSizes[2]   = {1, 1};
969     MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_real_int, v), offsetof(struct petsc_mpiu_real_int, i)};
970     MPI_Datatype blockTypes[2]   = {MPIU_REAL, MPIU_INT}, tmpStruct;
971 
972     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
973     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_real_int), &MPIU_REAL_INT));
974     PetscCallMPI(MPI_Type_free(&tmpStruct));
975     PetscCallMPI(MPI_Type_commit(&MPIU_REAL_INT));
976   }
977   {
978     PetscMPIInt  blockSizes[2]   = {1, 1};
979     MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_scalar_int, v), offsetof(struct petsc_mpiu_scalar_int, i)};
980     MPI_Datatype blockTypes[2]   = {MPIU_SCALAR, MPIU_INT}, tmpStruct;
981 
982     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
983     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_scalar_int), &MPIU_SCALAR_INT));
984     PetscCallMPI(MPI_Type_free(&tmpStruct));
985     PetscCallMPI(MPI_Type_commit(&MPIU_SCALAR_INT));
986   }
987 #endif
988 
989 #if defined(PETSC_USE_64BIT_INDICES)
990   PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT));
991   PetscCallMPI(MPI_Type_commit(&MPIU_2INT));
992 
993   #if !defined(PETSC_HAVE_MPIUNI)
994   {
995     int          blockSizes[]   = {1, 1};
996     MPI_Aint     blockOffsets[] = {offsetof(struct petsc_mpiu_int_mpiint, a), offsetof(struct petsc_mpiu_int_mpiint, b)};
997     MPI_Datatype blockTypes[]   = {MPIU_INT, MPI_INT}, tmpStruct;
998 
999     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
1000     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_int_mpiint), &MPIU_INT_MPIINT));
1001     PetscCallMPI(MPI_Type_free(&tmpStruct));
1002     PetscCallMPI(MPI_Type_commit(&MPIU_INT_MPIINT));
1003   }
1004   #endif
1005 #endif
1006   PetscCallMPI(MPI_Type_contiguous(4, MPI_INT, &MPI_4INT));
1007   PetscCallMPI(MPI_Type_commit(&MPI_4INT));
1008   PetscCallMPI(MPI_Type_contiguous(4, MPIU_INT, &MPIU_4INT));
1009   PetscCallMPI(MPI_Type_commit(&MPIU_4INT));
1010 
1011   /*
1012      Attributes to be set on PETSc communicators
1013   */
1014   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &Petsc_Counter_keyval, NULL));
1015   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_InnerComm_Attr_DeleteFn, &Petsc_InnerComm_keyval, NULL));
1016   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_OuterComm_Attr_DeleteFn, &Petsc_OuterComm_keyval, NULL));
1017   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_ShmComm_Attr_DeleteFn, &Petsc_ShmComm_keyval, NULL));
1018   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_CreationIdx_keyval, NULL));
1019   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Garbage_HMap_keyval, NULL));
1020 
1021 #if defined(PETSC_USE_FORTRAN_BINDINGS)
1022   if (ftn) PetscCall(PetscInitFortran_Private(file, len));
1023   else
1024 #endif
1025     PetscCall(PetscOptionsInsert(NULL, &PetscGlobalArgc, &PetscGlobalArgs, file));
1026 
1027   if (PetscDefined(HAVE_MPIUNI)) {
1028     const char *mpienv = getenv("PMI_SIZE");
1029     if (!mpienv) mpienv = getenv("OMPI_COMM_WORLD_SIZE");
1030     if (mpienv) {
1031       PetscInt  isize;
1032       PetscBool mflag = PETSC_FALSE;
1033 
1034       PetscCall(PetscOptionsStringToInt(mpienv, &isize));
1035       PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpiuni-allow-multiprocess-launch", &mflag, NULL));
1036       PetscCheck(isize == 1 || mflag, MPI_COMM_SELF, PETSC_ERR_MPI, "You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc. Or run with -mpiuni-allow-multiprocess-launch to allow multiple independent MPI-uni jobs.");
1037     }
1038   }
1039 
1040   /* call a second time so it can look in the options database */
1041   PetscCall(PetscErrorPrintfInitialize());
1042 
1043   /*
1044      Check system options and print help
1045   */
1046   PetscCall(PetscOptionsCheckInitial_Private(help));
1047 
1048   /*
1049     Creates the logging data structures; this is enabled even if logging is not turned on
1050     This is the last thing we do before returning to the user code to prevent having the
1051     logging numbers contaminated by any startup time associated with MPI
1052   */
1053   PetscCall(PetscLogInitialize());
1054 
1055   /*
1056    Initialize PetscDevice and PetscDeviceContext
1057 
1058    Note to any future devs thinking of moving this, proper initialization requires:
1059    1. MPI initialized
1060    2. Options DB initialized
1061    3. PETSc error handling initialized, specifically signal handlers. This expects to set up
1062       its own SIGSEV handler via the push/pop interface.
1063    4. Logging initialized
1064   */
1065   PetscCall(PetscDeviceInitializeFromOptions_Internal(PETSC_COMM_WORLD));
1066 
1067 #if PetscDefined(HAVE_VIENNACL)
1068   flg = PETSC_FALSE;
1069   PetscCall(PetscOptionsHasName(NULL, NULL, "-log_view", &flg));
1070   if (!flg) PetscCall(PetscOptionsGetBool(NULL, NULL, "-viennacl_synchronize", &flg, NULL));
1071   PetscViennaCLSynchronize = flg;
1072   PetscCall(PetscViennaCLInit());
1073 #endif
1074 
1075   PetscCall(PetscCitationsInitialize());
1076 
1077 #if defined(PETSC_HAVE_SAWS)
1078   PetscCall(PetscInitializeSAWs(ftn ? NULL : help));
1079   flg = PETSC_FALSE;
1080   PetscCall(PetscOptionsHasName(NULL, NULL, "-stack_view", &flg));
1081   if (flg) PetscCall(PetscStackViewSAWs());
1082 #endif
1083 
1084   /*
1085      Load the dynamic libraries (on machines that support them), this registers all
1086      the solvers etc. (On non-dynamic machines this initializes the PetscDraw and PetscViewer classes)
1087   */
1088   PetscCall(PetscInitialize_DynamicLibraries());
1089 
1090   PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
1091   PetscCall(PetscInfo(NULL, "PETSc successfully started: number of processors = %d\n", size));
1092   PetscCall(PetscGetHostName(hostname, sizeof(hostname)));
1093   PetscCall(PetscInfo(NULL, "Running on machine: %s\n", hostname));
1094 #if defined(PETSC_HAVE_OPENMP)
1095   {
1096     PetscBool omp_view_flag;
1097     char     *threads = getenv("OMP_NUM_THREADS");
1098 
1099     if (threads) {
1100       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %s (as given by OMP_NUM_THREADS)\n", threads));
1101       (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumOMPThreads);
1102     } else {
1103       PetscNumOMPThreads = (PetscInt)omp_get_max_threads();
1104       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (as given by omp_get_max_threads())\n", PetscNumOMPThreads));
1105     }
1106     PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "OpenMP options", "Sys");
1107     PetscCall(PetscOptionsInt("-omp_num_threads", "Number of OpenMP threads to use (can also use environmental variable OMP_NUM_THREADS", "None", PetscNumOMPThreads, &PetscNumOMPThreads, &flg));
1108     PetscCall(PetscOptionsName("-omp_view", "Display OpenMP number of threads", NULL, &omp_view_flag));
1109     PetscOptionsEnd();
1110     if (flg) {
1111       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (given by -omp_num_threads)\n", PetscNumOMPThreads));
1112       omp_set_num_threads((int)PetscNumOMPThreads);
1113     }
1114     if (omp_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "OpenMP: number of threads %" PetscInt_FMT "\n", PetscNumOMPThreads));
1115   }
1116 #endif
1117 
1118   PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "BLAS options", "Sys");
1119   PetscCall(PetscOptionsName("-blas_view", "Display number of threads to use for BLAS operations", NULL, &blas_view_flag));
1120 #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) || defined(PETSC_HAVE_MKL_SET_NUM_THREADS) || defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1121   {
1122     char *threads = NULL;
1123 
1124     /* determine any default number of threads requested in the environment; TODO: Apple libraries? */
1125   #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS)
1126     threads = getenv("BLIS_NUM_THREADS");
1127     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by BLIS_NUM_THREADS\n", threads));
1128     if (!threads) {
1129       threads = getenv("OMP_NUM_THREADS");
1130       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by OMP_NUM_THREADS\n", threads));
1131     }
1132   #elif defined(PETSC_HAVE_MKL_SET_NUM_THREADS)
1133     threads = getenv("MKL_NUM_THREADS");
1134     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by MKL_NUM_THREADS\n", threads));
1135     if (!threads) {
1136       threads = getenv("OMP_NUM_THREADS");
1137       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by OMP_NUM_THREADS\n", threads));
1138     }
1139   #elif defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1140     threads = getenv("OPENBLAS_NUM_THREADS");
1141     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OPENBLAS_NUM_THREADS\n", threads));
1142     if (!threads) {
1143       threads = getenv("OMP_NUM_THREADS");
1144       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OMP_NUM_THREADS\n", threads));
1145     }
1146   #endif
1147     if (threads) (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumBLASThreads);
1148     PetscCall(PetscOptionsInt("-blas_num_threads", "Number of threads to use for BLAS operations", "None", PetscNumBLASThreads, &PetscNumBLASThreads, &flg));
1149     if (flg) PetscCall(PetscInfo(NULL, "BLAS: Command line number of BLAS thread %" PetscInt_FMT "given by -blas_num_threads\n", PetscNumBLASThreads));
1150     if (flg || threads) {
1151       PetscCall(PetscBLASSetNumThreads(PetscNumBLASThreads));
1152       if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: number of threads %" PetscInt_FMT "\n", PetscNumBLASThreads));
1153     }
1154   }
1155 #elif defined(PETSC_HAVE_APPLE_ACCELERATE)
1156   PetscCall(PetscInfo(NULL, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1157   if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1158 #else
1159   if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: no thread support\n"));
1160 #endif
1161   PetscOptionsEnd();
1162 
1163 #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
1164   /*
1165       Tell MPI about our own data representation converter, this would/should be used if extern32 is not supported by the MPI
1166 
1167       Currently not used because it is not supported by MPICH.
1168   */
1169   if (!PetscBinaryBigEndian()) PetscCallMPI(MPI_Register_datarep((char *)"petsc", PetscDataRep_read_conv_fn, PetscDataRep_write_conv_fn, PetscDataRep_extent_fn, NULL));
1170 #endif
1171 
1172 #if defined(PETSC_SERIALIZE_FUNCTIONS)
1173   PetscCall(PetscFPTCreate(10000));
1174 #endif
1175 
1176 #if defined(PETSC_HAVE_HWLOC)
1177   {
1178     PetscViewer viewer;
1179     PetscCall(PetscOptionsCreateViewer(PETSC_COMM_WORLD, NULL, NULL, "-process_view", &viewer, NULL, &flg));
1180     if (flg) {
1181       PetscCall(PetscProcessPlacementView(viewer));
1182       PetscCall(PetscViewerDestroy(&viewer));
1183     }
1184   }
1185 #endif
1186 
1187   flg = PETSC_TRUE;
1188   PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewfromoptions", &flg, NULL));
1189   if (!flg) PetscCall(PetscOptionsPushCreateViewerOff(PETSC_TRUE));
1190 
1191 #if defined(PETSC_HAVE_ADIOS)
1192   PetscCallExternal(adios_init_noxml, PETSC_COMM_WORLD);
1193   PetscCallExternal(adios_declare_group, &Petsc_adios_group, "PETSc", "", adios_stat_default);
1194   PetscCallExternal(adios_select_method, Petsc_adios_group, "MPI", "", "");
1195   PetscCallExternal(adios_read_init_method, ADIOS_READ_METHOD_BP, PETSC_COMM_WORLD, "");
1196 #endif
1197 
1198 #if defined(__VALGRIND_H)
1199   PETSC_RUNNING_ON_VALGRIND = RUNNING_ON_VALGRIND ? PETSC_TRUE : PETSC_FALSE;
1200   #if defined(PETSC_USING_DARWIN) && defined(PETSC_BLASLAPACK_SDOT_RETURNS_DOUBLE)
1201   if (PETSC_RUNNING_ON_VALGRIND) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING: Running valgrind with the macOS native BLAS and LAPACK can fail. If it fails, try configuring with --download-fblaslapack or --download-f2cblaslapack"));
1202   #endif
1203 #endif
1204   /*
1205       Set flag that we are completely initialized
1206   */
1207   PetscInitializeCalled = PETSC_TRUE;
1208 
1209   PetscCall(PetscOptionsHasName(NULL, NULL, "-python", &flg));
1210   if (flg) PetscCall(PetscPythonInitialize(NULL, NULL));
1211 
1212   PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
1213   if (flg) PetscCall(PetscInfo(NULL, "Running MPI Linear Solver Server\n"));
1214   if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerBegin());
1215   else PetscCheck(!flg, PETSC_COMM_WORLD, PETSC_ERR_SUP, "PETSc configured using -with-single-library=0; -mpi_linear_solver_server not supported in that case");
1216   PetscFunctionReturn(PETSC_SUCCESS);
1217 }
1218 
1219 // "Unknown section 'Environmental Variables'"
1220 // PetscClangLinter pragma disable: -fdoc-section-header-unknown
1221 /*@C
1222   PetscInitialize - Initializes the PETSc database and MPI.
1223   `PetscInitialize()` calls MPI_Init() if that has yet to be called,
1224   so this routine should always be called near the beginning of
1225   your program -- usually the very first line!
1226 
1227   Collective on `MPI_COMM_WORLD` or `PETSC_COMM_WORLD` if it has been set
1228 
1229   Input Parameters:
1230 + argc - count of number of command line arguments
1231 . args - the command line arguments
1232 . file - [optional] PETSc database file, append ":yaml" to filename to specify YAML options format.
1233           Use `NULL` or empty string to not check for code specific file.
1234           Also checks `~/.petscrc`, `.petscrc` and `petscrc`.
1235           Use `-skip_petscrc` in the code specific file (or command line) to skip `~/.petscrc`, `.petscrc` and `petscrc` files.
1236 - help - [optional] Help message to print, use `NULL` for no message
1237 
   If you wish PETSc code to run ONLY on a subcommunicator of `MPI_COMM_WORLD`, create that
   communicator first and assign it to `PETSC_COMM_WORLD` BEFORE calling `PetscInitialize()`.
   If ALL processes in the job are using `PetscInitialize()` and `PetscFinalize()` then you don't need to do this, even
   if different subcommunicators of the job are doing different things with PETSc.
1242 
1243   Options Database Keys:
1244 + -help [intro]                                       - prints help method for each option; if `intro` is given the program stops after printing the introductory help message
1245 . -start_in_debugger [noxterm,dbx,xdb,gdb,...]        - Starts program in debugger
1246 . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected
1247 . -on_error_emacs <machinename>                       - causes `emacsclient` to jump to error file if an error is detected
1248 . -on_error_abort                                     - calls `abort()` when error detected (no traceback)
1249 . -on_error_mpiabort                                  - calls `MPI_abort()` when error detected
1250 . -error_output_stdout                                - prints PETSc error messages to `stdout` instead of the default `stderr`
1251 . -error_output_none                                  - does not print the error messages (but handles errors in the same way as if this was not called)
1252 . -debugger_ranks [rank1,rank2,...]                   - Indicates MPI ranks to start in debugger
1253 . -debugger_pause [sleeptime] (in seconds)            - Pauses debugger, use if it takes a long time for the debugger to start up on your system
1254 . -stop_for_debugger                                  - Print message on how to attach debugger manually to
1255                                                         process and wait (`-debugger_pause`) seconds for attachment
1256 . -malloc_dump                                        - prints a list of all unfreed memory at the end of the run
1257 . -malloc_test                                        - like `-malloc_dump` `-malloc_debug`, only active for debugging build, ignored in optimized build. Often set in `PETSC_OPTIONS` environmental variable
1258 . -malloc_view                                        - show a list of all allocated memory during `PetscFinalize()`
1259 . -malloc_view_threshold <t>                          - only list memory allocations of size greater than t with `-malloc_view`
1260 . -malloc_requested_size                              - malloc logging will record the requested size rather than (possibly large) size after alignment
1261 . -fp_trap                                            - Stops on floating point exceptions
1262 . -no_signal_handler                                  - Indicates not to trap error signals
1263 . -shared_tmp                                         - indicates `/tmp` directory is known to be shared by all processors
1264 . -not_shared_tmp                                     - indicates each processor has own `/tmp`
1265 . -tmp                                                - alternative directory to use instead of `/tmp`
1266 . -python <exe>                                       - Initializes Python, and optionally takes a Python executable name
1267 - -mpiuni-allow-multiprocess-launch                   - allow `mpiexec` to launch multiple independent MPI-Uni jobs, otherwise a sanity check error is invoked to prevent misuse of MPI-Uni
1268 
1269   Options Database Keys for Option Database:
1270 + -skip_petscrc           - skip the default option files `~/.petscrc`, `.petscrc`, `petscrc`
1271 . -options_monitor        - monitor all set options to standard output for the whole program run
1272 - -options_monitor_cancel - cancel options monitoring hard-wired using `PetscOptionsMonitorSet()`
1273 
1274    Options -options_monitor_{all,cancel} are
1275    position-independent and apply to all options set since the PETSc start.
1276    They can be used also in option files.
1277 
1278    See `PetscOptionsMonitorSet()` to do monitoring programmatically.
1279 
1280   Options Database Keys for Profiling:
1281    See Users-Manual: ch_profiling for details.
1282 + -info [filename][:[~]<list,of,classnames>[:[~]self]] - Prints verbose information. See `PetscInfo()`.
1283 . -log_sync                                            - Enable barrier synchronization for all events. This option is useful to debug imbalance within each event,
1284                                                          however it slows things down and gives a distorted view of the overall runtime.
1285 . -log_trace [filename]                                - Print traces of all PETSc calls to the screen (useful to determine where a program
1286                                                          hangs without running in the debugger).  See `PetscLogTraceBegin()`.
1287 . -log_view [:filename:format][,[:filename:format]...] - Prints summary of flop and timing information to screen or file, see `PetscLogView()` (up to 4 viewers)
1288 . -log_view_memory                                     - Includes in the summary from -log_view the memory used in each event, see `PetscLogView()`.
. -log_view_gpu_time                                   - Includes in the summary from -log_view the time used in each GPU kernel, see `PetscLogView()`.
1290 . -log_exclude: <vec,mat,pc,ksp,snes>                  - excludes subset of object classes from logging
1291 . -log [filename]                                      - Logs profiling information in a dump file, see `PetscLogDump()`.
1292 . -log_all [filename]                                  - Same as `-log`.
1293 . -log_mpe [filename]                                  - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution)
1294 . -log_perfstubs                                       - Starts a log handler with the perfstubs interface (which is used by TAU)
1295 . -log_nvtx                                            - Starts an nvtx log handler for use with Nsight
1296 . -log_roctx                                           - Starts an roctx log handler for use with rocprof on AMD GPUs
1297 . -viewfromoptions on,off                              - Enable or disable `XXXSetFromOptions()` calls, for applications with many small solves turn this off
1298 . -get_total_flops                                     - Returns total flops done by all processors
1299 . -memory_view                                         - Print memory usage at end of run
1300 - -check_pointer_intensity 0,1,2                       - if pointers are checked for validity (debug version only), using 0 will result in faster code
1301 
1302   Options Database Keys for SAWs:
1303 + -saws_port <portnumber>        - port number to publish SAWs data, default is 8080
1304 . -saws_port_auto_select         - have SAWs select a new unique port number where it publishes the data, the URL is printed to the screen
1305                                    this is useful when you are running many jobs that utilize SAWs at the same time
1306 . -saws_log <filename>           - save a log of all SAWs communication
1307 . -saws_https <certificate file> - have SAWs use HTTPS instead of HTTP
1308 - -saws_root <directory>         - allow SAWs to have access to the given directory to search for requested resources and files
1309 
1310   Environmental Variables:
1311 +   `PETSC_TMP`                   - alternative directory to use instead of `/tmp`
1312 .   `PETSC_SHARED_TMP`            - `/tmp` is shared by all processes
1313 .   `PETSC_NOT_SHARED_TMP`        - each process has its own private `/tmp`
1314 .   `PETSC_OPTIONS`               - a string containing additional options for PETSc in the form of command line "-key value" pairs
1315 .   `PETSC_OPTIONS_YAML`          - (requires configuring PETSc to use libyaml with `--download-yaml`) a string containing additional options for PETSc in the form of a YAML document
1316 .   `PETSC_VIEWER_SOCKET_PORT`    - socket number to use for socket viewer
1317 -   `PETSC_VIEWER_SOCKET_MACHINE` - machine to use for socket viewer to connect to
1318 
1319   Level: beginner
1320 
1321   Note:
1322   If for some reason you must call `MPI_Init()` separately from `PetscInitialize()`, call
1323   it before `PetscInitialize()`.
1324 
1325   Fortran Notes:
1326   In Fortran this routine can be called with
1327 .vb
1328        call PetscInitialize(ierr)
1329        call PetscInitialize(file,ierr) or
1330        call PetscInitialize(file,help,ierr)
1331 .ve
1332 
1333   If your main program is C but you call Fortran code that also uses PETSc you need to call `PetscInitializeFortran()` soon after
1334   calling `PetscInitialize()`.
1335 
1336   Options Database Key for Developers:
1337 . -checkfunctionlist - automatically checks that function lists associated with objects are correctly cleaned up. Produces messages of the form:
1338                        "function name: MatInodeGetInodeSizes_C" if they are not cleaned up. This flag is always set for the test harness (in framework.py)
1339 
1340 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscInitializeNoArguments()`, `PetscLogGpuTime()`
1341 @*/
1342 PetscErrorCode PetscInitialize(int *argc, char ***args, const char file[], const char help[])
1343 {
1344   PetscMPIInt flag;
1345   const char *prog = "Unknown Name";
1346 
1347   PetscFunctionBegin;
1348   if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
1349   PetscCallMPI(MPI_Initialized(&flag));
1350   if (!flag) {
1351     PetscCheck(PETSC_COMM_WORLD == MPI_COMM_NULL, PETSC_COMM_SELF, PETSC_ERR_SUP, "You cannot set PETSC_COMM_WORLD if you have not initialized MPI first");
1352     PetscCall(PetscPreMPIInit_Private());
1353 #if defined(PETSC_HAVE_MPI_INIT_THREAD)
1354     {
1355       PetscMPIInt provided;
1356       PetscCallMPI(MPI_Init_thread(argc, args, PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE ? MPI_THREAD_FUNNELED : PETSC_MPI_THREAD_REQUIRED, &provided));
1357       PetscCheck(PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE || provided >= PETSC_MPI_THREAD_REQUIRED, PETSC_COMM_SELF, PETSC_ERR_MPI, "The MPI implementation's provided thread level is less than what you required");
1358       if (PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE) PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_FUNNELED; // assign it a valid value after check-up
1359     }
1360 #else
1361     PetscCallMPI(MPI_Init(argc, args));
1362 #endif
1363     PetscBeganMPI = PETSC_TRUE;
1364   }
1365 
1366   if (argc && *argc) prog = **args;
1367   if (argc && args) {
1368     PetscGlobalArgc = *argc;
1369     PetscGlobalArgs = *args;
1370   }
1371   PetscCall(PetscInitialize_Common(prog, file, help, PETSC_FALSE, 0));
1372   PetscFunctionReturn(PETSC_SUCCESS);
1373 }
1374 
1375 PETSC_INTERN PetscObject *PetscObjects;
1376 PETSC_INTERN PetscInt     PetscObjectsCounts;
1377 PETSC_INTERN PetscInt     PetscObjectsMaxCounts;
1378 PETSC_INTERN PetscBool    PetscObjectsLog;
1379 
/*
    PetscFreeMPIResources - Frees all the MPI datatypes and reduction operations that PETSc may have
    created during initialization (extended-precision types, combined value/index types, custom ops).
    Called from PetscFinalize() before MPI itself is finalized. The #if guards mirror the ones used
    when the corresponding objects were created, so only objects that exist are freed.
*/
PetscErrorCode PetscFreeMPIResources(void)
{
  PetscFunctionBegin;
  /* quad-precision (and quad complex) datatypes, only built when __float128 support is enabled */
#if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  PetscCallMPI(MPI_Type_free(&MPIU___FLOAT128));
  #if defined(PETSC_HAVE_COMPLEX)
  PetscCallMPI(MPI_Type_free(&MPIU___COMPLEX128));
  #endif
#endif
  /* half-precision datatype */
#if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  PetscCallMPI(MPI_Type_free(&MPIU___FP16));
#endif

#if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
  /* PetscReal is an extended type, so PETSc registered its own SUM/MAX/MIN reduction ops */
  PetscCallMPI(MPI_Op_free(&MPIU_SUM));
  PetscCallMPI(MPI_Op_free(&MPIU_MAX));
  PetscCallMPI(MPI_Op_free(&MPIU_MIN));
#elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
  /* extended types available but not the default PetscReal: only the dedicated sum op was created */
  PetscCallMPI(MPI_Op_free(&MPIU_SUM___FP16___FLOAT128));
#endif

  /* combined datatypes used for value/index reductions (e.g. maxloc-style operations) */
  PetscCallMPI(MPI_Type_free(&MPIU_2SCALAR));
  PetscCallMPI(MPI_Type_free(&MPIU_REAL_INT));
  PetscCallMPI(MPI_Type_free(&MPIU_SCALAR_INT));
#if defined(PETSC_USE_64BIT_INDICES)
  /* with 64-bit PetscInt these are custom types; otherwise they alias built-in MPI types */
  PetscCallMPI(MPI_Type_free(&MPIU_2INT));
  PetscCallMPI(MPI_Type_free(&MPIU_INT_MPIINT));
#endif
  PetscCallMPI(MPI_Type_free(&MPI_4INT));
  PetscCallMPI(MPI_Type_free(&MPIU_4INT));
  /* custom reduction operations */
  PetscCallMPI(MPI_Op_free(&MPIU_MAXSUM_OP));
  PetscCallMPI(MPI_Op_free(&Petsc_Garbage_SetIntersectOp));
  PetscFunctionReturn(PETSC_SUCCESS);
}
1417 
1418 PETSC_INTERN PetscErrorCode PetscLogFinalize(void);
1419 PETSC_EXTERN PetscErrorCode PetscFreeAlign(void *, int, const char[], const char[]);
1420 
1421 /*@
  PetscFinalize - Checks for options to be handled at the conclusion of a PETSc program and frees any remaining PETSc objects and data
  structures. Automatically calls `MPI_Finalize()` if the user had not called `MPI_Init()` before calling `PetscInitialize()`.
1424 
1425   Collective on `PETSC_COMM_WORLD`
1426 
1427   Options Database Keys:
1428 + -options_view                    - Calls `PetscOptionsView()`
1429 . -options_left                    - Prints unused options that remain in the database
1430 . -objects_dump [all]              - Prints list of objects allocated by the user that have not been freed, the option all cause all outstanding objects to be listed
1431 . -mpidump                         - Calls PetscMPIDump()
1432 . -malloc_dump <optional filename> - Calls `PetscMallocDump()`, displays all memory allocated that has not been freed
1433 . -memory_view                     - Prints total memory usage
1434 - -malloc_view <optional filename> - Prints list of all memory allocated and in what functions
1435 
1436   Level: beginner
1437 
1438   Note:
1439   See `PetscInitialize()` for other runtime options.
1440 
1441   You can call `PetscInitialize()` after `PetscFinalize()` but only with MPI-Uni or if you called `MPI_Init()` before ever calling `PetscInitialize()`.
1442 
1443 .seealso: `PetscInitialize()`, `PetscOptionsView()`, `PetscMallocDump()`, `PetscMPIDump()`, `PetscEnd()`
1444 @*/
PetscErrorCode PetscFinalize(void)
{
  PetscMPIInt rank;
  PetscInt    nopt;
  PetscBool   flg1 = PETSC_FALSE, flg2 = PETSC_FALSE, flg3 = PETSC_FALSE; /* scratch option flags, reused throughout */
  PetscBool   flg;
  char        mname[PETSC_MAX_PATH_LEN];

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "PetscInitialize() must be called before PetscFinalize()");
  PetscCall(PetscInfo(NULL, "PetscFinalize() called\n"));

  /* shut down the MPI linear solver server first, while communication is still fully available */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
  if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerEnd());

  /* release the copy of the command-line arguments made for Fortran */
  PetscCall(PetscFreeAlign(PetscGlobalArgsFortran, 0, NULL, NULL));
  PetscGlobalArgc = 0;
  PetscGlobalArgs = NULL;

  /* Clean up Garbage automatically on COMM_SELF and COMM_WORLD at finalize */
  {
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscMPIInt flg; /* shadows the function-level PetscBool flg within this scope */
    void       *tmp;

    /* only run cleanup if the inner comm exists AND carries a garbage hash map attribute */
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_SELF));
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD));
  }

  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
#if defined(PETSC_HAVE_ADIOS)
  PetscCallExternal(adios_read_finalize_method, ADIOS_READ_METHOD_BP_AGGREGATE);
  PetscCallExternal(adios_finalize, rank);
#endif
  /* -citations [filename]: print accumulated citation entries to stdout or the given file */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-citations", &flg));
  if (flg) {
    char *cits, filename[PETSC_MAX_PATH_LEN];
    FILE *fd = PETSC_STDOUT;

    PetscCall(PetscOptionsGetString(NULL, NULL, "-citations", filename, sizeof(filename), NULL));
    if (filename[0]) PetscCall(PetscFOpen(PETSC_COMM_WORLD, filename, "w", &fd));
    /* append a NUL terminator to the segmented buffer before extracting it as one string */
    PetscCall(PetscSegBufferGet(PetscCitationsList, 1, &cits));
    cits[0] = 0;
    PetscCall(PetscSegBufferExtractAlloc(PetscCitationsList, &cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "If you publish results based on this computation please cite the following:\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "%s", cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFClose(PETSC_COMM_WORLD, fd));
    PetscCall(PetscFree(cits));
  }
  PetscCall(PetscSegBufferDestroy(&PetscCitationsList));

#if defined(PETSC_SERIALIZE_FUNCTIONS)
  PetscCall(PetscFPTDestroy());
#endif

#if defined(PETSC_HAVE_X)
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-x_virtual", &flg1, NULL));
  if (flg1) {
    /*  this is a crude hack, but better than nothing */
    PetscCall(PetscPOpen(PETSC_COMM_WORLD, NULL, "pkill -15 Xvfb", "r", NULL));
  }
#endif

#if !defined(PETSC_HAVE_THREADSAFETY)
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-memory_view", &flg2, NULL));
  if (flg2) PetscCall(PetscMemoryView(PETSC_VIEWER_STDOUT_WORLD, "Summary of Memory Usage in PETSc\n"));
#endif

  if (PetscDefined(USE_LOG)) {
    flg1 = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-get_total_flops", &flg1, NULL));
    if (flg1) {
      PetscLogDouble flops = 0;
      /* reduce to rank 0 only; other ranks print an unreduced 0 via PetscPrintf (which outputs on rank 0) */
      PetscCallMPI(MPI_Reduce(&petsc_TotalFlops, &flops, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Total flops over all processors %g\n", flops));
    }
  }

  if (PetscDefined(USE_LOG) && PetscDefined(HAVE_MPE)) {
    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_mpe", mname, sizeof(mname), &flg1));
    if (flg1) PetscCall(PetscLogMPEDump(mname[0] ? mname : NULL));
  }

#if defined(PETSC_HAVE_KOKKOS)
  // Free PETSc/kokkos stuff before the potentially non-null PETSc default gpu stream is destroyed by PetscObjectRegisterDestroyAll
  if (PetscKokkosInitialized) {
    PetscCall(PetscKokkosFinalize_Private());
    PetscKokkosInitialized = PETSC_FALSE;
  }
#endif

  // Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_().
  PetscCall(PetscObjectRegisterDestroyAll());

  if (PetscDefined(USE_LOG)) {
    PetscCall(PetscOptionsPushCreateViewerOff(PETSC_FALSE));
    PetscCall(PetscLogViewFromOptions());
    PetscCall(PetscOptionsPopCreateViewerOff());
    //  It should be turned on with PetscLogGpuTime() and never turned off except in this place
    PetscLogGpuTimeFlag = PETSC_FALSE;

    // Free any objects created by the last block of code.
    PetscCall(PetscObjectRegisterDestroyAll());

    /* -log takes precedence over -log_all for the dump filename since it is read second */
    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_all", mname, sizeof(mname), &flg1));
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log", mname, sizeof(mname), &flg2));
    if (flg1 || flg2) PetscCall(PetscLogDump(mname));
  }

  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-no_signal_handler", &flg1, NULL));
  if (!flg1) PetscCall(PetscPopSignalHandler());
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpidump", &flg1, NULL));
  if (flg1) PetscCall(PetscMPIDump(stdout));
  flg1 = PETSC_FALSE;
  flg2 = PETSC_FALSE;
  /* preemptive call to avoid listing this option in options table as unused */
  /* NOTE(review): the second HasName overwrites flg1 from the first; both calls exist only to mark
     the options as used -- the real values are re-queried later where they are acted on */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-malloc_dump", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_view", &flg2, NULL));

  if (flg2) { PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD)); }

  /* to prevent PETSc -options_left from warning */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox_warning", &flg1));

  flg3 = PETSC_FALSE; /* default value is required */
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_left", &flg3, &flg1));
  /* when -options_left was not given explicitly, default to reporting leftover options */
  if (!flg1) flg3 = PETSC_TRUE;
  if (flg3) {
    if (!flg2 && flg1) { /* have not yet printed the options */
      PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD));
    }
    PetscCall(PetscOptionsAllUsed(NULL, &nopt));
    if (nopt) {
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! There are options you set that were not used!\n"));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! could be spelling mistake, etc!\n"));
      if (nopt == 1) {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There is one unused database option. It is:\n"));
      } else {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are %" PetscInt_FMT " unused database options. They are:\n", nopt));
      }
    } else if (flg3 && flg1) {
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are no unused options.\n"));
    }
    PetscCall(PetscOptionsLeft(NULL));
  }

#if defined(PETSC_HAVE_SAWS)
  if (!PetscGlobalRank) {
    PetscCall(PetscStackSAWsViewOff());
    PetscCallSAWs(SAWs_Finalize, ());
  }
#endif

  /*
       List all objects the user may have forgot to free
  */
  if (PetscDefined(USE_LOG) && PetscObjectsLog) {
    PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
    if (flg1) {
      MPI_Comm local_comm;
      char     string[64];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-objects_dump", string, sizeof(string), NULL));
      /* dup the comm and dump rank-by-rank so output from different ranks does not interleave */
      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscObjectsDump(stdout, (string[0] == 'a') ? PETSC_TRUE : PETSC_FALSE));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }

  PetscObjectsCounts    = 0;
  PetscObjectsMaxCounts = 0;
  PetscCall(PetscFree(PetscObjects));

  /*
     Destroy any packages that registered a finalize
  */
  PetscCall(PetscRegisterFinalizeAll());

  PetscCall(PetscLogFinalize());

  /*
     Print PetscFunctionLists that have not been properly freed
  */
  if (PetscPrintFunctionList) PetscCall(PetscFunctionListPrintAll());

  if (petsc_history) {
    PetscCall(PetscCloseHistoryFile(&petsc_history));
    petsc_history = NULL;
  }
  PetscCall(PetscOptionsHelpPrintedDestroy(&PetscOptionsHelpPrintedSingleton));
  PetscCall(PetscInfoDestroy());

#if !defined(PETSC_HAVE_THREADSAFETY)
  /* skip the malloc reports under valgrind; valgrind provides its own leak checking */
  if (!(PETSC_RUNNING_ON_VALGRIND)) {
    char  fname[PETSC_MAX_PATH_LEN];
    char  sname[PETSC_MAX_PATH_LEN];
    FILE *fd;
    int   err;

    flg2 = PETSC_FALSE;
    flg3 = PETSC_FALSE;
    if (PetscDefined(USE_DEBUG)) PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_test", &flg2, NULL));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_debug", &flg3, NULL));
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_dump", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      /* per-rank dump file: <fname>_<rank> */
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocDump(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1 || flg2 || flg3) {
      MPI_Comm local_comm;

      /* no filename: dump to stdout one rank at a time */
      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocDump(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_view", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocView(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1) {
      MPI_Comm local_comm;

      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocView(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }
#endif

  /*
     Close any open dynamic libraries
  */
  PetscCall(PetscFinalize_DynamicLibraries());

  /* Can be destroyed only after all the options are used */
  PetscCall(PetscOptionsDestroyDefault());

#if defined(PETSC_HAVE_NVSHMEM)
  if (PetscBeganNvshmem) {
    PetscCall(PetscNvshmemFinalize());
    PetscBeganNvshmem = PETSC_FALSE;
  }
#endif

  PetscCall(PetscFreeMPIResources());

  /*
     Destroy any known inner MPI_Comm's and attributes pointing to them
     Note this will not destroy any new communicators the user has created.

     If all PETSc objects were not destroyed those left over objects will have hanging references to
     the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again
 */
  {
    PetscCommCounter *counter;
    PetscMPIInt       flg; /* shadows the function-level PetscBool flg within this scope */
    MPI_Comm          icomm;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      /* remove the attributes first so the deletion callbacks do not fire on the dying comm */
      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
  }

  /* release the keyvals PETSc always creates during initialization */
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Counter_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_InnerComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_OuterComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_ShmComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_CreationIdx_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Garbage_HMap_keyval));

  // Free keyvals which may be silently created by some routines
  if (Petsc_SharedWD_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedWD_keyval));
  if (Petsc_SharedTmp_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedTmp_keyval));

  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockOpen));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStdout));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStderr));
  PetscCall(PetscSpinlockDestroy(&PetscCommSpinLock));

  /* only finalize MPI if PetscInitialize() was the one that started it */
  if (PetscBeganMPI) {
    PetscMPIInt flag;
    PetscCallMPI(MPI_Finalized(&flag));
    PetscCheck(!flag, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()");
    /* wait until the very last moment to disable error handling */
    PetscErrorHandlingInitialized = PETSC_FALSE;
    PetscCallMPI(MPI_Finalize());
  } else PetscErrorHandlingInitialized = PETSC_FALSE;

  /*
     Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because
   the communicator has some outstanding requests on it. Specifically if the
   flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See
   src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate()
   is never freed as it should be. Thus one may obtain messages of the form
   [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the
   memory was not freed.

*/
  PetscCall(PetscMallocClear());
  PetscCall(PetscStackReset());

  PetscInitializeCalled = PETSC_FALSE;
  PetscFinalizeCalled   = PETSC_TRUE;
#if defined(PETSC_USE_COVERAGE)
  /*
     flush gcov, otherwise during CI the flushing continues into the next pipeline resulting in git not being able to delete directories since the
     gcov files are still being added to the directories as git tries to remove the directories.
   */
  __gcov_flush();
#endif
  /* To match PetscFunctionBegin() at the beginning of this function */
  PetscStackClearTop;
  return PETSC_SUCCESS;
}
1812 
#if defined(PETSC_MISSING_LAPACK_lsame_)
/*
   Fallback for the LAPACK utility lsame_() when it is absent from the BLAS/LAPACK library:
   returns 1 when the two characters are equal ignoring case (ASCII, case offset of 32), else 0
*/
PETSC_EXTERN int lsame_(char *a, char *b)
{
  const int delta = *a - *b;

  return (delta == 0 || delta == 32 || delta == -32) ? 1 : 0;
}
#endif
1822 
#if defined(PETSC_MISSING_LAPACK_lsame)
/*
   Fallback for the LAPACK utility lsame() (no trailing underscore name mangling):
   returns 1 when the two characters are equal ignoring case (ASCII, case offset of 32), else 0
*/
PETSC_EXTERN int lsame(char *a, char *b)
{
  const int delta = *a - *b;

  return (delta == 0 || delta == 32 || delta == -32) ? 1 : 0;
}
#endif
1832 
/*
   Dispatches an allreduce with a wide (MPIU_Count) element count: uses the large-count
   MPI_Allreduce_c() when the MPI implementation provides it, otherwise narrows the count
   and calls the classic MPI_Allreduce(). Returns the raw MPI error code.
*/
static inline PetscMPIInt MPIU_Allreduce_Count(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
{
  PetscMPIInt err;
#if !defined(PETSC_HAVE_MPI_LARGE_COUNT)
  PetscMPIInt count2;

  PetscMPIIntCast_Internal(count, &count2); /* narrow the 64-bit count to int for classic MPI */
  err = MPI_Allreduce((void *)inbuf, outbuf, count2, dtype, op, comm);
#else
  err = MPI_Allreduce_c((void *)inbuf, outbuf, count, dtype, op, comm);
#endif
  return err;
}
1846 
1847 /*
1848      When count is 1 and dtype == MPIU_INT performs the reduction in PetscInt64 to check for integer overflow
1849 */
1850 PetscMPIInt MPIU_Allreduce_Private(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
1851 {
1852   PetscMPIInt err;
1853   if (!PetscDefined(USE_64BIT_INDICES) && count == 1 && dtype == MPIU_INT && (op == MPI_SUM || op == MPI_PROD)) {
1854     PetscInt64 incnt, outcnt;
1855     void      *inbufd, *outbufd;
1856 
1857     if (inbuf != MPI_IN_PLACE) {
1858       incnt   = *(PetscInt32 *)inbuf;
1859       inbufd  = &incnt;
1860       outbufd = &outcnt;
1861       err     = MPIU_Allreduce_Count(inbufd, outbufd, count, MPIU_INT64, op, comm);
1862     } else {
1863       outcnt  = *(PetscInt32 *)outbuf;
1864       outbufd = &outcnt;
1865       err     = MPIU_Allreduce_Count(MPI_IN_PLACE, outbufd, count, MPIU_INT64, op, comm);
1866     }
1867     if (!err && outcnt > PETSC_INT_MAX) err = MPI_ERR_OTHER;
1868     *(PetscInt32 *)outbuf = (PetscInt32)outcnt;
1869   } else {
1870     err = MPIU_Allreduce_Count(inbuf, outbuf, count, dtype, op, comm);
1871   }
1872   return err;
1873 }
1874 
/*@C
  PetscCtxDestroyDefault - An implementation of a `PetscCtxDestroyFn` that uses `PetscFree()` to free the context

  Input Parameter:
. ctx - the context to be destroyed

  Level: intermediate

  Note:
  This is not called directly, rather it is passed to `DMSetApplicationContextDestroy()`, `PetscContainerSetDestroy()`,
  `PetscObjectContainerCreate()` and similar routines and then called by the destructor of the associated object.

.seealso: `PetscObject`, `PetscCtxDestroyFn`, `PetscObjectDestroy()`, `DMSetApplicationContextDestroy()`,  `PetscContainerSetDestroy()`,
           `PetscObjectContainerCreate()`
@*/
PETSC_EXTERN PetscErrorCode PetscCtxDestroyDefault(void **ctx)
{
  PetscFunctionBegin;
  PetscCall(PetscFree(*ctx));
  PetscFunctionReturn(PETSC_SUCCESS);
}
1896