xref: /petsc/src/sys/objects/pinit.c (revision bb35897ee893994dec84e688d7df046e8fbd549f)
1 #define PETSC_DESIRE_FEATURE_TEST_MACROS
2 /*
3    This file defines the initialization of PETSc, including PetscInitialize()
4 */
5 #include <petsc/private/petscimpl.h> /*I  "petscsys.h"   I*/
6 #include <petsc/private/logimpl.h>
7 #include <petscviewer.h>
8 #include <petsc/private/garbagecollector.h>
9 
10 #if !defined(PETSC_HAVE_WINDOWS_COMPILERS)
11   #include <petsc/private/valgrind/valgrind.h>
12 #endif
13 
14 #if defined(PETSC_USE_FORTRAN_BINDINGS)
15   #include <petsc/private/ftnimpl.h>
16 #endif
17 
18 #if PetscDefined(USE_COVERAGE)
19 EXTERN_C_BEGIN
20   #if defined(PETSC_HAVE___GCOV_DUMP)
21     #define __gcov_flush(x) __gcov_dump(x)
22   #endif
23 void __gcov_flush(void);
24 EXTERN_C_END
25 #endif
26 
27 #if defined(PETSC_SERIALIZE_FUNCTIONS)
28 PETSC_INTERN PetscFPT PetscFPTData;
29 PetscFPT              PetscFPTData = 0;
30 #endif
31 
32 #if PetscDefined(HAVE_SAWS)
33   #include <petscviewersaws.h>
34 #endif
35 
36 PETSC_INTERN FILE *petsc_history;
37 
38 PETSC_INTERN PetscErrorCode PetscInitialize_DynamicLibraries(void);
39 PETSC_INTERN PetscErrorCode PetscFinalize_DynamicLibraries(void);
40 PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm, int);
41 PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm, int);
42 PETSC_INTERN PetscErrorCode PetscCloseHistoryFile(FILE **);
43 
44 /* user may set these BEFORE calling PetscInitialize() */
45 MPI_Comm PETSC_COMM_WORLD = MPI_COMM_NULL;
46 #if PetscDefined(HAVE_MPI_INIT_THREAD)
47 PetscMPIInt PETSC_MPI_THREAD_REQUIRED = PETSC_DECIDE;
48 #else
49 PetscMPIInt PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_SINGLE;
50 #endif
51 
52 PetscMPIInt Petsc_Counter_keyval      = MPI_KEYVAL_INVALID;
53 PetscMPIInt Petsc_InnerComm_keyval    = MPI_KEYVAL_INVALID;
54 PetscMPIInt Petsc_OuterComm_keyval    = MPI_KEYVAL_INVALID;
55 PetscMPIInt Petsc_ShmComm_keyval      = MPI_KEYVAL_INVALID;
56 PetscMPIInt Petsc_CreationIdx_keyval  = MPI_KEYVAL_INVALID;
57 PetscMPIInt Petsc_Garbage_HMap_keyval = MPI_KEYVAL_INVALID;
58 
59 PetscMPIInt Petsc_SharedWD_keyval  = MPI_KEYVAL_INVALID;
60 PetscMPIInt Petsc_SharedTmp_keyval = MPI_KEYVAL_INVALID;
61 
62 /*
63      Declare and set all the string names of the PETSc enums
64 */
65 const char *const PetscBools[]     = {"FALSE", "TRUE", "PetscBool", "PETSC_", NULL};
66 const char *const PetscBool3s[]    = {"FALSE", "TRUE", "UNKNOWN", "PetscBool3", "PETSC_", NULL};
67 const char *const PetscCopyModes[] = {"COPY_VALUES", "OWN_POINTER", "USE_POINTER", "PetscCopyMode", "PETSC_", NULL};
68 
69 PetscBool PetscPreLoadingUsed = PETSC_FALSE;
70 PetscBool PetscPreLoadingOn   = PETSC_FALSE;
71 
72 PetscInt PetscHotRegionDepth;
73 
74 PetscBool PETSC_RUNNING_ON_VALGRIND = PETSC_FALSE;
75 
76 #if defined(PETSC_HAVE_THREADSAFETY)
77 PetscSpinlock PetscViewerASCIISpinLockOpen;
78 PetscSpinlock PetscViewerASCIISpinLockStdout;
79 PetscSpinlock PetscViewerASCIISpinLockStderr;
80 PetscSpinlock PetscCommSpinLock;
81 #endif
82 
83 extern PetscInt PetscNumBLASThreads;
84 
/*@C
  PetscInitializeNoPointers - Calls PetscInitialize() from C/C++ without the pointers to argc and args

  Collective, No Fortran Support

  Input Parameters:
+ argc     - number of args
. args     - array of command line arguments
. filename - optional name of the program file, pass `NULL` to ignore
- help     - optional help, pass `NULL` to ignore

  Level: advanced

  Notes:
  This is called only by the PETSc Julia interface. Even though it might start MPI it sets the flag to
  indicate that it did NOT start MPI so that the `PetscFinalize()` does not end MPI, thus allowing `PetscInitialize()` to
  be called multiple times from Julia without the problem of trying to initialize MPI more than once.

  Developer Notes:
  Turns off PETSc signal handling to allow Julia to manage signals

.seealso: `PetscInitialize()`, `PetscInitializeFortran()`, `PetscInitializeNoArguments()`
@*/
PetscErrorCode PetscInitializeNoPointers(int argc, char **args, const char *filename, const char *help)
{
  /* local copies so we can legally take the addresses that PetscInitialize() requires */
  int    myargc = argc;
  char **myargs = args;

  PetscFunctionBegin;
  PetscCall(PetscInitialize(&myargc, &myargs, filename, help));
  PetscCall(PetscPopSignalHandler());
  PetscBeganMPI = PETSC_FALSE; /* pretend PETSc did not start MPI so PetscFinalize() will not call MPI_Finalize(), see Notes above */
  PetscFunctionReturn(PETSC_SUCCESS);
}
119 
120 /*@C
121   PetscInitializeNoArguments - Calls `PetscInitialize()` from C/C++ without
122   the command line arguments.
123 
124   Collective
125 
126   Level: advanced
127 
128 .seealso: `PetscInitialize()`, `PetscInitializeFortran()`
129 @*/
130 PetscErrorCode PetscInitializeNoArguments(void) PeNS
131 {
132   int    argc = 0;
133   char **args = NULL;
134 
135   PetscFunctionBegin;
136   PetscCall(PetscInitialize(&argc, &args, NULL, NULL));
137   PetscFunctionReturn(PETSC_SUCCESS);
138 }
139 
/*@
  PetscInitialized - Determine whether PETSc is initialized.

  Output Parameter:
. isInitialized - `PETSC_TRUE` if PETSc is initialized, `PETSC_FALSE` otherwise

  Level: beginner

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscInitialized(PetscBool *isInitialized)
{
  PetscFunctionBegin;
  /* the pointer is only validated once PETSc is up; presumably the checking machinery
     is not usable before PetscInitialize() — mirrors the guard in PetscFinalized() */
  if (PetscInitializeCalled) PetscAssertPointer(isInitialized, 1);
  *isInitialized = PetscInitializeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}
157 
/*@
  PetscFinalized - Determine whether `PetscFinalize()` has been called yet

  Output Parameter:
. isFinalized - `PETSC_TRUE` if PETSc is finalized, `PETSC_FALSE` otherwise

  Level: developer

.seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
@*/
PetscErrorCode PetscFinalized(PetscBool *isFinalized)
{
  PetscFunctionBegin;
  /* only validate the pointer while PETSc is still alive; presumably the checking
     machinery is unavailable after PetscFinalize() — mirrors PetscInitialized() */
  if (!PetscFinalizeCalled) PetscAssertPointer(isFinalized, 1);
  *isFinalized = PetscFinalizeCalled;
  PetscFunctionReturn(PETSC_SUCCESS);
}
175 
176 PETSC_INTERN PetscErrorCode PetscOptionsCheckInitial_Private(const char[]);
177 
178 /*
179        This function is the MPI reduction operation used to compute the sum of the
180    first half of the datatype and the max of the second half.
181 */
182 MPI_Op MPIU_MAXSUM_OP               = 0;
183 MPI_Op Petsc_Garbage_SetIntersectOp = 0;
184 
/*
   MPI reduction kernel for MPIU_MAXSUM_OP: each element is a pair; the first member
   of each pair is combined with max() and the second member is summed.

   Supports MPIU_2INT and MPIU_INT_MPIINT; anything else aborts, since an MPI
   user-defined reduction function cannot return an error code.
*/
PETSC_INTERN void MPIAPI MPIU_MaxSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscFunctionBegin;
  if (*datatype == MPIU_INT_MPIINT && PetscDefined(USE_64BIT_INDICES)) {
#if defined(PETSC_USE_64BIT_INDICES)
    /* mixed-width pairs (PetscInt a, PetscMPIInt b): max on a, sum on b */
    struct petsc_mpiu_int_mpiint *xin = (struct petsc_mpiu_int_mpiint *)in, *xout = (struct petsc_mpiu_int_mpiint *)out;
    PetscMPIInt                   count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[i].a = PetscMax(xout[i].a, xin[i].a);
      xout[i].b += xin[i].b;
    }
#endif
  } else if (*datatype == MPIU_2INT || *datatype == MPIU_INT_MPIINT) {
    /* contiguous PetscInt pairs: even indices take the max, odd indices accumulate */
    PetscInt   *xin = (PetscInt *)in, *xout = (PetscInt *)out;
    PetscMPIInt count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[2 * i] = PetscMax(xout[2 * i], xin[2 * i]);
      xout[2 * i + 1] += xin[2 * i + 1];
    }
  } else {
    /* cannot use PetscCall() here: this callback must return void, so print and abort */
    PetscErrorCode ierr = (*PetscErrorPrintf)("Can only handle MPIU_2INT and MPIU_INT_MPIINT data types");
    (void)ierr;
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
213 
/*@
  PetscMaxSum - Returns the max of the first entry over all MPI processes and the sum of the second entry.

  Collective

  Input Parameters:
+ comm  - the communicator
- array - an array of length 2 times `size`, the number of MPI processes

  Output Parameters:
+ max - the maximum of `array[2*rank]` over all MPI processes
- sum - the sum of the `array[2*rank + 1]` over all MPI processes

  Level: developer

.seealso: `PetscInitialize()`
@*/
PetscErrorCode PetscMaxSum(MPI_Comm comm, const PetscInt array[], PetscInt *max, PetscInt *sum)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
  {
    /* reduce all pairs with MPIU_MAXSUM_OP and scatter one reduced pair back to each rank */
    struct {
      PetscInt max, sum;
    } work;
    PetscCallMPI(MPI_Reduce_scatter_block((void *)array, &work, 1, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work.max;
    *sum = work.sum;
  }
#else
  {
    /* fallback: allreduce the full array on every rank, then pick out this rank's pair */
    PetscMPIInt size, rank;
    struct {
      PetscInt max, sum;
    } *work;
    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscMalloc1(size, &work));
    PetscCallMPI(MPIU_Allreduce((void *)array, work, size, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work[rank].max;
    *sum = work[rank].sum;
    PetscCall(PetscFree(work));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
260 
261 #if (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
262   #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
263     #include <quadmath.h>
264   #endif
265 MPI_Op MPIU_SUM___FP16___FLOAT128 = 0;
266   #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
267 MPI_Op MPIU_SUM = 0;
268   #endif
269 
/*
   MPI reduction kernel implementing summation for the extended-precision types
   (__float128, __complex128, __fp16) that the built-in MPI_SUM cannot handle;
   also used as MPIU_SUM when PetscReal is one of those types.

   Aborts on unsupported datatypes, since an MPI user-defined reduction function
   cannot return an error code.
*/
PETSC_EXTERN void MPIAPI PetscSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscMPIInt i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype == MPIU_REAL) {
    PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
  #if defined(PETSC_HAVE_COMPLEX)
  else if (*datatype == MPIU_COMPLEX) {
    PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
  #endif
  #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  else if (*datatype == MPIU___FLOAT128) {
    __float128 *xin = (__float128 *)in, *xout = (__float128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
    #if defined(PETSC_HAVE_COMPLEX)
  } else if (*datatype == MPIU___COMPLEX128) {
    __complex128 *xin = (__complex128 *)in, *xout = (__complex128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
    #endif
  }
  #endif
  #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  else if (*datatype == MPIU___FP16) {
    /* the explicit cast avoids promotion of the half-precision sum to float */
    __fp16 *xin = (__fp16 *)in, *xout = (__fp16 *)out;
    for (i = 0; i < count; i++) xout[i] = (__fp16)(xin[i] + xout[i]);
  }
  #endif
  else {
  /* the error message enumerates exactly the datatypes compiled in above */
  #if (!defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)) && (!defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16))
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
  #elif !defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, or MPIU___COMPLEX128 data types"));
  #elif !defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, or MPIU___FP16 data types"));
  #else
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, MPIU___COMPLEX128, or MPIU___FP16 data types"));
  #endif
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
316 #endif
317 
318 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
319 MPI_Op MPIU_MAX = 0;
320 MPI_Op MPIU_MIN = 0;
321 
322 PETSC_EXTERN void MPIAPI PetscMax_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
323 {
324   PetscInt i, count = *cnt;
325 
326   PetscFunctionBegin;
327   if (*datatype == MPIU_REAL) {
328     PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
329     for (i = 0; i < count; i++) xout[i] = PetscMax(xout[i], xin[i]);
330   }
331   #if defined(PETSC_HAVE_COMPLEX)
332   else if (*datatype == MPIU_COMPLEX) {
333     PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
334     for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) < PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
335   }
336   #endif
337   else {
338     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
339     PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
340   }
341   PetscFunctionReturnVoid();
342 }
343 
344 PETSC_EXTERN void MPIAPI PetscMin_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
345 {
346   PetscInt i, count = *cnt;
347 
348   PetscFunctionBegin;
349   if (*datatype == MPIU_REAL) {
350     PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
351     for (i = 0; i < count; i++) xout[i] = PetscMin(xout[i], xin[i]);
352   }
353   #if defined(PETSC_HAVE_COMPLEX)
354   else if (*datatype == MPIU_COMPLEX) {
355     PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
356     for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) > PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
357   }
358   #endif
359   else {
360     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_SCALAR data (i.e. double or complex) types"));
361     PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
362   }
363   PetscFunctionReturnVoid();
364 }
365 #endif
366 
/*
   Private routine to delete internal tag/name counter storage when a communicator is freed.

   This is called by MPI, not by users. This is called by MPI_Comm_free() when the communicator that has this data as an attribute is freed.

   Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_Counter_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *count_val, void *extra_state)
{
  PetscCommCounter      *counter = (PetscCommCounter *)count_val;
  struct PetscCommStash *comms   = counter->comms, *pcomm;

  PetscFunctionBegin;
  PetscCallReturnMPI(PetscInfo(NULL, "Deleting counter data in an MPI_Comm %ld\n", (long)comm));
  PetscCallReturnMPI(PetscFree(counter->iflags));
  /* walk the singly-linked stash of communicators, freeing each MPI_Comm and its list node */
  while (comms) {
    PetscCallMPIReturnMPI(MPI_Comm_free(&comms->comm));
    pcomm = comms;
    comms = comms->next;
    PetscCallReturnMPI(PetscFree(pcomm));
  }
  PetscCallReturnMPI(PetscFree(counter));
  PetscFunctionReturn(MPI_SUCCESS);
}
392 
/*
  This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Comm_delete_attr) or when the user
  calls MPI_Comm_free().

  This is the only entry point for breaking the links between inner and outer comms.

  This is called by MPI, not by users. This is called when MPI_Comm_free() is called on the communicator.

  Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_InnerComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  /* union lets an MPI_Comm handle be stored in/recovered from the void* attribute value */
  union
  {
    MPI_Comm comm;
    void    *ptr;
  } icomm;

  PetscFunctionBegin;
  PetscCheckReturnMPI(keyval == Petsc_InnerComm_keyval, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Unexpected keyval");
  icomm.ptr = attr_val;
  if (PetscDefined(USE_DEBUG)) {
    /* Error out if the inner/outer comms are not correctly linked through their Outer/InnerComm attributes */
    PetscMPIInt flg;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ocomm;
    PetscCallMPIReturnMPI(MPI_Comm_get_attr(icomm.comm, Petsc_OuterComm_keyval, &ocomm, &flg));
    PetscCheckReturnMPI(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm does not have OuterComm attribute");
    PetscCheckReturnMPI(ocomm.comm == comm, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm's OuterComm attribute does not point to outer PETSc comm");
  }
  /* deleting the attribute triggers Petsc_OuterComm_Attr_DeleteFn on the inner comm */
  PetscCallMPIReturnMPI(MPI_Comm_delete_attr(icomm.comm, Petsc_OuterComm_keyval));
  PetscCallReturnMPI(PetscInfo(NULL, "User MPI_Comm %ld is being unlinked from inner PETSc comm %ld\n", (long)comm, (long)icomm.comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
431 
/*
 * This is invoked on the inner comm when Petsc_InnerComm_Attr_DeleteFn calls MPI_Comm_delete_attr().  It should not be reached any other way.
 */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_OuterComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  PetscFunctionBegin;
  /* nothing to free here; the caller removes the link, so just log for -info */
  PetscCallReturnMPI(PetscInfo(NULL, "Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n", (long)comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
441 
442 PETSC_EXTERN PetscMPIInt MPIAPI Petsc_ShmComm_Attr_DeleteFn(MPI_Comm, PetscMPIInt, void *, void *);
443 
444 #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
445 PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype, MPI_Aint *, void *);
446 PETSC_EXTERN PetscMPIInt PetscDataRep_read_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
447 PETSC_EXTERN PetscMPIInt PetscDataRep_write_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
448 #endif
449 
450 PetscMPIInt PETSC_MPI_ERROR_CLASS = MPI_ERR_LASTCODE, PETSC_MPI_ERROR_CODE;
451 
452 PETSC_INTERN int    PetscGlobalArgc;
453 PETSC_INTERN char **PetscGlobalArgs, **PetscGlobalArgsFortran;
454 int                 PetscGlobalArgc        = 0;
455 char              **PetscGlobalArgs        = NULL;
456 char              **PetscGlobalArgsFortran = NULL;
457 PetscSegBuffer      PetscCitationsList;
458 
/*
   PetscCitationsInitialize - creates the segmented buffer that accumulates citation
   entries (via PetscCitationsRegister()) and registers the default PETSc references.
*/
PetscErrorCode PetscCitationsInitialize(void)
{
  PetscFunctionBegin;
  PetscCall(PetscSegBufferCreate(1, 10000, &PetscCitationsList));

  PetscCall(PetscCitationsRegister("@TechReport{petsc-user-ref,\n\
  Author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Steven Benson and Jed Brown\n\
    and Peter Brune and Kris Buschelman and Emil Constantinescu and Lisandro Dalcin and Alp Dener\n\
    and Victor Eijkhout and Jacob Faibussowitsch and William~D. Gropp and V\'{a}clav Hapla and Tobin Isaac and Pierre Jolivet\n\
    and Dmitry Karpeev and Dinesh Kaushik and Matthew~G. Knepley and Fande Kong and Scott Kruger\n\
    and Dave~A. May and Lois Curfman McInnes and Richard Tran Mills and Lawrence Mitchell and Todd Munson\n\
    and Jose~E. Roman and Karl Rupp and Patrick Sanan and Jason Sarich and Barry~F. Smith and Hansol Suh\n\
    and Stefano Zampini and Hong Zhang and Hong Zhang and Junchao Zhang},\n\
  Title = {{PETSc/TAO} Users Manual},\n\
  Number = {ANL-21/39 - Revision 3.24},\n\
  Doi = {10.2172/2998643},\n\
  Institution = {Argonne National Laboratory},\n\
  Year = {2025}\n}\n",
                                   NULL));

  PetscCall(PetscCitationsRegister("@InProceedings{petsc-efficient,\n\
  Author = {Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith},\n\
  Title = {Efficient Management of Parallelism in Object Oriented Numerical Software Libraries},\n\
  Booktitle = {Modern Software Tools in Scientific Computing},\n\
  Editor = {E. Arge and A. M. Bruaset and H. P. Langtangen},\n\
  Pages = {163--202},\n\
  Publisher = {Birkh{\\\"{a}}user Press},\n\
  Year = {1997}\n}\n",
                                   NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}
490 
491 static char programname[PETSC_MAX_PATH_LEN] = ""; /* HP includes entire path in name */
492 
/*
   PetscSetProgramName - records the name of the running program in the file-scope
   programname buffer; retrieve it later with PetscGetProgramName(). The copy is
   truncated to PETSC_MAX_PATH_LEN by PetscStrncpy().
*/
PetscErrorCode PetscSetProgramName(const char name[])
{
  PetscFunctionBegin;
  PetscCall(PetscStrncpy(programname, name, sizeof(programname)));
  PetscFunctionReturn(PETSC_SUCCESS);
}
499 
/*@C
  PetscGetProgramName - Gets the name of the running program.

  Not Collective

  Input Parameter:
. len - length of the string name

  Output Parameter:
. name - the name of the running program, provide a string of length `PETSC_MAX_PATH_LEN`

  Level: advanced

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetProgramName(char name[], size_t len)
{
  PetscFunctionBegin;
  /* copies from the file-scope buffer filled in by PetscSetProgramName() */
  PetscCall(PetscStrncpy(name, programname, len));
  PetscFunctionReturn(PETSC_SUCCESS);
}
521 
/*@C
  PetscGetArgs - Allows you to access the raw command line arguments anywhere
  after `PetscInitialize()` is called but before `PetscFinalize()`.

  Not Collective, No Fortran Support

  Output Parameters:
+ argc - count of the number of command line arguments
- args - the command line arguments

  Level: intermediate

  Notes:
  This is usually used to pass the command line arguments into other libraries
  that are called internally deep in PETSc or the application.

  The first argument contains the program name as is normal for C programs.

  See `PetscGetArguments()` for a variant of this routine.

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetArgs(int *argc, char ***args)
{
  PetscFunctionBegin;
  /* NOTE(review): this guard only errors when PETSc was finalized and not re-initialized;
     a call before PetscInitialize() passes and yields argc = 0, args = NULL — presumably
     intentional, confirm against callers */
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  *argc = PetscGlobalArgc;
  *args = PetscGlobalArgs;
  PetscFunctionReturn(PETSC_SUCCESS);
}
552 
/*@C
  PetscGetArguments - Allows you to access the command line arguments anywhere
  after `PetscInitialize()` is called but before `PetscFinalize()`.

  Not Collective, No Fortran Support

  Output Parameter:
. args - the command line arguments

  Level: intermediate

  Note:
  This does NOT start with the program name and IS `NULL` terminated (the final argument is void)

  Use `PetscFreeArguments()` to return the memory used by the arguments.

  This makes a copy of the arguments and the array of arguments, while `PetscGetArgs()` does not make a copy,
  it returns the array of arguments that was passed into the main program.

.seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscFreeArguments()`, `PetscInitialize()`
@*/
PetscErrorCode PetscGetArguments(char ***args)
{
  PetscInt i, argc = PetscGlobalArgc;

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  if (!argc) {
    *args = NULL;
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  /* argc slots hold argc-1 copied arguments (index 0, the program name, is skipped) plus a NULL terminator */
  PetscCall(PetscMalloc1(argc, args));
  for (i = 0; i < argc - 1; i++) PetscCall(PetscStrallocpy(PetscGlobalArgs[i + 1], &(*args)[i]));
  (*args)[argc - 1] = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}
589 
590 /*@C
591   PetscFreeArguments - Frees the memory obtained with `PetscGetArguments()`
592 
593   Not Collective, No Fortran Support
594 
595   Output Parameter:
596 . args - the command line arguments
597 
598   Level: intermediate
599 
600   Developer Note:
601   This should be PetscRestoreArguments()
602 
603 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscGetArguments()`
604 @*/
605 PetscErrorCode PetscFreeArguments(char **args)
606 {
607   PetscFunctionBegin;
608   if (args) {
609     PetscInt i = 0;
610 
611     while (args[i]) PetscCall(PetscFree(args[i++]));
612     PetscCall(PetscFree(args));
613   }
614   PetscFunctionReturn(PETSC_SUCCESS);
615 }
616 
617 #if PetscDefined(HAVE_SAWS)
618   #include <petscconfiginfo.h>
619 
/*
   PetscInitializeSAWs - configures and starts the SAWs web server on MPI rank 0,
   driven by the -saws_* command line options, and publishes an introductory
   dashboard page built from the program name, options, and help text.
*/
PETSC_INTERN PetscErrorCode PetscInitializeSAWs(const char help[])
{
  PetscFunctionBegin;
  if (!PetscGlobalRank) {
    char      cert[PETSC_MAX_PATH_LEN], root[PETSC_MAX_PATH_LEN], *intro, programname[64], *appline, *options, version[64];
    int       port;
    PetscBool flg, rootlocal = PETSC_FALSE, flg2, selectport = PETSC_FALSE;
    size_t    applinelen, introlen;
    char      sawsurl[256];

    /* optional SAWs log file: -saws_log [filename] */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_log", &flg));
    if (flg) {
      char sawslog[PETSC_MAX_PATH_LEN];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_log", sawslog, sizeof(sawslog), NULL));
      if (sawslog[0]) {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (sawslog));
      } else {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (NULL));
      }
    }
    /* HTTPS certificate and port selection (explicit port or auto-selected) */
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_https", cert, sizeof(cert), &flg));
    if (flg) PetscCallSAWs(SAWs_Set_Use_HTTPS, (cert));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select", &selectport, NULL));
    if (selectport) {
      PetscCallSAWs(SAWs_Get_Available_Port, (&port));
      PetscCallSAWs(SAWs_Set_Port, (port));
    } else {
      PetscCall(PetscOptionsGetInt(NULL, NULL, "-saws_port", &port, &flg));
      if (flg) PetscCallSAWs(SAWs_Set_Port, (port));
    }
    /* document root; rootlocal records whether it is the current directory "." */
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_root", root, sizeof(root), &flg));
    if (flg) {
      PetscCallSAWs(SAWs_Set_Document_Root, (root));
      PetscCall(PetscStrcmp(root, ".", &rootlocal));
    } else {
      PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_options", &flg));
      if (flg) {
        PetscCall(PetscStrreplace(PETSC_COMM_WORLD, "${PETSC_DIR}/share/petsc/saws", root, sizeof(root)));
        PetscCallSAWs(SAWs_Set_Document_Root, (root));
      }
    }
    /* -saws_local serves the js support files from <root>/js; requires -saws_root */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_local", &flg2));
    if (flg2) {
      char jsdir[PETSC_MAX_PATH_LEN];
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_SUP, "-saws_local option requires -saws_root option");
      PetscCall(PetscSNPrintf(jsdir, sizeof(jsdir), "%s/js", root));
      PetscCall(PetscTestDirectory(jsdir, 'r', &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "-saws_local option requires js directory in root directory");
      PetscCallSAWs(SAWs_Push_Local_Header, ());
    }
    /* build the introductory dashboard HTML from program name, options, and help */
    PetscCall(PetscGetProgramName(programname, sizeof(programname)));
    PetscCall(PetscStrlen(help, &applinelen));
    introlen = 4096 + applinelen;
    applinelen += 1024;
    PetscCall(PetscMalloc(applinelen, &appline));
    PetscCall(PetscMalloc(introlen, &intro));

    if (rootlocal) {
      /* only link to the source when the <programname>.c.html file actually exists */
      PetscCall(PetscSNPrintf(appline, applinelen, "%s.c.html", programname));
      PetscCall(PetscTestFile(appline, 'r', &rootlocal));
    }
    PetscCall(PetscOptionsGetAll(NULL, &options));
    if (rootlocal && help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running <a href=\"%s.c.html\">%s</a> %s</center><br><center><pre>%s</pre></center><br>\n", programname, programname, options, help));
    } else if (help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center>Running %s %s</center><br><center><pre>%s</pre></center><br>", programname, options, help));
    } else {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running %s %s</center><br>\n", programname, options));
    }
    PetscCall(PetscFree(options));
    PetscCall(PetscGetVersion(version, sizeof(version)));
    PetscCall(PetscSNPrintf(intro, introlen,
                            "<body>\n"
                            "<center><h2> <a href=\"https://petsc.org/\">PETSc</a> Application Web server powered by <a href=\"https://bitbucket.org/saws/saws\">SAWs</a> </h2></center>\n"
                            "<center>This is the default PETSc application dashboard, from it you can access any published PETSc objects or logging data</center><br><center>%s configured with %s</center><br>\n"
                            "%s",
                            version, petscconfigureoptions, appline));
    PetscCallSAWs(SAWs_Push_Body, ("index.html", 0, intro));
    PetscCall(PetscFree(intro));
    PetscCall(PetscFree(appline));
    if (selectport) {
      PetscBool silent;

      /* another process may have grabbed the port so keep trying */
      while (SAWs_Initialize()) {
        PetscCallSAWs(SAWs_Get_Available_Port, (&port));
        PetscCallSAWs(SAWs_Set_Port, (port));
      }

      PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select_silent", &silent, NULL));
      if (!silent) {
        PetscCallSAWs(SAWs_Get_FullURL, (sizeof(sawsurl), sawsurl));
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Point your browser to %s for SAWs\n", sawsurl));
      }
    } else {
      PetscCallSAWs(SAWs_Initialize, ());
    }
    PetscCall(PetscCitationsRegister("@TechReport{ saws,\n"
                                     "  Author = {Matt Otten and Jed Brown and Barry Smith},\n"
                                     "  Title  = {Scientific Application Web Server (SAWs) Users Manual},\n"
                                     "  Institution = {Argonne National Laboratory},\n"
                                     "  Year   = 2013\n}\n",
                                     NULL));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
727 #endif
728 
/* Things must be done before MPI_Init() when MPI is not yet initialized, and can be shared between C init and Fortran init */
PETSC_INTERN PetscErrorCode PetscPreMPIInit_Private(void)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_HWLOC_SOLARIS_BUG)
  /* see MPI.py for details on this bug; failure to set the variable is deliberately ignored */
  (void)setenv("HWLOC_COMPONENTS", "-x86", 1);
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
739 
740 #if PetscDefined(HAVE_ADIOS)
741   #include <adios.h>
742   #include <adios_read.h>
743 int64_t Petsc_adios_group;
744 #endif
745 #if PetscDefined(HAVE_OPENMP)
746   #include <omp.h>
747 PetscInt PetscNumOMPThreads;
748 #endif
749 
750 #include <petsc/private/deviceimpl.h>
751 #if PetscDefined(HAVE_CUDA)
752   #include <petscdevice_cuda.h>
753 // REMOVE ME
754 cudaStream_t PetscDefaultCudaStream = NULL;
755 #endif
756 #if PetscDefined(HAVE_HIP)
757   #include <petscdevice_hip.h>
758 // REMOVE ME
759 hipStream_t PetscDefaultHipStream = NULL;
760 #endif
761 
762 #if PetscDefined(HAVE_DLFCN_H)
763   #include <dlfcn.h>
764 #endif
765 PETSC_INTERN PetscErrorCode PetscLogInitialize(void);
766 #if PetscDefined(HAVE_VIENNACL)
767 PETSC_EXTERN PetscErrorCode PetscViennaCLInit(void);
768 PetscBool                   PetscViennaCLSynchronize = PETSC_FALSE;
769 #endif
770 
771 PetscBool PetscCIEnabled = PETSC_FALSE, PetscCIEnabledPortableErrorOutput = PETSC_FALSE;
772 
773 /*
774   PetscInitialize_Common  - shared code between C and Fortran initialization
775 
776   prog:     program name
777   file:     optional PETSc database file name. Might be in Fortran string format when 'ftn' is true
778   help:     program help message
779   ftn:      is it called from Fortran initialization (petscinitializef_)?
780   len:      length of file string, used when Fortran is true
781 */
782 PETSC_INTERN PetscErrorCode PetscInitialize_Common(const char *prog, const char *file, const char *help, PetscBool ftn, PetscInt len)
783 {
784   PetscMPIInt size;
785   PetscBool   flg = PETSC_TRUE;
786   char        hostname[256];
787   PetscBool   blas_view_flag = PETSC_FALSE;
788 
789   PetscFunctionBegin;
790   if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
791   /* these must be initialized in a routine, not as a constant declaration */
792   PETSC_STDOUT = stdout;
793   PETSC_STDERR = stderr;
794 
795   /* PetscCall can be used from now */
796   PetscErrorHandlingInitialized = PETSC_TRUE;
797 
798   /*
799       The checking over compatible runtime libraries is complicated by the MPI ABI initiative
800       https://wiki.mpich.org/mpich/index.php/ABI_Compatibility_Initiative which started with
801         MPICH v3.1 (Released February 2014)
802         IBM MPI v2.1 (December 2014)
803         Intel MPI Library v5.0 (2014)
804         Cray MPT v7.0.0 (June 2014)
805       As of July 31, 2017 the ABI number still appears to be 12, that is all of the versions
806       listed above and since that time are compatible.
807 
808       Unfortunately the MPI ABI initiative has not defined a way to determine the ABI number
809       at compile time or runtime. Thus we will need to systematically track the allowed versions
810       and how they are represented in the mpi.h and MPI_Get_library_version() output in order
811       to perform the checking.
812 
813       Currently we only check for pre MPI ABI versions (and packages that do not follow the MPI ABI).
814 
815       Questions:
816 
817         Should the checks for ABI incompatibility be only on the major version number below?
818         Presumably the output to stderr will be removed before a release.
819   */
820 
821 #if defined(PETSC_HAVE_MPI_GET_LIBRARY_VERSION)
822   {
823     char        mpilibraryversion[MPI_MAX_LIBRARY_VERSION_STRING];
824     PetscMPIInt mpilibraryversionlength;
825 
826     PetscCallMPI(MPI_Get_library_version(mpilibraryversion, &mpilibraryversionlength));
827     /* check for MPICH versions before MPI ABI initiative */
828   #if defined(MPICH_VERSION)
829     #if MPICH_NUMVERSION < 30100000
830     {
831       char     *ver, *lf;
832       PetscBool flg = PETSC_FALSE;
833 
834       PetscCall(PetscStrstr(mpilibraryversion, "MPICH Version:", &ver));
835       if (ver) {
836         PetscCall(PetscStrchr(ver, '\n', &lf));
837         if (lf) {
838           *lf = 0;
839           PetscCall(PetscStrendswith(ver, MPICH_VERSION, &flg));
840         }
841       }
842       if (!flg) {
843         PetscCall(PetscInfo(NULL, "PETSc warning --- MPICH library version \n%s does not match what PETSc was compiled with %s.\n", mpilibraryversion, MPICH_VERSION));
844         flg = PETSC_TRUE;
845       }
846     }
847     #endif
848       /* check for Open MPI version, it is not part of the MPI ABI initiative (is it part of another initiative that needs to be handled?) */
849   #elif defined(PETSC_HAVE_OPENMPI)
850     {
851       char     *ver, bs[MPI_MAX_LIBRARY_VERSION_STRING], *bsf;
852       PetscBool flg                                              = PETSC_FALSE;
853     #define PSTRSZ 2
854       char      ompistr1[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"Open MPI", "FUJITSU MPI"};
855       char      ompistr2[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"v", "Library "};
856       int       i;
857       for (i = 0; i < PSTRSZ; i++) {
858         PetscCall(PetscStrstr(mpilibraryversion, ompistr1[i], &ver));
859         if (ver) {
860           PetscCall(PetscSNPrintf(bs, MPI_MAX_LIBRARY_VERSION_STRING, "%s%d.%d", ompistr2[i], PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
861           PetscCall(PetscStrstr(ver, bs, &bsf));
862           if (bsf) flg = PETSC_TRUE;
863           break;
864         }
865       }
866       if (!flg) {
867         PetscCall(PetscInfo(NULL, "PETSc warning --- Open MPI library version \n%s does not match what PETSc was compiled with %d.%d.\n", mpilibraryversion, PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
868         flg = PETSC_TRUE;
869       }
870     }
871   #endif
872   }
873 #endif
874 
875 #if defined(PETSC_HAVE_DLADDR) && !(defined(__cray__) && defined(__clang__))
876   /* These symbols are currently in the Open MPI and MPICH libraries; they may not always be, in that case the test will simply not detect the problem */
877   PetscCheck(!dlsym(RTLD_DEFAULT, "ompi_mpi_init") || !dlsym(RTLD_DEFAULT, "MPID_Abort"), PETSC_COMM_SELF, PETSC_ERR_MPI_LIB_INCOMP, "Application was linked against both Open MPI and MPICH based MPI libraries and will not run correctly");
878 #endif
879 
880   PetscCall(PetscOptionsCreateDefault());
881 
882   PetscFinalizeCalled = PETSC_FALSE;
883 
884   PetscCall(PetscSetProgramName(prog));
885   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockOpen));
886   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStdout));
887   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStderr));
888   PetscCall(PetscSpinlockCreate(&PetscCommSpinLock));
889 
890   if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD;
891   PetscCallMPI(MPI_Comm_set_errhandler(PETSC_COMM_WORLD, MPI_ERRORS_RETURN));
892 
893   if (PETSC_MPI_ERROR_CLASS == MPI_ERR_LASTCODE) {
894     PetscCallMPI(MPI_Add_error_class(&PETSC_MPI_ERROR_CLASS));
895     PetscCallMPI(MPI_Add_error_code(PETSC_MPI_ERROR_CLASS, &PETSC_MPI_ERROR_CODE));
896   }
897 
898   /* Done after init due to a bug in MPICH-GM? */
899   PetscCall(PetscErrorPrintfInitialize());
900 
901   PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PetscGlobalRank));
902   PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PetscGlobalSize));
903 
904   MPIU_ENUM        = MPI_INT;
905   MPIU_FORTRANADDR = (sizeof(void *) == sizeof(int)) ? MPI_INT : MPIU_INT64;
906   if (sizeof(size_t) == sizeof(unsigned)) MPIU_SIZE_T = MPI_UNSIGNED;
907   else if (sizeof(size_t) == sizeof(unsigned long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG;
908 #if defined(PETSC_SIZEOF_LONG_LONG)
909   else if (sizeof(size_t) == sizeof(unsigned long long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG_LONG;
910 #endif
911   else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_SUP_SYS, "Could not find MPI type for size_t");
912 
913   /*
914      Initialized the global complex variable; this is because with
915      shared libraries the constructors for global variables
916      are not called; at least on IRIX.
917   */
918 #if defined(PETSC_HAVE_COMPLEX)
919   {
920   #if defined(PETSC_CLANGUAGE_CXX) && !defined(PETSC_USE_REAL___FLOAT128)
921     PetscComplex ic(0.0, 1.0);
922     PETSC_i = ic;
923   #else
924     PETSC_i = _Complex_I;
925   #endif
926   }
927 #endif /* PETSC_HAVE_COMPLEX */
928 
929   /*
930      Create the PETSc MPI reduction operator that sums of the first
931      half of the entries and maxes the second half.
932   */
933   PetscCallMPI(MPI_Op_create(MPIU_MaxSum_Local, 1, &MPIU_MAXSUM_OP));
934 
935 #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
936   PetscCallMPI(MPI_Type_contiguous(2, MPI_DOUBLE, &MPIU___FLOAT128));
937   PetscCallMPI(MPI_Type_commit(&MPIU___FLOAT128));
938   #if defined(PETSC_HAVE_COMPLEX)
939   PetscCallMPI(MPI_Type_contiguous(4, MPI_DOUBLE, &MPIU___COMPLEX128));
940   PetscCallMPI(MPI_Type_commit(&MPIU___COMPLEX128));
941   #endif
942 #endif
943 #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
944   PetscCallMPI(MPI_Type_contiguous(2, MPI_CHAR, &MPIU___FP16));
945   PetscCallMPI(MPI_Type_commit(&MPIU___FP16));
946 #endif
947 
948 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
949   PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM));
950   PetscCallMPI(MPI_Op_create(PetscMax_Local, 1, &MPIU_MAX));
951   PetscCallMPI(MPI_Op_create(PetscMin_Local, 1, &MPIU_MIN));
952 #elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
953   PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM___FP16___FLOAT128));
954 #endif
955 
956   PetscCallMPI(MPI_Type_contiguous(2, MPIU_SCALAR, &MPIU_2SCALAR));
957   PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp));
958   PetscCallMPI(MPI_Type_commit(&MPIU_2SCALAR));
959 
960   /* create datatypes used by MPIU_MAXLOC, MPIU_MINLOC and PetscSplitReduction_Op */
961 #if !defined(PETSC_HAVE_MPIUNI)
962   {
963     PetscMPIInt  blockSizes[2]   = {1, 1};
964     MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_real_int, v), offsetof(struct petsc_mpiu_real_int, i)};
965     MPI_Datatype blockTypes[2]   = {MPIU_REAL, MPIU_INT}, tmpStruct;
966 
967     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
968     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_real_int), &MPIU_REAL_INT));
969     PetscCallMPI(MPI_Type_free(&tmpStruct));
970     PetscCallMPI(MPI_Type_commit(&MPIU_REAL_INT));
971   }
972   {
973     PetscMPIInt  blockSizes[2]   = {1, 1};
974     MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_scalar_int, v), offsetof(struct petsc_mpiu_scalar_int, i)};
975     MPI_Datatype blockTypes[2]   = {MPIU_SCALAR, MPIU_INT}, tmpStruct;
976 
977     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
978     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_scalar_int), &MPIU_SCALAR_INT));
979     PetscCallMPI(MPI_Type_free(&tmpStruct));
980     PetscCallMPI(MPI_Type_commit(&MPIU_SCALAR_INT));
981   }
982 #endif
983 
984 #if defined(PETSC_USE_64BIT_INDICES)
985   PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT));
986   PetscCallMPI(MPI_Type_commit(&MPIU_2INT));
987 
988   #if !defined(PETSC_HAVE_MPIUNI)
989   {
990     int          blockSizes[]   = {1, 1};
991     MPI_Aint     blockOffsets[] = {offsetof(struct petsc_mpiu_int_mpiint, a), offsetof(struct petsc_mpiu_int_mpiint, b)};
992     MPI_Datatype blockTypes[]   = {MPIU_INT, MPI_INT}, tmpStruct;
993 
994     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
995     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_int_mpiint), &MPIU_INT_MPIINT));
996     PetscCallMPI(MPI_Type_free(&tmpStruct));
997     PetscCallMPI(MPI_Type_commit(&MPIU_INT_MPIINT));
998   }
999   #endif
1000 #endif
1001   PetscCallMPI(MPI_Type_contiguous(4, MPI_INT, &MPI_4INT));
1002   PetscCallMPI(MPI_Type_commit(&MPI_4INT));
1003   PetscCallMPI(MPI_Type_contiguous(4, MPIU_INT, &MPIU_4INT));
1004   PetscCallMPI(MPI_Type_commit(&MPIU_4INT));
1005 
1006   /*
1007      Attributes to be set on PETSc communicators
1008   */
1009   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &Petsc_Counter_keyval, NULL));
1010   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_InnerComm_Attr_DeleteFn, &Petsc_InnerComm_keyval, NULL));
1011   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_OuterComm_Attr_DeleteFn, &Petsc_OuterComm_keyval, NULL));
1012   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_ShmComm_Attr_DeleteFn, &Petsc_ShmComm_keyval, NULL));
1013   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_CreationIdx_keyval, NULL));
1014   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Garbage_HMap_keyval, NULL));
1015 
1016 #if defined(PETSC_USE_FORTRAN_BINDINGS)
1017   if (ftn) PetscCall(PetscInitFortran_Private(file, len));
1018   else
1019 #endif
1020     PetscCall(PetscOptionsInsert(NULL, &PetscGlobalArgc, &PetscGlobalArgs, file));
1021 
1022   if (PetscDefined(HAVE_MPIUNI)) {
1023     const char *mpienv = getenv("PMI_SIZE");
1024     if (!mpienv) mpienv = getenv("OMPI_COMM_WORLD_SIZE");
1025     if (mpienv) {
1026       PetscInt  isize;
1027       PetscBool mflag = PETSC_FALSE;
1028 
1029       PetscCall(PetscOptionsStringToInt(mpienv, &isize));
1030       PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpiuni-allow-multiprocess-launch", &mflag, NULL));
1031       PetscCheck(isize == 1 || mflag, MPI_COMM_SELF, PETSC_ERR_MPI, "You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc. Or run with -mpiuni-allow-multiprocess-launch to allow multiple independent MPI-uni jobs.");
1032     }
1033   }
1034 
1035   /* call a second time so it can look in the options database */
1036   PetscCall(PetscErrorPrintfInitialize());
1037 
1038   /*
1039      Check system options and print help
1040   */
1041   PetscCall(PetscOptionsCheckInitial_Private(help));
1042 
1043   /*
1044     Creates the logging data structures; this is enabled even if logging is not turned on
1045     This is the last thing we do before returning to the user code to prevent having the
1046     logging numbers contaminated by any startup time associated with MPI
1047   */
1048   PetscCall(PetscLogInitialize());
1049 
1050   /*
1051    Initialize PetscDevice and PetscDeviceContext
1052 
1053    Note to any future devs thinking of moving this, proper initialization requires:
1054    1. MPI initialized
1055    2. Options DB initialized
1056    3. PETSc error handling initialized, specifically signal handlers. This expects to set up
1057       its own SIGSEV handler via the push/pop interface.
1058    4. Logging initialized
1059   */
1060   PetscCall(PetscDeviceInitializeFromOptions_Internal(PETSC_COMM_WORLD));
1061 
1062 #if PetscDefined(HAVE_VIENNACL)
1063   flg = PETSC_FALSE;
1064   PetscCall(PetscOptionsHasName(NULL, NULL, "-log_view", &flg));
1065   if (!flg) PetscCall(PetscOptionsGetBool(NULL, NULL, "-viennacl_synchronize", &flg, NULL));
1066   PetscViennaCLSynchronize = flg;
1067   PetscCall(PetscViennaCLInit());
1068 #endif
1069 
1070   PetscCall(PetscCitationsInitialize());
1071 
1072 #if defined(PETSC_HAVE_SAWS)
1073   PetscCall(PetscInitializeSAWs(ftn ? NULL : help));
1074   flg = PETSC_FALSE;
1075   PetscCall(PetscOptionsHasName(NULL, NULL, "-stack_view", &flg));
1076   if (flg) PetscCall(PetscStackViewSAWs());
1077 #endif
1078 
1079   /*
1080      Load the dynamic libraries (on machines that support them), this registers all
1081      the solvers etc. (On non-dynamic machines this initializes the PetscDraw and PetscViewer classes)
1082   */
1083   PetscCall(PetscInitialize_DynamicLibraries());
1084 
1085   PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
1086   PetscCall(PetscInfo(NULL, "PETSc successfully started: number of processors = %d\n", size));
1087   PetscCall(PetscGetHostName(hostname, sizeof(hostname)));
1088   PetscCall(PetscInfo(NULL, "Running on machine: %s\n", hostname));
1089 #if defined(PETSC_HAVE_OPENMP)
1090   {
1091     PetscBool omp_view_flag;
1092     char     *threads = getenv("OMP_NUM_THREADS");
1093 
1094     if (threads) {
1095       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %s (as given by OMP_NUM_THREADS)\n", threads));
1096       (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumOMPThreads);
1097     } else {
1098       PetscNumOMPThreads = (PetscInt)omp_get_max_threads();
1099       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (as given by omp_get_max_threads())\n", PetscNumOMPThreads));
1100     }
1101     PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "OpenMP options", "Sys");
1102     PetscCall(PetscOptionsInt("-omp_num_threads", "Number of OpenMP threads to use (can also use environmental variable OMP_NUM_THREADS", "None", PetscNumOMPThreads, &PetscNumOMPThreads, &flg));
1103     PetscCall(PetscOptionsName("-omp_view", "Display OpenMP number of threads", NULL, &omp_view_flag));
1104     PetscOptionsEnd();
1105     if (flg) {
1106       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (given by -omp_num_threads)\n", PetscNumOMPThreads));
1107       omp_set_num_threads((int)PetscNumOMPThreads);
1108     }
1109     if (omp_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "OpenMP: number of threads %" PetscInt_FMT "\n", PetscNumOMPThreads));
1110   }
1111 #endif
1112 
1113   PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "BLAS options", "Sys");
1114   PetscCall(PetscOptionsName("-blas_view", "Display number of threads to use for BLAS operations", NULL, &blas_view_flag));
1115 #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) || defined(PETSC_HAVE_MKL_SET_NUM_THREADS) || defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1116   {
1117     char *threads = NULL;
1118 
1119     /* determine any default number of threads requested in the environment; TODO: Apple libraries? */
1120   #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS)
1121     threads = getenv("BLIS_NUM_THREADS");
1122     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by BLIS_NUM_THREADS\n", threads));
1123     if (!threads) {
1124       threads = getenv("OMP_NUM_THREADS");
1125       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by OMP_NUM_THREADS\n", threads));
1126     }
1127   #elif defined(PETSC_HAVE_MKL_SET_NUM_THREADS)
1128     threads = getenv("MKL_NUM_THREADS");
1129     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by MKL_NUM_THREADS\n", threads));
1130     if (!threads) {
1131       threads = getenv("OMP_NUM_THREADS");
1132       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by OMP_NUM_THREADS\n", threads));
1133     }
1134   #elif defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1135     threads = getenv("OPENBLAS_NUM_THREADS");
1136     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OPENBLAS_NUM_THREADS\n", threads));
1137     if (!threads) {
1138       threads = getenv("OMP_NUM_THREADS");
1139       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OMP_NUM_THREADS\n", threads));
1140     }
1141   #endif
1142     if (threads) (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumBLASThreads);
1143     PetscCall(PetscOptionsInt("-blas_num_threads", "Number of threads to use for BLAS operations", "None", PetscNumBLASThreads, &PetscNumBLASThreads, &flg));
1144     if (flg) PetscCall(PetscInfo(NULL, "BLAS: Command line number of BLAS thread %" PetscInt_FMT "given by -blas_num_threads\n", PetscNumBLASThreads));
1145     if (flg || threads) {
1146       PetscCall(PetscBLASSetNumThreads(PetscNumBLASThreads));
1147       if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: number of threads %" PetscInt_FMT "\n", PetscNumBLASThreads));
1148     }
1149   }
1150 #elif defined(PETSC_HAVE_APPLE_ACCELERATE)
1151   PetscCall(PetscInfo(NULL, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1152   if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1153 #else
1154   if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: no thread support\n"));
1155 #endif
1156   PetscOptionsEnd();
1157 
1158 #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
1159   /*
1160       Tell MPI about our own data representation converter, this would/should be used if extern32 is not supported by the MPI
1161 
1162       Currently not used because it is not supported by MPICH.
1163   */
1164   if (!PetscBinaryBigEndian()) PetscCallMPI(MPI_Register_datarep((char *)"petsc", PetscDataRep_read_conv_fn, PetscDataRep_write_conv_fn, PetscDataRep_extent_fn, NULL));
1165 #endif
1166 
1167 #if defined(PETSC_SERIALIZE_FUNCTIONS)
1168   PetscCall(PetscFPTCreate(10000));
1169 #endif
1170 
1171 #if defined(PETSC_HAVE_HWLOC)
1172   {
1173     PetscViewer viewer;
1174     PetscCall(PetscOptionsCreateViewer(PETSC_COMM_WORLD, NULL, NULL, "-process_view", &viewer, NULL, &flg));
1175     if (flg) {
1176       PetscCall(PetscProcessPlacementView(viewer));
1177       PetscCall(PetscViewerDestroy(&viewer));
1178     }
1179   }
1180 #endif
1181 
1182   flg = PETSC_TRUE;
1183   PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewfromoptions", &flg, NULL));
1184   if (!flg) PetscCall(PetscOptionsPushCreateViewerOff(PETSC_TRUE));
1185 
1186 #if defined(PETSC_HAVE_ADIOS)
1187   PetscCallExternal(adios_init_noxml, PETSC_COMM_WORLD);
1188   PetscCallExternal(adios_declare_group, &Petsc_adios_group, "PETSc", "", adios_stat_default);
1189   PetscCallExternal(adios_select_method, Petsc_adios_group, "MPI", "", "");
1190   PetscCallExternal(adios_read_init_method, ADIOS_READ_METHOD_BP, PETSC_COMM_WORLD, "");
1191 #endif
1192 
1193 #if defined(__VALGRIND_H)
1194   PETSC_RUNNING_ON_VALGRIND = RUNNING_ON_VALGRIND ? PETSC_TRUE : PETSC_FALSE;
1195   #if defined(PETSC_USING_DARWIN) && defined(PETSC_BLASLAPACK_SDOT_RETURNS_DOUBLE)
1196   if (PETSC_RUNNING_ON_VALGRIND) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING: Running valgrind with the macOS native BLAS and LAPACK can fail. If it fails, try configuring with --download-fblaslapack or --download-f2cblaslapack"));
1197   #endif
1198 #endif
1199   /*
1200       Set flag that we are completely initialized
1201   */
1202   PetscInitializeCalled = PETSC_TRUE;
1203 
1204   PetscCall(PetscOptionsHasName(NULL, NULL, "-python", &flg));
1205   if (flg) PetscCall(PetscPythonInitialize(NULL, NULL));
1206 
1207   PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
1208   if (flg) PetscCall(PetscInfo(NULL, "Running MPI Linear Solver Server\n"));
1209   if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerBegin());
1210   else PetscCheck(!flg, PETSC_COMM_WORLD, PETSC_ERR_SUP, "PETSc configured using -with-single-library=0; -mpi_linear_solver_server not supported in that case");
1211   PetscFunctionReturn(PETSC_SUCCESS);
1212 }
1213 
1214 // "Unknown section 'Environmental Variables'"
1215 // PetscClangLinter pragma disable: -fdoc-section-header-unknown
1216 /*@C
1217   PetscInitialize - Initializes the PETSc database and MPI.
1218   `PetscInitialize()` calls MPI_Init() if that has yet to be called,
1219   so this routine should always be called near the beginning of
1220   your program -- usually the very first line!
1221 
1222   Collective on `MPI_COMM_WORLD` or `PETSC_COMM_WORLD` if it has been set
1223 
1224   Input Parameters:
1225 + argc - count of number of command line arguments
1226 . args - the command line arguments
1227 . file - [optional] PETSc database file, append ":yaml" to filename to specify YAML options format.
1228           Use `NULL` or empty string to not check for code specific file.
1229           Also checks `~/.petscrc`, `.petscrc` and `petscrc`.
1230           Use `-skip_petscrc` in the code specific file (or command line) to skip `~/.petscrc`, `.petscrc` and `petscrc` files.
1231 - help - [optional] Help message to print, use `NULL` for no message
1232 
   If you wish PETSc code to run ONLY on a subcommunicator of `MPI_COMM_WORLD`, create that
   communicator first and assign it to `PETSC_COMM_WORLD` BEFORE calling `PetscInitialize()`.
   If ALL processes in the job are using `PetscInitialize()` and `PetscFinalize()` then you don't need to do this, even
   if different subcommunicators of the job are doing different things with PETSc.
1237 
1238   Options Database Keys:
1239 + -help [intro]                                       - prints help method for each option; if `intro` is given the program stops after printing the introductory help message
1240 . -start_in_debugger [noxterm,dbx,xdb,gdb,...]        - Starts program in debugger
1241 . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected
1242 . -on_error_emacs <machinename>                       - causes `emacsclient` to jump to error file if an error is detected
1243 . -on_error_abort                                     - calls `abort()` when error detected (no traceback)
1244 . -on_error_mpiabort                                  - calls `MPI_abort()` when error detected
1245 . -error_output_stdout                                - prints PETSc error messages to `stdout` instead of the default `stderr`
1246 . -error_output_none                                  - does not print the error messages (but handles errors in the same way as if this was not called)
1247 . -debugger_ranks [rank1,rank2,...]                   - Indicates MPI ranks to start in debugger
1248 . -debugger_pause [sleeptime] (in seconds)            - Pauses debugger, use if it takes a long time for the debugger to start up on your system
1249 . -stop_for_debugger                                  - Print message on how to attach debugger manually to
1250                                                         process and wait (`-debugger_pause`) seconds for attachment
1251 . -malloc_dump                                        - prints a list of all unfreed memory at the end of the run
1252 . -malloc_test                                        - like `-malloc_dump` `-malloc_debug`, only active for debugging build, ignored in optimized build. Often set in `PETSC_OPTIONS` environmental variable
1253 . -malloc_view                                        - show a list of all allocated memory during `PetscFinalize()`
1254 . -malloc_view_threshold <t>                          - only list memory allocations of size greater than t with `-malloc_view`
1255 . -malloc_requested_size                              - malloc logging will record the requested size rather than (possibly large) size after alignment
1256 . -fp_trap                                            - Stops on floating point exceptions
1257 . -no_signal_handler                                  - Indicates not to trap error signals
1258 . -shared_tmp                                         - indicates `/tmp` directory is known to be shared by all processors
1259 . -not_shared_tmp                                     - indicates each processor has own `/tmp`
1260 . -tmp                                                - alternative directory to use instead of `/tmp`
1261 . -python <exe>                                       - Initializes Python, and optionally takes a Python executable name
1262 - -mpiuni-allow-multiprocess-launch                   - allow `mpiexec` to launch multiple independent MPI-Uni jobs, otherwise a sanity check error is invoked to prevent misuse of MPI-Uni
1263 
1264   Options Database Keys for Option Database:
1265 + -skip_petscrc           - skip the default option files `~/.petscrc`, `.petscrc`, `petscrc`
1266 . -options_monitor        - monitor all set options to standard output for the whole program run
1267 - -options_monitor_cancel - cancel options monitoring hard-wired using `PetscOptionsMonitorSet()`
1268 
1269    Options -options_monitor_{all,cancel} are
1270    position-independent and apply to all options set since the PETSc start.
1271    They can be used also in option files.
1272 
1273    See `PetscOptionsMonitorSet()` to do monitoring programmatically.
1274 
1275   Options Database Keys for Profiling:
1276    See Users-Manual: ch_profiling for details.
1277 + -info [filename][:[~]<list,of,classnames>[:[~]self]] - Prints verbose information. See `PetscInfo()`.
1278 . -log_sync                                            - Enable barrier synchronization for all events. This option is useful to debug imbalance within each event,
1279                                                          however it slows things down and gives a distorted view of the overall runtime.
1280 . -log_trace [filename]                                - Print traces of all PETSc calls to the screen (useful to determine where a program
1281                                                          hangs without running in the debugger).  See `PetscLogTraceBegin()`.
1282 . -log_view [:filename:format][,[:filename:format]...] - Prints summary of flop and timing information to screen or file, see `PetscLogView()` (up to 4 viewers)
1283 . -log_view_memory                                     - Includes in the summary from -log_view the memory used in each event, see `PetscLogView()`.
. -log_view_gpu_time                                   - Includes in the summary from -log_view the time used in each GPU kernel, see `PetscLogView()`.
1285 . -log_view_gpu_energy                                 - Includes in the summary from -log_view the energy (estimated with power*gtime) consumed in each GPU kernel, see `PetscLogView()`.
1286 . -log_view_gpu_energy_meter                           - Includes in the summary from -log_view the energy (readings from meters) consumed in each GPU kernel, see `PetscLogView()`.
1287 . -log_exclude: <vec,mat,pc,ksp,snes>                  - excludes subset of object classes from logging
1288 . -log [filename]                                      - Logs profiling information in a dump file, see `PetscLogDump()`.
1289 . -log_all [filename]                                  - Same as `-log`.
1290 . -log_mpe [filename]                                  - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution)
1291 . -log_perfstubs                                       - Starts a log handler with the perfstubs interface (which is used by TAU)
1292 . -log_nvtx                                            - Starts an nvtx log handler for use with Nsight
1293 . -log_roctx                                           - Starts an roctx log handler for use with rocprof on AMD GPUs
1294 . -viewfromoptions on,off                              - Enable or disable `XXXSetFromOptions()` calls, for applications with many small solves turn this off
1295 . -get_total_flops                                     - Returns total flops done by all processors
1296 . -memory_view                                         - Print memory usage at end of run
1297 - -check_pointer_intensity 0,1,2                       - if pointers are checked for validity (debug version only), using 0 will result in faster code
1298 
1299   Options Database Keys for SAWs:
1300 + -saws_port <portnumber>        - port number to publish SAWs data, default is 8080
1301 . -saws_port_auto_select         - have SAWs select a new unique port number where it publishes the data, the URL is printed to the screen
1302                                    this is useful when you are running many jobs that utilize SAWs at the same time
1303 . -saws_log <filename>           - save a log of all SAWs communication
1304 . -saws_https <certificate file> - have SAWs use HTTPS instead of HTTP
1305 - -saws_root <directory>         - allow SAWs to have access to the given directory to search for requested resources and files
1306 
1307   Environmental Variables:
1308 +   `PETSC_TMP`                   - alternative directory to use instead of `/tmp`
1309 .   `PETSC_SHARED_TMP`            - `/tmp` is shared by all processes
1310 .   `PETSC_NOT_SHARED_TMP`        - each process has its own private `/tmp`
1311 .   `PETSC_OPTIONS`               - a string containing additional options for PETSc in the form of command line "-key value" pairs
1312 .   `PETSC_OPTIONS_YAML`          - (requires configuring PETSc to use libyaml with `--download-yaml`) a string containing additional options for PETSc in the form of a YAML document
1313 .   `PETSC_VIEWER_SOCKET_PORT`    - socket number to use for socket viewer
1314 -   `PETSC_VIEWER_SOCKET_MACHINE` - machine to use for socket viewer to connect to
1315 
1316   Level: beginner
1317 
1318   Note:
1319   If for some reason you must call `MPI_Init()` separately from `PetscInitialize()`, call
1320   it before `PetscInitialize()`.
1321 
1322   Fortran Notes:
1323   In Fortran this routine can be called with
1324 .vb
1325        call PetscInitialize(ierr)
1326        call PetscInitialize(file,ierr) or
1327        call PetscInitialize(file,help,ierr)
1328 .ve
1329 
1330   If your main program is C but you call Fortran code that also uses PETSc you need to call `PetscInitializeFortran()` soon after
1331   calling `PetscInitialize()`.
1332 
1333   Options Database Key for Developers:
1334 . -checkfunctionlist - automatically checks that function lists associated with objects are correctly cleaned up. Produces messages of the form:
1335                        "function name: MatInodeGetInodeSizes_C" if they are not cleaned up. This flag is always set for the test harness (in framework.py)
1336 
1337 .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscInitializeNoArguments()`, `PetscLogGpuTime()`
1338 @*/
1339 PetscErrorCode PetscInitialize(int *argc, char ***args, const char file[], const char help[])
1340 {
1341   PetscMPIInt flag;
1342   const char *prog = "Unknown Name";
1343 
1344   PetscFunctionBegin;
1345   if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
1346   PetscCallMPI(MPI_Initialized(&flag));
1347   if (!flag) {
1348     PetscCheck(PETSC_COMM_WORLD == MPI_COMM_NULL, PETSC_COMM_SELF, PETSC_ERR_SUP, "You cannot set PETSC_COMM_WORLD if you have not initialized MPI first");
1349     PetscCall(PetscPreMPIInit_Private());
1350 #if defined(PETSC_HAVE_MPI_INIT_THREAD)
1351     {
1352       PetscMPIInt provided;
1353       PetscCallMPI(MPI_Init_thread(argc, args, PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE ? MPI_THREAD_FUNNELED : PETSC_MPI_THREAD_REQUIRED, &provided));
1354       PetscCheck(PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE || provided >= PETSC_MPI_THREAD_REQUIRED, PETSC_COMM_SELF, PETSC_ERR_MPI, "The MPI implementation's provided thread level is less than what you required");
1355       if (PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE) PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_FUNNELED; // assign it a valid value after check-up
1356     }
1357 #else
1358     PetscCallMPI(MPI_Init(argc, args));
1359 #endif
1360     PetscBeganMPI = PETSC_TRUE;
1361   }
1362 
1363   if (argc && *argc) prog = **args;
1364   if (argc && args) {
1365     PetscGlobalArgc = *argc;
1366     PetscGlobalArgs = *args;
1367   }
1368   PetscCall(PetscInitialize_Common(prog, file, help, PETSC_FALSE, 0));
1369   PetscFunctionReturn(PETSC_SUCCESS);
1370 }
1371 
/* Registry of outstanding PETSc objects used by -objects_dump reporting in PetscFinalize();
   populated elsewhere only when PetscObjectsLog is set */
PETSC_INTERN PetscObject *PetscObjects;
PETSC_INTERN PetscInt     PetscObjectsCounts;
PETSC_INTERN PetscInt     PetscObjectsMaxCounts;
PETSC_INTERN PetscBool    PetscObjectsLog;
1376 
1377 /*
1378     Frees all the MPI types and operations that PETSc may have created
1379 */
1380 PetscErrorCode PetscFreeMPIResources(void)
1381 {
1382   PetscFunctionBegin;
1383 #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
1384   PetscCallMPI(MPI_Type_free(&MPIU___FLOAT128));
1385   #if defined(PETSC_HAVE_COMPLEX)
1386   PetscCallMPI(MPI_Type_free(&MPIU___COMPLEX128));
1387   #endif
1388 #endif
1389 #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
1390   PetscCallMPI(MPI_Type_free(&MPIU___FP16));
1391 #endif
1392 
1393 #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
1394   PetscCallMPI(MPI_Op_free(&MPIU_SUM));
1395   PetscCallMPI(MPI_Op_free(&MPIU_MAX));
1396   PetscCallMPI(MPI_Op_free(&MPIU_MIN));
1397 #elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
1398   PetscCallMPI(MPI_Op_free(&MPIU_SUM___FP16___FLOAT128));
1399 #endif
1400 
1401   PetscCallMPI(MPI_Type_free(&MPIU_2SCALAR));
1402   PetscCallMPI(MPI_Type_free(&MPIU_REAL_INT));
1403   PetscCallMPI(MPI_Type_free(&MPIU_SCALAR_INT));
1404 #if defined(PETSC_USE_64BIT_INDICES)
1405   PetscCallMPI(MPI_Type_free(&MPIU_2INT));
1406   PetscCallMPI(MPI_Type_free(&MPIU_INT_MPIINT));
1407 #endif
1408   PetscCallMPI(MPI_Type_free(&MPI_4INT));
1409   PetscCallMPI(MPI_Type_free(&MPIU_4INT));
1410   PetscCallMPI(MPI_Op_free(&MPIU_MAXSUM_OP));
1411   PetscCallMPI(MPI_Op_free(&Petsc_Garbage_SetIntersectOp));
1412   PetscFunctionReturn(PETSC_SUCCESS);
1413 }
1414 
/* Forward declarations for routines used below in PetscFinalize() */
PETSC_INTERN PetscErrorCode PetscLogFinalize(void);
PETSC_EXTERN PetscErrorCode PetscFreeAlign(void *, int, const char[], const char[]);
1417 
1418 /*@
  PetscFinalize - Checks for options to be called at the conclusion of a PETSc program and frees any remaining PETSc objects and data structures.
  Automatically calls `MPI_Finalize()` if the user had not called `MPI_Init()` before calling `PetscInitialize()`.
1421 
1422   Collective on `PETSC_COMM_WORLD`
1423 
1424   Options Database Keys:
1425 + -options_view                    - Calls `PetscOptionsView()`
1426 . -options_left                    - Prints unused options that remain in the database
. -objects_dump [all]              - Prints list of objects allocated by the user that have not been freed; the option `all` causes all outstanding objects to be listed
1428 . -mpidump                         - Calls PetscMPIDump()
1429 . -malloc_dump <optional filename> - Calls `PetscMallocDump()`, displays all memory allocated that has not been freed
1430 . -memory_view                     - Prints total memory usage
1431 - -malloc_view <optional filename> - Prints list of all memory allocated and in what functions
1432 
1433   Level: beginner
1434 
1435   Note:
1436   See `PetscInitialize()` for other runtime options.
1437 
1438   You can call `PetscInitialize()` after `PetscFinalize()` but only with MPI-Uni or if you called `MPI_Init()` before ever calling `PetscInitialize()`.
1439 
1440 .seealso: `PetscInitialize()`, `PetscOptionsView()`, `PetscMallocDump()`, `PetscMPIDump()`, `PetscEnd()`
1441 @*/
PetscErrorCode PetscFinalize(void)
{
  PetscMPIInt rank;
  PetscInt    nopt;
  PetscBool   flg1 = PETSC_FALSE, flg2 = PETSC_FALSE, flg3 = PETSC_FALSE;
  PetscBool   flg;
  char        mname[PETSC_MAX_PATH_LEN];

  PetscFunctionBegin;
  PetscCheck(PetscInitializeCalled, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "PetscInitialize() must be called before PetscFinalize()");
  PetscCall(PetscInfo(NULL, "PetscFinalize() called\n"));

  /* If the MPI linear solver server was activated, shut it down before any other teardown */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
  if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerEnd());

  /* Release the copy of the command-line arguments kept for Fortran and clear the global argument pointers */
  PetscCall(PetscFreeAlign(PetscGlobalArgsFortran, 0, NULL, NULL));
  PetscGlobalArgc = 0;
  PetscGlobalArgs = NULL;

  /* Clean up Garbage automatically on COMM_SELF and COMM_WORLD at finalize */
  {
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscMPIInt flg;
    void       *tmp;

    /* Only run the cleanup if the inner communicator exists AND carries a garbage hash-map attribute */
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_SELF));
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
    if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD));
  }

  /* rank is used below to build per-process dump file names */
  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
#if defined(PETSC_HAVE_ADIOS)
  PetscCallExternal(adios_read_finalize_method, ADIOS_READ_METHOD_BP_AGGREGATE);
  PetscCallExternal(adios_finalize, rank);
#endif
  /* -citations: print (to stdout or the given file) the citations registered during the run */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-citations", &flg));
  if (flg) {
    char *cits, filename[PETSC_MAX_PATH_LEN];
    FILE *fd = PETSC_STDOUT;

    PetscCall(PetscOptionsGetString(NULL, NULL, "-citations", filename, sizeof(filename), NULL));
    if (filename[0]) PetscCall(PetscFOpen(PETSC_COMM_WORLD, filename, "w", &fd));
    PetscCall(PetscSegBufferGet(PetscCitationsList, 1, &cits));
    cits[0] = 0;
    PetscCall(PetscSegBufferExtractAlloc(PetscCitationsList, &cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "If you publish results based on this computation please cite the following:\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "%s", cits));
    PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
    PetscCall(PetscFClose(PETSC_COMM_WORLD, fd));
    PetscCall(PetscFree(cits));
  }
  PetscCall(PetscSegBufferDestroy(&PetscCitationsList));

#if defined(PETSC_SERIALIZE_FUNCTIONS)
  PetscCall(PetscFPTDestroy());
#endif

#if defined(PETSC_HAVE_X)
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-x_virtual", &flg1, NULL));
  if (flg1) {
    /*  this is a crude hack, but better than nothing */
    PetscCall(PetscPOpen(PETSC_COMM_WORLD, NULL, "pkill -15 Xvfb", "r", NULL));
  }
#endif

#if !defined(PETSC_HAVE_THREADSAFETY)
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-memory_view", &flg2, NULL));
  if (flg2) PetscCall(PetscMemoryView(PETSC_VIEWER_STDOUT_WORLD, "Summary of Memory Usage in PETSc\n"));
#endif

  if (PetscDefined(USE_LOG)) {
    flg1 = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-get_total_flops", &flg1, NULL));
    if (flg1) {
      PetscLogDouble flops = 0;
      PetscCallMPI(MPI_Reduce(&petsc_TotalFlops, &flops, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Total flops over all processors %g\n", flops));
    }
  }

  if (PetscDefined(USE_LOG) && PetscDefined(HAVE_MPE)) {
    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_mpe", mname, sizeof(mname), &flg1));
    if (flg1) PetscCall(PetscLogMPEDump(mname[0] ? mname : NULL));
  }

#if defined(PETSC_HAVE_KOKKOS)
  // Free PETSc/kokkos stuff before the potentially non-null PETSc default gpu stream is destroyed by PetscObjectRegisterDestroyAll
  if (PetscKokkosInitialized) {
    PetscCall(PetscKokkosFinalize_Private());
    PetscKokkosInitialized = PETSC_FALSE;
  }
#endif

  // Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_().
  PetscCall(PetscObjectRegisterDestroyAll());

  if (PetscDefined(USE_LOG)) {
    PetscCall(PetscOptionsPushCreateViewerOff(PETSC_FALSE));
    PetscCall(PetscLogViewFromOptions());
    PetscCall(PetscOptionsPopCreateViewerOff());
    //  It should be turned on with PetscLogGpuTime() and never turned off except in this place
    PetscLogGpuTimeFlag = PETSC_FALSE;

    // Free any objects created by the last block of code.
    PetscCall(PetscObjectRegisterDestroyAll());

    mname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log_all", mname, sizeof(mname), &flg1));
    PetscCall(PetscOptionsGetString(NULL, NULL, "-log", mname, sizeof(mname), &flg2));
    if (flg1 || flg2) PetscCall(PetscLogDump(mname));
  }

  /* Remove the PETSc signal handler unless the user ran with -no_signal_handler */
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-no_signal_handler", &flg1, NULL));
  if (!flg1) PetscCall(PetscPopSignalHandler());
  flg1 = PETSC_FALSE;
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpidump", &flg1, NULL));
  if (flg1) PetscCall(PetscMPIDump(stdout));
  flg1 = PETSC_FALSE;
  flg2 = PETSC_FALSE;
  /* preemptive call to avoid listing this option in options table as unused */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-malloc_dump", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_view", &flg2, NULL));

  if (flg2) PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD));

  /* to prevent PETSc -options_left from warning */
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox", &flg1));
  PetscCall(PetscOptionsHasName(NULL, NULL, "-nox_warning", &flg1));

  flg3 = PETSC_FALSE; /* default value is required */
  PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_left", &flg3, &flg1));
  if (!flg1) flg3 = PETSC_TRUE;
  if (flg3) {
    if (!flg2 && flg1) { /* have not yet printed the options */
      PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD));
    }
    PetscCall(PetscOptionsAllUsed(NULL, &nopt));
    if (nopt) {
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! There are options you set that were not used!\n"));
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! could be spelling mistake, etc!\n"));
      if (nopt == 1) {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There is one unused database option. It is:\n"));
      } else {
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are %" PetscInt_FMT " unused database options. They are:\n", nopt));
      }
    } else if (flg3 && flg1) {
      PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are no unused options.\n"));
    }
    PetscCall(PetscOptionsLeft(NULL));
  }

#if defined(PETSC_HAVE_SAWS)
  if (!PetscGlobalRank) {
    PetscCall(PetscStackSAWsViewOff());
    PetscCallSAWs(SAWs_Finalize, ());
  }
#endif

  /*
       List all objects the user may have forgot to free
  */
  if (PetscDefined(USE_LOG) && PetscObjectsLog) {
    PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
    if (flg1) {
      MPI_Comm local_comm;
      char     string[64];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-objects_dump", string, sizeof(string), NULL));
      /* dup the communicator so the sequential-phase tag traffic cannot collide with user communication */
      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscObjectsDump(stdout, (string[0] == 'a') ? PETSC_TRUE : PETSC_FALSE));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }

  PetscObjectsCounts    = 0;
  PetscObjectsMaxCounts = 0;
  PetscCall(PetscFree(PetscObjects));

  /*
     Destroy any packages that registered a finalize
  */
  PetscCall(PetscRegisterFinalizeAll());

  PetscCall(PetscLogFinalize());

  /*
     Print PetscFunctionLists that have not been properly freed
  */
  if (PetscPrintFunctionList) PetscCall(PetscFunctionListPrintAll());

  if (petsc_history) {
    PetscCall(PetscCloseHistoryFile(&petsc_history));
    petsc_history = NULL;
  }
  PetscCall(PetscOptionsHelpPrintedDestroy(&PetscOptionsHelpPrintedSingleton));
  PetscCall(PetscInfoDestroy());

#if !defined(PETSC_HAVE_THREADSAFETY)
  /* malloc diagnostics are skipped under valgrind, which tracks memory itself */
  if (!(PETSC_RUNNING_ON_VALGRIND)) {
    char  fname[PETSC_MAX_PATH_LEN];
    char  sname[PETSC_MAX_PATH_LEN];
    FILE *fd;
    int   err;

    flg2 = PETSC_FALSE;
    flg3 = PETSC_FALSE;
    if (PetscDefined(USE_DEBUG)) PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_test", &flg2, NULL));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_debug", &flg3, NULL));
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_dump", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      /* per-rank dump file: <fname>_<rank> */
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocDump(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1 || flg2 || flg3) {
      MPI_Comm local_comm;

      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocDump(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
    fname[0] = 0;
    PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_view", fname, sizeof(fname), &flg1));
    if (flg1 && fname[0]) {
      PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
      fd = fopen(sname, "w");
      PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
      PetscCall(PetscMallocView(fd));
      err = fclose(fd);
      PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
    } else if (flg1) {
      MPI_Comm local_comm;

      PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
      PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
      PetscCall(PetscMallocView(stdout));
      PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
      PetscCallMPI(MPI_Comm_free(&local_comm));
    }
  }
#endif

  /*
     Close any open dynamic libraries
  */
  PetscCall(PetscFinalize_DynamicLibraries());

  /* Can be destroyed only after all the options are used */
  PetscCall(PetscOptionsDestroyDefault());

#if defined(PETSC_HAVE_NVSHMEM)
  if (PetscBeganNvshmem) {
    PetscCall(PetscNvshmemFinalize());
    PetscBeganNvshmem = PETSC_FALSE;
  }
#endif

  PetscCall(PetscFreeMPIResources());

  /*
     Destroy any known inner MPI_Comm's and attributes pointing to them
     Note this will not destroy any new communicators the user has created.

     If all PETSc objects were not destroyed those left over objects will have hanging references to
     the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again
 */
  {
    PetscCommCounter *counter;
    PetscMPIInt       flg;
    MPI_Comm          icomm;
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
    PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
    if (flg) {
      icomm = ucomm.comm;
      PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
      PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval));
      PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
      PetscCallMPI(MPI_Comm_free(&icomm));
    }
  }

  /* Free the attribute keyvals PETSc attached to communicators */
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Counter_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_InnerComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_OuterComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_ShmComm_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_CreationIdx_keyval));
  PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Garbage_HMap_keyval));

  // Free keyvals which may be silently created by some routines
  if (Petsc_SharedWD_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedWD_keyval));
  if (Petsc_SharedTmp_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedTmp_keyval));

  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockOpen));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStdout));
  PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStderr));
  PetscCall(PetscSpinlockDestroy(&PetscCommSpinLock));

  /* Call MPI_Finalize() only if PetscInitialize() was the one that called MPI_Init() */
  if (PetscBeganMPI) {
    PetscMPIInt flag;
    PetscCallMPI(MPI_Finalized(&flag));
    PetscCheck(!flag, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()");
    /* wait until the very last moment to disable error handling */
    PetscErrorHandlingInitialized = PETSC_FALSE;
    PetscCallMPI(MPI_Finalize());
  } else PetscErrorHandlingInitialized = PETSC_FALSE;

  /*
     Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because
   the communicator has some outstanding requests on it. Specifically if the
   flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See
   src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate()
   is never freed as it should be. Thus one may obtain messages of the form
   [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the
   memory was not freed.

*/
  PetscCall(PetscMallocClear());
  PetscCall(PetscStackReset());

  PetscInitializeCalled = PETSC_FALSE;
  PetscFinalizeCalled   = PETSC_TRUE;
#if defined(PETSC_USE_COVERAGE)
  /*
     flush gcov, otherwise during CI the flushing continues into the next pipeline resulting in git not being able to delete directories since the
     gcov files are still being added to the directories as git tries to remove the directories.
   */
  __gcov_flush();
#endif
  /* To match PetscFunctionBegin() at the beginning of this function */
  PetscStackClearTop;
  return PETSC_SUCCESS;
}
1809 
#if defined(PETSC_MISSING_LAPACK_lsame_)
/* Fallback for LAPACK's lsame_: ASCII case-insensitive equality of two single characters.
   Returns 1 when the characters match ignoring case (offset 32 between cases), 0 otherwise. */
PETSC_EXTERN int lsame_(char *a, char *b)
{
  const char ca = *a, cb = *b;

  return (ca == cb || ca + 32 == cb || ca - 32 == cb) ? 1 : 0;
}
#endif
1819 
#if defined(PETSC_MISSING_LAPACK_lsame)
/* Fallback for LAPACK's lsame (no trailing underscore name mangling): ASCII case-insensitive
   equality of two single characters; 1 on match ignoring case, 0 otherwise. */
PETSC_EXTERN int lsame(char *a, char *b)
{
  const char ca = *a, cb = *b;

  return (ca == cb || ca + 32 == cb || ca - 32 == cb) ? 1 : 0;
}
#endif
1829 
1830 static inline PetscMPIInt MPIU_Allreduce_Count(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
1831 {
1832   PetscMPIInt err;
1833 #if !defined(PETSC_HAVE_MPI_LARGE_COUNT)
1834   PetscMPIInt count2;
1835 
1836   PetscMPIIntCast_Internal(count, &count2);
1837   err = MPI_Allreduce((void *)inbuf, outbuf, count2, dtype, op, comm);
1838 #else
1839   err = MPI_Allreduce_c((void *)inbuf, outbuf, count, dtype, op, comm);
1840 #endif
1841   return err;
1842 }
1843 
1844 /*
1845      When count is 1 and dtype == MPIU_INT performs the reduction in PetscInt64 to check for integer overflow
1846 */
1847 PetscMPIInt MPIU_Allreduce_Private(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
1848 {
1849   PetscMPIInt err;
1850   if (!PetscDefined(USE_64BIT_INDICES) && count == 1 && dtype == MPIU_INT && (op == MPI_SUM || op == MPI_PROD)) {
1851     PetscInt64 incnt, outcnt;
1852     void      *inbufd, *outbufd;
1853 
1854     if (inbuf != MPI_IN_PLACE) {
1855       incnt   = *(PetscInt32 *)inbuf;
1856       inbufd  = &incnt;
1857       outbufd = &outcnt;
1858       err     = MPIU_Allreduce_Count(inbufd, outbufd, count, MPIU_INT64, op, comm);
1859     } else {
1860       outcnt  = *(PetscInt32 *)outbuf;
1861       outbufd = &outcnt;
1862       err     = MPIU_Allreduce_Count(MPI_IN_PLACE, outbufd, count, MPIU_INT64, op, comm);
1863     }
1864     if (!err && outcnt > PETSC_INT_MAX) err = MPI_ERR_OTHER;
1865     *(PetscInt32 *)outbuf = (PetscInt32)outcnt;
1866   } else {
1867     err = MPIU_Allreduce_Count(inbuf, outbuf, count, dtype, op, comm);
1868   }
1869   return err;
1870 }
1871 
// Check if MPIU_Allreduce() is called on the same filename:lineno and with the same data count across all processes. Error out if otherwise.
PetscErrorCode PetscCheckAllreduceSameLineAndCount_Private(MPI_Comm comm, const char *filename, PetscMPIInt lineno, PetscMPIInt count)
{
  PetscMPIInt rbuf[4];

  PetscFunctionBegin;
  /* Reduce (lineno, -lineno, count, -count) with MPI_MAX: max(x) == -max(-x) holds
     exactly when x has the same value on every rank, so one allreduce checks both */
  rbuf[0] = lineno;
  rbuf[1] = -rbuf[0];
  rbuf[2] = count;
  rbuf[3] = -rbuf[2];
  PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, rbuf, 4, MPI_INT, MPI_MAX, comm));

  if (rbuf[0] != -rbuf[1]) {
    /* Line numbers differ: gather a "rank, file:line" string from every process onto rank 0
       so the error message can show where each process made the call */
    size_t      len;
    PetscMPIInt size, rank, ilen, *recvcounts = NULL, *displs = NULL;
    char       *str = NULL, *str0 = NULL;

    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscStrlen(filename, &len));
    len += 128; /* add enough space for the leading and trailing chars in PetscSNPrintf around __FILE__ */
    PetscCall(PetscMalloc1(len, &str));
    PetscCall(PetscSNPrintf(str, len, "                On process %d, %s:%d\n", rank, filename, lineno));
    PetscCall(PetscStrlen(str, &len)); /* string length exclusive of the NULL terminator */
    ilen = (PetscMPIInt)len;
    /* recvcounts/displs (and later str0) are needed only on the gather root */
    if (rank == 0) PetscCall(PetscMalloc2(size, &recvcounts, size + 1, &displs));
    PetscCallMPI(MPI_Gather(&ilen, 1, MPI_INT, recvcounts, 1, MPI_INT, 0, comm));
    if (rank == 0) {
      displs[0] = 0;
      for (PetscMPIInt i = 0; i < size; i++) displs[i + 1] = displs[i] + recvcounts[i];
      PetscCall(PetscMalloc1(displs[size], &str0));
    }
    PetscCallMPI(MPI_Gatherv(str, ilen, MPI_CHAR, str0, recvcounts, displs, MPI_CHAR, 0, comm));
    if (rank == 0) str0[displs[size] - 1] = 0; /* replace the ending \n with NULL */
    PetscCall(PetscFree(str));
    if (rank == 0) PetscCall(PetscFree2(recvcounts, displs));
    /* NOTE(review): on ranks != 0 str0 is NULL here, so the %s below relies on the PETSc
       printf layer tolerating a NULL string argument — confirm before changing this path */
    SETERRQ(comm, PETSC_ERR_PLIB, "MPIU_Allreduce() called in different locations on different processes:\n%s", str0);
  }
  PetscCheck(rbuf[2] == -rbuf[3], comm, PETSC_ERR_PLIB, "MPIU_Allreduce() called with different counts %d on different processes", count);
  PetscFunctionReturn(PETSC_SUCCESS);
}
1913 
/*@C
  PetscCtxDestroyDefault - An implementation of a `PetscCtxDestroyFn` that uses `PetscFree()` to free the context

  Input Parameter:
. ctx - the context to be destroyed

  Level: intermediate

  Note:
  This is not called directly, rather it is passed to `DMSetApplicationContextDestroy()`, `PetscContainerSetDestroy()`,
  `PetscObjectContainerCreate()` and similar routines and then called by the destructor of the associated object.

.seealso: `PetscObject`, `PetscCtxDestroyFn`, `PetscObjectDestroy()`, `DMSetApplicationContextDestroy()`,  `PetscContainerSetDestroy()`,
           `PetscObjectContainerCreate()`
@*/
PETSC_EXTERN PetscErrorCode PetscCtxDestroyDefault(void **ctx)
{
  PetscFunctionBegin;
  /* PetscFree() also sets *ctx to NULL, so the caller's pointer cannot dangle */
  PetscCall(PetscFree(*ctx));
  PetscFunctionReturn(PETSC_SUCCESS);
}
1935