xref: /petsc/src/sys/mpiuni/mpi.c (revision 607e733f3db3ee7f6f605a13295c517df8dbb9c9)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include <petscsys.h>
6 #ifndef MPIUNI_H
7   #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9 
10 #include <petscdevice_cupm.h>
11 #include <petsc/private/petscimpl.h>
12 
#define MPI_SUCCESS 0
#define MPI_FAILURE 1

/* Scratch global used as an assignment target by the no-op MPI macros in MPIUNI's mpi.h */
void *MPIUNI_TMP = NULL;
17 
18 /*
19        With MPI Uni there are exactly four distinct communicators:
    MPI_COMM_SELF, MPI_COMM_WORLD, and a MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)
21 
22     MPI_COMM_SELF and MPI_COMM_WORLD are MPI_Comm_free() in MPI_Finalize() but in general with PETSc,
23      the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).
24 
25 */
#define MAX_ATTR 256
#define MAX_COMM 128

/* One attribute value as attached to one particular communicator */
typedef struct {
  void *attribute_val;
  int   active;
} MPI_Attr;

/* One keyval: the delete callback and its extra state, shared by every communicator using
   the keyval.  'active' also serves as a reference count: one reference is held by the
   keyval handle itself plus one per communicator that currently has the attribute set
   (see MPI_Attr_reference_keyval()/MPI_Attr_dereference_keyval() below). */
typedef struct {
  void                *extra_state;
  MPI_Delete_function *del;
  int                  active; /* Is this keyval in use by some comm? */
} MPI_Attr_keyval;

static MPI_Attr_keyval attr_keyval[MAX_ATTR];
static MPI_Attr        attr[MAX_COMM][MAX_ATTR];
static int             comm_active[MAX_COMM]; /* Boolean array indicating which comms are in use */
static int             mpi_tag_ub           = 100000000;
static int             num_attr             = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
static int             MaxComm              = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
static void           *MPIUNIF_mpi_in_place = 0; /* address of the Fortran MPI_IN_PLACE sentinel, registered via mpiunisetfortranbasepointers_() */

#define CommIdx(comm) ((comm) - 1) /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
49 
50 #if defined(__cplusplus)
51 extern "C" {
52 #endif
53 
/*
   To avoid problems with prototypes to the system memcpy() it is duplicated here
*/
int MPIUNI_Memcpy(void *dst, const void *src, MPI_Count n)
{
  /* MPI_IN_PLACE (C) or its Fortran counterpart registered in MPIUNIF_mpi_in_place means "no copy needed" */
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (!n) return MPI_SUCCESS;

  /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
#if defined(PETSC_HAVE_CUDA)
  if (PetscDeviceInitialized(PETSC_DEVICE_CUDA)) {
    cudaError_t cerr = cudaMemcpy(dst, src, n, cudaMemcpyDefault);
    if (cerr != cudaSuccess) return MPI_FAILURE;
  } else /* device not initialized: fall through to the host memcpy below */
#elif defined(PETSC_HAVE_HIP)
  if (PetscDeviceInitialized(PETSC_DEVICE_HIP)) {
    hipError_t cerr = hipMemcpy(dst, src, n, hipMemcpyDefault);
    if (cerr != hipSuccess) return MPI_FAILURE;
  } else
#endif
  {
    (void)memcpy(dst, src, n);
  }
  return MPI_SUCCESS;
}
80 
static int classcnt = 0; /* number of error classes handed out by MPI_Add_error_class() */
static int codecnt  = 0; /* number of error codes handed out by MPI_Add_error_code() */

/* Hands out the next unused error-class id */
int MPI_Add_error_class(int *cl)
{
  *cl = classcnt++;
  return MPI_SUCCESS;
}
89 
90 int MPI_Add_error_code(int cl, int *co)
91 {
92   if (cl >= classcnt) return MPI_FAILURE;
93   *co = codecnt++;
94   return MPI_SUCCESS;
95 }
96 
97 int MPI_Type_get_envelope(MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner)
98 {
99   int comb = datatype >> 28;
100   switch (comb) {
101   case MPI_COMBINER_NAMED:
102     *num_integers  = 0;
103     *num_addresses = 0;
104     *num_datatypes = 0;
105     *combiner      = comb;
106     break;
107   case MPI_COMBINER_DUP:
108     *num_integers  = 0;
109     *num_addresses = 0;
110     *num_datatypes = 1;
111     *combiner      = comb;
112     break;
113   case MPI_COMBINER_CONTIGUOUS:
114     *num_integers  = 1;
115     *num_addresses = 0;
116     *num_datatypes = 1;
117     *combiner      = comb;
118     break;
119   default:
120     return MPIUni_Abort(MPI_COMM_SELF, 1);
121   }
122   return MPI_SUCCESS;
123 }
124 
/*
   Returns the arguments used to create 'datatype'.  MPIUNI encodes derived types in the
   integer handle itself: bits 28-31 hold the combiner and, for contiguous types, bits
   8-19 hold the count (see the masks below) -- assumed from the masks used here; confirm
   against the encoding in MPIUNI's mpi.h.
*/
int MPI_Type_get_contents(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int *array_of_integers, MPI_Aint *array_of_addresses, MPI_Datatype *array_of_datatypes)
{
  int comb = datatype >> 28;
  switch (comb) {
  case MPI_COMBINER_NAMED: /* predefined types have no creation arguments to report */
    return MPIUni_Abort(MPI_COMM_SELF, 1);
  case MPI_COMBINER_DUP:
    if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_datatypes[0] = datatype & 0x0fffffff; /* strip the combiner nibble to recover the duplicated type */
    break;
  case MPI_COMBINER_CONTIGUOUS:
    if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_integers[0]  = (datatype >> 8) & 0xfff;         /* count */
    array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100; /* basic named type (count=1) from which the contiguous type is derived */
    break;
  default:
    return MPIUni_Abort(MPI_COMM_SELF, 1);
  }
  return MPI_SUCCESS;
}
145 
146 /*
147    Used to set the built-in MPI_TAG_UB attribute
148 */
149 static int Keyval_setup(void)
150 {
151   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
152   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
153   attr[CommIdx(MPI_COMM_SELF)][0].active         = 1;
154   attr[CommIdx(MPI_COMM_SELF)][0].attribute_val  = &mpi_tag_ub;
155   attr_keyval[0].active                          = 1;
156   return MPI_SUCCESS;
157 }
158 
159 int MPI_Comm_create_keyval(PETSC_UNUSED MPI_Copy_function *copy_fn, PETSC_UNUSED MPI_Delete_function *delete_fn, int *keyval, void *extra_state)
160 {
161   int i, keyid;
162 
163   (void)copy_fn;
164   (void)delete_fn;
165   for (i = 1; i < num_attr; i++) { /* the first attribute is always in use */
166     if (!attr_keyval[i].active) {
167       keyid = i;
168       goto found;
169     }
170   }
171   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD, 1);
172   keyid = num_attr++;
173 
174 found:
175   attr_keyval[keyid].extra_state = extra_state;
176   attr_keyval[keyid].del         = delete_fn;
177   attr_keyval[keyid].active      = 1;
178   *keyval                        = keyid;
179   return MPI_SUCCESS;
180 }
181 
182 /*
183   The reference counting business is here to guard against the following:
184 
185   MPI_Comm_set_attr(comm, keyval, some_attr);
186   MPI_Comm_free_keyval(&keyval);
187   MPI_Comm_free(&comm);
188 
189   Here MPI_Comm_free() will try to destroy all of the attributes of the comm, and hence we
190   should not clear the deleter or extra_state until all communicators that have the attribute
191   set are either freed or have given up their attribute.
192 
193   The attribute reference count is INCREASED in:
194   - MPI_Comm_create_keyval()
195   - MPI_Comm_set_attr()
196 
    The attribute reference count is DECREASED in:
198   - MPI_Comm_free_keyval()
199   - MPI_Comm_delete_attr() (but only if the comm has the attribute)
200 */
201 static int MPI_Attr_dereference_keyval(int keyval)
202 {
203   if (--attr_keyval[keyval].active <= 0) {
204     attr_keyval[keyval].extra_state = 0;
205     attr_keyval[keyval].del         = 0;
206   }
207   return MPI_SUCCESS;
208 }
209 
210 static int MPI_Attr_reference_keyval(int keyval)
211 {
212   ++attr_keyval[keyval].active;
213   return MPI_SUCCESS;
214 }
215 
216 int MPI_Comm_free_keyval(int *keyval)
217 {
218   int ret;
219 
220   if (*keyval < 0 || *keyval >= num_attr) return MPI_FAILURE;
221   if ((ret = MPI_Attr_dereference_keyval(*keyval))) return ret;
222   *keyval = 0;
223   return MPI_SUCCESS;
224 }
225 
226 int MPI_Comm_set_attr(MPI_Comm comm, int keyval, void *attribute_val)
227 {
228   int idx = CommIdx(comm), ret;
229   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
230   if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;
231 
232   if ((ret = MPI_Comm_delete_attr(comm, keyval))) return ret;
233   if ((ret = MPI_Attr_reference_keyval(keyval))) return ret;
234   attr[idx][keyval].active        = 1;
235   attr[idx][keyval].attribute_val = attribute_val;
236   return MPI_SUCCESS;
237 }
238 
/*
   Removes the attribute stored under keyval on comm, if present.  The slot is cleared
   BEFORE the user's delete callback runs (the value is saved first), so a callback that
   re-enters the attribute routines sees consistent state; afterwards the keyval
   reference taken by MPI_Comm_set_attr() is dropped.
*/
int MPI_Comm_delete_attr(MPI_Comm comm, int keyval)
{
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;
  if (attr[idx][keyval].active) {
    int   ret;
    void *save_attribute_val = attr[idx][keyval].attribute_val;

    attr[idx][keyval].active        = 0;
    attr[idx][keyval].attribute_val = 0;
    if (attr_keyval[keyval].del) {
      if ((ret = (*attr_keyval[keyval].del)(comm, keyval, save_attribute_val, attr_keyval[keyval].extra_state))) return ret;
    }
    if ((ret = MPI_Attr_dereference_keyval(keyval))) return ret;
  }
  return MPI_SUCCESS;
}
257 
258 int MPI_Comm_get_attr(MPI_Comm comm, int keyval, void *attribute_val, int *flag)
259 {
260   int idx = CommIdx(comm);
261   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
262   if (!keyval) Keyval_setup();
263   *flag                   = attr[idx][keyval].active;
264   *(void **)attribute_val = attr[idx][keyval].attribute_val;
265   return MPI_SUCCESS;
266 }
267 
268 static char all_comm_names[MAX_COMM][MPI_MAX_OBJECT_NAME] = {"MPI_COMM_SELF", "MPI_COMM_WORLD"};
269 
270 int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen)
271 {
272   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
273   if (!comm_name || !resultlen) return MPI_FAILURE;
274   (void)strncpy(comm_name, all_comm_names[CommIdx(comm)], MPI_MAX_OBJECT_NAME - 1);
275   *resultlen = (int)strlen(comm_name);
276   return MPI_SUCCESS;
277 }
278 
279 int MPI_Comm_set_name(MPI_Comm comm, const char *comm_name)
280 {
281   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
282   if (!comm_name) return MPI_FAILURE;
283   if (strlen(comm_name) > MPI_MAX_OBJECT_NAME - 1) return MPI_FAILURE;
284   (void)strncpy(all_comm_names[CommIdx(comm)], comm_name, MPI_MAX_OBJECT_NAME - 1);
285   return MPI_SUCCESS;
286 }
287 
288 int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm)
289 {
290   int j;
291   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
292   for (j = 3; j <= MaxComm; j++) {
293     if (!comm_active[CommIdx(j)]) {
294       comm_active[CommIdx(j)] = 1;
295       *newcomm                = j;
296       return MPI_SUCCESS;
297     }
298   }
299   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
300   *newcomm                       = ++MaxComm;
301   comm_active[CommIdx(*newcomm)] = 1;
302   return MPI_SUCCESS;
303 }
304 
305 int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *out)
306 {
307   int j;
308   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
309   for (j = 3; j <= MaxComm; j++) {
310     if (!comm_active[CommIdx(j)]) {
311       comm_active[CommIdx(j)] = 1;
312       *out                    = j;
313       return MPI_SUCCESS;
314     }
315   }
316   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
317   *out                       = ++MaxComm;
318   comm_active[CommIdx(*out)] = 1;
319   return MPI_SUCCESS;
320 }
321 
/*
   Frees a communicator: deletes all of its attributes (running user delete callbacks),
   releases its slot unless it is one of the two built-ins, and sets *comm to 0 (comm_null).
*/
int MPI_Comm_free(MPI_Comm *comm)
{
  int idx = CommIdx(*comm);

  if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
  for (int i = 0; i < num_attr; i++) {
    int ret = MPI_Comm_delete_attr(*comm, i);

    if (ret) return ret;
  }
  /* slots 1 and 2 (MPI_COMM_SELF/MPI_COMM_WORLD) are permanent; only created/duplicated comms are recycled */
  if (*comm >= 3) comm_active[idx] = 0;
  *comm = 0;
  return MPI_SUCCESS;
}
336 
/* There is exactly one process, so every communicator has size 1 */
int MPI_Comm_size(MPI_Comm comm, int *size)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  *size = 1;
  return MPI_SUCCESS;
}

/* ... and that single process always has rank 0 */
int MPI_Comm_rank(MPI_Comm comm, int *rank)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  *rank = 0;
  return MPI_SUCCESS;
}
350 
351 int MPIUni_Abort(MPI_Comm comm, int errorcode)
352 {
353   (void)printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
354   return MPI_ERR_NOSUPPORT;
355 }
356 
/* Terminates the (single-process) program immediately; the return is never reached */
int MPI_Abort(MPI_Comm comm, int errorcode)
{
  abort();
  return MPI_SUCCESS;
}
362 
363 /* --------------------------------------------------------------------------*/
364 
static int MPI_was_initialized = 0;
static int MPI_was_finalized   = 0;

int MPI_Init(int *argc, char ***argv)
{
  if (MPI_was_initialized) return MPI_FAILURE;
  /* MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so an MPI standard compliant
     MPIU should have this 'if (MPI_was_finalized) return MPI_FAILURE;' check. We relax it here to make life easier for users
     of MPIU so that they can do multiple PetscInitialize/Finalize().
  */
  /* if (MPI_was_finalized) return MPI_FAILURE; */
  MPI_was_initialized = 1;
  MPI_was_finalized   = 0;
  return MPI_SUCCESS;
}

/* 'required' is ignored; MPIUNI always reports MPI_THREAD_FUNNELED via MPI_Query_thread() */
int MPI_Init_thread(int *argc, char ***argv, int required, int *provided)
{
  MPI_Query_thread(provided);
  return MPI_Init(argc, argv);
}

int MPI_Query_thread(int *provided)
{
  *provided = MPI_THREAD_FUNNELED;
  return MPI_SUCCESS;
}
392 
/*
   Frees the built-in communicators (running any remaining attribute delete callbacks),
   warns about leaked objects in debug builds, and resets all counters so that a
   subsequent MPI_Init() starts clean (see the relaxation note in MPI_Init()).
*/
int MPI_Finalize(void)
{
  if (MPI_was_finalized || !MPI_was_initialized) return MPI_FAILURE;
  MPI_Comm comm = MPI_COMM_WORLD;
  int      ret  = MPI_Comm_free(&comm);

  if (ret) return ret;
  comm = MPI_COMM_SELF;
  ret  = MPI_Comm_free(&comm);
  if (ret) return ret;
  if (PetscDefined(USE_DEBUG)) {
    /* report communicators, attributes, and keyvals the user never freed */
    for (int i = 3; i <= MaxComm; ++i) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }

    for (int i = 1; i <= MaxComm; ++i) {
      for (int j = 0; j < num_attr; ++j) {
        if (attr[CommIdx(i)][j].active) printf("MPIUni warning: MPI communicator %d attribute %d was not freed before MPI_Finalize()\n", i, j);
      }
    }

    for (int i = 1; i < num_attr; ++i) {
      if (attr_keyval[i].active) printf("MPIUni warning: MPI attribute %d was not freed before MPI_Finalize()\n", i);
    }
  }

  /* reset counters */
  MaxComm             = 2;
  num_attr            = 1;
  MPI_was_finalized   = 1;
  MPI_was_initialized = 0;
  PETSC_COMM_WORLD    = MPI_COMM_NULL;
  return MPI_SUCCESS;
}
427 
/* Reports whether MPI_Init() has been called (and not yet finalized) */
int MPI_Initialized(int *flag)
{
  *flag = MPI_was_initialized;
  return MPI_SUCCESS;
}

/* Reports whether MPI_Finalize() has completed */
int MPI_Finalized(int *flag)
{
  *flag = MPI_was_finalized;
  return MPI_SUCCESS;
}
439 
/* Frees the malloc()ed window memory from MPI_Win_allocate_shared() and nulls the handle */
int MPI_Win_free(MPI_Win *win)
{
  free(*win);
  *win = NULL;
  return MPI_SUCCESS;
}
446 
447 int MPI_Win_allocate_shared(size_t sz, size_t asz, MPI_Info info, MPI_Comm comm, void **addr, MPI_Win *win)
448 {
449   *win = *addr = malloc(sz);
450   return MPI_SUCCESS;
451 }
452 
453 /* -------------------     Fortran versions of several routines ------------------ */
454 
455 #if defined(PETSC_HAVE_FORTRAN_CAPS)
456   #define mpiunisetmoduleblock_         MPIUNISETMODULEBLOCK
457   #define mpiunisetfortranbasepointers_ MPIUNISETFORTRANBASEPOINTERS
458   #define petsc_mpi_init_               PETSC_MPI_INIT
459   #define petsc_mpi_finalize_           PETSC_MPI_FINALIZE
460   #define petsc_mpi_comm_size_          PETSC_MPI_COMM_SIZE
461   #define petsc_mpi_comm_rank_          PETSC_MPI_COMM_RANK
462   #define petsc_mpi_abort_              PETSC_MPI_ABORT
463   #define petsc_mpi_reduce_             PETSC_MPI_REDUCE
464   #define petsc_mpi_allreduce_          PETSC_MPI_ALLREDUCE
465   #define petsc_mpi_barrier_            PETSC_MPI_BARRIER
466   #define petsc_mpi_bcast_              PETSC_MPI_BCAST
467   #define petsc_mpi_gather_             PETSC_MPI_GATHER
468   #define petsc_mpi_allgather_          PETSC_MPI_ALLGATHER
469   #define petsc_mpi_comm_split_         PETSC_MPI_COMM_SPLIT
470   #define petsc_mpi_scan_               PETSC_MPI_SCAN
471   #define petsc_mpi_send_               PETSC_MPI_SEND
472   #define petsc_mpi_recv_               PETSC_MPI_RECV
473   #define petsc_mpi_reduce_scatter_     PETSC_MPI_REDUCE_SCATTER
474   #define petsc_mpi_irecv_              PETSC_MPI_IRECV
475   #define petsc_mpi_isend_              PETSC_MPI_ISEND
476   #define petsc_mpi_sendrecv_           PETSC_MPI_SENDRECV
477   #define petsc_mpi_test_               PETSC_MPI_TEST
478   #define petsc_mpi_waitall_            PETSC_MPI_WAITALL
479   #define petsc_mpi_waitany_            PETSC_MPI_WAITANY
480   #define petsc_mpi_allgatherv_         PETSC_MPI_ALLGATHERV
481   #define petsc_mpi_alltoallv_          PETSC_MPI_ALLTOALLV
482   #define petsc_mpi_comm_create_        PETSC_MPI_COMM_CREATE
483   #define petsc_mpi_address_            PETSC_MPI_ADDRESS
484   #define petsc_mpi_pack_               PETSC_MPI_PACK
485   #define petsc_mpi_unpack_             PETSC_MPI_UNPACK
486   #define petsc_mpi_pack_size_          PETSC_MPI_PACK_SIZE
487   #define petsc_mpi_type_struct_        PETSC_MPI_TYPE_STRUCT
488   #define petsc_mpi_type_commit_        PETSC_MPI_TYPE_COMMIT
489   #define petsc_mpi_wtime_              PETSC_MPI_WTIME
490   #define petsc_mpi_cancel_             PETSC_MPI_CANCEL
491   #define petsc_mpi_comm_dup_           PETSC_MPI_COMM_DUP
492   #define petsc_mpi_comm_free_          PETSC_MPI_COMM_FREE
493   #define petsc_mpi_get_count_          PETSC_MPI_GET_COUNT
494   #define petsc_mpi_get_processor_name_ PETSC_MPI_GET_PROCESSOR_NAME
495   #define petsc_mpi_initialized_        PETSC_MPI_INITIALIZED
496   #define petsc_mpi_iprobe_             PETSC_MPI_IPROBE
497   #define petsc_mpi_probe_              PETSC_MPI_PROBE
498   #define petsc_mpi_request_free_       PETSC_MPI_REQUEST_FREE
499   #define petsc_mpi_ssend_              PETSC_MPI_SSEND
500   #define petsc_mpi_wait_               PETSC_MPI_WAIT
501   #define petsc_mpi_comm_group_         PETSC_MPI_COMM_GROUP
502   #define petsc_mpi_exscan_             PETSC_MPI_EXSCAN
503 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
504   #define mpiunisetmoduleblock_         mpiunisetmoduleblock
505   #define mpiunisetfortranbasepointers_ mpiunisetfortranbasepointers
506   #define petsc_mpi_init_               petsc_mpi_init
507   #define petsc_mpi_finalize_           petsc_mpi_finalize
508   #define petsc_mpi_comm_size_          petsc_mpi_comm_size
509   #define petsc_mpi_comm_rank_          petsc_mpi_comm_rank
510   #define petsc_mpi_abort_              petsc_mpi_abort
511   #define petsc_mpi_reduce_             petsc_mpi_reduce
512   #define petsc_mpi_allreduce_          petsc_mpi_allreduce
513   #define petsc_mpi_barrier_            petsc_mpi_barrier
514   #define petsc_mpi_bcast_              petsc_mpi_bcast
515   #define petsc_mpi_gather_             petsc_mpi_gather
516   #define petsc_mpi_allgather_          petsc_mpi_allgather
517   #define petsc_mpi_comm_split_         petsc_mpi_comm_split
518   #define petsc_mpi_scan_               petsc_mpi_scan
519   #define petsc_mpi_send_               petsc_mpi_send
520   #define petsc_mpi_recv_               petsc_mpi_recv
521   #define petsc_mpi_reduce_scatter_     petsc_mpi_reduce_scatter
522   #define petsc_mpi_irecv_              petsc_mpi_irecv
523   #define petsc_mpi_isend_              petsc_mpi_isend
524   #define petsc_mpi_sendrecv_           petsc_mpi_sendrecv
525   #define petsc_mpi_test_               petsc_mpi_test
526   #define petsc_mpi_waitall_            petsc_mpi_waitall
527   #define petsc_mpi_waitany_            petsc_mpi_waitany
528   #define petsc_mpi_allgatherv_         petsc_mpi_allgatherv
529   #define petsc_mpi_alltoallv_          petsc_mpi_alltoallv
530   #define petsc_mpi_comm_create_        petsc_mpi_comm_create
531   #define petsc_mpi_address_            petsc_mpi_address
532   #define petsc_mpi_pack_               petsc_mpi_pack
533   #define petsc_mpi_unpack_             petsc_mpi_unpack
534   #define petsc_mpi_pack_size_          petsc_mpi_pack_size
535   #define petsc_mpi_type_struct_        petsc_mpi_type_struct
536   #define petsc_mpi_type_commit_        petsc_mpi_type_commit
537   #define petsc_mpi_wtime_              petsc_mpi_wtime
538   #define petsc_mpi_cancel_             petsc_mpi_cancel
539   #define petsc_mpi_comm_dup_           petsc_mpi_comm_dup
540   #define petsc_mpi_comm_free_          petsc_mpi_comm_free
541   #define petsc_mpi_get_count_          petsc_mpi_get_count
542   #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name
543   #define petsc_mpi_initialized_        petsc_mpi_initialized
544   #define petsc_mpi_iprobe_             petsc_mpi_iprobe
545   #define petsc_mpi_probe_              petsc_mpi_probe
546   #define petsc_mpi_request_free_       petsc_mpi_request_free
547   #define petsc_mpi_ssend_              petsc_mpi_ssend
548   #define petsc_mpi_wait_               petsc_mpi_wait
549   #define petsc_mpi_comm_group_         petsc_mpi_comm_group
550   #define petsc_mpi_exscan_             petsc_mpi_exscan
551 #endif
552 
553 #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
554   #define petsc_mpi_init_               petsc_mpi_init__
555   #define petsc_mpi_finalize_           petsc_mpi_finalize__
556   #define petsc_mpi_comm_size_          petsc_mpi_comm_size__
557   #define petsc_mpi_comm_rank_          petsc_mpi_comm_rank__
558   #define petsc_mpi_abort_              petsc_mpi_abort__
559   #define petsc_mpi_reduce_             petsc_mpi_reduce__
560   #define petsc_mpi_allreduce_          petsc_mpi_allreduce__
561   #define petsc_mpi_barrier_            petsc_mpi_barrier__
562   #define petsc_mpi_bcast_              petsc_mpi_bcast__
563   #define petsc_mpi_gather_             petsc_mpi_gather__
564   #define petsc_mpi_allgather_          petsc_mpi_allgather__
565   #define petsc_mpi_comm_split_         petsc_mpi_comm_split__
566   #define petsc_mpi_scan_               petsc_mpi_scan__
567   #define petsc_mpi_send_               petsc_mpi_send__
568   #define petsc_mpi_recv_               petsc_mpi_recv__
569   #define petsc_mpi_reduce_scatter_     petsc_mpi_reduce_scatter__
570   #define petsc_mpi_irecv_              petsc_mpi_irecv__
571   #define petsc_mpi_isend_              petsc_mpi_isend__
572   #define petsc_mpi_sendrecv_           petsc_mpi_sendrecv__
573   #define petsc_mpi_test_               petsc_mpi_test__
574   #define petsc_mpi_waitall_            petsc_mpi_waitall__
575   #define petsc_mpi_waitany_            petsc_mpi_waitany__
576   #define petsc_mpi_allgatherv_         petsc_mpi_allgatherv__
577   #define petsc_mpi_alltoallv_          petsc_mpi_alltoallv__
578   #define petsc_mpi_comm_create_        petsc_mpi_comm_create__
579   #define petsc_mpi_address_            petsc_mpi_address__
580   #define petsc_mpi_pack_               petsc_mpi_pack__
581   #define petsc_mpi_unpack_             petsc_mpi_unpack__
582   #define petsc_mpi_pack_size_          petsc_mpi_pack_size__
583   #define petsc_mpi_type_struct_        petsc_mpi_type_struct__
584   #define petsc_mpi_type_commit_        petsc_mpi_type_commit__
585   #define petsc_mpi_wtime_              petsc_mpi_wtime__
586   #define petsc_mpi_cancel_             petsc_mpi_cancel__
587   #define petsc_mpi_comm_dup_           petsc_mpi_comm_dup__
588   #define petsc_mpi_comm_free_          petsc_mpi_comm_free__
589   #define petsc_mpi_get_count_          petsc_mpi_get_count__
590   #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name__
591   #define petsc_mpi_initialized_        petsc_mpi_initialized__
592   #define petsc_mpi_iprobe_             petsc_mpi_iprobe__
593   #define petsc_mpi_probe_              petsc_mpi_probe__
594   #define petsc_mpi_request_free_       petsc_mpi_request_free__
595   #define petsc_mpi_ssend_              petsc_mpi_ssend__
596   #define petsc_mpi_wait_               petsc_mpi_wait__
597   #define petsc_mpi_comm_group_         petsc_mpi_comm_group__
598   #define petsc_mpi_exscan_             petsc_mpi_exscan__
599 #endif
600 
601 /* Do not build fortran interface if MPI namespace collision is to be avoided */
602 #if defined(PETSC_USE_FORTRAN_BINDINGS)
603 
PETSC_EXTERN void mpiunisetmoduleblock_(void);

/* Called from Fortran to register the address of the Fortran MPI_IN_PLACE sentinel so
   that MPIUNI_Memcpy() can recognize it */
PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
{
  MPIUNIF_mpi_in_place = f_mpi_in_place;
}
610 
PETSC_EXTERN void petsc_mpi_init_(int *ierr)
{
  mpiunisetmoduleblock_(); /* registers the Fortran MPI_IN_PLACE address before anything uses it */
  *ierr = MPI_Init(NULL, NULL);
}

PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
{
  *ierr = MPI_Finalize();
}
621 
622 PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm, int *size, int *ierr)
623 {
624   *size = 1;
625   *ierr = 0;
626 }
627 
PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm, int *rank, int *ierr)
{
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

/* With one process every "split" is just the original communicator */
PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm, int *color, int *key, MPI_Comm *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm, int *errorcode, int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS; /* unreachable; abort() does not return */
}
645 
/* The Fortran wrappers below forward to the MPIUNI C macros/functions; with one process
   reductions, gathers, and scans degenerate to copying sendbuf into recvbuf. */
PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *root, int *comm, int *ierr)
{
  *ierr = MPI_Reduce(sendbuf, recvbuf, *count, *datatype, *op, *root, *comm);
}

PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, *datatype, *op, *comm);
}

PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_bcast_(void *buf, int *count, int *datatype, int *root, int *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): rcount/rdatatype are passed as pointers where the C interface takes values;
   this can only be correct if the MPI_Gather macro ignores those arguments -- confirm against MPIUNI's mpi.h */
PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root, int *comm, int *ierr)
{
  *ierr = MPI_Gather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *root, *comm);
}

/* NOTE(review): same pointer-argument caveat as petsc_mpi_gather_ above */
PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *comm, int *ierr)
{
  *ierr = MPI_Allgather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *comm);
}

/* An inclusive scan over a single process is the identity operation */
PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*count) * MPI_sizeof(*datatype));
}
680 
/* Point-to-point operations cannot complete with a single process: report "not supported" */
PETSC_EXTERN void petsc_mpi_send_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* NOTE(review): 'status' is declared as int (by value) unlike the int* used by the other
   wrappers; harmless since it is never read, but verify against the Fortran callers */
PETSC_EXTERN void petsc_mpi_recv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int status, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf, void *recvbuf, int *recvcounts, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_irecv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_isend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* A self send/receive pair degenerates to a copy of the send buffer */
PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf, int *sendcount, int *sendtype, int *dest, int *sendtag, void *recvbuf, int *recvcount, int *recvtype, int *source, int *recvtag, int *comm, int *status, int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*sendcount) * MPI_sizeof(*sendtype));
}
710 
PETSC_EXTERN void petsc_mpi_test_(int *request, int *flag, int *status, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* No requests can be outstanding with one process, so waiting is a no-op */
PETSC_EXTERN void petsc_mpi_waitall_(int *count, int *array_of_requests, int *array_of_statuses, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_waitany_(int *count, int *array_of_requests, int *index, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf, int *sendcount, int *sendtype, void *recvbuf, int *recvcounts, int *displs, int *recvtype, int *comm, int *ierr)
{
  *ierr = MPI_Allgatherv(sendbuf, *sendcount, *sendtype, recvbuf, recvcounts, displs, *recvtype, *comm);
}

PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf, int *sendcounts, int *sdispls, int *sendtype, void *recvbuf, int *recvcounts, int *rdispls, int *recvtype, int *comm, int *ierr)
{
  *ierr = MPI_Alltoallv(sendbuf, sendcounts, sdispls, *sendtype, recvbuf, recvcounts, rdispls, *recvtype, *comm);
}
735 
/* Like comm_split above: the "created" communicator is simply the input communicator */
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm, int *group, int *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

/* MPI-1 style MPI_Address: the "address" is simply the pointer value */
PETSC_EXTERN void petsc_mpi_address_(void *location, MPI_Aint *address, int *ierr)
{
  *address = (MPI_Aint)((char *)location);
  *ierr    = MPI_SUCCESS;
}

/* Pack/unpack and general derived-type construction are not supported by MPIUNI */
PETSC_EXTERN void petsc_mpi_pack_(void *inbuf, int *incount, int *datatype, void *outbuf, int *outsize, int *position, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf, int *insize, int *position, void *outbuf, int *outcount, int *datatype, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_pack_size_(int *incount, int *datatype, int *comm, int *size, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_type_struct_(int *count, int *array_of_blocklengths, int *array_of_displaments, int *array_of_types, int *newtype, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
772 
/* NOTE(review): always reports 0.0, and unlike every other wrapper here it lacks
   PETSC_EXTERN -- confirm both are intentional */
double petsc_mpi_wtime_(void)
{
  return 0.0;
}
777 
/* With no outstanding requests, cancellation trivially succeeds */
PETSC_EXTERN void petsc_mpi_cancel_(int *request, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Note: unlike the C MPI_Comm_dup() above, the Fortran binding returns the input comm itself */
PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm, int *out, int *ierr)
{
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_free_(int *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_get_count_(int *status, int *datatype, int *count, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Fortran strings are not NUL terminated: copy exactly the 9 name characters and report the length */
PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name, int *result_len, int *ierr, PETSC_FORTRAN_CHARLEN_T len)
{
  MPIUNI_Memcpy(name, "localhost", 9 * sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_initialized_(int *flag, int *ierr)
{
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}
811 
812 PETSC_EXTERN void petsc_mpi_iprobe_(int *source, int *tag, int *comm, int *glag, int *status, int *ierr)
813 {
814   *ierr = MPI_SUCCESS;
815 }
816 
PETSC_EXTERN void petsc_mpi_probe_(int *source, int *tag, int *comm, int *flag, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_request_free_(int *request, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* A synchronous send can never be matched with one process */
PETSC_EXTERN void petsc_mpi_ssend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_wait_(int *request, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_group_(int *comm, int *group, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* An exclusive scan over one process leaves recvbuf untouched (rank 0 has no predecessors) */
PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
846 
847 #endif /* PETSC_USE_FORTRAN_BINDINGS */
848 
849 #if defined(__cplusplus)
850 }
851 #endif
852