xref: /petsc/src/sys/mpiuni/mpi.c (revision 36f0be53e0644c7a79c5d6ccee4c1a28cdf94d67)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include <petscsys.h>
6 #if !defined(MPIUNI_H)
7 #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9 
10 #include <petsc/private/petscimpl.h> /* for PetscCUPMInitialized */
11 
12 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
13 #include <petscdevice.h>
14 #endif
15 
16 #define MPI_SUCCESS 0
17 #define MPI_FAILURE 1
18 
19 void *MPIUNI_TMP = NULL;
20 
21 /*
22        With MPI Uni there are exactly four distinct communicators:
23     MPI_COMM_SELF, MPI_COMM_WORLD, and a MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)
24 
25     MPI_COMM_SELF and MPI_COMM_WORLD are freed via MPI_Comm_free() in MPI_Finalize(), but in general with PETSc,
26      the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).
27 
28 */
29 #define MAX_ATTR 256
30 #define MAX_COMM 128
31 
32 typedef struct {
33   void *attribute_val;
34   int   active;
35 } MPI_Attr;
36 
37 typedef struct {
38   void                *extra_state;
39   MPI_Delete_function *del;
40   int                  active; /* Is this keyval in use by some comm? */
41 } MPI_Attr_keyval;
42 
43 static MPI_Attr_keyval attr_keyval[MAX_ATTR];
44 static MPI_Attr        attr[MAX_COMM][MAX_ATTR];
45 static int             comm_active[MAX_COMM]; /* Boolean array indicating which comms are in use */
46 static int             mpi_tag_ub           = 100000000;
47 static int             num_attr             = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
48 static int             MaxComm              = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
49 static void           *MPIUNIF_mpi_in_place = 0;
50 
51 #define CommIdx(comm) ((comm)-1) /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
52 
53 #if defined(__cplusplus)
54 extern "C" {
55 #endif
56 
57 /*
58    To avoid problems with prototypes to the system memcpy() it is duplicated here
59 */
/*
   Copy n bytes from src to dst, returning MPI_SUCCESS or MPI_FAILURE.
   MPI_IN_PLACE (the C value, or the Fortran value recorded in
   MPIUNIF_mpi_in_place) as either argument is a no-op, matching MPI's
   in-place reduction semantics.
*/
int MPIUNI_Memcpy(void *dst, const void *src, int n) {
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (!n) return MPI_SUCCESS;

    /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics; the *MemcpyDefault
       kind lets the runtime infer host/device direction from the pointers */
#if defined(PETSC_HAVE_CUDA)
  if (PetscDeviceInitialized(PETSC_DEVICE_CUDA)) {
    cudaError_t cerr = cudaMemcpy(dst, src, n, cudaMemcpyDefault);
    if (cerr != cudaSuccess) return MPI_FAILURE;
  } else
#elif defined(PETSC_HAVE_HIP)
  if (PetscDeviceInitialized(PETSC_DEVICE_HIP)) {
    hipError_t cerr = hipMemcpy(dst, src, n, hipMemcpyDefault);
    if (cerr != hipSuccess) return MPI_FAILURE;
  } else
#endif
  {
    /* host-only build, or the device runtime was never initialized: plain memcpy */
    memcpy(dst, src, n);
  }
  return MPI_SUCCESS;
}
82 
83 static int classcnt = 0;
84 static int codecnt  = 0;
85 
86 int MPI_Add_error_class(int *cl) {
87   *cl = classcnt++;
88   return MPI_SUCCESS;
89 }
90 
91 int MPI_Add_error_code(int cl, int *co) {
92   if (cl >= classcnt) return MPI_FAILURE;
93   *co = codecnt++;
94   return MPI_SUCCESS;
95 }
96 
97 int MPI_Type_get_envelope(MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner) {
98   int comb = datatype >> 28;
99   switch (comb) {
100   case MPI_COMBINER_NAMED:
101     *num_integers  = 0;
102     *num_addresses = 0;
103     *num_datatypes = 0;
104     *combiner      = comb;
105     break;
106   case MPI_COMBINER_DUP:
107     *num_integers  = 0;
108     *num_addresses = 0;
109     *num_datatypes = 1;
110     *combiner      = comb;
111     break;
112   case MPI_COMBINER_CONTIGUOUS:
113     *num_integers  = 1;
114     *num_addresses = 0;
115     *num_datatypes = 1;
116     *combiner      = comb;
117     break;
118   default: return MPIUni_Abort(MPI_COMM_SELF, 1);
119   }
120   return MPI_SUCCESS;
121 }
122 
/*
   Decode the constituents of a derived datatype handle.  MPIUni packs a
   datatype into a single int: combiner in bits 28-31, element count in
   bits 8-19, basic type/size info in the remaining bits (layout inferred
   from the masks below -- confirm against MPIUNI's mpi.h before changing).
*/
int MPI_Type_get_contents(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int *array_of_integers, MPI_Aint *array_of_addresses, MPI_Datatype *array_of_datatypes) {
  int comb = datatype >> 28;
  switch (comb) {
  case MPI_COMBINER_NAMED: return MPIUni_Abort(MPI_COMM_SELF, 1); /* predefined types have no contents */
  case MPI_COMBINER_DUP:
    if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_datatypes[0] = datatype & 0x0fffffff; /* strip the combiner nibble to recover the duplicated type */
    break;
  case MPI_COMBINER_CONTIGUOUS:
    if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_integers[0]  = (datatype >> 8) & 0xfff;         /* count */
    array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100; /* basic named type (count=1) from which the contiguous type is derived */
    break;
  default: return MPIUni_Abort(MPI_COMM_SELF, 1);
  }
  return MPI_SUCCESS;
}
140 
141 /*
142    Used to set the built-in MPI_TAG_UB attribute
143 */
144 static int Keyval_setup(void) {
145   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
146   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
147   attr[CommIdx(MPI_COMM_SELF)][0].active         = 1;
148   attr[CommIdx(MPI_COMM_SELF)][0].attribute_val  = &mpi_tag_ub;
149   attr_keyval[0].active                          = 1;
150   return MPI_SUCCESS;
151 }
152 
153 int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn, MPI_Delete_function *delete_fn, int *keyval, void *extra_state) {
154   int i, keyid;
155   for (i = 1; i < num_attr; i++) { /* the first attribute is always in use */
156     if (!attr_keyval[i].active) {
157       keyid = i;
158       goto found;
159     }
160   }
161   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD, 1);
162   keyid = num_attr++;
163 
164 found:
165   attr_keyval[keyid].extra_state = extra_state;
166   attr_keyval[keyid].del         = delete_fn;
167   attr_keyval[keyid].active      = 1;
168   *keyval                        = keyid;
169   return MPI_SUCCESS;
170 }
171 
172 /*
173   The reference counting business is here to guard against the following:
174 
175   MPI_Comm_set_attr(comm, keyval, some_attr);
176   MPI_Comm_free_keyval(&keyval);
177   MPI_Comm_free(&comm);
178 
179   Here MPI_Comm_free() will try to destroy all of the attributes of the comm, and hence we
180   should not clear the deleter or extra_state until all communicators that have the attribute
181   set are either freed or have given up their attribute.
182 
183   The attribute reference count is INCREASED in:
184   - MPI_Comm_create_keyval()
185   - MPI_Comm_set_attr()
186 
187   The attribute reference count is DECREASED in:
188   - MPI_Comm_free_keyval()
189   - MPI_Comm_delete_attr() (but only if the comm has the attribute)
190 */
191 static int MPI_Attr_dereference_keyval(int keyval) {
192   if (--(attr_keyval[keyval].active) <= 0) {
193     attr_keyval[keyval].extra_state = 0;
194     attr_keyval[keyval].del         = 0;
195   }
196   return MPI_SUCCESS;
197 }
198 
199 static int MPI_Attr_reference_keyval(int keyval) {
200   ++(attr_keyval[keyval].active);
201   return MPI_SUCCESS;
202 }
203 
204 int MPI_Comm_free_keyval(int *keyval) {
205   int ret;
206 
207   if (*keyval < 0 || *keyval >= num_attr) return MPI_FAILURE;
208   if ((ret = MPI_Attr_dereference_keyval(*keyval))) return ret;
209   *keyval = 0;
210   return MPI_SUCCESS;
211 }
212 
213 int MPI_Comm_set_attr(MPI_Comm comm, int keyval, void *attribute_val) {
214   int idx = CommIdx(comm), ret;
215   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
216   if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;
217 
218   if ((ret = MPI_Comm_delete_attr(comm, keyval))) return ret;
219   if ((ret = MPI_Attr_reference_keyval(keyval))) return ret;
220   attr[idx][keyval].active        = 1;
221   attr[idx][keyval].attribute_val = attribute_val;
222   return MPI_SUCCESS;
223 }
224 
/*
   Remove the attribute stored under keyval on comm, invoking the user's
   delete callback (if any) and dropping the comm's reference on the
   keyval.  Deleting an attribute that is not set succeeds as a no-op.
*/
int MPI_Comm_delete_attr(MPI_Comm comm, int keyval) {
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;
  if (attr[idx][keyval].active) {
    int   ret;
    void *save_attribute_val = attr[idx][keyval].attribute_val;

    /* detach the attribute before running the user callback so the comm is
       already consistent if the callback inspects or re-deletes it */
    attr[idx][keyval].active        = 0;
    attr[idx][keyval].attribute_val = 0;
    if (attr_keyval[keyval].del) {
      if ((ret = (*(attr_keyval[keyval].del))(comm, keyval, save_attribute_val, attr_keyval[keyval].extra_state))) return ret;
    }
    if ((ret = MPI_Attr_dereference_keyval(keyval))) return ret;
  }
  return MPI_SUCCESS;
}
242 
243 int MPI_Comm_get_attr(MPI_Comm comm, int keyval, void *attribute_val, int *flag) {
244   int idx = CommIdx(comm);
245   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
246   if (!keyval) Keyval_setup();
247   *flag                   = attr[idx][keyval].active;
248   *(void **)attribute_val = attr[idx][keyval].attribute_val;
249   return MPI_SUCCESS;
250 }
251 
252 static char all_comm_names[MAX_COMM][MPI_MAX_OBJECT_NAME] = {"MPI_COMM_SELF", "MPI_COMM_WORLD"};
253 
254 int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen) {
255   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
256   if (!comm_name || !resultlen) return MPI_FAILURE;
257   strncpy(comm_name, all_comm_names[CommIdx(comm)], MPI_MAX_OBJECT_NAME - 1);
258   *resultlen = (int)strlen(comm_name);
259   return MPI_SUCCESS;
260 }
261 
262 int MPI_Comm_set_name(MPI_Comm comm, const char *comm_name) {
263   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
264   if (!comm_name) return MPI_FAILURE;
265   if (strlen(comm_name) > MPI_MAX_OBJECT_NAME - 1) return MPI_FAILURE;
266   strncpy(all_comm_names[CommIdx(comm)], comm_name, MPI_MAX_OBJECT_NAME - 1);
267   return MPI_SUCCESS;
268 }
269 
270 int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) {
271   int j;
272   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
273   for (j = 3; j <= MaxComm; j++) {
274     if (!comm_active[CommIdx(j)]) {
275       comm_active[CommIdx(j)] = 1;
276       *newcomm                = j;
277       return MPI_SUCCESS;
278     }
279   }
280   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
281   *newcomm                       = ++MaxComm;
282   comm_active[CommIdx(*newcomm)] = 1;
283   return MPI_SUCCESS;
284 }
285 
286 int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *out) {
287   int j;
288   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
289   for (j = 3; j <= MaxComm; j++) {
290     if (!comm_active[CommIdx(j)]) {
291       comm_active[CommIdx(j)] = 1;
292       *out                    = j;
293       return MPI_SUCCESS;
294     }
295   }
296   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
297   *out                       = ++MaxComm;
298   comm_active[CommIdx(*out)] = 1;
299   return MPI_SUCCESS;
300 }
301 
/*
   Free a communicator: delete all of its attributes (running any user
   delete callbacks), and for user-created comms (handle >= 3) release the
   slot for reuse.  The predefined MPI_COMM_SELF/MPI_COMM_WORLD slots stay
   reserved.  On success *comm is set to 0 (comm_null).
*/
int MPI_Comm_free(MPI_Comm *comm) {
  int idx = CommIdx(*comm); /* pure arithmetic, harmless before validation below */

  if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
  for (int i = 0; i < num_attr; i++) {
    int ret = MPI_Comm_delete_attr(*comm, i);

    if (ret) return ret;
  }
  if (*comm >= 3) comm_active[idx] = 0;
  *comm = 0;
  return MPI_SUCCESS;
}
315 
316 int MPI_Comm_size(MPI_Comm comm, int *size) {
317   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
318   *size = 1;
319   return MPI_SUCCESS;
320 }
321 
322 int MPI_Comm_rank(MPI_Comm comm, int *rank) {
323   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
324   *rank = 0;
325   return MPI_SUCCESS;
326 }
327 
/*
   Called by MPIUni for operations that have no sequential implementation;
   prints a diagnostic and returns an error code.  Note it does NOT
   terminate the program, despite the name; both arguments are unused.
*/
int MPIUni_Abort(MPI_Comm comm, int errorcode) {
  printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
  return MPI_ERR_NOSUPPORT;
}
332 
/*
   Terminate the (single-process) program immediately via abort(); the
   errorcode cannot be reported anywhere and is ignored.  The return
   statement is unreachable but satisfies the prototype.
*/
int MPI_Abort(MPI_Comm comm, int errorcode) {
  abort();
  return MPI_SUCCESS;
}
337 
338 /* --------------------------------------------------------------------------*/
339 
340 static int MPI_was_initialized = 0;
341 static int MPI_was_finalized   = 0;
342 
/*
   Begin an MPIUni run; argc/argv are accepted for interface compatibility
   but unused.  Calling MPI_Init() twice without an intervening
   MPI_Finalize() fails.
*/
int MPI_Init(int *argc, char ***argv) {
  if (MPI_was_initialized) return MPI_FAILURE;
  /* MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so an MPI standard compliant
     MPIU should have this 'if (MPI_was_finalized) return MPI_FAILURE;' check. We relax it here to make life easier for users
     of MPIU so that they can do multiple PetscInitialize/Finalize().
  */
  /* if (MPI_was_finalized) return MPI_FAILURE; */
  MPI_was_initialized = 1;
  MPI_was_finalized   = 0;
  return MPI_SUCCESS;
}
354 
/*
   Initialize with a requested threading level.  MPIUni always reports
   MPI_THREAD_FUNNELED regardless of what was required.
*/
int MPI_Init_thread(int *argc, char ***argv, int required, int *provided) {
  (void)required; /* only one level is on offer */
  MPI_Query_thread(provided);
  return MPI_Init(argc, argv);
}
359 
360 int MPI_Query_thread(int *provided) {
361   *provided = MPI_THREAD_FUNNELED;
362   return MPI_SUCCESS;
363 }
364 
/*
   End an MPIUni run: free the two predefined communicators (running any
   remaining attribute delete callbacks), warn in debug builds about
   user-created comms/attributes/keyvals never freed, and reset all
   counters so a later MPI_Init() starts from a clean slate.
*/
int MPI_Finalize(void) {
  if (MPI_was_finalized || !MPI_was_initialized) return MPI_FAILURE;
  MPI_Comm comm = MPI_COMM_WORLD;
  int      ret  = MPI_Comm_free(&comm);

  if (ret) return ret;
  comm = MPI_COMM_SELF;
  ret  = MPI_Comm_free(&comm);
  if (ret) return ret;
  if (PetscDefined(USE_DEBUG)) {
    /* user-created communicators live in slots 3..MaxComm */
    for (int i = 3; i <= MaxComm; ++i) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }

    for (int i = 1; i <= MaxComm; ++i) {
      for (int j = 0; j < num_attr; ++j) {
        if (attr[CommIdx(i)][j].active) printf("MPIUni warning: MPI communicator %d attribute %d was not freed before MPI_Finalize()\n", i, j);
      }
    }

    /* keyval 0 is the predefined MPI_TAG_UB; only user keyvals are reported */
    for (int i = 1; i < num_attr; ++i) {
      if (attr_keyval[i].active) printf("MPIUni warning: MPI attribute %d was not freed before MPI_Finalize()\n", i);
    }
  }

  /* reset counters */
  MaxComm             = 2;
  num_attr            = 1;
  MPI_was_finalized   = 1;
  MPI_was_initialized = 0;
  PETSC_COMM_WORLD    = MPI_COMM_NULL;
  return MPI_SUCCESS;
}
398 
399 int MPI_Initialized(int *flag) {
400   *flag = MPI_was_initialized;
401   return MPI_SUCCESS;
402 }
403 
404 int MPI_Finalized(int *flag) {
405   *flag = MPI_was_finalized;
406   return MPI_SUCCESS;
407 }
408 
409 /* -------------------     Fortran versions of several routines ------------------ */
410 
411 #if defined(PETSC_HAVE_FORTRAN_CAPS)
412 #define mpiunisetmoduleblock_         MPIUNISETMODULEBLOCK
413 #define mpiunisetfortranbasepointers_ MPIUNISETFORTRANBASEPOINTERS
414 #define petsc_mpi_init_               PETSC_MPI_INIT
415 #define petsc_mpi_finalize_           PETSC_MPI_FINALIZE
416 #define petsc_mpi_comm_size_          PETSC_MPI_COMM_SIZE
417 #define petsc_mpi_comm_rank_          PETSC_MPI_COMM_RANK
418 #define petsc_mpi_abort_              PETSC_MPI_ABORT
419 #define petsc_mpi_reduce_             PETSC_MPI_REDUCE
420 #define petsc_mpi_allreduce_          PETSC_MPI_ALLREDUCE
421 #define petsc_mpi_barrier_            PETSC_MPI_BARRIER
422 #define petsc_mpi_bcast_              PETSC_MPI_BCAST
423 #define petsc_mpi_gather_             PETSC_MPI_GATHER
424 #define petsc_mpi_allgather_          PETSC_MPI_ALLGATHER
425 #define petsc_mpi_comm_split_         PETSC_MPI_COMM_SPLIT
426 #define petsc_mpi_scan_               PETSC_MPI_SCAN
427 #define petsc_mpi_send_               PETSC_MPI_SEND
428 #define petsc_mpi_recv_               PETSC_MPI_RECV
429 #define petsc_mpi_reduce_scatter_     PETSC_MPI_REDUCE_SCATTER
430 #define petsc_mpi_irecv_              PETSC_MPI_IRECV
431 #define petsc_mpi_isend_              PETSC_MPI_ISEND
432 #define petsc_mpi_sendrecv_           PETSC_MPI_SENDRECV
433 #define petsc_mpi_test_               PETSC_MPI_TEST
434 #define petsc_mpi_waitall_            PETSC_MPI_WAITALL
435 #define petsc_mpi_waitany_            PETSC_MPI_WAITANY
436 #define petsc_mpi_allgatherv_         PETSC_MPI_ALLGATHERV
437 #define petsc_mpi_alltoallv_          PETSC_MPI_ALLTOALLV
438 #define petsc_mpi_comm_create_        PETSC_MPI_COMM_CREATE
439 #define petsc_mpi_address_            PETSC_MPI_ADDRESS
440 #define petsc_mpi_pack_               PETSC_MPI_PACK
441 #define petsc_mpi_unpack_             PETSC_MPI_UNPACK
442 #define petsc_mpi_pack_size_          PETSC_MPI_PACK_SIZE
443 #define petsc_mpi_type_struct_        PETSC_MPI_TYPE_STRUCT
444 #define petsc_mpi_type_commit_        PETSC_MPI_TYPE_COMMIT
445 #define petsc_mpi_wtime_              PETSC_MPI_WTIME
446 #define petsc_mpi_cancel_             PETSC_MPI_CANCEL
447 #define petsc_mpi_comm_dup_           PETSC_MPI_COMM_DUP
448 #define petsc_mpi_comm_free_          PETSC_MPI_COMM_FREE
449 #define petsc_mpi_get_count_          PETSC_MPI_GET_COUNT
450 #define petsc_mpi_get_processor_name_ PETSC_MPI_GET_PROCESSOR_NAME
451 #define petsc_mpi_initialized_        PETSC_MPI_INITIALIZED
452 #define petsc_mpi_iprobe_             PETSC_MPI_IPROBE
453 #define petsc_mpi_probe_              PETSC_MPI_PROBE
454 #define petsc_mpi_request_free_       PETSC_MPI_REQUEST_FREE
455 #define petsc_mpi_ssend_              PETSC_MPI_SSEND
456 #define petsc_mpi_wait_               PETSC_MPI_WAIT
457 #define petsc_mpi_comm_group_         PETSC_MPI_COMM_GROUP
458 #define petsc_mpi_exscan_             PETSC_MPI_EXSCAN
459 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
460 #define mpiunisetmoduleblock_         mpiunisetmoduleblock
461 #define mpiunisetfortranbasepointers_ mpiunisetfortranbasepointers
462 #define petsc_mpi_init_               petsc_mpi_init
463 #define petsc_mpi_finalize_           petsc_mpi_finalize
464 #define petsc_mpi_comm_size_          petsc_mpi_comm_size
465 #define petsc_mpi_comm_rank_          petsc_mpi_comm_rank
466 #define petsc_mpi_abort_              petsc_mpi_abort
467 #define petsc_mpi_reduce_             petsc_mpi_reduce
468 #define petsc_mpi_allreduce_          petsc_mpi_allreduce
469 #define petsc_mpi_barrier_            petsc_mpi_barrier
470 #define petsc_mpi_bcast_              petsc_mpi_bcast
471 #define petsc_mpi_gather_             petsc_mpi_gather
472 #define petsc_mpi_allgather_          petsc_mpi_allgather
473 #define petsc_mpi_comm_split_         petsc_mpi_comm_split
474 #define petsc_mpi_scan_               petsc_mpi_scan
475 #define petsc_mpi_send_               petsc_mpi_send
476 #define petsc_mpi_recv_               petsc_mpi_recv
477 #define petsc_mpi_reduce_scatter_     petsc_mpi_reduce_scatter
478 #define petsc_mpi_irecv_              petsc_mpi_irecv
479 #define petsc_mpi_isend_              petsc_mpi_isend
480 #define petsc_mpi_sendrecv_           petsc_mpi_sendrecv
481 #define petsc_mpi_test_               petsc_mpi_test
482 #define petsc_mpi_waitall_            petsc_mpi_waitall
483 #define petsc_mpi_waitany_            petsc_mpi_waitany
484 #define petsc_mpi_allgatherv_         petsc_mpi_allgatherv
485 #define petsc_mpi_alltoallv_          petsc_mpi_alltoallv
486 #define petsc_mpi_comm_create_        petsc_mpi_comm_create
487 #define petsc_mpi_address_            petsc_mpi_address
488 #define petsc_mpi_pack_               petsc_mpi_pack
489 #define petsc_mpi_unpack_             petsc_mpi_unpack
490 #define petsc_mpi_pack_size_          petsc_mpi_pack_size
491 #define petsc_mpi_type_struct_        petsc_mpi_type_struct
492 #define petsc_mpi_type_commit_        petsc_mpi_type_commit
493 #define petsc_mpi_wtime_              petsc_mpi_wtime
494 #define petsc_mpi_cancel_             petsc_mpi_cancel
495 #define petsc_mpi_comm_dup_           petsc_mpi_comm_dup
496 #define petsc_mpi_comm_free_          petsc_mpi_comm_free
497 #define petsc_mpi_get_count_          petsc_mpi_get_count
498 #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name
499 #define petsc_mpi_initialized_        petsc_mpi_initialized
500 #define petsc_mpi_iprobe_             petsc_mpi_iprobe
501 #define petsc_mpi_probe_              petsc_mpi_probe
502 #define petsc_mpi_request_free_       petsc_mpi_request_free
503 #define petsc_mpi_ssend_              petsc_mpi_ssend
504 #define petsc_mpi_wait_               petsc_mpi_wait
505 #define petsc_mpi_comm_group_         petsc_mpi_comm_group
506 #define petsc_mpi_exscan_             petsc_mpi_exscan
507 #endif
508 
509 #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
510 #define petsc_mpi_init_               petsc_mpi_init__
511 #define petsc_mpi_finalize_           petsc_mpi_finalize__
512 #define petsc_mpi_comm_size_          petsc_mpi_comm_size__
513 #define petsc_mpi_comm_rank_          petsc_mpi_comm_rank__
514 #define petsc_mpi_abort_              petsc_mpi_abort__
515 #define petsc_mpi_reduce_             petsc_mpi_reduce__
516 #define petsc_mpi_allreduce_          petsc_mpi_allreduce__
517 #define petsc_mpi_barrier_            petsc_mpi_barrier__
518 #define petsc_mpi_bcast_              petsc_mpi_bcast__
519 #define petsc_mpi_gather_             petsc_mpi_gather__
520 #define petsc_mpi_allgather_          petsc_mpi_allgather__
521 #define petsc_mpi_comm_split_         petsc_mpi_comm_split__
522 #define petsc_mpi_scan_               petsc_mpi_scan__
523 #define petsc_mpi_send_               petsc_mpi_send__
524 #define petsc_mpi_recv_               petsc_mpi_recv__
525 #define petsc_mpi_reduce_scatter_     petsc_mpi_reduce_scatter__
526 #define petsc_mpi_irecv_              petsc_mpi_irecv__
527 #define petsc_mpi_isend_              petsc_mpi_isend__
528 #define petsc_mpi_sendrecv_           petsc_mpi_sendrecv__
529 #define petsc_mpi_test_               petsc_mpi_test__
530 #define petsc_mpi_waitall_            petsc_mpi_waitall__
531 #define petsc_mpi_waitany_            petsc_mpi_waitany__
532 #define petsc_mpi_allgatherv_         petsc_mpi_allgatherv__
533 #define petsc_mpi_alltoallv_          petsc_mpi_alltoallv__
534 #define petsc_mpi_comm_create_        petsc_mpi_comm_create__
535 #define petsc_mpi_address_            petsc_mpi_address__
536 #define petsc_mpi_pack_               petsc_mpi_pack__
537 #define petsc_mpi_unpack_             petsc_mpi_unpack__
538 #define petsc_mpi_pack_size_          petsc_mpi_pack_size__
539 #define petsc_mpi_type_struct_        petsc_mpi_type_struct__
540 #define petsc_mpi_type_commit_        petsc_mpi_type_commit__
541 #define petsc_mpi_wtime_              petsc_mpi_wtime__
542 #define petsc_mpi_cancel_             petsc_mpi_cancel__
543 #define petsc_mpi_comm_dup_           petsc_mpi_comm_dup__
544 #define petsc_mpi_comm_free_          petsc_mpi_comm_free__
545 #define petsc_mpi_get_count_          petsc_mpi_get_count__
546 #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name__
547 #define petsc_mpi_initialized_        petsc_mpi_initialized__
548 #define petsc_mpi_iprobe_             petsc_mpi_iprobe__
549 #define petsc_mpi_probe_              petsc_mpi_probe__
550 #define petsc_mpi_request_free_       petsc_mpi_request_free__
551 #define petsc_mpi_ssend_              petsc_mpi_ssend__
552 #define petsc_mpi_wait_               petsc_mpi_wait__
553 #define petsc_mpi_comm_group_         petsc_mpi_comm_group__
554 #define petsc_mpi_exscan_             petsc_mpi_exscan__
555 #endif
556 
557 /* Do not build the Fortran interface if MPI namespace collision is to be avoided */
558 #if defined(PETSC_HAVE_FORTRAN)
559 
560 PETSC_EXTERN void mpiunisetmoduleblock_(void);
561 
/* Record Fortran's MPI_IN_PLACE sentinel so MPIUNI_Memcpy() can recognize it. */
PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place) {
  MPIUNIF_mpi_in_place = f_mpi_in_place;
}
565 
/* Fortran MPI_Init(): wires up the Fortran module common block first. */
PETSC_EXTERN void petsc_mpi_init_(int *ierr) {
  mpiunisetmoduleblock_();
  *ierr = MPI_Init((int *)0, (char ***)0);
}
570 
/* Fortran MPI_Finalize() */
PETSC_EXTERN void petsc_mpi_finalize_(int *ierr) {
  *ierr = MPI_Finalize();
}

/* Uniprocessor: every communicator has size 1 */
PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm, int *size, int *ierr) {
  *size = 1;
  *ierr = 0;
}

/* Uniprocessor: the only rank is 0 */
PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm, int *rank, int *ierr) {
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

/* Splitting a one-process communicator just returns the input comm */
PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm, int *color, int *key, MPI_Comm *newcomm, int *ierr) {
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

/* Fortran MPI_Abort(): terminates the process; *ierr is never reached */
PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm, int *errorcode, int *ierr) {
  abort();
  *ierr = MPI_SUCCESS;
}
594 
/* One-process reductions degenerate to a copy; the C MPIUni macros handle that */
PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *root, int *comm, int *ierr) {
  *ierr = MPI_Reduce(sendbuf, recvbuf, *count, *datatype, *op, *root, *comm);
}

PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, *datatype, *op, *comm);
}

/* A barrier among one process is a no-op */
PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* Broadcasting from rank 0 to rank 0 is a no-op */
PETSC_EXTERN void petsc_mpi_bcast_(void *buf, int *count, int *datatype, int *root, int *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): rcount/rdatatype are deliberately passed un-dereferenced;
   the MPIUni MPI_Gather/MPI_Allgather macros presumably ignore the receive
   signature -- confirm against MPIUNI's mpi.h before changing */
PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root, int *comm, int *ierr) {
  *ierr = MPI_Gather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *root, *comm);
}

PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *comm, int *ierr) {
  *ierr = MPI_Allgather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *comm);
}

/* An inclusive scan on one process is a copy of sendbuf into recvbuf */
PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*count) * MPI_sizeof(*datatype));
}
622 
/* Point-to-point sends cannot complete with a single process: report and fail */
PETSC_EXTERN void petsc_mpi_send_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
626 
627 PETSC_EXTERN void petsc_mpi_recv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int status, int *ierr) {
628   *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
629 }
630 
/* Unsupported operations: report and fail */
PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf, void *recvbuf, int *recvcounts, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_irecv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_isend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *request, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
642 
/* A self send-receive degenerates to copying the send buffer to the receive buffer */
PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf, int *sendcount, int *sendtype, int *dest, int *sendtag, void *recvbuf, int *recvcount, int *recvtype, int *source, int *recvtag, int *comm, int *status, int *ierr) {
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*sendcount) * MPI_sizeof(*sendtype));
}
646 
/* No nonblocking operation can ever be pending: waits succeed trivially,
   while MPI_Test (which must produce a status) reports "unsupported" */
PETSC_EXTERN void petsc_mpi_test_(int *request, int *flag, int *status, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_waitall_(int *count, int *array_of_requests, int *array_of_statuses, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_waitany_(int *count, int *array_of_requests, int *index, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* Variable-count collectives delegate to the C MPIUni macros */
PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf, int *sendcount, int *sendtype, void *recvbuf, int *recvcounts, int *displs, int *recvtype, int *comm, int *ierr) {
  *ierr = MPI_Allgatherv(sendbuf, *sendcount, *sendtype, recvbuf, recvcounts, displs, *recvtype, *comm);
}

PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf, int *sendcounts, int *sdispls, int *sendtype, void *recvbuf, int *recvcounts, int *rdispls, int *recvtype, int *comm, int *ierr) {
  *ierr = MPI_Alltoallv(sendbuf, sendcounts, sdispls, *sendtype, recvbuf, recvcounts, rdispls, *recvtype, *comm);
}

/* Creating a communicator from a group just returns the input comm here */
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm, int *group, int *newcomm, int *ierr) {
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

/* MPI_Address: the "address" of a location is simply its pointer value */
PETSC_EXTERN void petsc_mpi_address_(void *location, MPI_Aint *address, int *ierr) {
  *address = (MPI_Aint)((char *)location);
  *ierr    = MPI_SUCCESS;
}
676 
/* Pack/unpack and general datatype construction are not supported by MPIUni */
PETSC_EXTERN void petsc_mpi_pack_(void *inbuf, int *incount, int *datatype, void *outbuf, int *outsize, int *position, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf, int *insize, int *position, void *outbuf, int *outcount, int *datatype, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_pack_size_(int *incount, int *datatype, int *comm, int *size, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_type_struct_(int *count, int *array_of_blocklengths, int *array_of_displaments, int *array_of_types, int *newtype, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Committing a datatype requires no work here */
PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype, int *ierr) {
  *ierr = MPI_SUCCESS;
}
696 
/* Fortran MPI_Wtime(): no timer is provided; always returns 0.0 */
double petsc_mpi_wtime_(void) {
  return 0.0;
}

/* No requests can exist, so cancel is a trivial success */
PETSC_EXTERN void petsc_mpi_cancel_(int *request, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): unlike the C MPI_Comm_dup(), this does not allocate a new
   slot; it returns the input comm unchanged -- confirm this asymmetry is intended */
PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm, int *out, int *ierr) {
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_free_(int *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* Needs a real status object, which MPIUni never produces: report and fail */
PETSC_EXTERN void petsc_mpi_get_count_(int *status, int *datatype, int *count, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
717 
/* Fortran strings carry an explicit length and are not NUL-terminated, so
   copying exactly the 9 characters of "localhost" and reporting the length
   is sufficient; `len` is the hidden Fortran character-length argument */
PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name, int *result_len, int *ierr, PETSC_FORTRAN_CHARLEN_T len) {
  MPIUNI_Memcpy(name, "localhost", 9 * sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}
723 
/* Fortran MPI_Initialized() */
PETSC_EXTERN void petsc_mpi_initialized_(int *flag, int *ierr) {
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}
728 
729 PETSC_EXTERN void petsc_mpi_iprobe_(int *source, int *tag, int *comm, int *glag, int *status, int *ierr) {
730   *ierr = MPI_SUCCESS;
731 }
732 
/* Remaining Fortran stubs: trivial successes where no state exists, and
   "unsupported" for operations that would need a real message */
PETSC_EXTERN void petsc_mpi_probe_(int *source, int *tag, int *comm, int *flag, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_request_free_(int *request, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* A synchronous send cannot complete with one process: report and fail */
PETSC_EXTERN void petsc_mpi_ssend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_wait_(int *request, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_group_(int *comm, int *group, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* An exclusive scan on one process leaves recvbuf untouched by definition */
PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}
756 
757 #endif /* PETSC_HAVE_FORTRAN */
758 
759 #if defined(__cplusplus)
760 }
761 #endif
762