xref: /petsc/src/sys/mpiuni/mpi.c (revision cf906205f1df435953e2a75894dc7795d3ff0aed)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include <petscsys.h>
6 #if !defined(MPIUNI_H)
7 #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9 
10 #include <petscdevice_cupm.h>
11 #include <petsc/private/petscimpl.h>
12 
13 #define MPI_SUCCESS 0
14 #define MPI_FAILURE 1
15 
16 void *MPIUNI_TMP = NULL;
17 
18 /*
19        With MPI Uni there are exactly four distinct communicators:
20     MPI_COMM_SELF, MPI_COMM_WORLD, and a MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)
21 
22     MPI_COMM_SELF and MPI_COMM_WORLD are MPI_Comm_free() in MPI_Finalize() but in general with PETSc,
23      the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).
24 
25 */
26 #define MAX_ATTR 256
27 #define MAX_COMM 128
28 
/* One cached attribute value on one communicator (see MPI_Comm_set_attr()/MPI_Comm_get_attr()) */
typedef struct {
  void *attribute_val; /* user-supplied pointer stored by MPI_Comm_set_attr() */
  int   active;        /* 1 while this (comm,keyval) pair holds a value */
} MPI_Attr;

/* One attribute key (keyval); the same keyval may be cached on several communicators */
typedef struct {
  void                *extra_state;
  MPI_Delete_function *del;
  int                  active; /* Is this keyval in use by some comm? Acts as a reference count, see MPI_Attr_dereference_keyval() */
} MPI_Attr_keyval;

static MPI_Attr_keyval attr_keyval[MAX_ATTR];
static MPI_Attr        attr[MAX_COMM][MAX_ATTR];
static int             comm_active[MAX_COMM]; /* Boolean array indicating which comms are in use */
static int             mpi_tag_ub           = 100000000; /* value returned for the predefined MPI_TAG_UB attribute */
static int             num_attr             = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
static int             MaxComm              = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
static void           *MPIUNIF_mpi_in_place = 0; /* Fortran's MPI_IN_PLACE sentinel, set by mpiunisetfortranbasepointers_() */

#define CommIdx(comm) ((comm)-1) /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
49 
50 #if defined(__cplusplus)
51 extern "C" {
52 #endif
53 
54 /*
55    To avoid problems with prototypes to the system memcpy() it is duplicated here
56 */
/* Copy n bytes from src to dst, honoring MPI_IN_PLACE semantics (a no-op when either
   side is the in-place sentinel, C or Fortran flavor). Returns MPI_SUCCESS/MPI_FAILURE. */
int MPIUNI_Memcpy(void *dst, const void *src, int n) {
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (!n) return MPI_SUCCESS;

    /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
#if defined(PETSC_HAVE_CUDA)
  if (PetscDeviceInitialized(PETSC_DEVICE_CUDA)) {
    /* cudaMemcpyDefault lets the driver infer host/device direction from the pointers */
    cudaError_t cerr = cudaMemcpy(dst, src, n, cudaMemcpyDefault);
    if (cerr != cudaSuccess) return MPI_FAILURE;
  } else /* falls through to the plain memcpy block below when the device is not initialized */
#elif defined(PETSC_HAVE_HIP)
  if (PetscDeviceInitialized(PETSC_DEVICE_HIP)) {
    hipError_t cerr = hipMemcpy(dst, src, n, hipMemcpyDefault);
    if (cerr != hipSuccess) return MPI_FAILURE;
  } else /* falls through to the plain memcpy block below when the device is not initialized */
#endif
  {
    memcpy(dst, src, n);
  }
  return MPI_SUCCESS;
}
79 
static int classcnt = 0; /* next error class id handed out by MPI_Add_error_class() */
static int codecnt  = 0; /* next error code id handed out by MPI_Add_error_code() */
82 
83 int MPI_Add_error_class(int *cl) {
84   *cl = classcnt++;
85   return MPI_SUCCESS;
86 }
87 
88 int MPI_Add_error_code(int cl, int *co) {
89   if (cl >= classcnt) return MPI_FAILURE;
90   *co = codecnt++;
91   return MPI_SUCCESS;
92 }
93 
94 int MPI_Type_get_envelope(MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner) {
95   int comb = datatype >> 28;
96   switch (comb) {
97   case MPI_COMBINER_NAMED:
98     *num_integers  = 0;
99     *num_addresses = 0;
100     *num_datatypes = 0;
101     *combiner      = comb;
102     break;
103   case MPI_COMBINER_DUP:
104     *num_integers  = 0;
105     *num_addresses = 0;
106     *num_datatypes = 1;
107     *combiner      = comb;
108     break;
109   case MPI_COMBINER_CONTIGUOUS:
110     *num_integers  = 1;
111     *num_addresses = 0;
112     *num_datatypes = 1;
113     *combiner      = comb;
114     break;
115   default: return MPIUni_Abort(MPI_COMM_SELF, 1);
116   }
117   return MPI_SUCCESS;
118 }
119 
/* Decode the constituents packed into a derived-datatype handle.
   Handle layout (as used below): bits 28-31 = combiner, bits 8-19 = count for
   contiguous types, remaining bits identify the base named type.
   Only DUP and CONTIGUOUS combiners are supported; NAMED types have no contents. */
int MPI_Type_get_contents(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int *array_of_integers, MPI_Aint *array_of_addresses, MPI_Datatype *array_of_datatypes) {
  int comb = datatype >> 28;
  switch (comb) {
  case MPI_COMBINER_NAMED: return MPIUni_Abort(MPI_COMM_SELF, 1); /* querying a predefined type is erroneous */
  case MPI_COMBINER_DUP:
    if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_datatypes[0] = datatype & 0x0fffffff; /* strip the combiner bits to recover the duplicated type */
    break;
  case MPI_COMBINER_CONTIGUOUS:
    if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_integers[0]  = (datatype >> 8) & 0xfff;         /* count */
    array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100; /* basic named type (count=1) from which the contiguous type is derived */
    break;
  default: return MPIUni_Abort(MPI_COMM_SELF, 1);
  }
  return MPI_SUCCESS;
}
137 
138 /*
139    Used to set the built-in MPI_TAG_UB attribute
140 */
141 static int Keyval_setup(void) {
142   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
143   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
144   attr[CommIdx(MPI_COMM_SELF)][0].active         = 1;
145   attr[CommIdx(MPI_COMM_SELF)][0].attribute_val  = &mpi_tag_ub;
146   attr_keyval[0].active                          = 1;
147   return MPI_SUCCESS;
148 }
149 
150 int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn, MPI_Delete_function *delete_fn, int *keyval, void *extra_state) {
151   int i, keyid;
152   for (i = 1; i < num_attr; i++) { /* the first attribute is always in use */
153     if (!attr_keyval[i].active) {
154       keyid = i;
155       goto found;
156     }
157   }
158   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD, 1);
159   keyid = num_attr++;
160 
161 found:
162   attr_keyval[keyid].extra_state = extra_state;
163   attr_keyval[keyid].del         = delete_fn;
164   attr_keyval[keyid].active      = 1;
165   *keyval                        = keyid;
166   return MPI_SUCCESS;
167 }
168 
169 /*
170   The reference counting business is here to guard against the following:
171 
172   MPI_Comm_set_attr(comm, keyval, some_attr);
173   MPI_Comm_free_keyval(&keyval);
174   MPI_Comm_free(&comm);
175 
176   Here MPI_Comm_free() will try to destroy all of the attributes of the comm, and hence we
177   should not clear the deleter or extra_state until all communicators that have the attribute
178   set are either freed or have given up their attribute.
179 
180   The attribute reference count is INCREASED in:
181   - MPI_Comm_create_keyval()
182   - MPI_Comm_set_attr()
183 
184   The attribute reference count is DECREASED in:
185   - MPI_Comm_free_keyval()
186   - MPI_Comm_delete_attr() (but only if the comm has the attribute)
187 */
188 static int MPI_Attr_dereference_keyval(int keyval) {
189   if (--(attr_keyval[keyval].active) <= 0) {
190     attr_keyval[keyval].extra_state = 0;
191     attr_keyval[keyval].del         = 0;
192   }
193   return MPI_SUCCESS;
194 }
195 
196 static int MPI_Attr_reference_keyval(int keyval) {
197   ++(attr_keyval[keyval].active);
198   return MPI_SUCCESS;
199 }
200 
201 int MPI_Comm_free_keyval(int *keyval) {
202   int ret;
203 
204   if (*keyval < 0 || *keyval >= num_attr) return MPI_FAILURE;
205   if ((ret = MPI_Attr_dereference_keyval(*keyval))) return ret;
206   *keyval = 0;
207   return MPI_SUCCESS;
208 }
209 
210 int MPI_Comm_set_attr(MPI_Comm comm, int keyval, void *attribute_val) {
211   int idx = CommIdx(comm), ret;
212   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
213   if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;
214 
215   if ((ret = MPI_Comm_delete_attr(comm, keyval))) return ret;
216   if ((ret = MPI_Attr_reference_keyval(keyval))) return ret;
217   attr[idx][keyval].active        = 1;
218   attr[idx][keyval].attribute_val = attribute_val;
219   return MPI_SUCCESS;
220 }
221 
/* Remove keyval's attribute from comm, invoking the user delete callback if one
   was registered, then drop the comm's reference on the keyval.
   A no-op (success) when the attribute is not set on this comm. */
int MPI_Comm_delete_attr(MPI_Comm comm, int keyval) {
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;
  if (attr[idx][keyval].active) {
    int   ret;
    void *save_attribute_val = attr[idx][keyval].attribute_val;

    /* clear the slot BEFORE running the callback, so a callback that calls back
       into MPI_Comm_delete_attr() on the same (comm,keyval) sees it already gone */
    attr[idx][keyval].active        = 0;
    attr[idx][keyval].attribute_val = 0;
    if (attr_keyval[keyval].del) {
      if ((ret = (*(attr_keyval[keyval].del))(comm, keyval, save_attribute_val, attr_keyval[keyval].extra_state))) return ret;
    }
    if ((ret = MPI_Attr_dereference_keyval(keyval))) return ret;
  }
  return MPI_SUCCESS;
}
239 
240 int MPI_Comm_get_attr(MPI_Comm comm, int keyval, void *attribute_val, int *flag) {
241   int idx = CommIdx(comm);
242   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
243   if (!keyval) Keyval_setup();
244   *flag                   = attr[idx][keyval].active;
245   *(void **)attribute_val = attr[idx][keyval].attribute_val;
246   return MPI_SUCCESS;
247 }
248 
249 static char all_comm_names[MAX_COMM][MPI_MAX_OBJECT_NAME] = {"MPI_COMM_SELF", "MPI_COMM_WORLD"};
250 
251 int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen) {
252   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
253   if (!comm_name || !resultlen) return MPI_FAILURE;
254   strncpy(comm_name, all_comm_names[CommIdx(comm)], MPI_MAX_OBJECT_NAME - 1);
255   *resultlen = (int)strlen(comm_name);
256   return MPI_SUCCESS;
257 }
258 
259 int MPI_Comm_set_name(MPI_Comm comm, const char *comm_name) {
260   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
261   if (!comm_name) return MPI_FAILURE;
262   if (strlen(comm_name) > MPI_MAX_OBJECT_NAME - 1) return MPI_FAILURE;
263   strncpy(all_comm_names[CommIdx(comm)], comm_name, MPI_MAX_OBJECT_NAME - 1);
264   return MPI_SUCCESS;
265 }
266 
267 int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) {
268   int j;
269   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
270   for (j = 3; j <= MaxComm; j++) {
271     if (!comm_active[CommIdx(j)]) {
272       comm_active[CommIdx(j)] = 1;
273       *newcomm                = j;
274       return MPI_SUCCESS;
275     }
276   }
277   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
278   *newcomm                       = ++MaxComm;
279   comm_active[CommIdx(*newcomm)] = 1;
280   return MPI_SUCCESS;
281 }
282 
283 int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *out) {
284   int j;
285   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
286   for (j = 3; j <= MaxComm; j++) {
287     if (!comm_active[CommIdx(j)]) {
288       comm_active[CommIdx(j)] = 1;
289       *out                    = j;
290       return MPI_SUCCESS;
291     }
292   }
293   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
294   *out                       = ++MaxComm;
295   comm_active[CommIdx(*out)] = 1;
296   return MPI_SUCCESS;
297 }
298 
299 int MPI_Comm_free(MPI_Comm *comm) {
300   int idx = CommIdx(*comm);
301 
302   if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
303   for (int i = 0; i < num_attr; i++) {
304     int ret = MPI_Comm_delete_attr(*comm, i);
305 
306     if (ret) return ret;
307   }
308   if (*comm >= 3) comm_active[idx] = 0;
309   *comm = 0;
310   return MPI_SUCCESS;
311 }
312 
313 int MPI_Comm_size(MPI_Comm comm, int *size) {
314   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
315   *size = 1;
316   return MPI_SUCCESS;
317 }
318 
319 int MPI_Comm_rank(MPI_Comm comm, int *rank) {
320   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
321   *rank = 0;
322   return MPI_SUCCESS;
323 }
324 
325 int MPIUni_Abort(MPI_Comm comm, int errorcode) {
326   printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
327   return MPI_ERR_NOSUPPORT;
328 }
329 
330 int MPI_Abort(MPI_Comm comm, int errorcode) {
331   abort();
332   return MPI_SUCCESS;
333 }
334 
335 /* --------------------------------------------------------------------------*/
336 
static int MPI_was_initialized = 0; /* set by MPI_Init(), cleared by MPI_Finalize() */
static int MPI_was_finalized   = 0; /* set by MPI_Finalize(), cleared by MPI_Init() */
339 
340 int MPI_Init(int *argc, char ***argv) {
341   if (MPI_was_initialized) return MPI_FAILURE;
342   /* MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so an MPI standard compliant
343      MPIU should have this 'if (MPI_was_finalized) return MPI_FAILURE;' check. We relax it here to make life easier for users
344      of MPIU so that they can do multiple PetscInitialize/Finalize().
345   */
346   /* if (MPI_was_finalized) return MPI_FAILURE; */
347   MPI_was_initialized = 1;
348   MPI_was_finalized   = 0;
349   return MPI_SUCCESS;
350 }
351 
/* Threaded init: the provided thread level is fixed (see MPI_Query_thread()),
   so 'required' is effectively ignored. */
int MPI_Init_thread(int *argc, char ***argv, int required, int *provided) {
  (void)required;
  MPI_Query_thread(provided);
  return MPI_Init(argc, argv);
}
356 
357 int MPI_Query_thread(int *provided) {
358   *provided = MPI_THREAD_FUNNELED;
359   return MPI_SUCCESS;
360 }
361 
/* Shut MPIUNI down: free the built-in communicators (running any attribute
   delete callbacks), warn in debug builds about leaked communicators/attributes/
   keyvals, then reset all module state so a later MPI_Init() starts clean. */
int MPI_Finalize(void) {
  if (MPI_was_finalized || !MPI_was_initialized) return MPI_FAILURE;
  MPI_Comm comm = MPI_COMM_WORLD;
  int      ret  = MPI_Comm_free(&comm);

  if (ret) return ret;
  comm = MPI_COMM_SELF;
  ret  = MPI_Comm_free(&comm);
  if (ret) return ret;
  if (PetscDefined(USE_DEBUG)) {
    /* user communicators (handles >= 3) should all have been freed by now */
    for (int i = 3; i <= MaxComm; ++i) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }

    for (int i = 1; i <= MaxComm; ++i) {
      for (int j = 0; j < num_attr; ++j) {
        if (attr[CommIdx(i)][j].active) printf("MPIUni warning: MPI communicator %d attribute %d was not freed before MPI_Finalize()\n", i, j);
      }
    }

    for (int i = 1; i < num_attr; ++i) {
      if (attr_keyval[i].active) printf("MPIUni warning: MPI attribute %d was not freed before MPI_Finalize()\n", i);
    }
  }

  /* reset counters */
  MaxComm             = 2;
  num_attr            = 1;
  MPI_was_finalized   = 1;
  MPI_was_initialized = 0;
  PETSC_COMM_WORLD    = MPI_COMM_NULL;
  return MPI_SUCCESS;
}
395 
396 int MPI_Initialized(int *flag) {
397   *flag = MPI_was_initialized;
398   return MPI_SUCCESS;
399 }
400 
401 int MPI_Finalized(int *flag) {
402   *flag = MPI_was_finalized;
403   return MPI_SUCCESS;
404 }
405 
406 /* -------------------     Fortran versions of several routines ------------------ */
407 
408 #if defined(PETSC_HAVE_FORTRAN_CAPS)
409 #define mpiunisetmoduleblock_         MPIUNISETMODULEBLOCK
410 #define mpiunisetfortranbasepointers_ MPIUNISETFORTRANBASEPOINTERS
411 #define petsc_mpi_init_               PETSC_MPI_INIT
412 #define petsc_mpi_finalize_           PETSC_MPI_FINALIZE
413 #define petsc_mpi_comm_size_          PETSC_MPI_COMM_SIZE
414 #define petsc_mpi_comm_rank_          PETSC_MPI_COMM_RANK
415 #define petsc_mpi_abort_              PETSC_MPI_ABORT
416 #define petsc_mpi_reduce_             PETSC_MPI_REDUCE
417 #define petsc_mpi_allreduce_          PETSC_MPI_ALLREDUCE
418 #define petsc_mpi_barrier_            PETSC_MPI_BARRIER
419 #define petsc_mpi_bcast_              PETSC_MPI_BCAST
420 #define petsc_mpi_gather_             PETSC_MPI_GATHER
421 #define petsc_mpi_allgather_          PETSC_MPI_ALLGATHER
422 #define petsc_mpi_comm_split_         PETSC_MPI_COMM_SPLIT
423 #define petsc_mpi_scan_               PETSC_MPI_SCAN
424 #define petsc_mpi_send_               PETSC_MPI_SEND
425 #define petsc_mpi_recv_               PETSC_MPI_RECV
426 #define petsc_mpi_reduce_scatter_     PETSC_MPI_REDUCE_SCATTER
427 #define petsc_mpi_irecv_              PETSC_MPI_IRECV
428 #define petsc_mpi_isend_              PETSC_MPI_ISEND
429 #define petsc_mpi_sendrecv_           PETSC_MPI_SENDRECV
430 #define petsc_mpi_test_               PETSC_MPI_TEST
431 #define petsc_mpi_waitall_            PETSC_MPI_WAITALL
432 #define petsc_mpi_waitany_            PETSC_MPI_WAITANY
433 #define petsc_mpi_allgatherv_         PETSC_MPI_ALLGATHERV
434 #define petsc_mpi_alltoallv_          PETSC_MPI_ALLTOALLV
435 #define petsc_mpi_comm_create_        PETSC_MPI_COMM_CREATE
436 #define petsc_mpi_address_            PETSC_MPI_ADDRESS
437 #define petsc_mpi_pack_               PETSC_MPI_PACK
438 #define petsc_mpi_unpack_             PETSC_MPI_UNPACK
439 #define petsc_mpi_pack_size_          PETSC_MPI_PACK_SIZE
440 #define petsc_mpi_type_struct_        PETSC_MPI_TYPE_STRUCT
441 #define petsc_mpi_type_commit_        PETSC_MPI_TYPE_COMMIT
442 #define petsc_mpi_wtime_              PETSC_MPI_WTIME
443 #define petsc_mpi_cancel_             PETSC_MPI_CANCEL
444 #define petsc_mpi_comm_dup_           PETSC_MPI_COMM_DUP
445 #define petsc_mpi_comm_free_          PETSC_MPI_COMM_FREE
446 #define petsc_mpi_get_count_          PETSC_MPI_GET_COUNT
447 #define petsc_mpi_get_processor_name_ PETSC_MPI_GET_PROCESSOR_NAME
448 #define petsc_mpi_initialized_        PETSC_MPI_INITIALIZED
449 #define petsc_mpi_iprobe_             PETSC_MPI_IPROBE
450 #define petsc_mpi_probe_              PETSC_MPI_PROBE
451 #define petsc_mpi_request_free_       PETSC_MPI_REQUEST_FREE
452 #define petsc_mpi_ssend_              PETSC_MPI_SSEND
453 #define petsc_mpi_wait_               PETSC_MPI_WAIT
454 #define petsc_mpi_comm_group_         PETSC_MPI_COMM_GROUP
455 #define petsc_mpi_exscan_             PETSC_MPI_EXSCAN
456 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
457 #define mpiunisetmoduleblock_         mpiunisetmoduleblock
458 #define mpiunisetfortranbasepointers_ mpiunisetfortranbasepointers
459 #define petsc_mpi_init_               petsc_mpi_init
460 #define petsc_mpi_finalize_           petsc_mpi_finalize
461 #define petsc_mpi_comm_size_          petsc_mpi_comm_size
462 #define petsc_mpi_comm_rank_          petsc_mpi_comm_rank
463 #define petsc_mpi_abort_              petsc_mpi_abort
464 #define petsc_mpi_reduce_             petsc_mpi_reduce
465 #define petsc_mpi_allreduce_          petsc_mpi_allreduce
466 #define petsc_mpi_barrier_            petsc_mpi_barrier
467 #define petsc_mpi_bcast_              petsc_mpi_bcast
468 #define petsc_mpi_gather_             petsc_mpi_gather
469 #define petsc_mpi_allgather_          petsc_mpi_allgather
470 #define petsc_mpi_comm_split_         petsc_mpi_comm_split
471 #define petsc_mpi_scan_               petsc_mpi_scan
472 #define petsc_mpi_send_               petsc_mpi_send
473 #define petsc_mpi_recv_               petsc_mpi_recv
474 #define petsc_mpi_reduce_scatter_     petsc_mpi_reduce_scatter
475 #define petsc_mpi_irecv_              petsc_mpi_irecv
476 #define petsc_mpi_isend_              petsc_mpi_isend
477 #define petsc_mpi_sendrecv_           petsc_mpi_sendrecv
478 #define petsc_mpi_test_               petsc_mpi_test
479 #define petsc_mpi_waitall_            petsc_mpi_waitall
480 #define petsc_mpi_waitany_            petsc_mpi_waitany
481 #define petsc_mpi_allgatherv_         petsc_mpi_allgatherv
482 #define petsc_mpi_alltoallv_          petsc_mpi_alltoallv
483 #define petsc_mpi_comm_create_        petsc_mpi_comm_create
484 #define petsc_mpi_address_            petsc_mpi_address
485 #define petsc_mpi_pack_               petsc_mpi_pack
486 #define petsc_mpi_unpack_             petsc_mpi_unpack
487 #define petsc_mpi_pack_size_          petsc_mpi_pack_size
488 #define petsc_mpi_type_struct_        petsc_mpi_type_struct
489 #define petsc_mpi_type_commit_        petsc_mpi_type_commit
490 #define petsc_mpi_wtime_              petsc_mpi_wtime
491 #define petsc_mpi_cancel_             petsc_mpi_cancel
492 #define petsc_mpi_comm_dup_           petsc_mpi_comm_dup
493 #define petsc_mpi_comm_free_          petsc_mpi_comm_free
494 #define petsc_mpi_get_count_          petsc_mpi_get_count
495 #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name
496 #define petsc_mpi_initialized_        petsc_mpi_initialized
497 #define petsc_mpi_iprobe_             petsc_mpi_iprobe
498 #define petsc_mpi_probe_              petsc_mpi_probe
499 #define petsc_mpi_request_free_       petsc_mpi_request_free
500 #define petsc_mpi_ssend_              petsc_mpi_ssend
501 #define petsc_mpi_wait_               petsc_mpi_wait
502 #define petsc_mpi_comm_group_         petsc_mpi_comm_group
503 #define petsc_mpi_exscan_             petsc_mpi_exscan
504 #endif
505 
506 #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
507 #define petsc_mpi_init_               petsc_mpi_init__
508 #define petsc_mpi_finalize_           petsc_mpi_finalize__
509 #define petsc_mpi_comm_size_          petsc_mpi_comm_size__
510 #define petsc_mpi_comm_rank_          petsc_mpi_comm_rank__
511 #define petsc_mpi_abort_              petsc_mpi_abort__
512 #define petsc_mpi_reduce_             petsc_mpi_reduce__
513 #define petsc_mpi_allreduce_          petsc_mpi_allreduce__
514 #define petsc_mpi_barrier_            petsc_mpi_barrier__
515 #define petsc_mpi_bcast_              petsc_mpi_bcast__
516 #define petsc_mpi_gather_             petsc_mpi_gather__
517 #define petsc_mpi_allgather_          petsc_mpi_allgather__
518 #define petsc_mpi_comm_split_         petsc_mpi_comm_split__
519 #define petsc_mpi_scan_               petsc_mpi_scan__
520 #define petsc_mpi_send_               petsc_mpi_send__
521 #define petsc_mpi_recv_               petsc_mpi_recv__
522 #define petsc_mpi_reduce_scatter_     petsc_mpi_reduce_scatter__
523 #define petsc_mpi_irecv_              petsc_mpi_irecv__
524 #define petsc_mpi_isend_              petsc_mpi_isend__
525 #define petsc_mpi_sendrecv_           petsc_mpi_sendrecv__
526 #define petsc_mpi_test_               petsc_mpi_test__
527 #define petsc_mpi_waitall_            petsc_mpi_waitall__
528 #define petsc_mpi_waitany_            petsc_mpi_waitany__
529 #define petsc_mpi_allgatherv_         petsc_mpi_allgatherv__
530 #define petsc_mpi_alltoallv_          petsc_mpi_alltoallv__
531 #define petsc_mpi_comm_create_        petsc_mpi_comm_create__
532 #define petsc_mpi_address_            petsc_mpi_address__
533 #define petsc_mpi_pack_               petsc_mpi_pack__
534 #define petsc_mpi_unpack_             petsc_mpi_unpack__
535 #define petsc_mpi_pack_size_          petsc_mpi_pack_size__
536 #define petsc_mpi_type_struct_        petsc_mpi_type_struct__
537 #define petsc_mpi_type_commit_        petsc_mpi_type_commit__
538 #define petsc_mpi_wtime_              petsc_mpi_wtime__
539 #define petsc_mpi_cancel_             petsc_mpi_cancel__
540 #define petsc_mpi_comm_dup_           petsc_mpi_comm_dup__
541 #define petsc_mpi_comm_free_          petsc_mpi_comm_free__
542 #define petsc_mpi_get_count_          petsc_mpi_get_count__
543 #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name__
544 #define petsc_mpi_initialized_        petsc_mpi_initialized__
545 #define petsc_mpi_iprobe_             petsc_mpi_iprobe__
546 #define petsc_mpi_probe_              petsc_mpi_probe__
547 #define petsc_mpi_request_free_       petsc_mpi_request_free__
548 #define petsc_mpi_ssend_              petsc_mpi_ssend__
549 #define petsc_mpi_wait_               petsc_mpi_wait__
550 #define petsc_mpi_comm_group_         petsc_mpi_comm_group__
551 #define petsc_mpi_exscan_             petsc_mpi_exscan__
552 #endif
553 
554 /* Do not build fortran interface if MPI namespace collision is to be avoided */
555 #if defined(PETSC_HAVE_FORTRAN)
556 
/* Defined in Fortran (the mpiuni module); initializes the Fortran-side common block. */
PETSC_EXTERN void mpiunisetmoduleblock_(void);

/* Records Fortran's MPI_IN_PLACE sentinel address so MPIUNI_Memcpy() can recognize it. */
PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place) {
  MPIUNIF_mpi_in_place = f_mpi_in_place;
}

/* Fortran MPI_INIT: wire up the Fortran module state, then run the C MPI_Init(). */
PETSC_EXTERN void petsc_mpi_init_(int *ierr) {
  mpiunisetmoduleblock_();
  *ierr = MPI_Init((int *)0, (char ***)0);
}

/* Fortran MPI_FINALIZE wrapper. */
PETSC_EXTERN void petsc_mpi_finalize_(int *ierr) {
  *ierr = MPI_Finalize();
}
571 
572 PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm, int *size, int *ierr) {
573   *size = 1;
574   *ierr = 0;
575 }
576 
/* ---- Fortran stubs for the remaining MPI calls. Three patterns appear below:
   (1) forward to the C implementation (which in MPIUNI is often a macro),
   (2) succeed as a no-op (operations that are trivial with one rank), or
   (3) call MPIUni_Abort() for operations MPIUNI does not support.
   All receive their Fortran arguments by reference. ---- */

/* The sole process is rank 0. */
PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm, int *rank, int *ierr) {
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

/* With one rank every split is the communicator itself. */
PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm, int *color, int *key, MPI_Comm *newcomm, int *ierr) {
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

/* Terminates the process; *ierr is never actually reached. */
PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm, int *errorcode, int *ierr) {
  abort();
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *root, int *comm, int *ierr) {
  *ierr = MPI_Reduce(sendbuf, recvbuf, *count, *datatype, *op, *root, *comm);
}

PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, *datatype, *op, *comm);
}

/* Barrier and broadcast are no-ops with a single rank. */
PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_bcast_(void *buf, int *count, int *datatype, int *root, int *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): rcount/rdatatype are passed as pointers, unlike *scount/*sdatatype;
   presumably the MPIUNI MPI_Gather macro ignores those arguments — confirm against mpiuni's mpi.h. */
PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root, int *comm, int *ierr) {
  *ierr = MPI_Gather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *root, *comm);
}

/* NOTE(review): same pointer-passing pattern for rcount/rdatatype as petsc_mpi_gather_. */
PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *comm, int *ierr) {
  *ierr = MPI_Allgather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *comm);
}

/* Inclusive scan over one rank is a copy of the input. */
PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*count) * MPI_sizeof(*datatype));
}

/* Point-to-point with no partner process: unsupported. */
PETSC_EXTERN void petsc_mpi_send_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* NOTE(review): status is declared 'int', not 'int *', unlike the other stubs;
   harmless since this always aborts, but looks like it expects a pointer — confirm. */
PETSC_EXTERN void petsc_mpi_recv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int status, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf, void *recvbuf, int *recvcounts, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_irecv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_isend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *request, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Self-sendrecv is a copy; the receive-side count/type/status are unused. */
PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf, int *sendcount, int *sendtype, int *dest, int *sendtag, void *recvbuf, int *recvcount, int *recvtype, int *source, int *recvtag, int *comm, int *status, int *ierr) {
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*sendcount) * MPI_sizeof(*sendtype));
}

PETSC_EXTERN void petsc_mpi_test_(int *request, int *flag, int *status, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Waits trivially succeed: no request can be outstanding with one rank. */
PETSC_EXTERN void petsc_mpi_waitall_(int *count, int *array_of_requests, int *array_of_statuses, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_waitany_(int *count, int *array_of_requests, int *index, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf, int *sendcount, int *sendtype, void *recvbuf, int *recvcounts, int *displs, int *recvtype, int *comm, int *ierr) {
  *ierr = MPI_Allgatherv(sendbuf, *sendcount, *sendtype, recvbuf, recvcounts, displs, *recvtype, *comm);
}

PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf, int *sendcounts, int *sdispls, int *sendtype, void *recvbuf, int *recvcounts, int *rdispls, int *recvtype, int *comm, int *ierr) {
  *ierr = MPI_Alltoallv(sendbuf, sendcounts, sdispls, *sendtype, recvbuf, recvcounts, rdispls, *recvtype, *comm);
}

/* NOTE(review): returns the input comm itself, unlike the C MPI_Comm_create()
   above which allocates a fresh handle — presumably deliberate for Fortran; confirm. */
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm, int *group, int *newcomm, int *ierr) {
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

/* The "address" of a location is simply its pointer value. */
PETSC_EXTERN void petsc_mpi_address_(void *location, MPI_Aint *address, int *ierr) {
  *address = (MPI_Aint)((char *)location);
  *ierr    = MPI_SUCCESS;
}

/* Pack/unpack and general datatype construction are unsupported. */
PETSC_EXTERN void petsc_mpi_pack_(void *inbuf, int *incount, int *datatype, void *outbuf, int *outsize, int *position, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf, int *insize, int *position, void *outbuf, int *outcount, int *datatype, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_pack_size_(int *incount, int *datatype, int *comm, int *size, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_type_struct_(int *count, int *array_of_blocklengths, int *array_of_displaments, int *array_of_types, int *newtype, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* No clock provided; always reports time 0. */
double petsc_mpi_wtime_(void) {
  return 0.0;
}

PETSC_EXTERN void petsc_mpi_cancel_(int *request, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): returns the input comm itself, unlike the C MPI_Comm_dup() above. */
PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm, int *out, int *ierr) {
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_free_(int *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_get_count_(int *status, int *datatype, int *count, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Fixed processor name "localhost" (9 chars, no NUL copied; Fortran strings
   are length-delimited). The hidden Fortran length argument 'len' is unused. */
PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name, int *result_len, int *ierr, PETSC_FORTRAN_CHARLEN_T len) {
  MPIUNI_Memcpy(name, "localhost", 9 * sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_initialized_(int *flag, int *ierr) {
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}

/* Probe stubs succeed without setting flag/status (no messages can exist). */
PETSC_EXTERN void petsc_mpi_iprobe_(int *source, int *tag, int *comm, int *glag, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_probe_(int *source, int *tag, int *comm, int *flag, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_request_free_(int *request, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_ssend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_wait_(int *request, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_group_(int *comm, int *group, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* Exclusive scan over one rank leaves recvbuf untouched (no lower-ranked data). */
PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}
753 
754 #endif /* PETSC_HAVE_FORTRAN */
755 
756 #if defined(__cplusplus)
757 }
758 #endif
759