xref: /petsc/src/sys/mpiuni/mpi.c (revision a69119a591a03a9d906b29c0a4e9802e4d7c9795)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include <petscsys.h>
6 #if !defined(MPIUNI_H)
7 #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9 
10 #include <petsc/private/petscimpl.h> /* for PetscCUPMInitialized */
11 
12 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
13 #include <petscdevice.h>
14 #endif
15 
16 #define MPI_SUCCESS 0
17 #define MPI_FAILURE 1
18 
19 void *MPIUNI_TMP = NULL;
20 
/*
       With MPI Uni there are exactly four distinct communicators:
    MPI_COMM_SELF, MPI_COMM_WORLD, and an MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)

    MPI_COMM_SELF and MPI_COMM_WORLD are MPI_Comm_free()'d in MPI_Finalize(), but in general with PETSc
    the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).

*/
29 #define MAX_ATTR 256
30 #define MAX_COMM 128
31 
/* Per-(communicator, keyval) attribute storage slot */
typedef struct {
  void *attribute_val;
  int   active;
} MPI_Attr;

/* Bookkeeping for a keyval created by MPI_Comm_create_keyval() */
typedef struct {
  void                *extra_state;
  MPI_Delete_function *del;
  int                  active; /* Is this keyval in use by some comm? */
} MPI_Attr_keyval;

static MPI_Attr_keyval attr_keyval[MAX_ATTR];
static MPI_Attr        attr[MAX_COMM][MAX_ATTR];
static int             comm_active[MAX_COMM]; /* Boolean array indicating which comms are in use */
static int             mpi_tag_ub           = 100000000;
static int             num_attr             = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
static int             MaxComm              = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
static void           *MPIUNIF_mpi_in_place = 0; /* address of the Fortran MPI_IN_PLACE sentinel, registered by mpiunisetfortranbasepointers_() */

#define CommIdx(comm) ((comm)-1) /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
52 
53 #if defined(__cplusplus)
54 extern "C" {
55 #endif
56 
57 /*
58    To avoid problems with prototypes to the system memcpy() it is duplicated here
59 */
/*
   Copy n bytes from src to dst.  If either pointer is the MPI_IN_PLACE
   sentinel (C or Fortran variant), no copy is needed and the call is a no-op.
   When CUDA or HIP has been initialized the copy goes through the device
   runtime so device buffers are handled; the copy is synchronous, matching
   MPI's blocking semantics.  Returns MPI_SUCCESS, or MPI_FAILURE if the
   device copy fails.
*/
int MPIUNI_Memcpy(void *dst, const void *src, int n) {
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (!n) return MPI_SUCCESS;

    /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
#if defined(PETSC_HAVE_CUDA)
  if (PetscDeviceInitialized(PETSC_DEVICE_CUDA)) {
    cudaError_t cerr = cudaMemcpy(dst, src, n, cudaMemcpyDefault);
    if (cerr != cudaSuccess) return MPI_FAILURE;
  } else
#elif defined(PETSC_HAVE_HIP)
  if (PetscDeviceInitialized(PETSC_DEVICE_HIP)) {
    hipError_t cerr = hipMemcpy(dst, src, n, hipMemcpyDefault);
    if (cerr != hipSuccess) return MPI_FAILURE;
  } else
#endif
  {
    memcpy(dst, src, n);
  }
  return MPI_SUCCESS;
}
82 
83 static int classcnt = 0;
84 static int codecnt  = 0;
85 
86 int MPI_Add_error_class(int *cl) {
87   *cl = classcnt++;
88   return MPI_SUCCESS;
89 }
90 
91 int MPI_Add_error_code(int cl, int *co) {
92   if (cl >= classcnt) return MPI_FAILURE;
93   *co = codecnt++;
94   return MPI_SUCCESS;
95 }
96 
97 int MPI_Type_get_envelope(MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner) {
98   int comb = datatype >> 28;
99   switch (comb) {
100   case MPI_COMBINER_NAMED:
101     *num_integers  = 0;
102     *num_addresses = 0;
103     *num_datatypes = 0;
104     *combiner      = comb;
105     break;
106   case MPI_COMBINER_DUP:
107     *num_integers  = 0;
108     *num_addresses = 0;
109     *num_datatypes = 1;
110     *combiner      = comb;
111     break;
112   case MPI_COMBINER_CONTIGUOUS:
113     *num_integers  = 1;
114     *num_addresses = 0;
115     *num_datatypes = 1;
116     *combiner      = comb;
117     break;
118   default: return MPIUni_Abort(MPI_COMM_SELF, 1);
119   }
120   return MPI_SUCCESS;
121 }
122 
/*
   Return the constituents of a derived datatype, decoding MPIUNI's packed
   datatype representation: bits 28-31 hold the combiner; for contiguous types
   bits 8-19 hold the element count and the remaining bits identify the
   underlying basic type.  Named (predefined) types have no contents.
*/
int MPI_Type_get_contents(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int *array_of_integers, MPI_Aint *array_of_addresses, MPI_Datatype *array_of_datatypes) {
  int comb = datatype >> 28;
  switch (comb) {
  case MPI_COMBINER_NAMED: return MPIUni_Abort(MPI_COMM_SELF, 1); /* predefined types: calling this is an error */
  case MPI_COMBINER_DUP:
    if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_datatypes[0] = datatype & 0x0fffffff; /* strip the combiner bits to recover the duplicated type */
    break;
  case MPI_COMBINER_CONTIGUOUS:
    if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_integers[0]  = (datatype >> 8) & 0xfff;         /* count */
    array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100; /* basic named type (count=1) from which the contiguous type is derived */
    break;
  default: return MPIUni_Abort(MPI_COMM_SELF, 1);
  }
  return MPI_SUCCESS;
}
140 
141 /*
142    Used to set the built-in MPI_TAG_UB attribute
143 */
144 static int Keyval_setup(void) {
145   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
146   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
147   attr[CommIdx(MPI_COMM_SELF)][0].active         = 1;
148   attr[CommIdx(MPI_COMM_SELF)][0].attribute_val  = &mpi_tag_ub;
149   attr_keyval[0].active                          = 1;
150   return MPI_SUCCESS;
151 }
152 
153 int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn, MPI_Delete_function *delete_fn, int *keyval, void *extra_state) {
154   int i, keyid;
155   for (i = 1; i < num_attr; i++) { /* the first attribute is always in use */
156     if (!attr_keyval[i].active) {
157       keyid = i;
158       goto found;
159     }
160   }
161   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD, 1);
162   keyid = num_attr++;
163 
164 found:
165   attr_keyval[keyid].extra_state = extra_state;
166   attr_keyval[keyid].del         = delete_fn;
167   attr_keyval[keyid].active      = 1;
168   *keyval                        = keyid;
169   return MPI_SUCCESS;
170 }
171 
172 int MPI_Comm_free_keyval(int *keyval) {
173   attr_keyval[*keyval].extra_state = 0;
174   attr_keyval[*keyval].del         = 0;
175   attr_keyval[*keyval].active      = 0;
176   *keyval                          = 0;
177   return MPI_SUCCESS;
178 }
179 
180 int MPI_Comm_set_attr(MPI_Comm comm, int keyval, void *attribute_val) {
181   int idx = CommIdx(comm);
182   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
183   attr[idx][keyval].active        = 1;
184   attr[idx][keyval].attribute_val = attribute_val;
185   return MPI_SUCCESS;
186 }
187 
188 int MPI_Comm_delete_attr(MPI_Comm comm, int keyval) {
189   int idx = CommIdx(comm);
190   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
191   if (attr[idx][keyval].active && attr_keyval[keyval].del) {
192     void *save_attribute_val        = attr[idx][keyval].attribute_val;
193     attr[idx][keyval].active        = 0;
194     attr[idx][keyval].attribute_val = 0;
195     (*(attr_keyval[keyval].del))(comm, keyval, save_attribute_val, attr_keyval[keyval].extra_state);
196   }
197   return MPI_SUCCESS;
198 }
199 
200 int MPI_Comm_get_attr(MPI_Comm comm, int keyval, void *attribute_val, int *flag) {
201   int idx = CommIdx(comm);
202   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
203   if (!keyval) Keyval_setup();
204   *flag                   = attr[idx][keyval].active;
205   *(void **)attribute_val = attr[idx][keyval].attribute_val;
206   return MPI_SUCCESS;
207 }
208 
209 static char all_comm_names[MAX_COMM][MPI_MAX_OBJECT_NAME] = {"MPI_COMM_SELF", "MPI_COMM_WORLD"};
210 
211 int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen) {
212   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
213   if (!comm_name || !resultlen) return MPI_FAILURE;
214   strncpy(comm_name, all_comm_names[CommIdx(comm)], MPI_MAX_OBJECT_NAME - 1);
215   *resultlen = (int)strlen(comm_name);
216   return MPI_SUCCESS;
217 }
218 
219 int MPI_Comm_set_name(MPI_Comm comm, const char *comm_name) {
220   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
221   if (!comm_name) return MPI_FAILURE;
222   if (strlen(comm_name) > MPI_MAX_OBJECT_NAME - 1) return MPI_FAILURE;
223   strncpy(all_comm_names[CommIdx(comm)], comm_name, MPI_MAX_OBJECT_NAME - 1);
224   return MPI_SUCCESS;
225 }
226 
227 int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) {
228   int j;
229   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
230   for (j = 3; j <= MaxComm; j++) {
231     if (!comm_active[CommIdx(j)]) {
232       comm_active[CommIdx(j)] = 1;
233       *newcomm                = j;
234       return MPI_SUCCESS;
235     }
236   }
237   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
238   *newcomm                       = ++MaxComm;
239   comm_active[CommIdx(*newcomm)] = 1;
240   return MPI_SUCCESS;
241 }
242 
243 int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *out) {
244   int j;
245   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
246   for (j = 3; j <= MaxComm; j++) {
247     if (!comm_active[CommIdx(j)]) {
248       comm_active[CommIdx(j)] = 1;
249       *out                    = j;
250       return MPI_SUCCESS;
251     }
252   }
253   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
254   *out                       = ++MaxComm;
255   comm_active[CommIdx(*out)] = 1;
256   return MPI_SUCCESS;
257 }
258 
259 int MPI_Comm_free(MPI_Comm *comm) {
260   int i;
261   int idx = CommIdx(*comm);
262 
263   if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
264   for (i = 0; i < num_attr; i++) {
265     if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm, i, attr[idx][i].attribute_val, attr_keyval[i].extra_state);
266     attr[idx][i].active        = 0;
267     attr[idx][i].attribute_val = 0;
268   }
269   if (*comm >= 3) comm_active[idx] = 0;
270   *comm = 0;
271   return MPI_SUCCESS;
272 }
273 
274 int MPI_Comm_size(MPI_Comm comm, int *size) {
275   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
276   *size = 1;
277   return MPI_SUCCESS;
278 }
279 
280 int MPI_Comm_rank(MPI_Comm comm, int *rank) {
281   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
282   *rank = 0;
283   return MPI_SUCCESS;
284 }
285 
/*
   Called by MPIUNI stubs for operations that cannot be supported with a
   single process; prints a diagnostic and returns MPI_ERR_NOSUPPORT rather
   than terminating.  comm and errorcode are ignored.
*/
int MPIUni_Abort(MPI_Comm comm, int errorcode) {
  printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
  return MPI_ERR_NOSUPPORT;
}
290 
/* Terminate the (single-process) program immediately; comm and errorcode are ignored */
int MPI_Abort(MPI_Comm comm, int errorcode) {
  abort();
  return MPI_SUCCESS; /* unreachable */
}
295 
296 /* --------------------------------------------------------------------------*/
297 
/* Lifecycle flags; both are tracked so repeated Init/Finalize cycles work (see below) */
static int MPI_was_initialized = 0;
static int MPI_was_finalized   = 0;

/*
   Initialize MPIUNI.  Fails if called while already initialized.
   argc/argv are accepted for signature compatibility and ignored.
*/
int MPI_Init(int *argc, char ***argv) {
  if (MPI_was_initialized) return MPI_FAILURE;
  /* MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so an MPI standard compliant
     MPIU should have this 'if (MPI_was_finalized) return MPI_FAILURE;' check. We relax it here to make life easier for users
     of MPIU so that they can do multiple PetscInitialize/Finalize().
  */
  /* if (MPI_was_finalized) return MPI_FAILURE; */
  MPI_was_initialized = 1;
  MPI_was_finalized   = 0;
  return MPI_SUCCESS;
}
312 
/*
   Initialize and report the provided threading level; MPIUNI always provides
   MPI_THREAD_FUNNELED regardless of what was required.
*/
int MPI_Init_thread(int *argc, char ***argv, int required, int *provided) {
  (void)MPI_Query_thread(provided);
  return MPI_Init(argc, argv);
}
317 
/* MPIUNI supports MPI_THREAD_FUNNELED: only the main thread makes MPI calls */
int MPI_Query_thread(int *provided) {
  *provided = MPI_THREAD_FUNNELED;
  return MPI_SUCCESS;
}
322 
/*
   Finalize MPIUNI: free the two built-in communicators (running their
   attribute delete callbacks), warn in debug builds about user communicators
   that were never freed, and reset all counters so MPI_Init() may be called
   again (a deliberate extension to the MPI standard; see MPI_Init()).
*/
int MPI_Finalize(void) {
  MPI_Comm comm;
  if (MPI_was_finalized) return MPI_FAILURE;
  if (!MPI_was_initialized) return MPI_FAILURE;
  comm = MPI_COMM_WORLD;
  MPI_Comm_free(&comm);
  comm = MPI_COMM_SELF;
  MPI_Comm_free(&comm);
#if defined(PETSC_USE_DEBUG)
  {
    int i;
    /* ids >= 3 are user-created communicators; they should all be freed by now */
    for (i = 3; i <= MaxComm; i++) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }
  }
#endif
  /* reset counters */
  MaxComm             = 2;
  num_attr            = 1;
  MPI_was_finalized   = 1;
  MPI_was_initialized = 0;
  PETSC_COMM_WORLD    = MPI_COMM_NULL;
  return MPI_SUCCESS;
}
347 
/* Report whether MPI_Init() has been called (and not since finalized) */
int MPI_Initialized(int *flag) {
  *flag = MPI_was_initialized;
  return MPI_SUCCESS;
}

/* Report whether MPI_Finalize() has completed */
int MPI_Finalized(int *flag) {
  *flag = MPI_was_finalized;
  return MPI_SUCCESS;
}
357 
358 /* -------------------     Fortran versions of several routines ------------------ */
359 
360 #if defined(PETSC_HAVE_FORTRAN_CAPS)
361 #define mpiunisetmoduleblock_         MPIUNISETMODULEBLOCK
362 #define mpiunisetfortranbasepointers_ MPIUNISETFORTRANBASEPOINTERS
363 #define petsc_mpi_init_               PETSC_MPI_INIT
364 #define petsc_mpi_finalize_           PETSC_MPI_FINALIZE
365 #define petsc_mpi_comm_size_          PETSC_MPI_COMM_SIZE
366 #define petsc_mpi_comm_rank_          PETSC_MPI_COMM_RANK
367 #define petsc_mpi_abort_              PETSC_MPI_ABORT
368 #define petsc_mpi_reduce_             PETSC_MPI_REDUCE
369 #define petsc_mpi_allreduce_          PETSC_MPI_ALLREDUCE
370 #define petsc_mpi_barrier_            PETSC_MPI_BARRIER
371 #define petsc_mpi_bcast_              PETSC_MPI_BCAST
372 #define petsc_mpi_gather_             PETSC_MPI_GATHER
373 #define petsc_mpi_allgather_          PETSC_MPI_ALLGATHER
374 #define petsc_mpi_comm_split_         PETSC_MPI_COMM_SPLIT
375 #define petsc_mpi_scan_               PETSC_MPI_SCAN
376 #define petsc_mpi_send_               PETSC_MPI_SEND
377 #define petsc_mpi_recv_               PETSC_MPI_RECV
378 #define petsc_mpi_reduce_scatter_     PETSC_MPI_REDUCE_SCATTER
379 #define petsc_mpi_irecv_              PETSC_MPI_IRECV
380 #define petsc_mpi_isend_              PETSC_MPI_ISEND
381 #define petsc_mpi_sendrecv_           PETSC_MPI_SENDRECV
382 #define petsc_mpi_test_               PETSC_MPI_TEST
383 #define petsc_mpi_waitall_            PETSC_MPI_WAITALL
384 #define petsc_mpi_waitany_            PETSC_MPI_WAITANY
385 #define petsc_mpi_allgatherv_         PETSC_MPI_ALLGATHERV
386 #define petsc_mpi_alltoallv_          PETSC_MPI_ALLTOALLV
387 #define petsc_mpi_comm_create_        PETSC_MPI_COMM_CREATE
388 #define petsc_mpi_address_            PETSC_MPI_ADDRESS
389 #define petsc_mpi_pack_               PETSC_MPI_PACK
390 #define petsc_mpi_unpack_             PETSC_MPI_UNPACK
391 #define petsc_mpi_pack_size_          PETSC_MPI_PACK_SIZE
392 #define petsc_mpi_type_struct_        PETSC_MPI_TYPE_STRUCT
393 #define petsc_mpi_type_commit_        PETSC_MPI_TYPE_COMMIT
394 #define petsc_mpi_wtime_              PETSC_MPI_WTIME
395 #define petsc_mpi_cancel_             PETSC_MPI_CANCEL
396 #define petsc_mpi_comm_dup_           PETSC_MPI_COMM_DUP
397 #define petsc_mpi_comm_free_          PETSC_MPI_COMM_FREE
398 #define petsc_mpi_get_count_          PETSC_MPI_GET_COUNT
399 #define petsc_mpi_get_processor_name_ PETSC_MPI_GET_PROCESSOR_NAME
400 #define petsc_mpi_initialized_        PETSC_MPI_INITIALIZED
401 #define petsc_mpi_iprobe_             PETSC_MPI_IPROBE
402 #define petsc_mpi_probe_              PETSC_MPI_PROBE
403 #define petsc_mpi_request_free_       PETSC_MPI_REQUEST_FREE
404 #define petsc_mpi_ssend_              PETSC_MPI_SSEND
405 #define petsc_mpi_wait_               PETSC_MPI_WAIT
406 #define petsc_mpi_comm_group_         PETSC_MPI_COMM_GROUP
407 #define petsc_mpi_exscan_             PETSC_MPI_EXSCAN
408 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
409 #define mpiunisetmoduleblock_         mpiunisetmoduleblock
410 #define mpiunisetfortranbasepointers_ mpiunisetfortranbasepointers
411 #define petsc_mpi_init_               petsc_mpi_init
412 #define petsc_mpi_finalize_           petsc_mpi_finalize
413 #define petsc_mpi_comm_size_          petsc_mpi_comm_size
414 #define petsc_mpi_comm_rank_          petsc_mpi_comm_rank
415 #define petsc_mpi_abort_              petsc_mpi_abort
416 #define petsc_mpi_reduce_             petsc_mpi_reduce
417 #define petsc_mpi_allreduce_          petsc_mpi_allreduce
418 #define petsc_mpi_barrier_            petsc_mpi_barrier
419 #define petsc_mpi_bcast_              petsc_mpi_bcast
420 #define petsc_mpi_gather_             petsc_mpi_gather
421 #define petsc_mpi_allgather_          petsc_mpi_allgather
422 #define petsc_mpi_comm_split_         petsc_mpi_comm_split
423 #define petsc_mpi_scan_               petsc_mpi_scan
424 #define petsc_mpi_send_               petsc_mpi_send
425 #define petsc_mpi_recv_               petsc_mpi_recv
426 #define petsc_mpi_reduce_scatter_     petsc_mpi_reduce_scatter
427 #define petsc_mpi_irecv_              petsc_mpi_irecv
428 #define petsc_mpi_isend_              petsc_mpi_isend
429 #define petsc_mpi_sendrecv_           petsc_mpi_sendrecv
430 #define petsc_mpi_test_               petsc_mpi_test
431 #define petsc_mpi_waitall_            petsc_mpi_waitall
432 #define petsc_mpi_waitany_            petsc_mpi_waitany
433 #define petsc_mpi_allgatherv_         petsc_mpi_allgatherv
434 #define petsc_mpi_alltoallv_          petsc_mpi_alltoallv
435 #define petsc_mpi_comm_create_        petsc_mpi_comm_create
436 #define petsc_mpi_address_            petsc_mpi_address
437 #define petsc_mpi_pack_               petsc_mpi_pack
438 #define petsc_mpi_unpack_             petsc_mpi_unpack
439 #define petsc_mpi_pack_size_          petsc_mpi_pack_size
440 #define petsc_mpi_type_struct_        petsc_mpi_type_struct
441 #define petsc_mpi_type_commit_        petsc_mpi_type_commit
442 #define petsc_mpi_wtime_              petsc_mpi_wtime
443 #define petsc_mpi_cancel_             petsc_mpi_cancel
444 #define petsc_mpi_comm_dup_           petsc_mpi_comm_dup
445 #define petsc_mpi_comm_free_          petsc_mpi_comm_free
446 #define petsc_mpi_get_count_          petsc_mpi_get_count
447 #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name
448 #define petsc_mpi_initialized_        petsc_mpi_initialized
449 #define petsc_mpi_iprobe_             petsc_mpi_iprobe
450 #define petsc_mpi_probe_              petsc_mpi_probe
451 #define petsc_mpi_request_free_       petsc_mpi_request_free
452 #define petsc_mpi_ssend_              petsc_mpi_ssend
453 #define petsc_mpi_wait_               petsc_mpi_wait
454 #define petsc_mpi_comm_group_         petsc_mpi_comm_group
455 #define petsc_mpi_exscan_             petsc_mpi_exscan
456 #endif
457 
458 #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
459 #define petsc_mpi_init_               petsc_mpi_init__
460 #define petsc_mpi_finalize_           petsc_mpi_finalize__
461 #define petsc_mpi_comm_size_          petsc_mpi_comm_size__
462 #define petsc_mpi_comm_rank_          petsc_mpi_comm_rank__
463 #define petsc_mpi_abort_              petsc_mpi_abort__
464 #define petsc_mpi_reduce_             petsc_mpi_reduce__
465 #define petsc_mpi_allreduce_          petsc_mpi_allreduce__
466 #define petsc_mpi_barrier_            petsc_mpi_barrier__
467 #define petsc_mpi_bcast_              petsc_mpi_bcast__
468 #define petsc_mpi_gather_             petsc_mpi_gather__
469 #define petsc_mpi_allgather_          petsc_mpi_allgather__
470 #define petsc_mpi_comm_split_         petsc_mpi_comm_split__
471 #define petsc_mpi_scan_               petsc_mpi_scan__
472 #define petsc_mpi_send_               petsc_mpi_send__
473 #define petsc_mpi_recv_               petsc_mpi_recv__
474 #define petsc_mpi_reduce_scatter_     petsc_mpi_reduce_scatter__
475 #define petsc_mpi_irecv_              petsc_mpi_irecv__
476 #define petsc_mpi_isend_              petsc_mpi_isend__
477 #define petsc_mpi_sendrecv_           petsc_mpi_sendrecv__
478 #define petsc_mpi_test_               petsc_mpi_test__
479 #define petsc_mpi_waitall_            petsc_mpi_waitall__
480 #define petsc_mpi_waitany_            petsc_mpi_waitany__
481 #define petsc_mpi_allgatherv_         petsc_mpi_allgatherv__
482 #define petsc_mpi_alltoallv_          petsc_mpi_alltoallv__
483 #define petsc_mpi_comm_create_        petsc_mpi_comm_create__
484 #define petsc_mpi_address_            petsc_mpi_address__
485 #define petsc_mpi_pack_               petsc_mpi_pack__
486 #define petsc_mpi_unpack_             petsc_mpi_unpack__
487 #define petsc_mpi_pack_size_          petsc_mpi_pack_size__
488 #define petsc_mpi_type_struct_        petsc_mpi_type_struct__
489 #define petsc_mpi_type_commit_        petsc_mpi_type_commit__
490 #define petsc_mpi_wtime_              petsc_mpi_wtime__
491 #define petsc_mpi_cancel_             petsc_mpi_cancel__
492 #define petsc_mpi_comm_dup_           petsc_mpi_comm_dup__
493 #define petsc_mpi_comm_free_          petsc_mpi_comm_free__
494 #define petsc_mpi_get_count_          petsc_mpi_get_count__
495 #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name__
496 #define petsc_mpi_initialized_        petsc_mpi_initialized__
497 #define petsc_mpi_iprobe_             petsc_mpi_iprobe__
498 #define petsc_mpi_probe_              petsc_mpi_probe__
499 #define petsc_mpi_request_free_       petsc_mpi_request_free__
500 #define petsc_mpi_ssend_              petsc_mpi_ssend__
501 #define petsc_mpi_wait_               petsc_mpi_wait__
502 #define petsc_mpi_comm_group_         petsc_mpi_comm_group__
503 #define petsc_mpi_exscan_             petsc_mpi_exscan__
504 #endif
505 
/* Do not build the Fortran interface if MPI namespace collision is to be avoided */
507 #if defined(PETSC_HAVE_FORTRAN)
508 
/* Defined in the Fortran interface library; initializes the Fortran MPIUNI module block */
PETSC_EXTERN void mpiunisetmoduleblock_(void);

/* Records the address of the Fortran MPI_IN_PLACE sentinel so MPIUNI_Memcpy() can recognize it */
PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place) {
  MPIUNIF_mpi_in_place = f_mpi_in_place;
}

/* Fortran binding of MPI_Init(); argc/argv are not available from Fortran */
PETSC_EXTERN void petsc_mpi_init_(int *ierr) {
  mpiunisetmoduleblock_();
  *ierr = MPI_Init((int *)0, (char ***)0);
}

/* Fortran binding of MPI_Finalize() */
PETSC_EXTERN void petsc_mpi_finalize_(int *ierr) {
  *ierr = MPI_Finalize();
}
523 
/* Always one process */
PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm, int *size, int *ierr) {
  *size = 1;
  *ierr = 0;
}

/* The sole process has rank 0 */
PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm, int *rank, int *ierr) {
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

/* With one process a split returns the communicator itself */
PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm, int *color, int *key, MPI_Comm *newcomm, int *ierr) {
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

/* Terminate the program; comm and errorcode are ignored */
PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm, int *errorcode, int *ierr) {
  abort();
  *ierr = MPI_SUCCESS;
}
543 
/* Reductions degenerate to a copy with one process (handled by the MPI_Reduce macro) */
PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *root, int *comm, int *ierr) {
  *ierr = MPI_Reduce(sendbuf, recvbuf, *count, *datatype, *op, *root, *comm);
}

PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, *datatype, *op, *comm);
}

/* A barrier is a no-op with one process */
PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* Broadcast is a no-op: the root already owns the data */
PETSC_EXTERN void petsc_mpi_bcast_(void *buf, int *count, int *datatype, int *root, int *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): rcount/rdatatype are forwarded as pointers, not dereferenced; presumably the
   receive-side arguments are discarded by the single-process MPI_Gather macro -- confirm
   against the MPIUNI mpi.h macro definitions */
PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root, int *comm, int *ierr) {
  *ierr = MPI_Gather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *root, *comm);
}

/* Same pointer-forwarding note as petsc_mpi_gather_() above */
PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *comm, int *ierr) {
  *ierr = MPI_Allgather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *comm);
}

/* Inclusive scan of one process is a copy of sendbuf into recvbuf */
PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*count) * MPI_sizeof(*datatype));
}

/* Point-to-point sends cannot match with a single process */
PETSC_EXTERN void petsc_mpi_send_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
575 
576 PETSC_EXTERN void petsc_mpi_recv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int status, int *ierr) {
577   *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
578 }
579 
/* Unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf, void *recvbuf, int *recvcounts, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Nonblocking point-to-point cannot match with a single process */
PETSC_EXTERN void petsc_mpi_irecv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_isend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *request, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* A self sendrecv is a plain copy; the receive-side count/type are assumed compatible with the send side */
PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf, int *sendcount, int *sendtype, int *dest, int *sendtag, void *recvbuf, int *recvcount, int *recvtype, int *source, int *recvtag, int *comm, int *status, int *ierr) {
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*sendcount) * MPI_sizeof(*sendtype));
}

/* No outstanding request can exist, so MPI_Test is unsupported */
PETSC_EXTERN void petsc_mpi_test_(int *request, int *flag, int *status, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Waits are no-ops: no request can be pending */
PETSC_EXTERN void petsc_mpi_waitall_(int *count, int *array_of_requests, int *array_of_statuses, int *ierr) {
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_waitany_(int *count, int *array_of_requests, int *index, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* Gathers degenerate to copies (handled by the corresponding MPI macros) */
PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf, int *sendcount, int *sendtype, void *recvbuf, int *recvcounts, int *displs, int *recvtype, int *comm, int *ierr) {
  *ierr = MPI_Allgatherv(sendbuf, *sendcount, *sendtype, recvbuf, recvcounts, displs, *recvtype, *comm);
}

PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf, int *sendcounts, int *sdispls, int *sendtype, void *recvbuf, int *recvcounts, int *rdispls, int *recvtype, int *comm, int *ierr) {
  *ierr = MPI_Alltoallv(sendbuf, sendcounts, sdispls, *sendtype, recvbuf, recvcounts, rdispls, *recvtype, *comm);
}

/* Returns the input communicator itself.
   NOTE(review): unlike the C MPI_Comm_create() above, no new id is handed out */
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm, int *group, int *newcomm, int *ierr) {
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

/* The "address" of a location is simply its pointer value */
PETSC_EXTERN void petsc_mpi_address_(void *location, MPI_Aint *address, int *ierr) {
  *address = (MPI_Aint)((char *)location);
  *ierr    = MPI_SUCCESS;
}
625 
/* Pack/unpack are unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_pack_(void *inbuf, int *incount, int *datatype, void *outbuf, int *outsize, int *position, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf, int *insize, int *position, void *outbuf, int *outcount, int *datatype, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_pack_size_(int *incount, int *datatype, int *comm, int *size, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Derived struct datatypes are unsupported */
PETSC_EXTERN void petsc_mpi_type_struct_(int *count, int *array_of_blocklengths, int *array_of_displaments, int *array_of_types, int *newtype, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Committing a type is a no-op */
PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): always returns 0.0 rather than a wall-clock time -- timing is
   unavailable through this binding */
double petsc_mpi_wtime_(void) {
  return 0.0;
}

/* No request can be pending, so cancel is a no-op */
PETSC_EXTERN void petsc_mpi_cancel_(int *request, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): unlike the C MPI_Comm_dup(), this returns the input id without
   allocating a new one */
PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm, int *out, int *ierr) {
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}

/* Freeing is a no-op from Fortran; the id is not returned to the C free pool */
PETSC_EXTERN void petsc_mpi_comm_free_(int *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* No receive ever completes, so there is never a count to report */
PETSC_EXTERN void petsc_mpi_get_count_(int *status, int *datatype, int *count, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Returns the fixed name "localhost"; no NUL terminator is written since
   Fortran strings carry an explicit length */
PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name, int *result_len, int *ierr, PETSC_FORTRAN_CHARLEN_T len) {
  MPIUNI_Memcpy(name, "localhost", 9 * sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}

/* Fortran binding of MPI_Initialized() */
PETSC_EXTERN void petsc_mpi_initialized_(int *flag, int *ierr) {
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}
676 }
677 
678 PETSC_EXTERN void petsc_mpi_iprobe_(int *source, int *tag, int *comm, int *glag, int *status, int *ierr) {
679   *ierr = MPI_SUCCESS;
680 }
681 
/* Probe is a no-op stub; flag and status are left unset */
PETSC_EXTERN void petsc_mpi_probe_(int *source, int *tag, int *comm, int *flag, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* No request can be pending */
PETSC_EXTERN void petsc_mpi_request_free_(int *request, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* Synchronous sends cannot match with a single process */
PETSC_EXTERN void petsc_mpi_ssend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr) {
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

/* Waits are no-ops */
PETSC_EXTERN void petsc_mpi_wait_(int *request, int *status, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): *group is left unset -- callers should not rely on the output group */
PETSC_EXTERN void petsc_mpi_comm_group_(int *comm, int *group, int *ierr) {
  *ierr = MPI_SUCCESS;
}

/* Exclusive scan of one process leaves recvbuf untouched */
PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr) {
  *ierr = MPI_SUCCESS;
}
705 
706 #endif /* PETSC_HAVE_FORTRAN */
707 
708 #if defined(__cplusplus)
709 }
710 #endif
711