xref: /petsc/src/sys/mpiuni/mpi.c (revision c524e44ceafee4aa1f13666ee553c16f4e41b8f4)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include <petscsys.h>
6 #if !defined(MPIUNI_H)
7 #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9 
10 #include <petsc/private/petscimpl.h> /* for PetscCUPMInitialized */
11 
12 #if defined(PETSC_HAVE_CUDA)
13   #include <cuda_runtime.h>
14 #endif
15 
16 #if defined(PETSC_HAVE_HIP)
17   #include <hip/hip_runtime.h>
18 #endif
19 
20 #define MPI_SUCCESS 0
21 #define MPI_FAILURE 1
22 
23 void *MPIUNI_TMP = NULL;
24 
25 /*
26        With MPI Uni there are exactly four distinct communicators:
    MPI_COMM_SELF, MPI_COMM_WORLD, and an MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)

    MPI_COMM_SELF and MPI_COMM_WORLD are freed with MPI_Comm_free() in MPI_Finalize(), but in general with PETSc
     the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).
31 
32 */
33 #define MAX_ATTR 256
34 #define MAX_COMM 128
35 
36 typedef struct {
37   void *attribute_val;
38   int  active;
39 } MPI_Attr;
40 
41 typedef struct {
42   void                *extra_state;
43   MPI_Delete_function *del;
44   int                 active;  /* Is this keyval in use by some comm? */
45 } MPI_Attr_keyval;
46 
47 static MPI_Attr_keyval attr_keyval[MAX_ATTR];
48 static MPI_Attr        attr[MAX_COMM][MAX_ATTR];
49 static int             comm_active[MAX_COMM];  /* Boolean array indicating which comms are in use */
50 static int             mpi_tag_ub = 100000000;
51 static int             num_attr = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
52 static int             MaxComm  = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
53 static void*           MPIUNIF_mpi_in_place = 0;
54 
55 #define CommIdx(comm)  ((comm)-1)  /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
56 
57 #if defined(__cplusplus)
58 extern "C" {
59 #endif
60 
61 /*
62    To avoid problems with prototypes to the system memcpy() it is duplicated here
63 */
/*
   Copies n bytes from src to dst, like memcpy(), but understands the MPI
   in-place sentinels: if either buffer is MPI_IN_PLACE (C) or the Fortran
   MPI_IN_PLACE address registered via mpiunisetfortranbasepointers_(),
   no copy is needed and MPI_SUCCESS is returned immediately.
   Returns MPI_SUCCESS, or MPI_FAILURE if a device copy fails.
*/
int MPIUNI_Memcpy(void *dst,const void *src,int n)
{
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  /* zero-length copy is a no-op; also avoids touching possibly-NULL buffers */
  if (!n) return MPI_SUCCESS;

  /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
#if defined(PETSC_HAVE_CUDA)
  /* cudaMemcpyDefault lets the runtime infer host/device direction from the pointers */
  if (PetscCUDAInitialized) {cudaError_t cerr = cudaMemcpy(dst,src,n,cudaMemcpyDefault);if (cerr != cudaSuccess) return MPI_FAILURE;} else
#elif defined(PETSC_HAVE_HIP)
  if (PetscHIPInitialized)  {hipError_t  cerr = hipMemcpy(dst,src,n,hipMemcpyDefault);  if (cerr != hipSuccess)  return MPI_FAILURE;} else
#endif
  {memcpy(dst,src,n);}
  return MPI_SUCCESS;
}
79 
80 static int classcnt = 0;
81 static int codecnt = 0;
82 
83 int MPI_Add_error_class(int *cl)
84 {
85   *cl = classcnt++;
86   return MPI_SUCCESS;
87 }
88 
89 int MPI_Add_error_code(int cl,int *co)
90 {
91   if (cl >= classcnt) return MPI_FAILURE;
92   *co = codecnt++;
93   return MPI_SUCCESS;
94 }
95 
96 int MPI_Type_get_envelope(MPI_Datatype datatype,int *num_integers,int *num_addresses,int *num_datatypes,int *combiner)
97 {
98   int comb = datatype >> 28;
99   switch (comb) {
100   case MPI_COMBINER_NAMED:
101     *num_integers = 0;
102     *num_addresses = 0;
103     *num_datatypes = 0;
104     *combiner = comb;
105     break;
106   case MPI_COMBINER_DUP:
107     *num_integers = 0;
108     *num_addresses = 0;
109     *num_datatypes = 1;
110     *combiner = comb;
111     break;
112   case MPI_COMBINER_CONTIGUOUS:
113     *num_integers = 1;
114     *num_addresses = 0;
115     *num_datatypes = 1;
116     *combiner = comb;
117     break;
118   default:
119     return MPIUni_Abort(MPI_COMM_SELF,1);
120   }
121   return MPI_SUCCESS;
122 }
123 
/*
   Returns the arguments used to construct 'datatype'.
   MPIUNI packs a datatype into one int: the top 4 bits (>> 28) hold the
   combiner; for CONTIGUOUS types bits 8-19 appear to hold the element
   count and the remaining bits identify the underlying named type
   (NOTE(review): bit layout inferred from the masks below — confirm
   against MPIUNI's mpi.h).
   Aborts for named types (per MPI they have no retrievable contents),
   for too-small output arrays, and for unknown combiners.
*/
int MPI_Type_get_contents(MPI_Datatype datatype,int max_integers,int max_addresses,int max_datatypes,int *array_of_integers,MPI_Aint *array_of_addresses,MPI_Datatype *array_of_datatypes)
{
  int comb = datatype >> 28;
  switch (comb) {
  case MPI_COMBINER_NAMED:
    return MPIUni_Abort(MPI_COMM_SELF,1);
  case MPI_COMBINER_DUP:
    if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
    /* a dup wraps the original type: strip the combiner bits to recover it */
    array_of_datatypes[0] = datatype & 0x0fffffff;
    break;
  case MPI_COMBINER_CONTIGUOUS:
    if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
    array_of_integers[0] = (datatype >> 8) & 0xfff; /* count */
    array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100;  /* basic named type (count=1) from which the contiguous type is derived */
    break;
  default:
    return MPIUni_Abort(MPI_COMM_SELF,1);
  }
  return MPI_SUCCESS;
}
144 
145 /*
146    Used to set the built-in MPI_TAG_UB attribute
147 */
148 static int Keyval_setup(void)
149 {
150   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
151   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
152   attr[CommIdx(MPI_COMM_SELF)][0].active        = 1;
153   attr[CommIdx(MPI_COMM_SELF)][0].attribute_val = &mpi_tag_ub;
154   attr_keyval[0].active                          = 1;
155   return MPI_SUCCESS;
156 }
157 
158 int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
159 {
160   int i,keyid;
161   for (i=1; i<num_attr; i++) { /* the first attribute is always in use */
162     if (!attr_keyval[i].active) {
163       keyid = i;
164       goto found;
165     }
166   }
167   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD,1);
168   keyid = num_attr++;
169 
170 found:
171   attr_keyval[keyid].extra_state = extra_state;
172   attr_keyval[keyid].del         = delete_fn;
173   attr_keyval[keyid].active      = 1;
174   *keyval                        = keyid;
175   return MPI_SUCCESS;
176 }
177 
178 int MPI_Comm_free_keyval(int *keyval)
179 {
180   attr_keyval[*keyval].extra_state = 0;
181   attr_keyval[*keyval].del         = 0;
182   attr_keyval[*keyval].active      = 0;
183   *keyval = 0;
184   return MPI_SUCCESS;
185 }
186 
187 int MPI_Comm_set_attr(MPI_Comm comm,int keyval,void *attribute_val)
188 {
189   int idx = CommIdx(comm);
190   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
191   attr[idx][keyval].active        = 1;
192   attr[idx][keyval].attribute_val = attribute_val;
193   return MPI_SUCCESS;
194 }
195 
196 int MPI_Comm_delete_attr(MPI_Comm comm,int keyval)
197 {
198   int idx = CommIdx(comm);
199   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
200   if (attr[idx][keyval].active && attr_keyval[keyval].del) {
201     void *save_attribute_val        = attr[idx][keyval].attribute_val;
202     attr[idx][keyval].active        = 0;
203     attr[idx][keyval].attribute_val = 0;
204     (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
205   }
206   return MPI_SUCCESS;
207 }
208 
209 int MPI_Comm_get_attr(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
210 {
211   int idx = CommIdx(comm);
212   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
213   if (!keyval) Keyval_setup();
214   *flag                  = attr[idx][keyval].active;
215   *(void**)attribute_val = attr[idx][keyval].attribute_val;
216   return MPI_SUCCESS;
217 }
218 
219 int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
220 {
221   int j;
222   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
223   for (j=3; j<=MaxComm; j++) {
224     if (!comm_active[CommIdx(j)]) {
225       comm_active[CommIdx(j)] = 1;
226       *newcomm = j;
227       return MPI_SUCCESS;
228     }
229   }
230   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
231   *newcomm = ++MaxComm;
232   comm_active[CommIdx(*newcomm)] = 1;
233   return MPI_SUCCESS;
234 }
235 
236 int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
237 {
238   int j;
239   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
240   for (j=3; j<=MaxComm; j++) {
241     if (!comm_active[CommIdx(j)]) {
242       comm_active[CommIdx(j)] = 1;
243       *out = j;
244       return MPI_SUCCESS;
245     }
246   }
247   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
248   *out = ++MaxComm;
249   comm_active[CommIdx(*out)] = 1;
250   return MPI_SUCCESS;
251 }
252 
/*
   Frees a communicator: runs the delete callback for every active
   attribute, clears the attribute slots, and releases the slot for reuse
   (only ids >= 3 occupy reusable slots; the predefined communicators 1
   and 2 keep theirs).  Sets *comm to 0 (the null communicator).
   Note: unlike MPI_Comm_delete_attr(), the callback here runs before the
   slot is cleared.
*/
int MPI_Comm_free(MPI_Comm *comm)
{
  int i;
  int idx = CommIdx(*comm);

  if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
  for (i=0; i<num_attr; i++) {
    if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[idx][i].attribute_val,attr_keyval[i].extra_state);
    attr[idx][i].active        = 0;
    attr[idx][i].attribute_val = 0;
  }
  if (*comm >= 3) comm_active[idx] = 0;
  *comm = 0;
  return MPI_SUCCESS;
}
268 
269 int MPI_Comm_size(MPI_Comm comm, int *size)
270 {
271   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
272   *size=1;
273   return MPI_SUCCESS;
274 }
275 
276 int MPI_Comm_rank(MPI_Comm comm, int *rank)
277 {
278   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
279   *rank=0;
280   return MPI_SUCCESS;
281 }
282 
283 int MPIUni_Abort(MPI_Comm comm,int errorcode)
284 {
285   printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
286   return MPI_FAILURE;
287 }
288 
289 int MPI_Abort(MPI_Comm comm,int errorcode)
290 {
291   abort();
292   return MPI_SUCCESS;
293 }
294 
295 /* --------------------------------------------------------------------------*/
296 
297 static int MPI_was_initialized = 0;
298 static int MPI_was_finalized   = 0;
299 
300 int MPI_Init(int *argc, char ***argv)
301 {
302   if (MPI_was_initialized) return MPI_FAILURE;
303   if (MPI_was_finalized) return MPI_FAILURE; /* MPI standard: once MPI_FINALIZE returns, no MPI routine (not even MPI_INIT) may be called, except ... */
304   MPI_was_initialized = 1;
305   return MPI_SUCCESS;
306 }
307 
/*
   Shuts MPIUNI down: frees the two predefined communicators (which runs
   their attribute delete callbacks), warns in debug builds about user
   communicators never freed, and resets the comm/attr counters.
   After this returns, no MPI routine may succeed again (MPI_Init checks
   MPI_was_finalized, per the MPI standard).
*/
int MPI_Finalize(void)
{
  MPI_Comm comm;
  if (MPI_was_finalized) return MPI_FAILURE;
  if (!MPI_was_initialized) return MPI_FAILURE;
  comm = MPI_COMM_WORLD;
  MPI_Comm_free(&comm);
  comm = MPI_COMM_SELF;
  MPI_Comm_free(&comm);
#if defined(PETSC_USE_DEBUG)
  {
    int i;
    /* ids >= 3 are user communicators from MPI_Comm_dup()/MPI_Comm_create() */
    for (i=3; i<=MaxComm; i++) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }
  }
#endif
  /* reset counters */
  MaxComm  = 2;
  num_attr = 1;
  MPI_was_finalized = 1;
  return MPI_SUCCESS;
}
331 
332 int MPI_Initialized(int *flag)
333 {
334   *flag = MPI_was_initialized;
335   return MPI_SUCCESS;
336 }
337 
338 int MPI_Finalized(int *flag)
339 {
340   *flag = MPI_was_finalized;
341   return MPI_SUCCESS;
342 }
343 
344 /* -------------------     Fortran versions of several routines ------------------ */
345 
346 #if defined(PETSC_HAVE_FORTRAN_CAPS)
347 #define mpiunisetmoduleblock_          MPIUNISETMODULEBLOCK
348 #define mpiunisetfortranbasepointers_  MPIUNISETFORTRANBASEPOINTERS
349 #define petsc_mpi_init_                PETSC_MPI_INIT
350 #define petsc_mpi_finalize_            PETSC_MPI_FINALIZE
351 #define petsc_mpi_comm_size_           PETSC_MPI_COMM_SIZE
352 #define petsc_mpi_comm_rank_           PETSC_MPI_COMM_RANK
353 #define petsc_mpi_abort_               PETSC_MPI_ABORT
354 #define petsc_mpi_reduce_              PETSC_MPI_REDUCE
355 #define petsc_mpi_allreduce_           PETSC_MPI_ALLREDUCE
356 #define petsc_mpi_barrier_             PETSC_MPI_BARRIER
357 #define petsc_mpi_bcast_               PETSC_MPI_BCAST
358 #define petsc_mpi_gather_              PETSC_MPI_GATHER
359 #define petsc_mpi_allgather_           PETSC_MPI_ALLGATHER
360 #define petsc_mpi_comm_split_          PETSC_MPI_COMM_SPLIT
361 #define petsc_mpi_scan_                PETSC_MPI_SCAN
362 #define petsc_mpi_send_                PETSC_MPI_SEND
363 #define petsc_mpi_recv_                PETSC_MPI_RECV
364 #define petsc_mpi_reduce_scatter_      PETSC_MPI_REDUCE_SCATTER
365 #define petsc_mpi_irecv_               PETSC_MPI_IRECV
366 #define petsc_mpi_isend_               PETSC_MPI_ISEND
367 #define petsc_mpi_sendrecv_            PETSC_MPI_SENDRECV
368 #define petsc_mpi_test_                PETSC_MPI_TEST
369 #define petsc_mpi_waitall_             PETSC_MPI_WAITALL
370 #define petsc_mpi_waitany_             PETSC_MPI_WAITANY
371 #define petsc_mpi_allgatherv_          PETSC_MPI_ALLGATHERV
372 #define petsc_mpi_alltoallv_           PETSC_MPI_ALLTOALLV
373 #define petsc_mpi_comm_create_         PETSC_MPI_COMM_CREATE
374 #define petsc_mpi_address_             PETSC_MPI_ADDRESS
375 #define petsc_mpi_pack_                PETSC_MPI_PACK
376 #define petsc_mpi_unpack_              PETSC_MPI_UNPACK
377 #define petsc_mpi_pack_size_           PETSC_MPI_PACK_SIZE
378 #define petsc_mpi_type_struct_         PETSC_MPI_TYPE_STRUCT
379 #define petsc_mpi_type_commit_         PETSC_MPI_TYPE_COMMIT
380 #define petsc_mpi_wtime_               PETSC_MPI_WTIME
381 #define petsc_mpi_cancel_              PETSC_MPI_CANCEL
382 #define petsc_mpi_comm_dup_            PETSC_MPI_COMM_DUP
383 #define petsc_mpi_comm_free_           PETSC_MPI_COMM_FREE
384 #define petsc_mpi_get_count_           PETSC_MPI_GET_COUNT
385 #define petsc_mpi_get_processor_name_  PETSC_MPI_GET_PROCESSOR_NAME
386 #define petsc_mpi_initialized_         PETSC_MPI_INITIALIZED
387 #define petsc_mpi_iprobe_              PETSC_MPI_IPROBE
388 #define petsc_mpi_probe_               PETSC_MPI_PROBE
389 #define petsc_mpi_request_free_        PETSC_MPI_REQUEST_FREE
390 #define petsc_mpi_ssend_               PETSC_MPI_SSEND
391 #define petsc_mpi_wait_                PETSC_MPI_WAIT
392 #define petsc_mpi_comm_group_          PETSC_MPI_COMM_GROUP
393 #define petsc_mpi_exscan_              PETSC_MPI_EXSCAN
394 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
395 #define mpiunisetmoduleblock_          mpiunisetmoduleblock
396 #define mpiunisetfortranbasepointers_  mpiunisetfortranbasepointers
397 #define petsc_mpi_init_                petsc_mpi_init
398 #define petsc_mpi_finalize_            petsc_mpi_finalize
399 #define petsc_mpi_comm_size_           petsc_mpi_comm_size
400 #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank
401 #define petsc_mpi_abort_               petsc_mpi_abort
402 #define petsc_mpi_reduce_              petsc_mpi_reduce
403 #define petsc_mpi_allreduce_           petsc_mpi_allreduce
404 #define petsc_mpi_barrier_             petsc_mpi_barrier
405 #define petsc_mpi_bcast_               petsc_mpi_bcast
406 #define petsc_mpi_gather_              petsc_mpi_gather
407 #define petsc_mpi_allgather_           petsc_mpi_allgather
408 #define petsc_mpi_comm_split_          petsc_mpi_comm_split
409 #define petsc_mpi_scan_                petsc_mpi_scan
410 #define petsc_mpi_send_                petsc_mpi_send
411 #define petsc_mpi_recv_                petsc_mpi_recv
412 #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter
413 #define petsc_mpi_irecv_               petsc_mpi_irecv
414 #define petsc_mpi_isend_               petsc_mpi_isend
415 #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv
416 #define petsc_mpi_test_                petsc_mpi_test
417 #define petsc_mpi_waitall_             petsc_mpi_waitall
418 #define petsc_mpi_waitany_             petsc_mpi_waitany
419 #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv
420 #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv
421 #define petsc_mpi_comm_create_         petsc_mpi_comm_create
422 #define petsc_mpi_address_             petsc_mpi_address
423 #define petsc_mpi_pack_                petsc_mpi_pack
424 #define petsc_mpi_unpack_              petsc_mpi_unpack
425 #define petsc_mpi_pack_size_           petsc_mpi_pack_size
426 #define petsc_mpi_type_struct_         petsc_mpi_type_struct
427 #define petsc_mpi_type_commit_         petsc_mpi_type_commit
428 #define petsc_mpi_wtime_               petsc_mpi_wtime
429 #define petsc_mpi_cancel_              petsc_mpi_cancel
430 #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup
431 #define petsc_mpi_comm_free_           petsc_mpi_comm_free
432 #define petsc_mpi_get_count_           petsc_mpi_get_count
433 #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name
434 #define petsc_mpi_initialized_         petsc_mpi_initialized
435 #define petsc_mpi_iprobe_              petsc_mpi_iprobe
436 #define petsc_mpi_probe_               petsc_mpi_probe
437 #define petsc_mpi_request_free_        petsc_mpi_request_free
438 #define petsc_mpi_ssend_               petsc_mpi_ssend
439 #define petsc_mpi_wait_                petsc_mpi_wait
440 #define petsc_mpi_comm_group_          petsc_mpi_comm_group
441 #define petsc_mpi_exscan_              petsc_mpi_exscan
442 #endif
443 
444 #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
445 #define petsc_mpi_init_                petsc_mpi_init__
446 #define petsc_mpi_finalize_            petsc_mpi_finalize__
447 #define petsc_mpi_comm_size_           petsc_mpi_comm_size__
448 #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank__
449 #define petsc_mpi_abort_               petsc_mpi_abort__
450 #define petsc_mpi_reduce_              petsc_mpi_reduce__
451 #define petsc_mpi_allreduce_           petsc_mpi_allreduce__
452 #define petsc_mpi_barrier_             petsc_mpi_barrier__
453 #define petsc_mpi_bcast_               petsc_mpi_bcast__
454 #define petsc_mpi_gather_              petsc_mpi_gather__
455 #define petsc_mpi_allgather_           petsc_mpi_allgather__
456 #define petsc_mpi_comm_split_          petsc_mpi_comm_split__
457 #define petsc_mpi_scan_                petsc_mpi_scan__
458 #define petsc_mpi_send_                petsc_mpi_send__
459 #define petsc_mpi_recv_                petsc_mpi_recv__
460 #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter__
461 #define petsc_mpi_irecv_               petsc_mpi_irecv__
462 #define petsc_mpi_isend_               petsc_mpi_isend__
463 #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv__
464 #define petsc_mpi_test_                petsc_mpi_test__
465 #define petsc_mpi_waitall_             petsc_mpi_waitall__
466 #define petsc_mpi_waitany_             petsc_mpi_waitany__
467 #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv__
468 #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv__
469 #define petsc_mpi_comm_create_         petsc_mpi_comm_create__
470 #define petsc_mpi_address_             petsc_mpi_address__
471 #define petsc_mpi_pack_                petsc_mpi_pack__
472 #define petsc_mpi_unpack_              petsc_mpi_unpack__
473 #define petsc_mpi_pack_size_           petsc_mpi_pack_size__
474 #define petsc_mpi_type_struct_         petsc_mpi_type_struct__
475 #define petsc_mpi_type_commit_         petsc_mpi_type_commit__
476 #define petsc_mpi_wtime_               petsc_mpi_wtime__
477 #define petsc_mpi_cancel_              petsc_mpi_cancel__
478 #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup__
479 #define petsc_mpi_comm_free_           petsc_mpi_comm_free__
480 #define petsc_mpi_get_count_           petsc_mpi_get_count__
481 #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name__
482 #define petsc_mpi_initialized_         petsc_mpi_initialized__
483 #define petsc_mpi_iprobe_              petsc_mpi_iprobe__
484 #define petsc_mpi_probe_               petsc_mpi_probe__
485 #define petsc_mpi_request_free_        petsc_mpi_request_free__
486 #define petsc_mpi_ssend_               petsc_mpi_ssend__
487 #define petsc_mpi_wait_                petsc_mpi_wait__
488 #define petsc_mpi_comm_group_          petsc_mpi_comm_group__
489 #define petsc_mpi_exscan_              petsc_mpi_exscan__
490 #endif
491 
/* Do not build the Fortran interface if MPI namespace collision is to be avoided */
493 #if defined(PETSC_HAVE_FORTRAN)
494 
/* Defined in Fortran source; called from petsc_mpi_init_() below */
PETSC_EXTERN void mpiunisetmoduleblock_(void);

/* Records the address of the Fortran MPI_IN_PLACE sentinel so that
   MPIUNI_Memcpy() can recognize it and skip the copy */
PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
{
  MPIUNIF_mpi_in_place   = f_mpi_in_place;
}

/* Fortran MPI_INIT: hooks up the Fortran module block, then initializes */
PETSC_EXTERN void petsc_mpi_init_(int *ierr)
{
  mpiunisetmoduleblock_();
  *ierr = MPI_Init((int*)0, (char***)0);
}

/* Fortran MPI_FINALIZE */
PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
{
  *ierr = MPI_Finalize();
}

/* Fortran MPI_COMM_SIZE: always 1; comm is not validated here
   (note: reports success as literal 0 rather than MPI_SUCCESS) */
PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
{
  *size = 1;
  *ierr = 0;
}

/* Fortran MPI_COMM_RANK: always rank 0 */
PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
{
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_COMM_SPLIT: with one process the "split" is the input comm
   itself; color/key are irrelevant.  Note this does not allocate a new
   handle the way the C MPI_Comm_dup() does. */
PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}
530 
/* Fortran MPI_ABORT: terminates immediately; errorcode cannot be forwarded */
PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_REDUCE: forwards to the C routine (a copy on one process) */
PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
{
  *ierr = MPI_Reduce(sendbuf,recvbuf,*count,*datatype,*op,*root,*comm);
}

/* Fortran MPI_ALLREDUCE: forwards to the C routine */
PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_Allreduce(sendbuf,recvbuf,*count,*datatype,*op,*comm);
}

/* Fortran MPI_BARRIER: trivially satisfied with one process */
PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_BCAST: the root already has the data; nothing to do */
PETSC_EXTERN void petsc_mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_GATHER.  NOTE(review): rcount/rdatatype are passed as
   pointers without dereferencing; presumably the MPIUNI MPI_Gather macro
   ignores the receive arguments — verify against mpiuni's mpi.h */
PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root,int *comm,int *ierr)
{
  *ierr = MPI_Gather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*root,*comm);
}

/* Fortran MPI_ALLGATHER: same un-dereferenced rcount/rdatatype as above */
PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype,int *comm,int *ierr)
{
  *ierr = MPI_Allgather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*comm);
}

/* Fortran MPI_SCAN: on one process the scan is just a copy */
PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPI_sizeof(*datatype));
}

/* Fortran MPI_SEND: point-to-point with no partner is unsupported */
PETSC_EXTERN void petsc_mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_RECV: unsupported.  NOTE(review): 'status' is declared by
   value (int, not int*), unlike every sibling; harmless only because the
   argument is never used */
PETSC_EXTERN void petsc_mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int status,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_REDUCE_SCATTER: unsupported */
PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_IRECV: nonblocking receive has no possible sender; unsupported */
PETSC_EXTERN void petsc_mpi_irecv_(void *buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_ISEND: unsupported */
PETSC_EXTERN void petsc_mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_SENDRECV: self-to-self exchange reduces to a copy of the
   send buffer (recvcount/recvtype are assumed compatible) */
PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPI_sizeof(*sendtype));
}
601 
/* Fortran MPI_TEST: no request can ever exist; unsupported */
PETSC_EXTERN void petsc_mpi_test_(int *request,int *flag,int *status,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_WAITALL: no outstanding requests; trivially complete */
PETSC_EXTERN void petsc_mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_WAITANY: trivially complete (index/status left untouched) */
PETSC_EXTERN void petsc_mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_ALLGATHERV: forwards to the C routine */
PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Allgatherv(sendbuf,*sendcount,*sendtype,recvbuf,recvcounts,displs,*recvtype,*comm);
}

/* Fortran MPI_ALLTOALLV: forwards to the C routine */
PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Alltoallv(sendbuf,sendcounts,sdispls,*sendtype,recvbuf,recvcounts,rdispls,*recvtype,*comm);
}

/* Fortran MPI_COMM_CREATE: returns the input comm itself (no new handle,
   unlike the C MPI_Comm_create()); group is irrelevant */
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
{
  *newcomm =  *comm;
  *ierr    = MPI_SUCCESS;
}

/* Fortran MPI_ADDRESS: the "MPI address" is just the C pointer value */
PETSC_EXTERN void petsc_mpi_address_(void *location,MPI_Aint *address,int *ierr)
{
  *address =  (MPI_Aint) ((char *)location);
  *ierr    = MPI_SUCCESS;
}

/* Fortran MPI_PACK: unsupported */
PETSC_EXTERN void petsc_mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_UNPACK: unsupported */
PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_PACK_SIZE: unsupported */
PETSC_EXTERN void petsc_mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_TYPE_STRUCT: unsupported */
PETSC_EXTERN void petsc_mpi_type_struct_(int *count,int *array_of_blocklengths,int * array_of_displaments,int *array_of_types,int *newtype,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_TYPE_COMMIT: nothing to do; types are plain ints */
PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_WTIME stub: always 0.0 (no timer behind it) */
double petsc_mpi_wtime_(void)
{
  return 0.0;
}

/* Fortran MPI_CANCEL: no requests exist, so trivially successful */
PETSC_EXTERN void petsc_mpi_cancel_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_COMM_DUP: returns the same handle (no new slot, unlike the
   C MPI_Comm_dup()) */
PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm,int *out,int *ierr)
{
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_COMM_FREE: no-op (handles from petsc_mpi_comm_dup_ are shared) */
PETSC_EXTERN void petsc_mpi_comm_free_(int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_GET_COUNT: no receives ever complete; unsupported */
PETSC_EXTERN void petsc_mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_GET_PROCESSOR_NAME: fixed name "localhost" (9 chars, no
   NUL needed for a Fortran character buffer) */
PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name,int *result_len,int *ierr,PETSC_FORTRAN_CHARLEN_T len)
{
  MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}
696 
/* Fortran MPI_INITIALIZED: mirrors the C MPI_Initialized() */
PETSC_EXTERN void petsc_mpi_initialized_(int *flag,int *ierr)
{
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_IPROBE: no message can ever arrive; reports success without
   touching the outputs.  NOTE(review): "glag" looks like a typo for
   "flag"; harmless since the parameter is unused */
PETSC_EXTERN void petsc_mpi_iprobe_(int *source,int *tag,int *comm,int *glag,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_PROBE: reports success without touching the outputs */
PETSC_EXTERN void petsc_mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_REQUEST_FREE: no requests exist; trivially successful */
PETSC_EXTERN void petsc_mpi_request_free_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_SSEND: synchronous send has no possible receiver; unsupported */
PETSC_EXTERN void petsc_mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Fortran MPI_WAIT: trivially complete */
PETSC_EXTERN void petsc_mpi_wait_(int *request,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_COMM_GROUP: group output is left untouched */
PETSC_EXTERN void petsc_mpi_comm_group_(int *comm,int *group,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_EXSCAN: with one process the exclusive scan writes nothing */
PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
737 
738 #endif /* PETSC_HAVE_FORTRAN */
739 
740 #if defined(__cplusplus)
741 }
742 #endif
743