xref: /petsc/src/sys/mpiuni/mpi.c (revision 15cee0077e82d043bf914399a172c47fe513bfcd)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include <petscsys.h>
6 #if !defined(MPIUNI_H)
7 #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9 
10 #if defined(PETSC_HAVE_CUDA)
11 #include <cuda.h>
12 #include <cuda_runtime.h>
13 #include <petsc/private/petscimpl.h> /* for PetscCUDAInitialized */
14 #endif
15 
#define MPI_SUCCESS 0
#define MPI_FAILURE 1

/* Scratch pointer used by the MPIUNI expression macros in mpi.h */
void *MPIUNI_TMP = NULL;

/*
       With MPI Uni there are exactly four distinct communicators:
    MPI_COMM_SELF, MPI_COMM_WORLD, and a MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)

    MPI_COMM_SELF and MPI_COMM_WORLD are MPI_Comm_free() in MPI_Finalize() but in general with PETSc,
     the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).

*/
#define MAX_ATTR 256  /* maximum number of keyvals/attributes that can ever be created */
#define MAX_COMM 128  /* maximum number of communicators that can ever exist */

/* One attribute value attached to a communicator under some keyval */
typedef struct {
  void *attribute_val;  /* user-provided attribute pointer */
  int  active;          /* Is this attribute currently set on the communicator? */
} MPI_Attr;

/* One attribute keyval; keyvals are shared by all communicators */
typedef struct {
  void                *extra_state;  /* user state handed to the delete callback */
  MPI_Delete_function *del;          /* callback run when the attribute is deleted or its comm freed */
  int                 active;  /* Is this keyval in use by some comm? */
} MPI_Attr_keyval;

static MPI_Attr_keyval attr_keyval[MAX_ATTR];
static MPI_Attr        attr[MAX_COMM][MAX_ATTR];  /* attr[comm index][keyval] */
static int             comm_active[MAX_COMM];  /* Boolean array indicating which comms are in use */
static int             mpi_tag_ub = 100000000; /* value of the predefined MPI_TAG_UB attribute */
static int             num_attr = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
static int             MaxComm  = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
static void*           MPIUNIF_mpi_in_place = 0; /* address of Fortran's MPI_IN_PLACE, set by mpiunisetfortranbasepointers_() */

#define CommIdx(comm)  ((comm)-1)  /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
52 
53 #if defined(__cplusplus)
54 extern "C" {
55 #endif
56 
/*
   To avoid problems with prototypes to the system memcpy() it is duplicated here

   Copies n bytes from src to dst; a no-op when either pointer is MPI_IN_PLACE
   (C or Fortran flavor) or when n is 0.  Returns MPI_SUCCESS/MPI_FAILURE.
*/
int MPIUNI_Memcpy(void *dst,const void *src,int n)
{
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (!n) return MPI_SUCCESS;

#if defined(PETSC_HAVE_CUDA) /* CUDA-aware MPIUNI */
  if (PetscCUDAInitialized) {
    int                          dstType=0,srcType=0; /* 0: host memory; 1: device memory */
    cudaError_t                  dstCerr,srcCerr,cerr;
    struct cudaPointerAttributes dstAttr,srcAttr;
    enum cudaMemoryType          dstMtype,srcMtype;
    /* kinds[srcType][dstType] selects the correct copy direction */
    enum cudaMemcpyKind          kinds[2][2] = {{cudaMemcpyHostToHost,cudaMemcpyHostToDevice},{cudaMemcpyDeviceToHost,cudaMemcpyDeviceToDevice}};

    dstCerr = cudaPointerGetAttributes(&dstAttr,dst); /* Do not check error since before CUDA 11.0, passing a host pointers returns cudaErrorInvalidValue */
    cudaGetLastError(); /* Get and then clear the last error */
    srcCerr = cudaPointerGetAttributes(&srcAttr,src);
    cudaGetLastError();
    #if (CUDART_VERSION < 10000)
      dstMtype = dstAttr.memoryType; /* the field was renamed to 'type' in CUDA 10 */
      srcMtype = srcAttr.memoryType;
    #else
      dstMtype = dstAttr.type;
      srcMtype = srcAttr.type;
    #endif
    if (dstCerr == cudaSuccess && dstMtype == cudaMemoryTypeDevice) dstType = 1;
    if (srcCerr == cudaSuccess && srcMtype == cudaMemoryTypeDevice) srcType = 1;
    cerr = cudaMemcpy(dst,src,n,kinds[srcType][dstType]); /* Use synchronous copy per MPI semantics */
    if (cerr != cudaSuccess) return MPI_FAILURE;
  } else
#endif
  {memcpy(dst,src,n);}

  return MPI_SUCCESS;
}
95 
static int classcnt = 0; /* number of error classes handed out by MPI_Add_error_class() */
static int codecnt = 0;  /* number of error codes handed out by MPI_Add_error_code() */

/* Creates a new error class; MPIUNI simply hands out sequential integers starting at 0 */
int MPI_Add_error_class(int *cl)
{
  *cl = classcnt++;
  return MPI_SUCCESS;
}

/* Creates a new error code associated with class cl; cl must have come from MPI_Add_error_class() */
int MPI_Add_error_code(int cl,int *co)
{
  if (cl >= classcnt) return MPI_FAILURE;
  *co = codecnt++;
  return MPI_SUCCESS;
}
111 
112 int MPI_Type_get_envelope(MPI_Datatype datatype,int *num_integers,int *num_addresses,int *num_datatypes,int *combiner)
113 {
114   int comb = datatype >> 28;
115   switch (comb) {
116   case MPI_COMBINER_NAMED:
117     *num_integers = 0;
118     *num_addresses = 0;
119     *num_datatypes = 0;
120     *combiner = comb;
121     break;
122   case MPI_COMBINER_DUP:
123     *num_integers = 0;
124     *num_addresses = 0;
125     *num_datatypes = 1;
126     *combiner = comb;
127     break;
128   case MPI_COMBINER_CONTIGUOUS:
129     *num_integers = 1;
130     *num_addresses = 0;
131     *num_datatypes = 1;
132     *combiner = comb;
133     break;
134   default:
135     return MPIUni_Abort(MPI_COMM_SELF,1);
136   }
137   return MPI_SUCCESS;
138 }
139 
140 int MPI_Type_get_contents(MPI_Datatype datatype,int max_integers,int max_addresses,int max_datatypes,int *array_of_integers,MPI_Aint *array_of_addresses,MPI_Datatype *array_of_datatypes)
141 {
142   int comb = datatype >> 28;
143   switch (comb) {
144   case MPI_COMBINER_NAMED:
145     return MPIUni_Abort(MPI_COMM_SELF,1);
146     break;
147   case MPI_COMBINER_DUP:
148     if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
149     array_of_datatypes[0] = datatype & 0x0fffffff;
150     break;
151   case MPI_COMBINER_CONTIGUOUS:
152     if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
153     array_of_integers[0] = (datatype >> 8) & 0xfff; /* count */
154     array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100;  /* basic named type (count=1) from which the contiguous type is derived */
155     break;
156   default:
157     return MPIUni_Abort(MPI_COMM_SELF,1);
158   }
159   return MPI_SUCCESS;
160 }
161 
/*
   Used to set the built-in MPI_TAG_UB attribute

   Keyval 0 is reserved for MPI_TAG_UB; it is installed lazily on both
   predefined communicators the first time MPI_Comm_get_attr() asks for it.
*/
static int Keyval_setup(void)
{
  attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
  attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
  attr[CommIdx(MPI_COMM_SELF )][0].active        = 1;
  attr[CommIdx(MPI_COMM_SELF )][0].attribute_val = &mpi_tag_ub;
  attr_keyval[0].active                          = 1;
  return MPI_SUCCESS;
}
174 
175 int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
176 {
177   int i,keyid;
178   for (i=1; i<num_attr; i++) { /* the first attribute is always in use */
179     if (!attr_keyval[i].active) {
180       keyid = i;
181       goto found;
182     }
183   }
184   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD,1);
185   keyid = num_attr++;
186 
187 found:
188   attr_keyval[keyid].extra_state = extra_state;
189   attr_keyval[keyid].del         = delete_fn;
190   attr_keyval[keyid].active      = 1;
191   *keyval                        = keyid;
192   return MPI_SUCCESS;
193 }
194 
195 int MPI_Comm_free_keyval(int *keyval)
196 {
197   attr_keyval[*keyval].extra_state = 0;
198   attr_keyval[*keyval].del         = 0;
199   attr_keyval[*keyval].active      = 0;
200   *keyval = 0;
201   return MPI_SUCCESS;
202 }
203 
204 int MPI_Comm_set_attr(MPI_Comm comm,int keyval,void *attribute_val)
205 {
206   int idx = CommIdx(comm);
207   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
208   attr[idx][keyval].active        = 1;
209   attr[idx][keyval].attribute_val = attribute_val;
210   return MPI_SUCCESS;
211 }
212 
213 int MPI_Comm_delete_attr(MPI_Comm comm,int keyval)
214 {
215   int idx = CommIdx(comm);
216   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
217   if (attr[idx][keyval].active && attr_keyval[keyval].del) {
218     void *save_attribute_val        = attr[idx][keyval].attribute_val;
219     attr[idx][keyval].active        = 0;
220     attr[idx][keyval].attribute_val = 0;
221     (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
222   }
223   return MPI_SUCCESS;
224 }
225 
226 int MPI_Comm_get_attr(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
227 {
228   int idx = CommIdx(comm);
229   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
230   if (!keyval) Keyval_setup();
231   *flag                  = attr[idx][keyval].active;
232   *(void**)attribute_val = attr[idx][keyval].attribute_val;
233   return MPI_SUCCESS;
234 }
235 
236 int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
237 {
238   int j;
239   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
240   for (j=3; j<=MaxComm; j++) {
241     if (!comm_active[CommIdx(j)]) {
242       comm_active[CommIdx(j)] = 1;
243       *newcomm = j;
244       return MPI_SUCCESS;
245     }
246   }
247   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
248   *newcomm = ++MaxComm;
249   comm_active[CommIdx(*newcomm)] = 1;
250   return MPI_SUCCESS;
251 }
252 
253 int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
254 {
255   int j;
256   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
257   for (j=3; j<=MaxComm; j++) {
258     if (!comm_active[CommIdx(j)]) {
259       comm_active[CommIdx(j)] = 1;
260       *out = j;
261       return MPI_SUCCESS;
262     }
263   }
264   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
265   *out = ++MaxComm;
266   comm_active[CommIdx(*out)] = 1;
267   return MPI_SUCCESS;
268 }
269 
/*
   Frees a communicator: runs the delete callback (if any) for every attribute
   still attached, releases the slot, and sets *comm to 0 (comm_null).
*/
int MPI_Comm_free(MPI_Comm *comm)
{
  int i;
  int idx = CommIdx(*comm);

  if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
  for (i=0; i<num_attr; i++) {
    if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[idx][i].attribute_val,attr_keyval[i].extra_state);
    attr[idx][i].active        = 0;
    attr[idx][i].attribute_val = 0;
  }
  /* the predefined communicators (1 and 2) never give up their slots */
  if (*comm >= 3) comm_active[idx] = 0;
  *comm = 0;
  return MPI_SUCCESS;
}
285 
286 int MPI_Comm_size(MPI_Comm comm, int *size)
287 {
288   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
289   *size=1;
290   return MPI_SUCCESS;
291 }
292 
293 int MPI_Comm_rank(MPI_Comm comm, int *rank)
294 {
295   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
296   *rank=0;
297   return MPI_SUCCESS;
298 }
299 
/*
   Called for operations a one-process MPI cannot support: prints a diagnostic
   and reports failure.  Unlike MPI_Abort() it does NOT terminate the program.
*/
int MPIUni_Abort(MPI_Comm comm,int errorcode)
{
  printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
  return MPI_FAILURE;
}
305 
/* Terminates the process immediately; comm and errorcode are ignored */
int MPI_Abort(MPI_Comm comm,int errorcode)
{
  abort();
  return MPI_SUCCESS; /* unreachable: abort() does not return */
}
311 
312 /* --------------------------------------------------------------------------*/
313 
314 static int MPI_was_initialized = 0;
315 static int MPI_was_finalized   = 0;
316 
317 int MPI_Init(int *argc, char ***argv)
318 {
319   if (MPI_was_initialized) return MPI_FAILURE;
320   if (MPI_was_finalized) return MPI_FAILURE; /* MPI standard: once MPI_FINALIZE returns, no MPI routine (not even MPI_INIT) may be called, except ... */
321   MPI_was_initialized = 1;
322   return MPI_SUCCESS;
323 }
324 
/*
   Finalizes MPIUNI: frees the two predefined communicators (running their
   attribute delete callbacks), optionally warns about leaked user communicators,
   and resets the global counters.  May be called at most once.
*/
int MPI_Finalize(void)
{
  MPI_Comm comm;
  if (MPI_was_finalized) return MPI_FAILURE;
  if (!MPI_was_initialized) return MPI_FAILURE;
  comm = MPI_COMM_WORLD;
  MPI_Comm_free(&comm);
  comm = MPI_COMM_SELF;
  MPI_Comm_free(&comm);
#if defined(PETSC_USE_DEBUG)
  {
    int i;
    /* user communicators (handles >= 3) should have been freed by now */
    for (i=3; i<=MaxComm; i++) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }
  }
#endif
  /* reset counters */
  MaxComm  = 2;
  num_attr = 1;
  MPI_was_finalized = 1;
  return MPI_SUCCESS;
}
348 
349 int MPI_Initialized(int *flag)
350 {
351   *flag = MPI_was_initialized;
352   return MPI_SUCCESS;
353 }
354 
355 int MPI_Finalized(int *flag)
356 {
357   *flag = MPI_was_finalized;
358   return MPI_SUCCESS;
359 }
360 
361 /* -------------------     Fortran versions of several routines ------------------ */
362 
363 #if defined(PETSC_HAVE_FORTRAN_CAPS)
364 #define mpiunisetmoduleblock_          MPIUNISETMODULEBLOCK
365 #define mpiunisetfortranbasepointers_  MPIUNISETFORTRANBASEPOINTERS
366 #define petsc_mpi_init_                PETSC_MPI_INIT
367 #define petsc_mpi_finalize_            PETSC_MPI_FINALIZE
368 #define petsc_mpi_comm_size_           PETSC_MPI_COMM_SIZE
369 #define petsc_mpi_comm_rank_           PETSC_MPI_COMM_RANK
370 #define petsc_mpi_abort_               PETSC_MPI_ABORT
371 #define petsc_mpi_reduce_              PETSC_MPI_REDUCE
372 #define petsc_mpi_allreduce_           PETSC_MPI_ALLREDUCE
373 #define petsc_mpi_barrier_             PETSC_MPI_BARRIER
374 #define petsc_mpi_bcast_               PETSC_MPI_BCAST
375 #define petsc_mpi_gather_              PETSC_MPI_GATHER
376 #define petsc_mpi_allgather_           PETSC_MPI_ALLGATHER
377 #define petsc_mpi_comm_split_          PETSC_MPI_COMM_SPLIT
378 #define petsc_mpi_scan_                PETSC_MPI_SCAN
379 #define petsc_mpi_send_                PETSC_MPI_SEND
380 #define petsc_mpi_recv_                PETSC_MPI_RECV
381 #define petsc_mpi_reduce_scatter_      PETSC_MPI_REDUCE_SCATTER
382 #define petsc_mpi_irecv_               PETSC_MPI_IRECV
383 #define petsc_mpi_isend_               PETSC_MPI_ISEND
384 #define petsc_mpi_sendrecv_            PETSC_MPI_SENDRECV
385 #define petsc_mpi_test_                PETSC_MPI_TEST
386 #define petsc_mpi_waitall_             PETSC_MPI_WAITALL
387 #define petsc_mpi_waitany_             PETSC_MPI_WAITANY
388 #define petsc_mpi_allgatherv_          PETSC_MPI_ALLGATHERV
389 #define petsc_mpi_alltoallv_           PETSC_MPI_ALLTOALLV
390 #define petsc_mpi_comm_create_         PETSC_MPI_COMM_CREATE
391 #define petsc_mpi_address_             PETSC_MPI_ADDRESS
392 #define petsc_mpi_pack_                PETSC_MPI_PACK
393 #define petsc_mpi_unpack_              PETSC_MPI_UNPACK
394 #define petsc_mpi_pack_size_           PETSC_MPI_PACK_SIZE
395 #define petsc_mpi_type_struct_         PETSC_MPI_TYPE_STRUCT
396 #define petsc_mpi_type_commit_         PETSC_MPI_TYPE_COMMIT
397 #define petsc_mpi_wtime_               PETSC_MPI_WTIME
398 #define petsc_mpi_cancel_              PETSC_MPI_CANCEL
399 #define petsc_mpi_comm_dup_            PETSC_MPI_COMM_DUP
400 #define petsc_mpi_comm_free_           PETSC_MPI_COMM_FREE
401 #define petsc_mpi_get_count_           PETSC_MPI_GET_COUNT
402 #define petsc_mpi_get_processor_name_  PETSC_MPI_GET_PROCESSOR_NAME
403 #define petsc_mpi_initialized_         PETSC_MPI_INITIALIZED
404 #define petsc_mpi_iprobe_              PETSC_MPI_IPROBE
405 #define petsc_mpi_probe_               PETSC_MPI_PROBE
406 #define petsc_mpi_request_free_        PETSC_MPI_REQUEST_FREE
407 #define petsc_mpi_ssend_               PETSC_MPI_SSEND
408 #define petsc_mpi_wait_                PETSC_MPI_WAIT
409 #define petsc_mpi_comm_group_          PETSC_MPI_COMM_GROUP
410 #define petsc_mpi_exscan_              PETSC_MPI_EXSCAN
411 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
412 #define mpiunisetmoduleblock_          mpiunisetmoduleblock
413 #define mpiunisetfortranbasepointers_  mpiunisetfortranbasepointers
414 #define petsc_mpi_init_                petsc_mpi_init
415 #define petsc_mpi_finalize_            petsc_mpi_finalize
416 #define petsc_mpi_comm_size_           petsc_mpi_comm_size
417 #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank
418 #define petsc_mpi_abort_               petsc_mpi_abort
419 #define petsc_mpi_reduce_              petsc_mpi_reduce
420 #define petsc_mpi_allreduce_           petsc_mpi_allreduce
421 #define petsc_mpi_barrier_             petsc_mpi_barrier
422 #define petsc_mpi_bcast_               petsc_mpi_bcast
423 #define petsc_mpi_gather_              petsc_mpi_gather
424 #define petsc_mpi_allgather_           petsc_mpi_allgather
425 #define petsc_mpi_comm_split_          petsc_mpi_comm_split
426 #define petsc_mpi_scan_                petsc_mpi_scan
427 #define petsc_mpi_send_                petsc_mpi_send
428 #define petsc_mpi_recv_                petsc_mpi_recv
429 #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter
430 #define petsc_mpi_irecv_               petsc_mpi_irecv
431 #define petsc_mpi_isend_               petsc_mpi_isend
432 #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv
433 #define petsc_mpi_test_                petsc_mpi_test
434 #define petsc_mpi_waitall_             petsc_mpi_waitall
435 #define petsc_mpi_waitany_             petsc_mpi_waitany
436 #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv
437 #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv
438 #define petsc_mpi_comm_create_         petsc_mpi_comm_create
439 #define petsc_mpi_address_             petsc_mpi_address
440 #define petsc_mpi_pack_                petsc_mpi_pack
441 #define petsc_mpi_unpack_              petsc_mpi_unpack
442 #define petsc_mpi_pack_size_           petsc_mpi_pack_size
443 #define petsc_mpi_type_struct_         petsc_mpi_type_struct
444 #define petsc_mpi_type_commit_         petsc_mpi_type_commit
445 #define petsc_mpi_wtime_               petsc_mpi_wtime
446 #define petsc_mpi_cancel_              petsc_mpi_cancel
447 #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup
448 #define petsc_mpi_comm_free_           petsc_mpi_comm_free
449 #define petsc_mpi_get_count_           petsc_mpi_get_count
450 #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name
451 #define petsc_mpi_initialized_         petsc_mpi_initialized
452 #define petsc_mpi_iprobe_              petsc_mpi_iprobe
453 #define petsc_mpi_probe_               petsc_mpi_probe
454 #define petsc_mpi_request_free_        petsc_mpi_request_free
455 #define petsc_mpi_ssend_               petsc_mpi_ssend
456 #define petsc_mpi_wait_                petsc_mpi_wait
457 #define petsc_mpi_comm_group_          petsc_mpi_comm_group
458 #define petsc_mpi_exscan_              petsc_mpi_exscan
459 #endif
460 
461 #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
462 #define petsc_mpi_init_                petsc_mpi_init__
463 #define petsc_mpi_finalize_            petsc_mpi_finalize__
464 #define petsc_mpi_comm_size_           petsc_mpi_comm_size__
465 #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank__
466 #define petsc_mpi_abort_               petsc_mpi_abort__
467 #define petsc_mpi_reduce_              petsc_mpi_reduce__
468 #define petsc_mpi_allreduce_           petsc_mpi_allreduce__
469 #define petsc_mpi_barrier_             petsc_mpi_barrier__
470 #define petsc_mpi_bcast_               petsc_mpi_bcast__
471 #define petsc_mpi_gather_              petsc_mpi_gather__
472 #define petsc_mpi_allgather_           petsc_mpi_allgather__
473 #define petsc_mpi_comm_split_          petsc_mpi_comm_split__
474 #define petsc_mpi_scan_                petsc_mpi_scan__
475 #define petsc_mpi_send_                petsc_mpi_send__
476 #define petsc_mpi_recv_                petsc_mpi_recv__
477 #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter__
478 #define petsc_mpi_irecv_               petsc_mpi_irecv__
479 #define petsc_mpi_isend_               petsc_mpi_isend__
480 #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv__
481 #define petsc_mpi_test_                petsc_mpi_test__
482 #define petsc_mpi_waitall_             petsc_mpi_waitall__
483 #define petsc_mpi_waitany_             petsc_mpi_waitany__
484 #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv__
485 #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv__
486 #define petsc_mpi_comm_create_         petsc_mpi_comm_create__
487 #define petsc_mpi_address_             petsc_mpi_address__
488 #define petsc_mpi_pack_                petsc_mpi_pack__
489 #define petsc_mpi_unpack_              petsc_mpi_unpack__
490 #define petsc_mpi_pack_size_           petsc_mpi_pack_size__
491 #define petsc_mpi_type_struct_         petsc_mpi_type_struct__
492 #define petsc_mpi_type_commit_         petsc_mpi_type_commit__
493 #define petsc_mpi_wtime_               petsc_mpi_wtime__
494 #define petsc_mpi_cancel_              petsc_mpi_cancel__
495 #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup__
496 #define petsc_mpi_comm_free_           petsc_mpi_comm_free__
497 #define petsc_mpi_get_count_           petsc_mpi_get_count__
498 #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name__
499 #define petsc_mpi_initialized_         petsc_mpi_initialized__
500 #define petsc_mpi_iprobe_              petsc_mpi_iprobe__
501 #define petsc_mpi_probe_               petsc_mpi_probe__
502 #define petsc_mpi_request_free_        petsc_mpi_request_free__
503 #define petsc_mpi_ssend_               petsc_mpi_ssend__
504 #define petsc_mpi_wait_                petsc_mpi_wait__
505 #define petsc_mpi_comm_group_          petsc_mpi_comm_group__
506 #define petsc_mpi_exscan_              petsc_mpi_exscan__
507 #endif
508 
/* Do not build fortran interface if MPI namespace collision is to be avoided */
510 #if defined(PETSC_HAVE_FORTRAN)
511 
PETSC_EXTERN void mpiunisetmoduleblock_(void); /* not defined in this file; presumably provided by the Fortran side */

/* Records the address of Fortran's MPI_IN_PLACE sentinel so MPIUNI_Memcpy() can recognize it */
PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
{
  MPIUNIF_mpi_in_place   = f_mpi_in_place;
}
518 
/* Fortran binding of MPI_Init; also hooks up the Fortran module block first */
PETSC_EXTERN void petsc_mpi_init_(int *ierr)
{
  mpiunisetmoduleblock_();
  *ierr = MPI_Init((int*)0, (char***)0);
}

/* Fortran binding of MPI_Finalize */
PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
{
  *ierr = MPI_Finalize();
}
529 
/* With a single process every communicator has size 1 */
PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
{
  *size = 1;
  *ierr = 0;
}

/* The only process always has rank 0 */
PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
{
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

/* With one process any split yields the input communicator itself */
PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

/* Terminates the process; errorcode is ignored */
PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS; /* unreachable */
}
553 
/* Fortran binding of MPI_Reduce (delegates to the C macro/function) */
PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
{
  *ierr = MPI_Reduce(sendbuf,recvbuf,*count,*datatype,*op,*root,*comm);
}

/* Fortran binding of MPI_Allreduce */
PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_Allreduce(sendbuf,recvbuf,*count,*datatype,*op,*comm);
}

/* A barrier over one process is a no-op */
PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Broadcasting to oneself is a no-op: the root already holds the data */
PETSC_EXTERN void petsc_mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* NOTE(review): rcount/rdatatype are forwarded as pointers without dereferencing;
   this only works if MPIUNI's MPI_Gather macro ignores those arguments -- confirm against mpiuni's mpi.h */
PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root,int *comm,int *ierr)
{
  *ierr = MPI_Gather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*root,*comm);
}

/* Same pointer-forwarding caveat as petsc_mpi_gather_ above */
PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype,int *comm,int *ierr)
{
  *ierr = MPI_Allgather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*comm);
}

/* An inclusive scan over one process reduces to copying sendbuf into recvbuf */
PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPI_sizeof(*datatype));
}
588 
/* Point-to-point send is unsupported in MPIUNI (no matching receiver can exist) */
PETSC_EXTERN void petsc_mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}
593 
594 PETSC_EXTERN void petsc_mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int status,int *ierr)
595 {
596   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
597 }
598 
/* Unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Nonblocking receive is unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_irecv_(void *buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Nonblocking send is unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* A self send/receive pair degenerates to a memory copy; status is not filled in */
PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPI_sizeof(*sendtype));
}

/* Unsupported in MPIUNI (no requests can exist) */
PETSC_EXTERN void petsc_mpi_test_(int *request,int *flag,int *status,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* No outstanding requests are possible with one process: waits succeed immediately */
PETSC_EXTERN void petsc_mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
633 
/* Fortran binding of MPI_Allgatherv; count/displacement arrays are passed through */
PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Allgatherv(sendbuf,*sendcount,*sendtype,recvbuf,recvcounts,displs,*recvtype,*comm);
}

/* Fortran binding of MPI_Alltoallv */
PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Alltoallv(sendbuf,sendcounts,sdispls,*sendtype,recvbuf,recvcounts,rdispls,*recvtype,*comm);
}

/* With one process the "created" communicator is just the input communicator */
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
{
  *newcomm =  *comm;
  *ierr    = MPI_SUCCESS;
}

/* MPI_Address: the "address" is simply the pointer value */
PETSC_EXTERN void petsc_mpi_address_(void *location,MPI_Aint *address,int *ierr)
{
  *address =  (MPI_Aint) ((char *)location);
  *ierr    = MPI_SUCCESS;
}
655 
/* Packing is unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Unpacking is unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Struct datatype construction is unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_type_struct_(int *count,int *array_of_blocklengths,int * array_of_displaments,int *array_of_types,int *newtype,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Commit is a no-op: MPIUNI datatypes need no finalization */
PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
680 
/* Always returns 0.0: MPIUNI's Fortran stub provides no wall-clock timer.
   NOTE(review): lacks PETSC_EXTERN, unlike every sibling wrapper; the enclosing
   extern "C" block still gives it C linkage in C++ builds -- confirm intended. */
double petsc_mpi_wtime_(void)
{
  return 0.0;
}

/* Nothing to cancel with one process */
PETSC_EXTERN void petsc_mpi_cancel_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* The Fortran dup returns the input handle itself (no new communicator is allocated) */
PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm,int *out,int *ierr)
{
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}
696 
/* No-op: the Fortran dup above never allocates, so there is nothing to free */
PETSC_EXTERN void petsc_mpi_comm_free_(int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Unsupported in MPIUNI (no statuses are ever produced) */
PETSC_EXTERN void petsc_mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Reports the fixed name "localhost" (9 characters; Fortran character buffers need no NUL terminator) */
PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name,int *result_len,int *ierr,PETSC_FORTRAN_CHARLEN_T len)
{
  MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}
713 
/* Fortran binding of MPI_Initialized */
PETSC_EXTERN void petsc_mpi_initialized_(int *flag,int *ierr)
{
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}
719 
720 PETSC_EXTERN void petsc_mpi_iprobe_(int *source,int *tag,int *comm,int *glag,int *status,int *ierr)
721 {
722   *ierr = MPI_SUCCESS;
723 }
724 
/* Probing is a no-op with a single process; flag/status are left untouched */
PETSC_EXTERN void petsc_mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* No requests can exist, so freeing one is a no-op */
PETSC_EXTERN void petsc_mpi_request_free_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Synchronous send is unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Waiting is a no-op: no requests can be outstanding */
PETSC_EXTERN void petsc_mpi_wait_(int *request,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* No-op: the group argument is left untouched */
PETSC_EXTERN void petsc_mpi_comm_group_(int *comm,int *group,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Exclusive scan over one process: recvbuf is deliberately left untouched
   (rank 0 has no predecessor contribution) */
PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
754 
755 #endif /* PETSC_HAVE_FORTRAN */
756 
757 #if defined(__cplusplus)
758 }
759 #endif
760