xref: /petsc/src/sys/mpiuni/mpi.c (revision 7a3b9f033cf22d0cabcfecf34925be7a11b3c45b)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include <petscsys.h>
6 #if !defined(MPIUNI_H)
7 #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9 
10 #if defined(PETSC_HAVE_CUDA)
11 #include <cuda.h>
12 #include <cuda_runtime.h>
13 #endif
14 
15 #define MPI_SUCCESS 0
16 #define MPI_FAILURE 1
17 
18 void *MPIUNI_TMP = NULL;
19 
/*
       With MPI Uni there are exactly four distinct communicators:
    MPI_COMM_SELF, MPI_COMM_WORLD, and a MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)

    MPI_COMM_SELF and MPI_COMM_WORLD are freed with MPI_Comm_free() in MPI_Finalize(), but in general with PETSc,
     the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).

*/
28 #define MAX_ATTR 256
29 #define MAX_COMM 128
30 
/* One attribute value cached on a communicator (indexed by keyval) */
typedef struct {
  void *attribute_val;
  int  active;
} MPI_Attr;

/* Bookkeeping for one keyval created by MPI_Comm_create_keyval() */
typedef struct {
  void                *extra_state;
  MPI_Delete_function *del;
  int                 active;  /* Is this keyval in use by some comm? */
} MPI_Attr_keyval;

static MPI_Attr_keyval attr_keyval[MAX_ATTR];
static MPI_Attr        attr[MAX_COMM][MAX_ATTR];  /* attr[comm index][keyval] */
static int             comm_active[MAX_COMM];  /* Boolean array indicating which comms are in use */
static int             mpi_tag_ub = 100000000;  /* value returned for the predefined MPI_TAG_UB attribute */
static int             num_attr = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
static int             MaxComm  = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
static void*           MPIUNIF_mpi_in_place = 0;  /* address of the Fortran MPI_IN_PLACE sentinel, set by mpiunisetfortranbasepointers_() */

#define CommIdx(comm)  ((comm)-1)  /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
51 
52 #if defined(__cplusplus)
53 extern "C" {
54 #endif
55 
56 /*
57    To avoid problems with prototypes to the system memcpy() it is duplicated here
58 */
/* Copy n bytes from src to dst, honoring the MPI_IN_PLACE sentinel (both the C
   macro value and the Fortran sentinel address) by doing nothing.
   Returns MPI_SUCCESS, or MPI_FAILURE if a CUDA copy fails. */
int MPIUNI_Memcpy(void *dst,const void *src,int n)
{
  /* MPI_IN_PLACE on either side means the data is already where it belongs */
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (!n) return MPI_SUCCESS;

#if defined(PETSC_HAVE_CUDA) /* CUDA-aware MPIUNI */
  {
    cudaError_t         cudaerr;
    CUresult            cuerr;
    CUmemorytype        mtype;
    int                 dstType=0,srcType=0; /* 0: host memory; 1: device memory */
    enum cudaMemcpyKind kinds[2][2] = {{cudaMemcpyHostToHost,cudaMemcpyHostToDevice},{cudaMemcpyDeviceToHost,cudaMemcpyDeviceToDevice}};

    /* CUDA driver API cuPointerGetAttribute() is faster than CUDA runtime API cudaPointerGetAttributes() */
    cuerr = cuPointerGetAttribute(&mtype,CU_POINTER_ATTRIBUTE_MEMORY_TYPE,(CUdeviceptr)dst);
    if (cuerr == CUDA_SUCCESS && mtype == CU_MEMORYTYPE_DEVICE) dstType = 1;
    cuerr = cuPointerGetAttribute(&mtype,CU_POINTER_ATTRIBUTE_MEMORY_TYPE,(CUdeviceptr)src);
    if (cuerr == CUDA_SUCCESS && mtype == CU_MEMORYTYPE_DEVICE) srcType = 1;

    cudaerr = cudaMemcpy(dst,src,n,kinds[srcType][dstType]); /* Use synchronous copy per MPI semantics */
    if (cudaerr != cudaSuccess) return MPI_FAILURE;
  }
#else
  memcpy(dst,src,n);
#endif
  return MPI_SUCCESS;
}
87 
88 static int classcnt = 0;
89 static int codecnt = 0;
90 
91 int MPI_Add_error_class(int *cl)
92 {
93   *cl = classcnt++;
94   return MPI_SUCCESS;
95 }
96 
97 int MPI_Add_error_code(int cl,int *co)
98 {
99   if (cl >= classcnt) return MPI_FAILURE;
100   *co = codecnt++;
101   return MPI_SUCCESS;
102 }
103 
104 int MPI_Type_get_envelope(MPI_Datatype datatype,int *num_integers,int *num_addresses,int *num_datatypes,int *combiner)
105 {
106   int comb = datatype >> 28;
107   switch (comb) {
108   case MPI_COMBINER_NAMED:
109     *num_integers = 0;
110     *num_addresses = 0;
111     *num_datatypes = 0;
112     *combiner = comb;
113     break;
114   case MPI_COMBINER_DUP:
115     *num_integers = 0;
116     *num_addresses = 0;
117     *num_datatypes = 1;
118     *combiner = comb;
119     break;
120   case MPI_COMBINER_CONTIGUOUS:
121     *num_integers = 1;
122     *num_addresses = 0;
123     *num_datatypes = 1;
124     *combiner = comb;
125     break;
126   default:
127     return MPIUni_Abort(MPI_COMM_SELF,1);
128   }
129   return MPI_SUCCESS;
130 }
131 
132 int MPI_Type_get_contents(MPI_Datatype datatype,int max_integers,int max_addresses,int max_datatypes,int *array_of_integers,MPI_Aint *array_of_addresses,MPI_Datatype *array_of_datatypes)
133 {
134   int comb = datatype >> 28;
135   switch (comb) {
136   case MPI_COMBINER_NAMED:
137     return MPIUni_Abort(MPI_COMM_SELF,1);
138     break;
139   case MPI_COMBINER_DUP:
140     if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
141     array_of_datatypes[0] = datatype & 0x0fffffff;
142     break;
143   case MPI_COMBINER_CONTIGUOUS:
144     if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
145     array_of_integers[0] = (datatype >> 8) & 0xfff; /* count */
146     array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100;  /* basic named type (count=1) from which the contiguous type is derived */
147     break;
148   default:
149     return MPIUni_Abort(MPI_COMM_SELF,1);
150   }
151   return MPI_SUCCESS;
152 }
153 
154 /*
155    Used to set the built-in MPI_TAG_UB attribute
156 */
157 static int Keyval_setup(void)
158 {
159   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
160   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
161   attr[CommIdx(MPI_COMM_SELF )][0].active        = 1;
162   attr[CommIdx(MPI_COMM_SELF )][0].attribute_val = &mpi_tag_ub;
163   attr_keyval[0].active                          = 1;
164   return MPI_SUCCESS;
165 }
166 
167 int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
168 {
169   int i,keyid;
170   for (i=1; i<num_attr; i++) { /* the first attribute is always in use */
171     if (!attr_keyval[i].active) {
172       keyid = i;
173       goto found;
174     }
175   }
176   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD,1);
177   keyid = num_attr++;
178 
179 found:
180   attr_keyval[keyid].extra_state = extra_state;
181   attr_keyval[keyid].del         = delete_fn;
182   attr_keyval[keyid].active      = 1;
183   *keyval                        = keyid;
184   return MPI_SUCCESS;
185 }
186 
187 int MPI_Comm_free_keyval(int *keyval)
188 {
189   attr_keyval[*keyval].extra_state = 0;
190   attr_keyval[*keyval].del         = 0;
191   attr_keyval[*keyval].active      = 0;
192   *keyval = 0;
193   return MPI_SUCCESS;
194 }
195 
196 int MPI_Comm_set_attr(MPI_Comm comm,int keyval,void *attribute_val)
197 {
198   int idx = CommIdx(comm);
199   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
200   attr[idx][keyval].active        = 1;
201   attr[idx][keyval].attribute_val = attribute_val;
202   return MPI_SUCCESS;
203 }
204 
205 int MPI_Comm_delete_attr(MPI_Comm comm,int keyval)
206 {
207   int idx = CommIdx(comm);
208   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
209   if (attr[idx][keyval].active && attr_keyval[keyval].del) {
210     void *save_attribute_val        = attr[idx][keyval].attribute_val;
211     attr[idx][keyval].active        = 0;
212     attr[idx][keyval].attribute_val = 0;
213     (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
214   }
215   return MPI_SUCCESS;
216 }
217 
218 int MPI_Comm_get_attr(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
219 {
220   int idx = CommIdx(comm);
221   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
222   if (!keyval) Keyval_setup();
223   *flag                  = attr[idx][keyval].active;
224   *(void**)attribute_val = attr[idx][keyval].attribute_val;
225   return MPI_SUCCESS;
226 }
227 
228 int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
229 {
230   int j;
231   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
232   for (j=3; j<=MaxComm; j++) {
233     if (!comm_active[CommIdx(j)]) {
234       comm_active[CommIdx(j)] = 1;
235       *newcomm = j;
236       return MPI_SUCCESS;
237     }
238   }
239   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
240   *newcomm = ++MaxComm;
241   comm_active[CommIdx(*newcomm)] = 1;
242   return MPI_SUCCESS;
243 }
244 
245 int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
246 {
247   int j;
248   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
249   for (j=3; j<=MaxComm; j++) {
250     if (!comm_active[CommIdx(j)]) {
251       comm_active[CommIdx(j)] = 1;
252       *out = j;
253       return MPI_SUCCESS;
254     }
255   }
256   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
257   *out = ++MaxComm;
258   comm_active[CommIdx(*out)] = 1;
259   return MPI_SUCCESS;
260 }
261 
/* Free a communicator: run every active attribute's delete callback, clear the
   attribute table for this comm, release its slot (built-in comms 1 and 2 keep
   theirs), and set *comm to 0 (comm_null). */
int MPI_Comm_free(MPI_Comm *comm)
{
  int i;
  int idx = CommIdx(*comm);

  if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
  for (i=0; i<num_attr; i++) {
    /* NOTE: the callback runs before the slot is cleared here, unlike
       MPI_Comm_delete_attr() which clears first — the comm is being destroyed
       so reentrant attribute calls are not expected */
    if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[idx][i].attribute_val,attr_keyval[i].extra_state);
    attr[idx][i].active        = 0;
    attr[idx][i].attribute_val = 0;
  }
  if (*comm >= 3) comm_active[idx] = 0; /* only dynamically created comms occupy reusable slots */
  *comm = 0;
  return MPI_SUCCESS;
}
277 
278 int MPI_Comm_size(MPI_Comm comm, int *size)
279 {
280   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
281   *size=1;
282   return MPI_SUCCESS;
283 }
284 
285 int MPI_Comm_rank(MPI_Comm comm, int *rank)
286 {
287   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
288   *rank=0;
289   return MPI_SUCCESS;
290 }
291 
292 int MPIUni_Abort(MPI_Comm comm,int errorcode)
293 {
294   printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
295   return MPI_FAILURE;
296 }
297 
298 int MPI_Abort(MPI_Comm comm,int errorcode)
299 {
300   abort();
301   return MPI_SUCCESS;
302 }
303 
304 /* --------------------------------------------------------------------------*/
305 
static int MPI_was_initialized = 0; /* set (once) by MPI_Init() */
static int MPI_was_finalized   = 0; /* set (once) by MPI_Finalize(); never cleared, so MPI cannot be re-initialized */
308 
309 int MPI_Init(int *argc, char ***argv)
310 {
311   if (MPI_was_initialized) return MPI_FAILURE;
312   if (MPI_was_finalized) return MPI_FAILURE; /* MPI standard: once MPI_FINALIZE returns, no MPI routine (not even MPI_INIT) may be called, except ... */
313   MPI_was_initialized = 1;
314   return MPI_SUCCESS;
315 }
316 
/* Shut down MPIUNI: free the two built-in communicators (running any attribute
   delete callbacks), warn (in debug builds) about user communicators that were
   never freed, and reset the comm/keyval counters. */
int MPI_Finalize(void)
{
  MPI_Comm comm;
  if (MPI_was_finalized) return MPI_FAILURE;
  if (!MPI_was_initialized) return MPI_FAILURE;
  comm = MPI_COMM_WORLD;
  MPI_Comm_free(&comm);
  comm = MPI_COMM_SELF;
  MPI_Comm_free(&comm);
#if defined(PETSC_USE_DEBUG)
  {
    int i;
    for (i=3; i<=MaxComm; i++) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }
  }
#endif
  /* reset counters */
  MaxComm  = 2;
  num_attr = 1;
  MPI_was_finalized = 1;
  return MPI_SUCCESS;
}
340 
341 int MPI_Initialized(int *flag)
342 {
343   *flag = MPI_was_initialized;
344   return MPI_SUCCESS;
345 }
346 
347 int MPI_Finalized(int *flag)
348 {
349   *flag = MPI_was_finalized;
350   return MPI_SUCCESS;
351 }
352 
353 /* -------------------     Fortran versions of several routines ------------------ */
354 
355 #if defined(PETSC_HAVE_FORTRAN_CAPS)
356 #define mpiunisetmoduleblock_          MPIUNISETMODULEBLOCK
357 #define mpiunisetfortranbasepointers_  MPIUNISETFORTRANBASEPOINTERS
358 #define petsc_mpi_init_                PETSC_MPI_INIT
359 #define petsc_mpi_finalize_            PETSC_MPI_FINALIZE
360 #define petsc_mpi_comm_size_           PETSC_MPI_COMM_SIZE
361 #define petsc_mpi_comm_rank_           PETSC_MPI_COMM_RANK
362 #define petsc_mpi_abort_               PETSC_MPI_ABORT
363 #define petsc_mpi_reduce_              PETSC_MPI_REDUCE
364 #define petsc_mpi_allreduce_           PETSC_MPI_ALLREDUCE
365 #define petsc_mpi_barrier_             PETSC_MPI_BARRIER
366 #define petsc_mpi_bcast_               PETSC_MPI_BCAST
367 #define petsc_mpi_gather_              PETSC_MPI_GATHER
368 #define petsc_mpi_allgather_           PETSC_MPI_ALLGATHER
369 #define petsc_mpi_comm_split_          PETSC_MPI_COMM_SPLIT
370 #define petsc_mpi_scan_                PETSC_MPI_SCAN
371 #define petsc_mpi_send_                PETSC_MPI_SEND
372 #define petsc_mpi_recv_                PETSC_MPI_RECV
373 #define petsc_mpi_reduce_scatter_      PETSC_MPI_REDUCE_SCATTER
374 #define petsc_mpi_irecv_               PETSC_MPI_IRECV
375 #define petsc_mpi_isend_               PETSC_MPI_ISEND
376 #define petsc_mpi_sendrecv_            PETSC_MPI_SENDRECV
377 #define petsc_mpi_test_                PETSC_MPI_TEST
378 #define petsc_mpi_waitall_             PETSC_MPI_WAITALL
379 #define petsc_mpi_waitany_             PETSC_MPI_WAITANY
380 #define petsc_mpi_allgatherv_          PETSC_MPI_ALLGATHERV
381 #define petsc_mpi_alltoallv_           PETSC_MPI_ALLTOALLV
382 #define petsc_mpi_comm_create_         PETSC_MPI_COMM_CREATE
383 #define petsc_mpi_address_             PETSC_MPI_ADDRESS
384 #define petsc_mpi_pack_                PETSC_MPI_PACK
385 #define petsc_mpi_unpack_              PETSC_MPI_UNPACK
386 #define petsc_mpi_pack_size_           PETSC_MPI_PACK_SIZE
387 #define petsc_mpi_type_struct_         PETSC_MPI_TYPE_STRUCT
388 #define petsc_mpi_type_commit_         PETSC_MPI_TYPE_COMMIT
389 #define petsc_mpi_wtime_               PETSC_MPI_WTIME
390 #define petsc_mpi_cancel_              PETSC_MPI_CANCEL
391 #define petsc_mpi_comm_dup_            PETSC_MPI_COMM_DUP
392 #define petsc_mpi_comm_free_           PETSC_MPI_COMM_FREE
393 #define petsc_mpi_get_count_           PETSC_MPI_GET_COUNT
394 #define petsc_mpi_get_processor_name_  PETSC_MPI_GET_PROCESSOR_NAME
395 #define petsc_mpi_initialized_         PETSC_MPI_INITIALIZED
396 #define petsc_mpi_iprobe_              PETSC_MPI_IPROBE
397 #define petsc_mpi_probe_               PETSC_MPI_PROBE
398 #define petsc_mpi_request_free_        PETSC_MPI_REQUEST_FREE
399 #define petsc_mpi_ssend_               PETSC_MPI_SSEND
400 #define petsc_mpi_wait_                PETSC_MPI_WAIT
401 #define petsc_mpi_comm_group_          PETSC_MPI_COMM_GROUP
402 #define petsc_mpi_exscan_              PETSC_MPI_EXSCAN
403 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
404 #define mpiunisetmoduleblock_          mpiunisetmoduleblock
405 #define mpiunisetfortranbasepointers_  mpiunisetfortranbasepointers
406 #define petsc_mpi_init_                petsc_mpi_init
407 #define petsc_mpi_finalize_            petsc_mpi_finalize
408 #define petsc_mpi_comm_size_           petsc_mpi_comm_size
409 #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank
410 #define petsc_mpi_abort_               petsc_mpi_abort
411 #define petsc_mpi_reduce_              petsc_mpi_reduce
412 #define petsc_mpi_allreduce_           petsc_mpi_allreduce
413 #define petsc_mpi_barrier_             petsc_mpi_barrier
414 #define petsc_mpi_bcast_               petsc_mpi_bcast
415 #define petsc_mpi_gather_              petsc_mpi_gather
416 #define petsc_mpi_allgather_           petsc_mpi_allgather
417 #define petsc_mpi_comm_split_          petsc_mpi_comm_split
418 #define petsc_mpi_scan_                petsc_mpi_scan
419 #define petsc_mpi_send_                petsc_mpi_send
420 #define petsc_mpi_recv_                petsc_mpi_recv
421 #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter
422 #define petsc_mpi_irecv_               petsc_mpi_irecv
423 #define petsc_mpi_isend_               petsc_mpi_isend
424 #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv
425 #define petsc_mpi_test_                petsc_mpi_test
426 #define petsc_mpi_waitall_             petsc_mpi_waitall
427 #define petsc_mpi_waitany_             petsc_mpi_waitany
428 #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv
429 #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv
430 #define petsc_mpi_comm_create_         petsc_mpi_comm_create
431 #define petsc_mpi_address_             petsc_mpi_address
432 #define petsc_mpi_pack_                petsc_mpi_pack
433 #define petsc_mpi_unpack_              petsc_mpi_unpack
434 #define petsc_mpi_pack_size_           petsc_mpi_pack_size
435 #define petsc_mpi_type_struct_         petsc_mpi_type_struct
436 #define petsc_mpi_type_commit_         petsc_mpi_type_commit
437 #define petsc_mpi_wtime_               petsc_mpi_wtime
438 #define petsc_mpi_cancel_              petsc_mpi_cancel
439 #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup
440 #define petsc_mpi_comm_free_           petsc_mpi_comm_free
441 #define petsc_mpi_get_count_           petsc_mpi_get_count
442 #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name
443 #define petsc_mpi_initialized_         petsc_mpi_initialized
444 #define petsc_mpi_iprobe_              petsc_mpi_iprobe
445 #define petsc_mpi_probe_               petsc_mpi_probe
446 #define petsc_mpi_request_free_        petsc_mpi_request_free
447 #define petsc_mpi_ssend_               petsc_mpi_ssend
448 #define petsc_mpi_wait_                petsc_mpi_wait
449 #define petsc_mpi_comm_group_          petsc_mpi_comm_group
450 #define petsc_mpi_exscan_              petsc_mpi_exscan
451 #endif
452 
453 #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
454 #define petsc_mpi_init_                petsc_mpi_init__
455 #define petsc_mpi_finalize_            petsc_mpi_finalize__
456 #define petsc_mpi_comm_size_           petsc_mpi_comm_size__
457 #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank__
458 #define petsc_mpi_abort_               petsc_mpi_abort__
459 #define petsc_mpi_reduce_              petsc_mpi_reduce__
460 #define petsc_mpi_allreduce_           petsc_mpi_allreduce__
461 #define petsc_mpi_barrier_             petsc_mpi_barrier__
462 #define petsc_mpi_bcast_               petsc_mpi_bcast__
463 #define petsc_mpi_gather_              petsc_mpi_gather__
464 #define petsc_mpi_allgather_           petsc_mpi_allgather__
465 #define petsc_mpi_comm_split_          petsc_mpi_comm_split__
466 #define petsc_mpi_scan_                petsc_mpi_scan__
467 #define petsc_mpi_send_                petsc_mpi_send__
468 #define petsc_mpi_recv_                petsc_mpi_recv__
469 #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter__
470 #define petsc_mpi_irecv_               petsc_mpi_irecv__
471 #define petsc_mpi_isend_               petsc_mpi_isend__
472 #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv__
473 #define petsc_mpi_test_                petsc_mpi_test__
474 #define petsc_mpi_waitall_             petsc_mpi_waitall__
475 #define petsc_mpi_waitany_             petsc_mpi_waitany__
476 #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv__
477 #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv__
478 #define petsc_mpi_comm_create_         petsc_mpi_comm_create__
479 #define petsc_mpi_address_             petsc_mpi_address__
480 #define petsc_mpi_pack_                petsc_mpi_pack__
481 #define petsc_mpi_unpack_              petsc_mpi_unpack__
482 #define petsc_mpi_pack_size_           petsc_mpi_pack_size__
483 #define petsc_mpi_type_struct_         petsc_mpi_type_struct__
484 #define petsc_mpi_type_commit_         petsc_mpi_type_commit__
485 #define petsc_mpi_wtime_               petsc_mpi_wtime__
486 #define petsc_mpi_cancel_              petsc_mpi_cancel__
487 #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup__
488 #define petsc_mpi_comm_free_           petsc_mpi_comm_free__
489 #define petsc_mpi_get_count_           petsc_mpi_get_count__
490 #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name__
491 #define petsc_mpi_initialized_         petsc_mpi_initialized__
492 #define petsc_mpi_iprobe_              petsc_mpi_iprobe__
493 #define petsc_mpi_probe_               petsc_mpi_probe__
494 #define petsc_mpi_request_free_        petsc_mpi_request_free__
495 #define petsc_mpi_ssend_               petsc_mpi_ssend__
496 #define petsc_mpi_wait_                petsc_mpi_wait__
497 #define petsc_mpi_comm_group_          petsc_mpi_comm_group__
498 #define petsc_mpi_exscan_              petsc_mpi_exscan__
499 #endif
500 
/* Do not build fortran interface if MPI namespace collision is to be avoided */
502 #if defined(PETSC_HAVE_FORTRAN)
503 
/* Fortran bindings: every argument arrives by reference per the Fortran
   calling convention; *ierr carries the return code back to Fortran. */
PETSC_EXTERN void mpiunisetmoduleblock_(void);

/* Record the address of the Fortran MPI_IN_PLACE sentinel so MPIUNI_Memcpy() can recognize it */
PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
{
  MPIUNIF_mpi_in_place   = f_mpi_in_place;
}

PETSC_EXTERN void petsc_mpi_init_(int *ierr)
{
  mpiunisetmoduleblock_();
  *ierr = MPI_Init((int*)0, (char***)0);
}

PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
{
  *ierr = MPI_Finalize();
}

PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
{
  *size = 1;
  *ierr = 0;
}

PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
{
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

/* With a single process every color/key combination yields the input communicator */
PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS; /* not reached */
}
545 
/* Collectives on one process reduce to (at most) copying the send buffer to
   the receive buffer; the C macros/functions they forward to handle that. */
PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
{
  *ierr = MPI_Reduce(sendbuf,recvbuf,*count,*datatype,*op,*root,*comm);
}

PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_Allreduce(sendbuf,recvbuf,*count,*datatype,*op,*comm);
}

PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm,int *ierr)
{
  *ierr = MPI_SUCCESS; /* nothing to synchronize with */
}

PETSC_EXTERN void petsc_mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS; /* root's buffer is already everyone's buffer */
}

/* NOTE(review): rcount/rdatatype are forwarded un-dereferenced — presumably the
   MPIUNI MPI_Gather/MPI_Allgather macros ignore them; verify against mpiuni's mpi.h */
PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root,int *comm,int *ierr)
{
  *ierr = MPI_Gather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*root,*comm);
}

PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype,int *comm,int *ierr)
{
  *ierr = MPI_Allgather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*comm);
}

/* Inclusive scan over one process is an identity copy */
PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPI_sizeof(*datatype));
}
580 
/* A blocking send can never be matched in a single-process run: unsupported */
PETSC_EXTERN void petsc_mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}
585 
586 PETSC_EXTERN void petsc_mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int status,int *ierr)
587 {
588   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
589 }
590 
/* These operations are not emulated by MPIUNI: report failure via MPIUni_Abort() */
PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_irecv_(void *buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}
605 
/* Self send/receive: copy the send buffer into the receive buffer.
   NOTE(review): recvcount/recvtype are not consulted — assumes the caller's
   receive side matches the send side; confirm against callers. */
PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPI_sizeof(*sendtype));
}

/* Requests are never created in MPIUNI, so testing one is unsupported */
PETSC_EXTERN void petsc_mpi_test_(int *request,int *flag,int *status,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
{
  *ierr = MPI_SUCCESS; /* nothing to wait for */
}

PETSC_EXTERN void petsc_mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
{
  *ierr = MPI_SUCCESS; /* nothing to wait for */
}

PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Allgatherv(sendbuf,*sendcount,*sendtype,recvbuf,recvcounts,displs,*recvtype,*comm);
}

PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Alltoallv(sendbuf,sendcounts,sdispls,*sendtype,recvbuf,recvcounts,rdispls,*recvtype,*comm);
}
635 
/* NOTE(review): unlike the C MPI_Comm_create(), this returns the input comm
   itself rather than allocating a new handle — confirm this asymmetry is intended */
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
{
  *newcomm =  *comm;
  *ierr    = MPI_SUCCESS;
}

/* Absolute address of location, as required by the (deprecated) MPI_Address */
PETSC_EXTERN void petsc_mpi_address_(void *location,MPI_Aint *address,int *ierr)
{
  *address =  (MPI_Aint) ((char *)location);
  *ierr    = MPI_SUCCESS;
}

/* Pack/unpack and derived struct types are not emulated by MPIUNI */
PETSC_EXTERN void petsc_mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_type_struct_(int *count,int *array_of_blocklengths,int * array_of_displaments,int *array_of_types,int *newtype,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype,int *ierr)
{
  *ierr = MPI_SUCCESS; /* nothing to commit */
}
672 
/* NOTE(review): always returns 0.0, so Fortran-side MPI_Wtime deltas are
   meaningless — confirm whether a real clock was intended here */
double petsc_mpi_wtime_(void)
{
  return 0.0;
}

PETSC_EXTERN void petsc_mpi_cancel_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS; /* no requests exist to cancel */
}

PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm,int *out,int *ierr)
{
  /* NOTE(review): returns the input handle, unlike the C MPI_Comm_dup() which
     allocates a fresh one — confirm this asymmetry is intended */
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_free_(int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* NOTE(review): Fortran convention expects the rest of `name` (up to len) to be
   blank-padded; only the first 9 characters are written here — verify callers */
PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name,int *result_len,int *ierr,PETSC_FORTRAN_CHARLEN_T len)
{
  MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_initialized_(int *flag,int *ierr)
{
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}
711 
712 PETSC_EXTERN void petsc_mpi_iprobe_(int *source,int *tag,int *comm,int *glag,int *status,int *ierr)
713 {
714   *ierr = MPI_SUCCESS;
715 }
716 
PETSC_EXTERN void petsc_mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_request_free_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS; /* no requests exist to free */
}

/* A synchronous send can never be matched in a single-process run: unsupported */
PETSC_EXTERN void petsc_mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_wait_(int *request,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS; /* nothing to wait for */
}

PETSC_EXTERN void petsc_mpi_comm_group_(int *comm,int *group,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS; /* exclusive scan of one process leaves recvbuf undefined per MPI */
}
746 
747 #endif /* PETSC_HAVE_FORTRAN */
748 
749 #if defined(__cplusplus)
750 }
751 #endif
752