xref: /petsc/src/sys/mpiuni/mpi.c (revision 3e1d142d2e05843cb1ed541a9706c8b6035b9125)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include <petscsys.h>
6 #if !defined(MPIUNI_H)
7 #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9 
10 #include <petsc/private/petscimpl.h> /* for PetscCUPMInitialized */
11 
12 #if defined(PETSC_HAVE_CUDA)
13   #include <cuda_runtime.h>
14 #endif
15 
16 #if defined(PETSC_HAVE_HIP)
17   #include <hip/hip_runtime.h>
18 #endif
19 
20 #define MPI_SUCCESS 0
21 #define MPI_FAILURE 1
22 
23 void *MPIUNI_TMP = NULL;
24 
25 /*
26        With MPI Uni there are exactly four distinct communicators:
27     MPI_COMM_SELF, MPI_COMM_WORLD, and a MPI_Comm_dup() of each of these (duplicates of duplicates return the same communictor)
28 
29     MPI_COMM_SELF and MPI_COMM_WORLD are MPI_Comm_free() in MPI_Finalize() but in general with PETSc,
30      the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).
31 
32 */
33 #define MAX_ATTR 256
34 #define MAX_COMM 128
35 
/* Attribute value attached to one (communicator, keyval) pair */
typedef struct {
  void *attribute_val;
  int  active;
} MPI_Attr;

/* Bookkeeping for one keyval created by MPI_Comm_create_keyval() */
typedef struct {
  void                *extra_state;
  MPI_Delete_function *del;
  int                 active;  /* Is this keyval in use by some comm? */
} MPI_Attr_keyval;

static MPI_Attr_keyval attr_keyval[MAX_ATTR];
static MPI_Attr        attr[MAX_COMM][MAX_ATTR]; /* per-communicator attribute table, indexed by CommIdx(comm) and keyval */
static int             comm_active[MAX_COMM];  /* Boolean array indicating which comms are in use */
static int             mpi_tag_ub = 100000000; /* value of the predefined MPI_TAG_UB attribute (keyval 0) */
static int             num_attr = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
static int             MaxComm  = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
static void*           MPIUNIF_mpi_in_place = 0; /* Fortran MPI_IN_PLACE sentinel; set by mpiunisetfortranbasepointers_() */

#define CommIdx(comm)  ((comm)-1)  /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
56 
57 #if defined(__cplusplus)
58 extern "C" {
59 #endif
60 
61 /*
62    To avoid problems with prototypes to the system memcpy() it is duplicated here
63 */
int MPIUNI_Memcpy(void *dst,const void *src,int n)
{
  /* MPI_IN_PLACE (C sentinel) or the Fortran sentinel means "no copy needed" */
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (!n) return MPI_SUCCESS; /* zero-byte copy is a no-op */

  /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
#if defined(PETSC_HAVE_CUDA)
  /* cudaMemcpyDefault lets the runtime infer host/device direction from the pointers.
     Note the dangling else: when the device runtime is not initialized we fall through
     to the plain host memcpy below. */
  if (PetscCUDAInitialized) {cudaError_t cerr = cudaMemcpy(dst,src,n,cudaMemcpyDefault);if (cerr != cudaSuccess) return MPI_FAILURE;} else
#elif defined(PETSC_HAVE_HIP)
  if (PetscHIPInitialized)  {hipError_t  cerr = hipMemcpy(dst,src,n,hipMemcpyDefault);  if (cerr != hipSuccess)  return MPI_FAILURE;} else
#endif
  {memcpy(dst,src,n);}
  return MPI_SUCCESS;
}
79 
/* Counters handing out ids for MPI_Add_error_class()/MPI_Add_error_code() */
static int classcnt = 0;
static int codecnt = 0;
82 
83 int MPI_Add_error_class(int *cl)
84 {
85   *cl = classcnt++;
86   return MPI_SUCCESS;
87 }
88 
89 int MPI_Add_error_code(int cl,int *co)
90 {
91   if (cl >= classcnt) return MPI_FAILURE;
92   *co = codecnt++;
93   return MPI_SUCCESS;
94 }
95 
96 int MPI_Type_get_envelope(MPI_Datatype datatype,int *num_integers,int *num_addresses,int *num_datatypes,int *combiner)
97 {
98   int comb = datatype >> 28;
99   switch (comb) {
100   case MPI_COMBINER_NAMED:
101     *num_integers = 0;
102     *num_addresses = 0;
103     *num_datatypes = 0;
104     *combiner = comb;
105     break;
106   case MPI_COMBINER_DUP:
107     *num_integers = 0;
108     *num_addresses = 0;
109     *num_datatypes = 1;
110     *combiner = comb;
111     break;
112   case MPI_COMBINER_CONTIGUOUS:
113     *num_integers = 1;
114     *num_addresses = 0;
115     *num_datatypes = 1;
116     *combiner = comb;
117     break;
118   default:
119     return MPIUni_Abort(MPI_COMM_SELF,1);
120   }
121   return MPI_SUCCESS;
122 }
123 
/*
   Decode the arguments a derived datatype was constructed with.
   MPIUNI packs everything into the integer handle itself: bits 28-31 hold the
   combiner; for contiguous types bits 8-19 hold the count (see the masks below).
*/
int MPI_Type_get_contents(MPI_Datatype datatype,int max_integers,int max_addresses,int max_datatypes,int *array_of_integers,MPI_Aint *array_of_addresses,MPI_Datatype *array_of_datatypes)
{
  int comb = datatype >> 28; /* combiner lives in the top 4 bits of the handle */
  switch (comb) {
  case MPI_COMBINER_NAMED:
    return MPIUni_Abort(MPI_COMM_SELF,1); /* MPI forbids querying contents of predefined (named) types */
  case MPI_COMBINER_DUP:
    if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
    array_of_datatypes[0] = datatype & 0x0fffffff; /* strip the combiner bits to recover the duplicated type */
    break;
  case MPI_COMBINER_CONTIGUOUS:
    if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
    array_of_integers[0] = (datatype >> 8) & 0xfff; /* count */
    array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100;  /* basic named type (count=1) from which the contiguous type is derived */
    break;
  default:
    return MPIUni_Abort(MPI_COMM_SELF,1);
  }
  return MPI_SUCCESS;
}
144 
145 /*
146    Used to set the built-in MPI_TAG_UB attribute
147 */
148 static int Keyval_setup(void)
149 {
150   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
151   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
152   attr[CommIdx(MPI_COMM_SELF)][0].active        = 1;
153   attr[CommIdx(MPI_COMM_SELF)][0].attribute_val = &mpi_tag_ub;
154   attr_keyval[0].active                          = 1;
155   return MPI_SUCCESS;
156 }
157 
158 int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
159 {
160   int i,keyid;
161   for (i=1; i<num_attr; i++) { /* the first attribute is always in use */
162     if (!attr_keyval[i].active) {
163       keyid = i;
164       goto found;
165     }
166   }
167   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD,1);
168   keyid = num_attr++;
169 
170 found:
171   attr_keyval[keyid].extra_state = extra_state;
172   attr_keyval[keyid].del         = delete_fn;
173   attr_keyval[keyid].active      = 1;
174   *keyval                        = keyid;
175   return MPI_SUCCESS;
176 }
177 
178 int MPI_Comm_free_keyval(int *keyval)
179 {
180   attr_keyval[*keyval].extra_state = 0;
181   attr_keyval[*keyval].del         = 0;
182   attr_keyval[*keyval].active      = 0;
183   *keyval = 0;
184   return MPI_SUCCESS;
185 }
186 
187 int MPI_Comm_set_attr(MPI_Comm comm,int keyval,void *attribute_val)
188 {
189   int idx = CommIdx(comm);
190   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
191   attr[idx][keyval].active        = 1;
192   attr[idx][keyval].attribute_val = attribute_val;
193   return MPI_SUCCESS;
194 }
195 
196 int MPI_Comm_delete_attr(MPI_Comm comm,int keyval)
197 {
198   int idx = CommIdx(comm);
199   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
200   if (attr[idx][keyval].active && attr_keyval[keyval].del) {
201     void *save_attribute_val        = attr[idx][keyval].attribute_val;
202     attr[idx][keyval].active        = 0;
203     attr[idx][keyval].attribute_val = 0;
204     (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
205   }
206   return MPI_SUCCESS;
207 }
208 
209 int MPI_Comm_get_attr(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
210 {
211   int idx = CommIdx(comm);
212   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
213   if (!keyval) Keyval_setup();
214   *flag                  = attr[idx][keyval].active;
215   *(void**)attribute_val = attr[idx][keyval].attribute_val;
216   return MPI_SUCCESS;
217 }
218 
/* Per-communicator names, indexed by CommIdx(comm); slots beyond the two
   builtins start empty and are filled by MPI_Comm_set_name(). */
static char all_comm_names[MAX_COMM][MPI_MAX_OBJECT_NAME] = {
  "MPI_COMM_SELF",
  "MPI_COMM_WORLD"
};
223 
224 int MPI_Comm_get_name(MPI_Comm comm,char *comm_name,int *resultlen)
225 {
226   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
227   if (!comm_name || !resultlen) return MPI_FAILURE;
228   strncpy(comm_name,all_comm_names[CommIdx(comm)],MPI_MAX_OBJECT_NAME-1);
229   *resultlen = (int)strlen(comm_name);
230   return MPI_SUCCESS;
231 }
232 
233 int MPI_Comm_set_name(MPI_Comm comm,const char *comm_name)
234 {
235   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
236   if (!comm_name) return MPI_FAILURE;
237   if (strlen(comm_name) > MPI_MAX_OBJECT_NAME-1) return MPI_FAILURE;
238   strncpy(all_comm_names[CommIdx(comm)],comm_name,MPI_MAX_OBJECT_NAME-1);
239   return MPI_SUCCESS;
240 }
241 
242 int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
243 {
244   int j;
245   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
246   for (j=3; j<=MaxComm; j++) {
247     if (!comm_active[CommIdx(j)]) {
248       comm_active[CommIdx(j)] = 1;
249       *newcomm = j;
250       return MPI_SUCCESS;
251     }
252   }
253   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
254   *newcomm = ++MaxComm;
255   comm_active[CommIdx(*newcomm)] = 1;
256   return MPI_SUCCESS;
257 }
258 
259 int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
260 {
261   int j;
262   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
263   for (j=3; j<=MaxComm; j++) {
264     if (!comm_active[CommIdx(j)]) {
265       comm_active[CommIdx(j)] = 1;
266       *out = j;
267       return MPI_SUCCESS;
268     }
269   }
270   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
271   *out = ++MaxComm;
272   comm_active[CommIdx(*out)] = 1;
273   return MPI_SUCCESS;
274 }
275 
276 int MPI_Comm_free(MPI_Comm *comm)
277 {
278   int i;
279   int idx = CommIdx(*comm);
280 
281   if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
282   for (i=0; i<num_attr; i++) {
283     if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[idx][i].attribute_val,attr_keyval[i].extra_state);
284     attr[idx][i].active        = 0;
285     attr[idx][i].attribute_val = 0;
286   }
287   if (*comm >= 3) comm_active[idx] = 0;
288   *comm = 0;
289   return MPI_SUCCESS;
290 }
291 
292 int MPI_Comm_size(MPI_Comm comm, int *size)
293 {
294   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
295   *size=1;
296   return MPI_SUCCESS;
297 }
298 
299 int MPI_Comm_rank(MPI_Comm comm, int *rank)
300 {
301   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
302   *rank=0;
303   return MPI_SUCCESS;
304 }
305 
306 int MPIUni_Abort(MPI_Comm comm,int errorcode)
307 {
308   printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
309   return MPI_ERR_NOSUPPORT;
310 }
311 
/* Terminate the (single-process) job immediately; errorcode is ignored. */
int MPI_Abort(MPI_Comm comm,int errorcode)
{
  abort();
  return MPI_SUCCESS; /* not reached; present to satisfy the prototype */
}
317 
318 /* --------------------------------------------------------------------------*/
319 
/* Lifecycle flags queried by MPI_Initialized()/MPI_Finalized() */
static int MPI_was_initialized = 0;
static int MPI_was_finalized   = 0;
322 
/*
   Initialize MPIUNI; argc/argv are ignored. Fails if already initialized.
   May be called again after MPI_Finalize() (see the relaxation note below).
*/
int MPI_Init(int *argc, char ***argv)
{
  if (MPI_was_initialized) return MPI_FAILURE;
  /* MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so an MPI standard compliant
     MPIU should have this 'if (MPI_was_finalized) return MPI_FAILURE;' check. We relax it here to make life easier for users
     of MPIU so that they can do multiple PetscInitialize/Finalize().
  */
  /* if (MPI_was_finalized) return MPI_FAILURE; */
  MPI_was_initialized = 1;
  MPI_was_finalized   = 0;
  return MPI_SUCCESS;
}
335 
/* Thread-aware init: the `required` level is ignored; *provided always
   reports the fixed level returned by MPI_Query_thread(). */
int MPI_Init_thread(int *argc, char ***argv, int required, int* provided)
{
  MPI_Query_thread(provided);
  return MPI_Init(argc,argv);
}
341 
/* MPIUNI always reports MPI_THREAD_FUNNELED as the provided thread level. */
int MPI_Query_thread(int* provided)
{
  *provided = MPI_THREAD_FUNNELED;
  return MPI_SUCCESS;
}
347 
/*
   Tear down MPIUNI: free the two builtin communicators (running any attribute
   delete callbacks), warn in debug builds about user communicators never
   freed, then reset all counters so a subsequent MPI_Init() starts clean.
   The comm frees must happen before the counter reset, since MPI_Comm_free()
   consults MaxComm and num_attr.
*/
int MPI_Finalize(void)
{
  MPI_Comm comm;
  if (MPI_was_finalized) return MPI_FAILURE;
  if (!MPI_was_initialized) return MPI_FAILURE;
  comm = MPI_COMM_WORLD;
  MPI_Comm_free(&comm);
  comm = MPI_COMM_SELF;
  MPI_Comm_free(&comm);
#if defined(PETSC_USE_DEBUG)
  {
    int i;
    for (i=3; i<=MaxComm; i++) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }
  }
#endif
  /* reset counters */
  MaxComm  = 2;
  num_attr = 1;
  MPI_was_finalized   = 1;
  MPI_was_initialized = 0;
  PETSC_COMM_WORLD    = MPI_COMM_NULL; /* force PETSc to rebuild its world comm on re-init */
  return MPI_SUCCESS;
}
373 
374 int MPI_Initialized(int *flag)
375 {
376   *flag = MPI_was_initialized;
377   return MPI_SUCCESS;
378 }
379 
380 int MPI_Finalized(int *flag)
381 {
382   *flag = MPI_was_finalized;
383   return MPI_SUCCESS;
384 }
385 
386 /* -------------------     Fortran versions of several routines ------------------ */
387 
388 #if defined(PETSC_HAVE_FORTRAN_CAPS)
389 #define mpiunisetmoduleblock_          MPIUNISETMODULEBLOCK
390 #define mpiunisetfortranbasepointers_  MPIUNISETFORTRANBASEPOINTERS
391 #define petsc_mpi_init_                PETSC_MPI_INIT
392 #define petsc_mpi_finalize_            PETSC_MPI_FINALIZE
393 #define petsc_mpi_comm_size_           PETSC_MPI_COMM_SIZE
394 #define petsc_mpi_comm_rank_           PETSC_MPI_COMM_RANK
395 #define petsc_mpi_abort_               PETSC_MPI_ABORT
396 #define petsc_mpi_reduce_              PETSC_MPI_REDUCE
397 #define petsc_mpi_allreduce_           PETSC_MPI_ALLREDUCE
398 #define petsc_mpi_barrier_             PETSC_MPI_BARRIER
399 #define petsc_mpi_bcast_               PETSC_MPI_BCAST
400 #define petsc_mpi_gather_              PETSC_MPI_GATHER
401 #define petsc_mpi_allgather_           PETSC_MPI_ALLGATHER
402 #define petsc_mpi_comm_split_          PETSC_MPI_COMM_SPLIT
403 #define petsc_mpi_scan_                PETSC_MPI_SCAN
404 #define petsc_mpi_send_                PETSC_MPI_SEND
405 #define petsc_mpi_recv_                PETSC_MPI_RECV
406 #define petsc_mpi_reduce_scatter_      PETSC_MPI_REDUCE_SCATTER
407 #define petsc_mpi_irecv_               PETSC_MPI_IRECV
408 #define petsc_mpi_isend_               PETSC_MPI_ISEND
409 #define petsc_mpi_sendrecv_            PETSC_MPI_SENDRECV
410 #define petsc_mpi_test_                PETSC_MPI_TEST
411 #define petsc_mpi_waitall_             PETSC_MPI_WAITALL
412 #define petsc_mpi_waitany_             PETSC_MPI_WAITANY
413 #define petsc_mpi_allgatherv_          PETSC_MPI_ALLGATHERV
414 #define petsc_mpi_alltoallv_           PETSC_MPI_ALLTOALLV
415 #define petsc_mpi_comm_create_         PETSC_MPI_COMM_CREATE
416 #define petsc_mpi_address_             PETSC_MPI_ADDRESS
417 #define petsc_mpi_pack_                PETSC_MPI_PACK
418 #define petsc_mpi_unpack_              PETSC_MPI_UNPACK
419 #define petsc_mpi_pack_size_           PETSC_MPI_PACK_SIZE
420 #define petsc_mpi_type_struct_         PETSC_MPI_TYPE_STRUCT
421 #define petsc_mpi_type_commit_         PETSC_MPI_TYPE_COMMIT
422 #define petsc_mpi_wtime_               PETSC_MPI_WTIME
423 #define petsc_mpi_cancel_              PETSC_MPI_CANCEL
424 #define petsc_mpi_comm_dup_            PETSC_MPI_COMM_DUP
425 #define petsc_mpi_comm_free_           PETSC_MPI_COMM_FREE
426 #define petsc_mpi_get_count_           PETSC_MPI_GET_COUNT
427 #define petsc_mpi_get_processor_name_  PETSC_MPI_GET_PROCESSOR_NAME
428 #define petsc_mpi_initialized_         PETSC_MPI_INITIALIZED
429 #define petsc_mpi_iprobe_              PETSC_MPI_IPROBE
430 #define petsc_mpi_probe_               PETSC_MPI_PROBE
431 #define petsc_mpi_request_free_        PETSC_MPI_REQUEST_FREE
432 #define petsc_mpi_ssend_               PETSC_MPI_SSEND
433 #define petsc_mpi_wait_                PETSC_MPI_WAIT
434 #define petsc_mpi_comm_group_          PETSC_MPI_COMM_GROUP
435 #define petsc_mpi_exscan_              PETSC_MPI_EXSCAN
436 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
437 #define mpiunisetmoduleblock_          mpiunisetmoduleblock
438 #define mpiunisetfortranbasepointers_  mpiunisetfortranbasepointers
439 #define petsc_mpi_init_                petsc_mpi_init
440 #define petsc_mpi_finalize_            petsc_mpi_finalize
441 #define petsc_mpi_comm_size_           petsc_mpi_comm_size
442 #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank
443 #define petsc_mpi_abort_               petsc_mpi_abort
444 #define petsc_mpi_reduce_              petsc_mpi_reduce
445 #define petsc_mpi_allreduce_           petsc_mpi_allreduce
446 #define petsc_mpi_barrier_             petsc_mpi_barrier
447 #define petsc_mpi_bcast_               petsc_mpi_bcast
448 #define petsc_mpi_gather_              petsc_mpi_gather
449 #define petsc_mpi_allgather_           petsc_mpi_allgather
450 #define petsc_mpi_comm_split_          petsc_mpi_comm_split
451 #define petsc_mpi_scan_                petsc_mpi_scan
452 #define petsc_mpi_send_                petsc_mpi_send
453 #define petsc_mpi_recv_                petsc_mpi_recv
454 #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter
455 #define petsc_mpi_irecv_               petsc_mpi_irecv
456 #define petsc_mpi_isend_               petsc_mpi_isend
457 #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv
458 #define petsc_mpi_test_                petsc_mpi_test
459 #define petsc_mpi_waitall_             petsc_mpi_waitall
460 #define petsc_mpi_waitany_             petsc_mpi_waitany
461 #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv
462 #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv
463 #define petsc_mpi_comm_create_         petsc_mpi_comm_create
464 #define petsc_mpi_address_             petsc_mpi_address
465 #define petsc_mpi_pack_                petsc_mpi_pack
466 #define petsc_mpi_unpack_              petsc_mpi_unpack
467 #define petsc_mpi_pack_size_           petsc_mpi_pack_size
468 #define petsc_mpi_type_struct_         petsc_mpi_type_struct
469 #define petsc_mpi_type_commit_         petsc_mpi_type_commit
470 #define petsc_mpi_wtime_               petsc_mpi_wtime
471 #define petsc_mpi_cancel_              petsc_mpi_cancel
472 #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup
473 #define petsc_mpi_comm_free_           petsc_mpi_comm_free
474 #define petsc_mpi_get_count_           petsc_mpi_get_count
475 #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name
476 #define petsc_mpi_initialized_         petsc_mpi_initialized
477 #define petsc_mpi_iprobe_              petsc_mpi_iprobe
478 #define petsc_mpi_probe_               petsc_mpi_probe
479 #define petsc_mpi_request_free_        petsc_mpi_request_free
480 #define petsc_mpi_ssend_               petsc_mpi_ssend
481 #define petsc_mpi_wait_                petsc_mpi_wait
482 #define petsc_mpi_comm_group_          petsc_mpi_comm_group
483 #define petsc_mpi_exscan_              petsc_mpi_exscan
484 #endif
485 
486 #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
487 #define petsc_mpi_init_                petsc_mpi_init__
488 #define petsc_mpi_finalize_            petsc_mpi_finalize__
489 #define petsc_mpi_comm_size_           petsc_mpi_comm_size__
490 #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank__
491 #define petsc_mpi_abort_               petsc_mpi_abort__
492 #define petsc_mpi_reduce_              petsc_mpi_reduce__
493 #define petsc_mpi_allreduce_           petsc_mpi_allreduce__
494 #define petsc_mpi_barrier_             petsc_mpi_barrier__
495 #define petsc_mpi_bcast_               petsc_mpi_bcast__
496 #define petsc_mpi_gather_              petsc_mpi_gather__
497 #define petsc_mpi_allgather_           petsc_mpi_allgather__
498 #define petsc_mpi_comm_split_          petsc_mpi_comm_split__
499 #define petsc_mpi_scan_                petsc_mpi_scan__
500 #define petsc_mpi_send_                petsc_mpi_send__
501 #define petsc_mpi_recv_                petsc_mpi_recv__
502 #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter__
503 #define petsc_mpi_irecv_               petsc_mpi_irecv__
504 #define petsc_mpi_isend_               petsc_mpi_isend__
505 #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv__
506 #define petsc_mpi_test_                petsc_mpi_test__
507 #define petsc_mpi_waitall_             petsc_mpi_waitall__
508 #define petsc_mpi_waitany_             petsc_mpi_waitany__
509 #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv__
510 #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv__
511 #define petsc_mpi_comm_create_         petsc_mpi_comm_create__
512 #define petsc_mpi_address_             petsc_mpi_address__
513 #define petsc_mpi_pack_                petsc_mpi_pack__
514 #define petsc_mpi_unpack_              petsc_mpi_unpack__
515 #define petsc_mpi_pack_size_           petsc_mpi_pack_size__
516 #define petsc_mpi_type_struct_         petsc_mpi_type_struct__
517 #define petsc_mpi_type_commit_         petsc_mpi_type_commit__
518 #define petsc_mpi_wtime_               petsc_mpi_wtime__
519 #define petsc_mpi_cancel_              petsc_mpi_cancel__
520 #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup__
521 #define petsc_mpi_comm_free_           petsc_mpi_comm_free__
522 #define petsc_mpi_get_count_           petsc_mpi_get_count__
523 #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name__
524 #define petsc_mpi_initialized_         petsc_mpi_initialized__
525 #define petsc_mpi_iprobe_              petsc_mpi_iprobe__
526 #define petsc_mpi_probe_               petsc_mpi_probe__
527 #define petsc_mpi_request_free_        petsc_mpi_request_free__
528 #define petsc_mpi_ssend_               petsc_mpi_ssend__
529 #define petsc_mpi_wait_                petsc_mpi_wait__
530 #define petsc_mpi_comm_group_          petsc_mpi_comm_group__
531 #define petsc_mpi_exscan_              petsc_mpi_exscan__
532 #endif
533 
534 /* Do not build fortran interface if MPI namespace colision is to be avoided */
535 #if defined(PETSC_HAVE_FORTRAN)
536 
/* Defined in the Fortran module source; initializes the MPIUNI Fortran module data. */
PETSC_EXTERN void mpiunisetmoduleblock_(void);

/* Record the address of the Fortran MPI_IN_PLACE sentinel so that
   MPIUNI_Memcpy() can recognize it in buffers passed from Fortran. */
PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
{
  MPIUNIF_mpi_in_place   = f_mpi_in_place;
}
543 
/* Fortran MPI_INIT: hook up the Fortran module block, then run the C init.
   Fortran command-line arguments are not forwarded. */
PETSC_EXTERN void petsc_mpi_init_(int *ierr)
{
  mpiunisetmoduleblock_();
  *ierr = MPI_Init((int*)0, (char***)0);
}
549 
/* Fortran MPI_FINALIZE */
PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
{
  *ierr = MPI_Finalize();
}

/* Fortran MPI_COMM_SIZE: always 1 on a uniprocessor */
PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
{
  *size = 1;
  *ierr = 0;
}

/* Fortran MPI_COMM_RANK: always 0 on a uniprocessor */
PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
{
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

/* Fortran MPI_COMM_SPLIT: with one process every split is the comm itself */
PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

/* Fortran MPI_ABORT: terminate immediately; errorcode is ignored */
PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS; /* not reached */
}
578 
/* Fortran collectives: on one process these degenerate to (at most) a copy
   from the send buffer to the receive buffer, delegated to the C macros. */
PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
{
  *ierr = MPI_Reduce(sendbuf,recvbuf,*count,*datatype,*op,*root,*comm);
}

PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_Allreduce(sendbuf,recvbuf,*count,*datatype,*op,*comm);
}

/* Barrier is a no-op with a single process */
PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Broadcast is a no-op: the root already holds the data */
PETSC_EXTERN void petsc_mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root,int *comm,int *ierr)
{
  *ierr = MPI_Gather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*root,*comm);
}

PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype,int *comm,int *ierr)
{
  *ierr = MPI_Allgather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*comm);
}

/* Inclusive scan on one process is just a copy of the local contribution */
PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPI_sizeof(*datatype));
}
613 
/* Point-to-point send is meaningless with one process: report unsupported */
PETSC_EXTERN void petsc_mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}
618 
619 PETSC_EXTERN void petsc_mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int status,int *ierr)
620 {
621   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
622 }
623 
/* Unsupported in MPIUNI: reduce_scatter and nonblocking point-to-point */
PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_irecv_(void *buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Self-sendrecv degenerates to a copy; the receive count/type are ignored */
PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPI_sizeof(*sendtype));
}

PETSC_EXTERN void petsc_mpi_test_(int *request,int *flag,int *status,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* No real requests exist, so waiting always succeeds immediately */
PETSC_EXTERN void petsc_mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
658 
/* Variable-count collectives delegate to the C macros (copies on one process) */
PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Allgatherv(sendbuf,*sendcount,*sendtype,recvbuf,recvcounts,displs,*recvtype,*comm);
}

PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Alltoallv(sendbuf,sendcounts,sdispls,*sendtype,recvbuf,recvcounts,rdispls,*recvtype,*comm);
}

/* Note: unlike the C MPI_Comm_create(), this simply aliases the input comm */
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
{
  *newcomm =  *comm;
  *ierr    = MPI_SUCCESS;
}

/* MPI_ADDRESS: the "address" is just the pointer value */
PETSC_EXTERN void petsc_mpi_address_(void *location,MPI_Aint *address,int *ierr)
{
  *address =  (MPI_Aint) ((char *)location);
  *ierr    = MPI_SUCCESS;
}

/* Pack/unpack and type construction are unsupported in MPIUNI */
PETSC_EXTERN void petsc_mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_type_struct_(int *count,int *array_of_blocklengths,int * array_of_displaments,int *array_of_types,int *newtype,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* No timer is provided; always returns 0.0 */
double petsc_mpi_wtime_(void)
{
  return 0.0;
}

PETSC_EXTERN void petsc_mpi_cancel_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Note: unlike the C MPI_Comm_dup(), this simply aliases the input comm */
PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm,int *out,int *ierr)
{
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_free_(int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* Returns the literal name "localhost"; no NUL is appended, which is fine
   for Fortran's blank-padded fixed-length strings (presumably the caller
   blank-fills the rest — behavior inherited from the Fortran side) */
PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name,int *result_len,int *ierr,PETSC_FORTRAN_CHARLEN_T len)
{
  MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_initialized_(int *flag,int *ierr)
{
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}
744 
745 PETSC_EXTERN void petsc_mpi_iprobe_(int *source,int *tag,int *comm,int *glag,int *status,int *ierr)
746 {
747   *ierr = MPI_SUCCESS;
748 }
749 
/* Probe: no messages can exist with one process; successful no-op */
PETSC_EXTERN void petsc_mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_request_free_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Synchronous send is unsupported (would deadlock with no receiver) */
PETSC_EXTERN void petsc_mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

/* No real requests exist, so waiting completes immediately */
PETSC_EXTERN void petsc_mpi_wait_(int *request,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Group handling is a no-op; *group is not written */
PETSC_EXTERN void petsc_mpi_comm_group_(int *comm,int *group,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

/* Exclusive scan on one process: recvbuf is deliberately left untouched
   (rank 0's exscan result is undefined per the MPI standard) */
PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
779 
780 #endif /* PETSC_HAVE_FORTRAN */
781 
782 #if defined(__cplusplus)
783 }
784 #endif
785