1 /*
2 This provides a few of the MPI-uni functions that cannot be implemented
3 with C macros
4 */
5 #include <petscsys.h>
6 #if !defined(MPIUNI_H)
7 #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9
10 #include <petscdevice_cupm.h>
11 #include <petsc/private/petscimpl.h>
12
13 #define MPI_SUCCESS 0
14 #define MPI_FAILURE 1
15
16 void *MPIUNI_TMP = NULL;
17
18 /*
  With MPI Uni there are two built-in communicators, MPI_COMM_SELF (1) and MPI_COMM_WORLD (2);
  MPI_Comm_dup()/MPI_Comm_create() hand out further small-integer communicators (3 up to MAX_COMM).
21
  MPI_COMM_SELF and MPI_COMM_WORLD are freed via MPI_Comm_free() in MPI_Finalize(), but in general with PETSc,
23 the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).
24
25 */
#define MAX_ATTR 256 /* Maximal number of attribute keyvals that can ever exist */
#define MAX_COMM 128 /* Maximal number of communicators that can ever exist */

/* One attribute value attached to one communicator */
typedef struct {
  void *attribute_val; /* value the user stored with MPI_Comm_set_attr() */
  int   active;        /* is this attribute currently set on the communicator? */
} MPI_Attr;

/* Per-keyval state, shared by all communicators */
typedef struct {
  void                *extra_state; /* user state handed back to the delete callback */
  MPI_Delete_function *del;         /* callback run when the attribute is deleted from a comm */
  int                  active;      /* reference count: one for the keyval itself plus one per comm that has the attribute set (see comment above MPI_Attr_dereference_keyval()) */
} MPI_Attr_keyval;

static MPI_Attr_keyval attr_keyval[MAX_ATTR];
static MPI_Attr        attr[MAX_COMM][MAX_ATTR]; /* attr[CommIdx(comm)][keyval] */
static int             comm_active[MAX_COMM];    /* Boolean array indicating which comms are in use */
static int             mpi_tag_ub = 100000000;   /* value of the predefined MPI_TAG_UB attribute (keyval 0) */
static int             num_attr = 1;             /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
static int             MaxComm = 2;              /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
static void           *MPIUNIF_mpi_in_place = 0; /* Fortran MPI_IN_PLACE sentinel, recorded by mpiunisetfortranbasepointers_() */

#define CommIdx(comm) ((comm) - 1) /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
49
50 #if defined(__cplusplus)
51 extern "C" {
52 #endif
53
54 /*
55 To avoid problems with prototypes to the system memcpy() it is duplicated here
56 */
/*
  Copy n bytes from src to dst; returns MPI_SUCCESS, or MPI_FAILURE if a device copy fails.
  A dst or src equal to MPI_IN_PLACE (C or Fortran flavor), or n == 0, is a no-op.
*/
int MPIUNI_Memcpy(void *dst, const void *src, MPI_Count n)
{
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (!n) return MPI_SUCCESS;

  /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
#if defined(PETSC_HAVE_CUDA)
  if (PetscDeviceInitialized(PETSC_DEVICE_CUDA)) {
    /* cudaMemcpyDefault lets the runtime infer the transfer direction from the pointers */
    cudaError_t cerr = cudaMemcpy(dst, src, n, cudaMemcpyDefault);
    if (cerr != cudaSuccess) return MPI_FAILURE;
  } else
#elif defined(PETSC_HAVE_HIP)
  if (PetscDeviceInitialized(PETSC_DEVICE_HIP)) {
    hipError_t cerr = hipMemcpy(dst, src, n, hipMemcpyDefault);
    if (cerr != hipSuccess) return MPI_FAILURE;
  } else
#endif
  {
    (void)memcpy(dst, src, n);
  }
  return MPI_SUCCESS;
}
80
static int classcnt = 0; /* number of error classes handed out by MPI_Add_error_class() */
static int codecnt  = 0; /* number of error codes handed out by MPI_Add_error_code() */
83
MPI_Add_error_class(int * cl)84 int MPI_Add_error_class(int *cl)
85 {
86 *cl = classcnt++;
87 return MPI_SUCCESS;
88 }
89
MPI_Add_error_code(int cl,int * co)90 int MPI_Add_error_code(int cl, int *co)
91 {
92 if (cl >= classcnt) return MPI_FAILURE;
93 *co = codecnt++;
94 return MPI_SUCCESS;
95 }
96
/*
  Report the combiner of datatype and the array sizes MPI_Type_get_contents() would need.
  Only the named, dup, and contiguous combiners are supported by MPIUni.
*/
int MPI_Type_get_envelope(MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner)
{
  const int comb = datatype >> 28; /* combiner lives in the top 4 bits of the encoded datatype */

  switch (comb) {
  case MPI_COMBINER_NAMED:
  case MPI_COMBINER_DUP:
  case MPI_COMBINER_CONTIGUOUS:
    *num_integers  = (comb == MPI_COMBINER_CONTIGUOUS) ? 1 : 0; /* contiguous carries a count */
    *num_addresses = 0;
    *num_datatypes = (comb == MPI_COMBINER_NAMED) ? 0 : 1; /* derived types reference one base type */
    *combiner      = comb;
    return MPI_SUCCESS;
  default:
    return MPIUni_Abort(MPI_COMM_SELF, 1);
  }
}
124
/*
  Return the constituents of a derived datatype. MPIUni encodes a datatype in a single int
  (combiner in the top 4 bits, count and base type packed below), so the contents are decoded
  arithmetically. max_addresses/array_of_addresses are unused: no supported combiner has addresses.
*/
int MPI_Type_get_contents(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int *array_of_integers, MPI_Aint *array_of_addresses, MPI_Datatype *array_of_datatypes)
{
  int comb = datatype >> 28; /* combiner is stored in the top 4 bits */
  switch (comb) {
  case MPI_COMBINER_NAMED:
    /* predefined types have no contents; querying them is an error */
    return MPIUni_Abort(MPI_COMM_SELF, 1);
  case MPI_COMBINER_DUP:
    if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_datatypes[0] = datatype & 0x0fffffff; /* strip the combiner bits to recover the duplicated type */
    break;
  case MPI_COMBINER_CONTIGUOUS:
    if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_integers[0]  = (datatype >> 8) & 0xfff;         /* count */
    array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100; /* basic named type (count=1) from which the contiguous type is derived */
    break;
  default:
    return MPIUni_Abort(MPI_COMM_SELF, 1);
  }
  return MPI_SUCCESS;
}
145
146 /*
147 Used to set the built-in MPI_TAG_UB attribute
148 */
Keyval_setup(void)149 static int Keyval_setup(void)
150 {
151 attr[CommIdx(MPI_COMM_WORLD)][0].active = 1;
152 attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
153 attr[CommIdx(MPI_COMM_SELF)][0].active = 1;
154 attr[CommIdx(MPI_COMM_SELF)][0].attribute_val = &mpi_tag_ub;
155 attr_keyval[0].active = 1;
156 return MPI_SUCCESS;
157 }
158
/*
  Create a new attribute keyval. Freed keyvals are recycled; slot 0 is the predefined
  MPI_TAG_UB and is never handed out. copy_fn is accepted but never called (no comm ever
  has more than one process to copy to).
*/
int MPI_Comm_create_keyval(PETSC_UNUSED MPI_Copy_function *copy_fn, PETSC_UNUSED MPI_Delete_function *delete_fn, int *keyval, void *extra_state)
{
  int slot = -1;

  (void)copy_fn;
  (void)delete_fn;
  for (int i = 1; i < num_attr; i++) { /* the first attribute is always in use */
    if (!attr_keyval[i].active) {
      slot = i;
      break;
    }
  }
  if (slot < 0) {
    if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD, 1);
    slot = num_attr++;
  }
  attr_keyval[slot].extra_state = extra_state;
  attr_keyval[slot].del         = delete_fn;
  attr_keyval[slot].active      = 1;
  *keyval = slot;
  return MPI_SUCCESS;
}
181
182 /*
183 The reference counting business is here to guard against the following:
184
185 MPI_Comm_set_attr(comm, keyval, some_attr);
186 MPI_Comm_free_keyval(&keyval);
187 MPI_Comm_free(&comm);
188
189 Here MPI_Comm_free() will try to destroy all of the attributes of the comm, and hence we
190 should not clear the deleter or extra_state until all communicators that have the attribute
191 set are either freed or have given up their attribute.
192
193 The attribute reference count is INCREASED in:
194 - MPI_Comm_create_keyval()
195 - MPI_Comm_set_attr()
196
  The attribute reference count is DECREASED in:
198 - MPI_Comm_free_keyval()
199 - MPI_Comm_delete_attr() (but only if the comm has the attribute)
200 */
MPI_Attr_dereference_keyval(int keyval)201 static int MPI_Attr_dereference_keyval(int keyval)
202 {
203 if (--attr_keyval[keyval].active <= 0) {
204 attr_keyval[keyval].extra_state = 0;
205 attr_keyval[keyval].del = 0;
206 }
207 return MPI_SUCCESS;
208 }
209
MPI_Attr_reference_keyval(int keyval)210 static int MPI_Attr_reference_keyval(int keyval)
211 {
212 ++attr_keyval[keyval].active;
213 return MPI_SUCCESS;
214 }
215
MPI_Comm_free_keyval(int * keyval)216 int MPI_Comm_free_keyval(int *keyval)
217 {
218 int ret;
219
220 if (*keyval < 0 || *keyval >= num_attr) return MPI_FAILURE;
221 if ((ret = MPI_Attr_dereference_keyval(*keyval))) return ret;
222 *keyval = 0;
223 return MPI_SUCCESS;
224 }
225
/* Attach attribute_val to comm under keyval, replacing (and delete-callback-ing) any prior value */
int MPI_Comm_set_attr(MPI_Comm comm, int keyval, void *attribute_val)
{
  const int idx = CommIdx(comm);
  int       err;

  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;

  err = MPI_Comm_delete_attr(comm, keyval); /* replace semantics: remove any existing value first */
  if (err) return err;
  err = MPI_Attr_reference_keyval(keyval); /* the comm now holds a reference to the keyval */
  if (err) return err;
  attr[idx][keyval].active        = 1;
  attr[idx][keyval].attribute_val = attribute_val;
  return MPI_SUCCESS;
}
238
/*
  Remove attribute keyval from comm, running the keyval's delete callback (if any) on the
  stored value. Succeeds silently when the attribute is not set on comm.
*/
int MPI_Comm_delete_attr(MPI_Comm comm, int keyval)
{
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;
  if (attr[idx][keyval].active) {
    int   ret;
    void *save_attribute_val = attr[idx][keyval].attribute_val;

    /* the slot is cleared before the callback runs, so a callback that re-enters the
       attribute routines sees the attribute as already removed */
    attr[idx][keyval].active        = 0;
    attr[idx][keyval].attribute_val = 0;
    if (attr_keyval[keyval].del) {
      if ((ret = (*attr_keyval[keyval].del)(comm, keyval, save_attribute_val, attr_keyval[keyval].extra_state))) return ret;
    }
    /* may release the keyval's callback state if the user already freed the keyval */
    if ((ret = MPI_Attr_dereference_keyval(keyval))) return ret;
  }
  return MPI_SUCCESS;
}
257
/*
  Retrieve attribute keyval on comm; *flag is 1 when present, else 0. Querying keyval 0
  (MPI_TAG_UB) lazily initializes the built-in attribute first.
*/
int MPI_Comm_get_attr(MPI_Comm comm, int keyval, void *attribute_val, int *flag)
{
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (!keyval) Keyval_setup();
  if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE; /* match the validation in MPI_Comm_set_attr()/MPI_Comm_delete_attr() */
  *flag                   = attr[idx][keyval].active;
  *(void **)attribute_val = attr[idx][keyval].attribute_val;
  return MPI_SUCCESS;
}
267
268 static char all_comm_names[MAX_COMM][MPI_MAX_OBJECT_NAME] = {"MPI_COMM_SELF", "MPI_COMM_WORLD"};
269
/*
  Copy comm's name into comm_name (buffer of at least MPI_MAX_OBJECT_NAME chars) and set
  *resultlen to its length.
*/
int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (!comm_name || !resultlen) return MPI_FAILURE;
  (void)strncpy(comm_name, all_comm_names[CommIdx(comm)], MPI_MAX_OBJECT_NAME - 1);
  comm_name[MPI_MAX_OBJECT_NAME - 1] = '\0'; /* strncpy() does not NUL-terminate when the source fills the buffer */
  *resultlen = (int)strlen(comm_name);
  return MPI_SUCCESS;
}
278
/* Store a name (at most MPI_MAX_OBJECT_NAME-1 chars) for comm; longer names are rejected */
int MPI_Comm_set_name(MPI_Comm comm, const char *comm_name)
{
  size_t len;

  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (!comm_name) return MPI_FAILURE;
  len = strlen(comm_name);
  if (len > MPI_MAX_OBJECT_NAME - 1) return MPI_FAILURE;
  memcpy(all_comm_names[CommIdx(comm)], comm_name, len + 1); /* +1 copies the terminating NUL */
  return MPI_SUCCESS;
}
287
/*
  With a single process every group is the whole group, so MPI_Comm_create() degenerates
  to duplicating the input communicator; the group argument is ignored.
*/
int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm)
{
  (void)group;
  /* identical slot-allocation logic; delegating avoids maintaining two copies of the free-slot search */
  return MPI_Comm_dup(comm, newcomm);
}
304
/*
  Hand out a new user communicator: reuse a freed slot when one exists, otherwise grow
  MaxComm. Slots 1 and 2 are the built-ins and are never recycled.
*/
int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *out)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  for (int c = 3; c <= MaxComm; c++) {
    if (!comm_active[CommIdx(c)]) {
      comm_active[CommIdx(c)] = 1;
      *out                    = c;
      return MPI_SUCCESS;
    }
  }
  if (MaxComm >= MAX_COMM) return MPI_FAILURE;
  MaxComm++;
  *out                       = MaxComm;
  comm_active[CommIdx(*out)] = 1;
  return MPI_SUCCESS;
}
321
/* Free a communicator: delete all of its attributes (running their callbacks), release its slot */
int MPI_Comm_free(MPI_Comm *comm)
{
  const MPI_Comm c = *comm;

  if (c < 1 || c > MaxComm) return MPI_FAILURE;
  for (int k = 0; k < num_attr; k++) {
    const int err = MPI_Comm_delete_attr(c, k);

    if (err) return err;
  }
  if (c >= 3) comm_active[CommIdx(c)] = 0; /* the built-ins (1 and 2) keep their slots */
  *comm = 0;
  return MPI_SUCCESS;
}
336
/* MPIUni is a single-process MPI: every valid communicator has exactly one rank */
int MPI_Comm_size(MPI_Comm comm, int *size)
{
  if (comm >= 1 && comm <= MaxComm) {
    *size = 1;
    return MPI_SUCCESS;
  }
  return MPI_FAILURE;
}
343
/* The single process is always rank 0 in every valid communicator */
int MPI_Comm_rank(MPI_Comm comm, int *rank)
{
  if (comm >= 1 && comm <= MaxComm) {
    *rank = 0;
    return MPI_SUCCESS;
  }
  return MPI_FAILURE;
}
350
/*
  Called by wrappers for MPI operations that make no sense (or are unimplemented) in a
  sequential setting: prints a fixed message and returns MPI_ERR_NOSUPPORT without
  terminating. Both arguments are ignored.
*/
int MPIUni_Abort(MPI_Comm comm, int errorcode)
{
  (void)printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
  return MPI_ERR_NOSUPPORT;
}
356
/* Terminates the process via abort(); comm and errorcode are ignored and the return is never reached */
int MPI_Abort(MPI_Comm comm, int errorcode)
{
  abort();
  return MPI_SUCCESS;
}
362
static int MPI_was_initialized = 0; /* set by MPI_Init(), cleared by MPI_Finalize() */
static int MPI_was_finalized   = 0; /* set by MPI_Finalize(), cleared by MPI_Init() */
365
MPI_Init(int * argc,char *** argv)366 int MPI_Init(int *argc, char ***argv)
367 {
368 if (MPI_was_initialized) return MPI_FAILURE;
369 /* MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so an MPI standard compliant
370 MPIU should have this 'if (MPI_was_finalized) return MPI_FAILURE;' check. We relax it here to make life easier for users
371 of MPIU so that they can do multiple PetscInitialize/Finalize().
372 */
373 /* if (MPI_was_finalized) return MPI_FAILURE; */
374 MPI_was_initialized = 1;
375 MPI_was_finalized = 0;
376 return MPI_SUCCESS;
377 }
378
/* required is ignored: MPIUni always reports MPI_THREAD_FUNNELED via MPI_Query_thread() */
int MPI_Init_thread(int *argc, char ***argv, int required, int *provided)
{
  (void)required;
  (void)MPI_Query_thread(provided);
  return MPI_Init(argc, argv);
}
384
MPI_Query_thread(int * provided)385 int MPI_Query_thread(int *provided)
386 {
387 *provided = MPI_THREAD_FUNNELED;
388 return MPI_SUCCESS;
389 }
390
/*
  Tear down MPIUni: free the two built-in communicators (running any attribute delete
  callbacks), warn in debug builds about user objects never freed, then reset all global
  counters so MPI_Init() can be called again (see the relaxation note in MPI_Init()).
*/
int MPI_Finalize(void)
{
  if (MPI_was_finalized || !MPI_was_initialized) return MPI_FAILURE;
  MPI_Comm comm = MPI_COMM_WORLD;
  int      ret  = MPI_Comm_free(&comm);

  if (ret) return ret;
  comm = MPI_COMM_SELF;
  ret  = MPI_Comm_free(&comm);
  if (ret) return ret;
  if (PetscDefined(USE_DEBUG)) {
    /* leak checks: report user communicators, attributes, and keyvals still alive */
    for (int i = 3; i <= MaxComm; ++i) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }

    for (int i = 1; i <= MaxComm; ++i) {
      for (int j = 0; j < num_attr; ++j) {
        if (attr[CommIdx(i)][j].active) printf("MPIUni warning: MPI communicator %d attribute %d was not freed before MPI_Finalize()\n", i, j);
      }
    }

    for (int i = 1; i < num_attr; ++i) {
      if (attr_keyval[i].active) printf("MPIUni warning: MPI attribute %d was not freed before MPI_Finalize()\n", i);
    }
  }

  /* reset counters so a subsequent MPI_Init() starts from a clean state */
  MaxComm  = 2;
  num_attr = 1;
  MPI_was_finalized   = 1;
  MPI_was_initialized = 0;
  PETSC_COMM_WORLD = MPI_COMM_NULL;
  return MPI_SUCCESS;
}
425
MPI_Initialized(int * flag)426 int MPI_Initialized(int *flag)
427 {
428 *flag = MPI_was_initialized;
429 return MPI_SUCCESS;
430 }
431
MPI_Finalized(int * flag)432 int MPI_Finalized(int *flag)
433 {
434 *flag = MPI_was_finalized;
435 return MPI_SUCCESS;
436 }
437
/* Windows in MPIUni are plain malloc()ed buffers (see MPI_Win_allocate_shared()); free and null the handle */
int MPI_Win_free(MPI_Win *win)
{
  free(*win);
  *win = NULL;
  return MPI_SUCCESS;
}
444
/*
  Allocate a "shared" window: with one process this is just a malloc()ed buffer of sz bytes,
  and the window handle aliases the buffer. asz, info, and comm are ignored.
*/
int MPI_Win_allocate_shared(size_t sz, size_t asz, MPI_Info info, MPI_Comm comm, void **addr, MPI_Win *win)
{
  (void)asz;
  (void)info;
  (void)comm;
  *addr = malloc(sz);
  if (!*addr && sz) return MPI_FAILURE; /* report allocation failure instead of handing out a NULL window */
  *win = *addr;
  return MPI_SUCCESS;
}
450
451 /* ------------------- Fortran versions of several routines ------------------ */
452
453 #if defined(PETSC_HAVE_FORTRAN_CAPS)
454 #define mpiunisetmoduleblock_ MPIUNISETMODULEBLOCK
455 #define mpiunisetfortranbasepointers_ MPIUNISETFORTRANBASEPOINTERS
456 #define petsc_mpi_init_ PETSC_MPI_INIT
457 #define petsc_mpi_finalize_ PETSC_MPI_FINALIZE
458 #define petsc_mpi_comm_size_ PETSC_MPI_COMM_SIZE
459 #define petsc_mpi_comm_rank_ PETSC_MPI_COMM_RANK
460 #define petsc_mpi_abort_ PETSC_MPI_ABORT
461 #define petsc_mpi_reduce_ PETSC_MPI_REDUCE
462 #define petsc_mpi_allreduce_ PETSC_MPI_ALLREDUCE
463 #define petsc_mpi_barrier_ PETSC_MPI_BARRIER
464 #define petsc_mpi_bcast_ PETSC_MPI_BCAST
465 #define petsc_mpi_gather_ PETSC_MPI_GATHER
466 #define petsc_mpi_allgather_ PETSC_MPI_ALLGATHER
467 #define petsc_mpi_comm_split_ PETSC_MPI_COMM_SPLIT
468 #define petsc_mpi_scan_ PETSC_MPI_SCAN
469 #define petsc_mpi_send_ PETSC_MPI_SEND
470 #define petsc_mpi_recv_ PETSC_MPI_RECV
471 #define petsc_mpi_reduce_scatter_ PETSC_MPI_REDUCE_SCATTER
472 #define petsc_mpi_irecv_ PETSC_MPI_IRECV
473 #define petsc_mpi_isend_ PETSC_MPI_ISEND
474 #define petsc_mpi_sendrecv_ PETSC_MPI_SENDRECV
475 #define petsc_mpi_test_ PETSC_MPI_TEST
476 #define petsc_mpi_waitall_ PETSC_MPI_WAITALL
477 #define petsc_mpi_waitany_ PETSC_MPI_WAITANY
478 #define petsc_mpi_allgatherv_ PETSC_MPI_ALLGATHERV
479 #define petsc_mpi_alltoallv_ PETSC_MPI_ALLTOALLV
480 #define petsc_mpi_comm_create_ PETSC_MPI_COMM_CREATE
481 #define petsc_mpi_address_ PETSC_MPI_ADDRESS
482 #define petsc_mpi_pack_ PETSC_MPI_PACK
483 #define petsc_mpi_unpack_ PETSC_MPI_UNPACK
484 #define petsc_mpi_pack_size_ PETSC_MPI_PACK_SIZE
485 #define petsc_mpi_type_struct_ PETSC_MPI_TYPE_STRUCT
486 #define petsc_mpi_type_commit_ PETSC_MPI_TYPE_COMMIT
487 #define petsc_mpi_wtime_ PETSC_MPI_WTIME
488 #define petsc_mpi_cancel_ PETSC_MPI_CANCEL
489 #define petsc_mpi_comm_dup_ PETSC_MPI_COMM_DUP
490 #define petsc_mpi_comm_free_ PETSC_MPI_COMM_FREE
491 #define petsc_mpi_get_count_ PETSC_MPI_GET_COUNT
492 #define petsc_mpi_get_processor_name_ PETSC_MPI_GET_PROCESSOR_NAME
493 #define petsc_mpi_initialized_ PETSC_MPI_INITIALIZED
494 #define petsc_mpi_iprobe_ PETSC_MPI_IPROBE
495 #define petsc_mpi_probe_ PETSC_MPI_PROBE
496 #define petsc_mpi_request_free_ PETSC_MPI_REQUEST_FREE
497 #define petsc_mpi_ssend_ PETSC_MPI_SSEND
498 #define petsc_mpi_wait_ PETSC_MPI_WAIT
499 #define petsc_mpi_comm_group_ PETSC_MPI_COMM_GROUP
500 #define petsc_mpi_exscan_ PETSC_MPI_EXSCAN
501 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
502 #define mpiunisetmoduleblock_ mpiunisetmoduleblock
503 #define mpiunisetfortranbasepointers_ mpiunisetfortranbasepointers
504 #define petsc_mpi_init_ petsc_mpi_init
505 #define petsc_mpi_finalize_ petsc_mpi_finalize
506 #define petsc_mpi_comm_size_ petsc_mpi_comm_size
507 #define petsc_mpi_comm_rank_ petsc_mpi_comm_rank
508 #define petsc_mpi_abort_ petsc_mpi_abort
509 #define petsc_mpi_reduce_ petsc_mpi_reduce
510 #define petsc_mpi_allreduce_ petsc_mpi_allreduce
511 #define petsc_mpi_barrier_ petsc_mpi_barrier
512 #define petsc_mpi_bcast_ petsc_mpi_bcast
513 #define petsc_mpi_gather_ petsc_mpi_gather
514 #define petsc_mpi_allgather_ petsc_mpi_allgather
515 #define petsc_mpi_comm_split_ petsc_mpi_comm_split
516 #define petsc_mpi_scan_ petsc_mpi_scan
517 #define petsc_mpi_send_ petsc_mpi_send
518 #define petsc_mpi_recv_ petsc_mpi_recv
519 #define petsc_mpi_reduce_scatter_ petsc_mpi_reduce_scatter
520 #define petsc_mpi_irecv_ petsc_mpi_irecv
521 #define petsc_mpi_isend_ petsc_mpi_isend
522 #define petsc_mpi_sendrecv_ petsc_mpi_sendrecv
523 #define petsc_mpi_test_ petsc_mpi_test
524 #define petsc_mpi_waitall_ petsc_mpi_waitall
525 #define petsc_mpi_waitany_ petsc_mpi_waitany
526 #define petsc_mpi_allgatherv_ petsc_mpi_allgatherv
527 #define petsc_mpi_alltoallv_ petsc_mpi_alltoallv
528 #define petsc_mpi_comm_create_ petsc_mpi_comm_create
529 #define petsc_mpi_address_ petsc_mpi_address
530 #define petsc_mpi_pack_ petsc_mpi_pack
531 #define petsc_mpi_unpack_ petsc_mpi_unpack
532 #define petsc_mpi_pack_size_ petsc_mpi_pack_size
533 #define petsc_mpi_type_struct_ petsc_mpi_type_struct
534 #define petsc_mpi_type_commit_ petsc_mpi_type_commit
535 #define petsc_mpi_wtime_ petsc_mpi_wtime
536 #define petsc_mpi_cancel_ petsc_mpi_cancel
537 #define petsc_mpi_comm_dup_ petsc_mpi_comm_dup
538 #define petsc_mpi_comm_free_ petsc_mpi_comm_free
539 #define petsc_mpi_get_count_ petsc_mpi_get_count
540 #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name
541 #define petsc_mpi_initialized_ petsc_mpi_initialized
542 #define petsc_mpi_iprobe_ petsc_mpi_iprobe
543 #define petsc_mpi_probe_ petsc_mpi_probe
544 #define petsc_mpi_request_free_ petsc_mpi_request_free
545 #define petsc_mpi_ssend_ petsc_mpi_ssend
546 #define petsc_mpi_wait_ petsc_mpi_wait
547 #define petsc_mpi_comm_group_ petsc_mpi_comm_group
548 #define petsc_mpi_exscan_ petsc_mpi_exscan
549 #endif
550
551 #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
552 #define petsc_mpi_init_ petsc_mpi_init__
553 #define petsc_mpi_finalize_ petsc_mpi_finalize__
554 #define petsc_mpi_comm_size_ petsc_mpi_comm_size__
555 #define petsc_mpi_comm_rank_ petsc_mpi_comm_rank__
556 #define petsc_mpi_abort_ petsc_mpi_abort__
557 #define petsc_mpi_reduce_ petsc_mpi_reduce__
558 #define petsc_mpi_allreduce_ petsc_mpi_allreduce__
559 #define petsc_mpi_barrier_ petsc_mpi_barrier__
560 #define petsc_mpi_bcast_ petsc_mpi_bcast__
561 #define petsc_mpi_gather_ petsc_mpi_gather__
562 #define petsc_mpi_allgather_ petsc_mpi_allgather__
563 #define petsc_mpi_comm_split_ petsc_mpi_comm_split__
564 #define petsc_mpi_scan_ petsc_mpi_scan__
565 #define petsc_mpi_send_ petsc_mpi_send__
566 #define petsc_mpi_recv_ petsc_mpi_recv__
567 #define petsc_mpi_reduce_scatter_ petsc_mpi_reduce_scatter__
568 #define petsc_mpi_irecv_ petsc_mpi_irecv__
569 #define petsc_mpi_isend_ petsc_mpi_isend__
570 #define petsc_mpi_sendrecv_ petsc_mpi_sendrecv__
571 #define petsc_mpi_test_ petsc_mpi_test__
572 #define petsc_mpi_waitall_ petsc_mpi_waitall__
573 #define petsc_mpi_waitany_ petsc_mpi_waitany__
574 #define petsc_mpi_allgatherv_ petsc_mpi_allgatherv__
575 #define petsc_mpi_alltoallv_ petsc_mpi_alltoallv__
576 #define petsc_mpi_comm_create_ petsc_mpi_comm_create__
577 #define petsc_mpi_address_ petsc_mpi_address__
578 #define petsc_mpi_pack_ petsc_mpi_pack__
579 #define petsc_mpi_unpack_ petsc_mpi_unpack__
580 #define petsc_mpi_pack_size_ petsc_mpi_pack_size__
581 #define petsc_mpi_type_struct_ petsc_mpi_type_struct__
582 #define petsc_mpi_type_commit_ petsc_mpi_type_commit__
583 #define petsc_mpi_wtime_ petsc_mpi_wtime__
584 #define petsc_mpi_cancel_ petsc_mpi_cancel__
585 #define petsc_mpi_comm_dup_ petsc_mpi_comm_dup__
586 #define petsc_mpi_comm_free_ petsc_mpi_comm_free__
587 #define petsc_mpi_get_count_ petsc_mpi_get_count__
588 #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name__
589 #define petsc_mpi_initialized_ petsc_mpi_initialized__
590 #define petsc_mpi_iprobe_ petsc_mpi_iprobe__
591 #define petsc_mpi_probe_ petsc_mpi_probe__
592 #define petsc_mpi_request_free_ petsc_mpi_request_free__
593 #define petsc_mpi_ssend_ petsc_mpi_ssend__
594 #define petsc_mpi_wait_ petsc_mpi_wait__
595 #define petsc_mpi_comm_group_ petsc_mpi_comm_group__
596 #define petsc_mpi_exscan_ petsc_mpi_exscan__
597 #endif
598
599 /* Do not build fortran interface if MPI namespace collision is to be avoided */
600 #if defined(PETSC_USE_FORTRAN_BINDINGS)
601
602 PETSC_EXTERN void mpiunisetmoduleblock_(void);
603
/* Called from Fortran at startup to record the address of the Fortran MPI_IN_PLACE
   sentinel so the C side can recognize it (see MPIUNI_Memcpy()) */
PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
{
  MPIUNIF_mpi_in_place = f_mpi_in_place;
}
608
/* Fortran MPI_Init(): set up the Fortran module block, then run the C MPI_Init() */
PETSC_EXTERN void petsc_mpi_init_(int *ierr)
{
  mpiunisetmoduleblock_();
  *ierr = MPI_Init(NULL, NULL);
}
614
/* Fortran MPI_Finalize() */
PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
{
  *ierr = MPI_Finalize();
}
619
/* Single-process MPI: every communicator has size 1 (comm is not validated here) */
PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm, int *size, int *ierr)
{
  *size = 1;
  *ierr = 0;
}
625
/* Single-process MPI: the calling process is always rank 0 */
PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm, int *rank, int *ierr)
{
  *rank = 0;
  *ierr = MPI_SUCCESS;
}
631
/* Splitting a one-process communicator yields the communicator itself; color and key are ignored */
PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm, int *color, int *key, MPI_Comm *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}
637
/* Terminates via abort(); the assignment to *ierr below is never reached */
PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm, int *errorcode, int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS;
}
643
/* Fortran MPI_Reduce(): delegate to the C implementation (a one-process reduction) */
PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *root, int *comm, int *ierr)
{
  *ierr = MPI_Reduce(sendbuf, recvbuf, *count, *datatype, *op, *root, *comm);
}
648
/* Fortran MPI_Allreduce(): delegate to the C implementation (a one-process reduction) */
PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, *datatype, *op, *comm);
}
653
/* A barrier over one process is a no-op */
PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
658
/* A broadcast over one process is a no-op: the root already holds the data */
PETSC_EXTERN void petsc_mpi_bcast_(void *buf, int *count, int *datatype, int *root, int *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
663
/* Fortran MPI_Gather(). rcount/rdatatype are forwarded as pointers, not dereferenced —
   assumes the C MPI_Gather() is a macro that ignores them; TODO confirm against mpiuni's mpi.h */
PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root, int *comm, int *ierr)
{
  *ierr = MPI_Gather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *root, *comm);
}
668
/* Fortran MPI_Allgather(). rcount/rdatatype are forwarded as pointers, not dereferenced —
   assumes the C MPI_Allgather() is a macro that ignores them; TODO confirm against mpiuni's mpi.h */
PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *comm, int *ierr)
{
  *ierr = MPI_Allgather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *comm);
}
673
/* An inclusive scan over one process is a copy of sendbuf into recvbuf; op is ignored */
PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*count) * MPI_sizeof(*datatype));
}
678
/* Point-to-point send cannot complete with a single process: report unsupported */
PETSC_EXTERN void petsc_mpi_send_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
683
/* Point-to-point receive cannot complete with a single process: report unsupported.
   NOTE(review): status is declared int (by value), not int * — harmless since unused, but confirm intended */
PETSC_EXTERN void petsc_mpi_recv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int status, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
688
/* Not implemented in MPIUni: report unsupported */
PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf, void *recvbuf, int *recvcounts, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
693
/* Nonblocking receive cannot complete with a single process: report unsupported */
PETSC_EXTERN void petsc_mpi_irecv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
698
/* Nonblocking send cannot complete with a single process: report unsupported */
PETSC_EXTERN void petsc_mpi_isend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
703
/* Send-to-self: copy the send buffer into the receive buffer; recv-side count/type and status are ignored */
PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf, int *sendcount, int *sendtype, int *dest, int *sendtag, void *recvbuf, int *recvcount, int *recvtype, int *source, int *recvtag, int *comm, int *status, int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*sendcount) * MPI_sizeof(*sendtype));
}
708
/* Not implemented in MPIUni: report unsupported */
PETSC_EXTERN void petsc_mpi_test_(int *request, int *flag, int *status, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
713
/* No nonblocking operation can ever be pending, so waiting is a no-op */
PETSC_EXTERN void petsc_mpi_waitall_(int *count, int *array_of_requests, int *array_of_statuses, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
718
/* No nonblocking operation can ever be pending, so waiting is a no-op; note *index is not set */
PETSC_EXTERN void petsc_mpi_waitany_(int *count, int *array_of_requests, int *index, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
723
/* Fortran MPI_Allgatherv(): the counts/displacements arrays are forwarded as-is to the C version */
PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf, int *sendcount, int *sendtype, void *recvbuf, int *recvcounts, int *displs, int *recvtype, int *comm, int *ierr)
{
  *ierr = MPI_Allgatherv(sendbuf, *sendcount, *sendtype, recvbuf, recvcounts, displs, *recvtype, *comm);
}
728
/* Fortran MPI_Alltoallv(): the counts/displacements arrays are forwarded as-is to the C version */
PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf, int *sendcounts, int *sdispls, int *sendtype, void *recvbuf, int *recvcounts, int *rdispls, int *recvtype, int *comm, int *ierr)
{
  *ierr = MPI_Alltoallv(sendbuf, sendcounts, sdispls, *sendtype, recvbuf, recvcounts, rdispls, *recvtype, *comm);
}
733
/* Returns the input communicator; group is ignored (note: the C MPI_Comm_create() allocates a new comm instead) */
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm, int *group, int *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}
739
/* MPI-1 MPI_Address(): with one process an "address" is simply the pointer value */
PETSC_EXTERN void petsc_mpi_address_(void *location, MPI_Aint *address, int *ierr)
{
  *address = (MPI_Aint)((char *)location);
  *ierr    = MPI_SUCCESS;
}
745
/* Not implemented in MPIUni: report unsupported */
PETSC_EXTERN void petsc_mpi_pack_(void *inbuf, int *incount, int *datatype, void *outbuf, int *outsize, int *position, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
750
/* Not implemented in MPIUni: report unsupported */
PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf, int *insize, int *position, void *outbuf, int *outcount, int *datatype, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
755
/* Not implemented in MPIUni: report unsupported */
PETSC_EXTERN void petsc_mpi_pack_size_(int *incount, int *datatype, int *comm, int *size, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
760
/* Not implemented in MPIUni: report unsupported */
PETSC_EXTERN void petsc_mpi_type_struct_(int *count, int *array_of_blocklengths, int *array_of_displaments, int *array_of_types, int *newtype, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
765
/* Datatypes need no commit step in MPIUni: no-op */
PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
770
/* Timing is not implemented in the Fortran wrapper: always returns 0.0.
   NOTE(review): unlike the other wrappers this lacks PETSC_EXTERN — confirm that is intended */
double petsc_mpi_wtime_(void)
{
  return 0.0;
}
775
/* Nothing can ever be pending to cancel: no-op */
PETSC_EXTERN void petsc_mpi_cancel_(int *request, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
780
/* Returns the input communicator itself (note: the C MPI_Comm_dup() allocates a new slot instead) */
PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm, int *out, int *ierr)
{
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}
786
/* No-op: petsc_mpi_comm_dup_() allocates nothing, so there is nothing to release */
PETSC_EXTERN void petsc_mpi_comm_free_(int *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
791
/* Not implemented in MPIUni: report unsupported */
PETSC_EXTERN void petsc_mpi_get_count_(int *status, int *datatype, int *count, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
796
/* Always reports "localhost". The name is not NUL-terminated: Fortran strings carry an explicit
   length (*result_len = 9). Assumes the buffer holds at least 9 chars; the hidden length len is ignored */
PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name, int *result_len, int *ierr, PETSC_FORTRAN_CHARLEN_T len)
{
  MPIUNI_Memcpy(name, "localhost", 9 * sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}
803
/* Fortran MPI_Initialized(): report whether MPI_Init() has been called */
PETSC_EXTERN void petsc_mpi_initialized_(int *flag, int *ierr)
{
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}
809
petsc_mpi_iprobe_(int * source,int * tag,int * comm,int * glag,int * status,int * ierr)810 PETSC_EXTERN void petsc_mpi_iprobe_(int *source, int *tag, int *comm, int *glag, int *status, int *ierr)
811 {
812 *ierr = MPI_SUCCESS;
813 }
814
/* With one process no message can ever be pending; only *ierr is set, *flag is left untouched */
PETSC_EXTERN void petsc_mpi_probe_(int *source, int *tag, int *comm, int *flag, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
819
/* No requests are ever created by these wrappers: no-op */
PETSC_EXTERN void petsc_mpi_request_free_(int *request, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
824
/* Synchronous send cannot complete with a single process: report unsupported */
PETSC_EXTERN void petsc_mpi_ssend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}
829
/* No nonblocking operation can ever be pending, so waiting is a no-op */
PETSC_EXTERN void petsc_mpi_wait_(int *request, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
834
/* No-op; NOTE(review): *group is never written, so callers receive an undefined group — confirm intended */
PETSC_EXTERN void petsc_mpi_comm_group_(int *comm, int *group, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
839
/* An exclusive scan on a single process leaves recvbuf undefined (per MPI), so nothing is copied */
PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}
844
845 #endif /* PETSC_USE_FORTRAN_BINDINGS */
846
847 #if defined(__cplusplus)
848 }
849 #endif
850