xref: /petsc/src/sys/mpiuni/mpi.c (revision 522d69832bbebeff16b0753664ee9139fc693483) !
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include "mpi.h"
6 #if !defined(__MPIUNI_H)
7 #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8 #endif
9 #if !defined(PETSC_STDCALL)
10 #define PETSC_STDCALL
11 #endif
12 #include <stdio.h>
13 #if defined(PETSC_HAVE_STDLIB_H)
14 #include <stdlib.h>
15 #endif
16 
17 #define MPI_SUCCESS 0
18 #define MPI_FAILURE 1
19 void    *MPIUNI_TMP        = 0;
20 int     MPIUNI_DATASIZE[7] = {sizeof(int),sizeof(float),sizeof(double),2*sizeof(double),sizeof(char),2*sizeof(int),4*sizeof(double)};
21 /*
22        With MPI Uni there is only one communicator, which is called 1.
23 */
24 #define MAX_ATTR 128
25 
26 typedef struct {
27   void                *extra_state;
28   void                *attribute_val;
29   int                 active;
30   MPI_Delete_function *del;
31 } MPI_Attr;
32 
33 static MPI_Attr attr[MAX_ATTR];
34 static int      num_attr = 1,mpi_tag_ub = 100000000;
35 
36 #if defined(__cplusplus)
37 extern "C" {
38 #endif
39 
40 /*
41    To avoid problems with prototypes to the system memcpy() it is duplicated here
42 */
/*
   Byte-wise copy used in place of the system memcpy() to avoid prototype
   clashes.  Copies n bytes from b to a; the regions must not overlap.
   The source pointer is kept const-qualified throughout (the original
   cast discarded the const qualifier).
*/
int MPIUNI_Memcpy(void *a,const void* b,int n) {
  int        i;
  char       *aa = (char*)a;
  const char *bb = (const char*)b;

  for (i=0; i<n; i++) aa[i] = bb[i];
  return 0;
}
51 
52 /*
53    Used to set the built-in MPI_TAG_UB attribute
54 */
55 static int Keyval_setup(void)
56 {
57   attr[0].active        = 1;
58   attr[0].attribute_val = &mpi_tag_ub;
59   return 0;
60 }
61 
62 int MPI_Keyval_create(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
63 {
64   if (num_attr >= MAX_ATTR) MPI_Abort(MPI_COMM_WORLD,1);
65 
66   attr[num_attr].extra_state = extra_state;
67   attr[num_attr].del         = delete_fn;
68   *keyval                    = num_attr++;
69   return 0;
70 }
71 
72 int MPI_Keyval_free(int *keyval)
73 {
74   attr[*keyval].active = 0;
75   return MPI_SUCCESS;
76 }
77 
78 int MPI_Attr_put(MPI_Comm comm,int keyval,void *attribute_val)
79 {
80   attr[keyval].active        = 1;
81   attr[keyval].attribute_val = attribute_val;
82   return MPI_SUCCESS;
83 }
84 
85 int MPI_Attr_delete(MPI_Comm comm,int keyval)
86 {
87   if (attr[keyval].active && attr[keyval].del) {
88     (*(attr[keyval].del))(comm,keyval,attr[keyval].attribute_val,attr[keyval].extra_state);
89   }
90   attr[keyval].active        = 0;
91   attr[keyval].attribute_val = 0;
92   return MPI_SUCCESS;
93 }
94 
95 int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
96 {
97   if (!keyval) Keyval_setup();
98   *flag                   = attr[keyval].active;
99   *(void **)attribute_val = attr[keyval].attribute_val;
100   return MPI_SUCCESS;
101 }
102 
103 int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
104 {
105   *newcomm =  comm;
106   return MPI_SUCCESS;
107 }
108 
109 static int dups = 0;
110 int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
111 {
112   *out = comm;
113   dups++;
114   return 0;
115 }
116 
117 int MPI_Comm_free(MPI_Comm *comm)
118 {
119   int i;
120 
121   if (--dups) return MPI_SUCCESS;
122   for (i=0; i<num_attr; i++) {
123     if (attr[i].active && attr[i].del) {
124       (*attr[i].del)(*comm,i,attr[i].attribute_val,attr[i].extra_state);
125     }
126     attr[i].active = 0;
127   }
128   return MPI_SUCCESS;
129 }
130 
131 int MPI_Abort(MPI_Comm comm,int errorcode)
132 {
133   abort();
134   return MPI_SUCCESS;
135 }
136 
137 /* --------------------------------------------------------------------------*/
138 
/* Lifecycle flags: init/finalize may each happen at most once, in order. */
static int MPI_was_initialized = 0;
static int MPI_was_finalized   = 0;

/*
   Marks MPI as initialized.  Fails (returns 1) on a second call or after
   MPI_Finalize; argc/argv are unused.
*/
int MPI_Init(int *argc, char ***argv)
{
  if (MPI_was_initialized || MPI_was_finalized) return 1;
  MPI_was_initialized = 1;
  return 0;
}
149 
150 int MPI_Finalize(void)
151 {
152   if (MPI_was_finalized) return 1;
153   if (!MPI_was_initialized) return 1;
154   MPI_was_finalized = 1;
155   return 0;
156 }
157 
158 int MPI_Initialized(int *flag)
159 {
160   *flag = MPI_was_initialized;
161   return 0;
162 }
163 
164 int MPI_Finalized(int *flag)
165 {
166   *flag = MPI_was_finalized;
167   return 0;
168 }
169 
170 /* -------------------     Fortran versions of several routines ------------------ */
171 
172 #if defined(PETSC_HAVE_FORTRAN_CAPS)
173 #define mpi_init_             MPI_INIT
174 #define mpi_finalize_         MPI_FINALIZE
175 #define mpi_comm_size_        MPI_COMM_SIZE
176 #define mpi_comm_rank_        MPI_COMM_RANK
177 #define mpi_abort_            MPI_ABORT
178 #define mpi_reduce_           MPI_REDUCE
179 #define mpi_allreduce_        MPI_ALLREDUCE
180 #define mpi_barrier_          MPI_BARRIER
181 #define mpi_bcast_            MPI_BCAST
182 #define mpi_gather_           MPI_GATHER
183 #define mpi_allgather_        MPI_ALLGATHER
184 #define mpi_comm_split_       MPI_COMM_SPLIT
185 #define mpi_scan_             MPI_SCAN
186 #define mpi_send_             MPI_SEND
187 #define mpi_recv_             MPI_RECV
188 
189 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
190 #define mpi_init_             mpi_init
191 #define mpi_finalize_         mpi_finalize
192 #define mpi_comm_size_        mpi_comm_size
193 #define mpi_comm_rank_        mpi_comm_rank
194 #define mpi_abort_            mpi_abort
195 #define mpi_reduce_           mpi_reduce
196 #define mpi_allreduce_        mpi_allreduce
197 #define mpi_barrier_          mpi_barrier
198 #define mpi_bcast_            mpi_bcast
199 #define mpi_gather_           mpi_gather
200 #define mpi_allgather_        mpi_allgather
201 #define mpi_comm_split_       mpi_comm_split
202 #define mpi_scan_             mpi_scan
203 #define mpi_send_             mpi_send
204 #define mpi_recv_             mpi_recv
205 #endif
206 
#if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
#define mpi_init_             mpi_init__
#define mpi_finalize_         mpi_finalize__
#define mpi_comm_size_        mpi_comm_size__
#define mpi_comm_rank_        mpi_comm_rank__
#define mpi_abort_            mpi_abort__
#define mpi_reduce_           mpi_reduce__
#define mpi_allreduce_        mpi_allreduce__
#define mpi_barrier_          mpi_barrier__
#define mpi_bcast_            mpi_bcast__
#define mpi_gather_           mpi_gather__
#define mpi_allgather_        mpi_allgather__
#define mpi_comm_split_       mpi_comm_split__
#define mpi_scan_             mpi_scan__
#define mpi_send_             mpi_send__
#define mpi_recv_             mpi_recv__
#endif
224 
225 
/* Do not build fortran interface if MPI namespace collision is to be avoided */
227 #if !defined(MPIUNI_AVOID_MPI_NAMESPACE)
228 
229 void PETSC_STDCALL  mpi_init_(int *ierr)
230 {
231   *ierr = MPI_Init((int*)0, (char***)0);
232 }
233 
234 void PETSC_STDCALL  mpi_finalize_(int *ierr)
235 {
236   *ierr = MPI_Finalize();
237 }
238 
239 void PETSC_STDCALL mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
240 {
241   *size = 1;
242   *ierr = 0;
243 }
244 
245 void PETSC_STDCALL mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
246 {
247   *rank=0;
248   *ierr=MPI_SUCCESS;
249 }
250 
251 void PETSC_STDCALL mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
252 {
253   *newcomm = *comm;
254   *ierr=MPI_SUCCESS;
255 }
256 
257 void PETSC_STDCALL mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
258 {
259   abort();
260   *ierr = MPI_SUCCESS;
261 }
262 
263 void PETSC_STDCALL mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
264 {
265   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
266   *ierr = MPI_SUCCESS;
267 }
268 
269 void PETSC_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
270 {
271   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
272   *ierr = MPI_SUCCESS;
273 }
274 
275 void PETSC_STDCALL mpi_barrier_(MPI_Comm *comm,int *ierr)
276 {
277   *ierr = MPI_SUCCESS;
278 }
279 
280 void PETSC_STDCALL mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
281 {
282   *ierr = MPI_SUCCESS;
283 }
284 
285 
286 void PETSC_STDCALL mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype, int *root,int *comm,int *ierr)
287 {
288   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
289   *ierr = MPI_SUCCESS;
290 }
291 
292 void PETSC_STDCALL mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype,int *comm,int *ierr)
293 {
294   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
295   *ierr = MPI_SUCCESS;
296 }
297 
298 void PETSC_STDCALL mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
299 {
300   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
301   *ierr = MPI_SUCCESS;
302 }
303 
304 void PETSC_STDCALL mpi_send_(void*buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr )
305 {
306   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
307 }
308 
309 void PETSC_STDCALL mpi_recv_(void*buf,int *count,int *datatype,int *source,int *tag,int *comm,int status,int *ierr )
310 {
311   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
312 }
313 
314 #endif /* MPIUNI_AVOID_MPI_NAMESPACE */
315 
316 #if defined(__cplusplus)
317 }
318 #endif
319