xref: /petsc/src/sys/mpiuni/mpi.c (revision c8217ed54290bbc6e6b671611943d367bca9f0e4)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include "include/mpiuni/mpi.h"
6 #include "petsc.h"
7 
8 #if defined(PETSC_HAVE_STDLIB_H)
9 #include <stdlib.h>
10 #endif
11 
#define MPI_SUCCESS 0
#define MPI_FAILURE 1
/* NOTE(review): apparently a scratch slot used by the macros in mpiuni/mpi.h — confirm */
void    *MPIUNI_TMP        = 0;
/* Size in bytes of each MPIUNI datatype, indexed by the datatype's integer value */
int     MPIUNI_DATASIZE[5] = { sizeof(int),sizeof(float),sizeof(double),2*sizeof(double),sizeof(char)};
/*
       With MPI Uni there is only one communicator, which is called 1.
*/
#define MAX_ATTR 128

/* One communicator-attribute slot: the cached value plus its delete callback */
typedef struct {
  void                *extra_state;    /* user state handed back to del() */
  void                *attribute_val;  /* the cached attribute value */
  int                 active;          /* nonzero while the keyval is in use */
  MPI_Delete_function *del;            /* optional destructor callback */
} MPI_Attr;

static MPI_Attr attr[MAX_ATTR];
/* num_attr starts at 1: slot 0 is reserved for the built-in MPI_TAG_UB attribute */
static int      num_attr = 1,mpi_tag_ub = 100000000;
30 
31 #if defined(__cplusplus)
32 extern "C" {
33 #endif
34 
35 /*
36    To avoid problems with prototypes to the system memcpy() it is duplicated here
37 */
/*
   To avoid problems with prototypes to the system memcpy() it is duplicated here.
   Copies n bytes from b to a; regions are assumed not to overlap.  Always returns 0.

   Fix: the source parameter is const-qualified, so the internal byte pointer
   keeps the qualifier instead of casting it away.
*/
int MPIUNI_Memcpy(void *a,const void* b,int n) {
  int        i;
  char       *aa = (char*)a;
  const char *bb = (const char*)b;

  for (i=0; i<n; i++) aa[i] = bb[i];
  return 0;
}
46 
47 /*
48    Used to set the built-in MPI_TAG_UB attribute
49 */
50 static int Keyval_setup(void)
51 {
52   attr[0].active        = 1;
53   attr[0].attribute_val = &mpi_tag_ub;
54   return 0;
55 }
56 
57 int MPI_Keyval_create(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
58 {
59   if (num_attr >= MAX_ATTR) MPI_Abort(MPI_COMM_WORLD,1);
60 
61   attr[num_attr].extra_state = extra_state;
62   attr[num_attr].del         = delete_fn;
63   *keyval                    = num_attr++;
64   return 0;
65 }
66 
67 int MPI_Keyval_free(int *keyval)
68 {
69   attr[*keyval].active = 0;
70   return MPI_SUCCESS;
71 }
72 
73 int MPI_Attr_put(MPI_Comm comm,int keyval,void *attribute_val)
74 {
75   attr[keyval].active        = 1;
76   attr[keyval].attribute_val = attribute_val;
77   return MPI_SUCCESS;
78 }
79 
80 int MPI_Attr_delete(MPI_Comm comm,int keyval)
81 {
82   if (attr[keyval].active && attr[keyval].del) {
83     (*(attr[keyval].del))(comm,keyval,attr[keyval].attribute_val,attr[keyval].extra_state);
84   }
85   attr[keyval].active        = 0;
86   attr[keyval].attribute_val = 0;
87   return MPI_SUCCESS;
88 }
89 
90 int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
91 {
92   if (!keyval) Keyval_setup();
93   *flag                   = attr[keyval].active;
94   *(void **)attribute_val = attr[keyval].attribute_val;
95   return MPI_SUCCESS;
96 }
97 
98 int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
99 {
100   *newcomm =  comm;
101   return MPI_SUCCESS;
102 }
103 
104 static int dups = 0;
105 int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
106 {
107   *out = comm;
108   dups++;
109   return 0;
110 }
111 
112 int MPI_Comm_free(MPI_Comm *comm)
113 {
114   int i;
115 
116   if (--dups) return MPI_SUCCESS;
117   for (i=0; i<num_attr; i++) {
118     if (attr[i].active && attr[i].del) {
119       (*attr[i].del)(*comm,i,attr[i].attribute_val,attr[i].extra_state);
120     }
121     attr[i].active = 0;
122   }
123   return MPI_SUCCESS;
124 }
125 
126 int MPI_Abort(MPI_Comm comm,int errorcode)
127 {
128   abort();
129   return MPI_SUCCESS;
130 }
131 
132 /* --------------------------------------------------------------------------*/
133 
static int MPI_was_initialized = 0;
static int MPI_was_finalized   = 0;

/*
   Marks MPI as initialized.  Returns 1 (failure) when called a second
   time or after MPI_Finalize(); argc/argv are ignored.
*/
int MPI_Init(int *argc, char ***argv)
{
  if (MPI_was_initialized || MPI_was_finalized) return 1;
  MPI_was_initialized = 1;
  return 0;
}
144 
145 int MPI_Finalize(void)
146 {
147   if (MPI_was_finalized) return 1;
148   if (!MPI_was_initialized) return 1;
149   MPI_was_finalized = 1;
150   return 0;
151 }
152 
153 int MPI_Initialized(int *flag)
154 {
155   *flag = MPI_was_initialized;
156   return 0;
157 }
158 
159 int MPI_Finalized(int *flag)
160 {
161   *flag = MPI_was_finalized;
162   return 0;
163 }
164 
165 /* -------------------     Fortran versions of several routines ------------------ */
166 
167 #if defined(PETSC_HAVE_FORTRAN_CAPS)
168 #define mpi_init_             MPI_INIT
169 #define mpi_finalize_         MPI_FINALIZE
170 #define mpi_comm_size_        MPI_COMM_SIZE
171 #define mpi_comm_rank_        MPI_COMM_RANK
172 #define mpi_abort_            MPI_ABORT
173 #define mpi_reduce_           MPI_REDUCE
174 #define mpi_allreduce_        MPI_ALLREDUCE
175 #define mpi_barrier_          MPI_BARRIER
176 #define mpi_bcast_            MPI_BCAST
177 #define mpi_gather_           MPI_GATHER
178 #define mpi_allgather_        MPI_ALLGATHER
179 #define mpi_comm_split_       MPI_COMM_SPLIT
180 #define mpi_scan_             MPI_SCAN
181 #define mpi_send_             MPI_SEND
182 #define mpi_recv_             MPI_RECV
183 
184 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
185 #define mpi_init_             mpi_init
186 #define mpi_finalize_         mpi_finalize
187 #define mpi_comm_size_        mpi_comm_size
188 #define mpi_comm_rank_        mpi_comm_rank
189 #define mpi_abort_            mpi_abort
190 #define mpi_reduce_           mpi_reduce
191 #define mpi_allreduce_        mpi_allreduce
192 #define mpi_barrier_          mpi_barrier
193 #define mpi_bcast_            mpi_bcast
194 #define mpi_gather_           mpi_gather
195 #define mpi_allgather_        mpi_allgather
196 #define mpi_comm_split_       mpi_comm_split
197 #define mpi_scan_             mpi_scan
198 #define mpi_send_             mpi_send
199 #define mpi_recv_             mpi_recv
200 #endif
201 
#if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
#define mpi_init_             mpi_init__
#define mpi_finalize_         mpi_finalize__
#define mpi_comm_size_        mpi_comm_size__
#define mpi_comm_rank_        mpi_comm_rank__
#define mpi_abort_            mpi_abort__
#define mpi_reduce_           mpi_reduce__
#define mpi_allreduce_        mpi_allreduce__
#define mpi_barrier_          mpi_barrier__
#define mpi_bcast_            mpi_bcast__
#define mpi_gather_           mpi_gather__
#define mpi_allgather_        mpi_allgather__
#define mpi_comm_split_       mpi_comm_split__
/* Fix: was "#define mpi_scan mpi_scan__" (missing trailing underscore), so the
   mpi_scan_ wrapper below was never renamed in double-underscore builds and the
   Fortran symbol mpi_scan__ was left undefined at link time. */
#define mpi_scan_             mpi_scan__
#define mpi_send_             mpi_send__
#define mpi_recv_             mpi_recv__
#endif
219 
220 
221 /* Do not build fortran interface if MPI namespace colision is to be avoided */
222 #if !defined(MPIUNI_AVOID_MPI_NAMESPACE)
223 
224 void PETSC_STDCALL  mpi_init_(int *ierr)
225 {
226   *ierr = MPI_Init((int*)0, (char***)0);
227 }
228 
229 void PETSC_STDCALL  mpi_finalize_(int *ierr)
230 {
231   *ierr = MPI_Finalize();
232 }
233 
234 void PETSC_STDCALL mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
235 {
236   *size = 1;
237   *ierr = 0;
238 }
239 
240 void PETSC_STDCALL mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
241 {
242   *rank=0;
243   *ierr=MPI_SUCCESS;
244 }
245 
246 void PETSC_STDCALL mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
247 {
248   *newcomm = *comm;
249   *ierr=MPI_SUCCESS;
250 }
251 
252 void PETSC_STDCALL mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
253 {
254   abort();
255   *ierr = MPI_SUCCESS;
256 }
257 
258 void PETSC_STDCALL mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
259 {
260   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
261   *ierr = MPI_SUCCESS;
262 }
263 
264 void PETSC_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
265 {
266   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
267   *ierr = MPI_SUCCESS;
268 }
269 
270 void PETSC_STDCALL mpi_barrier_(MPI_Comm *comm,int *ierr)
271 {
272   *ierr = MPI_SUCCESS;
273 }
274 
275 void PETSC_STDCALL mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
276 {
277   *ierr = MPI_SUCCESS;
278 }
279 
280 
281 void PETSC_STDCALL mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype, int *root,int *comm,int *ierr)
282 {
283   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
284   *ierr = MPI_SUCCESS;
285 }
286 
287 void PETSC_STDCALL mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype,int *comm,int *ierr)
288 {
289   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
290   *ierr = MPI_SUCCESS;
291 }
292 
293 void PETSC_STDCALL mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
294 {
295   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
296   *ierr = MPI_SUCCESS;
297 }
298 
299 void PETSC_STDCALL mpi_send_(void*buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr )
300 {
301   *ierr = MPI_SUCCESS;
302 }
303 
304 void PETSC_STDCALL mpi_recv_(void*buf,int *count,int *datatype,int *source,int *tag,int *comm,int status,int *ierr )
305 {
306   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
307 }
308 
309 #endif /* MPIUNI_AVOID_MPI_NAMESPACE */
310 
311 #if defined(__cplusplus)
312 }
313 #endif
314