xref: /petsc/src/sys/mpiuni/mpi.c (revision 1086b0692845c998bbb8fcece0fee6b3fb8909cd)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include "include/mpiuni/mpi.h"
6 #include "petsc.h"
7 
8 #if defined(PETSC_HAVE_STDLIB_H)
9 #include <stdlib.h>
10 #endif
11 
12 #define MPI_SUCCESS 0
13 #define MPI_FAILURE 1
14 void    *MPIUNI_TMP        = 0;
15 int     MPIUNI_DATASIZE[5] = { sizeof(int),sizeof(float),sizeof(double),2*sizeof(double),sizeof(char)};
16 /*
17        With MPI Uni there is only one communicator, which is called 1.
18 */
19 #define MAX_ATTR 128
20 
/* One cached communicator attribute (slot in the static table below) */
typedef struct {
  void                *extra_state;    /* user state passed back to the delete callback */
  void                *attribute_val;  /* value stored by MPI_Attr_put() */
  int                 active;          /* 1 while this keyval slot holds a value */
  MPI_Delete_function *del;            /* callback run when the attribute is deleted */
} MPI_Attr;
27 
28 static MPI_Attr attr[MAX_ATTR];
29 static int      num_attr = 1,mpi_tag_ub = 100000000;
30 
31 #if defined(__cplusplus)
32 extern "C" {
33 #endif
34 
35 /*
36    To avoid problems with prototypes to the system memcpy() it is duplicated here
37 */
int MPIUNI_Memcpy(void *a,const void* b,int n)
{
  /*
     Copies n bytes from b to a (regions must not overlap, as with memcpy()).
     Always returns 0; there is no failure mode.
  */
  int        i;
  char       *aa = (char*)a;
  const char *bb = (const char*)b; /* keep the source const-qualified instead of casting const away */

  for (i=0; i<n; i++) aa[i] = bb[i];
  return 0;
}
46 
47 /*
48    Used to set the built-in MPI_TAG_UB attribute
49 */
50 static int Keyval_setup(void)
51 {
52   attr[0].active        = 1;
53   attr[0].attribute_val = &mpi_tag_ub;
54   return 0;
55 }
56 
57 /*
58          These functions are mapped to the Petsc_ name by ./mpi.h
59 */
60 int Petsc_MPI_Keyval_create(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
61 {
62   if (num_attr >= MAX_ATTR) MPI_Abort(MPI_COMM_WORLD,1);
63 
64   attr[num_attr].extra_state = extra_state;
65   attr[num_attr].del         = delete_fn;
66   *keyval                    = num_attr++;
67   return 0;
68 }
69 
70 int Petsc_MPI_Keyval_free(int *keyval)
71 {
72   attr[*keyval].active = 0;
73   return MPI_SUCCESS;
74 }
75 
76 int Petsc_MPI_Attr_put(MPI_Comm comm,int keyval,void *attribute_val)
77 {
78   attr[keyval].active        = 1;
79   attr[keyval].attribute_val = attribute_val;
80   return MPI_SUCCESS;
81 }
82 
83 int Petsc_MPI_Attr_delete(MPI_Comm comm,int keyval)
84 {
85   if (attr[keyval].active && attr[keyval].del) {
86     (*(attr[keyval].del))(comm,keyval,attr[keyval].attribute_val,attr[keyval].extra_state);
87   }
88   attr[keyval].active        = 0;
89   attr[keyval].attribute_val = 0;
90   return MPI_SUCCESS;
91 }
92 
93 int Petsc_MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
94 {
95   if (!keyval) Keyval_setup();
96   *flag                   = attr[keyval].active;
97   *(void **)attribute_val = attr[keyval].attribute_val;
98   return MPI_SUCCESS;
99 }
100 
101 static int dups = 0;
102 int Petsc_MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
103 {
104   *out = comm;
105   dups++;
106   return 0;
107 }
108 
109 int Petsc_MPI_Comm_free(MPI_Comm *comm)
110 {
111   int i;
112 
113   if (--dups) return MPI_SUCCESS;
114   for (i=0; i<num_attr; i++) {
115     if (attr[i].active && attr[i].del) {
116       (*attr[i].del)(*comm,i,attr[i].attribute_val,attr[i].extra_state);
117     }
118     attr[i].active = 0;
119   }
120   return MPI_SUCCESS;
121 }
122 
/* With a single process there is no job to tear down: terminate via abort()
   (which allows a core dump / debugger trap); errorcode is ignored. */
int Petsc_MPI_Abort(MPI_Comm comm,int errorcode)
{
  abort();
  return MPI_SUCCESS; /* not reached */
}
128 
129 /* --------------------------------------------------------------------------*/
130 
static int MPI_was_initialized = 0;  /* set exactly once by MPI_Init() */
static int MPI_was_finalized   = 0;  /* set exactly once by MPI_Finalize() */

int Petsc_MPI_Init(int *argc, char ***argv)
{
  /* Nonzero return on repeated init or init after finalize; argc/argv unused */
  if (MPI_was_initialized || MPI_was_finalized) return 1;
  MPI_was_initialized = 1;
  return 0;
}
141 
142 int Petsc_MPI_Finalize(void)
143 {
144   if (MPI_was_finalized) return 1;
145   if (!MPI_was_initialized) return 1;
146   MPI_was_finalized = 1;
147   return 0;
148 }
149 
/* Reports (in *flag) whether MPI_Init() has been called */
int Petsc_MPI_Initialized(int *flag)
{
  *flag = MPI_was_initialized;
  return 0;
}
155 
/* Reports (in *flag) whether MPI_Finalize() has been called */
int Petsc_MPI_Finalized(int *flag)
{
  *flag = MPI_was_finalized;
  return 0;
}
161 
162 /* -------------------     Fortran versions of several routines ------------------ */
163 
/*
   Map the single-trailing-underscore names used for the definitions below
   onto the symbol names the Fortran compiler actually generates:
   - PETSC_HAVE_FORTRAN_CAPS:                all-uppercase, no underscore
   - neither CAPS nor FORTRAN_UNDERSCORE:    lowercase, no underscore
   - otherwise (single underscore, default): the names are already correct
*/
#if defined(PETSC_HAVE_FORTRAN_CAPS)
#define mpi_init_             MPI_INIT
#define mpi_finalize_         MPI_FINALIZE
#define mpi_comm_size_        MPI_COMM_SIZE
#define mpi_comm_rank_        MPI_COMM_RANK
#define mpi_abort_            MPI_ABORT
#define mpi_reduce_           MPI_REDUCE
#define mpi_allreduce_        MPI_ALLREDUCE
#define mpi_barrier_          MPI_BARRIER
#define mpi_bcast_            MPI_BCAST
#define mpi_gather_           MPI_GATHER
#define mpi_allgather_        MPI_ALLGATHER
#define mpi_comm_split_       MPI_COMM_SPLIT
#define mpi_scan_             MPI_SCAN
#define mpi_send_             MPI_SEND
#define mpi_recv_             MPI_RECV

#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
#define mpi_init_             mpi_init
#define mpi_finalize_         mpi_finalize
#define mpi_comm_size_        mpi_comm_size
#define mpi_comm_rank_        mpi_comm_rank
#define mpi_abort_            mpi_abort
#define mpi_reduce_           mpi_reduce
#define mpi_allreduce_        mpi_allreduce
#define mpi_barrier_          mpi_barrier
#define mpi_bcast_            mpi_bcast
#define mpi_gather_           mpi_gather
#define mpi_allgather_        mpi_allgather
#define mpi_comm_split_       mpi_comm_split
#define mpi_scan_             mpi_scan
#define mpi_send_             mpi_send
#define mpi_recv_             mpi_recv
#endif
198 
/* Compilers that append a double underscore to Fortran symbol names */
#if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
#define mpi_init_             mpi_init__
#define mpi_finalize_         mpi_finalize__
#define mpi_comm_size_        mpi_comm_size__
#define mpi_comm_rank_        mpi_comm_rank__
#define mpi_abort_            mpi_abort__
#define mpi_reduce_           mpi_reduce__
#define mpi_allreduce_        mpi_allreduce__
#define mpi_barrier_          mpi_barrier__
#define mpi_bcast_            mpi_bcast__
#define mpi_gather_           mpi_gather__
#define mpi_allgather_        mpi_allgather__
#define mpi_comm_split_       mpi_comm_split__
#define mpi_scan_             mpi_scan__   /* bug fix: was "mpi_scan" (missing trailing underscore), so mpi_scan_ was never remapped */
#define mpi_send_             mpi_send__
#define mpi_recv_             mpi_recv__
#endif
216 
217 void PETSC_STDCALL  mpi_init_(int *ierr)
218 {
219   *ierr = Petsc_MPI_Init((int*)0, (char***)0);
220 }
221 
222 void PETSC_STDCALL  mpi_finalize_(int *ierr)
223 {
224   *ierr = Petsc_MPI_Finalize();
225 }
226 
227 void PETSC_STDCALL mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
228 {
229   *size = 1;
230   *ierr = 0;
231 }
232 
233 void PETSC_STDCALL mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
234 {
235   *rank=0;
236   *ierr=MPI_SUCCESS;
237 }
238 
239 void PETSC_STDCALL mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
240 {
241   *newcomm = *comm;
242   *ierr=MPI_SUCCESS;
243 }
244 
/* Fortran binding of MPI_Abort: terminates the process; *errorcode is ignored */
void PETSC_STDCALL mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS; /* not reached */
}
250 
251 void PETSC_STDCALL mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
252 {
253   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
254   *ierr = MPI_SUCCESS;
255 }
256 
257 void PETSC_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
258 {
259   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
260   *ierr = MPI_SUCCESS;
261 }
262 
/* With one process a barrier is trivially satisfied */
void PETSC_STDCALL mpi_barrier_(MPI_Comm *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
267 
/* Broadcast from the only rank to itself: the buffer already holds the data */
void PETSC_STDCALL mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
272 
273 
274 void PETSC_STDCALL mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype, int *root,int *comm,int *ierr)
275 {
276   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
277   *ierr = MPI_SUCCESS;
278 }
279 
280 void PETSC_STDCALL mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype,int *comm,int *ierr)
281 {
282   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
283   *ierr = MPI_SUCCESS;
284 }
285 
286 void PETSC_STDCALL mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
287 {
288   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
289   *ierr = MPI_SUCCESS;
290 }
291 
/* NOTE(review): with one process a send can never be matched, yet this
   silently succeeds while mpi_recv_ aborts -- presumably sends are never
   actually reached in uniprocessor runs; confirm against callers. */
void PETSC_STDCALL mpi_send_(void*buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr )
{
  *ierr = MPI_SUCCESS;
}
296 
297 void PETSC_STDCALL mpi_recv_(void*buf,int *count,int *datatype,int *source,int *tag,int *comm,int status,int *ierr )
298 {
299   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
300 }
301 
302 #if defined(__cplusplus)
303 }
304 #endif
305