xref: /petsc/src/sys/mpiuni/mpi.c (revision 58cd72c3d0f0c5a63e84afc03ac54bb1bb84cd8d)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include "include/mpiuni/mpi.h"
6 #include "petsc.h"
7 
8 #if defined(PETSC_HAVE_STDLIB_H)
9 #include <stdlib.h>
10 #endif
11 
#define MPI_SUCCESS 0
#define MPI_FAILURE 1
/* Scratch pointer used by the macros in mpi.h; holds temporary values only */
void    *MPIUNI_TMP        = 0;
/* Byte sizes of the supported datatypes, indexed by the datatype codes from
   mpi.h (int, float, double, 2*double -- presumably double complex -- char;
   TODO confirm index meaning against include/mpiuni/mpi.h) */
int     MPIUNI_DATASIZE[5] = { sizeof(int),sizeof(float),sizeof(double),2*sizeof(double),sizeof(char)};
/*
       With MPI Uni there is only one communicator, which is called 1.
*/
#define MAX_ATTR 128

/* One slot of the communicator-attribute cache (MPI_Attr_put/get/delete) */
typedef struct {
  void                *extra_state;   /* user pointer given to Keyval_create */
  void                *attribute_val; /* value stored by MPI_Attr_put */
  int                 active;         /* nonzero while the slot holds a value */
  MPI_Delete_function *del;           /* optional destructor callback */
} MPI_Attr;

/* Attribute table; slot 0 is reserved for the built-in MPI_TAG_UB key,
   hence num_attr starts at 1 */
static MPI_Attr attr[MAX_ATTR];
static int      num_attr = 1,mpi_tag_ub = 100000000;
30 
31 #if defined(__cplusplus)
32 extern "C" {
33 #endif
34 
35 /*
36    To avoid problems with prototypes to the system memcpy() it is duplicated here
37 */
/*
   To avoid problems with prototypes to the system memcpy() it is duplicated here.
   Copies n bytes from b to a; the regions must not overlap.  Always returns 0.
*/
int MPIUNI_Memcpy(void *a,const void* b,int n) {
  int        i;
  char       *aa = (char*)a;
  const char *bb = (const char*)b; /* keep const qualification; do not cast it away */

  for (i=0; i<n; i++) aa[i] = bb[i];
  return 0;
}
46 
47 /*
48    Used to set the built-in MPI_TAG_UB attribute
49 */
50 static int Keyval_setup(void)
51 {
52   attr[0].active        = 1;
53   attr[0].attribute_val = &mpi_tag_ub;
54   return 0;
55 }
56 
57 /*
58          These functions are mapped to the Petsc_ name by ./mpi.h
59 */
60 int Petsc_MPI_Keyval_create(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
61 {
62   if (num_attr >= MAX_ATTR) MPI_Abort(MPI_COMM_WORLD,1);
63 
64   attr[num_attr].extra_state = extra_state;
65   attr[num_attr].del         = delete_fn;
66   *keyval                    = num_attr++;
67   return 0;
68 }
69 
70 int Petsc_MPI_Keyval_free(int *keyval)
71 {
72   attr[*keyval].active = 0;
73   return MPI_SUCCESS;
74 }
75 
76 int Petsc_MPI_Attr_put(MPI_Comm comm,int keyval,void *attribute_val)
77 {
78   attr[keyval].active        = 1;
79   attr[keyval].attribute_val = attribute_val;
80   return MPI_SUCCESS;
81 }
82 
83 int Petsc_MPI_Attr_delete(MPI_Comm comm,int keyval)
84 {
85   if (attr[keyval].active && attr[keyval].del) {
86     (*(attr[keyval].del))(comm,keyval,attr[keyval].attribute_val,attr[keyval].extra_state);
87   }
88   attr[keyval].active        = 0;
89   attr[keyval].attribute_val = 0;
90   return MPI_SUCCESS;
91 }
92 
93 int Petsc_MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
94 {
95   if (!keyval) Keyval_setup();
96   *flag                   = attr[keyval].active;
97   *(void **)attribute_val = attr[keyval].attribute_val;
98   return MPI_SUCCESS;
99 }
100 
/* Reference count of outstanding MPI_Comm_dup() calls; read by
   Petsc_MPI_Comm_free() below to decide when to run delete callbacks */
static int dups = 0;
/* "Duplicates" a communicator: with a single process the duplicate is the
   communicator itself, so only the reference count is bumped. */
int Petsc_MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
{
  *out = comm;
  dups++;
  return 0;
}
108 
109 int Petsc_MPI_Comm_free(MPI_Comm *comm)
110 {
111   int i;
112 
113   if (--dups) return MPI_SUCCESS;
114   for (i=0; i<num_attr; i++) {
115     if (attr[i].active && attr[i].del) {
116       (*attr[i].del)(*comm,i,attr[i].attribute_val,attr[i].extra_state);
117     }
118     attr[i].active = 0;
119   }
120   return MPI_SUCCESS;
121 }
122 
/* Aborts the whole program; comm and errorcode are ignored.  Never returns. */
int Petsc_MPI_Abort(MPI_Comm comm,int errorcode)
{
  abort();
  return MPI_SUCCESS; /* unreachable; present only to satisfy the prototype */
}
128 
/* --------------------------------------------------------------------------*/

/* Flags recording whether MPI_Init() / MPI_Finalize() have been called */
static int MPI_was_initialized = 0;
static int MPI_was_finalized   = 0;
133 
134 int Petsc_MPI_Init(int *argc, char ***argv)
135 {
136   MPI_was_initialized = 1;
137   return 0;
138 }
139 
/* Shuts down "MPI": nothing to release, only the flag is recorded. */
int Petsc_MPI_Finalize(void)
{
  MPI_was_finalized = 1;
  return 0;
}
145 
/* Sets *flag nonzero iff MPI_Init() has been called. */
int Petsc_MPI_Initialized(int *flag)
{
  *flag = MPI_was_initialized;
  return 0;
}
151 
/* Sets *flag nonzero iff MPI_Finalize() has been called. */
int Petsc_MPI_Finalized(int *flag)
{
  *flag = MPI_was_finalized;
  return 0;
}
157 
158 /* -------------------     Fortran versions of several routines ------------------ */
159 
/* Map the Fortran wrapper names defined below onto the symbol convention
   the configured Fortran compiler actually emits: all-caps, no trailing
   underscore, or a double trailing underscore.  The configure system is
   expected to define at most one of these conventions at a time. */
#if defined(PETSC_HAVE_FORTRAN_CAPS)
#define mpi_init_             MPI_INIT
#define mpi_finalize_         MPI_FINALIZE
#define mpi_comm_size_        MPI_COMM_SIZE
#define mpi_comm_rank_        MPI_COMM_RANK
#define mpi_abort_            MPI_ABORT
#define mpi_allreduce_        MPI_ALLREDUCE
#define mpi_barrier_          MPI_BARRIER
#define mpi_bcast_            MPI_BCAST
#define mpi_gather_           MPI_GATHER
#define mpi_allgather_        MPI_ALLGATHER
#define mpi_comm_split_       MPI_COMM_SPLIT
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
#define mpi_init_             mpi_init
#define mpi_finalize_         mpi_finalize
#define mpi_comm_size_        mpi_comm_size
#define mpi_comm_rank_        mpi_comm_rank
#define mpi_abort_            mpi_abort
#define mpi_allreduce_        mpi_allreduce
#define mpi_barrier_          mpi_barrier
#define mpi_bcast_            mpi_bcast
#define mpi_gather_           mpi_gather
#define mpi_allgather_        mpi_allgather
#define mpi_comm_split_       mpi_comm_split
#endif

/* NOTE(review): if one of the cases above also applied, these redefine the
   macros without an #undef; assumed mutually exclusive -- confirm configure
   never defines both PETSC_HAVE_FORTRAN_CAPS and ..._UNDERSCORE_UNDERSCORE */
#if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
#define mpi_init_             mpi_init__
#define mpi_finalize_         mpi_finalize__
#define mpi_comm_size_        mpi_comm_size__
#define mpi_comm_rank_        mpi_comm_rank__
#define mpi_abort_            mpi_abort__
#define mpi_allreduce_        mpi_allreduce__
#define mpi_barrier_          mpi_barrier__
#define mpi_bcast_            mpi_bcast__
#define mpi_gather_           mpi_gather__
#define mpi_allgather_        mpi_allgather__
#define mpi_comm_split_       mpi_comm_split__
#endif
199 
200 void PETSC_STDCALL  mpi_init_(int *ierr)
201 {
202   MPI_was_initialized = 1;
203   *ierr = MPI_SUCCESS;
204 }
205 
206 void PETSC_STDCALL  mpi_finalize_(int *ierr)
207 {
208   MPI_was_finalized = 1;
209   *ierr = MPI_SUCCESS;
210 }
211 
212 void PETSC_STDCALL mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
213 {
214   *size = 1;
215   *ierr = 0;
216 }
217 
218 void PETSC_STDCALL mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
219 {
220   *rank=0;
221   *ierr=MPI_SUCCESS;
222 }
223 
224 void PETSC_STDCALL mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
225 {
226   *newcomm = *comm;
227   *ierr=MPI_SUCCESS;
228 }
229 
/* Fortran binding of MPI_Abort; terminates the program.  Never returns. */
void PETSC_STDCALL mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS; /* NOTE(review): unreachable after abort() */
}
235 
236 void PETSC_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
237 {
238   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
239   *ierr = MPI_SUCCESS;
240 }
241 
/* Fortran binding of MPI_Barrier: a no-op with a single process. */
void PETSC_STDCALL mpi_barrier_(MPI_Comm *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
246 
/* Fortran binding of MPI_Bcast: a no-op with a single process, since the
   root already holds the data in buf. */
void PETSC_STDCALL mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
251 
252 
253 void PETSC_STDCALL mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype, int *root,int *comm,int *ierr)
254 {
255   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
256   *ierr = MPI_SUCCESS;
257 }
258 
259 
260 void PETSC_STDCALL mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype,int *comm,int *ierr)
261 {
262   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
263   *ierr = MPI_SUCCESS;
264 }
265 
266 #if defined(__cplusplus)
267 }
268 #endif
269 
270 
271