xref: /petsc/src/sys/mpiuni/mpi.c (revision 906ed7cc33fecbafab01746eec64dcdcc8a4842f)
1 /*
2       This provides a few of the MPI-uni functions that cannot be implemented
3     with C macros
4 */
5 #include "include/mpiuni/mpi.h"
6 #include "petsc.h"
7 
8 #if defined(PETSC_HAVE_STDLIB_H)
9 #include <stdlib.h>
10 #endif
11 
#define MPI_SUCCESS 0
#define MPI_FAILURE 1
/* Scratch slot; presumably used by the macros in mpiuni/mpi.h to absorb
   unused arguments — confirm against that header (not visible here). */
void    *MPIUNI_TMP        = 0;
/* Size in bytes of each MPIUNI datatype, indexed by datatype code.
   NOTE(review): entries look like int, float, double, double-complex,
   char — verify the code assignments in mpiuni/mpi.h. */
int     MPIUNI_DATASIZE[5] = { sizeof(int),sizeof(float),sizeof(double),2*sizeof(double),sizeof(char)};
16 /*
17        With MPI Uni there is only one communicator, which is called 1.
18 */
#define MAX_ATTR 128

/* One communicator-attribute slot: user value plus the state needed to
   run the user's delete callback when the attribute is removed. */
typedef struct {
  void                *extra_state;   /* user state passed back to del() */
  void                *attribute_val; /* the cached attribute value */
  int                 active;         /* nonzero while the attribute is set */
  MPI_Delete_function *del;           /* optional user delete callback */
} MPI_Attr;

static MPI_Attr attr[MAX_ATTR];
/* num_attr starts at 1: slot 0 is reserved for the built-in MPI_TAG_UB
   attribute (installed lazily by Keyval_setup()). Slots are never reused. */
static int      num_attr = 1,mpi_tag_ub = 100000000;
30 
31 #if defined(__cplusplus)
32 extern "C" {
33 #endif
34 
35 /*
36    To avoid problems with prototypes to the system memcpy() it is duplicated here
37 */
/*
   Byte-wise copy of n bytes from b to a. Duplicated here (rather than
   calling the system memcpy()) to avoid problems with system prototypes.
   Always returns 0. Regions are assumed not to overlap.
*/
int MPIUNI_Memcpy(void *a,const void* b,int n) {
  char       *dst = (char*)a;
  const char *src = (const char*)b;
  int        k;

  for (k=0; k<n; k++) dst[k] = src[k];
  return 0;
}
46 
47 /*
48    Used to set the built-in MPI_TAG_UB attribute
49 */
50 static int Keyval_setup(void)
51 {
52   attr[0].active        = 1;
53   attr[0].attribute_val = &mpi_tag_ub;
54   return 0;
55 }
56 
57 /*
58          These functions are mapped to the Petsc_ name by ./mpi.h
59 */
60 int Petsc_MPI_Keyval_create(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
61 {
62   if (num_attr >= MAX_ATTR) MPI_Abort(MPI_COMM_WORLD,1);
63 
64   attr[num_attr].extra_state = extra_state;
65   attr[num_attr].del         = delete_fn;
66   *keyval                    = num_attr++;
67   return 0;
68 }
69 
70 int Petsc_MPI_Keyval_free(int *keyval)
71 {
72   attr[*keyval].active = 0;
73   return MPI_SUCCESS;
74 }
75 
76 int Petsc_MPI_Attr_put(MPI_Comm comm,int keyval,void *attribute_val)
77 {
78   attr[keyval].active        = 1;
79   attr[keyval].attribute_val = attribute_val;
80   return MPI_SUCCESS;
81 }
82 
83 int Petsc_MPI_Attr_delete(MPI_Comm comm,int keyval)
84 {
85   if (attr[keyval].active && attr[keyval].del) {
86     (*(attr[keyval].del))(comm,keyval,attr[keyval].attribute_val,attr[keyval].extra_state);
87   }
88   attr[keyval].active        = 0;
89   attr[keyval].attribute_val = 0;
90   return MPI_SUCCESS;
91 }
92 
93 int Petsc_MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
94 {
95   if (!keyval) Keyval_setup();
96   *flag                   = attr[keyval].active;
97   *(void **)attribute_val = attr[keyval].attribute_val;
98   return MPI_SUCCESS;
99 }
100 
101 static int dups = 0;
102 int Petsc_MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
103 {
104   *out = comm;
105   dups++;
106   return 0;
107 }
108 
109 int Petsc_MPI_Comm_free(MPI_Comm *comm)
110 {
111   int i;
112 
113   if (--dups) return MPI_SUCCESS;
114   for (i=0; i<num_attr; i++) {
115     if (attr[i].active && attr[i].del) {
116       (*attr[i].del)(*comm,i,attr[i].attribute_val,attr[i].extra_state);
117     }
118     attr[i].active = 0;
119   }
120   return MPI_SUCCESS;
121 }
122 
123 /* --------------------------------------------------------------------------*/
124 
/*
   Aborts the (single-process) "parallel" program via abort(); the comm
   and errorcode arguments are ignored.
*/
int Petsc_MPI_Abort(MPI_Comm comm,int errorcode)
{
  abort();            /* never returns */
  return MPI_SUCCESS; /* unreachable; satisfies the MPI_Abort signature */
}
130 
/* Set to 1 by mpi_init_() below, reset by Petsc_MPI_Finalize(). */
static int MPI_was_initialized = 0;

/*
   Reports whether MPI_Init has been called (1) or not (0) via *flag.
*/
int Petsc_MPI_Initialized(int *flag)
{
  *flag = MPI_was_initialized;
  return 0;
}
138 
139 int Petsc_MPI_Finalize(void)
140 {
141   MPI_was_initialized = 0;
142   return 0;
143 }
144 
145 /* -------------------     Fortran versions of several routines ------------------ */
146 
/*
   Map the Fortran entry-point names to the Fortran compiler's symbol
   mangling convention: all-capitals, no underscore (plain C name), or a
   double trailing underscore. The default spelling used in the
   definitions below (single trailing underscore) needs no remapping.
*/
#if defined(PETSC_HAVE_FORTRAN_CAPS)
#define mpi_init_             MPI_INIT
#define mpi_finalize_         MPI_FINALIZE
#define mpi_comm_size_        MPI_COMM_SIZE
#define mpi_comm_rank_        MPI_COMM_RANK
#define mpi_abort_            MPI_ABORT
#define mpi_allreduce_        MPI_ALLREDUCE
#define mpi_barrier_          MPI_BARRIER
#define mpi_bcast_            MPI_BCAST
#define mpi_gather_           MPI_GATHER
#define mpi_allgather_        MPI_ALLGATHER
#define mpi_comm_split_       MPI_COMM_SPLIT
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
#define mpi_init_             mpi_init
#define mpi_finalize_         mpi_finalize
#define mpi_comm_size_        mpi_comm_size
#define mpi_comm_rank_        mpi_comm_rank
#define mpi_abort_            mpi_abort
#define mpi_allreduce_        mpi_allreduce
#define mpi_barrier_          mpi_barrier
#define mpi_bcast_            mpi_bcast
#define mpi_gather_           mpi_gather
#define mpi_allgather_        mpi_allgather
#define mpi_comm_split_       mpi_comm_split
#endif

#if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
#define mpi_init_             mpi_init__
#define mpi_finalize_         mpi_finalize__
#define mpi_comm_size_        mpi_comm_size__
#define mpi_comm_rank_        mpi_comm_rank__
#define mpi_abort_            mpi_abort__
#define mpi_allreduce_        mpi_allreduce__
#define mpi_barrier_          mpi_barrier__
#define mpi_bcast_            mpi_bcast__
#define mpi_gather_           mpi_gather__
#define mpi_allgather_        mpi_allgather__
#define mpi_comm_split_       mpi_comm_split__
#endif
186 
/* Fortran MPI_INIT: records that MPI has been initialized. */
void PETSC_STDCALL  mpi_init_(int *ierr)
{
  MPI_was_initialized = 1;
  *ierr = MPI_SUCCESS;
}
192 
/* Fortran MPI_FINALIZE: nothing to tear down in MPI-uni.
   NOTE(review): unlike Petsc_MPI_Finalize() this does not clear
   MPI_was_initialized — confirm whether that asymmetry is intended. */
void PETSC_STDCALL  mpi_finalize_(int *ierr)
{
  *ierr = MPI_SUCCESS;
}
197 
/* Fortran MPI_COMM_SIZE: uniprocessor, so the size is always 1. */
void PETSC_STDCALL mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
{
  *size = 1;
  *ierr = 0;
}
203 
/* Fortran MPI_COMM_RANK: the only process always has rank 0. */
void PETSC_STDCALL mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
{
  *rank=0;
  *ierr=MPI_SUCCESS;
}
209 
/* Fortran MPI_COMM_SPLIT: with a single communicator, any split just
   yields the input communicator; color and key are ignored. */
void PETSC_STDCALL mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr=MPI_SUCCESS;
}
215 
/* Fortran MPI_ABORT: terminates via abort(); arguments are ignored. */
void PETSC_STDCALL mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
{
  abort();              /* never returns */
  *ierr = MPI_SUCCESS;  /* unreachable */
}
221 
222 void PETSC_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
223 {
224   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
225   *ierr = MPI_SUCCESS;
226 }
227 
/* Fortran MPI_BARRIER: a single process is trivially synchronized. */
void PETSC_STDCALL mpi_barrier_(MPI_Comm *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
232 
/* Fortran MPI_BCAST: the root is the only process, so its buffer is
   already "broadcast"; nothing to do. */
void PETSC_STDCALL mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
237 
238 
239 void PETSC_STDCALL mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype, int *root,int *comm,int *ierr)
240 {
241   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
242   *ierr = MPI_SUCCESS;
243 }
244 
245 
246 void PETSC_STDCALL mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype,int *comm,int *ierr)
247 {
248   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
249   *ierr = MPI_SUCCESS;
250 }
251 
252 #if defined(__cplusplus)
253 }
254 #endif
255 
256 
257