#include <stdio.h>
#include <math.h>
#include <limits.h>
#include <float.h>

/*
 * Program: Stream
 * Programmer: Joe R. Zagar
 * Revision: 4.0-BETA, October 24, 1995
 * Original code developed by John D. McCalpin
 *
 * This program measures memory transfer rates in MB/s for simple
 * computational kernels coded in C.  These numbers reveal the quality
 * of code generation for simple uncacheable kernels as well as showing
 * the cost of floating-point operations relative to memory accesses.
 *
 * INSTRUCTIONS:
 *
 *       1) Stream requires a good bit of memory to run.  Adjust the
 *          value of 'N' (below) to give a 'timing calibration' of
 *          at least 20 clock-ticks.  This will provide rate estimates
 *          that should be good to about 5% precision.
 */

#define N      2000000
#define NTIMES 50
#define OFFSET 0
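
/*
 * Worked example of the memory footprint per MPI rank, assuming the
 * common case of 8-byte doubles: three static arrays of N+OFFSET
 * elements each, i.e. 3 * 2000000 * 8 bytes ~= 45.8 MB.  Each rank's
 * arrays should be much larger than the last-level cache so that the
 * kernels stream from memory rather than cache.
 */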

/*
 *      2) Compile the code with full optimization.  Many compilers
 *         generate unreasonably bad code before the optimizer tightens
 *         things up.  If the results are unreasonably good, on the
 *         other hand, the optimizer might be too smart for me!
 *
 *         Try compiling with:
 *               cc -O stream_d.c second.c -o stream_d -lm
 *
 *         This is known to work on Cray, SGI, IBM, and Sun machines.
 *
 *      3) Mail the results to mccalpin@cs.virginia.edu
 *         Be sure to include:
 *              a) computer hardware model number and software revision
 *              b) the compiler flags
 *              c) all of the output from the test case.
 *
 * Thanks!
 */
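
/*
 * The cc line above is for the serial stream_d.c and does not apply to
 * this MPI version.  A minimal sketch of a standalone build and run,
 * assuming an MPI compiler wrapper and launcher are on the PATH and that
 * PETSC_DIR/PETSC_ARCH point at a PETSc installation (inside PETSc this
 * file is normally built by the PETSc makefiles instead):
 *
 *     mpicc -O3 -I$PETSC_DIR/include -I$PETSC_DIR/$PETSC_ARCH/include \
 *           MPIVersion.c -o stream_mpi
 *     mpiexec -n 4 ./stream_mpi
 */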

#define HLINE "-------------------------------------------------------------\n"

#ifndef MIN
#define MIN(x,y) ((x)<(y) ? (x) : (y))
#endif
#ifndef MAX
#define MAX(x,y) ((x)>(y) ? (x) : (y))
#endif

static double a[N+OFFSET],
              b[N+OFFSET],
              c[N+OFFSET];
/*double *a,*b,*c;*/
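
/*
 * The work arrays are statically allocated; the commented-out pointer
 * declarations above, together with the matching malloc calls disabled
 * in main(), are the heap-based alternative for sizes where static
 * allocation is inconvenient.
 */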

static double mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static const char *label[4] = {"Copy:      ", "Scale:     ", "Add:       ", "Triad:     "};

static double bytes[4] = {
  2 * sizeof(double) * N,
  2 * sizeof(double) * N,
  3 * sizeof(double) * N,
  3 * sizeof(double) * N
};
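
/*
 * Bytes moved per element by each kernel, matching the table above:
 *   Copy  (c = a)        : 1 read + 1 write  = 2 words
 *   Scale (b = s*c)      : 1 read + 1 write  = 2 words
 *   Add   (c = a + b)    : 2 reads + 1 write = 3 words
 *   Triad (a = b + s*c)  : 2 reads + 1 write = 3 words
 * These counts are what turn the best times into MB/s at the end.
 */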

#include <petscsys.h>
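/* petscsys.h supplies the MPI declarations used below; in a build
   outside of PETSc, including <mpi.h> directly would likely suffice. */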

int main(int argc,char **args)
{
  int          quantum, checktick(void);
  register int j, k;
  double       scalar, t, times[4][NTIMES],irate[4],rate[4];
  int          rank,size,resultlen;
  char         hostname[MPI_MAX_PROCESSOR_NAME];
  MPI_Status   status;

  MPI_Init(&argc,&args);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  if (!rank) printf("Number of MPI processes %d\n",size);

  for (j=0; j<MPI_MAX_PROCESSOR_NAME; j++) {
    hostname[j] = 0;
  }
  MPI_Get_processor_name(hostname,&resultlen);
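
  /* Roll call: rank 0 prints its own hostname and then receives and
     prints each remaining rank's hostname in rank order; every other
     rank just sends its name to rank 0. */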
  if (!rank) {
    printf("Process %d %s\n",rank,hostname);
    for (j=1; j<size; j++) {
      MPI_Recv(hostname,MPI_MAX_PROCESSOR_NAME,MPI_CHAR,j,0,MPI_COMM_WORLD,&status);
      printf("Process %d %s\n",j,hostname);
    }
  } else {
    MPI_Send(hostname,MPI_MAX_PROCESSOR_NAME,MPI_CHAR,0,0,MPI_COMM_WORLD);
  }
  MPI_Barrier(MPI_COMM_WORLD);

  /* --- SETUP --- determine precision and check timing --- */

  if (!rank) {
    /*printf(HLINE);
    printf("Array size = %d, Offset = %d\n", N, OFFSET);
    printf("Total memory required = %.1f MB.\n", (3.0 * N * sizeof(double)) / 1048576.0);
    printf("Each test is run %d times, but only\n", NTIMES);
    printf("the *best* time for each is used.\n");
    printf(HLINE); */
  }

  /* Get initial value for system clock. */

  /*  a = malloc(N*sizeof(double));
  b = malloc(N*sizeof(double));
  c = malloc(N*sizeof(double));*/
  for (j=0; j<N; j++) {
    a[j] = 1.0;
    b[j] = 2.0;
    c[j] = 0.0;
  }

  if (!rank) {
    quantum = checktick(); /* estimated clock granularity in microseconds */
    /* if (quantum >= 1) printf("Your clock granularity/precision appears to be %d microseconds.\n", quantum);
       else printf("Your clock granularity appears to be less than one microsecond.\n"); */
  }

  t = MPI_Wtime();
  for (j = 0; j < N; j++) a[j] = 2.0E0 * a[j];
  t = 1.0E6 * (MPI_Wtime() - t);
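
  /* t is now the time in microseconds for one sweep over a[]; per the
     instructions above, it should come to at least 20 clock ticks
     (20*quantum) for rate estimates good to about 5%. */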

  if (!rank) {
    /*  printf("Each test below will take on the order of %d microseconds.\n", (int) t);
    printf("   (= %d clock ticks)\n", (int) (t/quantum));
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");
    printf(HLINE);*/
  }

  /*   --- MAIN LOOP --- repeat test cases NTIMES times --- */

  scalar = 3.0;
  for (k=0; k<NTIMES; k++)
  {
    MPI_Barrier(MPI_COMM_WORLD);
    times[0][k] = MPI_Wtime();
    /* should all these barriers be pulled outside of the time call? */
    MPI_Barrier(MPI_COMM_WORLD);
    for (j=0; j<N; j++) c[j] = a[j];
    MPI_Barrier(MPI_COMM_WORLD);
    times[0][k] = MPI_Wtime() - times[0][k];

    times[1][k] = MPI_Wtime();
    MPI_Barrier(MPI_COMM_WORLD);
    for (j=0; j<N; j++) b[j] = scalar*c[j];
    MPI_Barrier(MPI_COMM_WORLD);
    times[1][k] = MPI_Wtime() - times[1][k];

    times[2][k] = MPI_Wtime();
    MPI_Barrier(MPI_COMM_WORLD);
    for (j=0; j<N; j++) c[j] = a[j]+b[j];
    MPI_Barrier(MPI_COMM_WORLD);
    times[2][k] = MPI_Wtime() - times[2][k];

    times[3][k] = MPI_Wtime();
    MPI_Barrier(MPI_COMM_WORLD);
    for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j];
    MPI_Barrier(MPI_COMM_WORLD);
    times[3][k] = MPI_Wtime() - times[3][k];
  }
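
  /* The barriers keep all ranks inside each kernel at the same time, at
     the cost of folding some synchronization overhead into the measured
     intervals -- the open question in the comment above. */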

  /*   --- SUMMARY --- */

  for (k=0; k<NTIMES; k++)
    for (j=0; j<4; j++) mintime[j] = MIN(mintime[j], times[j][k]);

  for (j=0; j<4; j++) irate[j] = 1.0E-06 * bytes[j]/mintime[j];
  MPI_Reduce(irate,rate,4,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
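
  /* Each rank converts its best (minimum) time per kernel into an MB/s
     rate; MPI_Reduce with MPI_SUM then totals the per-rank rates on
     rank 0, so the printed figure is the aggregate bandwidth across all
     processes. */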

  if (!rank) {
    printf("Function      Rate (MB/s)\n");
    for (j=0; j<4; j++) printf("%s%11.4f\n", label[j],rate[j]);
  }
  MPI_Finalize();
  return 0;
}

#define M 20

int checktick(void)
{
  int    i, minDelta, Delta;
  double t1, t2, timesfound[M];

/*  Collect a sequence of M unique time values from the system. */

  for (i = 0; i < M; i++) {
    t1 = MPI_Wtime();
    while (((t2=MPI_Wtime()) - t1) < 1.0E-6) ;
    timesfound[i] = t2;
  }

/*
 * Determine the minimum difference between these M values.
 * This result will be our estimate (in microseconds) for the
 * clock granularity.
 */

  minDelta = 1000000;
  for (i = 1; i < M; i++) {
    Delta    = (int)(1.0E6 * (timesfound[i]-timesfound[i-1]));
    minDelta = MIN(minDelta, MAX(Delta,0));
  }

  return minDelta;
}