#include <stdio.h>
#include <math.h>
#include <limits.h>
#include <float.h>
#include <petscsys.h>

/*
 * Program: Stream
 * Programmer: Joe R. Zagar
 * Revision: 4.0-BETA, October 24, 1995
 * Original code developed by John D. McCalpin
 *
 * This program measures memory transfer rates in MB/s for simple
 * computational kernels coded in C.  These numbers reveal the quality
 * of code generation for simple uncacheable kernels as well as showing
 * the cost of floating-point operations relative to memory accesses.
 *
 * INSTRUCTIONS:
 *
 *       1) Stream requires a good bit of memory to run.  Adjust the
 *          value of 'N' (below) to give a 'timing calibration' of
 *          at least 20 clock-ticks.  This will provide rate estimates
 *          that should be good to about 5% precision.
 */

#define N      2000000
#define NTIMES 50
#define OFFSET 0

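/*
 * Worked example with the values above: each MPI rank statically allocates
 * three arrays of (N+OFFSET) doubles, i.e. 3 * 2,000,000 * 8 bytes, or
 * about 45.8 MB per process, so each kernel sweep streams far more data
 * than typical caches hold.
 */
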
/*
 *      2) Compile the code with full optimization.  Many compilers
 *         generate unreasonably bad code before the optimizer tightens
 *         things up.  If the results are unreasonably good, on the
 *         other hand, the optimizer might be too smart for me!
 *
 *         Try compiling with:
 *               cc -O stream_d.c second.c -o stream_d -lm
 *
 *         This is known to work on Cray, SGI, IBM, and Sun machines.
 *
 *      3) Mail the results to mccalpin@cs.virginia.edu
 *         Be sure to include:
 *              a) computer hardware model number and software revision
 *              b) the compiler flags
 *              c) all of the output from the test case.
 *
 * Thanks!
 */

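/*
 * Note: the command above applies to the serial stream_d.c.  This MPI
 * version is normally built by the PETSc makefiles; a hand-built sketch
 * (illustrative only, paths depend on the installation) would look like:
 *       mpicc -O2 MPIVersion.c -o MPIVersion -I$PETSC_DIR/include ...
 */
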
#define HLINE "-------------------------------------------------------------\n"

#ifndef MIN
#define MIN(x,y) ((x)<(y) ? (x) : (y))
#endif
#ifndef MAX
#define MAX(x,y) ((x)>(y) ? (x) : (y))
#endif

static double a[N+OFFSET],
              b[N+OFFSET],
              c[N+OFFSET];
/* dynamic-allocation variant: double *a,*b,*c; */

static double mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static const char *label[4] = {"Copy:      ", "Scale:     ", "Add:       ", "Triad:     "};

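/*
 * Bytes moved per kernel per element: Copy and Scale touch two arrays
 * (one read + one write = 2 doubles), Add and Triad touch three
 * (two reads + one write = 3 doubles).
 */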
static double bytes[4] = {
  2 * sizeof(double) * N,
  2 * sizeof(double) * N,
  3 * sizeof(double) * N,
  3 * sizeof(double) * N
};

int checktick(void);

int main(int argc,char **args)
{
  int            quantum;
  register int   j, k;
  double         scalar, t, times[4][NTIMES],irate[4],rate[4];
  int            rank,size,resultlen;
  char           hostname[MPI_MAX_PROCESSOR_NAME];
  MPI_Status     status;
  int            ierr;

  ierr = PetscInitialize(&argc,&args,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(MPI_COMM_WORLD,&rank);if (ierr) return ierr;
  ierr = MPI_Comm_size(MPI_COMM_WORLD,&size);if (ierr) return ierr;
  if (!rank) printf("Number of MPI processes %d ",size);

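  /* Gather each rank's processor name on rank 0 and print the node list. */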
  for (j=0; j<MPI_MAX_PROCESSOR_NAME; j++) {
    hostname[j] = 0;
  }
  ierr = MPI_Get_processor_name(hostname,&resultlen);if (ierr) return ierr;
  if (!rank) {
    printf("Processor names %s ",hostname);
    for (j=1; j<size; j++) {
      ierr = MPI_Recv(hostname,MPI_MAX_PROCESSOR_NAME,MPI_CHAR,j,0,MPI_COMM_WORLD,&status);if (ierr) return ierr;
      printf("%s ",hostname);
    }
    printf("\n");
  } else {
    ierr = MPI_Send(hostname,MPI_MAX_PROCESSOR_NAME,MPI_CHAR,0,0,MPI_COMM_WORLD);if (ierr) return ierr;
  }
  ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;

  /* --- SETUP --- determine precision and check timing --- */

  /* The usual STREAM banner is suppressed in this version:
     printf(HLINE);
     printf("Array size = %d, Offset = %d\n", N, OFFSET);
     printf("Total memory required = %.1f MB.\n", (3 * N * sizeof(double)) / 1048576.0);
     printf("Each test is run %d times, but only\n", NTIMES);
     printf("the *best* time for each is used.\n");
     printf(HLINE); */

  /* Initialize the arrays (a dynamic-allocation variant would be:
     a = malloc(N*sizeof(double));
     b = malloc(N*sizeof(double));
     c = malloc(N*sizeof(double)); ). */
  for (j=0; j<N; j++) {
    a[j] = 1.0;
    b[j] = 2.0;
    c[j] = 0.0;
  }

  if (!rank) {
    quantum = checktick(); /* estimated clock granularity in microseconds */
    (void)quantum;         /* only used by the diagnostics disabled below */
    /* if (quantum >= 1) printf("Your clock granularity/precision appears to be %d microseconds.\n", quantum);
       else printf("Your clock granularity appears to be less than one microsecond.\n"); */
  }

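  /* Time one sweep over a[] and convert to microseconds; the disabled
     diagnostics below compare this figure against the clock quantum to
     check that each kernel runs for at least ~20 clock ticks. */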
  t = MPI_Wtime();
  for (j = 0; j < N; j++) a[j] = 2.0E0 * a[j];
  t = 1.0E6 * (MPI_Wtime() - t);

  /* Calibration diagnostics, suppressed in this version:
     if (!rank) {
       printf("Each test below will take on the order of %d microseconds.\n", (int) t);
       printf("   (= %d clock ticks)\n", (int) (t/quantum));
       printf("Increase the size of the arrays if this shows that\n");
       printf("you are not getting at least 20 clock ticks per test.\n");
       printf(HLINE);
     } */

  /*   --- MAIN LOOP --- repeat test cases NTIMES times --- */

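  /* The four STREAM kernels, each timed NTIMES times:
     Copy (c = a), Scale (b = scalar*c), Add (c = a+b), Triad (a = b+scalar*c). */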
  scalar = 3.0;
  for (k=0; k<NTIMES; k++)
  {
    ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;
    times[0][k] = MPI_Wtime();
    /* should all these barriers be pulled outside of the time call? */
    ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;
    for (j=0; j<N; j++) c[j] = a[j];
    ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;
    times[0][k] = MPI_Wtime() - times[0][k];

    times[1][k] = MPI_Wtime();
    ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;
    for (j=0; j<N; j++) b[j] = scalar*c[j];
    ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;
    times[1][k] = MPI_Wtime() - times[1][k];

    times[2][k] = MPI_Wtime();
    ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;
    for (j=0; j<N; j++) c[j] = a[j]+b[j];
    ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;
    times[2][k] = MPI_Wtime() - times[2][k];

    times[3][k] = MPI_Wtime();
    ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;
    for (j=0; j<N; j++) a[j] = b[j]+scalar*c[j];
    ierr = MPI_Barrier(MPI_COMM_WORLD);if (ierr) return ierr;
    times[3][k] = MPI_Wtime() - times[3][k];
  }

  /*   --- SUMMARY --- */

  for (k=0; k<NTIMES; k++)
    for (j=0; j<4; j++) mintime[j] = MIN(mintime[j], times[j][k]);

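  /* Convert each rank's best time to a bandwidth in MB/s, then sum the
     per-rank rates onto rank 0, so the reported Triad figure is the
     aggregate bandwidth across all MPI processes. */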
  for (j=0; j<4; j++) irate[j] = 1.0E-06 * bytes[j]/mintime[j];
  ierr = MPI_Reduce(irate,rate,4,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
  if (ierr) printf("Error calling MPI\n");

  if (!rank) {
    printf("%s  %11.4f   Rate (MB/s)\n", label[3],rate[3]);
    /* for (j=0; j<4; j++) printf("%s%11.4f\n", label[j],rate[j]); */
  }
  ierr = PetscFinalize();
  return ierr;
}

#define M 20

int checktick(void)
{
  int    i, minDelta, Delta;
  double t1, t2, timesfound[M];

/*  Collect a sequence of M unique time values from the system. */

  for (i = 0; i < M; i++) {
    t1 = MPI_Wtime();
    /* busy-wait until the reported time advances by at least 1 microsecond */
    while (((t2=MPI_Wtime()) - t1) < 1.0E-6) ;
    timesfound[i] = t1 = t2;
  }

/*
 * Determine the minimum difference between these M values.
 * This result will be our estimate (in microseconds) for the
 * clock granularity.
 */

  minDelta = 1000000;
  for (i = 1; i < M; i++) {
    Delta    = (int)(1.0E6 * (timesfound[i]-timesfound[i-1]));
    minDelta = MIN(minDelta, MAX(Delta,0));
  }

  return minDelta;
}