xref: /petsc/src/benchmarks/streams/makefile (revision 6c5693054f5123506dab0f5da2d352ed973d0e50)
1-include ../../../petscdir.mk
2
3MANSEC        = Sys
4#CFLAGS        = -mcmodel=large
# The CFLAGS setting above allows the use of very large global arrays
6
7include ${PETSC_DIR}/lib/petsc/conf/variables
8include ${PETSC_DIR}/lib/petsc/conf/rules
9
# Serial (single-process) streams benchmark binary.
# The object file is removed after linking; presumably so the next build
# always recompiles it — TODO confirm intent.
BasicVersion: BasicVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
13
# MPI streams benchmark binary, used by the mpistream/mpistreams targets below.
MPIVersion: MPIVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
17
# CUDA streams benchmark binary, used by the cudastreamjsrun target below.
CUDAVersion: CUDAVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
21
# OpenMP streams benchmark binary.
# NOTE(review): deliberately linked without ${PETSC_LIB}, unlike the MPI/CUDA
# variants — presumably a PETSc-free pure-OpenMP executable; confirm.
OpenMPVersion: OpenMPVersion.o
	-@${CLINKER} -o $@ $<
	@${RM} -f $<
25
# OpenMP streams variant structured like the MPI version; also linked
# without ${PETSC_LIB} (see OpenMPVersion).
OpenMPVersionLikeMPI: OpenMPVersionLikeMPI.o
	-@${CLINKER} -o $@ $<
	@${RM} -f $<
29
# SSE streams benchmark binary. Commands are intentionally echoed here
# (no leading @), unlike the other *Version targets.
SSEVersion: SSEVersion.o
	-${CLINKER} -o SSEVersion SSEVersion.o ${PETSC_LIB}
	${RM} -f SSEVersion.o
33
# Pthreads streams benchmark binary.
PthreadVersion: PthreadVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
37
38# If not set by users, use a binding that's good for both MPICH and Open MPI
39MPI_BINDING ?= -map-by numa -bind-to core
40
# make streams [NPMAX=integer_number_of_MPI_processes_to_use] [MPI_BINDING='binding options']
#
# Runs ./MPIVersion with 1, 2, ..., NPMAX MPI ranks, appending each run's
# output to scaling.log, then summarizes the results with process.py.
# Errors out with a usage hint when NPMAX is unset (the "${NPMAX}foo" = "foo"
# idiom tests for an empty make variable). $$ passes a literal $ to the shell;
# the space-indented 'done' lines are backslash-continuations of the recipe line.
# NOTE(review): the hint says 'make streams' — presumably a wrapper target
# defined in the included conf/rules; confirm.
mpistream:  MPIVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or       [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING} ${MPI_BINDING_VIEW} -n <np> ./MPIVersion' using 'NPMAX=${NPMAX}'\n"
	-@printf "(Hint: To change MPI process binding, use env var MPI_BINDING. To visualize the binding, with Open MPI, use another env var, MPI_BINDING_VIEW=-display-map, see 'mpiexec --help binding | grep mapping';"
	-@printf " with MPICH, use HYDRA_TOPO_DEBUG=1 instead, see 'mpiexec -bind-to -help')\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	  ${MPIEXEC} ${MPI_BINDING} ${MPI_BINDING_VIEW} -n $${i} ./MPIVersion | tee -a scaling.log; \
        done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py MPI fileoutput
53
# Works on SUMMIT (jsrun-style resource-set flags: -c cores, -a tasks, -g GPUs).
# Runs ./CUDAVersion over an increasing number of ranks/resource sets,
# appending output to scaling.log, then summarizes with process.py.
# First loop: one resource set with i = 1..min(NPMAX,7) cores/tasks.
# Second loop: grows resource sets in steps of 7 ranks with 6 cores each.
# Fix: the NPMAX-unset usage hint was copy-pasted from mpistream and told the
# user to run 'make streams' with MPI_BINDING options; it now names this target.
cudastreamjsrun:  CUDAVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with make cudastreamjsrun NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}'\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ] && [ $${i} -lt 7 ]; do i=`expr $${i} + 1`; \
	  ${MPIEXEC} ${MPI_BINDING} -n 1 -c$${i} -a$${i} -g1 ./CUDAVersion | tee -a scaling.log; \
        done
	-@n=1; i=7; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 7`; n=`expr $${n} + 1`; \
	       c=5; while [ $${c} -lt 7 ]; do c=`expr $${c} + 1`; \
	  ${MPIEXEC} ${MPI_BINDING} -n $${n} -c$${c} -a$${c} -g1 ./CUDAVersion | tee -a scaling.log; \
        done; done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py CUDA fileoutput
68
# make openmpstream NPMAX=<integer number of threads>
# Runs ./OpenMPVersion with OMP_NUM_THREADS = 1..NPMAX, appending each run's
# output to scaling.log, then summarizes with process.py.
# Fix: the second printf line had a garbled recipe prefix '@-@' (copy-paste
# typo); normalized to '-@' to match every other recipe line in this file.
openmpstream:  OpenMPVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with make openmpstream NPMAX=<integer number of threads>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running openmpstreams using 'NPMAX=${NPMAX}'\n"
	-@printf "You can set OMP_DISPLAY_AFFINITY=true to show binding, and set OMP_PLACES, OMP_PROC_BIND properly to set binding\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	  OMP_NUM_THREADS=$${i} ./OpenMPVersion  | tee -a scaling.log;\
        done
	-@${PYTHON} process.py OpenMP fileoutput
78
# make openmplikempistream NPMAX=<integer number of threads>
# Same sweep as openmpstream but drives ./OpenMPVersionLikeMPI.
# Fix: the second printf line had a garbled recipe prefix '@-@' (copy-paste
# typo); normalized to '-@' to match every other recipe line in this file.
openmplikempistream:  OpenMPVersionLikeMPI
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with make openmplikempistream NPMAX=<integer number of threads>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running openmplikempistreams using 'NPMAX=${NPMAX}'\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	  OMP_NUM_THREADS=$${i} ./OpenMPVersionLikeMPI  | tee -a scaling.log;\
        done
	-@${PYTHON} process.py OpenMPLikeMPI fileoutput
87
# Show the machine topology via hwloc's lstopo, launched under the same MPI
# binding as the benchmarks. A no-op when ${LSTOPO} is empty/unset.
hwloc:
	-@if test -n "${LSTOPO}"; then ${MPIEXEC} ${MPI_BINDING} -n 1 ${LSTOPO} --no-icaches --no-io --ignore PU ; fi
90
# Run the MPI streams sweep plus the topology display, then print the
# summary (process.py without 'fileoutput' — presumably console-only output;
# confirm against process.py).
mpistreams: mpistream hwloc
	-@${PYTHON} process.py MPI
93
# Run the OpenMP streams sweep plus the topology display, then print the
# summary (process.py without 'fileoutput' — presumably console-only output;
# confirm against process.py).
openmpstreams: openmpstream hwloc
	-@${PYTHON} process.py OpenMP
96