xref: /petsc/src/benchmarks/streams/makefile (revision 21e3ffae2f3b73c0bd738cf6d0a809700fc04bb0)
1-include ../../../petscdir.mk
2
3MANSEC        = Sys
4
5include ${PETSC_DIR}/lib/petsc/conf/variables
6include ${PETSC_DIR}/lib/petsc/conf/rules
7include ${PETSC_DIR}/lib/petsc/conf/test
8
# Link the single-process (no MPI launch required) STREAM benchmark.
# The object file is deleted after linking so a rebuild always recompiles.
BasicVersion: BasicVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
12
# Link the MPI STREAM benchmark used by the mpistream/mpistreams targets.
# The object file is deleted after linking so a rebuild always recompiles.
MPIVersion: MPIVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
16
# Link the CUDA STREAM benchmark used by the cudastreamjsrun target.
# The object file is deleted after linking so a rebuild always recompiles.
CUDAVersion: CUDAVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
20
# Link the OpenMP STREAM benchmark used by the openmpstream target.
# NOTE(review): unlike the other *Version rules this does not link ${PETSC_LIB};
# presumably the OpenMP kernel is self-contained — confirm before changing.
OpenMPVersion: OpenMPVersion.o
	-@${CLINKER} -o $@ $<
	@${RM} -f $<
24
# Link the SSE STREAM benchmark.
# Recipe prefixes made consistent with the sibling *Version rules
# (-@ tolerates and silences link failures, @ silences the cleanup).
SSEVersion: SSEVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
28
# Link the pthreads STREAM benchmark.
# The object file is deleted after linking so a rebuild always recompiles.
PthreadVersion: PthreadVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<
32
# make streams [NPMAX=integer_number_of_MPI_processes_to_use] [MPI_BINDING='binding options']
#
# Runs the MPI STREAM benchmark on 1..NPMAX ranks, appending every run to
# scaling.log, then post-processes the log with process.py.
# NPMAX is mandatory; the first (unsilenced, non '-') recipe line aborts the
# target with a usage message when it is unset.
mpistream:  MPIVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or       [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=$$(($${i} + 1)); \
	  ${MPIEXEC} ${MPI_BINDING} -n $${i} ./MPIVersion | tee -a scaling.log; \
	done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py MPI fileoutput
43
# Works on SUMMIT (uses jsrun-style resource-set options: -c CPUs, -a tasks,
# -g GPUs per resource set — TODO confirm against the site's jsrun docs).
# Phase 1 sweeps 1..min(NPMAX,7) CPUs on a single resource set; phase 2 grows
# the number of resource sets in steps of 7 ranks, trying 6 and 7 CPUs each.
cudastreamjsrun:  CUDAVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make cudastreamjsrun NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or       [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make cudastreamjsrun NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ] && [ $${i} -lt 7 ]; do i=$$(($${i} + 1)); \
	  ${MPIEXEC} ${MPI_BINDING} -n 1 -c$${i} -a$${i} -g1 ./CUDAVersion | tee -a scaling.log; \
	done
	-@n=1; i=7; while [ $${i} -lt ${NPMAX} ]; do i=$$(($${i} + 7)); n=$$(($${n} + 1)); \
	  c=5; while [ $${c} -lt 7 ]; do c=$$(($${c} + 1)); \
	  ${MPIEXEC} ${MPI_BINDING} -n $${n} -c$${c} -a$${c} -g1 ./CUDAVersion | tee -a scaling.log; \
	done; done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py CUDA fileoutput
58
# make openmpstream NPMAX=<integer number of threads>
#
# Runs the OpenMP STREAM benchmark with 1..NPMAX threads (via OMP_NUM_THREADS),
# appending every run to scaling.log, then post-processes the log.
# Fixed: the progress printf had a garbled '@-@' recipe prefix; now '-@'
# like the surrounding lines.
openmpstream:  OpenMPVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with make openmpstream NPMAX=<integer number of threads>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running openmpstreams using 'NPMAX=${NPMAX}'\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=$$(($${i} + 1)); \
	  OMP_NUM_THREADS=$${i} ./OpenMPVersion  | tee -a scaling.log;\
	done
	-@${PYTHON} process.py OpenMP fileoutput
67
# Print the machine topology on one rank when hwloc's lstopo is available;
# a silent no-op when ${LSTOPO} is empty/unset.
hwloc:
	-@if [ -n "${LSTOPO}" ]; then ${MPIEXEC} ${MPI_BINDING} -n 1 ${LSTOPO} --no-icaches --no-io --ignore PU ; fi
70
# Convenience target: run the MPI streams benchmark and the topology report,
# then reprocess scaling.log without the 'fileoutput' argument.
# NOTE(review): presumably omitting 'fileoutput' prints to stdout only —
# confirm against process.py.
mpistreams: mpistream hwloc
	-@${PYTHON} process.py MPI
73
74
# Convenience target: run the OpenMP streams benchmark and the topology report,
# then reprocess scaling.log without the 'fileoutput' argument.
# NOTE(review): presumably omitting 'fileoutput' prints to stdout only —
# confirm against process.py.
openmpstreams: openmpstream hwloc
	-@${PYTHON} process.py OpenMP
77
78
79