# xref: /petsc/src/benchmarks/streams/makefile (revision a69119a591a03a9d906b29c0a4e9802e4d7c9795)
# Optional: picks up PETSC_DIR/PETSC_ARCH overrides; '-' ignores a missing file.
-include ../../../petscdir.mk

LOCDIR        = src/benchmarks/streams/
MANSEC        = Sys

# Standard PETSc build machinery: variables (compilers/flags), rules, test harness.
include ${PETSC_DIR}/lib/petsc/conf/variables
include ${PETSC_DIR}/lib/petsc/conf/rules
include ${PETSC_DIR}/lib/petsc/conf/test

# Single-process (no MPI launch) streams benchmark binary.
BasicVersion: BasicVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

# MPI-parallel streams benchmark binary; driven by the mpistream target below.
MPIVersion: MPIVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

# CUDA streams benchmark binary; driven by the cudastreamjsrun target below.
CUDAVersion: CUDAVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

# OpenMP streams benchmark binary; driven by the openmpstream target below.
# NOTE(review): unlike the sibling rules this links without ${PETSC_LIB} --
# presumably the OpenMP benchmark is self-contained; confirm this is intentional.
OpenMPVersion: OpenMPVersion.o
	-@${CLINKER} -o $@ $<
	@${RM} -f $<

# SSE streams benchmark binary.
# Consistency fix: this was the only rule without the '@' echo-suppression
# prefix used by every sibling rule ('-' still ignores link errors).
SSEVersion: SSEVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

# Pthreads streams benchmark binary.
PthreadVersion: PthreadVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

# make streams [NPMAX=integer_number_of_MPI_processes_to_use] [MPI_BINDING='binding options']
# Runs MPIVersion at 1..NPMAX ranks, appending each run to scaling.log,
# then post-processes the log with process.py.
mpistream:  MPIVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or       [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@np=0; while [ $${np} -lt ${NPMAX} ]; do np=`expr $${np} + 1`; \
	  ${MPIEXEC} ${MPI_BINDING} -n $${np} ./MPIVersion | tee -a scaling.log; \
	done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py MPI fileoutput

# Works on SUMMIT
# Runs CUDAVersion over increasing resource allocations (jsrun-style -c/-a/-g
# flags), logging to scaling.log, then post-processes with process.py.
# Fix: the usage message previously said 'make streams' (copy-pasted from
# mpistream); it now names this target.
cudastreamjsrun:  CUDAVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make cudastreamjsrun NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or       [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make cudastreamjsrun NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
# First sweep: a single resource set with i = 1..min(NPMAX,7) cores/accelerators.
	-@i=0; while [ $${i} -lt ${NPMAX} ] && [ $${i} -lt 7 ]; do i=`expr $${i} + 1`; \
	  ${MPIEXEC} ${MPI_BINDING} -n 1 -c$${i} -a$${i} -g1 ./CUDAVersion | tee -a scaling.log; \
	done
# Second sweep: grow the number of resource sets n in steps of 7 processes,
# trying 6 and 7 cores/accelerators per set each time.
	-@n=1; i=7; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 7`; n=`expr $${n} + 1`; \
	       c=5; while [ $${c} -lt 7 ]; do c=`expr $${c} + 1`; \
	  ${MPIEXEC} ${MPI_BINDING} -n $${n} -c$${c} -a$${c} -g1 ./CUDAVersion | tee -a scaling.log; \
	done; done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py CUDA fileoutput

# make openmpstream NPMAX=<integer number of threads>
# Runs OpenMPVersion with OMP_NUM_THREADS = 1..NPMAX, logging to scaling.log,
# then post-processes with process.py.
# Fix: the second printf line had a malformed recipe prefix '@-@'; normalized
# to the file-wide '-@' convention.
openmpstream:  OpenMPVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with make openmpstream NPMAX=<integer number of threads>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running openmpstreams using 'NPMAX=${NPMAX}'\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	  OMP_NUM_THREADS=$${i} ./OpenMPVersion  | tee -a scaling.log;\
	done
	-@${PYTHON} process.py OpenMP fileoutput

# If an lstopo binary was configured (LSTOPO non-empty), print the node's
# hardware topology (caches/PUs/IO suppressed) under the same MPI launcher.
hwloc:
	-@if [ -n "${LSTOPO}" ]; then ${MPIEXEC} ${MPI_BINDING} -n 1 ${LSTOPO} --no-icaches --no-io --ignore PU ; fi

# Full MPI workflow: run the benchmark sweep, show the topology, then
# produce the final (non-fileoutput) report.
mpistreams: mpistream hwloc
	-@${PYTHON} process.py MPI


# Full OpenMP workflow: run the benchmark sweep, show the topology, then
# produce the final (non-fileoutput) report.
openmpstreams: openmpstream hwloc
	-@${PYTHON} process.py OpenMP

