# PETSc STREAMS memory-bandwidth benchmark makefile.
#
# Builds the benchmark variants (Basic, MPI, OpenMP, SSE, Pthread, CUDA)
# with the PETSc build system and provides driver targets that run them
# over 1..NPMAX processes/threads, collecting results in scaling.log and
# post-processing them with process.py.
#
# Typical usage:
#   make streams NPMAX=<n> [MPI_BINDING='-bind-to core -map-by numa']
#   make openmpstreams NPMAX=<n>
ALL:

CFLAGS    =
FFLAGS    =
CPPFLAGS  =
FPPFLAGS  =
LOCDIR    = src/benchmarks/streams/
EXAMPLESC = BasicVersion.c MPIVersion.c OpenMPVersion.c SSEVersion.c PthreadVersion.c CUDAVersion.cu
EXAMPLESF =
TESTS     = BasicVersion OpenMPVersion
MANSEC    = Sys

include ${PETSC_DIR}/lib/petsc/conf/variables
include ${PETSC_DIR}/lib/petsc/conf/rules
include ${PETSC_DIR}/lib/petsc/conf/test

# The driver targets below are commands, not files; declare them phony so a
# stray file with one of these names cannot silently mask the rule.
.PHONY: mpistream mpistreams cudastreamjsrun openmpstream openmpstreams hwloc

# Link rules: each builds one benchmark executable from its object file and
# then deletes the object file to keep the directory clean (the next build
# recompiles from source).
BasicVersion: BasicVersion.o
	-@${CLINKER} -o BasicVersion BasicVersion.o ${PETSC_LIB}
	@${RM} -f BasicVersion.o

MPIVersion: MPIVersion.o
	-@${CLINKER} -o MPIVersion MPIVersion.o ${PETSC_LIB}
	@${RM} -f MPIVersion.o

CUDAVersion: CUDAVersion.o
	-@${CLINKER} -o CUDAVersion CUDAVersion.o ${PETSC_LIB}
	@${RM} -f CUDAVersion.o

# NOTE(review): unlike the other variants this link line omits ${PETSC_LIB};
# presumably OpenMPVersion is a standalone OpenMP benchmark that does not
# call PETSc — confirm before adding the library here.
OpenMPVersion: OpenMPVersion.o
	-@${CLINKER} -o OpenMPVersion OpenMPVersion.o
	@${RM} -f OpenMPVersion.o

# Silenced with -@/@ for consistency with the sibling link rules above.
SSEVersion: SSEVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

PthreadVersion: PthreadVersion.o
	-@${CLINKER} -o PthreadVersion PthreadVersion.o ${PETSC_LIB}
	@${RM} -f PthreadVersion.o

# make streams [NPMAX=integer_number_of_MPI_processes_to_use] [MPI_BINDING='binding options']
# Runs MPIVersion with 1..NPMAX MPI ranks, appending each run to scaling.log.
# The "${NPMAX}foo" = "foo" test is the portable-sh idiom for "NPMAX unset".
mpistream: MPIVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	${MPIEXEC} ${MPI_BINDING} -n $${i} ./MPIVersion | tee -a scaling.log; \
	done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py MPI fileoutput

# Works on SUMMIT: uses jsrun-style resource flags (-c cores, -a tasks, -g GPUs).
# First loop: a single resource set with 1..min(NPMAX,7) cores; second loop:
# grows the number of resource sets in steps of 7 ranks with 6-7 cores each.
# NOTE(review): the usage message below says "make streams" although this
# target is cudastreamjsrun — confirm the intended wording with upstream.
cudastreamjsrun: CUDAVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ] && [ $${i} -lt 7 ]; do i=`expr $${i} + 1`; \
	${MPIEXEC} ${MPI_BINDING} -n 1 -c$${i} -a$${i} -g1 ./CUDAVersion | tee -a scaling.log; \
	done
	-@n=1; i=7; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 7`; n=`expr $${n} + 1`; \
	c=5; while [ $${c} -lt 7 ]; do c=`expr $${c} + 1`; \
	${MPIEXEC} ${MPI_BINDING} -n $${n} -c$${c} -a$${c} -g1 ./CUDAVersion | tee -a scaling.log; \
	done; done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py CUDA fileoutput

# Runs OpenMPVersion with OMP_NUM_THREADS=1..NPMAX, appending to scaling.log.
openmpstream: OpenMPVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with make openmpstream NPMAX=<integer number of threads>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running openmpstreams using 'NPMAX=${NPMAX}'\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	OMP_NUM_THREADS=$${i} ./OpenMPVersion | tee -a scaling.log;\
	done
	-@${PYTHON} process.py OpenMP fileoutput

# Prints the machine topology when hwloc's lstopo is available (LSTOPO set
# by the PETSc configuration); silently does nothing otherwise.
hwloc:
	-@if [ "${LSTOPO}foo" != "foo" ]; then ${MPIEXEC} ${MPI_BINDING} -n 1 ${LSTOPO} --no-icaches --no-io --ignore PU ; fi

# Convenience wrappers: run the benchmark, show the topology, then produce
# the human-readable summary (process.py without the fileoutput argument).
mpistreams: mpistream hwloc
	-@${PYTHON} process.py MPI

openmpstreams: openmpstream hwloc
	-@${PYTHON} process.py OpenMP