# Makefile for the PETSc STREAMS memory-bandwidth benchmarks.  Builds the
# serial, MPI, OpenMP, SSE, Pthread, and CUDA variants, and provides driver
# targets that run them over a range of MPI-process/thread counts, appending
# results to scaling.log for post-processing by process.py.
-include ../../../petscdir.mk

LOCDIR = src/benchmarks/streams/
EXAMPLESC = BasicVersion.c MPIVersion.c OpenMPVersion.c SSEVersion.c PthreadVersion.c CUDAVersion.cu
EXAMPLESF =
TESTS = BasicVersion OpenMPVersion
MANSEC = Sys

include ${PETSC_DIR}/lib/petsc/conf/variables
include ${PETSC_DIR}/lib/petsc/conf/rules
include ${PETSC_DIR}/lib/petsc/conf/test

# Link rules: each builds its executable then removes the object file so a
# subsequent "make <target>" recompiles from source.  The leading "-" ignores
# link errors so a missing optional feature (e.g. CUDA) does not abort a bulk
# build; "@" silences the (long) link command line.
BasicVersion: BasicVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

MPIVersion: MPIVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

CUDAVersion: CUDAVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

# NOTE(review): unlike its siblings this rule does not link ${PETSC_LIB};
# confirm that OpenMPVersion.c really has no PETSc library dependency.
OpenMPVersion: OpenMPVersion.o
	-@${CLINKER} -o $@ $<
	@${RM} -f $<

SSEVersion: SSEVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

PthreadVersion: PthreadVersion.o
	-@${CLINKER} -o $@ $< ${PETSC_LIB}
	@${RM} -f $<

# The driver targets below are commands, not files they create.
.PHONY: mpistream cudastreamjsrun openmpstream hwloc mpistreams openmpstreams

# make streams [NPMAX=integer_number_of_MPI_processes_to_use] [MPI_BINDING='binding options']
mpistream: MPIVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	${MPIEXEC} ${MPI_BINDING} -n $${i} ./MPIVersion | tee -a scaling.log; \
	done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py MPI fileoutput

# Works on SUMMIT.  Sweeps jsrun-style resource sets: first 1..6 cores in a
# single resource set, then increasing resource-set counts with 6-7 cores each,
# always one GPU per set.
cudastreamjsrun: CUDAVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ] && [ $${i} -lt 7 ]; do i=`expr $${i} + 1`; \
	${MPIEXEC} ${MPI_BINDING} -n 1 -c$${i} -a$${i} -g1 ./CUDAVersion | tee -a scaling.log; \
	done
	-@n=1; i=7; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 7`; n=`expr $${n} + 1`; \
	c=5; while [ $${c} -lt 7 ]; do c=`expr $${c} + 1`; \
	${MPIEXEC} ${MPI_BINDING} -n $${n} -c$${c} -a$${c} -g1 ./CUDAVersion | tee -a scaling.log; \
	done; done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py CUDA fileoutput

openmpstream: OpenMPVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with make openmpstream NPMAX=<integer number of threads>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running openmpstreams using 'NPMAX=${NPMAX}'\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	OMP_NUM_THREADS=$${i} ./OpenMPVersion | tee -a scaling.log;\
	done
	-@${PYTHON} process.py OpenMP fileoutput

# Print the machine topology (when hwloc's lstopo is configured) so bandwidth
# numbers can be related to the socket/NUMA layout.
hwloc:
	-@if [ "${LSTOPO}foo" != "foo" ]; then ${MPIEXEC} ${MPI_BINDING} -n 1 ${LSTOPO} --no-icaches --no-io --ignore PU ; fi

mpistreams: mpistream hwloc
	-@${PYTHON} process.py MPI

openmpstreams: openmpstream hwloc
	-@${PYTHON} process.py OpenMP