# PETSc streams memory-bandwidth benchmark makefile.
# Builds the per-model benchmark binaries (MPI, CUDA, OpenMP, SSE, Pthread)
# and provides "stream" targets that sweep process/thread counts and feed
# the results through process.py.
-include ../../../petscdir.mk

MANSEC = Sys

include ${PETSC_DIR}/lib/petsc/conf/variables
include ${PETSC_DIR}/lib/petsc/conf/rules

# Each *Version rule links its benchmark binary and then removes the object
# file so repeated runs relink from a fresh compile.
BasicVersion: BasicVersion.o
	-@${CLINKER} -o BasicVersion BasicVersion.o ${PETSC_LIB}
	@${RM} -f BasicVersion.o

MPIVersion: MPIVersion.o
	-@${CLINKER} -o MPIVersion MPIVersion.o ${PETSC_LIB}
	@${RM} -f MPIVersion.o

CUDAVersion: CUDAVersion.o
	-@${CLINKER} -o CUDAVersion CUDAVersion.o ${PETSC_LIB}
	@${RM} -f CUDAVersion.o

# NOTE(review): original omitted ${PETSC_LIB} here only; added for
# consistency with the sibling link rules (harmless if unused).
OpenMPVersion: OpenMPVersion.o
	-@${CLINKER} -o OpenMPVersion OpenMPVersion.o ${PETSC_LIB}
	@${RM} -f OpenMPVersion.o

SSEVersion: SSEVersion.o
	-${CLINKER} -o $@ $< ${PETSC_LIB}
	${RM} -f $<

PthreadVersion: PthreadVersion.o
	-@${CLINKER} -o PthreadVersion PthreadVersion.o ${PETSC_LIB}
	@${RM} -f PthreadVersion.o

# make streams [NPMAX=integer_number_of_MPI_processes_to_use] [MPI_BINDING='binding options']
# Runs MPIVersion at 1..NPMAX ranks, logging each run to scaling.log.
mpistream: MPIVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	${MPIEXEC} ${MPI_BINDING} -n $${i} ./MPIVersion | tee -a scaling.log; \
	done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py MPI fileoutput

# Works on SUMMIT (uses jsrun-style -c/-a/-g resource flags).
# First loop sweeps 1..min(NPMAX,7) cores on one resource set; second loop
# grows the number of resource sets in steps of 7, trying 6 and 7 cores each.
cudastreamjsrun: CUDAVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes> [MPI_BINDING='-bind-to core -map-by numa']\n or [I_MPI_PIN_PROCESSOR_LIST=:map=scatter] [PETSC_OPTIONS=-process_view] make streams NPMAX=<integer number of MPI processes>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running streams with '${MPIEXEC} ${MPI_BINDING}' using 'NPMAX=${NPMAX}' \n"
	-@i=0; while [ $${i} -lt ${NPMAX} ] && [ $${i} -lt 7 ]; do i=`expr $${i} + 1`; \
	${MPIEXEC} ${MPI_BINDING} -n 1 -c$${i} -a$${i} -g1 ./CUDAVersion | tee -a scaling.log; \
	done
	-@n=1; i=7; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 7`; n=`expr $${n} + 1`; \
	c=5; while [ $${c} -lt 7 ]; do c=`expr $${c} + 1`; \
	${MPIEXEC} ${MPI_BINDING} -n $${n} -c$${c} -a$${c} -g1 ./CUDAVersion | tee -a scaling.log; \
	done; done
	-@echo "------------------------------------------------"
	-@${PYTHON} process.py CUDA fileoutput

# make openmpstream NPMAX=<threads>: runs OpenMPVersion at 1..NPMAX threads.
openmpstream: OpenMPVersion
	@if [ "${NPMAX}foo" = "foo" ]; then echo "---------"; printf " Run with make openmpstream NPMAX=<integer number of threads>\n"; exit 1 ; fi
	-@printf "" > scaling.log
	-@printf "Running openmpstreams using 'NPMAX=${NPMAX}'\n"
	-@i=0; while [ $${i} -lt ${NPMAX} ]; do i=`expr $${i} + 1`; \
	OMP_NUM_THREADS=$${i} ./OpenMPVersion | tee -a scaling.log;\
	done
	-@${PYTHON} process.py OpenMP fileoutput

# Print the machine topology (if lstopo is available) to aid interpreting
# the binding used for the runs above.
hwloc:
	-@if [ "${LSTOPO}foo" != "foo" ]; then ${MPIEXEC} ${MPI_BINDING} -n 1 ${LSTOPO} --no-icaches --no-io --ignore PU ; fi

mpistreams: mpistream hwloc
	-@${PYTHON} process.py MPI


openmpstreams: openmpstream hwloc
	-@${PYTHON} process.py OpenMP