aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--vhost-test/README.md72
-rw-r--r--vhost-test/conf.sh.default58
-rwxr-xr-xvhost-test/vhost.sh541
3 files changed, 671 insertions, 0 deletions
diff --git a/vhost-test/README.md b/vhost-test/README.md
new file mode 100644
index 0000000..ee5f56f
--- /dev/null
+++ b/vhost-test/README.md
@@ -0,0 +1,72 @@
+# Vhost and VPP testing script
+
+This script provides a reference setup for testing [VPP](http://fd.io/) vhost-user performance.
+It uses the basic topology PHY -> VM -> PHY.
+You will therefore need a machine with two physical interfaces and the ability to run a VM with qemu in
+order to run the test.
+
+## A word about the VM
+
+The VM that is executed is a CLONE of the running system.
+In practice, that means that the VM is using, as root filesystem,
+a mounted OverlayFS instance of the root directory (Meaning that
+the VM will share the same file, but all updates will not affect the
+root file system).
+
+This also means that the VM boots on a shared file system, which implies
+that the initramfs image must include the 9p driver.
+
+One way of doing it is documented [here](http://unix.stackexchange.com/questions/90423/can-virtfs-9p-be-used-as-root-file-system).
+$ printf '%s\n' 9p 9pnet 9pnet_virtio | sudo tee -a /etc/initramfs-tools/modules
+$ sudo update-initramfs -u
+
+
+## HowTo
+
+$ cd vhost-test
+
+First copy and update the configuration file such that the parameters
+correspond to your setup.
+
+$ cp conf.sh.default conf.sh
+$ vim conf.sh
+
+Then, run the setup.
+
+$ ./vhost.sh start
+
+Once the setup is running, you can re-pin the processes.
+This is important, as it also performs 'chrt' on the working threads.
+
+$ ./vhost.sh pin
+
+Once the setup is running, you can:
+- Log into the VM
+$ ./vhost.sh ssh
+
+- Log into VPP
+$ sudo screen -r vhtestvpp
+
+
+Finally, when you are done, you can stop the VMs.
+
+$ ./vhost.sh stop
+
+## Traffic Generation
+
+Traffic generation is, for now, out of the scope of this script.
+You are supposed to update ./conf.sh by setting up the right parameters.
+Use the traffic generator of your choice to measure performance.
+
+## Administrativa
+
+### Current status
+
+This script hasn't been tested by anyone but me for now.
+You should therefore expect bugs on different setups.
+
+### Main contributors
+
+Pierre Pfister - LF-ID:ppfister
+
+
diff --git a/vhost-test/conf.sh.default b/vhost-test/conf.sh.default
new file mode 100644
index 0000000..b3870d8
--- /dev/null
+++ b/vhost-test/conf.sh.default
@@ -0,0 +1,58 @@
+# This is the configuration file for the vhost.sh script.
+# It is executed as a bash script in order to load variables.
+# Which means that you can use syntax like $(uname -r) etc...
+# Normal utilization of this script shouldn't require you
+# to change anything outside of this file.
+#
+# Use ${CD} in order to get the path of the current directory
+#
+
+# Absolute path to a VPP git repository
+VPP_DIR=""
+
+# QEMU="qemu-system-x86_64" # The QEMU binary to use (can be a path to any binary)
+QEMU="qemu-system-x86_64"
+
+# Physical interface information for the test setup
+VPP_IF0_PCI="0000:0a:00.0"
+VPP_IF0_MAC="90:e2:ba:cb:f5:68"
+VPP_IF0_NAME="TenGigabitEtherneta/0/0"
+VPP_IF1_PCI="0000:0a:00.1"
+VPP_IF1_MAC="90:e2:ba:cb:f5:69"
+VPP_IF1_NAME="TenGigabitEtherneta/0/1"
+
+# VPP_BUILD="release" #Use release VPP build
+# VPP_BUILD="debug" #Use debug VPP build
+VPP_BUILD="release"
+
+# VPP_GDB="1" # Enable gdb for VPP (other values disable it)
+VPP_GDB=""
+
+# Number of queues to be used for DPDK and VPP
+# Only 1 or 2 are supported
+QUEUES=1
+
+# Disables configured vhost thread placement
+# USE_DEFAULT_VHOST_PLACEMENT="1"
+USE_DEFAULT_VHOST_PLACEMENT=""
+
+# Cores to be used by the VM.
+# In order to work in 2-queues setup, 5 cores are required.
+CORES_VM="3,4,5,16,17"
+
+# Worker cores to be used by VPP.
+# For 2-queues setup, 4 cores should be provided.
+# For 1-queue setup, 2 cores should be provided.
+# CORES_VPP="1,2,13,14"
+CORES_VPP="2,14"
+
+# The created VM is roughly a clone of the running machine.
+# OverlayFS is used in order to isolate the VM, although
+# it uses the same root file system.
+VM_INITRD="/boot/initrd.img-$(uname -r)"
+VM_VMLINUZ="/boot/vmlinuz-$(uname -r)"
+
+# Used by 'ssh' command when opening an ssh session
+# to the running VM
+VM_USERNAME="$USER"
+
diff --git a/vhost-test/vhost.sh b/vhost-test/vhost.sh
new file mode 100755
index 0000000..c80e39f
--- /dev/null
+++ b/vhost-test/vhost.sh
@@ -0,0 +1,541 @@
+#!/bin/bash -e
+
+CD="$( cd "$( dirname $0 )" && pwd )"
+
+CONFIG_FILE=""
+
+TMP_DIR="${CD}/tmp/"
+VMDIR="${CD}/tmp/vmdir/"
+VMMOUNT="${CD}/vmroot/"
+VMWORK="${CD}/tmp/work/"
+
+BRNAME="vhtestbr0"
+VMTAP="vhtesttap0"
+VPPSCREEN="vhtestvpp"
+
+VM_ROOT="/"
+VM_VNCPORT="3555"
+
+# Those variables are found after parsing the conf
+VPP_BUILD="xxx"
+VPP_INSTALL="xxx"
+VPP="xxx"
+DPDK_BIND="xxx"
+
+CORES_VM_LIST="xxx"
+CORES_VM_N="xxx"
+declare -a CORES_VM_ARRAY
+CORES_VPP_LIST="xxx"
+CORES_VPP_N="xxx"
+declare -a CORES_VPP_ARRAY
+
+# Check that each named configuration variable is set (non-empty).
+# Arguments: variable NAMES, dereferenced through ${!c}.
+# Returns 1 (with a message) on the first unset variable.
+function validate_parameter() {
+    for c in "$@"; do
+        # Fixed typo in the message ("paramater" -> "parameter").
+        [ "${!c}" = "" ] && echo "Configuration parameter $c is not set in $CONFIG_FILE" && return 1
+    done
+    return 0
+}
+
+# Check that each named variable points to an existing directory.
+# Arguments: variable NAMES, dereferenced through ${!c}.
+function validate_directory() {
+    for c in "$@"; do
+        [ ! -d "${!c}" ] && echo "$c=${!c} is not a directory" && return 1
+    done
+    return 0
+}
+
+# Check that each named variable points to an existing regular file.
+# Arguments: variable NAMES, dereferenced through ${!c}.
+function validate_file() {
+    for c in "$@"; do
+        [ ! -f "${!c}" ] && echo "$c=${!c} is not a file" && return 1
+    done
+    return 0
+}
+
+# Ensure each named variable refers to an executable.
+# A bare command name (no '/') that is not an existing path is first
+# resolved through PATH; the variable is rewritten to the resolved path.
+function validate_exec() {
+    for c in "$@"; do
+        if [ ! -e "${!c}" ] && [[ "${!c}" != */* ]] && command -v "${!c}" > /dev/null 2>&1; then
+            # printf -v avoids the quoting pitfalls of 'eval var=...'.
+            printf -v "$c" '%s' "$(command -v "${!c}")"
+        fi
+        [ ! -x "${!c}" ] && echo "$c=${!c} is not executable" && return 1
+    done
+    return 0
+}
+
+# Parse a comma-separated core-list variable <c> into three derived variables:
+#   <c>_LIST  - space-separated core list
+#   <c>_N     - number of cores
+#   <c>_ARRAY - bash array of core IDs
+# Returns 1 if any element is not a non-negative integer.
+function validate_cores() {
+    for c in "$@"; do
+        LIST_NAME="${c}_LIST"
+        N_NAME="${c}_N"
+        ARRAY_NAME="${c}_ARRAY"
+        printf -v "$LIST_NAME" '%s' "$(echo "${!c}" | sed 's/,/ /g')"
+        COUNT=0
+        # Intentionally unquoted: word-splitting of the space-separated list.
+        for cid in ${!LIST_NAME}; do
+            if ! [[ "$cid" =~ ^[0-9]+$ ]]; then
+                echo "'$cid' is not a valid core ID"
+                return 1
+            fi
+            # cid is digits-only (validated above), so this eval is safe.
+            eval "$ARRAY_NAME[$COUNT]=$cid"
+            COUNT=$((COUNT + 1))
+        done
+        printf -v "$N_NAME" '%s' "$COUNT"
+    done
+}
+
+# Create (mkdir -p) the directory named by each variable, then verify that
+# every one of them exists. Arguments: variable NAMES.
+function install_directory() {
+    for c in "$@"; do
+        # Informational: printed even when the directory already exists.
+        echo "mkdir ${!c}"
+        [ ! -d "${!c}" ] && mkdir -p "${!c}"
+    done
+    for c in "$@"; do
+        [ ! -d "${!c}" ] && return 1
+    done
+    return 0
+}
+
+# Locate and source the user configuration (explicit CONFIG_FILE, then
+# ./conf.sh, then ./conf.sh.default), validate it, and derive the runtime
+# variables: VPP_INSTALL, VPP, DPDK_BIND and the parsed core lists.
+function load_config() {
+ if [ "$CONFIG_FILE" != "" ]; then
+ CONFIG_FILE="$CONFIG_FILE" # no-op: an explicitly set CONFIG_FILE is kept
+ elif [ -f "${CD}/conf.sh" ]; then
+ CONFIG_FILE="${CD}/conf.sh"
+ else
+ CONFIG_FILE="${CD}/conf.sh.default"
+ fi
+ . $CONFIG_FILE
+
+ #Validate config
+ validate_parameter VPP_DIR VPP_IF0_PCI VPP_IF0_MAC VPP_IF1_PCI VPP_IF1_MAC VPP_IF0_NAME VPP_IF1_NAME \
+ QEMU VM_ROOT VM_INITRD VM_VMLINUZ VM_VNCPORT VM_USERNAME
+ validate_directory VPP_DIR VM_ROOT
+ validate_file VM_INITRD VM_VMLINUZ
+ validate_exec QEMU
+ validate_cores CORES_VM CORES_VPP
+
+ # Sanity checks: VPP source tree layout and a real qemu binary.
+ [ ! -d "$VPP_DIR/vnet" ] && echo "VPP_DIR=$VPP_DIR is not VPP source directory" && exit 5
+ [ "$($QEMU --version | grep QEMU)" = "" ] && echo "$QEMU is probably not a qemu executable" && exit 6
+
+ # VPP_BUILD is repurposed here: it goes from a mode name to the build dir.
+ if [ "$VPP_BUILD" = "release" ] ; then
+ VPP_INSTALL="$VPP_DIR/build-root/install-vpp-native/"
+ VPP_BUILD="$VPP_DIR/build-root/build-vpp-native/"
+ elif [ "$VPP_BUILD" = "debug" ] ; then
+ VPP_INSTALL="$VPP_DIR/build-root/install-vpp_debug-native/"
+ # NOTE(review): debug mode reuses the release build dir for DPDK tools;
+ # 'build-vpp_debug-native' might be intended here - confirm.
+ VPP_BUILD="$VPP_DIR/build-root/build-vpp-native/"
+ else
+ echo "Invalid VPP_BUILD parameter '$VPP_BUILD'" && exit 1
+ fi
+
+ if [ "$QUEUES" != "1" -a "$QUEUES" != "2" ]; then
+ echo "QUEUES can only be 1 or 2"
+ exit 7
+ fi
+
+ VPP="$VPP_INSTALL/vpp/bin/vpp"
+ # Pick the first dpdk-devbind.py found under the DPDK build tree.
+ DPDK_BIND="$(ls $VPP_BUILD/dpdk/dpdk-*/tools/dpdk-devbind.py | head -n 1)"
+
+ validate_exec VPP DPDK_BIND
+
+ return 0
+}
+
+# Print a summary of the resolved configuration before starting the test.
+function banner() {
+    cat << EOF
+-------------------------------------
+ VPP vhost test - BETA
+-------------------------------------
+VPP_DIR : $VPP_DIR
+VPP : $VPP
+DPDK_BIND : $DPDK_BIND
+Qemu : $QEMU
+Qemu : $($QEMU --version)
+VM cores : ${CORES_VM_ARRAY[*]}
+VPP cores : ${CORES_VPP_ARRAY[*]}
+-------------------------------------
+EOF
+}
+
+# Unmount the VM's OverlayFS root and remove the (now empty) mount point.
+# The sleeps give qemu/9p a moment to release the mount.
+function vmdir_umount() {
+ sleep 0.5
+ sudo umount $VMMOUNT
+ sleep 0.5
+ rmdir "$VMMOUNT"
+}
+
+# Mount an OverlayFS view of the host root for the VM: reads come from
+# VM_ROOT (lower), writes go to VMDIR (upper), exposed at VMMOUNT.
+# NOTE(review): 'overlayfs' is the legacy (pre-3.18) fs type name; modern
+# kernels expect '-t overlay' - confirm on the target kernel.
+function vmdir_mount() {
+ mkdir -p "$VMDIR"
+ mkdir -p "$VMMOUNT"
+ mkdir -p "$VMWORK"
+ sudo mount -t overlayfs -o lowerdir=${VM_ROOT},workdir=${VMWORK},upperdir=${VMDIR} overlayfs ${VMMOUNT}
+}
+
+# Reset TMP_DIR to a pristine state. A marker file guards against wiping a
+# directory this script did not create ('rm -rf' safety).
+function clean() {
+ #Cleaning
+ vmdir_umount > /dev/null 2>&1 || echo -n "" #Just make sure it's not running
+ if [ ! -d "$TMP_DIR" ]; then
+ # First run: create the directory and drop the safety marker.
+ echo "$TMP_DIR"
+ mkdir -p "$TMP_DIR"
+ touch "$TMP_DIR/.vpp-vhost-test-safety"
+ elif [ ! -f "$TMP_DIR/.vpp-vhost-test-safety" ]; then
+ echo "Error: I will not remove tmp directory as there is no safety file: $TMP_DIR/.vpp-vhost-test-safety"
+ echo "Please do 'touch $TMP_DIR/.vpp-vhost-test-safety' if you are sure the content of this directory can be removed"
+ exit 7
+ else
+ # Marker present: safe to wipe and recreate.
+ sudo rm -rf $TMP_DIR/
+ mkdir -p "$TMP_DIR"
+ touch "$TMP_DIR/.vpp-vhost-test-safety"
+ fi
+}
+
+# Drop a startup script into the VM overlay that, at guest boot, reserves
+# hugepages, binds the two virtio devices (00:07.0/00:08.0) to igb_uio and
+# launches testpmd in a detached screen, forwarding between the two ports.
+function prepare_testpmd() {
+ #Set the VM in testpmd mode
+ cat > "$VMDIR/etc/startup.d/testpmd.sh" << EOF
+#!/bin/sh
+sysctl -w vm.nr_hugepages=1024
+mkdir -p /mnt/huge
+mount -t hugetlbfs none /mnt/huge
+modprobe uio
+insmod ${VPP_INSTALL}/dpdk/kmod/igb_uio.ko
+$DPDK_BIND -b igb_uio 00:07.0
+$DPDK_BIND -b igb_uio 00:08.0
+#gdb -ex run --args
+screen -d -m ${VPP_INSTALL}/dpdk/app/testpmd -l 0,1,2,3,4 --master-lcore 0 --socket-mem 512 --proc-type auto --file-prefix pg -w 0000:00:07.0 -w 0000:00:08.0 -- --disable-hw-vlan --rxq=$QUEUES --txq=$QUEUES --rxd=256 --txd=256 --auto-start --nb-cores=4 --eth-peer=0,aa:aa:aa:aa:bb:b1 --eth-peer=1,aa:aa:aa:aa:bb:b2 --port-topology=chained
+#--log-level 10
+#--rxq=2 --txq=2
+
+for i in \$(ls /proc/irq/ | grep [0-9]); do echo 1 > /proc/irq/\$i/smp_affinity ; done
+echo "0" > /proc/sys/kernel/watchdog_cpumask
+EOF
+sudo chmod u+x "$VMDIR/etc/startup.d/testpmd.sh"
+}
+
+# Populate the VM overlay ($VMDIR) with guest configuration files (hostname,
+# hosts, rc.local startup hook, udev NIC naming, fstab, network, irqbalance)
+# and generate the qemu command line into $TMP_DIR/vm.conf.
+function prepare_vm() {
+    #Generate VM configuration files in $VMDIR
+    mkdir -p "$VMDIR/etc/network"
+    echo "vpp-vhost-test-vm" > "$VMDIR/etc/hostname"
+
+    # Fixed: the original wrote '$vpp-vhost-test-vm'; $vpp is undefined and
+    # expanded empty, producing a bogus '-vhost-test-vm' hosts entry.
+    cat > "$VMDIR/etc/hosts" << EOF
+127.0.0.1 localhost.localdomain localhost
+127.0.1.1 vpp-vhost-test-vm
+EOF
+
+    # rc.local launches every script in /etc/startup.d at guest boot.
+    cat > "$VMDIR/etc/rc.local" << EOF
+#!/bin/sh
+mkdir -p /var/log/startup/
+for exe in \`ls /etc/startup.d\`; do
+ echo -n "Startup script \$exe "
+ ( (nohup /etc/startup.d/\$exe > /var/log/startup/\$exe 2>&1 &) && echo "[OK]") || echo "[Failed]"
+done
+exit 0
+EOF
+    sudo chmod a+x "$VMDIR/etc/rc.local"
+
+    # Stable NIC names inside the guest, keyed by the MACs set in vm.conf.
+    mkdir -p $VMDIR/etc/udev/rules.d/
+    cat > "$VMDIR/etc/udev/rules.d/70-persistent-net.rules" << EOF
+SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="00:00:00:10:10:10", ATTR{type}=="1", KERNEL=="eth*", NAME="eth0"
+SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="de:ad:be:ef:01:00", ATTR{type}=="1", KERNEL=="eth*", NAME="vhost0"
+SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="de:ad:be:ef:01:01", ATTR{type}=="1", KERNEL=="eth*", NAME="vhost1"
+EOF
+    cat > "$VMDIR/etc/fstab" << EOF
+/dev/sda1 / ext4 errors=remount-ro 0 1
+EOF
+
+    # eth0 gets fd00::1; the host bridge is fd00::ffff (see start_vm / ssh).
+    cat > "$VMDIR/etc/network/interfaces" << EOF
+auto lo
+iface lo inet loopback
+auto eth0
+iface eth0 inet manual
+iface eth0 inet6 static
+ address fd00::1
+ netmask 64
+EOF
+
+    mkdir -p "$VMDIR/etc/startup.d"
+    cat > "$VMDIR/etc/startup.d/dmesg.sh" << EOF
+#!/bin/sh
+while [ "1" = "1" ]; do
+ dmesg > /var/log/startup/dmesg
+ sleep 10
+ echo "--------------------"
+done
+EOF
+    chmod u+x $VMDIR/etc/startup.d/*.sh
+    mkdir -p "$VMDIR/etc/default"
+    cat > "$VMDIR/etc/default/irqbalance" << EOF
+ENABLED="0"
+ONESHOT="0"
+EOF
+
+    prepare_testpmd
+
+    MQ=""
+    if [ "$QUEUES" != "1" ]; then
+        MQ=",mq=on"
+    fi
+    # Guest CPUs 1..LAST_VM_CPU are isolated for the workload.
+    LAST_VM_CPU="$((CORES_VM_N - 1))"
+
+    # qemu arguments: 9p root, e1000 management NIC, two vhost-user NICs
+    # backed by $TMP_DIR/sock0 and sock1, hugepage-backed shared memory.
+    cat << EOF > "$TMP_DIR/vm.conf"
+-enable-kvm -machine pc -initrd $VM_INITRD -kernel $VM_VMLINUZ -vnc 127.0.0.1:1 -m 4G
+-append 'root=ro ro rootfstype=9p rootflags=trans=virtio nohz_full=1-$LAST_VM_CPU isolcpus=1-$LAST_VM_CPU rcu_nocbs=1-$LAST_VM_CPU selinux=0 audit=0 net.ifnames=0 biosdevname=0'
+-cpu host -smp $CORES_VM_N
+-device e1000,netdev=network0,mac=00:00:00:10:10:10,bus=pci.0,addr=3.0
+-netdev tap,id=network0,ifname=$VMTAP,script=no,downscript=no
+-fsdev local,security_model=none,id=fsdev_id,path=${VMMOUNT}
+-device virtio-9p-pci,id=dev_fs,fsdev=fsdev_id,mount_tag=ro
+-daemonize -pidfile $TMP_DIR/qemu.pid
+
+-chardev socket,id=chr0,path=$TMP_DIR/sock0,server
+-netdev type=vhost-user,id=thrnet0,chardev=chr0,queues=$QUEUES
+-device virtio-net-pci,netdev=thrnet0,mac=de:ad:be:ef:01:00,bus=pci.0,addr=7.0${MQ}
+-object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on
+-numa node,memdev=mem
+-chardev socket,id=chr1,path=$TMP_DIR/sock1,server
+-netdev type=vhost-user,id=thrnet1,chardev=chr1,queues=$QUEUES
+-device virtio-net-pci,netdev=thrnet1,mac=de:ad:be:ef:01:01,bus=pci.0,addr=8.0${MQ}
+EOF
+}
+
+# Generate the VPP command line ($TMP_DIR/vpp.cmdline) and startup CLI config
+# ($TMP_DIR/vpp.conf): two vhost-user interfaces, each L2 cross-connected
+# with one physical interface, plus optional vhost thread placement.
+function prepare_vpp() {
+ VPP_CPU=""
+ VPP_DEV0=""
+ VPP_DEV1=""
+ ENABLE_MULTIQUEUE=""
+ # Worker layout depends on the number of configured VPP worker cores:
+ # 0 -> main thread only, 1 -> one shared worker, 2-3 -> one worker per
+ # device, 4+ -> two workers per device and multiqueue.
+ if [ "$CORES_VPP_N" = "0" ]; then
+ VPP_CPU=""
+ elif [ "$CORES_VPP_N" = "1" ]; then
+ VPP_CPU="corelist-workers ${CORES_VPP_ARRAY[0]}"
+ elif [ "$CORES_VPP_N" -lt "4" ]; then
+ VPP_CPU="corelist-workers ${CORES_VPP_ARRAY[0]},${CORES_VPP_ARRAY[1]}"
+ VPP_DEV0="workers 0"
+ VPP_DEV1="workers 1"
+ else
+ VPP_CPU="corelist-workers ${CORES_VPP_ARRAY[0]},${CORES_VPP_ARRAY[1]},${CORES_VPP_ARRAY[2]},${CORES_VPP_ARRAY[3]}"
+ VPP_DEV0="workers 0,1"
+ VPP_DEV1="workers 2,3"
+ ENABLE_MULTIQUEUE="1"
+ fi
+
+ if [ "$QUEUES" = "2" -a "$ENABLE_MULTIQUEUE" = "1" ]; then
+ VPP_DEV0="$VPP_DEV0 num-rx-queues 2 num-tx-queues 2"
+ VPP_DEV1="$VPP_DEV1 num-rx-queues 2 num-tx-queues 2"
+ fi
+
+ cat << EOF > "$TMP_DIR/vpp.cmdline"
+cpu { $VPP_CPU }
+unix { startup-config $TMP_DIR/vpp.conf interactive }
+dpdk { dev $VPP_IF0_PCI { $VPP_DEV0 } dev $VPP_IF1_PCI { $VPP_DEV1 } }
+EOF
+
+ # CLI config: create the vhost-user interfaces on the sockets qemu serves,
+ # then cross-connect each with its physical counterpart (both directions).
+ cat << EOF > "$TMP_DIR/vpp.conf"
+create vhost-user socket $TMP_DIR/sock0 hwaddr aa:aa:aa:aa:bb:b1
+create vhost-user socket $TMP_DIR/sock1 hwaddr aa:aa:aa:aa:bb:b2
+set interface l2 xconnect VirtualEthernet0/0/0 $VPP_IF0_NAME
+set interface l2 xconnect VirtualEthernet0/0/1 $VPP_IF1_NAME
+set interface l2 xconnect $VPP_IF0_NAME VirtualEthernet0/0/0
+set interface l2 xconnect $VPP_IF1_NAME VirtualEthernet0/0/1
+set interface state VirtualEthernet0/0/0 up
+set interface state VirtualEthernet0/0/1 up
+set interface state $VPP_IF1_NAME up
+set interface state $VPP_IF0_NAME up
+EOF
+
+ #VHOST queue pinning: assign vhost queues to worker threads unless the
+ #user asked for the default placement.
+ if [ "$QUEUES" = "1" -a "$USE_DEFAULT_VHOST_PLACEMENT" != "1" ]; then
+ if [ "$CORES_VPP_N" = "0" ]; then
+ echo -n ""
+ elif [ "$CORES_VPP_N" = "1" ]; then
+ cat << EOF >> "$TMP_DIR/vpp.conf"
+vhost thread VirtualEthernet0/0/1 1
+vhost thread VirtualEthernet0/0/0 1
+EOF
+ elif [ "$CORES_VPP_N" -lt "4" ]; then
+ cat << EOF >> "$TMP_DIR/vpp.conf"
+vhost thread VirtualEthernet0/0/1 2
+vhost thread VirtualEthernet0/0/0 1
+EOF
+ else
+ cat << EOF >> "$TMP_DIR/vpp.conf"
+vhost thread VirtualEthernet0/0/1 3
+vhost thread VirtualEthernet0/0/0 4
+EOF
+ fi
+ elif [ "$QUEUES" = "2" -a "$USE_DEFAULT_VHOST_PLACEMENT" != "1" ]; then
+ if [ "$CORES_VPP_N" = "0" ]; then
+ echo -n ""
+ elif [ "$CORES_VPP_N" = "1" ]; then
+ cat << EOF >> "$TMP_DIR/vpp.conf"
+vhost thread VirtualEthernet0/0/1 1
+vhost thread VirtualEthernet0/0/1 1
+vhost thread VirtualEthernet0/0/0 1
+vhost thread VirtualEthernet0/0/0 1
+EOF
+ elif [ "$CORES_VPP_N" -lt "4" ]; then
+ cat << EOF >> "$TMP_DIR/vpp.conf"
+vhost thread VirtualEthernet0/0/1 2
+vhost thread VirtualEthernet0/0/1 2
+vhost thread VirtualEthernet0/0/0 1
+vhost thread VirtualEthernet0/0/0 1
+EOF
+ else
+ cat << EOF >> "$TMP_DIR/vpp.conf"
+vhost thread VirtualEthernet0/0/1 3
+vhost thread VirtualEthernet0/0/1 4
+vhost thread VirtualEthernet0/0/0 1
+vhost thread VirtualEthernet0/0/0 2
+EOF
+ fi
+ fi
+
+}
+
+# Regenerate all configuration and VM files from a clean tmp directory.
+function prepare() {
+    local step
+    for step in clean prepare_vm prepare_vpp; do
+        "$step"
+    done
+}
+
+# Launch VPP in a detached screen session, optionally under gdb.
+function start_vpp() {
+    GDB=""
+    if [ "$VPP_GDB" = "1" ]; then
+        [ -e "$TMP_DIR/vpp.sh.gdbinit" ] && sudo rm "$TMP_DIR/vpp.sh.gdbinit"
+        # gdb init: VPP uses SIGUSR1 internally; don't stop or print on it.
+        cat << EOF > "$TMP_DIR/vpp.sh.gdbinit"
+handle SIGUSR1 nostop
+handle SIGUSR1 noprint
+set print thread-events off
+run
+EOF
+        GDB="gdb -x $TMP_DIR/vpp.sh.gdbinit --args "
+    fi
+
+    echo "------- Starting VPP --------"
+    echo " Screen $VPPSCREEN (sudo screen -r $VPPSCREEN)"
+    echo " Command-line Conf:"
+    cat $TMP_DIR/vpp.cmdline
+    echo " CLI Conf:"
+    cat $TMP_DIR/vpp.conf
+    echo "-----------------------------"
+
+    # Use $VPP resolved by load_config so VPP_BUILD=debug actually runs the
+    # debug binary; the previous hard-coded path always used the release one.
+    sudo screen -d -m -S "$VPPSCREEN" $GDB "$VPP" -c $TMP_DIR/vpp.cmdline
+}
+
+# Start qemu with the generated vm.conf (pinned to CORES_VM, RR priority)
+# and attach its management tap to a host bridge at fd00::ffff/64.
+function start_vm() {
+ echo "------- Starting VM --------"
+ echo " VM conf:"
+ cat $TMP_DIR/vm.conf
+ echo "----------------------------"
+
+ #Eval is used so that ' characters are not ignored
+ eval sudo chrt -rr 1 taskset -c $CORES_VM $QEMU $(cat $TMP_DIR/vm.conf)
+
+ # qemu daemonizes (-daemonize) and records its PID in qemu.pid.
+ echo "Started QEMU with PID $(sudo cat $TMP_DIR/qemu.pid)"
+
+ sudo brctl addbr $BRNAME
+ sudo ip link set $BRNAME up
+ sudo ip addr add fd00::ffff/64 dev $BRNAME
+ sudo brctl addif $BRNAME $VMTAP
+ sudo ip link set $VMTAP up
+}
+
+# Full bring-up: refuse to start twice (.started marker), regenerate all
+# config, start VPP first (it serves the vhost-user sockets), then mount
+# the overlay and boot the VM.
+function start() {
+ if [ -f "$TMP_DIR/.started" ]; then
+ echo "$TMP_DIR/.started exists"
+ echo "This means the setup is probably running already."
+ echo "Please stop the setup first, or remove this file."
+ exit 2
+ fi
+
+ banner
+
+ prepare
+
+ touch "$TMP_DIR/.started"
+
+ start_vpp
+
+ vmdir_mount
+
+ start_vm
+}
+
+# Pin busy VM (qemu) threads to the configured cores and give them RR prio.
+# Index starts at 1: the qemu main loop keeps CORES_VM_ARRAY[0] from the
+# taskset applied at startup.
+function pin_vm() {
+    # ps -eLf: field 4 is the LWP id, field 5 the CPU usage; only busy
+    # threads (>50%) are treated as worker threads. Match the configured
+    # $QEMU binary instead of a hard-coded 'qemu-system-x86_64'.
+    PIDS=$(ps -eLf | grep "$(basename -- "$QEMU")" | awk '$5 > 50 { print $4; }')
+    idx=1
+    for p in $PIDS; do
+        if [ "${CORES_VM_ARRAY[$idx]}" = "" ]; then
+            echo "Too many working threads in VM"
+            return 1
+        fi
+        echo "VM PID $p on core ${CORES_VM_ARRAY[$idx]}"
+        sudo taskset -pc ${CORES_VM_ARRAY[$idx]} $p && sudo chrt -r -p 1 $p
+        idx=$((idx + 1))
+    done
+}
+
+# Pin busy VPP worker threads to the configured cores and give them RR prio.
+function pin_vpp() {
+    # ps -eLf: field 4 is the LWP id, field 5 the CPU usage; only busy
+    # threads (>50%) are treated as worker threads.
+    PIDS=$(ps -eLf | grep /bin/vpp | awk '$5 > 50 { print $4; }')
+    idx=0
+    for p in $PIDS; do
+        if [ "${CORES_VPP_ARRAY[$idx]}" = "" ]; then
+            echo "Too many working threads in VPP"
+            return 1
+        fi
+        echo "VPP PID $p on core ${CORES_VPP_ARRAY[$idx]}"
+        sudo taskset -pc ${CORES_VPP_ARRAY[$idx]} $p && sudo chrt -r -p 1 $p
+        idx=$((idx + 1))
+    done
+}
+
+# Re-pin both the VM and VPP worker threads.
+function pin() {
+    local target
+    for target in pin_vm pin_vpp; do
+        "$target"
+    done
+}
+
+# Tear everything down: kill the VM, unmount the overlay, remove the bridge
+# and quit the VPP screen session. Best effort: 'set +e' keeps going even
+# when individual steps fail (e.g. components already gone).
+function stop() {
+ set +e
+
+ [ -f "$TMP_DIR/qemu.pid" ] && echo "Stopping VM ($(sudo cat $TMP_DIR/qemu.pid))" && sudo kill "$(sudo cat $TMP_DIR/qemu.pid)" && sudo rm $TMP_DIR/qemu.pid
+
+ vmdir_umount
+
+ sudo ip link set $BRNAME down
+ sudo brctl delbr $BRNAME
+
+ sudo screen -S "$VPPSCREEN" -X quit && echo "Stopping VPP"
+
+ # Clear the run marker so 'start' can be used again.
+ [ -f "$TMP_DIR/.started" ] && rm "$TMP_DIR/.started"
+}
+
+# Expose the VM console: qemu was started with '-vnc 127.0.0.1:1' (display 1
+# = TCP 5901); socat forwards public port 5900 to it.
+# NOTE(review): the configured VM_VNCPORT is not used here - confirm intent.
+function cmd_openvnc() {
+ load_config
+ echo Please VNC to 5900 to connect to this VM console
+ socat TCP6-LISTEN:5900,reuseaddr TCP:localhost:5901 &
+}
+
+# CLI entry point: start the full setup.
+function cmd_start() {
+ load_config
+ start
+}
+
+# CLI entry point: re-pin VM and VPP worker threads.
+function cmd_pin() {
+ load_config
+ pin
+}
+
+# CLI entry point: stop the setup.
+function cmd_stop() {
+ load_config
+ stop
+}
+
+# CLI entry point: wipe and recreate the tmp directory.
+function cmd_clean() {
+ load_config
+ clean
+}
+
+# CLI entry point: ssh into the running VM (eth0 has fd00::1).
+function cmd_ssh() {
+ load_config
+ ssh ${VM_USERNAME}@fd00::1
+}
+
+# CLI entry point: only load and validate the configuration.
+function cmd_config() {
+ load_config
+}
+
+[ "$1" = "" ] && echo "Missing arguments" && usage && exit 1
+CMD="$1"
+shift
+eval "cmd_$CMD" "$@"
+