Diffstat (limited to 'tests/data_plane/vpp_lite_topo')
-rw-r--r--  tests/data_plane/vpp_lite_topo/test_driver/multihoming.sh          85
-rw-r--r--  tests/data_plane/vpp_lite_topo/test_driver/multihoming_l2.sh       94
-rwxr-xr-x  tests/data_plane/vpp_lite_topo/tests/test_multihoming_4o4.sh       10
-rwxr-xr-x  tests/data_plane/vpp_lite_topo/tests/test_multihoming_6o6.sh       10
-rwxr-xr-x  tests/data_plane/vpp_lite_topo/tests/test_multihoming_l2o4.sh      10
-rw-r--r--  tests/data_plane/vpp_lite_topo/topologies/multihoming_topo.sh     124
-rw-r--r--  tests/data_plane/vpp_lite_topo/topologies/multihoming_topo_l2.sh  133
7 files changed, 466 insertions(+), 0 deletions(-)
diff --git a/tests/data_plane/vpp_lite_topo/test_driver/multihoming.sh b/tests/data_plane/vpp_lite_topo/test_driver/multihoming.sh
new file mode 100644
index 0000000..7d623f0
--- /dev/null
+++ b/tests/data_plane/vpp_lite_topo/test_driver/multihoming.sh
@@ -0,0 +1,85 @@
+source config.sh
+source odl_utils.sh
+source topologies/multihoming_topo.sh
+
+ODL_CONFIG_FILE1="vpp1.json"
+ODL_CONFIG_FILE2="vpp2.json"
+ODL_CONFIG_FILE3="update_vpp2.json"
+
+if [ "$1" == "clean" ] ; then
+ multihoming_topo_clean
+ exit 0
+fi
+
+if [[ $(id -u) != 0 ]]; then
+  echo "Error: run this as root."
+ exit 1
+fi
+
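+# test_multihoming <ping-binary> <destination-EID> [no_setup|wait]
+#   $1 - ping or ping6 binary used to verify connectivity through the overlay
+#   $2 - destination EID that is pinged and port-swept
+#   $3 - optional: "no_setup" skips the topology setup, "wait" pauses between
+#        test steps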
+function test_multihoming
+{
+ if [ "$3" != "no_setup" ] ; then
+ multihoming_topo_setup
+ fi
+
+ if [ "$3" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+
+ test_result=1
+
+ ip netns exec vppns1 "${1}" -w 15 -c 1 "${2}"
+
+ # do some port sweeping to see that load balancing works
+ ip netns exec vppns1 nc -n -z "${2}" 1-1000 > /dev/null 2>&1
+
+  # check that load balancing works: roughly half of the ~1000 probe packets
+  # should be counted on RLOC interface host-intervpp11 of vpp1
+  pkts=$(echo "show int" | nc 0 5002 | grep host-intervpp11 | awk '{print $6}' | tr -d '\r')
+
+  if [ "$pkts" -gt 450 ] && [ "$pkts" -lt 550 ] ; then
+    rc=0
+  else
+    rc=1
+  fi
+
+ if [ $rc -ne 0 ] ; then
+ echo "Load balancing doesn't work!"
+
+ if [ "$3" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+
+ multihoming_topo_clean
+ exit $test_result
+ fi
+
+ if [ "$3" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+
+ # change IP addresses of destination RLOC
+ echo "set int ip address del host-intervpp12 6.0.3.2/24" | nc 0 5003
+ echo "set int ip address host-intervpp12 6.0.3.20/24" | nc 0 5003
+ echo "set int ip address del host-intervpp12 6:0:3::2/64" | nc 0 5003
+ echo "set int ip address host-intervpp12 6:0:3::20/64" | nc 0 5003
+ post_curl "update-mapping" ${ODL_CONFIG_FILE3}
+
+ ip netns exec vppns1 "${1}" -w 15 -c 1 "${2}"
+ rc=$?
+
+ # test done
+
+ if [ "$3" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+
+ multihoming_topo_clean
+ if [ $rc -ne 0 ] ; then
+    echo "Test failed: No ICMP response received within the specified timeout!"
+ else
+ echo "Test passed."
+ test_result=0
+ fi
+
+ exit $test_result
+}
diff --git a/tests/data_plane/vpp_lite_topo/test_driver/multihoming_l2.sh b/tests/data_plane/vpp_lite_topo/test_driver/multihoming_l2.sh
new file mode 100644
index 0000000..64d3486
--- /dev/null
+++ b/tests/data_plane/vpp_lite_topo/test_driver/multihoming_l2.sh
@@ -0,0 +1,94 @@
+source config.sh
+source odl_utils.sh
+source topologies/multihoming_topo_l2.sh
+
+ODL_CONFIG_FILE1="vpp1.json"
+ODL_CONFIG_FILE2="vpp2.json"
+ODL_CONFIG_FILE3="update_vpp2.json"
+
+function maybe_pause
+{
+ if [ "$1" == "wait" ] ; then
+ read -p "press any key to continue .." -n1
+ fi
+}
+
+if [ "$1" == "clean" ] ; then
+ multihoming_topo_clean
+ exit 0
+fi
+
+if [[ $(id -u) != 0 ]]; then
+  echo "Error: run this as root."
+ exit 1
+fi
+
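+# test_multihoming <ping-binary> <destination-EID> [no_setup|wait]
+#   $1 - ping or ping6 binary used to verify connectivity through the overlay
+#   $2 - destination EID that is pinged and port-swept
+#   $3 - optional: "no_setup" skips the topology setup, "wait" pauses between
+#        test steps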
+function test_multihoming
+{
+ if [ "$3" != "no_setup" ] ; then
+ multihoming_topo_setup
+ fi
+
+ maybe_pause $3
+
+ test_result=1
+
+ ip netns exec vppns1 "${1}" -w 15 -c 1 "${2}"
+ rc=$?
+ if [ $rc -ne 0 ] ; then
+ echo "No response received!"
+
+ maybe_pause $3
+ multihoming_topo_clean
+ exit $test_result
+ fi
+
+ maybe_pause $3
+
+ # do some port sweeping to see that load balancing works
+ ip netns exec vppns1 nc -n -z "${2}" 1-1000 > /dev/null 2>&1
+
+  # check that load balancing works: roughly half of the ~1000 probe packets
+  # should be counted on RLOC interface host-intervpp11 of vpp1
+  pkts=$(echo "show int" | nc 0 5002 | grep host-intervpp11 | awk '{print $6}' | tr -d '\r')
+
+  if [ "$pkts" -gt 450 ] && [ "$pkts" -lt 550 ] ; then
+    rc=0
+  else
+    rc=1
+  fi
+
+ if [ $rc -ne 0 ] ; then
+ echo "Load balancing doesn't work!"
+
+ maybe_pause $3
+
+ multihoming_topo_clean
+ exit $test_result
+ fi
+
+ maybe_pause $3
+
+ # change IP addresses of destination RLOC
+ echo "set int ip address del host-intervpp12 6.0.3.2/24" | nc 0 5003
+ echo "set int ip address host-intervpp12 6.0.3.20/24" | nc 0 5003
+ echo "set int ip address del host-intervpp12 6:0:3::2/64" | nc 0 5003
+ echo "set int ip address host-intervpp12 6:0:3::20/64" | nc 0 5003
+ post_curl "update-mapping" ${ODL_CONFIG_FILE3}
+
+ ip netns exec vppns1 "${1}" -w 15 -c 1 "${2}"
+ rc=$?
+
+ # test done
+
+ maybe_pause $3
+
+ multihoming_topo_clean
+ if [ $rc -ne 0 ] ; then
+    echo "Test failed: No ICMP response received within the specified timeout!"
+ else
+ echo "Test passed."
+ test_result=0
+ fi
+
+ exit $test_result
+}
diff --git a/tests/data_plane/vpp_lite_topo/tests/test_multihoming_4o4.sh b/tests/data_plane/vpp_lite_topo/tests/test_multihoming_4o4.sh
new file mode 100755
index 0000000..69f46bf
--- /dev/null
+++ b/tests/data_plane/vpp_lite_topo/tests/test_multihoming_4o4.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+# Test LISP multihoming functionality (ip4 over ip4)
+
+VPP_LITE_CONF=$(pwd)/../configs/vpp_lite_config/multihoming/4o4
+ODL_CONFIG_DIR=$(pwd)/../configs/odl/multihoming/4o4
+
+source test_driver/multihoming.sh
+
+test_multihoming ping "6.0.2.2"
diff --git a/tests/data_plane/vpp_lite_topo/tests/test_multihoming_6o6.sh b/tests/data_plane/vpp_lite_topo/tests/test_multihoming_6o6.sh
new file mode 100755
index 0000000..85681e3
--- /dev/null
+++ b/tests/data_plane/vpp_lite_topo/tests/test_multihoming_6o6.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+# Test LISP multihoming functionality (ip6 over ip6)
+
+VPP_LITE_CONF=$(pwd)/../configs/vpp_lite_config/multihoming/6o6
+ODL_CONFIG_DIR=$(pwd)/../configs/odl/multihoming/6o6
+
+source test_driver/multihoming.sh
+
+test_multihoming ping6 "6:0:2::2"
diff --git a/tests/data_plane/vpp_lite_topo/tests/test_multihoming_l2o4.sh b/tests/data_plane/vpp_lite_topo/tests/test_multihoming_l2o4.sh
new file mode 100755
index 0000000..c308b51
--- /dev/null
+++ b/tests/data_plane/vpp_lite_topo/tests/test_multihoming_l2o4.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+# Test LISP multihoming functionality (L2 over ip4)
+
+VPP_LITE_CONF=$(pwd)/../configs/vpp_lite_config/multihoming/l2o4
+ODL_CONFIG_DIR=$(pwd)/../configs/odl/multihoming/l2o4
+
+source test_driver/multihoming_l2.sh
+
+test_multihoming ping "6.0.1.12"
diff --git a/tests/data_plane/vpp_lite_topo/topologies/multihoming_topo.sh b/tests/data_plane/vpp_lite_topo/topologies/multihoming_topo.sh
new file mode 100644
index 0000000..3d493f0
--- /dev/null
+++ b/tests/data_plane/vpp_lite_topo/topologies/multihoming_topo.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+
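+# Topology built by multihoming_topo_setup:
+#   vppns1, vppns2           - client namespaces holding the EID-side veth ends
+#   intervppns1, intervppns2 - underlay namespaces, each with a Linux bridge
+#                              (vppbr) interconnecting the inter-vpp veths
+#   intervpp11/12/21/22      - veth ends left in the root namespace so the VPP
+#                              instances can use them as host-* interfaces
+#   odl                      - ODL-facing interface, bridged in intervppns1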
+function multihoming_topo_clean
+{
+ echo "Clearing all VPP instances.."
+ pkill vpp --signal 9
+ rm /dev/shm/*
+
+ echo "Cleaning topology.."
+ ip netns exec intervppns1 ifconfig vppbr down
+ ip netns exec intervppns1 brctl delbr vppbr
+ ip link del dev veth_vpp1 &> /dev/null
+ ip link del dev veth_vpp2 &> /dev/null
+ ip link del dev veth_intervpp11 &> /dev/null
+ ip link del dev veth_intervpp12 &> /dev/null
+ ip link del dev veth_odl &> /dev/null
+ ip netns del vppns1 &> /dev/null
+ ip netns del vppns2 &> /dev/null
+ ip netns del intervppns1 &> /dev/null
+
+ ip netns exec intervppns2 ifconfig vppbr down
+ ip netns exec intervppns2 brctl delbr vppbr
+ ip link del dev veth_intervpp21 &> /dev/null
+ ip link del dev veth_intervpp22 &> /dev/null
+ ip netns del intervppns2 &> /dev/null
+
+ if [ "$1" != "no_odl" ] ; then
+ odl_clear_all
+ fi
+}
+
+function multihoming_topo_setup
+{
+
+  # create client-facing and inter-vpp namespaces
+ ip netns add vppns1
+ ip netns add vppns2
+ ip netns add intervppns1
+ ip netns add intervppns2
+
+  # create vpp and odl veth pairs and move the veth_* ends into intervppns1
+ ip link add veth_intervpp11 type veth peer name intervpp11
+ ip link add veth_intervpp12 type veth peer name intervpp12
+ ip link add veth_odl type veth peer name odl
+ ip link set dev intervpp11 up
+ ip link set dev intervpp12 up
+ ip link set dev odl up
+ ip link set dev veth_intervpp11 up netns intervppns1
+ ip link set dev veth_intervpp12 up netns intervppns1
+ ip link set dev veth_odl up netns intervppns1
+
+ ip link add veth_intervpp21 type veth peer name intervpp21
+ ip link add veth_intervpp22 type veth peer name intervpp22
+ ip link set dev intervpp21 up
+ ip link set dev intervpp22 up
+ ip link set dev veth_intervpp21 up netns intervppns2
+ ip link set dev veth_intervpp22 up netns intervppns2
+
+ # create bridge in intervppns1 and add vpp and odl interfaces
+ ip netns exec intervppns1 brctl addbr vppbr
+ ip netns exec intervppns1 brctl addif vppbr veth_intervpp11
+ ip netns exec intervppns1 brctl addif vppbr veth_intervpp12
+ ip netns exec intervppns1 brctl addif vppbr veth_odl
+ ip netns exec intervppns1 ifconfig vppbr up
+
+  # create bridge in intervppns2 and add the vpp interfaces
+  # (there is no odl interface in intervppns2, so none is added here)
+  ip netns exec intervppns2 brctl addbr vppbr
+  ip netns exec intervppns2 brctl addif vppbr veth_intervpp21
+  ip netns exec intervppns2 brctl addif vppbr veth_intervpp22
+  ip netns exec intervppns2 ifconfig vppbr up
+
+  # create and configure 1st client-to-vpp veth pair
+ ip link add veth_vpp1 type veth peer name vpp1
+ ip link set dev vpp1 up
+ ip link set dev veth_vpp1 up netns vppns1
+
+  # create and configure 2nd client-to-vpp veth pair
+ ip link add veth_vpp2 type veth peer name vpp2
+ ip link set dev vpp2 up
+ ip link set dev veth_vpp2 up netns vppns2
+
+ ip netns exec vppns1 \
+ bash -c "
+ ip link set dev lo up
+ ip addr add 6.0.1.2/24 dev veth_vpp1
+ ip route add 6.0.2.0/24 via 6.0.1.1
+ ip addr add 6:0:1::2/64 dev veth_vpp1
+ ip route add 6:0:2::0/64 via 6:0:1::1
+ "
+
+ ip netns exec vppns2 \
+ bash -c "
+ ip link set dev lo up
+ ip addr add 6.0.2.2/24 dev veth_vpp2
+ ip route add 6.0.1.0/24 via 6.0.2.1
+ ip addr add 6:0:2::2/64 dev veth_vpp2
+ ip route add 6:0:1::0/64 via 6:0:2::1
+ "
+
+ # set odl iface ip and disable checksum offloading
+ ip addr add 6.0.3.100/24 dev odl
+ ip addr add 6:0:3::100/64 dev odl
+ ethtool --offload odl rx off tx off
+
+ # start vpp1 and vpp2 in separate chroot
+ ${VPP_LITE_BIN} \
+ unix { log /tmp/vpp1.log cli-listen \
+ localhost:5002 full-coredump \
+ exec ${VPP_LITE_CONF}/vpp1.config } \
+ api-trace { on } api-segment {prefix xtr1}
+
+ ${VPP_LITE_BIN} \
+ unix { log /tmp/vpp2.log cli-listen \
+ localhost:5003 full-coredump \
+ exec ${VPP_LITE_CONF}/vpp2.config } \
+ api-trace { on } api-segment {prefix xtr2}
+
+ if [ "$1" != "no_odl" ] ; then
+ post_curl "add-mapping" ${ODL_CONFIG_FILE1}
+ post_curl "add-mapping" ${ODL_CONFIG_FILE2}
+ fi
+}
+
diff --git a/tests/data_plane/vpp_lite_topo/topologies/multihoming_topo_l2.sh b/tests/data_plane/vpp_lite_topo/topologies/multihoming_topo_l2.sh
new file mode 100644
index 0000000..bd490c0
--- /dev/null
+++ b/tests/data_plane/vpp_lite_topo/topologies/multihoming_topo_l2.sh
@@ -0,0 +1,133 @@
+#!/usr/bin/env bash
+
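+# Topology built by multihoming_topo_setup (L2 variant):
+#   vppns1, vppns2           - client namespaces; both clients sit in the same
+#                              6.0.1.0/24 / 6:0:1::/64 subnet, i.e. one L2
+#                              segment carried over the overlay
+#   intervppns1, intervppns2 - underlay namespaces, each with a Linux bridge
+#                              (vppbr) interconnecting the inter-vpp veths
+#   intervpp11/12/21/22      - veth ends left in the root namespace so the VPP
+#                              instances can use them as host-* interfaces
+#   odl                      - ODL-facing interface, bridged in intervppns1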
+function multihoming_topo_clean
+{
+ echo "Clearing all VPP instances.."
+ pkill vpp --signal 9
+ rm /dev/shm/*
+
+ echo "Cleaning topology.."
+ ip netns exec intervppns1 ifconfig vppbr down
+ ip netns exec intervppns1 brctl delbr vppbr
+ ip link del dev veth_vpp1 &> /dev/null
+ ip link del dev veth_vpp2 &> /dev/null
+ ip link del dev veth_intervpp11 &> /dev/null
+ ip link del dev veth_intervpp12 &> /dev/null
+ ip link del dev veth_odl &> /dev/null
+ ip netns del vppns1 &> /dev/null
+ ip netns del vppns2 &> /dev/null
+ ip netns del intervppns1 &> /dev/null
+
+ ip netns exec intervppns2 ifconfig vppbr down
+ ip netns exec intervppns2 brctl delbr vppbr
+ ip link del dev veth_intervpp21 &> /dev/null
+ ip link del dev veth_intervpp22 &> /dev/null
+ ip netns del intervppns2 &> /dev/null
+
+ if [ "$1" != "no_odl" ] ; then
+ odl_clear_all
+ fi
+}
+
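+# install static ARP entries for the clients' EIDs so the test does not depend
+# on ARP resolution across the L2 overlay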
+function set_arp
+{
+  mac1=$(ip netns exec vppns1 ip a show dev veth_vpp1 | grep "link/ether" | awk '{print $2}')
+  ip netns exec vppns2 arp -s 6.0.1.11 "$mac1"
+
+  mac2=$(ip netns exec vppns2 ip a show dev veth_vpp2 | grep "link/ether" | awk '{print $2}')
+  ip netns exec vppns1 arp -s 6.0.1.12 "$mac2"
+}
+
+function multihoming_topo_setup
+{
+
+  # create client-facing and inter-vpp namespaces
+ ip netns add vppns1
+ ip netns add vppns2
+ ip netns add intervppns1
+ ip netns add intervppns2
+
+  # create vpp and odl veth pairs and move the veth_* ends into intervppns1
+ ip link add veth_intervpp11 type veth peer name intervpp11
+ ip link add veth_intervpp12 type veth peer name intervpp12
+ ip link add veth_odl type veth peer name odl
+ ip link set dev intervpp11 up
+ ip link set dev intervpp12 up
+ ip link set dev odl up
+ ip link set dev veth_intervpp11 up netns intervppns1
+ ip link set dev veth_intervpp12 up netns intervppns1
+ ip link set dev veth_odl up netns intervppns1
+
+ ip link add veth_intervpp21 type veth peer name intervpp21
+ ip link add veth_intervpp22 type veth peer name intervpp22
+ ip link set dev intervpp21 up
+ ip link set dev intervpp22 up
+ ip link set dev veth_intervpp21 up netns intervppns2
+ ip link set dev veth_intervpp22 up netns intervppns2
+
+ # create bridge in intervppns1 and add vpp and odl interfaces
+ ip netns exec intervppns1 brctl addbr vppbr
+ ip netns exec intervppns1 brctl addif vppbr veth_intervpp11
+ ip netns exec intervppns1 brctl addif vppbr veth_intervpp12
+ ip netns exec intervppns1 brctl addif vppbr veth_odl
+ ip netns exec intervppns1 ifconfig vppbr up
+
+  # create bridge in intervppns2 and add the vpp interfaces
+  # (there is no odl interface in intervppns2, so none is added here)
+  ip netns exec intervppns2 brctl addbr vppbr
+  ip netns exec intervppns2 brctl addif vppbr veth_intervpp21
+  ip netns exec intervppns2 brctl addif vppbr veth_intervpp22
+  ip netns exec intervppns2 ifconfig vppbr up
+
+  # create and configure 1st client-to-vpp veth pair
+ ip link add veth_vpp1 type veth peer name vpp1
+ ip link set dev vpp1 up
+ ip link set dev veth_vpp1 address 08:11:11:11:11:11
+ ip link set dev veth_vpp1 up netns vppns1
+
+  # create and configure 2nd client-to-vpp veth pair
+ ip link add veth_vpp2 type veth peer name vpp2
+ ip link set dev vpp2 up
+ ip link set dev veth_vpp2 address 08:22:22:22:22:22
+ ip link set dev veth_vpp2 up netns vppns2
+
+ ip netns exec vppns1 \
+ bash -c "
+ ip link set dev lo up
+ ip addr add 6.0.1.11/24 dev veth_vpp1
+ ip addr add 6:0:1::11/64 dev veth_vpp1
+ "
+
+ ip netns exec vppns2 \
+ bash -c "
+ ip link set dev lo up
+ ip addr add 6.0.1.12/24 dev veth_vpp2
+ ip addr add 6:0:1::12/64 dev veth_vpp2
+ "
+
+ # set odl iface ip and disable checksum offloading
+ ip addr add 6.0.3.100/24 dev odl
+ ip addr add 6:0:3::100/64 dev odl
+ ethtool --offload odl rx off tx off
+
+ # start vpp1 and vpp2 in separate chroot
+ ${VPP_LITE_BIN} \
+ unix { log /tmp/vpp1.log cli-listen \
+ localhost:5002 full-coredump \
+ exec ${VPP_LITE_CONF}/vpp1.config } \
+ api-trace { on } api-segment {prefix xtr1}
+
+ ${VPP_LITE_BIN} \
+ unix { log /tmp/vpp2.log cli-listen \
+ localhost:5003 full-coredump \
+ exec ${VPP_LITE_CONF}/vpp2.config } \
+ api-trace { on } api-segment {prefix xtr2}
+
+ if [ "$1" != "no_odl" ] ; then
+ post_curl "add-mapping" ${ODL_CONFIG_FILE1}
+ post_curl "add-mapping" ${ODL_CONFIG_FILE2}
+ fi
+
+ set_arp
+}
+