#!/usr/bin/env python3

import unittest
import binascii
from socket import AF_INET6

from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathProto, VppIpTable
from vpp_srv6 import SRv6LocalSIDBehaviors, VppSRv6LocalSID, VppSRv6Policy, \
    SRv6PolicyType, VppSRv6Steering, SRv6PolicySteeringTypes

import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q
from scapy.layers.inet6 import IPv6, UDP, IPv6ExtHdrSegmentRouting
from scapy.layers.inet import IP, UDP

from util import ppp


class TestSRv6(VppTestCase):
    """ SRv6 Test Case """

    @classmethod
    def setUpClass(cls):
        super(TestSRv6, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestSRv6, cls).tearDownClass()

    def setUp(self):
        """ Perform test setup before each test case. """
        super(TestSRv6, self).setUp()

        # packet sizes, inclusive L2 overhead
        self.pg_packet_sizes = [64, 512, 1518, 9018]

        # reset packet_infos
        self.reset_packet_infos()

    def tearDown(self):
        """ Clean up test setup after each test case. """
        self.teardown_interfaces()
        super(TestSRv6, self).tearDown()

    def configure_interface(self,
                            interface,
                            ipv6=False, ipv4=False,
                            ipv6_table_id=0, ipv4_table_id=0):
        """ Configure interface.

        :param ipv6: configure IPv6 on interface
        :param ipv4: configure IPv4 on interface
        :param ipv6_table_id: FIB table_id for IPv6
        :param ipv4_table_id: FIB table_id for IPv4
        """
        self.logger.debug("Configuring interface %s" % (interface.name))
        if ipv6:
            self.logger.debug("Configuring IPv6")
            interface.set_table_ip6(ipv6_table_id)
            interface.config_ip6()
            interface.resolve_ndp(timeout=5)
        if ipv4:
            self.logger.debug("Configuring IPv4")
            interface.set_table_ip4(ipv4_table_id)
            interface.config_ip4()
            interface.resolve_arp()
        interface.admin_up()

    def setup_interfaces(self, ipv6=[], ipv4=[],
                         ipv6_table_id=[], ipv4_table_id=[]):
        """ Create and configure interfaces.

        :param ipv6: list of interface IPv6 capabilities
        :param ipv4: list of interface IPv4 capabilities
        :param ipv6_table_id: list of intf IPv6 FIB table_ids
        :param ipv4_table_id: list of intf IPv4 FIB table_ids
        :returns: List of created interfaces.
        """
        # how many interfaces?
        if len(ipv6):
            count = len(ipv6)
        else:
            count = len(ipv4)
        self.logger.debug("Creating and configuring %d interfaces" % (count))

        # fill up ipv6 and ipv4 lists if needed
        # not enabled (False) is the default
        if len(ipv6) < count:
            ipv6 += (count - len(ipv6)) * [False]
        if len(ipv4) < count:
            ipv4 += (count - len(ipv4)) * [False]

        # fill up table_id lists if needed
        # table_id 0 (global) is the default
        if len(ipv6_table_id) < count:
            ipv6_table_id += (count - len(ipv6_table_id)) * [0]
        if len(ipv4_table_id) < count:
            ipv4_table_id += (count - len(ipv4_table_id)) * [0]

        # create 'count' pg interfaces
        self.create_pg_interfaces(range(count))

        # setup all interfaces
        for i in range(count):
            intf = self.pg_interfaces[i]
            self.configure_interface(intf,
                                     ipv6[i], ipv4[i],
                                     ipv6_table_id[i], ipv4_table_id[i])

        if any(ipv6):
            self.logger.debug(self.vapi.cli("show ip6 neighbors"))
        if any(ipv4):
            self.logger.debug(self.vapi.cli("show ip4 neighbors"))
        self.logger.debug(self.vapi.cli("show interface"))
        self.logger.debug(self.vapi.cli("show hardware"))

        return self.pg_interfaces

    def teardown_interfaces(self):
        """ Unconfigure and bring down interface. """
        self.logger.debug("Tearing down interfaces")

        # tear down all interfaces
        # AFAIK they cannot be deleted
        for i in self.pg_interfaces:
            self.logger.debug("Tear down interface %s" % (i.name))
            i.admin_down()
            i.unconfig()
            i.set_table_ip4(0)
            i.set_table_ip6(0)

    @unittest.skipUnless(0, "PC to fix")
    def test_SRv6_T_Encaps(self):
        """ Test SRv6 Transit.Encaps behavior for IPv6. """
        # send traffic to one destination interface
        # source and destination are IPv6 only
        self.setup_interfaces(ipv6=[True, True])

        # configure FIB entries
        route = VppIpRoute(self, "a4::", 64,
                           [VppRoutePath(self.pg1.remote_ip6,
                                         self.pg1.sw_if_index)])
        route.add_vpp_config()

        # configure encaps IPv6 source address
        # needs to be done before SR Policy config
        # TODO: API?
        self.vapi.cli("set sr encaps source addr a3::")

        bsid = 'a3::9999:1'

        # configure SRv6 Policy
        # Note: segment list order: first -> last
        sr_policy = VppSRv6Policy(
            self, bsid=bsid,
            is_encap=1,
            sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT,
            weight=1, fib_table=0,
            segments=['a4::', 'a5::', 'a6::c7'],
            source='a3::')
        sr_policy.add_vpp_config()
        self.sr_policy = sr_policy

        # log the sr policies
        self.logger.info(self.vapi.cli("show sr policies"))

        # steer IPv6 traffic to a7::/64 into SRv6 Policy
        # use the bsid of the above self.sr_policy
        pol_steering = VppSRv6Steering(
            self,
            bsid=self.sr_policy.bsid,
            prefix="a7::", mask_width=64,
            traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6,
            sr_policy_index=0, table_id=0,
            sw_if_index=0)
        pol_steering.add_vpp_config()

        # log the sr steering policies
        self.logger.info(self.vapi.cli("show sr steering policies"))

        # create packets
        count = len(self.pg_packet_sizes)
        dst_inner = 'a7::1234'
        pkts = []

        # create IPv6 packets without SRH
        packet_header = self.create_packet_header_IPv6(dst_inner)
        # create traffic stream pg0->pg1
        pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
                                       self.pg_packet_sizes, count))

        # create IPv6 packets with SRH
        # packets with segments-left 1, active segment a7::
        packet_header = self.create_packet_header_IPv6_SRH(
            sidlist=['a8::', 'a7::', 'a6::'],
            segleft=1)
        # create traffic stream pg0->pg1
        pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
                                       self.pg_packet_sizes, count))

        # create IPv6 packets with SRH and IPv6
        # packets with segments-left 1, active segment a7::
        packet_header = self.create_packet_header_IPv6_SRH_IPv6(
            dst_inner,
            sidlist=['a8::', 'a7::', 'a6::'],
            segleft=1)
        # create traffic stream pg0->pg1
        pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
                                       self.pg_packet_sizes, count))

        # send packets and verify received packets
        self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
                                  self.compare_rx_tx_packet_T_Encaps)

        # log the localsid counters
        self.logger.info(self.vapi.cli("show sr localsid"))

        # remove SR steering
        pol_steering.remove_vpp_config()
        self.logger.info(self.vapi.cli("show sr steering policies"))

        # remove SR Policies
        self.sr_policy.remove_vpp_config()
        self.logger.info(self.vapi.cli("show sr policies"))

        # remove FIB entries
        # done by tearDown

        # cleanup interfaces
        self.teardown_interfaces()

    @unittest.skipUnless(0, "PC to fix")
    def test_SRv6_T_Insert(self):
        """ Test SRv6 Transit.Insert behavior (IPv6 only). """
        # send traffic to one destination interface
        # source and destination are IPv6 only
        self.setup_interfaces(ipv6=[True, True])

        # configure FIB entries
        route = VppIpRoute(self, "a4::", 64,
                           [VppRoutePath(self.pg1.remote_ip6,
                                         self.pg1.sw_if_index)])
        route.add_vpp_config()

        # configure encaps IPv6 source address
        # needs to be done before SR Policy config
        # TODO: API?
        self.vapi.cli("set sr encaps source addr a3::")

        bsid = 'a3::9999:1'

        # configure SRv6 Policy
        # Note: segment list order: first -> last
        sr_policy = VppSRv6Policy(
            self, bsid=bsid,
            is_encap=0,
            sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT,
            weight=1, fib_table=0,
            segments=['a4::', 'a5::', 'a6::c7'],
            source='a3::')
        sr_policy.add_vpp_config()
        self.sr_policy = sr_policy

        # log the sr policies
        self.logger.info(self.vapi.cli("show sr policies"))

        # steer IPv6 traffic to a7::/64 into SRv6 Policy
        # use the bsid of the above self.sr_policy
        pol_steering = VppSRv6Steering(
            self,
            bsid=self.sr_policy.bsid,
            prefix="a7::", mask_width=64,
            traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6,
            sr_policy_index=0, table_id=0,
            sw_if_index=0)
        pol_steering.add_vpp_config()

        # log the sr steering policies
        self.logger.info(self.vapi.cli("show sr steering policies"))

        # create packets
        count = len(self.pg_packet_sizes)
        dst_inner = 'a7::1234'
        pkts = []

        # create IPv6 packets without SRH
        packet_header = self.create_packet_header_IPv6(dst_inner)
        # create traffic stream pg0->pg1
        pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
                                       self.pg_packet_sizes, count))

        # create IPv6 packets with SRH
        # packets with segments-left 1, active segment a7::
        packet_header = self.create_packet_header_IPv6_SRH(
            sidlist=['a8::', 'a7::', 'a6::'],
            segleft=1)
        # create traffic stream pg0->pg1
        pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
                                       self.pg_packet_sizes, count))

        # send packets and verify received packets
        self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
                                  self.compare_rx_tx_packet_T_Insert)

        # log the localsid counters
        self.logger.info(self.vapi.cli("show sr localsid"))

        # remove SR steering
        pol_steering.remove_vpp_config()
        self.logger.info(self.vapi.cli("show sr steering policies"))

        # remove SR Policies
        self.sr_policy.remove_vpp_config()
        self.logger.info(self.vapi.cli("show sr policies"))

        # remove FIB entries
        # done by tearDown

        # cleanup interfaces
        self.teardown_interfaces()

    @unittest.skipUnless(0, "PC to fix")
    def test_SRv6_T_Encaps_IPv4(self):
        """ Test SRv6 Transit.Encaps behavior for IPv4. """
        # send traffic to one destination interface
        # source interface is IPv4 only
        # destination interface is IPv6 only
        self.setup_interfaces(ipv6=[False, True], ipv4=[True, False])

        # configure FIB entries
        route = VppIpRoute(self, "a4::", 64,
                           [VppRoutePath(self.pg1.remote_ip6,
                                         self.pg1.sw_if_index)])
        route.add_vpp_config()

        # configure encaps IPv6 source address
        # needs to be done before SR Policy config
        # TODO: API?
        self.vapi.cli("set sr encaps source addr a3::")

        bsid = 'a3::9999:1'

        # configure SRv6 Policy
        # Note: segment list order: first -> last
        sr_policy = VppSRv6Policy(
            self, bsid=bsid,
            is_encap=1,
            sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT,
            weight=1, fib_table=0,
            segments=['a4::', 'a5::', 'a6::c7'],
            source='a3::')
        sr_policy.add_vpp_config()
        self.sr_policy = sr_policy

        # log the sr policies
        self.logger.info(self.vapi.cli("show sr policies"))

        # steer IPv4 traffic to 7.1.1.0/24 into SRv6 Policy
        # use the bsid of the above self.sr_policy
        pol_steering = VppSRv6Steering(
            self,
            bsid=self.sr_policy.bsid,
            prefix="7.1.1.0", mask_width=24,
            traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV4,
            sr_policy_index=0, table_id=0,
            sw_if_index=0)
        pol_steering.add_vpp_config()

        # log the sr steering policies
        self.logger.info(self.vapi.cli("show sr steering policies"))

        # create packets
        count = len(self.pg_packet_sizes)
        dst_inner = '7.1.1.123'
        pkts = []

        # create IPv4 packets
        packet_header = self.create_packet_header_IPv4(dst_inner)
        # create traffic stream pg0->pg1
        pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
                                       self.pg_packet_sizes, count))

        # send packets and verify received packets
        self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
                                  self.compare_rx_tx_packet_T_Encaps_IPv4)

        # log the localsid counters
        self.logger.info(self.vapi.cli("show sr localsid"))

        # remove SR steering
        pol_steering.remove_vpp_config()
        self.logger.info(self.vapi.cli("show sr steering policies"))

        # remove SR Policies
        self.sr_policy.remove_vpp_config()
        self.logger.info(self.vapi.cli("show sr policies"))

        # remove FIB entries
        # done by tearDown

        # cleanup interfaces
        self.teardown_interfaces()

    @unittest.skip("VPP crashes after running this test")
    def test_SRv6_T_Encaps_L2(self):
        """ Test SRv6 Transit.Encaps behavior for L2. """
General Notes
=============

All CSIT test results listed in this report are sourced and auto-generated
from :file:`output.xml` :abbr:`RF (Robot Framework)` files resulting from
:abbr:`LF (Linux Foundation)` FD.io Jenkins job executions against
|vpp-release| release artifacts. References are provided to the original
:abbr:`LF (Linux Foundation)` FD.io Jenkins job results. However, as
:abbr:`LF (Linux Foundation)` FD.io Jenkins infrastructure does not
automatically archive all jobs (a history record is kept for the last 30 days
or 40 jobs only), additional references are provided to the :abbr:`RF (Robot
Framework)` result files archived in the FD.io Nexus online storage system.

The FD.io CSIT project currently covers multiple FD.io system and sub-system
testing areas, and this is reflected in this report, where each testing area
is listed separately, as follows:

#. **VPP Performance Tests** - VPP performance tests are executed in physical
   FD.io testbeds, focusing on VPP network data plane performance at this
   stage, both for Phy-to-Phy (NIC-to-NIC) and Phy-to-VM-to-Phy
   (NIC-to-VM-to-NIC) forwarding topologies. Tests cover a range of NICs, 10GE
   and 40GE interfaces, and a range of multi-thread and multi-core
   configurations. VPP application runs in host user-mode. TRex is used as a
   traffic generator.

#. **Container memif connections** - VPP memif virtual interface (shared
   memory interface) tests to interconnect VPP instances. VPP vswitch instance
   runs in bare-metal user-mode handling Intel x520 NIC 10GbE interfaces and
   connecting over memif (Master side) virtual interfaces to more instances of
   VPP running in LXC or in Docker Containers, both with memif virtual
   interfaces (Slave side). Tests cover a range of NICs, 10GE and 40GE
   interfaces, and a range of multi-thread and multi-core configurations. VPP
   application runs in host user-mode. TRex is used as a traffic generator.

#. **Container Orchestrated Performance Tests** - CSIT |release| introduced
   new tests of Container topologies connected over the memif virtual
   interface (shared memory interface). For these tests, the VPP vswitch
   instance runs in a Docker Container handling Intel x520 NIC 10GbE
   interfaces and connecting over memif (Master side) virtual interfaces to
   more instances of VPP running in Docker Containers with memif virtual
   interfaces (Slave side). Tests cover a range of multi-thread and multi-core
   configurations. VPP application runs in host user-mode. TRex is used as a
   traffic generator.

#. **DPDK Performance Tests** - VPP uses DPDK code to control and drive the
   NICs and physical interfaces. Testpmd tests are used as a baseline to
   profile the DPDK sub-system of VPP. DPDK performance tests are executed in
   physical FD.io testbeds, focusing on Testpmd/L3FWD data plane performance
   for Phy-to-Phy (NIC-to-NIC) topologies. Tests cover a range of NICs, 10GE
   and 40GE interfaces, and a range of multi-thread and multi-core
   configurations. Testpmd/L3FWD application runs in host user-mode. TRex is
   used as a traffic generator.

#. **VPP Functional Tests** - VPP functional tests are executed in virtual
   FD.io testbeds, focusing on VPP packet processing functionality, including
   network data plane and in-line control plane. Tests cover vNIC-to-vNIC and
   vNIC-to-VM-to-vNIC forwarding topologies. Scapy is used as a traffic
   generator.

#. **Honeycomb Functional Tests** - Honeycomb functional tests are executed in
   virtual FD.io testbeds, focusing on Honeycomb management and programming
   functionality of VPP. Tests cover a range of CRUD operations executed
   against VPP.

#. **Honeycomb Performance Tests** - Honeycomb performance tests are executed
   in physical FD.io testbeds, focusing on the performance of Honeycomb
   management and programming functionality of VPP. Tests cover a range of
   CRUD operations executed against VPP.

#. **NSH_SFC Functional Tests** - NSH_SFC functional tests are executed in
   virtual FD.io testbeds, focusing on the NSH_SFC functionality of VPP. Tests
   cover a range of CRUD operations executed against VPP.

In addition to the above, the CSIT |release| report also includes VPP unit
test results. VPP unit tests are developed within the FD.io VPP project and,
as they complement CSIT system functional tests, they are provided mainly as
a reference and to give a more complete view of the automated testing
executed against |vpp-release|.

The FD.io CSIT system is developed using two main coding platforms:
:abbr:`RF (Robot Framework)` and Python. CSIT |release| source code for the
executed test suites is available in the CSIT branch |release| in the
directory :file:`./tests/<name_of_the_test_suite>`. A local copy of the CSIT
source code can be obtained by cloning the CSIT git repository:
:command:`git clone https://gerrit.fd.io/r/csit`. The CSIT testing virtual
environment can be run on a local computer workstation (laptop, server) using
Vagrant by following the instructions in `CSIT tutorials
<https://wiki.fd.io/view/CSIT#Tutorials>`_.