#!/usr/bin/env python import unittest import binascii from socket import AF_INET6 from framework import VppTestCase, VppTestRunner from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto, VppIpTable from vpp_srv6 import SRv6LocalSIDBehaviors, VppSRv6LocalSID, VppSRv6Policy, \ SRv6PolicyType, VppSRv6Steering, SRv6PolicySteeringTypes from scapy.packet import Raw from scapy.layers.l2 import Ether, Dot1Q from scapy.layers.inet6 import IPv6, UDP, IPv6ExtHdrSegmentRouting from scapy.layers.inet import IP, UDP from scapy.utils import inet_pton, inet_ntop from util import ppp class TestSRv6(VppTestCase): """ SRv6 Test Case """ @classmethod def setUpClass(self): super(TestSRv6, self).setUpClass() def setUp(self): """ Perform test setup before each test case. """ super(TestSRv6, self).setUp() # packet sizes, inclusive L2 overhead self.pg_packet_sizes = [64, 512, 1518, 9018] # reset packet_infos self.reset_packet_infos() def tearDown(self): """ Clean up test setup after each test case. """ self.teardown_interfaces() super(TestSRv6, self).tearDown() def configure_interface(self, interface, ipv6=False, ipv4=False, ipv6_table_id=0, ipv4_table_id=0): """ Configure interface. :param ipv6: configure IPv6 on interface :param ipv4: configure IPv4 on interface :param ipv6_table_id: FIB table_id for IPv6 :param ipv4_table_id: FIB table_id for IPv4 """ self.logger.debug("Configuring interface %s" % (interface.name)) if ipv6: self.logger.debug("Configuring IPv6") interface.set_table_ip6(ipv6_table_id) interface.config_ip6() interface.resolve_ndp(timeout=5) if ipv4: self.logger.debug("Configuring IPv4") interface.set_table_ip4(ipv4_table_id) interface.config_ip4() interface.resolve_arp() interface.admin_up() def setup_interfaces(self, ipv6=[], ipv4=[], ipv6_table_id=[], ipv4_table_id=[]): """ Create and configure interfaces. 
:param ipv6: list of interface IPv6 capabilities :param ipv4: list of interface IPv4 capabilities :param ipv6_table_id: list of intf IPv6 FIB table_ids :param ipv4_table_id: list of intf IPv4 FIB table_ids :returns: List of created interfaces. """ # how many interfaces? if len(ipv6): count = len(ipv6) else: count = len(ipv4) self.logger.debug("Creating and configuring %d interfaces" % (count)) # fill up ipv6 and ipv4 lists if needed # not enabled (False) is the default if len(ipv6) < count: ipv6 += (count - len(ipv6)) * [False] if len(ipv4) < count: ipv4 += (count - len(ipv4)) * [False] # fill up table_id lists if needed # table_id 0 (global) is the default if len(ipv6_table_id) < count: ipv6_table_id += (count - len(ipv6_table_id)) * [0] if len(ipv4_table_id) < count: ipv4_table_id += (count - len(ipv4_table_id)) * [0] # create 'count' pg interfaces self.create_pg_interfaces(range(count)) # setup all interfaces for i in range(count): intf = self.pg_interfaces[i] self.configure_interface(intf, ipv6[i], ipv4[i], ipv6_table_id[i], ipv4_table_id[i]) if any(ipv6): self.logger.debug(self.vapi.cli("show ip6 neighbors")) if any(ipv4): self.logger.debug(self.vapi.cli("show ip arp")) self.logger.debug(self.vapi.cli("show interface")) self.logger.debug(self.vapi.cli("show hardware")) return self.pg_interfaces def teardown_interfaces(self): """ Unconfigure and bring down interface. """ self.logger.debug("Tearing down interfaces") # tear down all interfaces # AFAIK they cannot be deleted for i in self.pg_interfaces: self.logger.debug("Tear down interface %s" % (i.name)) i.admin_down() i.unconfig() i.set_table_ip4(0) i.set_table_ip6(0) @unittest.skipUnless(0, "PC to fix") def test_SRv6_T_Encaps(self): """ Test SRv6 Transit.Encaps behavior for IPv6. 
""" # send traffic to one destination interface # source and destination are IPv6 only self.setup_interfaces(ipv6=[True, True]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure encaps IPv6 source address # needs to be done before SR Policy config # TODO: API? self.vapi.cli("set sr encaps source addr a3::") bsid = 'a3::9999:1' # configure SRv6 Policy # Note: segment list order: first -> last sr_policy = VppSRv6Policy( self, bsid=bsid, is_encap=1, sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, weight=1, fib_table=0, segments=['a4::', 'a5::', 'a6::c7'], source='a3::') sr_policy.add_vpp_config() self.sr_policy = sr_policy # log the sr policies self.logger.info(self.vapi.cli("show sr policies")) # steer IPv6 traffic to a7::/64 into SRv6 Policy # use the bsid of the above self.sr_policy pol_steering = VppSRv6Steering( self, bsid=self.sr_policy.bsid, prefix="a7::", mask_width=64, traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6, sr_policy_index=0, table_id=0, sw_if_index=0) pol_steering.add_vpp_config() # log the sr steering policies self.logger.info(self.vapi.cli("show sr steering policies")) # create packets count = len(self.pg_packet_sizes) dst_inner = 'a7::1234' pkts = [] # create IPv6 packets without SRH packet_header = self.create_packet_header_IPv6(dst_inner) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # create IPv6 packets with SRH # packets with segments-left 1, active segment a7:: packet_header = self.create_packet_header_IPv6_SRH( sidlist=['a8::', 'a7::', 'a6::'], segleft=1) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # create IPv6 packets with SRH and IPv6 # packets with segments-left 1, active segment a7:: packet_header = 
self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a8::', 'a7::', 'a6::'], segleft=1) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_T_Encaps) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SR steering pol_steering.remove_vpp_config() self.logger.info(self.vapi.cli("show sr steering policies")) # remove SR Policies self.sr_policy.remove_vpp_config() self.logger.info(self.vapi.cli("show sr policies")) # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() @unittest.skipUnless(0, "PC to fix") def test_SRv6_T_Insert(self): """ Test SRv6 Transit.Insert behavior (IPv6 only). """ # send traffic to one destination interface # source and destination are IPv6 only self.setup_interfaces(ipv6=[True, True]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure encaps IPv6 source address # needs to be done before SR Policy config # TODO: API? 
self.vapi.cli("set sr encaps source addr a3::") bsid = 'a3::9999:1' # configure SRv6 Policy # Note: segment list order: first -> last sr_policy = VppSRv6Policy( self, bsid=bsid, is_encap=0, sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, weight=1, fib_table=0, segments=['a4::', 'a5::', 'a6::c7'], source='a3::') sr_policy.add_vpp_config() self.sr_policy = sr_policy # log the sr policies self.logger.info(self.vapi.cli("show sr policies")) # steer IPv6 traffic to a7::/64 into SRv6 Policy # use the bsid of the above self.sr_policy pol_steering = VppSRv6Steering( self, bsid=self.sr_policy.bsid, prefix="a7::", mask_width=64, traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6, sr_policy_index=0, table_id=0, sw_if_index=0) pol_steering.add_vpp_config() # log the sr steering policies self.logger.info(self.vapi.cli("show sr steering policies")) # create packets count = len(self.pg_packet_sizes) dst_inner = 'a7::1234' pkts = [] # create IPv6 packets without SRH packet_header = self.create_packet_header_IPv6(dst_inner) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # create IPv6 packets with SRH # packets with segments-left 1, active segment a7:: packet_header = self.create_packet_header_IPv6_SRH( sidlist=['a8::', 'a7::', 'a6::'], segleft=1) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_T_Insert) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SR steering pol_steering.remove_vpp_config() self.logger.info(self.vapi.cli("show sr steering policies")) # remove SR Policies self.sr_policy.remove_vpp_config() self.logger.info(self.vapi.cli("show sr policies")) # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() 
@unittest.skipUnless(0, "PC to fix") def test_SRv6_T_Encaps_IPv4(self): """ Test SRv6 Transit.Encaps behavior for IPv4. """ # send traffic to one destination interface # source interface is IPv4 only # destination interface is IPv6 only self.setup_interfaces(ipv6=[False, True], ipv4=[True, False]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure encaps IPv6 source address # needs to be done before SR Policy config # TODO: API? self.vapi.cli("set sr encaps source addr a3::") bsid = 'a3::9999:1' # configure SRv6 Policy # Note: segment list order: first -> last sr_policy = VppSRv6Policy( self, bsid=bsid, is_encap=1, sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, weight=1, fib_table=0, segments=['a4::', 'a5::', 'a6::c7'], source='a3::') sr_policy.add_vpp_config() self.sr_policy = sr_policy # log the sr policies self.logger.info(self.vapi.cli("show sr policies")) # steer IPv4 traffic to 7.1.1.0/24 into SRv6 Policy # use the bsid of the above self.sr_policy pol_steering = VppSRv6Steering( self, bsid=self.sr_policy.bsid, prefix="7.1.1.0", mask_width=24, traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV4, sr_policy_index=0, table_id=0, sw_if_index=0) pol_steering.add_vpp_config() # log the sr steering policies self.logger.info(self.vapi.cli("show sr steering policies")) # create packets count = len(self.pg_packet_sizes) dst_inner = '7.1.1.123' pkts = [] # create IPv4 packets packet_header = self.create_packet_header_IPv4(dst_inner) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_T_Encaps_IPv4) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SR steering pol_steering.remove_vpp_config() 
self.logger.info(self.vapi.cli("show sr steering policies")) # remove SR Policies self.sr_policy.remove_vpp_config() self.logger.info(self.vapi.cli("show sr policies")) # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() @unittest.skip("VPP crashes after running this test") def test_SRv6_T_Encaps_L2(self): """ Test SRv6 Transit.Encaps behavior for L2. """ # send traffic to one destination interface # source interface is IPv4 only TODO? # destination interface is IPv6 only self.setup_interfaces(ipv6=[False, True], ipv4=[False, False]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure encaps IPv6 source address # needs to be done before SR Policy config # TODO: API? self.vapi.cli("set sr encaps source addr a3::") bsid = 'a3::9999:1' # configure SRv6 Policy # Note: segment list order: first -> last sr_policy = VppSRv6Policy( self, bsid=bsid, is_encap=1, sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, weight=1, fib_table=0, segments=['a4::', 'a5::', 'a6::c7'], source='a3::') sr_policy.add_vpp_config() self.sr_policy = sr_policy # log the sr policies self.logger.info(self.vapi.cli("show sr policies")) # steer L2 traffic into SRv6 Policy # use the bsid of the above self.sr_policy pol_steering = VppSRv6Steering( self, bsid=self.sr_policy.bsid, prefix="::", mask_width=0, traffic_type=SRv6PolicySteeringTypes.SR_STEER_L2, sr_policy_index=0, table_id=0, sw_if_index=self.pg0.sw_if_index) pol_steering.add_vpp_config() # log the sr steering policies self.logger.info(self.vapi.cli("show sr steering policies")) # create packets count = len(self.pg_packet_sizes) pkts = [] # create L2 packets without dot1q header packet_header = self.create_packet_header_L2() # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # create L2 packets 
with dot1q header packet_header = self.create_packet_header_L2(vlan=123) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_T_Encaps_L2) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SR steering pol_steering.remove_vpp_config() self.logger.info(self.vapi.cli("show sr steering policies")) # remove SR Policies self.sr_policy.remove_vpp_config() self.logger.info(self.vapi.cli("show sr policies")) # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() def test_SRv6_End(self): """ Test SRv6 End (without PSP) behavior. """ # send traffic to one destination interface # source and destination interfaces are IPv6 only self.setup_interfaces(ipv6=[True, True]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure SRv6 localSID End without PSP behavior localsid = VppSRv6LocalSID( self, localsid={'addr': 'A3::0'}, behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_END, nh_addr4='0.0.0.0', nh_addr6='::', end_psp=0, sw_if_index=0, vlan_index=0, fib_table=0) localsid.add_vpp_config() # log the localsids self.logger.debug(self.vapi.cli("show sr localsid")) # create IPv6 packets with SRH (SL=2, SL=1, SL=0) # send one packet per SL value per packet size # SL=0 packet with localSID End with USP needs 2nd SRH count = len(self.pg_packet_sizes) dst_inner = 'a4::1234' pkts = [] # packets with segments-left 2, active segment a3:: packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a5::', 'a4::', 'a3::'], segleft=2) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # packets with 
segments-left 1, active segment a3:: packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a4::', 'a3::', 'a2::'], segleft=1) # add to traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # TODO: test behavior with SL=0 packet (needs 2*SRH?) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_End) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SRv6 localSIDs localsid.remove_vpp_config() # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() def test_SRv6_End_with_PSP(self): """ Test SRv6 End with PSP behavior. """ # send traffic to one destination interface # source and destination interfaces are IPv6 only self.setup_interfaces(ipv6=[True, True]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure SRv6 localSID End with PSP behavior localsid = VppSRv6LocalSID( self, localsid={'addr': 'A3::0'}, behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_END, nh_addr4='0.0.0.0', nh_addr6='::', end_psp=1, sw_if_index=0, vlan_index=0, fib_table=0) localsid.add_vpp_config() # log the localsids self.logger.debug(self.vapi.cli("show sr localsid")) # create IPv6 packets with SRH (SL=2, SL=1) # send one packet per SL value per packet size # SL=0 packet with localSID End with PSP is dropped count = len(self.pg_packet_sizes) dst_inner = 'a4::1234' pkts = [] # packets with segments-left 2, active segment a3:: packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a5::', 'a4::', 'a3::'], segleft=2) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # packets with segments-left 1, active segment a3:: packet_header = 
self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a4::', 'a3::', 'a2::'], segleft=1) # add to traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_End_PSP) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SRv6 localSIDs localsid.remove_vpp_config() # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() def test_SRv6_End_X(self): """ Test SRv6 End.X (without PSP) behavior. """ # create three interfaces (1 source, 2 destinations) # source and destination interfaces are IPv6 only self.setup_interfaces(ipv6=[True, True, True]) # configure FIB entries # a4::/64 via pg1 and pg2 route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6), VppRoutePath(self.pg2.remote_ip6, self.pg2.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() self.logger.debug(self.vapi.cli("show ip6 fib")) # configure SRv6 localSID End.X without PSP behavior # End.X points to interface pg1 localsid = VppSRv6LocalSID( self, localsid={'addr': 'A3::C4'}, behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_X, nh_addr4='0.0.0.0', nh_addr6=self.pg1.remote_ip6, end_psp=0, sw_if_index=self.pg1.sw_if_index, vlan_index=0, fib_table=0) localsid.add_vpp_config() # log the localsids self.logger.debug(self.vapi.cli("show sr localsid")) # create IPv6 packets with SRH (SL=2, SL=1) # send one packet per SL value per packet size # SL=0 packet with localSID End with PSP is dropped count = len(self.pg_packet_sizes) dst_inner = 'a4::1234' pkts = [] # packets with segments-left 2, active segment a3::c4 packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a5::', 'a4::', 'a3::c4'], segleft=2) # create traffic stream pg0->pg1 
pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # packets with segments-left 1, active segment a3::c4 packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a4::', 'a3::c4', 'a2::'], segleft=1) # add to traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets # using same comparison function as End (no PSP) self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_End) # assert nothing was received on the other interface (pg2) self.pg2.assert_nothing_captured("mis-directed packet(s)") # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SRv6 localSIDs localsid.remove_vpp_config() # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() def test_SRv6_End_X_with_PSP(self): """ Test SRv6 End.X with PSP behavior. """ # create three interfaces (1 source, 2 destinations) # source and destination interfaces are IPv6 only self.setup_interfaces(ipv6=[True, True, True]) # configure FIB entries # a4::/64 via pg1 and pg2 route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6), VppRoutePath(self.pg2.remote_ip6, self.pg2.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure SRv6 localSID End with PSP behavior localsid = VppSRv6LocalSID( self, localsid={'addr': 'A3::C4'}, behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_X, nh_addr4='0.0.0.0', nh_addr6=self.pg1.remote_ip6, end_psp=1, sw_if_index=self.pg1.sw_if_index, vlan_index=0, fib_table=0) localsid.add_vpp_config() # log the localsids self.logger.debug(self.vapi.cli("show sr localsid")) # create IPv6 packets with SRH (SL=2, SL=1) # send one packet per SL value per packet size # SL=0 packet with localSID End with PSP is dropped count = len(self.pg_packet_sizes) dst_inner = 
'a4::1234' pkts = [] # packets with segments-left 2, active segment a3:: packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a5::', 'a4::', 'a3::c4'], segleft=2) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # packets with segments-left 1, active segment a3:: packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a4::', 'a3::c4', 'a2::'], segleft=1) # add to traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets # using same comparison function as End with PSP self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_End_PSP) # assert nothing was received on the other interface (pg2) self.pg2.assert_nothing_captured("mis-directed packet(s)") # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SRv6 localSIDs localsid.remove_vpp_config() # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() def test_SRv6_End_DX6(self): """ Test SRv6 End.DX6 behavior. 
""" # send traffic to one destination interface # source and destination interfaces are IPv6 only self.setup_interfaces(ipv6=[True, True]) # configure SRv6 localSID End.DX6 behavior localsid = VppSRv6LocalSID( self, localsid={'addr': 'A3::C4'}, behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DX6, nh_addr4='0.0.0.0', nh_addr6=self.pg1.remote_ip6, end_psp=0, sw_if_index=self.pg1.sw_if_index, vlan_index=0, fib_table=0) localsid.add_vpp_config() # log the localsids self.logger.debug(self.vapi.cli("show sr localsid")) # create IPv6 packets with SRH (SL=0) # send one packet per packet size count = len(self.pg_packet_sizes) dst_inner = 'a4::1234' # inner header destination address pkts = [] # packets with SRH, segments-left 0, active segment a3::c4 packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a3::c4', 'a2::', 'a1::'], segleft=0) # add to traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # packets without SRH, IPv6 in IPv6 # outer IPv6 dest addr is the localsid End.DX6 packet_header = self.create_packet_header_IPv6_IPv6( dst_inner, dst_outer='a3::c4') # add to traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_End_DX6) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SRv6 localSIDs localsid.remove_vpp_config() # cleanup interfaces self.teardown_interfaces() def test_SRv6_End_DT6(self): """ Test SRv6 End.DT6 behavior. 
""" # create three interfaces (1 source, 2 destinations) # all interfaces are IPv6 only # source interface in global FIB (0) # destination interfaces in global and vrf vrf_1 = 1 ipt = VppIpTable(self, vrf_1, is_ip6=True) ipt.add_vpp_config() self.setup_interfaces(ipv6=[True, True, True], ipv6_table_id=[0, 0, vrf_1]) # configure FIB entries # a4::/64 is reachable # via pg1 in table 0 (global) # and via pg2 in table vrf_1 route0 = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6, nh_table_id=0)], table_id=0, is_ip6=1) route0.add_vpp_config() route1 = VppIpRoute(self, "a4::", 64, [V }
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_session_h__
#define __included_session_h__

#include <vnet/session/stream_session.h>
#include <vnet/session/session_lookup.h>
#include <vnet/session/transport_interface.h>
#include <vnet/session/session_debug.h>
#include <vnet/session/segment_manager.h>
#include <svm/queue.h>

/* Sentinel for a missing half-open session table entry. */
#define HALF_OPEN_LOOKUP_INVALID_VALUE ((u64)~0)
/* Generic invalid pool/vector index. */
#define INVALID_INDEX ((u32)~0)
/* Reserved listener index marking a proxy listener (all-ones minus one,
 * so it cannot collide with INVALID_INDEX). */
#define SESSION_PROXY_LISTENER_INDEX ((u32)~0 - 1)
/* Upper-32-bit prefix identifying local session handles,
 * cf. session_handle_is_local(). */
#define SESSION_LOCAL_HANDLE_PREFIX 0x7FFFFFFF

/* TODO decide how much since we have pre-data as well */
#define MAX_HDRS_LEN    100	/* Max number of bytes for headers */

typedef enum
{
  FIFO_EVENT_APP_RX,
  FIFO_EVENT_APP_TX,
  FIFO_EVENT_TIMEOUT,
  FIFO_EVENT_DISCONNECT,
  FIFO_EVENT_BUILTIN_RX,
  FIFO_EVENT_RPC,
} fifo_event_type_t;

/**
 * Map a fifo event type to its printable name.
 *
 * Any value outside the enum range yields "UNKNOWN FIFO EVENT".
 */
static inline const char *
fifo_event_type_str (fifo_event_type_t et)
{
  /* Designated initializers keep this table in sync with the enum even
   * if members are later reordered. */
  static const char *type_names[] = {
    [FIFO_EVENT_APP_RX] = "FIFO_EVENT_APP_RX",
    [FIFO_EVENT_APP_TX] = "FIFO_EVENT_APP_TX",
    [FIFO_EVENT_TIMEOUT] = "FIFO_EVENT_TIMEOUT",
    [FIFO_EVENT_DISCONNECT] = "FIFO_EVENT_DISCONNECT",
    [FIFO_EVENT_BUILTIN_RX] = "FIFO_EVENT_BUILTIN_RX",
    [FIFO_EVENT_RPC] = "FIFO_EVENT_RPC",
  };
  unsigned int idx = (unsigned int) et;

  if (idx < sizeof (type_names) / sizeof (type_names[0]) && type_names[idx])
    return type_names[idx];
  return "UNKNOWN FIFO EVENT";
}

/* X-macro list of session input-path errors: each entry expands to an
 * error symbol plus its counter description, keeping the error enum and
 * the per-node error string table in sync. */
#define foreach_session_input_error                                    	\
_(NO_SESSION, "No session drops")                                       \
_(NO_LISTENER, "No listener for dst port drops")                        \
_(ENQUEUED, "Packets pushed into rx fifo")                              \
_(NOT_READY, "Session not ready packets")                               \
_(FIFO_FULL, "Packets dropped for lack of rx fifo space")               \
_(EVENT_FIFO_FULL, "Events not sent for lack of event fifo space")      \
_(API_QUEUE_FULL, "Sessions not created for lack of API queue space")   \
_(NEW_SEG_NO_SPACE, "Created segment, couldn't allocate a fifo pair")   \
_(NO_SPACE, "Couldn't allocate a fifo pair")				\
_(SEG_CREATE, "Couldn't create a new segment")

/* Error codes generated from the list above as SESSION_ERROR_<sym>;
 * SESSION_N_ERROR is the total count. */
typedef enum
{
#define _(sym,str) SESSION_ERROR_##sym,
  foreach_session_input_error
#undef _
    SESSION_N_ERROR,
} session_error_t;

/* Argument bundle carried by a FIFO_EVENT_RPC fifo event. */
typedef struct
{
  void *fp;			/* presumably the function to invoke — confirm at call site */
  void *arg;			/* opaque argument handed to fp */
} rpc_args_t;

/* 64-bit session handle; session_handle() packs the thread index into
 * the upper 32 bits and the session index into the lower 32 bits. */
typedef u64 session_handle_t;

/* Event queued on a vpp fifo event queue.  The union payload is
 * interpreted according to event_type (a fifo_event_type_t value). */
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  union
    {
      svm_fifo_t * fifo;		/* rx/tx fifo events */
      session_handle_t session_handle;	/* handle-addressed events */
      rpc_args_t rpc_args;		/* FIFO_EVENT_RPC payload */
    };
  u8 event_type;	/* fifo_event_type_t */
  u8 postponed;		/* NOTE(review): presumably marks deferred handling
			   (cf. pending_disconnects) — confirm in session node */
}) session_fifo_event_t;
/* *INDENT-ON* */

/* Forward definition */
typedef struct _session_manager_main session_manager_main_t;

/* Per-transport tx function invoked by the session node for event e0 on
 * session s0; reports the number of tx packets via *n_tx_pkts. */
typedef int
  (session_fifo_rx_fn) (vlib_main_t * vm, vlib_node_runtime_t * node,
			session_manager_main_t * smm,
			session_fifo_event_t * e0, stream_session_t * s0,
			u32 thread_index, int *n_tx_pkts);

/* Available tx strategies: peek (leaves data in the fifo), dequeue, and
 * an internal variant; selected per transport via smm->session_tx_fns. */
extern session_fifo_rx_fn session_tx_fifo_peek_and_snd;
extern session_fifo_rx_fn session_tx_fifo_dequeue_and_snd;
extern session_fifo_rx_fn session_tx_fifo_dequeue_internal;

/* Look up whether fifo f has an event pending in the session node;
 * copies it to *e on success. NOTE(review): semantics inferred from the
 * name — confirm against the implementation. */
u8 session_node_lookup_fifo_event (svm_fifo_t * f, session_fifo_event_t * e);

/* Global session-layer state: per-worker session pools, event queues and
 * tx machinery, plus startup configuration parameters. */
struct _session_manager_main
{
  /** Per worker thread session pools */
  stream_session_t **sessions;

  /** Per worker-thread session pool peekers rw locks */
  clib_rwlock_t *peekers_rw_locks;

  /** Pool of listen sessions. Same type as stream sessions to ease lookups */
  stream_session_t **listen_sessions;

  /** Per-proto, per-worker enqueue epoch counters */
  u32 *current_enqueue_epoch[TRANSPORT_N_PROTO];

  /** Per-proto, per-worker thread vector of sessions to enqueue */
  u32 **session_to_enqueue[TRANSPORT_N_PROTO];

  /** per-worker tx buffer free lists */
  u32 **tx_buffers;

  /** Per worker-thread vector of partially read events */
  session_fifo_event_t **free_event_vector;

  /** per-worker active event vectors */
  session_fifo_event_t **pending_event_vector;

  /** per-worker postponed disconnects */
  session_fifo_event_t **pending_disconnects;

  /** vpp fifo event queue */
  svm_queue_t **vpp_event_queues;

  /** Event queues memfd segment initialized only if so configured */
  ssvm_private_t evt_qs_segment;

  /** Unique segment name counter */
  u32 unique_segment_name_counter;

  /** Per transport rx function that can either dequeue or peek */
  session_fifo_rx_fn **session_tx_fns;

  /** Per session type output nodes. Could optimize to group nodes by
   * fib but lookup would then require session type parsing in session node.
   * Trade memory for speed, for now */
  u32 *session_type_to_next;

  /** Session manager is enabled */
  u8 is_enabled;

  /** vpp fifo event queue configured length */
  u32 configured_event_queue_length;

  /*
   * Config parameters
   */

  /** Session ssvm segment configs*/
  uword session_baseva;
  uword session_va_space_size;
  u32 evt_qs_segment_size;
  u8 evt_qs_use_memfd_seg;

  /** Session table size parameters */
  u32 configured_v4_session_table_buckets;
  u32 configured_v4_session_table_memory;
  u32 configured_v4_halfopen_table_buckets;
  u32 configured_v4_halfopen_table_memory;
  u32 configured_v6_session_table_buckets;
  u32 configured_v6_session_table_memory;
  u32 configured_v6_halfopen_table_buckets;
  u32 configured_v6_halfopen_table_memory;

  /** Transport table (preallocation) size parameters */
  u32 local_endpoints_table_memory;
  u32 local_endpoints_table_buckets;

  /** Preallocate session config parameter */
  u32 preallocated_sessions;

#if SESSION_DBG
  /**
   * last event poll time by thread
   * Debug only. Will cause false cache-line sharing as-is
   */
  f64 *last_event_poll_by_thread;
#endif

};

extern session_manager_main_t session_manager_main;
extern vlib_node_registration_t session_queue_node;

/*
 * Session manager function
 */
/** Return a pointer to the global session manager singleton. */
always_inline session_manager_main_t *
vnet_get_session_manager_main ()
{
  return &session_manager_main;
}

/** Sanity-check that pool slot si on thread_index holds a coherent
 * session: its back-pointers must agree with its pool location.
 * Returns 1 when consistent, 0 otherwise.
 * NOTE: the fifo master index/thread cross-checks are intentionally
 * disabled. */
always_inline u8
stream_session_is_valid (u32 si, u8 thread_index)
{
  stream_session_t *s =
    pool_elt_at_index (session_manager_main.sessions[thread_index], si);

  return (s->thread_index == thread_index && s->session_index == si);
}

/* Allocate a session from thread_index's session pool. */
stream_session_t *session_alloc (u32 thread_index);
/* Allocate rx/tx fifos for s from segment manager sm.
 * NOTE(review): int return presumably 0 on success — confirm. */
int session_alloc_fifos (segment_manager_t * sm, stream_session_t * s);
/* Return s to its session pool. */
void session_free (stream_session_t * s);

/** Look up a session by pool index on a given thread; asserts validity. */
always_inline stream_session_t *
session_get (u32 si, u32 thread_index)
{
  session_manager_main_t *smm = &session_manager_main;
  ASSERT (stream_session_is_valid (si, thread_index));
  return pool_elt_at_index (smm->sessions[thread_index], si);
}

/** Like session_get, but returns 0 for out-of-range or freed indices. */
always_inline stream_session_t *
session_get_if_valid (u64 si, u32 thread_index)
{
  session_manager_main_t *smm = &session_manager_main;

  if (thread_index >= vec_len (smm->sessions))
    return 0;

  if (pool_is_free_index (smm->sessions[thread_index], si))
    return 0;

  ASSERT (stream_session_is_valid (si, thread_index));
  return pool_elt_at_index (smm->sessions[thread_index], si);
}

/** Pack a session's thread index (high 32 bits) and pool index (low 32
 *  bits) into a single 64-bit handle. */
always_inline session_handle_t
session_handle (stream_session_t * s)
{
  u64 thread = s->thread_index;
  u64 index = s->session_index;
  return (thread << 32) | index;
}

/** Extract the session pool index (low 32 bits) from a handle. */
always_inline u32
session_index_from_handle (session_handle_t handle)
{
  return (u32) (handle & 0xFFFFFFFF);
}

/** Extract the owning thread index (high 32 bits) from a handle. */
always_inline u32
session_thread_from_handle (session_handle_t handle)
{
  return (u32) (handle >> 32);
}

/** Split a handle into its session-index and thread-index halves. */
always_inline void
session_parse_handle (session_handle_t handle, u32 * index,
		      u32 * thread_index)
{
  *thread_index = session_thread_from_handle (handle);
  *index = session_index_from_handle (handle);
}

/** Resolve a handle to its session. No check that the index is live. */
always_inline stream_session_t *
session_get_from_handle (session_handle_t handle)
{
  u32 si, ti;
  session_parse_handle (handle, &si, &ti);
  return pool_elt_at_index (session_manager_main.sessions[ti], si);
}

/** Resolve a handle to its session, or 0 if the handle is stale. */
always_inline stream_session_t *
session_get_from_handle_if_valid (session_handle_t handle)
{
  u32 si = session_index_from_handle (handle);
  u32 ti = session_thread_from_handle (handle);
  return session_get_if_valid (si, ti);
}

/** Check whether a handle carries the local-session thread prefix. */
always_inline u8
session_handle_is_local (session_handle_t handle)
{
  u64 prefix = handle >> 32;
  return (prefix == SESSION_LOCAL_HANDLE_PREFIX) ? 1 : 0;
}

/** Transport protocol lives in the bits above bit 0 of the session type. */
always_inline transport_proto_t
session_type_transport_proto (session_type_t st)
{
  return (transport_proto_t) (st >> 1);
}

/** Bit 0 of the session type flags an IPv4 (vs IPv6) session. */
always_inline u8
session_type_is_ip4 (session_type_t st)
{
  return (u8) (st & 1);
}

/**
 * Transport protocol of a session.
 *
 * Delegates to session_type_transport_proto so the session-type bit
 * encoding (proto in the upper bits, is_ip4 in bit 0) is decoded in
 * exactly one place instead of being duplicated here.
 */
always_inline transport_proto_t
session_get_transport_proto (stream_session_t * s)
{
  return session_type_transport_proto (s->session_type);
}

/**
 * FIB protocol (IPv4/IPv6) matching the session's network layer.
 *
 * Uses session_type_is_ip4 instead of open-coding the `& 1` decode,
 * keeping the session-type encoding in one place.
 */
always_inline fib_protocol_t
session_get_fib_proto (stream_session_t * s)
{
  u8 is_ip4 = session_type_is_ip4 (s->session_type);
  return (is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6);
}

/** Encode transport proto (upper bits) and is_ip4 flag (bit 0) into a
 *  session type. */
always_inline session_type_t
session_type_from_proto_and_ip (transport_proto_t proto, u8 is_ip4)
{
  session_type_t st;
  st = (proto << 1) | is_ip4;
  return st;
}

/** A session has a transport unless its proto is TRANSPORT_PROTO_NONE. */
always_inline u8
session_has_transport (stream_session_t * s)
{
  transport_proto_t tp = session_get_transport_proto (s);
  return tp != TRANSPORT_PROTO_NONE;
}

/**
 * Acquires a lock that blocks a session pool from expanding.
 *
 * This is typically used for safely peeking into other threads'
 * pools in order to clone elements. Lock should be dropped as soon
 * as possible by calling @ref session_pool_remove_peeker.
 *
 * NOTE: Avoid using pool_elt_at_index while the lock is held because
 * it may lead to free elt bitmap expansion/contraction!
 */
always_inline void
session_pool_add_peeker (u32 thread_index)
{
  /* Peeking into our own pool needs no lock */
  if (thread_index == vlib_get_thread_index ())
    return;
  clib_rwlock_reader_lock
    (&session_manager_main.peekers_rw_locks[thread_index]);
}

always_inline void
session_pool_remove_peeker (u32 thread_index)
{
  /* No lock was taken for our own pool, so nothing to drop */
  if (thread_index == vlib_get_thread_index ())
    return;
  clib_rwlock_reader_unlock
    (&session_manager_main.peekers_rw_locks[thread_index]);
}

/**
 * Get session from handle and 'lock' pool resize if not in same thread
 *
 * Caller should drop the peek 'lock' as soon as possible.
 */
always_inline stream_session_t *
session_get_from_handle_safe (u64 handle)
{
  session_manager_main_t *smm = &session_manager_main;
  u32 thread_index = session_thread_from_handle (handle);
  u32 session_index = session_index_from_handle (handle);

  if (thread_index != vlib_get_thread_index ())
    {
      session_pool_add_peeker (thread_index);
      /* Don't use pool_elt_at_index. See @ref session_pool_add_peeker */
      return smm->sessions[thread_index] + session_index;
    }

  /* Own thread: no peer can resize the pool underneath us */
  return pool_elt_at_index (smm->sessions[thread_index], session_index);
}

/** Fetch a listening session from the per-session-type listener pool. */
always_inline stream_session_t *
stream_session_listener_get (u8 sst, u64 si)
{
  stream_session_t *pool = session_manager_main.listen_sessions[sst];
  return pool_elt_at_index (pool, si);
}

/** Pool index of a session; listeners live in a separate pool keyed by
 *  session type, all other sessions in the per-thread pool. */
always_inline u32
stream_session_get_index (stream_session_t * s)
{
  stream_session_t *pool;

  if (s->session_state == SESSION_STATE_LISTENING)
    pool = session_manager_main.listen_sessions[s->session_type];
  else
    pool = session_manager_main.sessions[s->thread_index];

  return s - pool;
}

/** Space currently available for enqueueing into the session's rx fifo. */
always_inline u32
stream_session_max_rx_enqueue (transport_connection_t * tc)
{
  stream_session_t *s;
  s = session_get (tc->s_index, tc->thread_index);
  return svm_fifo_max_enqueue (s->server_rx_fifo);
}

/** Total capacity (nitems) of the session's rx fifo. */
always_inline u32
stream_session_rx_fifo_size (transport_connection_t * tc)
{
  stream_session_t *s;
  s = session_get (tc->s_index, tc->thread_index);
  return s->server_rx_fifo->nitems;
}

/** Pool index of a session within its owning thread's session pool. */
always_inline u32
session_get_index (stream_session_t * s)
{
  stream_session_t *pool = session_manager_main.sessions[s->thread_index];
  return (u32) (s - pool);
}

/**
 * Clone a session owned by another thread into this thread's pool.
 *
 * @param session_index pool index of the source session
 * @param thread_index thread that owns the source session
 * @return the freshly allocated copy, owned by the calling thread
 *
 * The peeker lock must span the memcpy: without it the owner's pool
 * could be reallocated mid-copy and the old memory reused.
 */
always_inline stream_session_t *
session_clone_safe (u32 session_index, u32 thread_index)
{
  stream_session_t *old_s, *new_s;
  u32 current_thread_index = vlib_get_thread_index ();

  /* If during the memcpy pool is reallocated AND the memory allocator
   * decides to give the old chunk of memory to somebody in a hurry to
   * scribble something on it, we have a problem. So add this thread as
   * a session pool peeker.
   */
  session_pool_add_peeker (thread_index);
  /* Allocate on our own pool, then copy while the source pool is pinned */
  new_s = session_alloc (current_thread_index);
  old_s = session_manager_main.sessions[thread_index] + session_index;
  clib_memcpy (new_s, old_s, sizeof (*new_s));
  session_pool_remove_peeker (thread_index);
  /* Fix up ownership fields to reflect the clone's new location */
  new_s->thread_index = current_thread_index;
  new_s->session_index = session_get_index (new_s);
  return new_s;
}

transport_connection_t *session_get_transport (stream_session_t * s);

u32 stream_session_tx_fifo_max_dequeue (transport_connection_t * tc);

/* Fifo enqueue/dequeue operations driven by transports */
int
session_enqueue_stream_connection (transport_connection_t * tc,
				   vlib_buffer_t * b, u32 offset,
				   u8 queue_event, u8 is_in_order);
int session_enqueue_dgram_connection (stream_session_t * s, vlib_buffer_t * b,
				      u8 proto, u8 queue_event);
int stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
			       u32 offset, u32 max_bytes);
u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes);

/* Notifications from transports up to the session layer */
int session_stream_connect_notify (transport_connection_t * tc, u8 is_fail);
int session_dgram_connect_notify (transport_connection_t * tc,
				  u32 old_thread_index,
				  stream_session_t ** new_session);
void stream_session_init_fifos_pointers (transport_connection_t * tc,
					 u32 rx_pointer, u32 tx_pointer);

void stream_session_accept_notify (transport_connection_t * tc);
void stream_session_disconnect_notify (transport_connection_t * tc);
void stream_session_delete_notify (transport_connection_t * tc);
void stream_session_reset_notify (transport_connection_t * tc);
int stream_session_accept (transport_connection_t * tc, u32 listener_index,
			   u8 notify);
/* Session lifecycle control (open/listen/disconnect/cleanup) */
int session_open (u32 app_index, session_endpoint_t * tep, u32 opaque);
int stream_session_listen (stream_session_t * s, session_endpoint_t * tep);
int stream_session_stop_listen (stream_session_t * s);
void stream_session_disconnect (stream_session_t * s);
void stream_session_disconnect_transport (stream_session_t * s);
void stream_session_cleanup (stream_session_t * s);
void session_send_session_evt_to_thread (u64 session_handle,
					 fifo_event_type_t evt_type,
					 u32 thread_index);
ssvm_private_t *session_manager_get_evt_q_segment (void);

u8 *format_stream_session (u8 * s, va_list * args);
uword unformat_stream_session (