#!/usr/bin/env python import unittest import socket from framework import VppTestCase, VppTestRunner from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \ VppMplsIpBind, VppIpMRoute, VppMRoutePath, \ MRouteItfFlags, MRouteEntryFlags, DpoProto, VppIpTable, VppMplsTable from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface from scapy.packet import Raw from scapy.layers.l2 import Ether from scapy.layers.inet import IP, UDP, ICMP from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded from scapy.contrib.mpls import MPLS def verify_filter(capture, sent): if not len(capture) == len(sent): # filter out any IPv6 RAs from the capture for p in capture: if p.haslayer(IPv6): capture.remove(p) return capture def verify_mpls_stack(tst, rx, mpls_labels, ttl=255, num=0): # the rx'd packet has the MPLS label popped eth = rx[Ether] tst.assertEqual(eth.type, 0x8847) rx_mpls = rx[MPLS] for ii in range(len(mpls_labels)): tst.assertEqual(rx_mpls.label, mpls_labels[ii]) tst.assertEqual(rx_mpls.cos, 0) if ii == num: tst.assertEqual(rx_mpls.ttl, ttl) else: tst.assertEqual(rx_mpls.ttl, 255) if ii == len(mpls_labels) - 1: tst.assertEqual(rx_mpls.s, 1) else: # not end of stack tst.assertEqual(rx_mpls.s, 0) # pop the label to expose the next rx_mpls = rx_mpls[MPLS].payload class TestMPLS(VppTestCase): """ MPLS Test Case """ def setUp(self): super(TestMPLS, self).setUp() # create 2 pg interfaces self.create_pg_interfaces(range(4)) # setup both interfaces # assign them different tables. 
table_id = 0 self.tables = [] tbl = VppMplsTable(self, 0) tbl.add_vpp_config() self.tables.append(tbl) for i in self.pg_interfaces: i.admin_up() if table_id != 0: tbl = VppIpTable(self, table_id) tbl.add_vpp_config() self.tables.append(tbl) tbl = VppIpTable(self, table_id, is_ip6=1) tbl.add_vpp_config() self.tables.append(tbl) i.set_table_ip4(table_id) i.set_table_ip6(table_id) i.config_ip4() i.resolve_arp() i.config_ip6() i.resolve_ndp() i.enable_mpls() table_id += 1 def tearDown(self): for i in self.pg_interfaces: i.unconfig_ip4() i.unconfig_ip6() i.ip6_disable() i.set_table_ip4(0) i.set_table_ip6(0) i.disable_mpls() i.admin_down() super(TestMPLS, self).tearDown() # the default of 64 matches the IP packet TTL default def create_stream_labelled_ip4( self, src_if, mpls_labels, mpls_ttl=255, ping=0, ip_itf=None, dst_ip=None, chksum=None, n=257): self.reset_packet_infos() pkts = [] for i in range(0, n): info = self.create_packet_info(src_if, src_if) payload = self.info_to_payload(info) p = Ether(dst=src_if.local_mac, src=src_if.remote_mac) for ii in range(len(mpls_labels)): if ii == len(mpls_labels) - 1: p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=1) else: p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=0) if not ping: if not dst_ip: p = (p / IP(src=src_if.local_ip4, dst=src_if.remote_ip4) / UDP(sport=1234, dport=1234) / Raw(payload)) else: p = (p / IP(src=src_if.local_ip4, dst=dst_ip) / UDP(sport=1234, dport=1234) / Raw(payload)) else: p = (p / IP(src=ip_itf.remote_ip4, dst=ip_itf.local_ip4) / ICMP()) if chksum: p[IP].chksum = chksum info.data = p.copy() pkts.append(p) return pkts def create_stream_ip4(self, src_if, dst_ip): self.reset_packet_infos() pkts = [] for i in range(0, 257): info = self.create_packet_info(src_if, src_if) payload = self.info_to_payload(info) p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / IP(src=src_if.remote_ip4, dst=dst_ip) / UDP(sport=1234, dport=1234) / Raw(payload)) info.data = p.copy() pkts.append(p) return pkts 
def create_stream_labelled_ip6(self, src_if, mpls_label, mpls_ttl, dst_ip=None, hlim=64): if dst_ip is None: dst_ip = src_if.remote_ip6 self.reset_packet_infos() pkts = [] for i in range(0, 257): info = self.create_packet_info(src_if, src_if) payload = self.info_to_payload(info) p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / MPLS(label=mpls_label, ttl=mpls_ttl) / IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) / UDP(sport=1234, dport=1234) / Raw(payload)) info.data = p.copy() pkts.append(p) return pkts def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0): try: capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): tx = sent[i] rx = capture[i] # the rx'd packet has the MPLS label popped eth = rx[Ether] self.assertEqual(eth.type, 0x800) tx_ip = tx[IP] rx_ip = rx[IP] if not ping_resp: self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) # IP processing post pop has decremented the TTL self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl) else: self.assertEqual(rx_ip.src, tx_ip.dst) self.assertEqual(rx_ip.dst, tx_ip.src) except: raise def verify_capture_labelled_ip4(self, src_if, capture, sent, mpls_labels): try: capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): tx = sent[i] rx = capture[i] tx_ip = tx[IP] rx_ip = rx[IP] # the MPLS TTL is copied from the IP verify_mpls_stack(self, rx, mpls_labels, rx_ip.ttl, len(mpls_labels) - 1) self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) # IP processing post pop has decremented the TTL self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl) except: raise def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels, ttl=255, top=None): if top is None: top = len(mpls_labels) - 1 try: capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): tx = sent[i] rx = capture[i] tx_ip = tx[IP] rx_ip = rx[IP] 
# the MPLS TTL is 255 since it enters a new tunnel verify_mpls_stack(self, rx, mpls_labels, ttl, top) self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) # IP processing post pop has decremented the TTL self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl) except: raise def verify_capture_labelled(self, src_if, capture, sent, mpls_labels, ttl=254, num=0): try: capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): rx = capture[i] verify_mpls_stack(self, rx, mpls_labels, ttl, num) except: raise def verify_capture_ip6(self, src_if, capture, sent): try: self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): tx = sent[i] rx = capture[i] # the rx'd packet has the MPLS label popped eth = rx[Ether] self.assertEqual(eth.type, 0x86DD) tx_ip = tx[IPv6] rx_ip = rx[IPv6] self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) # IP processing post pop has decremented the TTL self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim) except: raise def verify_capture_ip6_icmp(self, src_if, capture, sent): try: self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): tx = sent[i] rx = capture[i] # the rx'd packet has the MPLS label popped eth = rx[Ether] self.assertEqual(eth.type, 0x86DD) tx_ip = tx[IPv6] rx_ip = rx[IPv6] self.assertEqual(rx_ip.dst, tx_ip.src) # ICMP sourced from the interface's address self.assertEqual(rx_ip.src, src_if.local_ip6) # hop-limit reset to 255 for IMCP packet self.assertEqual(rx_ip.hlim, 254) icmp = rx[ICMPv6TimeExceeded] except: raise def send_and_assert_no_replies(self, intf, pkts, remark): intf.add_stream(pkts) self.pg_enable_capture(self.pg_interfaces) self.pg_start() for i in self.pg_interfaces: i.assert_nothing_captured(remark=remark) def test_swap(self): """ MPLS label swap tests """ # # A simple MPLS xconnect - eos label in label out # route_32_eos = VppMplsRoute(self, 32, 1, [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, 
labels=[33])]) route_32_eos.add_vpp_config() # # a stream that matches the route for 10.0.0.1 # PG0 is in the default table # self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self.pg0, [32]) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled(self.pg0, rx, tx, [33]) # # A simple MPLS xconnect - non-eos label in label out # route_32_neos = VppMplsRoute(self, 32, 0, [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[33])]) route_32_neos.add_vpp_config() # # a stream that matches the route for 10.0.0.1 # PG0 is in the default table # self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self.pg0, [32, 99]) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled(self.pg0, rx, tx, [33, 99]) # # An MPLS xconnect - EOS label in IP out # route_33_eos = VppMplsRoute(self, 33, 1, [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[])]) route_33_eos.add_vpp_config() self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self.pg0, [33]) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_ip4(self.pg0, rx, tx) # # disposed packets have an invalid IPv4 checkusm # tx = self.create_stream_labelled_ip4(self.pg0, [33], dst_ip=self.pg0.remote_ip4, n=65, chksum=1) self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum") # # An MPLS xconnect - EOS label in IPv6 out # route_333_eos = VppMplsRoute( self, 333, 1, [VppRoutePath(self.pg0.remote_ip6, self.pg0.sw_if_index, labels=[], proto=DpoProto.DPO_PROTO_IP6)]) route_333_eos.add_vpp_config() self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip6(self.pg0, [333], 64) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() 
self.verify_capture_ip6(self.pg0, rx, tx) # # disposed packets have an TTL expired # tx = self.create_stream_labelled_ip6(self.pg0, [333], 64, dst_ip=self.pg1.remote_ip6, hlim=1) self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip6(self.pg0, [333], 64, dst_ip=self.pg1.remote_ip6, hlim=0) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_ip6_icmp(self.pg0, rx, tx) # # An MPLS xconnect - EOS label in IPv6 out w imp-null # route_334_eos = VppMplsRoute( self, 334, 1, [VppRoutePath(self.pg0.remote_ip6, self.pg0.sw_if_index, labels=[3], proto=DpoProto.DPO_PROTO_IP6)]) route_334_eos.add_vpp_config() self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip6(self.pg0, [334], 64) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_ip6(self.pg0, rx, tx) # # disposed packets have an TTL expired # self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip6(self.pg0, [334], 64, dst_ip=self.pg1.remote_ip6, hlim=0) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_ip6_icmp(self.pg0, rx, tx) # # An MPLS xconnect - non-EOS label in IP out - an invalid configuration # so this traffic should be dropped. 
# route_33_neos = VppMplsRoute(self, 33, 0, [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[])]) route_33_neos.add_vpp_config() self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self.pg0, [33, 99]) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() self.pg0.assert_nothing_captured( remark="MPLS non-EOS packets popped and forwarded") # # A recursive EOS x-connect, which resolves through another x-connect # route_34_eos = VppMplsRoute(self, 34, 1, [VppRoutePath("0.0.0.0", 0xffffffff, nh_via_label=32, labels=[44, 45])]) route_34_eos.add_vpp_config() tx = self.create_stream_labelled_ip4(self.pg0, [34]) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 45], num=2) # # A recursive non-EOS x-connect, which resolves through another # x-connect # route_34_neos = VppMplsRoute(self, 34, 0, [VppRoutePath("0.0.0.0", 0xffffffff, nh_via_label=32, labels=[44, 46])]) route_34_neos.add_vpp_config() self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self.pg0, [34, 99]) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() # it's the 2nd (counting from 0) label in the stack that is swapped self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 46, 99], num=2) # # an recursive IP route that resolves through the recursive non-eos # x-connect # ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32, [VppRoutePath("0.0.0.0", 0xffffffff, nh_via_label=34, labels=[55])]) ip_10_0_0_1.add_vpp_config() self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "10.0.0.1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33, 44, 46, 55]) ip_10_0_0_1.remove_vpp_config() route_34_neos.remove_vpp_config() route_34_eos.remove_vpp_config() 
route_33_neos.remove_vpp_config() route_33_eos.remove_vpp_config() route_32_neos.remove_vpp_config() route_32_eos.remove_vpp_config() def test_bind(self): """ MPLS Local Label Binding test """ # # Add a non-recursive route with a single out label # route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32, [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[45])]) route_10_0_0_1.add_vpp_config() # bind a local label to the route binding = VppMplsIpBind(self, 44, "10.0.0.1", 32) binding.add_vpp_config() # non-EOS stream self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self.pg0, [44, 99]) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled(self.pg0, rx, tx, [45, 99]) # EOS stream self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self.pg0, [44]) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled(self.pg0, rx, tx, [45]) # IP stream self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "10.0.0.1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled_ip4(self.pg0, rx, tx, [45]) # # cleanup # binding.remove_vpp_config() route_10_0_0_1.remove_vpp_config() def test_imposition(self): """ MPLS label imposition test """ # # Add a non-recursive route with a single out label # route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32, [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[32])]) route_10_0_0_1.add_vpp_config() # # a stream that matches the route for 10.0.0.1 # PG0 is in the default table # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "10.0.0.1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32]) # # Add a 
non-recursive route with a 3 out labels # route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32, [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[32, 33, 34])]) route_10_0_0_2.add_vpp_config() # # a stream that matches the route for 10.0.0.1 # PG0 is in the default table # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "10.0.0.2") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 33, 34]) # # add a recursive path, with output label, via the 1 label route # route_11_0_0_1 = VppIpRoute(self, "11.0.0.1", 32, [VppRoutePath("10.0.0.1", 0xffffffff, labels=[44])]) route_11_0_0_1.add_vpp_config() # # a stream that matches the route for 11.0.0.1, should pick up # the label stack for 11.0.0.1 and 10.0.0.1 # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "11.0.0.1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 44]) # # add a recursive path, with 2 labels, via the 3 label route # route_11_0_0_2 = VppIpRoute(self, "11.0.0.2", 32, [VppRoutePath("10.0.0.2", 0xffffffff, labels=[44, 45])]) route_11_0_0_2.add_vpp_config() # # a stream that matches the route for 11.0.0.1, should pick up # the label stack for 11.0.0.1 and 10.0.0.1 # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "11.0.0.2") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_labelled_ip4( self.pg0, rx, tx, [32, 33, 34, 44, 45]) # # cleanup # route_11_0_0_2.remove_vpp_config() route_11_0_0_1.remove_vpp_config() route_10_0_0_2.remove_vpp_config() route_10_0_0_1.remove_vpp_config() def test_tunnel(self): """ MPLS Tunnel Tests """ # # Create a tunnel with a single out label # mpls_tun = VppMPLSTunnelInterface(self, 
[VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[44, 46])]) mpls_tun.add_vpp_config() mpls_tun.admin_up() # # add an unlabelled route through the new tunnel # route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32, [VppRoutePath("0.0.0.0", mpls_tun._sw_if_index)]) route_10_0_0_3.add_vpp_config() self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "10.0.0.3") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46]) # # add a labelled route through the new tunnel # route_10_0_0_4 = VppIpRoute(self, "10.0.0.4", 32, [VppRoutePath("0.0.0.0", mpls_tun._sw_if_index, labels=[33])]) route_10_0_0_4.add_vpp_config() self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "10.0.0.4") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46, 33], ttl=63, top=2) def test_v4_exp_null(self): """ MPLS V4 Explicit NULL test """ # # The first test case has an MPLS TTL of 0 # all packet should be dropped # tx = self.create_stream_labelled_ip4(self.pg0, [0], 0) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() self.pg0.assert_nothing_captured(remark="MPLS TTL=0 packets forwarded") # # a stream with a non-zero MPLS TTL # PG0 is in the default table # tx = self.create_stream_labelled_ip4(self.pg0, [0]) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_ip4(self.pg0, rx, tx) # # a stream with a non-zero MPLS TTL # PG1 is in table 1 # we are ensuring the post-pop lookup occurs in the VRF table # self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self.pg1, [0]) self.pg1.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg1.get_capture() self.verify_capture_ip4(self.pg0, 
rx, tx) def test_v6_exp_null(self): """ MPLS V6 Explicit NULL test """ # # a stream with a non-zero MPLS TTL # PG0 is in the default table # self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip6(self.pg0, 2, 2) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_ip6(self.pg0, rx, tx) # # a stream with a non-zero MPLS TTL # PG1 is in table 1 # we are ensuring the post-pop lookup occurs in the VRF table # self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip6(self.pg1, 2, 2) self.pg1.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg1.get_capture() self.verify_capture_ip6(self.pg0, rx, tx) def test_deag(self): """ MPLS Deagg """ # # A de-agg route - next-hop lookup in default table # route_34_eos = VppMplsRoute(self, 34, 1, [VppRoutePath("0.0.0.0", 0xffffffff, nh_table_id=0)]) route_34_eos.add_vpp_config() # # ping an interface in the default table # PG0 is in the default table # self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self.pg0, [34], ping=1, ip_itf=self.pg0) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() rx = self.pg0.get_capture() self.verify_capture_ip4(self.pg0, rx, tx, ping_resp=1) # # A de-agg route - next-hop lookup in non-default table # route_35_eos = VppMplsRoute(self, 35, 1, [VppRoutePath("0.0.0.0", 0xffffffff, nh_table_id=1)]) route_35_eos.add_vpp_config() # # ping an interface in the non-default table # PG0 is in the default table. 
packet arrive labelled in the # default table and egress unlabelled in the non-default # self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4( self.pg0, [35], ping=1, ip_itf=self.pg1) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() packet_count = self.get_packet_count_for_if_idx(self.pg0.sw_if_index) rx = self.pg1.get_capture(packet_count) self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1) # # Double pop # route_36_neos = VppMplsRoute(self, 36, 0, [VppRoutePath("0.0.0.0", 0xffffffff)]) route_36_neos.add_vpp_config() self.vapi.cli("clear trace") tx = self.create_stream_labelled_ip4(self }
/*
 * Copyright (c) 2018-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vcl/vcl_private.h>

static pthread_key_t vcl_worker_stop_key;

vcl_mq_evt_conn_t *
vcl_mq_evt_conn_alloc (vcl_worker_t * wrk)
{
  /* Take a fresh slot from the worker's mq event-connection pool and
   * hand it back fully zeroed so all fields start in a known state. */
  vcl_mq_evt_conn_t *conn;

  pool_get (wrk->mq_evt_conns, conn);
  memset (conn, 0, sizeof (vcl_mq_evt_conn_t));
  return conn;
}

u32
vcl_mq_evt_conn_index (vcl_worker_t * wrk, vcl_mq_evt_conn_t * mqc)
{
  /* A pool element's index is its offset from the pool base pointer. */
  u32 index = mqc - wrk->mq_evt_conns;
  return index;
}

vcl_mq_evt_conn_t *
vcl_mq_evt_conn_get (vcl_worker_t * wrk, u32 mq_conn_idx)
{
  /* Look up an event connection by pool index; the index must refer
   * to a currently allocated element. */
  vcl_mq_evt_conn_t *conn = pool_elt_at_index (wrk->mq_evt_conns,
					       mq_conn_idx);
  return conn;
}

/**
 * Register a message queue's eventfd with the worker's epoll fd.
 *
 * Allocates an event-connection tracking the (mq, fd) pair and adds the
 * fd to wrk->mqs_epfd for EPOLLIN, stashing the connection's pool index
 * in the epoll event data so the wakeup path can find it.
 *
 * @return the event-connection index on success, -1 on failure.
 */
int
vcl_mq_epoll_add_evfd (vcl_worker_t * wrk, svm_msg_q_t * mq)
{
  struct epoll_event e = { 0 };
  vcl_mq_evt_conn_t *mqc;
  u32 mqc_index;
  int mq_fd;

  mq_fd = svm_msg_q_get_eventfd (mq);

  /* Need both a valid epoll instance and a queue that exposes an eventfd */
  if (wrk->mqs_epfd < 0 || mq_fd == -1)
    return -1;

  mqc = vcl_mq_evt_conn_alloc (wrk);
  mqc_index = vcl_mq_evt_conn_index (wrk, mqc);
  mqc->mq_fd = mq_fd;
  mqc->mq = mq;

  e.events = EPOLLIN;
  e.data.u32 = mqc_index;
  if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_ADD, mq_fd, &e) < 0)
    {
      VDBG (0, "failed to add mq eventfd to mq epoll fd");
      /* BUG FIX: return the slot allocated above instead of leaking it */
      pool_put (wrk->mq_evt_conns, mqc);
      return -1;
    }

  return mqc_index;
}

/**
 * Remove a previously registered mq eventfd from the worker's epoll fd.
 *
 * @param mqc_index  index returned by vcl_mq_epoll_add_evfd, or ~0.
 * @return 0 on success, -1 on bad arguments or epoll failure.
 */
int
vcl_mq_epoll_del_evfd (vcl_worker_t * wrk, u32 mqc_index)
{
  vcl_mq_evt_conn_t *mqc;

  /* BUG FIX: the guard was 'if (wrk->mqs_epfd || ...)', which rejected
   * every *valid* (positive) epoll fd and only let an fd of 0 through.
   * Mirror the check used in vcl_mq_epoll_add_evfd. */
  if (wrk->mqs_epfd < 0 || mqc_index == ~0)
    return -1;

  mqc = vcl_mq_evt_conn_get (wrk, mqc_index);
  if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_DEL, mqc->mq_fd, 0) < 0)
    {
      VDBG (0, "failed to del mq eventfd to mq epoll fd");
      return -1;
    }
  return 0;
}

static vcl_worker_t *
vcl_worker_alloc (void)
{
  vcl_worker_t *wrk;
  pool_get (vcm->workers, wrk);
  memset (wrk, 0, sizeof (*wrk));
  wrk->wrk_index = wrk - vcm->workers;
  wrk->forked_child = ~0;
  return wrk;
}

/* Return a worker to the global pool. Callers in this file invoke this
 * under vcm->workers_lock; wrk must not be used afterwards. */
static void
vcl_worker_free (vcl_worker_t * wrk)
{
  pool_put (vcm->workers, wrk);
}

/* Register this worker with vpp over whichever control-plane API the
 * app is configured to use (app socket api vs. binary api). */
int
vcl_api_app_worker_add (void)
{
  int rv;

  if (vcm->cfg.vpp_app_socket_api)
    rv = vcl_sapi_app_worker_add ();
  else
    rv = vcl_bapi_app_worker_add ();

  return rv;
}

/* Deregister a worker from vpp via the configured control-plane API. */
void
vcl_api_app_worker_del (vcl_worker_t * wrk)
{
  if (vcm->cfg.vpp_app_socket_api)
    {
      vcl_sapi_app_worker_del (wrk);
      return;
    }
  vcl_bapi_app_worker_del (wrk);
}

/* Tear down a worker's resources and release its pool slot, all under
 * the global workers lock.
 *
 * @param notify_vpp  when non-zero, also tell vpp to drop the worker
 *                    (skipped e.g. when vpp itself is already gone). */
void
vcl_worker_cleanup (vcl_worker_t * wrk, u8 notify_vpp)
{
  clib_spinlock_lock (&vcm->workers_lock);
  if (notify_vpp)
    vcl_api_app_worker_del (wrk);

  /* Close the mq epoll fd only if one was actually created */
  if (wrk->mqs_epfd > 0)
    close (wrk->mqs_epfd);
  hash_free (wrk->session_index_by_vpp_handles);
  vec_free (wrk->mq_events);
  vec_free (wrk->mq_msg_vector);
  /* Freeing the worker last: wrk is invalid beyond this point */
  vcl_worker_free (wrk);
  clib_spinlock_unlock (&vcm->workers_lock);
}

/* Thread-exit cleanup hook, installed as the pthread TLS key destructor
 * in vcl_worker_register_with_vpp. Tears down the current worker
 * (notifying vpp) and invalidates the thread's worker index.
 * The arg is the pthread key value (unused here). */
static void
vcl_worker_cleanup_cb (void *arg)
{
  vcl_worker_t *wrk = vcl_worker_get_current ();
  /* Capture the index before cleanup frees wrk */
  u32 wrk_index = wrk->wrk_index;
  vcl_worker_cleanup (wrk, 1 /* notify vpp */ );
  vcl_set_worker_index (~0);
  VDBG (0, "cleaned up worker %u", wrk_index);
}

/**
 * Allocate and initialize a worker for the calling thread.
 *
 * Sets the thread's worker index, optionally creates the mq epoll fd
 * (when use_mq_eventfd is configured), and initializes the worker's
 * session hash and event vectors.
 *
 * @return the new worker, or 0 if the thread already has one or the
 *         max-workers limit has been reached. May also return a
 *         partially initialized worker if epoll_create fails.
 */
vcl_worker_t *
vcl_worker_alloc_and_init ()
{
  vcl_worker_t *wrk;

  /* This was initialized already */
  if (vcl_get_worker_index () != ~0)
    return 0;

  /* Use separate heap map entry for worker */
  clib_mem_set_thread_index ();

  if (pool_elts (vcm->workers) == vcm->cfg.max_workers)
    {
      VDBG (0, "max-workers %u limit reached", vcm->cfg.max_workers);
      return 0;
    }

  clib_spinlock_lock (&vcm->workers_lock);
  wrk = vcl_worker_alloc ();
  vcl_set_worker_index (wrk->wrk_index);
  wrk->thread_id = pthread_self ();
  wrk->current_pid = getpid ();

  wrk->mqs_epfd = -1;
  if (vcm->cfg.use_mq_eventfd)
    {
      /* Flag lets an epoll interposition layer know this is a real call */
      wrk->vcl_needs_real_epoll = 1;
      wrk->mqs_epfd = epoll_create (1);
      wrk->vcl_needs_real_epoll = 0;
      if (wrk->mqs_epfd < 0)
	{
	  clib_unix_warning ("epoll_create() returned");
	  goto done;
	}
    }

  wrk->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
  clib_time_init (&wrk->clib_time);
  vec_validate (wrk->mq_events, 64);
  vec_validate (wrk->mq_msg_vector, 128);
  vec_reset_length (wrk->mq_msg_vector);
  vec_validate (wrk->unhandled_evts_vector, 128);
  vec_reset_length (wrk->unhandled_evts_vector);

done:
  /* BUG FIX: unlock after the 'done:' label so the epoll_create failure
   * path releases workers_lock too; previously the goto skipped the
   * unlock and left the spinlock held forever. */
  clib_spinlock_unlock (&vcm->workers_lock);
  return wrk;
}

/* Announce the current worker to vpp and arm per-thread cleanup.
 *
 * Registers the worker via the configured control API and installs
 * vcl_worker_cleanup_cb as a pthread TLS destructor so the worker is
 * torn down when its thread exits.
 *
 * @return 0 on success, -1 if vpp rejects the worker.
 *
 * NOTE(review): pthread_key_create is called on every registration, so
 * each worker allocates a fresh process-wide key; it looks like a single
 * shared key would suffice -- confirm before changing. */
int
vcl_worker_register_with_vpp (void)
{
  vcl_worker_t *wrk = vcl_worker_get_current ();

  clib_spinlock_lock (&vcm->workers_lock);

  if (vcl_api_app_worker_add ())
    {
      VDBG (0, "failed to add worker to vpp");
      clib_spinlock_unlock (&vcm->workers_lock);
      return -1;
    }
  /* Key destructor fires at thread exit; failures are logged, not fatal */
  if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup_cb))
    VDBG (0, "failed to add pthread cleanup function");
  /* Value must be non-NULL for the destructor to run */
  if (pthread_setspecific (vcl_worker_stop_key, &wrk->thread_id))
    VDBG (0, "failed to setup key value");

  clib_spinlock_unlock (&vcm->workers_lock);

  VDBG (0, "added worker %u", wrk->wrk_index);
  return 0;
}

/* Accessor for the worker's control message queue. */
svm_msg_q_t *
vcl_worker_ctrl_mq (vcl_worker_t * wrk)
{
  svm_msg_q_t *ctrl_mq = wrk->ctrl_mq;
  return ctrl_mq;
}

/* Report how much can be read from a session without blocking.
 *
 * @return for open sessions, the number of readable bytes (for datagram
 *         sessions, the length of the next complete datagram, or 0 if
 *         none is fully queued yet); for listeners, the number of
 *         pending accept events; otherwise a negative VPPCOM_* error.
 *         Epoll (VEP) sessions are not readable: VPPCOM_EBADFD. */
int
vcl_session_read_ready (vcl_session_t * s)
{
  if (PREDICT_FALSE (s->flags & VCL_SESSION_F_IS_VEP))
    {
      VDBG (0, "ERROR: session %u: cannot read from an epoll session!",
	    s->session_index);
      return VPPCOM_EBADFD;
    }

  if (vcl_session_is_open (s))
    {
      /* Cut-through sessions read from the dedicated ct fifo */
      if (vcl_session_is_ct (s))
	return svm_fifo_max_dequeue_cons (s->ct_rx_fifo);

      if (s->is_dgram)
	{
	  session_dgram_pre_hdr_t ph;
	  u32 max_deq;

	  /* A datagram is only readable once its header AND full
	   * payload are in the fifo; peek the header to find out. */
	  max_deq = svm_fifo_max_dequeue_cons (s->rx_fifo);
	  if (max_deq <= SESSION_CONN_HDR_LEN)
	    return 0;
	  if (svm_fifo_peek (s->rx_fifo, 0, sizeof (ph), (u8 *) & ph) < 0)
	    return 0;
	  if (ph.data_length + SESSION_CONN_HDR_LEN > max_deq)
	    return 0;

	  return ph.data_length;
	}

      return svm_fifo_max_dequeue_cons (s->rx_fifo);
    }
  else if (s->session_state == VCL_STATE_LISTEN)
    {
      return clib_fifo_elts (s->accept_evts_fifo);
    }
  else
    {
      /* Closed/failed sessions map to the appropriate errno-style code */
      return (s->session_state == VCL_STATE_DISCONNECT) ?
	VPPCOM_ECONNRESET : VPPCOM_ENOTCONN;
    }
}

int
vcl_session_write_ready (vcl_session_t * s)
{
  if (PREDICT_FALSE (s->flags & VCL_SESSION_F_IS_VEP))
    {
      VDBG (0, "session %u [0x%llx]: cannot write to an epoll session!",
	    s->session_index, s->vpp_handle);
      return VPPCOM_EBADFD;
    }

  if (vcl_session_is_open (s))
    {
      if (vcl_session_is_ct (s))
	return svm_fifo_max_enqueue_prod (s->ct_tx_fifo);

      if (s->is_dgram)
	{
	  u32 max_enq = svm_fifo_max_enqueue_prod (s->tx_fifo);

	  if (max_enq <= sizeof (session_dgram_hdr_t))
	    return 0;
	  return max_enq - sizeof (session_dgram_hdr_t);
	}

      return svm_fifo_max_enqueue_prod (s->tx_fifo);
    }
  else if (s->session_state == VCL_STATE_LISTEN)
    {
      if (s->tx_fifo)
	return svm_fifo_max_enqueue_prod (s->tx_fifo);
      else
	return VPPCOM_EBADFD;
    }
  else
    {
      return (s