-rw-r--r--   .gitignore                                  |   3
-rw-r--r--   test/Makefile                               |  30
-rw-r--r--   test/framework.py                           |   2
-rw-r--r--   test/patches/scapy-2.3.3/gre-layers.patch   |  25
-rw-r--r--   test/test_gre.py                            | 700
-rw-r--r--   test/vpp_gre_interface.py                   |  34
-rw-r--r--   test/vpp_interface.py                       |  31
-rw-r--r--   test/vpp_ip_route.py                        |  46
-rw-r--r--   test/vpp_papi_provider.py                   |  51
-rw-r--r--   test/vpp_sub_interface.py                   |   3
-rw-r--r--   vnet/vnet/adj/adj.c                         |  10
-rw-r--r--   vnet/vnet/adj/adj_internal.h                |   3
-rw-r--r--   vnet/vnet/adj/adj_nbr.c                     |   4
-rw-r--r--   vnet/vnet/ethernet/arp.c                    |   2
-rw-r--r--   vnet/vnet/gre/gre.c                         |  43
-rw-r--r--   vnet/vnet/gre/gre.h                         |  77
-rw-r--r--   vnet/vnet/gre/interface.c                   |  51
-rw-r--r--   vnet/vnet/gre/node.c                        |  21
-rw-r--r--   vnet/vnet/interface_output.c                |  29
-rw-r--r--   vnet/vnet/ip/ip6_forward.c                  |   2
-rw-r--r--   vnet/vnet/ip/ip6_neighbor.c                 |   2
-rw-r--r--   vpp/vpp-api/api.c                           |   2

22 files changed, 1095 insertions(+), 76 deletions(-)
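Editor's note (not part of the commit): the diff below patches scapy 2.3.3 so that GRE can carry IPv6 (proto 0x86dd) and Transparent Ethernet Bridging payloads (proto 0x6558, previously mis-bound to proto 1). The following minimal sketch illustrates what those bindings buy the new test_gre.py; it assumes scapy 2.3.3 with test/patches/scapy-2.3.3/gre-layers.patch applied, as the new Makefile rules arrange.

#!/usr/bin/env python
# Sketch: GRE payload bindings used by test_gre.py (assumes the patched scapy 2.3.3).
from scapy.layers.l2 import Ether, Dot1Q, GRE
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6

# 4o4: IPv4-in-GRE, already worked before the patch.
p4 = (Ether() / IP(src="1.1.1.2", dst="1.1.1.1") /
      GRE() / IP(src="5.5.5.5", dst="4.4.4.4") / UDP(sport=1234, dport=1234))

# 6o4: IPv6-in-GRE, needs the added bind_layers(GRE, IPv6, proto=0x86dd)
# so scapy sets the GRE protocol on build and dissects it on capture.
p6 = (Ether() / IP(src="1.1.1.2", dst="1.1.1.1") /
      GRE() / IPv6(src="::1", dst="::2") / UDP(sport=1234, dport=1234))

# TEB / L2o4: Ethernet-in-GRE, needs the corrected
# bind_layers(GRE, Ether, proto=0x6558) (the old binding used proto=1).
pl2 = (Ether() / IP(src="2.2.2.2", dst="2.2.2.1") /
       GRE() / Ether() / Dot1Q(vlan=11) / IP() / UDP(sport=1234, dport=1234))

assert p6[GRE].proto == 0x86dd
assert pl2[GRE].proto == 0x6558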
diff --git a/.gitignore b/.gitignore index 5a8384a8273..cec432c5cf0 100644 --- a/.gitignore +++ b/.gitignore @@ -84,3 +84,6 @@ GTAGS /vpp-api/python/vpp_papi.egg-info /vpp-api/python/vpp_papi/memclnt.py /vpp-api/python/vpp_papi/vpe.py + +# Build files in the test directory +/test/*.ok diff --git a/test/Makefile b/test/Makefile index aac637d3e5f..de6aaa7a793 100644 --- a/test/Makefile +++ b/test/Makefile @@ -6,23 +6,45 @@ ifndef VPP_PYTHON_PREFIX endif PYTHON_VENV_PATH=$(VPP_PYTHON_PREFIX)/virtualenv -PYTHON_DEPENDS=scapy pexpect +PYTHON_DEPENDS=scapy==2.3.3 pexpect +SCAPY_SOURCE=$(WS_ROOT)/build-root/python/virtualenv/lib/python2.7/site-packages/ -test: wipe verify-python-path + +.pip-install.ok: @virtualenv $(PYTHON_VENV_PATH) @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && pip install $(PYTHON_DEPENDS)" + @touch $@ + +.pip-patch.ok: .pip-install.ok + @echo --- patching --- + for f in $(CURDIR)/patches/scapy-2.3.3/*.patch ; do \ + echo Applying patch: $$(basename $$f) ; \ + patch -p1 -d $(SCAPY_SOURCE) < $$f ; \ + done + @touch $@ + +.install.ok: .pip-patch.ok @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && cd $(WS_ROOT)/vpp-api/python && python setup.py install" + @touch $@ + +PHONIES=.install.ok .pip-patch.ok .pip-install.ok +.PHONY: $(PHONIES) + +test: reset verify-python-path .install.ok @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && python run_tests.py discover -p test_$(TEST)\"*.py\"" -retest: wipe verify-python-path +retest: reset verify-python-path @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && python run_tests.py discover -p test_$(TEST)\"*.py\"" .PHONY: wipe doc -wipe: +reset: @rm -f /dev/shm/vpp-unittest-* @rm -rf /tmp/vpp-unittest-* +wipe: reset + @rm -f $(PHONIES) + doc: verify-python-path @virtualenv $(PYTHON_VENV_PATH) @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && pip install $(PYTHON_DEPENDS) sphinx" diff --git a/test/framework.py b/test/framework.py index 227428ef9c2..1375f076dd6 100644 --- a/test/framework.py +++ b/test/framework.py @@ -265,8 +265,8 @@ class VppTestCase(unittest.TestCase): def tearDown(self): """ Show various debug prints after each test """ if not self.vpp_dead: - self.logger.info(self.vapi.ppcli("show int")) self.logger.debug(self.vapi.cli("show trace")) + self.logger.info(self.vapi.ppcli("show int")) self.logger.info(self.vapi.ppcli("show hardware")) self.logger.info(self.vapi.ppcli("show error")) self.logger.info(self.vapi.ppcli("show run")) diff --git a/test/patches/scapy-2.3.3/gre-layers.patch b/test/patches/scapy-2.3.3/gre-layers.patch new file mode 100644 index 00000000000..605a705b0f7 --- /dev/null +++ b/test/patches/scapy-2.3.3/gre-layers.patch @@ -0,0 +1,25 @@ +diff --git a/scapy/layers/inet6.py b/scapy/layers/inet6.py +index 03b80ec..a7e1e0f 100644 +--- a/scapy/layers/inet6.py ++++ b/scapy/layers/inet6.py +@@ -3722,6 +3722,7 @@ conf.l2types.register(31, IPv6) + + bind_layers(Ether, IPv6, type = 0x86dd ) + bind_layers(CookedLinux, IPv6, proto = 0x86dd ) ++bind_layers(GRE, IPv6, proto = 0x86dd ) + bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP ) + bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP ) + bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP ) +diff --git a/scapy/layers/l2.py b/scapy/layers/l2.py +index 4f491d2..661a5da 100644 +--- a/scapy/layers/l2.py ++++ b/scapy/layers/l2.py +@@ -628,7 +628,7 @@ bind_layers( CookedLinux, EAPOL, proto=34958) + bind_layers( GRE, LLC, proto=122) + bind_layers( GRE, Dot1Q, proto=33024) + bind_layers( GRE, Dot1AD, type=0x88a8) +-bind_layers( GRE, Ether, proto=1) 
++bind_layers( GRE, Ether, proto=0x6558) + bind_layers( GRE, ARP, proto=2054) + bind_layers( GRE, EAPOL, proto=34958) + bind_layers( GRE, GRErouting, { "routing_present" : 1 } ) diff --git a/test/test_gre.py b/test/test_gre.py new file mode 100644 index 00000000000..b5a1e346eca --- /dev/null +++ b/test/test_gre.py @@ -0,0 +1,700 @@ +#!/usr/bin/env python + +import unittest +import socket +from logging import * + +from framework import VppTestCase, VppTestRunner +from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint +from vpp_gre_interface import VppGreInterface +from vpp_ip_route import IpRoute, IpPath +from vpp_papi_provider import L2_VTR_OP + +from scapy.packet import Raw +from scapy.layers.l2 import Ether, Dot1Q, ARP, GRE +from scapy.layers.inet import IP, UDP +from scapy.layers.inet6 import ICMPv6ND_NS, IPv6, UDP +from scapy.contrib.mpls import MPLS +from scapy.volatile import RandMAC, RandIP + + +class TestGRE(VppTestCase): + """ GRE Test Case """ + + @classmethod + def setUpClass(cls): + super(TestGRE, cls).setUpClass() + + def setUp(self): + super(TestGRE, self).setUp() + + # create 2 pg interfaces - set one in a non-default table. + self.create_pg_interfaces(range(2)) + + self.pg1.set_table_ip4(1) + for i in self.pg_interfaces: + i.admin_up() + i.config_ip4() + i.resolve_arp() + + def tearDown(self): + super(TestGRE, self).tearDown() + + def create_stream_ip4(self, src_if, src_ip, dst_ip): + pkts = [] + for i in range(0, 257): + info = self.create_packet_info(src_if.sw_if_index, + src_if.sw_if_index) + payload = self.info_to_payload(info) + p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / + IP(src=src_ip, dst=dst_ip) / + UDP(sport=1234, dport=1234) / + Raw(payload)) + info.data = p.copy() + pkts.append(p) + return pkts + + def create_tunnel_stream_4o4(self, src_if, + tunnel_src, tunnel_dst, + src_ip, dst_ip): + pkts = [] + for i in range(0, 257): + info = self.create_packet_info(src_if.sw_if_index, + src_if.sw_if_index) + payload = self.info_to_payload(info) + p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / + IP(src=tunnel_src, dst=tunnel_dst) / + GRE() / + IP(src=src_ip, dst=dst_ip) / + UDP(sport=1234, dport=1234) / + Raw(payload)) + info.data = p.copy() + pkts.append(p) + return pkts + + def create_tunnel_stream_6o4(self, src_if, + tunnel_src, tunnel_dst, + src_ip, dst_ip): + pkts = [] + for i in range(0, 257): + info = self.create_packet_info(src_if.sw_if_index, + src_if.sw_if_index) + payload = self.info_to_payload(info) + p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / + IP(src=tunnel_src, dst=tunnel_dst) / + GRE() / + IPv6(src=src_ip, dst=dst_ip) / + UDP(sport=1234, dport=1234) / + Raw(payload)) + info.data = p.copy() + pkts.append(p) + return pkts + + def create_tunnel_stream_l2o4(self, src_if, + tunnel_src, tunnel_dst): + pkts = [] + for i in range(0, 257): + info = self.create_packet_info(src_if.sw_if_index, + src_if.sw_if_index) + payload = self.info_to_payload(info) + p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / + IP(src=tunnel_src, dst=tunnel_dst) / + GRE() / + Ether(dst=RandMAC('*:*:*:*:*:*'), + src=RandMAC('*:*:*:*:*:*')) / + IP(src=str(RandIP()), dst=str(RandIP())) / + UDP(sport=1234, dport=1234) / + Raw(payload)) + info.data = p.copy() + pkts.append(p) + return pkts + + def create_tunnel_stream_vlano4(self, src_if, + tunnel_src, tunnel_dst, vlan): + pkts = [] + for i in range(0, 257): + info = self.create_packet_info(src_if.sw_if_index, + src_if.sw_if_index) + payload = self.info_to_payload(info) + p 
= (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / + IP(src=tunnel_src, dst=tunnel_dst) / + GRE() / + Ether(dst=RandMAC('*:*:*:*:*:*'), + src=RandMAC('*:*:*:*:*:*')) / + Dot1Q(vlan=vlan) / + IP(src=str(RandIP()), dst=str(RandIP())) / + UDP(sport=1234, dport=1234) / + Raw(payload)) + info.data = p.copy() + pkts.append(p) + return pkts + + def verify_filter(self, capture, sent): + if not len(capture) == len(sent): + # filter out any IPv6 RAs from the captur + for p in capture: + if (p.haslayer(ICMPv6ND_RA)): + capture.remove(p) + return capture + + def verify_tunneled_4o4(self, src_if, capture, sent, + tunnel_src, tunnel_dst): + + capture = self.verify_filter(capture, sent) + self.assertEqual(len(capture), len(sent)) + + for i in range(len(capture)): + try: + tx = sent[i] + rx = capture[i] + + tx_ip = tx[IP] + rx_ip = rx[IP] + + self.assertEqual(rx_ip.src, tunnel_src) + self.assertEqual(rx_ip.dst, tunnel_dst) + + rx_gre = rx[GRE] + rx_ip = rx_gre[IP] + + self.assertEqual(rx_ip.src, tx_ip.src) + self.assertEqual(rx_ip.dst, tx_ip.dst) + # IP processing post pop has decremented the TTL + self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl) + + except: + rx.show() + tx.show() + raise + + def verify_tunneled_l2o4(self, src_if, capture, sent, + tunnel_src, tunnel_dst): + capture = self.verify_filter(capture, sent) + self.assertEqual(len(capture), len(sent)) + + for i in range(len(capture)): + try: + tx = sent[i] + rx = capture[i] + + tx_ip = tx[IP] + rx_ip = rx[IP] + + self.assertEqual(rx_ip.src, tunnel_src) + self.assertEqual(rx_ip.dst, tunnel_dst) + + rx_gre = rx[GRE] + rx_l2 = rx_gre[Ether] + rx_ip = rx_l2[IP] + tx_gre = tx[GRE] + tx_l2 = tx_gre[Ether] + tx_ip = tx_l2[IP] + + self.assertEqual(rx_ip.src, tx_ip.src) + self.assertEqual(rx_ip.dst, tx_ip.dst) + # bridged, not L3 forwarded, so no TTL decrement + self.assertEqual(rx_ip.ttl, tx_ip.ttl) + + except: + rx.show() + tx.show() + raise + + def verify_tunneled_vlano4(self, src_if, capture, sent, + tunnel_src, tunnel_dst, vlan): + try: + capture = self.verify_filter(capture, sent) + self.assertEqual(len(capture), len(sent)) + except: + capture.show() + raise + + for i in range(len(capture)): + try: + tx = sent[i] + rx = capture[i] + + tx_ip = tx[IP] + rx_ip = rx[IP] + + self.assertEqual(rx_ip.src, tunnel_src) + self.assertEqual(rx_ip.dst, tunnel_dst) + + rx_gre = rx[GRE] + rx_l2 = rx_gre[Ether] + rx_vlan = rx_l2[Dot1Q] + rx_ip = rx_l2[IP] + + self.assertEqual(rx_vlan.vlan, vlan) + + tx_gre = tx[GRE] + tx_l2 = tx_gre[Ether] + tx_ip = tx_l2[IP] + + self.assertEqual(rx_ip.src, tx_ip.src) + self.assertEqual(rx_ip.dst, tx_ip.dst) + # bridged, not L3 forwarded, so no TTL decrement + self.assertEqual(rx_ip.ttl, tx_ip.ttl) + + except: + rx.show() + tx.show() + raise + + def verify_decapped_4o4(self, src_if, capture, sent): + capture = self.verify_filter(capture, sent) + self.assertEqual(len(capture), len(sent)) + + for i in range(len(capture)): + try: + tx = sent[i] + rx = capture[i] + + tx_ip = tx[IP] + rx_ip = rx[IP] + tx_gre = tx[GRE] + tx_ip = tx_gre[IP] + + self.assertEqual(rx_ip.src, tx_ip.src) + self.assertEqual(rx_ip.dst, tx_ip.dst) + # IP processing post pop has decremented the TTL + self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl) + + except: + rx.show() + tx.show() + raise + + def verify_decapped_6o4(self, src_if, capture, sent): + capture = self.verify_filter(capture, sent) + self.assertEqual(len(capture), len(sent)) + + for i in range(len(capture)): + try: + tx = sent[i] + rx = capture[i] + + tx_ip = tx[IP] + rx_ip = rx[IPv6] + tx_gre = tx[GRE] + 
tx_ip = tx_gre[IPv6] + + self.assertEqual(rx_ip.src, tx_ip.src) + self.assertEqual(rx_ip.dst, tx_ip.dst) + self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim) + + except: + rx.show() + tx.show() + raise + + def test_gre(self): + """ GRE tunnel Tests """ + + # + # Create an L3 GRE tunnel. + # - set it admin up + # - assign an IP Addres + # - Add a route via the tunnel + # + gre_if = VppGreInterface(self, + self.pg0.local_ip4, + "1.1.1.2") + gre_if.add_vpp_config() + + # + # The double create (create the same tunnel twice) should fail, + # and we should still be able to use the original + # + try: + gre_if.add_vpp_config() + except Exception: + pass + else: + self.fail("Double GRE tunnel add does not fail") + + gre_if.admin_up() + gre_if.config_ip4() + + route_via_tun = IpRoute(self, "4.4.4.4", 32, + [IpPath("0.0.0.0", gre_if.sw_if_index)]) + + route_via_tun.add_vpp_config() + + # + # Send a packet stream that is routed into the tunnel + # - they are all dropped since the tunnel's desintation IP + # is unresolved - or resolves via the default route - which + # which is a drop. + # + tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "4.4.4.4") + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + + try: + self.assertEqual(0, len(rx)) + except: + error("GRE packets forwarded without DIP resolved") + error(rx.show()) + raise + + # + # Add a route that resolves the tunnel's destination + # + route_tun_dst = IpRoute(self, "1.1.1.2", 32, + [IpPath(self.pg0.remote_ip4, self.pg0.sw_if_index)]) + route_tun_dst.add_vpp_config() + + # + # Send a packet stream that is routed into the tunnel + # - packets are GRE encapped + # + self.vapi.cli("clear trace") + tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "4.4.4.4") + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + self.verify_tunneled_4o4(self.pg0, rx, tx, + self.pg0.local_ip4, "1.1.1.2") + + # + # Send tunneled packets that match the created tunnel and + # are decapped and forwarded + # + self.vapi.cli("clear trace") + tx = self.create_tunnel_stream_4o4(self.pg0, + "1.1.1.2", + self.pg0.local_ip4, + self.pg0.local_ip4, + self.pg0.remote_ip4) + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + self.verify_decapped_4o4(self.pg0, rx, tx) + + # + # Send tunneled packets that do not match the tunnel's src + # + self.vapi.cli("clear trace") + tx = self.create_tunnel_stream_4o4(self.pg0, + "1.1.1.3", + self.pg0.local_ip4, + self.pg0.local_ip4, + self.pg0.remote_ip4) + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + try: + self.assertEqual(0, len(rx)) + except: + error("GRE packets forwarded despite no SRC address match") + error(rx.show()) + raise + + # + # Configure IPv6 on the PG interface so we can route IPv6 + # packets + # + self.pg0.config_ip6() + self.pg0.resolve_ndp() + + # + # Send IPv6 tunnel encapslated packets + # - dropped since IPv6 is not enabled on the tunnel + # + self.vapi.cli("clear trace") + tx = self.create_tunnel_stream_6o4(self.pg0, + "1.1.1.2", + self.pg0.local_ip4, + self.pg0.local_ip6, + self.pg0.remote_ip6) + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + try: + self.assertEqual(0, len(rx)) + except: + error("IPv6 GRE packets forwarded despite IPv6 not enabled on tunnel") + 
error(rx.show()) + raise + + # + # Enable IPv6 on the tunnel + # + gre_if.config_ip6() + + # + # Send IPv6 tunnel encapslated packets + # - forwarded since IPv6 is enabled on the tunnel + # + self.vapi.cli("clear trace") + tx = self.create_tunnel_stream_6o4(self.pg0, + "1.1.1.2", + self.pg0.local_ip4, + self.pg0.local_ip6, + self.pg0.remote_ip6) + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + self.verify_decapped_6o4(self.pg0, rx, tx) + + # + # test case cleanup + # + route_tun_dst.remove_vpp_config() + route_via_tun.remove_vpp_config() + gre_if.remove_vpp_config() + + self.pg0.unconfig_ip6() + + def test_gre_vrf(self): + """ GRE tunnel VRF Tests """ + + # + # Create an L3 GRE tunnel whose destination is in the non-default + # table. The underlay is thus non-default - the overlay is still + # the default. + # - set it admin up + # - assign an IP Addres + # + gre_if = VppGreInterface(self, self.pg1.local_ip4, + "2.2.2.2", + outer_fib_id=1) + gre_if.add_vpp_config() + gre_if.admin_up() + gre_if.config_ip4() + + # + # Add a route via the tunnel - in the overlay + # + route_via_tun = IpRoute(self, "9.9.9.9", 32, + [IpPath("0.0.0.0", gre_if.sw_if_index)]) + route_via_tun.add_vpp_config() + + # + # Add a route that resolves the tunnel's destination - in the + # underlay table + # + route_tun_dst = IpRoute(self, "2.2.2.2", 32, table_id=1, + paths=[IpPath(self.pg1.remote_ip4, + self.pg1.sw_if_index)]) + route_tun_dst.add_vpp_config() + + # + # Send a packet stream that is routed into the tunnel + # packets are sent in on pg0 which is in the default table + # - packets are GRE encapped + # + self.vapi.cli("clear trace") + tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "9.9.9.9") + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg1.get_capture() + self.verify_tunneled_4o4(self.pg1, rx, tx, + self.pg1.local_ip4, "2.2.2.2") + + # + # Send tunneled packets that match the created tunnel and + # are decapped and forwarded. This tests the decap lookup + # does not happen in the encap table + # + self.vapi.cli("clear trace") + tx = self.create_tunnel_stream_4o4(self.pg1, + "2.2.2.2", + self.pg1.local_ip4, + self.pg0.local_ip4, + self.pg0.remote_ip4) + self.pg1.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + self.verify_decapped_4o4(self.pg0, rx, tx) + + # + # test case cleanup + # + route_tun_dst.remove_vpp_config() + route_via_tun.remove_vpp_config() + gre_if.remove_vpp_config() + + def test_gre_l2(self): + """ GRE tunnel L2 Tests """ + + # + # Add routes to resolve the tunnel destinations + # + route_tun1_dst = IpRoute(self, "2.2.2.2", 32, + [IpPath(self.pg0.remote_ip4, + self.pg0.sw_if_index)]) + route_tun2_dst = IpRoute(self, "2.2.2.3", 32, + [IpPath(self.pg0.remote_ip4, + self.pg0.sw_if_index)]) + + route_tun1_dst.add_vpp_config() + route_tun2_dst.add_vpp_config() + + # + # Create 2 L2 GRE tunnels and x-connect them + # + gre_if1 = VppGreInterface(self, self.pg0.local_ip4, + "2.2.2.2", + is_teb=1) + gre_if2 = VppGreInterface(self, self.pg0.local_ip4, + "2.2.2.3", + is_teb=1) + gre_if1.add_vpp_config() + gre_if2.add_vpp_config() + + gre_if1.admin_up() + gre_if2.admin_up() + + self.vapi.sw_interface_set_l2_xconnect(gre_if1.sw_if_index, + gre_if2.sw_if_index, + enable=1) + self.vapi.sw_interface_set_l2_xconnect(gre_if2.sw_if_index, + gre_if1.sw_if_index, + enable=1) + + # + # Send in tunnel encapped L2. 
expect out tunnel encapped L2 + # in both directions + # + self.vapi.cli("clear trace") + tx = self.create_tunnel_stream_l2o4(self.pg0, + "2.2.2.2", + self.pg0.local_ip4) + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + self.verify_tunneled_l2o4(self.pg0, rx, tx, + self.pg0.local_ip4, + "2.2.2.3") + + self.vapi.cli("clear trace") + tx = self.create_tunnel_stream_l2o4(self.pg0, + "2.2.2.3", + self.pg0.local_ip4) + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + self.verify_tunneled_l2o4(self.pg0, rx, tx, + self.pg0.local_ip4, + "2.2.2.2") + + self.vapi.sw_interface_set_l2_xconnect(gre_if1.sw_if_index, + gre_if2.sw_if_index, + enable=0) + self.vapi.sw_interface_set_l2_xconnect(gre_if2.sw_if_index, + gre_if1.sw_if_index, + enable=0) + + # + # Create a VLAN sub-interfaces on the GRE TEB interfaces + # then x-connect them + # + gre_if_11 = VppDot1QSubint(self, gre_if1, 11) + gre_if_12 = VppDot1QSubint(self, gre_if2, 12) + + # gre_if_11.add_vpp_config() + # gre_if_12.add_vpp_config() + + gre_if_11.admin_up() + gre_if_12.admin_up() + + self.vapi.sw_interface_set_l2_xconnect(gre_if_11.sw_if_index, + gre_if_12.sw_if_index, + enable=1) + self.vapi.sw_interface_set_l2_xconnect(gre_if_12.sw_if_index, + gre_if_11.sw_if_index, + enable=1) + + # + # Configure both to pop thier respective VLAN tags, + # so that during the x-coonect they will subsequently push + # + self.vapi.sw_interface_set_l2_tag_rewrite(gre_if_12.sw_if_index, + L2_VTR_OP.L2_POP_1, + 12) + self.vapi.sw_interface_set_l2_tag_rewrite(gre_if_11.sw_if_index, + L2_VTR_OP.L2_POP_1, + 11) + + # + # Send traffic in both directiond - expect the VLAN tags to + # be swapped. 
+ # + self.vapi.cli("clear trace") + tx = self.create_tunnel_stream_vlano4(self.pg0, + "2.2.2.2", + self.pg0.local_ip4, + 11) + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + self.verify_tunneled_vlano4(self.pg0, rx, tx, + self.pg0.local_ip4, + "2.2.2.3", + 12) + + self.vapi.cli("clear trace") + tx = self.create_tunnel_stream_vlano4(self.pg0, + "2.2.2.3", + self.pg0.local_ip4, + 12) + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture() + self.verify_tunneled_vlano4(self.pg0, rx, tx, + self.pg0.local_ip4, + "2.2.2.2", + 11) + + # + # Cleanup Test resources + # + gre_if_11.remove_vpp_config() + gre_if_12.remove_vpp_config() + gre_if1.remove_vpp_config() + gre_if2.remove_vpp_config() + route_tun1_dst.add_vpp_config() + route_tun2_dst.add_vpp_config() + + +if __name__ == '__main__': + unittest.main(testRunner=VppTestRunner) diff --git a/test/vpp_gre_interface.py b/test/vpp_gre_interface.py new file mode 100644 index 00000000000..b6a66332a99 --- /dev/null +++ b/test/vpp_gre_interface.py @@ -0,0 +1,34 @@ + +from vpp_interface import VppInterface +import socket + + +class VppGreInterface(VppInterface): + """ + VPP GRE interface + """ + + def __init__(self, test, src_ip, dst_ip, outer_fib_id=0, is_teb=0): + """ Create VPP loopback interface """ + self._test = test + self.t_src = src_ip + self.t_dst = dst_ip + self.t_outer_fib = outer_fib_id + self.t_is_teb = is_teb + + def add_vpp_config(self): + s = socket.inet_pton(socket.AF_INET, self.t_src) + d = socket.inet_pton(socket.AF_INET, self.t_dst) + r = self.test.vapi.gre_tunnel_add_del(s, d, + outer_fib_id=self.t_outer_fib, + is_teb=self.t_is_teb) + self._sw_if_index = r.sw_if_index + self.post_init_setup() + + def remove_vpp_config(self): + s = socket.inet_pton(socket.AF_INET, self.t_src) + d = socket.inet_pton(socket.AF_INET, self.t_dst) + self.unconfig() + r = self.test.vapi.gre_tunnel_add_del(s, d, + outer_fib_id=self.t_outer_fib, + is_add=0) diff --git a/test/vpp_interface.py b/test/vpp_interface.py index 30ef8ae7a0a..511cf4bc2dd 100644 --- a/test/vpp_interface.py +++ b/test/vpp_interface.py @@ -176,6 +176,19 @@ class VppInterface(object): addr_len = 24 self.test.vapi.sw_interface_add_del_address( self.sw_if_index, addr, addr_len) + self.has_ip4_config = True + + def unconfig_ip4(self): + """Remove IPv4 address on the VPP interface""" + try: + if (self.has_ip4_config): + self.test.vapi.sw_interface_add_del_address( + self.sw_if_index, + self.local_ip4n, + 24, is_add=0) + except AttributeError: + self.has_ip4_config = False + self.has_ip4_config = False def configure_ipv4_neighbors(self): """For every remote host assign neighbor's MAC to IPv4 addresses.""" @@ -190,6 +203,24 @@ class VppInterface(object): addr_len = 64 self.test.vapi.sw_interface_add_del_address( self.sw_if_index, addr, addr_len, is_ipv6=1) + self.has_ip6_config = True + + def unconfig_ip6(self): + """Remove IPv6 address on the VPP interface""" + try: + if (self.has_ip6_config): + self.test.vapi.sw_interface_add_del_address( + self.sw_if_index, + self.local_ip6n, + 64, is_ipv6=1, is_add=0) + except AttributeError: + self.has_ip6_config = False + self.has_ip6_config = False + + def unconfig(self): + """Unconfigure IPv6 and IPv4 address on the VPP interface""" + self.unconfig_ip4() + self.unconfig_ip6() def set_table_ip4(self, table_id): """Set the interface in a IPv4 Table. 
diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py new file mode 100644 index 00000000000..78e6aaa23b9 --- /dev/null +++ b/test/vpp_ip_route.py @@ -0,0 +1,46 @@ +""" + IP Routes + + object abstractions for representing IP routes in VPP +""" + +import socket + + +class IpPath: + + def __init__(self, nh_addr, nh_sw_if_index, nh_table_id=0): + self.nh_addr = socket.inet_pton(socket.AF_INET, nh_addr) + self.nh_itf = nh_sw_if_index + self.nh_table_id = nh_table_id + + +class IpRoute: + """ + IP Route + """ + + def __init__(self, test, dest_addr, + dest_addr_len, paths, table_id=0): + self._test = test + self.paths = paths + self.dest_addr = socket.inet_pton(socket.AF_INET, dest_addr) + self.dest_addr_len = dest_addr_len + self.table_id = table_id + + def add_vpp_config(self): + for path in self.paths: + self._test.vapi.ip_add_del_route(self.dest_addr, + self.dest_addr_len, + path.nh_addr, + path.nh_itf, + table_id=self.table_id) + + def remove_vpp_config(self): + for path in self.paths: + self._test.vapi.ip_add_del_route(self.dest_addr, + self.dest_addr_len, + path.nh_addr, + path.nh_itf, + table_id=self.table_id, + is_add=0) diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py index 2148e94b2a8..23a108d9ca2 100644 --- a/test/vpp_papi_provider.py +++ b/test/vpp_papi_provider.py @@ -18,6 +18,9 @@ if do_import: MPLS_IETF_MAX_LABEL = 0xfffff MPLS_LABEL_INVALID = MPLS_IETF_MAX_LABEL + 1 +class L2_VTR_OP: + L2_POP_1 = 3 + class VppPapiProvider(object): """VPP-api provider using vpp-papi @@ -230,6 +233,20 @@ class VppPapiProvider(object): return self.api(vpp_papi.sw_interface_set_l2_xconnect, (rx_sw_if_index, tx_sw_if_index, enable)) + def sw_interface_set_l2_tag_rewrite(self, sw_if_index, vtr_oper, push=0, tag1=0, tag2=0): + """L2 interface vlan tag rewrite configure request + :param client_index - opaque cookie to identify the sender + :param context - sender context, to match reply w/ request + :param sw_if_index - interface the operation is applied to + :param vtr_op - Choose from l2_vtr_op_t enum values + :param push_dot1q - first pushed flag dot1q id set, else dot1ad + :param tag1 - Needed for any push or translate vtr op + :param tag2 - Needed for any push 2 or translate x-2 vtr ops + + """ + return self.api(vpp_papi.l2_interface_vlan_tag_rewrite, + (sw_if_index, vtr_oper, push, tag1, tag2)) + def sw_interface_set_flags(self, sw_if_index, admin_up_down, link_up_down=0, deleted=0): """ @@ -278,6 +295,13 @@ class VppPapiProvider(object): outer_vlan, inner_vlan)) + def delete_subif(self, sw_if_index): + """Delete subinterface + + :param sw_if_index: + """ + return self.api(vpp_papi.delete_subif, ([sw_if_index])) + def create_vlan_subif(self, sw_if_index, vlan): """ @@ -411,3 +435,30 @@ class VppPapiProvider(object): """ return self.api(vpp_papi.sw_interface_span_enable_disable, (sw_if_index_from, sw_if_index_to, enable )) + + def gre_tunnel_add_del(self, + src_address, + dst_address, + outer_fib_id=0, + is_teb=0, + is_add=1, + is_ip6=0): + """ Add a GRE tunnel + + :param src_address: + :param dst_address: + :param outer_fib_id: (Default value = 0) + :param is_add: (Default value = 1) + :param is_ipv6: (Default value = 0) + :param is_teb: (Default value = 0) + """ + + return self.api( + vpp_papi.gre_add_del_tunnel, + (is_add, + is_ip6, + is_teb, + src_address, + dst_address, + outer_fib_id) + ) diff --git a/test/vpp_sub_interface.py b/test/vpp_sub_interface.py index b387d27b49d..027a24b2a1d 100644 --- a/test/vpp_sub_interface.py +++ b/test/vpp_sub_interface.py @@ -41,6 +41,9 @@ 
class VppSubInterface(VppPGInterface): def add_dot1_layer(self, pkt): pass + def remove_vpp_config(self): + self.test.vapi.delete_subif(self._sw_if_index) + class VppDot1QSubint(VppSubInterface): diff --git a/vnet/vnet/adj/adj.c b/vnet/vnet/adj/adj.c index 24f7662d943..2741c885cee 100644 --- a/vnet/vnet/adj/adj.c +++ b/vnet/vnet/adj/adj.c @@ -61,6 +61,7 @@ adj_alloc (fib_protocol_t proto) adj->mcast_group_index = ~0; adj->saved_lookup_next_index = 0; adj->n_adj = 1; + adj->lookup_next_index = 0; fib_node_init(&adj->ia_node, FIB_NODE_TYPE_ADJ); @@ -163,7 +164,8 @@ adj_last_lock_gone (ip_adjacency_t *adj) /* * complete and incomplete nbr adjs */ - adj_nbr_remove(adj->ia_nh_proto, + adj_nbr_remove(adj_get_index(adj), + adj->ia_nh_proto, adj->ia_link, &adj->sub_type.nbr.next_hop, adj->rewrite_header.sw_if_index); @@ -376,6 +378,12 @@ adj_show (vlib_main_t * vm, if (ADJ_INDEX_INVALID != ai) { + if (pool_is_free_index(adj_pool, ai)) + { + vlib_cli_output (vm, "adjacency %d invalid", ai); + return 0; + } + vlib_cli_output (vm, "[@%d] %U", ai, format_ip_adjacency, ai, diff --git a/vnet/vnet/adj/adj_internal.h b/vnet/vnet/adj/adj_internal.h index e3e0e04c99a..833bc7c9e01 100644 --- a/vnet/vnet/adj/adj_internal.h +++ b/vnet/vnet/adj/adj_internal.h @@ -93,7 +93,8 @@ extern void adj_nbr_update_rewrite_internal (ip_adjacency_t *adj, extern ip_adjacency_t * adj_alloc(fib_protocol_t proto); -extern void adj_nbr_remove(fib_protocol_t nh_proto, +extern void adj_nbr_remove(adj_index_t ai, + fib_protocol_t nh_proto, vnet_link_t link_type, const ip46_address_t *nh_addr, u32 sw_if_index); diff --git a/vnet/vnet/adj/adj_nbr.c b/vnet/vnet/adj/adj_nbr.c index 1a78ecbc49f..003e18e8d66 100644 --- a/vnet/vnet/adj/adj_nbr.c +++ b/vnet/vnet/adj/adj_nbr.c @@ -76,7 +76,8 @@ adj_nbr_insert (fib_protocol_t nh_proto, } void -adj_nbr_remove (fib_protocol_t nh_proto, +adj_nbr_remove (adj_index_t ai, + fib_protocol_t nh_proto, vnet_link_t link_type, const ip46_address_t *nh_addr, u32 sw_if_index) @@ -87,6 +88,7 @@ adj_nbr_remove (fib_protocol_t nh_proto, return; ADJ_NBR_SET_KEY(kv, link_type, nh_addr); + kv.value = ai; BV(clib_bihash_add_del) (adj_nbr_tables[nh_proto][sw_if_index], &kv, 0); } diff --git a/vnet/vnet/ethernet/arp.c b/vnet/vnet/ethernet/arp.c index eeaac4d3808..c6dbbc689b2 100644 --- a/vnet/vnet/ethernet/arp.c +++ b/vnet/vnet/ethernet/arp.c @@ -1509,7 +1509,7 @@ arp_add_del_interface_address (ip4_main_t * im, ethernet_arp_main_t *am = ðernet_arp_main; ethernet_arp_ip4_entry_t *e; - if (vec_len (am->ethernet_arp_by_sw_if_index) < sw_if_index) + if (vec_len (am->ethernet_arp_by_sw_if_index) <= sw_if_index) return; if (is_del) diff --git a/vnet/vnet/gre/gre.c b/vnet/vnet/gre/gre.c index a4b3f9fc228..0faed13eb29 100644 --- a/vnet/vnet/gre/gre.c +++ b/vnet/vnet/gre/gre.c @@ -250,9 +250,9 @@ gre_update_adj (vnet_main_t * vnm, * @brief TX function. Only called L2. 
L3 traffic uses the adj-midchains */ static uword -gre_interface_tx (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) +gre_interface_tx_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { gre_main_t * gm = &gre_main; u32 next_index; @@ -318,12 +318,34 @@ gre_interface_tx (vlib_main_t * vm, return frame->n_vectors; } +static uword +gre_interface_tx (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return (gre_interface_tx_inline (vm, node, frame)); +} + +static uword +gre_teb_interface_tx (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return (gre_interface_tx_inline (vm, node, frame)); +} + static u8 * format_gre_tunnel_name (u8 * s, va_list * args) { u32 dev_instance = va_arg (*args, u32); return format (s, "gre%d", dev_instance); } +static u8 * format_gre_tunnel_teb_name (u8 * s, va_list * args) +{ + u32 dev_instance = va_arg (*args, u32); + return format (s, "teb-gre%d", dev_instance); +} + static u8 * format_gre_device (u8 * s, va_list * args) { u32 dev_instance = va_arg (*args, u32); @@ -348,6 +370,21 @@ VNET_DEVICE_CLASS (gre_device_class) = { VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_class, gre_interface_tx) +VNET_DEVICE_CLASS (gre_device_teb_class) = { + .name = "GRE TEB tunnel device", + .format_device_name = format_gre_tunnel_teb_name, + .format_device = format_gre_device, + .format_tx_trace = format_gre_tx_trace, + .tx_function = gre_teb_interface_tx, + .admin_up_down_function = gre_interface_admin_up_down, +#ifdef SOON + .clear counter = 0; +#endif +}; + +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_teb_class, + gre_teb_interface_tx) + VNET_HW_INTERFACE_CLASS (gre_hw_interface_class) = { .name = "GRE", .format_header = format_gre_header_with_length, diff --git a/vnet/vnet/gre/gre.h b/vnet/vnet/gre/gre.h index a0ee9ad263b..b6544b9b737 100644 --- a/vnet/vnet/gre/gre.h +++ b/vnet/vnet/gre/gre.h @@ -36,20 +36,48 @@ typedef enum { GRE_N_ERROR, } gre_error_t; +/** + * A GRE payload protocol registration + */ typedef struct { - /* Name (a c string). */ + /** Name (a c string). */ char * name; - /* GRE protocol type in host byte order. */ + /** GRE protocol type in host byte order. */ gre_protocol_t protocol; - /* Node which handles this type. */ + /** Node which handles this type. */ u32 node_index; - /* Next index for this type. */ + /** Next index for this type. */ u32 next_index; } gre_protocol_info_t; +/** + * @brief The GRE tunnel type + */ +typedef enum gre_tunnel_tyoe_t_ +{ + /** + * L3 GRE (i.e. 
this tunnel is in L3 mode) + */ + GRE_TUNNEL_TYPE_L3, + /** + * Transparent Ethernet Bridging - the tunnel is in L2 mode + */ + GRE_TUNNEL_TYPE_TEB, +} gre_tunnel_type_t; + +#define GRE_TUNNEL_TYPE_NAMES { \ + [GRE_TUNNEL_TYPE_L3] = "L3", \ + [GRE_TUNNEL_TYPE_TEB] = "TEB", \ +} + +#define GRE_TUNNEL_N_TYPES ((gre_tunnel_type_t)GRE_TUNNEL_TYPE_TEB+1) + +/** + * @brief A representation of a GRE tunnel + */ typedef struct { /** * Linkage into the FIB object graph @@ -70,7 +98,7 @@ typedef struct { u32 outer_fib_index; u32 hw_if_index; u32 sw_if_index; - u8 teb; + gre_tunnel_type_t type; /** * The FIB entry sourced by the tunnel for its destination prefix @@ -96,21 +124,39 @@ typedef struct { adj_index_t l2_adj_index; } gre_tunnel_t; +/** + * @brief GRE related global data + */ typedef struct { - /* pool of tunnel instances */ + /** + * pool of tunnel instances + */ gre_tunnel_t *tunnels; + /** + * GRE payload protocol registrations + */ gre_protocol_info_t * protocol_infos; - /* Hash tables mapping name/protocol to protocol info index. */ + /** + * Hash tables mapping name/protocol to protocol info index. + */ uword * protocol_info_by_name, * protocol_info_by_protocol; - /* Hash mapping src/dst addr pair to tunnel */ + /** + * Hash mapping src/dst addr pair to tunnel + */ uword * tunnel_by_key; - /* Free vlib hw_if_indices */ - u32 * free_gre_tunnel_hw_if_indices; + /** + * Free vlib hw_if_indices. + * A free list per-tunnel type since the interfaces ctreated are fo different + * types and we cannot change the type. + */ + u32 * free_gre_tunnel_hw_if_indices[GRE_TUNNEL_N_TYPES]; - /* Mapping from sw_if_index to tunnel index */ + /** + * Mapping from sw_if_index to tunnel index + */ u32 * tunnel_index_by_sw_if_index; /* convenience */ @@ -120,8 +166,7 @@ typedef struct { /** * @brief IPv4 and GRE header. - * -*/ + */ typedef CLIB_PACKED (struct { ip4_header_t ip4; gre_header_t gre; @@ -148,8 +193,8 @@ extern clib_error_t * gre_interface_admin_up_down (vnet_main_t * vnm, extern void gre_tunnel_stack (adj_index_t ai); extern void gre_update_adj (vnet_main_t * vnm, - u32 sw_if_index, - adj_index_t ai); + u32 sw_if_index, + adj_index_t ai); format_function_t format_gre_protocol; format_function_t format_gre_header; @@ -157,7 +202,7 @@ format_function_t format_gre_header_with_length; extern vlib_node_registration_t gre_input_node; extern vnet_device_class_t gre_device_class; -extern vnet_device_class_t gre_l2_device_class; +extern vnet_device_class_t gre_device_teb_class; /* Parse gre protocol as 0xXXXX or protocol name. In either host or network byte order. 
*/ diff --git a/vnet/vnet/gre/interface.c b/vnet/vnet/gre/interface.c index 3234de09858..7adc5268446 100644 --- a/vnet/vnet/gre/interface.c +++ b/vnet/vnet/gre/interface.c @@ -24,6 +24,8 @@ #include <vnet/adj/adj_nbr.h> #include <vnet/mpls/mpls.h> +static const char *gre_tunnel_type_names[] = GRE_TUNNEL_TYPE_NAMES; + static inline u64 gre_mk_key (const ip4_address_t *src, const ip4_address_t *dst, @@ -34,17 +36,25 @@ gre_mk_key (const ip4_address_t *src, } static u8 * +format_gre_tunnel_type (u8 * s, va_list * args) +{ + gre_tunnel_type_t type = va_arg (*args, gre_tunnel_type_t); + + return (format(s, "%s", gre_tunnel_type_names[type])); +} + +static u8 * format_gre_tunnel (u8 * s, va_list * args) { gre_tunnel_t * t = va_arg (*args, gre_tunnel_t *); gre_main_t * gm = &gre_main; s = format (s, - "[%d] %U (src) %U (dst) payload %s outer_fib_index %d", + "[%d] %U (src) %U (dst) payload %U outer_fib_index %d", t - gm->tunnels, format_ip4_address, &t->tunnel_src, format_ip4_address, &t->tunnel_dst, - (t->teb ? "teb" : "ip"), + format_gre_tunnel_type, t->type, t->outer_fib_index); return s; @@ -248,12 +258,17 @@ vnet_gre_tunnel_add (vnet_gre_add_del_tunnel_args_t *a, memset (t, 0, sizeof (*t)); fib_node_init(&t->node, FIB_NODE_TYPE_GRE_TUNNEL); - if (vec_len (gm->free_gre_tunnel_hw_if_indices) > 0) { + if (a->teb) + t->type = GRE_TUNNEL_TYPE_TEB; + else + t->type = GRE_TUNNEL_TYPE_L3; + + if (vec_len (gm->free_gre_tunnel_hw_if_indices[t->type]) > 0) { vnet_interface_main_t * im = &vnm->interface_main; - hw_if_index = gm->free_gre_tunnel_hw_if_indices - [vec_len (gm->free_gre_tunnel_hw_if_indices)-1]; - _vec_len (gm->free_gre_tunnel_hw_if_indices) -= 1; + hw_if_index = gm->free_gre_tunnel_hw_if_indices[t->type] + [vec_len (gm->free_gre_tunnel_hw_if_indices[t->type])-1]; + _vec_len (gm->free_gre_tunnel_hw_if_indices[t->type]) -= 1; hi = vnet_get_hw_interface (vnm, hw_if_index); hi->dev_instance = t - gm->tunnels; @@ -269,14 +284,14 @@ vnet_gre_tunnel_add (vnet_gre_add_del_tunnel_args_t *a, vlib_zero_simple_counter (&im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], sw_if_index); vnet_interface_counter_unlock(im); - if (a->teb) + if (GRE_TUNNEL_TYPE_TEB == t->type) { - t->l2_tx_arc = vlib_node_add_named_next(vlib_get_main(), - hi->tx_node_index, - "adj-l2-midchain"); + t->l2_tx_arc = vlib_node_add_named_next(vlib_get_main(), + hi->tx_node_index, + "adj-l2-midchain"); } } else { - if (a->teb) + if (GRE_TUNNEL_TYPE_TEB == t->type) { /* Default MAC address (d00b:eed0:0000 + sw_if_index) */ memset (address, 0, sizeof (address)); @@ -287,7 +302,7 @@ vnet_gre_tunnel_add (vnet_gre_add_del_tunnel_args_t *a, address[4] = t - gm->tunnels; error = ethernet_register_interface(vnm, - gre_device_class.index, + gre_device_teb_class.index, t - gm->tunnels, address, &hw_if_index, 0); @@ -316,14 +331,11 @@ vnet_gre_tunnel_add (vnet_gre_add_del_tunnel_args_t *a, t->hw_if_index = hw_if_index; t->outer_fib_index = outer_fib_index; t->sw_if_index = sw_if_index; - t->teb = a->teb; vec_validate_init_empty (gm->tunnel_index_by_sw_if_index, sw_if_index, ~0); gm->tunnel_index_by_sw_if_index[sw_if_index] = t - gm->tunnels; vec_validate (im->fib_index_by_sw_if_index, sw_if_index); - im->fib_index_by_sw_if_index[sw_if_index] = t->outer_fib_index; - ip4_sw_interface_enable_disable(sw_if_index, 1); hi->min_packet_bytes = 64 + sizeof (gre_header_t) + sizeof (ip4_header_t); hi->per_packet_overhead_bytes = @@ -365,13 +377,12 @@ vnet_gre_tunnel_add (vnet_gre_add_del_tunnel_args_t *a, clib_memcpy (&t->tunnel_src, &a->src, sizeof 
(t->tunnel_src)); clib_memcpy (&t->tunnel_dst, &a->dst, sizeof (t->tunnel_dst)); - if (t->teb) + if (GRE_TUNNEL_TYPE_TEB == t->type) { t->l2_adj_index = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4, VNET_LINK_ETHERNET, &zero_addr, sw_if_index); - gre_update_adj(vnm, t->sw_if_index, t->l2_adj_index); } @@ -399,9 +410,11 @@ vnet_gre_tunnel_delete (vnet_gre_add_del_tunnel_args_t *a, vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */); /* make sure tunnel is removed from l2 bd or xconnect */ set_int_l2_mode(gm->vlib_main, vnm, MODE_L3, sw_if_index, 0, 0, 0, 0); - vec_add1 (gm->free_gre_tunnel_hw_if_indices, t->hw_if_index); + vec_add1 (gm->free_gre_tunnel_hw_if_indices[t->type], t->hw_if_index); gm->tunnel_index_by_sw_if_index[sw_if_index] = ~0; - ip4_sw_interface_enable_disable(sw_if_index, 0); + + if (GRE_TUNNEL_TYPE_TEB == t->type) + adj_unlock(t->l2_adj_index); fib_entry_child_remove(t->fib_entry_index, t->sibling_index); diff --git a/vnet/vnet/gre/node.c b/vnet/vnet/gre/node.c index 556f1a81837..86f7a6eeea4 100644 --- a/vnet/vnet/gre/node.c +++ b/vnet/vnet/gre/node.c @@ -68,12 +68,10 @@ gre_input (vlib_main_t * vm, vlib_frame_t * from_frame) { gre_main_t * gm = &gre_main; - ip4_main_t * ip4m = &ip4_main; gre_input_runtime_t * rt = (void *) node->runtime_data; __attribute__((unused)) u32 n_left_from, next_index, * from, * to_next; u64 cached_tunnel_key = (u64) ~0; u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index = 0; - u32 cached_tunnel_fib_index = 0, tunnel_fib_index; u32 cpu_index = os_get_cpu_number(); u32 len; @@ -193,16 +191,12 @@ gre_input (vlib_main_t * vm, hi = vnet_get_hw_interface (gm->vnet_main, t->hw_if_index); tunnel_sw_if_index = hi->sw_if_index; - tunnel_fib_index = vec_elt (ip4m->fib_index_by_sw_if_index, - tunnel_sw_if_index); cached_tunnel_sw_if_index = tunnel_sw_if_index; - cached_tunnel_fib_index = tunnel_fib_index; } else { tunnel_sw_if_index = cached_tunnel_sw_if_index; - tunnel_fib_index = cached_tunnel_fib_index; } } else @@ -218,7 +212,6 @@ gre_input (vlib_main_t * vm, 1 /* packets */, len /* bytes */); - vnet_buffer(b0)->sw_if_index[VLIB_TX] = tunnel_fib_index; vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index; drop0: @@ -247,16 +240,12 @@ drop0: hi = vnet_get_hw_interface (gm->vnet_main, t->hw_if_index); tunnel_sw_if_index = hi->sw_if_index; - tunnel_fib_index = vec_elt (ip4m->fib_index_by_sw_if_index, - tunnel_sw_if_index); cached_tunnel_sw_if_index = tunnel_sw_if_index; - cached_tunnel_fib_index = tunnel_fib_index; } else { tunnel_sw_if_index = cached_tunnel_sw_if_index; - tunnel_fib_index = cached_tunnel_fib_index; } } else @@ -272,7 +261,6 @@ drop0: 1 /* packets */, len /* bytes */); - vnet_buffer(b1)->sw_if_index[VLIB_TX] = tunnel_fib_index; vnet_buffer(b1)->sw_if_index[VLIB_RX] = tunnel_sw_if_index; drop1: @@ -280,7 +268,7 @@ drop1: { gre_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->tunnel_id = ~0; + tr->tunnel_id = tunnel_sw_if_index; tr->length = ip0->length; tr->src.as_u32 = ip0->src_address.as_u32; tr->dst.as_u32 = ip0->dst_address.as_u32; @@ -290,7 +278,7 @@ drop1: { gre_rx_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof (*tr)); - tr->tunnel_id = ~0; + tr->tunnel_id = tunnel_sw_if_index; tr->length = ip1->length; tr->src.as_u32 = ip1->src_address.as_u32; tr->dst.as_u32 = ip1->dst_address.as_u32; @@ -374,16 +362,12 @@ drop1: hi = vnet_get_hw_interface (gm->vnet_main, t->hw_if_index); tunnel_sw_if_index = hi->sw_if_index; - tunnel_fib_index = vec_elt (ip4m->fib_index_by_sw_if_index, - tunnel_sw_if_index); 
cached_tunnel_sw_if_index = tunnel_sw_if_index; - cached_tunnel_fib_index = tunnel_fib_index; } else { tunnel_sw_if_index = cached_tunnel_sw_if_index; - tunnel_fib_index = cached_tunnel_fib_index; } } else @@ -399,7 +383,6 @@ drop1: 1 /* packets */, len /* bytes */); - vnet_buffer(b0)->sw_if_index[VLIB_TX] = tunnel_fib_index; vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index; drop: diff --git a/vnet/vnet/interface_output.c b/vnet/vnet/interface_output.c index 1d1546f6ebe..46e8a98c75d 100644 --- a/vnet/vnet/interface_output.c +++ b/vnet/vnet/interface_output.c @@ -58,15 +58,30 @@ format_vnet_interface_output_trace (u8 * s, va_list * va) if (t->sw_if_index != (u32) ~ 0) { - si = vnet_get_sw_interface (vnm, t->sw_if_index); indent = format_get_indent (s); - s = format (s, "%U\n%U%U", - format_vnet_sw_interface_name, vnm, si, - format_white_space, indent, - node->format_buffer ? node-> - format_buffer : format_hex_bytes, t->data, - sizeof (t->data)); + if (pool_is_free_index + (vnm->interface_main.sw_interfaces, t->sw_if_index)) + { + /* the interface may have been deleted by the time the trace is printed */ + s = format (s, "sw_if_index: %d\n%U%U", + t->sw_if_index, + format_white_space, indent, + node->format_buffer ? node-> + format_buffer : format_hex_bytes, t->data, + sizeof (t->data)); + } + else + { + si = vnet_get_sw_interface (vnm, t->sw_if_index); + + s = format (s, "%U\n%U%U", + format_vnet_sw_interface_name, vnm, si, + format_white_space, indent, + node->format_buffer ? node-> + format_buffer : format_hex_bytes, t->data, + sizeof (t->data)); + } } return s; } diff --git a/vnet/vnet/ip/ip6_forward.c b/vnet/vnet/ip/ip6_forward.c index bc346786283..14dd9dfbdf0 100644 --- a/vnet/vnet/ip/ip6_forward.c +++ b/vnet/vnet/ip/ip6_forward.c @@ -498,6 +498,8 @@ ip6_add_del_interface_address (vlib_main_t * vm, goto done; } + ip6_sw_interface_enable_disable(sw_if_index, !is_del); + if (is_del) ip6_del_interface_routes (im, ip6_af.fib_index, address, address_length); diff --git a/vnet/vnet/ip/ip6_neighbor.c b/vnet/vnet/ip/ip6_neighbor.c index cebe09a9557..af852a2be86 100644 --- a/vnet/vnet/ip/ip6_neighbor.c +++ b/vnet/vnet/ip/ip6_neighbor.c @@ -1849,7 +1849,6 @@ ip6_neighbor_sw_interface_add_del (vnet_main_t * vnm, pool_put (nm->if_radv_pool, a); nm->if_radv_pool_index_by_sw_if_index[sw_if_index] = ~0; ri = ~0; - ip6_sw_interface_enable_disable(sw_if_index, 0); } } else @@ -1858,7 +1857,6 @@ ip6_neighbor_sw_interface_add_del (vnet_main_t * vnm, { vnet_hw_interface_t * hw_if0; - ip6_sw_interface_enable_disable(sw_if_index, 1); hw_if0 = vnet_get_sup_hw_interface (vnm, sw_if_index); pool_get (nm->if_radv_pool, a); diff --git a/vpp/vpp-api/api.c b/vpp/vpp-api/api.c index 91726e163b6..676b0c45728 100644 --- a/vpp/vpp-api/api.c +++ b/vpp/vpp-api/api.c @@ -5008,7 +5008,7 @@ static void send_gre_tunnel_details clib_memcpy (rmp->src_address, &(t->tunnel_src), 4); clib_memcpy (rmp->dst_address, &(t->tunnel_dst), 4); rmp->outer_fib_id = htonl (im->fibs[t->outer_fib_index].ft_table_id); - rmp->teb = t->teb; + rmp->teb = (GRE_TUNNEL_TYPE_TEB == t->type); rmp->sw_if_index = htonl (t->sw_if_index); rmp->context = context; |
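Editor's note (not part of the commit): the sketch below shows how the helpers added in this change (VppGreInterface, IpRoute/IpPath, and the VppTestCase framework) fit together. It is a condensed, hypothetical test case that mirrors what test_gre.py above does in full; all class names, constructors, and methods are taken from the diff, and such a test would be run via the new "make test TEST=gre" target.

#!/usr/bin/env python
# Minimal GRE usage sketch built from the abstractions added in this change.

import unittest

from framework import VppTestCase, VppTestRunner
from vpp_gre_interface import VppGreInterface
from vpp_ip_route import IpRoute, IpPath


class TestGreSketch(VppTestCase):
    """ Minimal GRE usage sketch (hypothetical test case) """

    def setUp(self):
        super(TestGreSketch, self).setUp()
        # one packet-generator interface with IPv4 configured
        self.create_pg_interfaces(range(1))
        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()

    def test_gre_encap(self):
        """ Route a prefix through an L3 GRE tunnel """
        # L3 GRE tunnel from pg0's address to a peer reachable via pg0
        gre_if = VppGreInterface(self, self.pg0.local_ip4, "1.1.1.2")
        gre_if.add_vpp_config()
        gre_if.admin_up()
        gre_if.config_ip4()

        # overlay: route 4.4.4.4/32 through the tunnel interface
        route_via_tun = IpRoute(self, "4.4.4.4", 32,
                                [IpPath("0.0.0.0", gre_if.sw_if_index)])
        route_via_tun.add_vpp_config()

        # underlay: resolve the tunnel destination out of pg0
        route_tun_dst = IpRoute(self, "1.1.1.2", 32,
                                [IpPath(self.pg0.remote_ip4,
                                        self.pg0.sw_if_index)])
        route_tun_dst.add_vpp_config()

        # ... send a stream to 4.4.4.4 on pg0 and expect GRE-encapped
        # packets back out of pg0, as verify_tunneled_4o4() checks above.

        # cleanup
        route_tun_dst.remove_vpp_config()
        route_via_tun.remove_vpp_config()
        gre_if.remove_vpp_config()


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)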