-rw-r--r--  test/patches/scapy-2.3.3/mpls.py.patch    |   13
-rw-r--r--  test/test_gre.py                          |   21
-rw-r--r--  test/test_mpls.py                         |  522
-rw-r--r--  test/vpp_ip_route.py                      |   75
-rw-r--r--  test/vpp_papi_provider.py                 |  183
-rw-r--r--  vnet/Makefile.am                          |    7
-rw-r--r--  vnet/etc/scripts/mpls-tunnel              |   87
-rw-r--r--  vnet/vnet/buffer.h                        |   20
-rw-r--r--  vnet/vnet/dhcp/client.c                   |    2
-rw-r--r--  vnet/vnet/dpo/mpls_label_dpo.c            |  194
-rw-r--r--  vnet/vnet/dpo/mpls_label_dpo.h            |   36
-rw-r--r--  vnet/vnet/ethernet/arp.c                  |    4
-rw-r--r--  vnet/vnet/fib/fib_attached_export.c       |  101
-rw-r--r--  vnet/vnet/fib/fib_attached_export.h       |    4
-rw-r--r--  vnet/vnet/fib/fib_entry.c                 |  280
-rw-r--r--  vnet/vnet/fib/fib_entry.h                 |   20
-rw-r--r--  vnet/vnet/fib/fib_entry_cover.c           |   63
-rw-r--r--  vnet/vnet/fib/fib_entry_delegate.c        |  149
-rw-r--r--  vnet/vnet/fib/fib_entry_delegate.h        |  124
-rw-r--r--  vnet/vnet/fib/fib_entry_src.c             |  118
-rw-r--r--  vnet/vnet/fib/fib_entry_src.h             |    3
-rw-r--r--  vnet/vnet/fib/fib_entry_src_rr.c          |   25
-rw-r--r--  vnet/vnet/fib/fib_internal.h              |    3
-rw-r--r--  vnet/vnet/fib/fib_node.c                  |   27
-rw-r--r--  vnet/vnet/fib/fib_node.h                  |    6
-rw-r--r--  vnet/vnet/fib/fib_path.c                  |   99
-rw-r--r--  vnet/vnet/fib/fib_path.h                  |    1
-rw-r--r--  vnet/vnet/fib/fib_path_ext.c              |  103
-rw-r--r--  vnet/vnet/fib/fib_path_ext.h              |    8
-rw-r--r--  vnet/vnet/fib/fib_path_list.c             |   14
-rw-r--r--  vnet/vnet/fib/fib_path_list.h             |    1
-rw-r--r--  vnet/vnet/fib/fib_table.c                 |   39
-rw-r--r--  vnet/vnet/fib/fib_table.h                 |   22
-rw-r--r--  vnet/vnet/fib/fib_test.c                  | 1033
-rw-r--r--  vnet/vnet/fib/fib_types.c                 |   15
-rw-r--r--  vnet/vnet/fib/fib_types.h                 |   27
-rw-r--r--  vnet/vnet/fib/fib_walk.c                  |    8
-rw-r--r--  vnet/vnet/fib/mpls_fib.h                  |    2
-rw-r--r--  vnet/vnet/gre/interface.c                 |    4
-rw-r--r--  vnet/vnet/ip/ip4_forward.c                |    4
-rw-r--r--  vnet/vnet/ip/ip6_forward.c                |    4
-rw-r--r--  vnet/vnet/ip/ip6_neighbor.c               |    2
-rw-r--r--  vnet/vnet/ip/lookup.c                     |   24
-rw-r--r--  vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c   |    1
-rw-r--r--  vnet/vnet/mpls/interface.c                |  992
-rw-r--r--  vnet/vnet/mpls/mpls.c                     |  451
-rw-r--r--  vnet/vnet/mpls/mpls.h                     |   78
-rw-r--r--  vnet/vnet/mpls/mpls_lookup.c              |   27
-rw-r--r--  vnet/vnet/mpls/mpls_tunnel.c              |  779
-rw-r--r--  vnet/vnet/mpls/mpls_tunnel.h              |   98
-rw-r--r--  vnet/vnet/mpls/policy_encap.c             |  181
-rw-r--r--  vpp-api-test/vat/api_format.c             |  427
-rw-r--r--  vpp/app/vpe_cli.c                         |    2
-rw-r--r--  vpp/vpp-api/api.c                         |  699
-rw-r--r--  vpp/vpp-api/custom_dump.c                 |  100
-rw-r--r--  vpp/vpp-api/test_client.c                 |   11
-rw-r--r--  vpp/vpp-api/vpe.api                       |  312
57 files changed, 3858 insertions, 3797 deletions
diff --git a/test/patches/scapy-2.3.3/mpls.py.patch b/test/patches/scapy-2.3.3/mpls.py.patch
new file mode 100644
index 00000000000..5c819110ddc
--- /dev/null
+++ b/test/patches/scapy-2.3.3/mpls.py.patch
@@ -0,0 +1,13 @@
+diff --git a/scapy/contrib/mpls.py b/scapy/contrib/mpls.py
+index 640a0c5..6af1d4a 100644
+--- a/scapy/contrib/mpls.py
++++ b/scapy/contrib/mpls.py
+@@ -18,6 +18,8 @@ class MPLS(Packet):
+
+ def guess_payload_class(self, payload):
+ if len(payload) >= 1:
++ if not self.s:
++ return MPLS
+ ip_version = (ord(payload[0]) >> 4) & 0xF
+ if ip_version == 4:
+ return IP
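The scapy patch above makes MPLS.guess_payload_class return MPLS whenever the bottom-of-stack bit is clear, so a received label stack dissects as nested MPLS layers instead of treating the second label as IP payload. A minimal sketch of what that enables, assuming the patched scapy 2.3.3 shipped in this tree (Python 2 era, hence str() for the raw bytes):

    from scapy.contrib.mpls import MPLS
    from scapy.layers.inet import IP, UDP

    # outer label has s=0 (not end-of-stack), inner label has s=1
    pkt = MPLS(label=32, s=0) / MPLS(label=33, s=1) / \
          IP(dst="10.0.0.1") / UDP(sport=1234, dport=1234)

    # re-dissect the raw bytes; with the patch both labels are recovered
    parsed = MPLS(str(pkt))
    assert parsed.label == 32
    inner = parsed.getlayer(MPLS, 2)   # second MPLS layer in the stack
    assert inner is not None and inner.label == 33

This is the property verify_mpls_stack() in test_mpls.py relies on when it walks rx_mpls[MPLS].payload down the stack.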
diff --git a/test/test_gre.py b/test/test_gre.py
index f54b6e01307..0b5082859b7 100644
--- a/test/test_gre.py
+++ b/test/test_gre.py
@@ -7,7 +7,7 @@ from logging import *
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
from vpp_gre_interface import VppGreInterface
-from vpp_ip_route import IpRoute, IpPath
+from vpp_ip_route import IpRoute, RoutePath
from vpp_papi_provider import L2_VTR_OP
from scapy.packet import Raw
@@ -317,7 +317,7 @@ class TestGRE(VppTestCase):
gre_if.config_ip4()
route_via_tun = IpRoute(self, "4.4.4.4", 32,
- [IpPath("0.0.0.0", gre_if.sw_if_index)])
+ [RoutePath("0.0.0.0", gre_if.sw_if_index)])
route_via_tun.add_vpp_config()
@@ -346,7 +346,8 @@ class TestGRE(VppTestCase):
# Add a route that resolves the tunnel's destination
#
route_tun_dst = IpRoute(self, "1.1.1.2", 32,
- [IpPath(self.pg0.remote_ip4, self.pg0.sw_if_index)])
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index)])
route_tun_dst.add_vpp_config()
#
@@ -487,7 +488,7 @@ class TestGRE(VppTestCase):
# Add a route via the tunnel - in the overlay
#
route_via_tun = IpRoute(self, "9.9.9.9", 32,
- [IpPath("0.0.0.0", gre_if.sw_if_index)])
+ [RoutePath("0.0.0.0", gre_if.sw_if_index)])
route_via_tun.add_vpp_config()
#
@@ -495,8 +496,8 @@ class TestGRE(VppTestCase):
# underlay table
#
route_tun_dst = IpRoute(self, "2.2.2.2", 32, table_id=1,
- paths=[IpPath(self.pg1.remote_ip4,
- self.pg1.sw_if_index)])
+ paths=[RoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
route_tun_dst.add_vpp_config()
#
@@ -548,11 +549,11 @@ class TestGRE(VppTestCase):
# Add routes to resolve the tunnel destinations
#
route_tun1_dst = IpRoute(self, "2.2.2.2", 32,
- [IpPath(self.pg0.remote_ip4,
- self.pg0.sw_if_index)])
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index)])
route_tun2_dst = IpRoute(self, "2.2.2.3", 32,
- [IpPath(self.pg0.remote_ip4,
- self.pg0.sw_if_index)])
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index)])
route_tun1_dst.add_vpp_config()
route_tun2_dst.add_vpp_config()
diff --git a/test/test_mpls.py b/test/test_mpls.py
index 45af470421e..d1b1b9198c0 100644
--- a/test/test_mpls.py
+++ b/test/test_mpls.py
@@ -6,6 +6,7 @@ from logging import *
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
+from vpp_ip_route import IpRoute, RoutePath, MplsRoute, MplsIpBind
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q, ARP
@@ -13,6 +14,7 @@ from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import ICMPv6ND_NS, IPv6, UDP
from scapy.contrib.mpls import MPLS
+
class TestMPLS(VppTestCase):
""" MPLS Test Case """
@@ -24,7 +26,7 @@ class TestMPLS(VppTestCase):
super(TestMPLS, self).setUp()
# create 2 pg interfaces
- self.create_pg_interfaces(range(3))
+ self.create_pg_interfaces(range(2))
# setup both interfaces
# assign them different tables.
@@ -35,31 +37,51 @@ class TestMPLS(VppTestCase):
i.set_table_ip4(table_id)
i.set_table_ip6(table_id)
i.config_ip4()
- i.config_ip6()
- i.enable_mpls()
i.resolve_arp()
+ i.config_ip6()
i.resolve_ndp()
+ i.enable_mpls()
table_id += 1
def tearDown(self):
super(TestMPLS, self).tearDown()
- def create_stream_ip4(self, src_if, mpls_label, mpls_ttl):
+    # scapy's IP TTL default is 64; the MPLS TTL defaults to 255 here
+ def create_stream_labelled_ip4(self, src_if, mpls_labels, mpls_ttl=255):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if.sw_if_index,
+ src_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)
+
+ for ii in range(len(mpls_labels)):
+ if ii == len(mpls_labels) - 1:
+ p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=1)
+ else:
+ p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=0)
+ p = (p / IP(src=src_if.remote_ip4, dst=src_if.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_stream_ip4(self, src_if, dst_ip):
pkts = []
for i in range(0, 257):
info = self.create_packet_info(src_if.sw_if_index,
src_if.sw_if_index)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
- MPLS(label=mpls_label, ttl=mpls_ttl) /
- IP(src=src_if.remote_ip4, dst=src_if.remote_ip4) /
+ IP(src=src_if.remote_ip4, dst=dst_ip) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
pkts.append(p)
return pkts
- def create_stream_ip6(self, src_if, mpls_label, mpls_ttl):
+ def create_stream_labelled_ip6(self, src_if, mpls_label, mpls_ttl):
pkts = []
for i in range(0, 257):
info = self.create_packet_info(src_if.sw_if_index,
@@ -74,8 +96,18 @@ class TestMPLS(VppTestCase):
pkts.append(p)
return pkts
+ def verify_filter(self, capture, sent):
+ if not len(capture) == len(sent):
+            # filter out any IPv6 RAs from the capture
+ for p in capture:
+ if (p.haslayer(IPv6)):
+ capture.remove(p)
+ return capture
+
def verify_capture_ip4(self, src_if, capture, sent):
try:
+ capture = self.verify_filter(capture, sent)
+
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
@@ -83,8 +115,8 @@ class TestMPLS(VppTestCase):
rx = capture[i]
# the rx'd packet has the MPLS label popped
- eth = rx[Ether];
- self.assertEqual(eth.type, 0x800);
+ eth = rx[Ether]
+ self.assertEqual(eth.type, 0x800)
tx_ip = tx[IP]
rx_ip = rx[IP]
@@ -92,10 +124,95 @@ class TestMPLS(VppTestCase):
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# IP processing post pop has decremented the TTL
- self.assertEqual(rx_ip.ttl+1, tx_ip.ttl)
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
except:
- raise;
+ raise
+
+ def verify_mpls_stack(self, rx, mpls_labels, ttl=255, num=0):
+        # the rx'd packet still carries an MPLS label stack
+ eth = rx[Ether]
+ self.assertEqual(eth.type, 0x8847)
+
+ rx_mpls = rx[MPLS]
+
+ for ii in range(len(mpls_labels)):
+ self.assertEqual(rx_mpls.label, mpls_labels[ii])
+ self.assertEqual(rx_mpls.cos, 0)
+ if ii == num:
+ self.assertEqual(rx_mpls.ttl, ttl)
+ else:
+ self.assertEqual(rx_mpls.ttl, 255)
+
+ if ii == len(mpls_labels) - 1:
+ self.assertEqual(rx_mpls.s, 1)
+ else:
+ # not end of stack
+ self.assertEqual(rx_mpls.s, 0)
+ # pop the label to expose the next
+ rx_mpls = rx_mpls[MPLS].payload
+
+ def verify_capture_labelled_ip4(self, src_if, capture, sent,
+ mpls_labels):
+ try:
+ capture = self.verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ # the MPLS TTL is copied from the IP
+ self.verify_mpls_stack(
+ rx, mpls_labels, rx_ip.ttl, len(mpls_labels) - 1)
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+
+ except:
+ raise
+
+ def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels):
+ try:
+ capture = self.verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ # the MPLS TTL is 255 since it enters a new tunnel
+ self.verify_mpls_stack(
+ rx, mpls_labels, 255, len(mpls_labels) - 1)
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+
+ except:
+ raise
+
+ def verify_capture_labelled(self, src_if, capture, sent,
+ mpls_labels, ttl=254, num=0):
+ try:
+ capture = self.verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ rx = capture[i]
+ self.verify_mpls_stack(rx, mpls_labels, ttl, num)
+ except:
+ raise
def verify_capture_ip6(self, src_if, capture, sent):
try:
@@ -106,8 +223,8 @@ class TestMPLS(VppTestCase):
rx = capture[i]
# the rx'd packet has the MPLS label popped
- eth = rx[Ether];
- self.assertEqual(eth.type, 0x86DD);
+ eth = rx[Ether]
+ self.assertEqual(eth.type, 0x86DD)
tx_ip = tx[IPv6]
rx_ip = rx[IPv6]
@@ -118,8 +235,373 @@ class TestMPLS(VppTestCase):
self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
except:
- raise;
+ raise
+
+ def test_swap(self):
+ """ MPLS label swap tests """
+
+ #
+ # A simple MPLS xconnect - eos label in label out
+ #
+ route_32_eos = MplsRoute(self, 32, 1,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[33])])
+ route_32_eos.add_vpp_config()
+
+ #
+        # a labelled stream that matches the x-connect on local label 32
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [32])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33])
+
+ #
+ # A simple MPLS xconnect - non-eos label in label out
+ #
+ route_32_neos = MplsRoute(self, 32, 0,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[33])])
+ route_32_neos.add_vpp_config()
+
+ #
+        # a non-EOS labelled stream that matches the x-connect on local label 32
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [32, 99])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled(self.pg0, rx, tx, [33, 99])
+
+ #
+ # An MPLS xconnect - EOS label in IP out
+ #
+ route_33_eos = MplsRoute(self, 33, 1,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[])])
+ route_33_eos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [33])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip4(self.pg0, rx, tx)
+
+ #
+ # An MPLS xconnect - non-EOS label in IP out - an invalid configuration
+ # so this traffic should be dropped.
+ #
+ route_33_neos = MplsRoute(self, 33, 0,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[])])
+ route_33_neos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [33, 99])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ try:
+ self.assertEqual(0, len(rx))
+ except:
+ error("MPLS non-EOS packets popped and forwarded")
+ error(packet.show())
+ raise
+
+ #
+ # A recursive EOS x-connect, which resolves through another x-connect
+ #
+ route_34_eos = MplsRoute(self, 34, 1,
+ [RoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_via_label=32,
+ labels=[44, 45])])
+ route_34_eos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [34])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33, 44, 45])
+
+ #
+ # A recursive non-EOS x-connect, which resolves through another x-connect
+ #
+ route_34_neos = MplsRoute(self, 34, 0,
+ [RoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_via_label=32,
+ labels=[44, 46])])
+ route_34_neos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [34, 99])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+        # it's the 2nd (counting from 0) label in the stack that is swapped
+ self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 46, 99], num=2)
+
+ #
+        # a recursive IP route that resolves through the recursive non-EOS x-connect
+ #
+ ip_10_0_0_1 = IpRoute(self, "10.0.0.1", 32,
+ [RoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_via_label=34,
+ labels=[55])])
+ ip_10_0_0_1.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33, 44, 46, 55])
+
+ ip_10_0_0_1.remove_vpp_config()
+ route_34_neos.remove_vpp_config()
+ route_34_eos.remove_vpp_config()
+ route_33_neos.remove_vpp_config()
+ route_33_eos.remove_vpp_config()
+ route_32_neos.remove_vpp_config()
+ route_32_eos.remove_vpp_config()
+
+ def test_bind(self):
+ """ MPLS Local Label Binding test """
+
+ #
+ # Add a non-recursive route with a single out label
+ #
+ route_10_0_0_1 = IpRoute(self, "10.0.0.1", 32,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[45])])
+ route_10_0_0_1.add_vpp_config()
+
+ # bind a local label to the route
+ binding = MplsIpBind(self, 44, "10.0.0.1", 32)
+ binding.add_vpp_config()
+
+ # non-EOS stream
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [44, 99])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled(self.pg0, rx, tx, [45, 99])
+
+ # EOS stream
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [44])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled(self.pg0, rx, tx, [45])
+
+ # IP stream
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [45])
+
+ #
+ # cleanup
+ #
+ binding.remove_vpp_config()
+ route_10_0_0_1.remove_vpp_config()
+
+ def test_imposition(self):
+ """ MPLS label imposition test """
+
+ #
+ # Add a non-recursive route with a single out label
+ #
+ route_10_0_0_1 = IpRoute(self, "10.0.0.1", 32,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[32])])
+ route_10_0_0_1.add_vpp_config()
+
+ #
+ # a stream that matches the route for 10.0.0.1
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32])
+
+ #
+        # Add a non-recursive route with 3 out labels
+ #
+ route_10_0_0_2 = IpRoute(self, "10.0.0.2", 32,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[32, 33, 34])])
+ route_10_0_0_2.add_vpp_config()
+
+ #
+        # a stream that matches the route for 10.0.0.2
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.2")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 33, 34])
+
+ #
+        # add a recursive path, with an output label, via the 1-label route
+ #
+ route_11_0_0_1 = IpRoute(self, "11.0.0.1", 32,
+ [RoutePath("10.0.0.1",
+ 0xffffffff,
+ labels=[44])])
+ route_11_0_0_1.add_vpp_config()
+
+ #
+ # a stream that matches the route for 11.0.0.1, should pick up
+ # the label stack for 11.0.0.1 and 10.0.0.1
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "11.0.0.1")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 44])
+
+ #
+ # add a recursive path, with 2 labels, via the 3 label route
+ #
+ route_11_0_0_2 = IpRoute(self, "11.0.0.2", 32,
+ [RoutePath("10.0.0.2",
+ 0xffffffff,
+ labels=[44, 45])])
+ route_11_0_0_2.add_vpp_config()
+
+ #
+        # a stream that matches the route for 11.0.0.2, should pick up
+        # the label stack for 11.0.0.2 and 10.0.0.2
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "11.0.0.2")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(
+ self.pg0, rx, tx, [32, 33, 34, 44, 45])
+
+ #
+ # cleanup
+ #
+ route_11_0_0_2.remove_vpp_config()
+ route_11_0_0_1.remove_vpp_config()
+ route_10_0_0_2.remove_vpp_config()
+ route_10_0_0_1.remove_vpp_config()
+
+ def test_tunnel(self):
+ """ MPLS Tunnel Tests """
+
+ #
+ # Create a tunnel with a single out label
+ #
+ nh_addr = socket.inet_pton(socket.AF_INET, self.pg0.remote_ip4)
+
+ reply = self.vapi.mpls_tunnel_add_del(0xffffffff, # don't know the if index yet
+ 1, # IPv4 next-hop
+ nh_addr,
+ self.pg0.sw_if_index,
+ 0, # next-hop-table-id
+ 1, # next-hop-weight
+ 2, # num-out-labels,
+ [44, 46])
+ self.vapi.sw_interface_set_flags(reply.sw_if_index, admin_up_down=1)
+
+ #
+ # add an unlabelled route through the new tunnel
+ #
+ dest_addr = socket.inet_pton(socket.AF_INET, "10.0.0.3")
+ nh_addr = socket.inet_pton(socket.AF_INET, "0.0.0.0")
+ dest_addr_len = 32
+
+ self.vapi.ip_add_del_route(dest_addr,
+ dest_addr_len,
+ nh_addr, # all zeros next-hop - tunnel is p2p
+ reply.sw_if_index, # sw_if_index of the new tunnel
+ 0, # table-id
+ 0, # next-hop-table-id
+ 1, # next-hop-weight
+ 0, # num-out-labels,
+ []) # out-label
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46])
def test_v4_exp_null(self):
""" MPLS V4 Explicit NULL test """
@@ -128,7 +610,7 @@ class TestMPLS(VppTestCase):
# The first test case has an MPLS TTL of 0
# all packet should be dropped
#
- tx = self.create_stream_ip4(self.pg0, 0, 0)
+ tx = self.create_stream_labelled_ip4(self.pg0, [0], 0)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
@@ -137,7 +619,7 @@ class TestMPLS(VppTestCase):
rx = self.pg0.get_capture()
try:
- self.assertEqual(0, len(rx));
+ self.assertEqual(0, len(rx))
except:
error("MPLS TTL=0 packets forwarded")
error(packet.show())
@@ -148,7 +630,7 @@ class TestMPLS(VppTestCase):
# PG0 is in the default table
#
self.vapi.cli("clear trace")
- tx = self.create_stream_ip4(self.pg0, 0, 2)
+ tx = self.create_stream_labelled_ip4(self.pg0, [0])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
@@ -163,7 +645,7 @@ class TestMPLS(VppTestCase):
# we are ensuring the post-pop lookup occurs in the VRF table
#
self.vapi.cli("clear trace")
- tx = self.create_stream_ip4(self.pg1, 0, 2)
+ tx = self.create_stream_labelled_ip4(self.pg1, [0])
self.pg1.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
@@ -180,7 +662,7 @@ class TestMPLS(VppTestCase):
# PG0 is in the default table
#
self.vapi.cli("clear trace")
- tx = self.create_stream_ip6(self.pg0, 2, 2)
+ tx = self.create_stream_labelled_ip6(self.pg0, 2, 2)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
@@ -195,7 +677,7 @@ class TestMPLS(VppTestCase):
# we are ensuring the post-pop lookup occurs in the VRF table
#
self.vapi.cli("clear trace")
- tx = self.create_stream_ip6(self.pg1, 2, 2)
+ tx = self.create_stream_labelled_ip6(self.pg1, 2, 2)
self.pg1.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
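test_swap() and test_imposition() above both rely on the same stacking convention: when a route with out-labels resolves via another labelled route (or a non-EOS x-connect), the resolving route's labels end up outermost and the recursive route's labels sit beneath them, which is why 11.0.0.2 resolving via 10.0.0.2 is expected to carry [32, 33, 34, 44, 45]. A tiny illustrative helper (not part of the test framework) capturing that ordering:

    # illustrative only: expected on-the-wire stack, outermost label first,
    # in the order verify_capture_labelled_ip4() asserts it
    def expected_stack(via_labels, route_labels):
        # the via (resolving) route's labels are imposed last,
        # so they end up on the outside of the stack
        return via_labels + route_labels

    # matches the assertions in test_imposition() above
    assert expected_stack([32, 33, 34], [44, 45]) == [32, 33, 34, 44, 45]
    assert expected_stack([32], [44]) == [32, 44]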
diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py
index 78e6aaa23b9..1dc8c1abb0f 100644
--- a/test/vpp_ip_route.py
+++ b/test/vpp_ip_route.py
@@ -6,13 +6,19 @@
import socket
+# from vnet/vnet/mpls/mpls_types.h
+MPLS_IETF_MAX_LABEL = 0xfffff
+MPLS_LABEL_INVALID = MPLS_IETF_MAX_LABEL + 1
-class IpPath:
- def __init__(self, nh_addr, nh_sw_if_index, nh_table_id=0):
+class RoutePath:
+
+ def __init__(self, nh_addr, nh_sw_if_index, nh_table_id=0, labels=[], nh_via_label=MPLS_LABEL_INVALID):
self.nh_addr = socket.inet_pton(socket.AF_INET, nh_addr)
self.nh_itf = nh_sw_if_index
self.nh_table_id = nh_table_id
+ self.nh_via_label = nh_via_label
+ self.nh_labels = labels
class IpRoute:
@@ -34,7 +40,11 @@ class IpRoute:
self.dest_addr_len,
path.nh_addr,
path.nh_itf,
- table_id=self.table_id)
+ table_id=self.table_id,
+ next_hop_out_label_stack=path.nh_labels,
+ next_hop_n_out_labels=len(
+ path.nh_labels),
+ next_hop_via_label=path.nh_via_label)
def remove_vpp_config(self):
for path in self.paths:
@@ -44,3 +54,62 @@ class IpRoute:
path.nh_itf,
table_id=self.table_id,
is_add=0)
+
+
+class MplsIpBind:
+ """
+ MPLS to IP Binding
+ """
+
+ def __init__(self, test, local_label, dest_addr, dest_addr_len):
+ self._test = test
+ self.dest_addr = socket.inet_pton(socket.AF_INET, dest_addr)
+ self.dest_addr_len = dest_addr_len
+ self.local_label = local_label
+
+ def add_vpp_config(self):
+ self._test.vapi.mpls_ip_bind_unbind(self.local_label,
+ self.dest_addr,
+ self.dest_addr_len)
+
+ def remove_vpp_config(self):
+ self._test.vapi.mpls_ip_bind_unbind(self.local_label,
+ self.dest_addr,
+ self.dest_addr_len,
+ is_bind=0)
+
+
+class MplsRoute:
+ """
+ MPLS Route
+ """
+
+ def __init__(self, test, local_label, eos_bit, paths, table_id=0):
+ self._test = test
+ self.paths = paths
+ self.local_label = local_label
+ self.eos_bit = eos_bit
+ self.table_id = table_id
+
+ def add_vpp_config(self):
+ for path in self.paths:
+ self._test.vapi.mpls_route_add_del(self.local_label,
+ self.eos_bit,
+ 1,
+ path.nh_addr,
+ path.nh_itf,
+ table_id=self.table_id,
+ next_hop_out_label_stack=path.nh_labels,
+ next_hop_n_out_labels=len(
+ path.nh_labels),
+ next_hop_via_label=path.nh_via_label)
+
+ def remove_vpp_config(self):
+ for path in self.paths:
+ self._test.vapi.mpls_route_add_del(self.local_label,
+ self.eos_bit,
+ 1,
+ path.nh_addr,
+ path.nh_itf,
+ table_id=self.table_id,
+ is_add=0)
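The classes above are the objects test_mpls.py drives. A minimal usage sketch, assuming it runs inside a VppTestCase method with pg0 already configured:

    from vpp_ip_route import IpRoute, RoutePath, MplsRoute, MplsIpBind

    # IPv4 route that imposes out-label 32 towards pg0's peer
    ip_route = IpRoute(self, "10.0.0.1", 32,
                       [RoutePath(self.pg0.remote_ip4,
                                  self.pg0.sw_if_index,
                                  labels=[32])])
    ip_route.add_vpp_config()

    # bind local label 44 to the same prefix
    bind = MplsIpBind(self, 44, "10.0.0.1", 32)
    bind.add_vpp_config()

    # EOS MPLS cross-connect: local label 32 in, label 33 out
    xc = MplsRoute(self, 32, 1,
                   [RoutePath(self.pg0.remote_ip4,
                              self.pg0.sw_if_index,
                              labels=[33])])
    xc.add_vpp_config()

    # tear down in reverse order
    xc.remove_vpp_config()
    bind.remove_vpp_config()
    ip_route.remove_vpp_config()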
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index 9db26d9fd1f..d29f90c1789 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -1,4 +1,5 @@
import os
+import array
from logging import error
from hook import Hook
@@ -21,6 +22,7 @@ MPLS_LABEL_INVALID = MPLS_IETF_MAX_LABEL + 1
class L2_VTR_OP:
L2_POP_1 = 3
+need_swap = True if os.sys.byteorder == 'little' else False
class VppPapiProvider(object):
"""VPP-api provider using vpp-papi
@@ -368,12 +370,15 @@ class VppPapiProvider(object):
next_hop_address,
next_hop_sw_if_index=0xFFFFFFFF,
table_id=0,
- resolve_attempts=0,
- classify_table_index=0xFFFFFFFF,
- next_hop_out_label=MPLS_LABEL_INVALID,
next_hop_table_id=0,
+ next_hop_weight=1,
+ next_hop_n_out_labels = 0,
+ next_hop_out_label_stack = [],
+ next_hop_via_label = MPLS_LABEL_INVALID,
create_vrf_if_needed=0,
- resolve_if_needed=0,
+ is_resolve_host=0,
+ is_resolve_attached=0,
+ classify_table_index=0xFFFFFFFF,
is_add=1,
is_drop=0,
is_unreach=0,
@@ -382,10 +387,7 @@ class VppPapiProvider(object):
is_local=0,
is_classify=0,
is_multipath=0,
- is_resolve_host=0,
- is_resolve_attached=0,
- not_last=0,
- next_hop_weight=1):
+ not_last=0):
"""
:param dst_address_length:
@@ -395,10 +397,8 @@ class VppPapiProvider(object):
:param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
:param vrf_id: (Default value = 0)
:param lookup_in_vrf: (Default value = 0)
- :param resolve_attempts: (Default value = 0)
:param classify_table_index: (Default value = 0xFFFFFFFF)
:param create_vrf_if_needed: (Default value = 0)
- :param resolve_if_needed: (Default value = 0)
:param is_add: (Default value = 1)
:param is_drop: (Default value = 0)
:param is_ipv6: (Default value = 0)
@@ -411,16 +411,18 @@ class VppPapiProvider(object):
:param next_hop_weight: (Default value = 1)
"""
+ stack = array.array('I', next_hop_out_label_stack)
+ if need_swap:
+ stack.byteswap()
+ stack = stack.tostring()
+
return self.api(
vpp_papi.ip_add_del_route,
(next_hop_sw_if_index,
table_id,
- resolve_attempts,
classify_table_index,
- next_hop_out_label,
next_hop_table_id,
create_vrf_if_needed,
- resolve_if_needed,
is_add,
is_drop,
is_unreach,
@@ -435,7 +437,10 @@ class VppPapiProvider(object):
next_hop_weight,
dst_address_length,
dst_address,
- next_hop_address))
+ next_hop_address,
+ next_hop_n_out_labels,
+ next_hop_via_label,
+ stack))
def ip_neighbor_add_del(self,
sw_if_index,
@@ -505,3 +510,153 @@ class VppPapiProvider(object):
dst_address,
outer_fib_id)
)
+
+ def mpls_route_add_del(
+ self,
+ label,
+ eos,
+ next_hop_proto_is_ip4,
+ next_hop_address,
+ next_hop_sw_if_index=0xFFFFFFFF,
+ table_id=0,
+ next_hop_table_id=0,
+ next_hop_weight=1,
+ next_hop_n_out_labels = 0,
+ next_hop_out_label_stack = [],
+ next_hop_via_label = MPLS_LABEL_INVALID,
+ create_vrf_if_needed=0,
+ is_resolve_host=0,
+ is_resolve_attached=0,
+ is_add=1,
+ is_drop=0,
+ is_multipath=0,
+ classify_table_index=0xFFFFFFFF,
+ is_classify=0,
+ not_last=0):
+ """
+
+ :param dst_address_length:
+ :param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
+ :param dst_address:
+ :param next_hop_address:
+ :param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
+ :param vrf_id: (Default value = 0)
+ :param lookup_in_vrf: (Default value = 0)
+ :param classify_table_index: (Default value = 0xFFFFFFFF)
+ :param create_vrf_if_needed: (Default value = 0)
+ :param is_add: (Default value = 1)
+ :param is_drop: (Default value = 0)
+ :param is_ipv6: (Default value = 0)
+ :param is_local: (Default value = 0)
+ :param is_classify: (Default value = 0)
+ :param is_multipath: (Default value = 0)
+ :param is_resolve_host: (Default value = 0)
+ :param is_resolve_attached: (Default value = 0)
+ :param not_last: (Default value = 0)
+ :param next_hop_weight: (Default value = 1)
+
+ """
+ stack = array.array('I', next_hop_out_label_stack)
+ if need_swap:
+ stack.byteswap()
+ stack = stack.tostring()
+
+ return self.api(
+ vpp_papi.mpls_route_add_del,
+ (label,
+ eos,
+ table_id,
+ classify_table_index,
+ create_vrf_if_needed,
+ is_add,
+ is_classify,
+ is_multipath,
+ is_resolve_host,
+ is_resolve_attached,
+ next_hop_proto_is_ip4,
+ next_hop_weight,
+ next_hop_address,
+ next_hop_n_out_labels,
+ next_hop_sw_if_index,
+ next_hop_table_id,
+ next_hop_via_label,
+ stack))
+
+ def mpls_ip_bind_unbind(
+ self,
+ label,
+ dst_address,
+ dst_address_length,
+ table_id=0,
+ ip_table_id=0,
+ is_ip4=1,
+ create_vrf_if_needed=0,
+ is_bind=1):
+ """
+ """
+ return self.api(
+ vpp_papi.mpls_ip_bind_unbind,
+ (table_id,
+ label,
+ ip_table_id,
+ create_vrf_if_needed,
+ is_bind,
+ is_ip4,
+ dst_address_length,
+ dst_address))
+
+ def mpls_tunnel_add_del(
+ self,
+ tun_sw_if_index,
+ next_hop_proto_is_ip4,
+ next_hop_address,
+ next_hop_sw_if_index=0xFFFFFFFF,
+ next_hop_table_id=0,
+ next_hop_weight=1,
+ next_hop_n_out_labels = 0,
+ next_hop_out_label_stack = [],
+ next_hop_via_label = MPLS_LABEL_INVALID,
+ create_vrf_if_needed=0,
+ is_add=1,
+ l2_only=0):
+ """
+
+ :param dst_address_length:
+ :param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
+ :param dst_address:
+ :param next_hop_address:
+ :param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
+ :param vrf_id: (Default value = 0)
+ :param lookup_in_vrf: (Default value = 0)
+ :param classify_table_index: (Default value = 0xFFFFFFFF)
+ :param create_vrf_if_needed: (Default value = 0)
+ :param is_add: (Default value = 1)
+ :param is_drop: (Default value = 0)
+ :param is_ipv6: (Default value = 0)
+ :param is_local: (Default value = 0)
+ :param is_classify: (Default value = 0)
+ :param is_multipath: (Default value = 0)
+ :param is_resolve_host: (Default value = 0)
+ :param is_resolve_attached: (Default value = 0)
+ :param not_last: (Default value = 0)
+ :param next_hop_weight: (Default value = 1)
+
+ """
+ stack = array.array('I', next_hop_out_label_stack)
+ if need_swap:
+ stack.byteswap()
+ stack = stack.tostring()
+
+ return self.api(
+ vpp_papi.mpls_tunnel_add_del,
+ (tun_sw_if_index,
+ is_add,
+ l2_only,
+ next_hop_proto_is_ip4,
+ next_hop_weight,
+ next_hop_address,
+ next_hop_n_out_labels,
+ next_hop_sw_if_index,
+ next_hop_table_id,
+ stack))
+
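All three new wrappers serialise the out-label stack the same way: the host-order u32 labels are byte-swapped into network order and appended to the message as raw bytes. A small sketch of that packing (tostring() is the Python 2 era array API used here; it is tobytes() on Python 3):

    import array
    import os

    labels = [44, 46]                    # host-order MPLS label values
    stack = array.array('I', labels)
    if os.sys.byteorder == 'little':     # mirrors need_swap above
        stack.byteswap()                 # each u32 becomes network byte order
    wire = stack.tostring()

    # 44 -> 00 00 00 2c, 46 -> 00 00 00 2e once in network byte order
    assert wire == b'\x00\x00\x00\x2c\x00\x00\x00\x2e'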
diff --git a/vnet/Makefile.am b/vnet/Makefile.am
index 0ba07bac5bd..7d6abc60840 100644
--- a/vnet/Makefile.am
+++ b/vnet/Makefile.am
@@ -452,12 +452,13 @@ libvnet_la_SOURCES += \
vnet/mpls/mpls_features.c \
vnet/mpls/node.c \
vnet/mpls/interface.c \
- vnet/mpls/policy_encap.c \
+ vnet/mpls/mpls_tunnel.c \
vnet/mpls/pg.c
nobase_include_HEADERS += \
vnet/mpls/mpls.h \
vnet/mpls/mpls_types.h \
+ vnet/mpls/mpls_tunnel.h \
vnet/mpls/packet.h \
vnet/mpls/error.def
@@ -801,6 +802,7 @@ libvnet_la_SOURCES += \
vnet/fib/fib_entry_src_mpls.c \
vnet/fib/fib_entry_src_lisp.c \
vnet/fib/fib_entry_cover.c \
+ vnet/fib/fib_entry_delegate.c \
vnet/fib/fib_path_list.c \
vnet/fib/fib_path.c \
vnet/fib/fib_path_ext.c \
@@ -815,7 +817,8 @@ nobase_include_HEADERS += \
vnet/fib/fib_table.h \
vnet/fib/fib_node.h \
vnet/fib/fib_node_list.h \
- vnet/fib/fib_entry.h
+ vnet/fib/fib_entry.h \
+ vnet/fib/fib_entry_delegate.h
########################################
# ADJ
diff --git a/vnet/etc/scripts/mpls-tunnel b/vnet/etc/scripts/mpls-tunnel
new file mode 100644
index 00000000000..d04b29702e7
--- /dev/null
+++ b/vnet/etc/scripts/mpls-tunnel
@@ -0,0 +1,87 @@
+packet-generator new {
+ name x0
+ limit 1
+ node ip4-input
+ size 64-64
+ no-recycle
+ data {
+ ICMP: 1.0.0.2 -> 2.0.0.2
+ ICMP echo_request
+ incrementing 100
+ }
+}
+packet-generator new {
+ name x1
+ limit 1
+ node ip4-input
+ size 64-64
+ no-recycle
+ data {
+ ICMP: 1.0.0.2 -> 2.0.1.2
+ ICMP echo_request
+ incrementing 100
+ }
+}
+packet-generator new {
+ name x2
+ limit 1
+ node ip4-input
+ size 64-64
+ no-recycle
+ data {
+ ICMP: 1.0.0.2 -> 2.0.2.2
+ ICMP echo_request
+ incrementing 100
+ }
+}
+packet-generator new {
+ name x3
+ limit 1
+ node ip4-input
+ size 64-64
+ no-recycle
+ data {
+ ICMP: 1.0.0.2 -> 2.0.3.2
+ ICMP echo_request
+ incrementing 100
+ }
+}
+
+
+
+trace add pg-input 100
+
+loop create
+set int state loop0 up
+
+set int ip address loop0 10.0.0.1/24
+set ip arp loop0 10.0.0.2 00:00:11:aa:bb:cc
+
+mpls tunnel add via 10.0.0.2 loop0 out-label 33 out-label 34 out-label 35 out-label 36
+set int state mpls-tunnel0 up
+set int ip addr mpls-tunnel0 192.168.0.1/32
+ip route add 2.0.0.2/32 via 192.168.0.2 mpls-tunnel0
+
+
+mpls tunnel add via 10.0.0.2 out-label 33
+set int state mpls-tunnel1 up
+set int ip addr mpls-tunnel1 192.168.1.1/32
+ip route add 2.0.1.2/32 via 192.168.1.2 mpls-tunnel1 out-label 99
+
+mpls tunnel add via 10.0.0.2 loop0 out-label 3
+set int state mpls-tunnel2 up
+set int ip addr mpls-tunnel2 192.168.2.1/32
+ip route add 2.0.2.2/32 via 192.168.2.2 mpls-tunnel2
+
+
+mpls tunnel add l2-only via 10.0.0.2 loop0 out-label 234 out-label 0
+set int state mpls-tunnel3 up
+set int l2 bridge mpls-tunnel3 1
+
+loop create
+set int ip addr loop1 6.0.1.44/24
+set int l2 bridge loop1 1 bvi
+set int l2 learn loop1 disable
+set int state loop1 up
+
+ip route add 2.0.3.2/32 via 6.0.1.45 loop1
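The tunnels this script builds from the CLI can equally be created over the binary API; test_tunnel() in test_mpls.py does just that with the mpls_tunnel_add_del wrapper added above. A hedged sketch of the API equivalent of the first tunnel, run inside a VppTestCase (loop0_sw_if_index stands in for whatever sw_if_index the loopback was given):

    # API equivalent of:
    #   mpls tunnel add via 10.0.0.2 loop0 out-label 33 out-label 34 out-label 35 out-label 36
    import socket

    nh = socket.inet_pton(socket.AF_INET, "10.0.0.2")
    reply = self.vapi.mpls_tunnel_add_del(0xffffffff,         # create a new tunnel interface
                                          1,                  # next-hop is IPv4
                                          nh,
                                          loop0_sw_if_index,  # assumed loopback sw_if_index
                                          0,                  # next-hop table-id
                                          1,                  # next-hop weight
                                          4,                  # number of out-labels
                                          [33, 34, 35, 36])   # the out-label stack
    self.vapi.sw_interface_set_flags(reply.sw_if_index, admin_up_down=1)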
diff --git a/vnet/vnet/buffer.h b/vnet/vnet/buffer.h
index 2ae4f1ccb70..b3c71c127cc 100644
--- a/vnet/vnet/buffer.h
+++ b/vnet/vnet/buffer.h
@@ -143,6 +143,18 @@ typedef struct
};
} ip;
+ /*
+ * MPLS:
+ * data copied from the MPLS header that was popped from the packet
+ * during the look-up.
+ */
+ struct
+ {
+ u8 ttl;
+ u8 exp;
+ u8 first;
+ } mpls;
+
/* Multicast replication */
struct
{
@@ -331,6 +343,14 @@ typedef struct
};
} vnet_buffer_opaque_t;
+/*
+ * The opaque field of the vlib_buffer_t is interpreted as a
+ * vnet_buffer_opaque_t. Hence it should be big enough to accommodate one.
+ */
+STATIC_ASSERT (sizeof (vnet_buffer_opaque_t) <= STRUCT_SIZE_OF (vlib_buffer_t,
+ opaque),
+ "VNET buffer meta-data too large for vlib_buffer");
+
#define vnet_buffer(b) ((vnet_buffer_opaque_t *) (b)->opaque)
/* Full cache line (64 bytes) of additional space */
diff --git a/vnet/vnet/dhcp/client.c b/vnet/vnet/dhcp/client.c
index f555f19ef12..c352e3109ee 100644
--- a/vnet/vnet/dhcp/client.c
+++ b/vnet/vnet/dhcp/client.c
@@ -224,7 +224,7 @@ int dhcp_client_for_us (u32 bi, vlib_buffer_t * b,
c->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL, // no label stack
FIB_ROUTE_PATH_FLAG_NONE);
}
diff --git a/vnet/vnet/dpo/mpls_label_dpo.c b/vnet/vnet/dpo/mpls_label_dpo.c
index 48c03be57fc..606b7ba3911 100644
--- a/vnet/vnet/dpo/mpls_label_dpo.c
+++ b/vnet/vnet/dpo/mpls_label_dpo.c
@@ -42,29 +42,55 @@ mpls_label_dpo_get_index (mpls_label_dpo_t *mld)
}
index_t
-mpls_label_dpo_create (mpls_label_t label,
+mpls_label_dpo_create (mpls_label_t *label_stack,
mpls_eos_bit_t eos,
u8 ttl,
u8 exp,
+ dpo_proto_t payload_proto,
const dpo_id_t *dpo)
{
mpls_label_dpo_t *mld;
+ u32 ii;
mld = mpls_label_dpo_alloc();
-
- vnet_mpls_uc_set_label(&mld->mld_hdr.label_exp_s_ttl, label);
- vnet_mpls_uc_set_ttl(&mld->mld_hdr.label_exp_s_ttl, ttl);
- vnet_mpls_uc_set_exp(&mld->mld_hdr.label_exp_s_ttl, exp);
- vnet_mpls_uc_set_s(&mld->mld_hdr.label_exp_s_ttl, eos);
+ mld->mld_n_labels = vec_len(label_stack);
+ mld->mld_payload_proto = payload_proto;
/*
+     * construct label rewrite headers for each value passed.
* get the header in network byte order since we will paint it
* on a packet in the data-plane
*/
- mld->mld_hdr.label_exp_s_ttl =
- clib_host_to_net_u32(mld->mld_hdr.label_exp_s_ttl);
- dpo_stack(DPO_MPLS_LABEL, DPO_PROTO_MPLS, &mld->mld_dpo, dpo);
+ for (ii = 0; ii < mld->mld_n_labels-1; ii++)
+ {
+ vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
+ vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, 255);
+ vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, 0);
+ vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, MPLS_NON_EOS);
+ mld->mld_hdr[ii].label_exp_s_ttl =
+ clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+ }
+
+ /*
+ * the inner most label
+ */
+ ii = mld->mld_n_labels-1;
+
+ vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
+ vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, ttl);
+ vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, exp);
+ vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, eos);
+ mld->mld_hdr[ii].label_exp_s_ttl =
+ clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+
+ /*
+     * stack this label object on its parent.
+ */
+ dpo_stack(DPO_MPLS_LABEL,
+ mld->mld_payload_proto,
+ &mld->mld_dpo,
+ dpo);
return (mpls_label_dpo_get_index(mld));
}
@@ -76,15 +102,20 @@ format_mpls_label_dpo (u8 *s, va_list *args)
u32 indent = va_arg (*args, u32);
mpls_unicast_header_t hdr;
mpls_label_dpo_t *mld;
+ u32 ii;
mld = mpls_label_dpo_get(index);
- hdr.label_exp_s_ttl =
- clib_net_to_host_u32(mld->mld_hdr.label_exp_s_ttl);
-
s = format(s, "mpls-label:[%d]:", index);
- s = format(s, "%U\n", format_mpls_header, hdr);
- s = format(s, "%U", format_white_space, indent);
+
+ for (ii = 0; ii < mld->mld_n_labels; ii++)
+ {
+ hdr.label_exp_s_ttl =
+ clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+ s = format(s, "%U", format_mpls_header, hdr);
+ }
+
+ s = format(s, "\n%U", format_white_space, indent);
s = format(s, "%U", format_dpo_id, &mld->mld_dpo, indent+2);
return (s);
@@ -129,9 +160,11 @@ typedef struct mpls_label_imposition_trace_t_
} mpls_label_imposition_trace_t;
always_inline uword
-mpls_label_imposition (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+mpls_label_imposition_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ u8 payload_is_ip4,
+ u8 payload_is_ip6)
{
u32 n_left_from, next_index, * from, * to_next;
@@ -153,6 +186,7 @@ mpls_label_imposition (vlib_main_t * vm,
vlib_buffer_t * b0;
u32 bi0, mldi0;
u32 next0;
+ u8 ttl;
bi0 = from[0];
to_next[0] = bi0;
@@ -167,16 +201,69 @@ mpls_label_imposition (vlib_main_t * vm,
mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
mld0 = mpls_label_dpo_get(mldi0);
+ if (payload_is_ip4)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ ip4_header_t * ip0 = vlib_buffer_get_current(b0);
+ u32 checksum0;
+
+ checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
+ checksum0 += checksum0 >= 0xffff;
+
+ ip0->checksum = checksum0;
+ ip0->ttl -= 1;
+ ttl = ip0->ttl;
+ }
+ else if (payload_is_ip6)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ ip6_header_t * ip0 = vlib_buffer_get_current(b0);
+
+ ip0->hop_limit -= 1;
+ ttl = ip0->hop_limit;
+ }
+ else
+ {
+ /*
+ * else, the packet to be encapped is an MPLS packet
+ */
+ if (vnet_buffer(b0)->mpls.first)
+ {
+ /*
+                   * The first label to be imposed on the packet. This is a label
+                   * swap, in which case we stashed the TTL and EXP bits in the
+                   * packet in the lookup node.
+ */
+ ASSERT(0 != vnet_buffer (b0)->mpls.ttl);
+
+ ttl = vnet_buffer(b0)->mpls.ttl - 1;
+ }
+ else
+ {
+ /*
+                   * Not the first label, implying we are recursing down a chain
+                   * of output labels.
+                   * Each layer is considered a new LSP - hence the TTL is reset.
+ */
+ ttl = 255;
+ }
+ }
+ vnet_buffer(b0)->mpls.first = 0;
+
/* Paint the MPLS header */
- vlib_buffer_advance(b0, -sizeof(*hdr0));
+ vlib_buffer_advance(b0, -(sizeof(*hdr0) * mld0->mld_n_labels));
hdr0 = vlib_buffer_get_current(b0);
- // FIXME.
- // need to copy the TTL from the correct place.
- // for IPvX imposition from the IP header
- // so we need a deidcated ipx-to-mpls-label-imp-node
- // for mpls switch and stack another solution is required.
- *hdr0 = mld0->mld_hdr;
+ clib_memcpy(hdr0, mld0->mld_hdr,
+ sizeof(*hdr0) * mld0->mld_n_labels);
+
+ /* fixup the TTL for the inner most label */
+ hdr0 = hdr0 + (mld0->mld_n_labels - 1);
+ ((char*)hdr0)[3] = ttl;
next0 = mld0->mld_dpo.dpoi_next_node;
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
@@ -215,6 +302,14 @@ format_mpls_label_imposition_trace (u8 * s, va_list * args)
return (s);
}
+static uword
+mpls_label_imposition (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame, 0, 0));
+}
+
VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
.function = mpls_label_imposition,
.name = "mpls-label-imposition",
@@ -226,7 +321,52 @@ VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
[0] = "error-drop",
}
};
-VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node, mpls_label_imposition)
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node,
+ mpls_label_imposition)
+
+static uword
+ip4_mpls_label_imposition (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame, 1, 0));
+}
+
+VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
+ .function = ip4_mpls_label_imposition,
+ .name = "ip4-mpls-label-imposition",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_node,
+ ip4_mpls_label_imposition)
+
+static uword
+ip6_mpls_label_imposition (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame, 0, 1));
+}
+
+VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
+ .function = ip6_mpls_label_imposition,
+ .name = "ip6-mpls-label-imposition",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node,
+ ip6_mpls_label_imposition)
static void
mpls_label_dpo_mem_show (void)
@@ -246,12 +386,12 @@ const static dpo_vft_t mld_vft = {
const static char* const mpls_label_imp_ip4_nodes[] =
{
- "mpls-label-imposition",
+ "ip4-mpls-label-imposition",
NULL,
};
const static char* const mpls_label_imp_ip6_nodes[] =
{
- "mpls-label-imposition",
+ "ip6-mpls-label-imposition",
NULL,
};
const static char* const mpls_label_imp_mpls_nodes[] =
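mpls_label_dpo_create() above pre-computes one 32-bit label stack entry per label: a 20-bit label, 3 EXP bits, the S (end-of-stack) bit and an 8-bit TTL, stored in network byte order so the data-plane can copy the whole rewrite straight onto the packet. A quick illustration of that layout in Python (the same fields the vnet_mpls_uc_set_* helpers write in C):

    import struct

    def mpls_shim(label, exp, s, ttl):
        # 20-bit label | 3-bit EXP | 1-bit S | 8-bit TTL (RFC 3032 layout)
        word = (label << 12) | (exp << 9) | (s << 8) | ttl
        return struct.pack('>I', word)   # network byte order, as painted on the wire

    # inner-most label of a stack: EOS set, TTL taken from the payload
    assert mpls_shim(33, 0, 1, 64) == b'\x00\x02\x11\x40'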
diff --git a/vnet/vnet/dpo/mpls_label_dpo.h b/vnet/vnet/dpo/mpls_label_dpo.h
index 47ee344933f..6580c47d7cc 100644
--- a/vnet/vnet/dpo/mpls_label_dpo.h
+++ b/vnet/vnet/dpo/mpls_label_dpo.h
@@ -26,9 +26,9 @@
typedef struct mpls_label_dpo_t
{
/**
- * The MPLS label header to impose
+ * The MPLS label header to impose. Outer most label first.
*/
- mpls_unicast_header_t mld_hdr;
+ mpls_unicast_header_t mld_hdr[8];
/**
* Next DPO in the graph
@@ -36,15 +36,45 @@ typedef struct mpls_label_dpo_t
dpo_id_t mld_dpo;
/**
+ * The protocol of the payload/packets that are being encapped
+ */
+ dpo_proto_t mld_payload_proto;
+
+ /**
+ * Size of the label stack
+ */
+ u16 mld_n_labels;
+
+ /**
* Number of locks/users of the label
*/
u16 mld_locks;
} mpls_label_dpo_t;
-extern index_t mpls_label_dpo_create(mpls_label_t label,
+/**
+ * @brief Assert that the MPLS label object is less than a cache line in size.
+ * Should this get any bigger, we will need to reconsider how many labels
+ * can be pushed in one object.
+ */
+_Static_assert((sizeof(mpls_label_dpo_t) <= CLIB_CACHE_LINE_BYTES),
+ "MPLS label DPO is larger than one cache line.");
+
+/**
+ * @brief Create an MPLS label object
+ *
+ * @param label_stack The stack of labels to impose, outer most label first
+ * @param eos The inner most label's EOS bit
+ * @param ttl The inner most label's TTL value
+ * @param exp The inner most label's EXP value
+ * @param payload_proto The protocol of the payload packets that will
+ * be imposed with this label header.
+ * @param dpo The parent of the created MPLS label object
+ */
+extern index_t mpls_label_dpo_create(mpls_label_t *label_stack,
mpls_eos_bit_t eos,
u8 ttl,
u8 exp,
+ dpo_proto_t payload_proto,
const dpo_id_t *dpo);
extern u8* format_mpls_label_dpo(u8 *s, va_list *args);
diff --git a/vnet/vnet/ethernet/arp.c b/vnet/vnet/ethernet/arp.c
index d0b7132ef74..4968d7b780d 100644
--- a/vnet/vnet/ethernet/arp.c
+++ b/vnet/vnet/ethernet/arp.c
@@ -557,9 +557,7 @@ vnet_arp_set_ip4_over_ethernet_internal (vnet_main_t * vnm,
&pfx.fp_addr,
e->sw_if_index,
~0,
- 1,
- MPLS_LABEL_INVALID,
- FIB_ROUTE_PATH_FLAG_NONE);
+ 1, NULL, FIB_ROUTE_PATH_FLAG_NONE);
}
else
{
diff --git a/vnet/vnet/fib/fib_attached_export.c b/vnet/vnet/fib/fib_attached_export.c
index afc953a4ac5..07bce5548cc 100644
--- a/vnet/vnet/fib/fib_attached_export.c
+++ b/vnet/vnet/fib/fib_attached_export.c
@@ -16,9 +16,10 @@
#include <vnet/fib/fib_entry.h>
#include <vnet/fib/fib_table.h>
-#include "fib_attached_export.h"
-#include "fib_entry_cover.h"
-#include "fib_entry_src.h"
+#include <vnet/fib/fib_attached_export.h>
+#include <vnet/fib/fib_entry_cover.h>
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_entry_delegate.h>
/**
* A description of the need to import routes from the export table
@@ -92,22 +93,27 @@ static fib_ae_export_t *fib_ae_export_pool;
static fib_ae_export_t *
fib_entry_ae_add_or_lock (fib_node_index_t connected)
{
+ fib_entry_delegate_t *fed;
fib_ae_export_t *export;
fib_entry_t *entry;
entry = fib_entry_get(connected);
+ fed = fib_entry_delegate_get(entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
- if (FIB_NODE_INDEX_INVALID == entry->fe_export)
+ if (NULL == fed)
{
+ fed = fib_entry_delegate_find_or_add(entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
pool_get(fib_ae_export_pool, export);
memset(export, 0, sizeof(*export));
- entry->fe_export = (export - fib_ae_export_pool);
+ fed->fd_index = (export - fib_ae_export_pool);
export->faee_ei = connected;
}
else
{
- export = pool_elt_at_index(fib_ae_export_pool, entry->fe_export);
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
}
export->faee_locks++;
@@ -235,6 +241,7 @@ void
fib_attached_export_import (fib_entry_t *fib_entry,
fib_node_index_t export_fib)
{
+ fib_entry_delegate_t *fed;
fib_ae_import_t *import;
pool_get(fib_ae_import_pool, import);
@@ -290,7 +297,9 @@ fib_attached_export_import (fib_entry_t *fib_entry,
fib_entry_cover_track(fib_entry_get(import->faei_export_entry),
fib_entry_get_index(fib_entry));
- fib_entry->fe_import = (import - fib_ae_import_pool);
+ fed = fib_entry_delegate_find_or_add(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
+ fed->fd_index = (import - fib_ae_import_pool);
}
/**
@@ -299,15 +308,19 @@ fib_attached_export_import (fib_entry_t *fib_entry,
void
fib_attached_export_purge (fib_entry_t *fib_entry)
{
- if (FIB_NODE_INDEX_INVALID != fib_entry->fe_import)
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
+
+ if (NULL != fed)
{
fib_node_index_t *import_index;
fib_entry_t *export_entry;
fib_ae_import_t *import;
fib_ae_export_t *export;
- import = pool_elt_at_index(fib_ae_import_pool,
- fib_entry->fe_import);
+ import = pool_elt_at_index(fib_ae_import_pool, fed->fd_index);
/*
* remove each imported entry
@@ -342,9 +355,15 @@ fib_attached_export_purge (fib_entry_t *fib_entry)
*/
if (FIB_NODE_INDEX_INVALID != import->faei_exporter)
{
+ fib_entry_delegate_t *fed;
+
export_entry = fib_entry_get(import->faei_export_entry);
- ASSERT(FIB_NODE_INDEX_INVALID != export_entry->fe_export);
- export = pool_elt_at_index(fib_ae_export_pool, export_entry->fe_export);
+
+ fed = fib_entry_delegate_get(export_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+ ASSERT(NULL != fed);
+
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
u32 index = vec_search(export->faee_importers,
(import - fib_ae_import_pool));
@@ -358,7 +377,8 @@ fib_attached_export_purge (fib_entry_t *fib_entry)
if (0 == --export->faee_locks)
{
pool_put(fib_ae_export_pool, export);
- export_entry->fe_export = FIB_NODE_INDEX_INVALID;
+ fib_entry_delegate_remove(export_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
}
}
@@ -366,7 +386,8 @@ fib_attached_export_purge (fib_entry_t *fib_entry)
* free the import tracker
*/
pool_put(fib_ae_import_pool, import);
- fib_entry->fe_import = FIB_NODE_INDEX_INVALID;
+ fib_entry_delegate_remove(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
}
}
@@ -374,7 +395,12 @@ void
fib_attached_export_covered_added (fib_entry_t *cover,
fib_node_index_t covered)
{
- if (FIB_NODE_INDEX_INVALID != cover->fe_export)
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(cover,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+
+ if (NULL != fed)
{
/*
* the covering prefix is exporting to other tables
@@ -383,7 +409,7 @@ fib_attached_export_covered_added (fib_entry_t *cover,
fib_ae_import_t *import;
fib_ae_export_t *export;
- export = pool_elt_at_index(fib_ae_export_pool, cover->fe_export);
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
/*
* export the covered entry to each of the importers
@@ -401,7 +427,12 @@ void
fib_attached_export_covered_removed (fib_entry_t *cover,
fib_node_index_t covered)
{
- if (FIB_NODE_INDEX_INVALID != cover->fe_export)
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(cover,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+
+ if (NULL != fed)
{
/*
* the covering prefix is exporting to other tables
@@ -410,7 +441,7 @@ fib_attached_export_covered_removed (fib_entry_t *cover,
fib_ae_import_t *import;
fib_ae_export_t *export;
- export = pool_elt_at_index(fib_ae_export_pool, cover->fe_export);
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
/*
* remove the covered entry from each of the importers
@@ -427,7 +458,12 @@ fib_attached_export_covered_removed (fib_entry_t *cover,
static void
fib_attached_export_cover_modified_i (fib_entry_t *fib_entry)
{
- if (FIB_NODE_INDEX_INVALID != fib_entry->fe_import)
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
+
+ if (NULL != fed)
{
fib_ae_import_t *import;
u32 export_fib;
@@ -436,7 +472,7 @@ fib_attached_export_cover_modified_i (fib_entry_t *fib_entry)
* safe the temporaries we need from the existing import
* since it will be toast after the purge.
*/
- import = pool_elt_at_index(fib_ae_import_pool, fib_entry->fe_import);
+ import = pool_elt_at_index(fib_ae_import_pool, fed->fd_index);
export_fib = import->faei_export_fib;
/*
@@ -469,15 +505,20 @@ fib_attached_export_cover_update (fib_entry_t *fib_entry)
}
u8*
-fib_ae_import_format (fib_node_index_t import_index,
+fib_ae_import_format (fib_entry_t *fib_entry,
u8* s)
{
- if (FIB_NODE_INDEX_INVALID != import_index)
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
+
+ if (NULL != fed)
{
fib_node_index_t *index;
fib_ae_import_t *import;
- import = pool_elt_at_index(fib_ae_import_pool, import_index);
+ import = pool_elt_at_index(fib_ae_import_pool, fed->fd_index);
s = format(s, "\n Attached-Import:%d:[", (import - fib_ae_import_pool));
s = format(s, "export-prefix:%U ", format_fib_prefix, &import->faei_prefix);
@@ -501,14 +542,20 @@ fib_ae_import_format (fib_node_index_t import_index,
}
u8*
-fib_ae_export_format (fib_node_index_t export_index, u8*s)
+fib_ae_export_format (fib_entry_t *fib_entry,
+ u8* s)
{
- if (FIB_NODE_INDEX_INVALID != export_index)
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(fib_entry,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+
+ if (NULL != fed)
{
- fib_node_index_t *index;
+ fib_node_index_t *index;
fib_ae_export_t *export;
- export = pool_elt_at_index(fib_ae_export_pool, export_index);
+ export = pool_elt_at_index(fib_ae_export_pool, fed->fd_list);
s = format(s, "\n Attached-Export:%d:[", (export - fib_ae_export_pool));
s = format(s, "export-entry:%d ", export->faee_ei);
diff --git a/vnet/vnet/fib/fib_attached_export.h b/vnet/vnet/fib/fib_attached_export.h
index ee68481187c..fa28a6e13b8 100644
--- a/vnet/vnet/fib/fib_attached_export.h
+++ b/vnet/vnet/fib/fib_attached_export.h
@@ -51,7 +51,7 @@ extern void fib_attached_export_covered_removed(fib_entry_t *cover,
extern void fib_attached_export_cover_change(fib_entry_t *fib_entry);
extern void fib_attached_export_cover_update(fib_entry_t *fib_entry);
-extern u8* fib_ae_import_format(fib_node_index_t import_index, u8*s);
-extern u8* fib_ae_export_format(fib_node_index_t export_index, u8*s);
+extern u8* fib_ae_import_format(fib_entry_t *fib_entry, u8*s);
+extern u8* fib_ae_export_format(fib_entry_t *fib_entry, u8*s);
#endif
diff --git a/vnet/vnet/fib/fib_entry.c b/vnet/vnet/fib/fib_entry.c
index da2656e35aa..24b506379ac 100644
--- a/vnet/vnet/fib/fib_entry.c
+++ b/vnet/vnet/fib/fib_entry.c
@@ -64,47 +64,6 @@ fib_entry_get_proto (const fib_entry_t * fib_entry)
return (fib_entry->fe_prefix.fp_proto);
}
-/**
- * @brief Turn the chain type requested by the client into the one they
- * really wanted
- */
-static fib_forward_chain_type_t
-fib_entry_chain_type_fixup (const fib_entry_t *entry,
- fib_forward_chain_type_t fct)
-{
- if (FIB_FORW_CHAIN_TYPE_MPLS_EOS == fct)
- {
- /*
- * The EOS chain is a tricky since one cannot know the adjacency
- * to link to without knowing what the packets payload protocol
- * will be once the label is popped.
- */
- fib_forward_chain_type_t dfct;
-
- dfct = fib_entry_get_default_chain_type(entry);
-
- if (FIB_FORW_CHAIN_TYPE_MPLS_EOS == dfct)
- {
- /*
- * If the entry being asked is a eos-MPLS label entry,
- * then use the payload-protocol field, that we stashed there
- * for just this purpose
- */
- return (fib_forw_chain_type_from_dpo_proto(
- entry->fe_prefix.fp_payload_proto));
- }
- /*
- * else give them what this entry would be by default. i.e. if it's a v6
- * entry, then the label its local labelled should be carrying v6 traffic.
- * If it's a non-EOS label entry, then there are more labels and we want
- * a non-eos chain.
- */
- return (dfct);
- }
-
- return (fct);
-}
-
fib_forward_chain_type_t
fib_entry_get_default_chain_type (const fib_entry_t *fib_entry)
{
@@ -189,8 +148,8 @@ format_fib_entry (u8 * s, va_list * args)
s = format(s, "\n tracking %d covered: ", n_covered);
s = fib_entry_cover_list_format(fib_entry, s);
}
- s = fib_ae_import_format(fib_entry->fe_import, s);
- s = fib_ae_export_format(fib_entry->fe_export, s);
+ s = fib_ae_import_format(fib_entry, s);
+ s = fib_ae_export_format(fib_entry, s);
s = format (s, "\n forwarding: ");
}
@@ -201,34 +160,33 @@ format_fib_entry (u8 * s, va_list * args)
fct = fib_entry_get_default_chain_type(fib_entry);
- if (!dpo_id_is_valid(&fib_entry->fe_lb[fct]))
+ if (!dpo_id_is_valid(&fib_entry->fe_lb))
{
s = format (s, " UNRESOLVED\n");
return (s);
}
else
{
+ s = format(s, " %U-chain\n %U",
+ format_fib_forw_chain_type, fct,
+ format_dpo_id,
+ &fib_entry->fe_lb,
+ 2);
+ s = format(s, "\n");
+
if (level >= FIB_ENTRY_FORMAT_DETAIL2)
{
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
- FOR_EACH_FIB_FORW_MPLS_CHAIN(fct)
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
{
s = format(s, " %U-chain\n %U",
- format_fib_forw_chain_type, fct,
- format_dpo_id,
- &fib_entry->fe_lb[fct],
- 2);
+ format_fib_forw_chain_type,
+ fib_entry_delegate_type_to_chain_type(fdt),
+ format_dpo_id, &fed->fd_dpo, 2);
s = format(s, "\n");
- }
- }
- else
- {
- s = format(s, " %U-chain\n %U",
- format_fib_forw_chain_type, fct,
- format_dpo_id,
- &fib_entry->fe_lb[fct],
- 2);
- s = format(s, "\n");
+ });
}
}
@@ -238,68 +196,6 @@ format_fib_entry (u8 * s, va_list * args)
s = fib_node_children_format(fib_entry->fe_node.fn_children, s);
}
- /* adj = adj_get(fib_entry->fe_prefix.fp_proto, fib_entry->fe_adj_index); */
-
- /* ip_multipath_next_hop_t * nhs, tmp_nhs[1]; */
- /* u32 i, j, n_left, n_nhs; */
- /* vlib_counter_t c, sum; */
- /* ip_lookup_main_t *lm = fib_get_lookup_main(fib_entry->fe_prefix.fp_proto); */
-
- /* if (adj->n_adj == 1) */
- /* { */
- /* nhs = &tmp_nhs[0]; */
- /* nhs[0].next_hop_adj_index = ~0; /\* not used *\/ */
- /* nhs[0].weight = 1; */
- /* n_nhs = 1; */
- /* } */
- /* else */
- /* { */
- /* ip_multipath_adjacency_t * madj; */
- /* madj = vec_elt_at_index (lm->multipath_adjacencies, adj->heap_handle); */
- /* nhs = heap_elt_at_index (lm->next_hop_heap, madj->normalized_next_hops.heap_offset); */
- /* n_nhs = madj->normalized_next_hops.count; */
- /* } */
-
- /* n_left = nhs[0].weight; */
- /* vlib_counter_zero (&sum); */
- /* for (i = j = 0; i < adj->n_adj; i++) */
- /* { */
- /* n_left -= 1; */
- /* vlib_get_combined_counter(&lm->adjacency_counters, */
- /* fib_entry->fe_adj_index + i, */
- /* &c); */
- /* /\* if (clear) *\/ */
- /* /\* vlib_zero_combined_counter (&lm->adjacency_counters, *\/ */
- /* /\* fib_entry->fe_adj_index + i); *\/ */
-
- /* vlib_counter_add (&sum, &c); */
- /* if (n_left == 0) */
- /* { */
- /* s = format (s, "%16Ld%16Ld ", sum.packets, sum.bytes); */
- /* s = format (s, "weight %d, index %d", */
- /* nhs[j].weight, fib_entry->fe_adj_index + i); */
-
- /* if (adj->n_adj > 1) */
- /* s = format (s, ", multipath"); */
-
- /* s = format (s, "\n%U", */
- /* format_ip_adjacency, */
- /* vnet_get_main(), lm, fib_entry->fe_adj_index + i); */
-
- /* // vlib_cli_output (vm, "%v", msg); */
- /* //vec_free (msg); */
- /* } */
- /* else */
- /* { */
- /* j++; */
- /* if (j < n_nhs) */
- /* { */
- /* n_left = nhs[j].weight; */
- /* vlib_counter_zero (&sum); */
- /* } */
- /* } */
- /* } */
-
return (s);
}
@@ -315,20 +211,25 @@ fib_entry_from_fib_node (fib_node_t *node)
static void
fib_entry_last_lock_gone (fib_node_t *node)
{
- fib_forward_chain_type_t fct;
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
fib_entry_t *fib_entry;
fib_entry = fib_entry_from_fib_node(node);
- FOR_EACH_FIB_FORW_MPLS_CHAIN(fct)
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
{
- dpo_reset(&fib_entry->fe_lb[fct]);
- }
+ dpo_reset(&fed->fd_dpo);
+ fib_entry_delegate_remove(fib_entry, fdt);
+ });
FIB_ENTRY_DBG(fib_entry, "last-lock");
fib_node_deinit(&fib_entry->fe_node);
// FIXME -RR Backwalk
+
+ ASSERT(0 == vec_len(fib_entry->fe_delegates));
+ vec_free(fib_entry->fe_delegates);
pool_put(fib_entry_pool, fib_entry);
}
@@ -487,44 +388,58 @@ fib_entry_contribute_urpf (fib_node_index_t entry_index,
*/
void
fib_entry_contribute_forwarding (fib_node_index_t fib_entry_index,
- fib_forward_chain_type_t type,
+ fib_forward_chain_type_t fct,
dpo_id_t *dpo)
{
+ fib_entry_delegate_t *fed;
fib_entry_t *fib_entry;
fib_entry = fib_entry_get(fib_entry_index);
- /*
- * these are not the droids you are looking for...
- */
- type = fib_entry_chain_type_fixup(fib_entry, type);
-
- if (!dpo_id_is_valid(&fib_entry->fe_lb[type]))
+ if (fct == fib_entry_get_default_chain_type(fib_entry))
{
- /*
- * on-demand create eos/non-eos.
- * There is no on-demand delete because:
- * - memory versus complexity & reliability:
- * leaving unrequired [n]eos LB arounds wastes memory, cleaning
- * then up on the right trigger is more code. i favour the latter.
- */
- fib_entry_src_mk_lb(fib_entry,
- fib_entry_get_best_src_i(fib_entry),
- type,
- &fib_entry->fe_lb[type]);
+ dpo_copy(dpo, &fib_entry->fe_lb);
}
+ else
+ {
+ fed = fib_entry_delegate_get(fib_entry,
+ fib_entry_chain_type_to_delegate_type(fct));
+
+ if (NULL == fed)
+ {
+ fed = fib_entry_delegate_find_or_add(
+ fib_entry,
+ fib_entry_chain_type_to_delegate_type(fct));
+ /*
+ * on-demand create eos/non-eos.
+ * There is no on-demand delete because:
+ * - memory versus complexity & reliability:
+     *    leaving unrequired [n]eos LBs around wastes memory; cleaning
+     *    them up on the right trigger is more code. I favour the latter.
+ */
+ fib_entry_src_mk_lb(fib_entry,
+ fib_entry_get_best_src_i(fib_entry),
+ fct,
+ &fed->fd_dpo);
+ }
- dpo_copy(dpo, &fib_entry->fe_lb[type]);
+ dpo_copy(dpo, &fed->fd_dpo);
+ }
}
const dpo_id_t *
fib_entry_contribute_ip_forwarding (fib_node_index_t fib_entry_index)
{
+ fib_forward_chain_type_t fct;
fib_entry_t *fib_entry;
fib_entry = fib_entry_get(fib_entry_index);
+ fct = fib_entry_get_default_chain_type(fib_entry);
+
+ ASSERT((fct == FIB_FORW_CHAIN_TYPE_UNICAST_IP4 ||
+ fct == FIB_FORW_CHAIN_TYPE_UNICAST_IP6));
- return (&fib_entry->fe_lb[fib_entry_get_default_chain_type(fib_entry)]);
+ return (&fib_entry->fe_lb);
}
adj_index_t
@@ -570,6 +485,27 @@ fib_entry_child_remove (fib_node_index_t fib_entry_index,
fib_node_child_remove(FIB_NODE_TYPE_ENTRY,
fib_entry_index,
sibling_index);
+
+ if (0 == fib_node_get_n_children(FIB_NODE_TYPE_ENTRY,
+ fib_entry_index))
+ {
+ /*
+ * if there are no children left then there is no reason to keep
+ * the non-default forwarding chains. those chains are built only
+ * because the children want them.
+ */
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
+ {
+ dpo_reset(&fed->fd_dpo);
+ fib_entry_delegate_remove(fib_entry, fdt);
+ });
+ }
}
static fib_entry_t *
@@ -577,8 +513,8 @@ fib_entry_alloc (u32 fib_index,
const fib_prefix_t *prefix,
fib_node_index_t *fib_entry_index)
{
- fib_forward_chain_type_t fct;
fib_entry_t *fib_entry;
+ fib_prefix_t *fep;
pool_get(fib_entry_pool, fib_entry);
memset(fib_entry, 0, sizeof(*fib_entry));
@@ -587,20 +523,25 @@ fib_entry_alloc (u32 fib_index,
FIB_NODE_TYPE_ENTRY);
fib_entry->fe_fib_index = fib_index;
- fib_entry->fe_prefix = *prefix;
+
+ /*
+ * the one time we need to update the const prefix is when
+ * the entry is first created
+ */
+ fep = (fib_prefix_t*)&(fib_entry->fe_prefix);
+ *fep = *prefix;
+
if (FIB_PROTOCOL_MPLS == fib_entry->fe_prefix.fp_proto)
{
- fib_entry->fe_prefix.fp_len = 21;
+ fep->fp_len = 21;
+ if (MPLS_NON_EOS == fep->fp_eos)
+ {
+ fep->fp_payload_proto = DPO_PROTO_MPLS;
+ }
ASSERT(DPO_PROTO_NONE != fib_entry->fe_prefix.fp_payload_proto);
}
- fib_entry->fe_export = FIB_NODE_INDEX_INVALID;
- fib_entry->fe_import = FIB_NODE_INDEX_INVALID;
- fib_entry->fe_covered = FIB_NODE_INDEX_INVALID;
- FOR_EACH_FIB_FORW_MPLS_CHAIN(fct)
- {
- dpo_reset(&fib_entry->fe_lb[fct]);
- }
+ dpo_reset(&fib_entry->fe_lb);
*fib_entry_index = fib_entry_get_index(fib_entry);
@@ -1316,7 +1257,6 @@ fib_entry_recursive_loop_detect (fib_node_index_t entry_index,
if (FIB_NODE_INDEX_INVALID != fib_entry->fe_parent)
{
fib_node_index_t *entries = *entry_indicies;
- fib_forward_chain_type_t fct;
vec_add1(entries, entry_index);
was_looped = fib_path_list_is_looped(fib_entry->fe_parent);
@@ -1331,16 +1271,16 @@ fib_entry_recursive_loop_detect (fib_node_index_t entry_index,
* re-evaluate all the entry's forwarding
* NOTE: this is an inplace modify
*/
- FOR_EACH_FIB_FORW_MPLS_CHAIN(fct)
- {
- if (dpo_id_is_valid(&fib_entry->fe_lb[fct]))
- {
- fib_entry_src_mk_lb(fib_entry,
- fib_entry_get_best_src_i(fib_entry),
- fct,
- &fib_entry->fe_lb[fct]);
- }
- }
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
+
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
+ {
+ fib_entry_src_mk_lb(fib_entry,
+ fib_entry_get_best_src_i(fib_entry),
+ fib_entry_delegate_type_to_chain_type(fdt),
+ &fed->fd_dpo);
+ });
}
}
else
@@ -1379,8 +1319,8 @@ fib_entry_get_best_source (fib_node_index_t entry_index)
}
static int
-fib_ip4_address_compare (ip4_address_t * a1,
- ip4_address_t * a2)
+fib_ip4_address_compare (const ip4_address_t * a1,
+ const ip4_address_t * a2)
{
/*
     * IP addresses are unsigned ints. The return value here needs to be signed
@@ -1393,8 +1333,8 @@ fib_ip4_address_compare (ip4_address_t * a1,
}
static int
-fib_ip6_address_compare (ip6_address_t * a1,
- ip6_address_t * a2)
+fib_ip6_address_compare (const ip6_address_t * a1,
+ const ip6_address_t * a2)
{
int i;
for (i = 0; i < ARRAY_LEN (a1->as_u16); i++)
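With the per-chain fe_lb[] array gone, fib_entry_contribute_forwarding above serves the entry's default chain straight from fe_lb and builds any other chain type on demand into a delegate. A minimal, self-contained sketch of that split follows; the names are illustrative, not the VPP API.

/* Illustrative only: one always-present 'default' load-balance plus a
 * sparse set of on-demand extras, mirroring fe_lb + fe_delegates. */
#include <stddef.h>

typedef struct entry_ {
    int   default_chain;                     /* chain type the entry natively provides */
    void *default_lb;                        /* always present */
    struct { int chain; void *lb; } *extras; /* built only when a child asks */
    int   n_extras;
} entry_t;

static void *
entry_get_forwarding (const entry_t *e, int chain)
{
    int i;

    if (chain == e->default_chain)
        return (e->default_lb);              /* common case: no extra memory */

    for (i = 0; i < e->n_extras; i++)        /* rare case: delegate lookup */
        if (e->extras[i].chain == chain)
            return (e->extras[i].lb);

    return (NULL);                           /* caller would create on demand */
}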
diff --git a/vnet/vnet/fib/fib_entry.h b/vnet/vnet/fib/fib_entry.h
index d62a94043b3..44a5f2e6d7f 100644
--- a/vnet/vnet/fib/fib_entry.h
+++ b/vnet/vnet/fib/fib_entry.h
@@ -17,6 +17,7 @@
#define __FIB_ENTRY_H__
#include <vnet/fib/fib_node.h>
+#include <vnet/fib/fib_entry_delegate.h>
#include <vnet/adj/adj.h>
#include <vnet/ip/ip.h>
#include <vnet/dpo/dpo.h>
@@ -363,9 +364,10 @@ typedef struct fib_entry_t_ {
*/
fib_node_t fe_node;
/**
- * The prefix of the route
+     * The prefix of the route. This is const since it is the entry's
+     * key/identity and so should never change.
*/
- fib_prefix_t fe_prefix;
+ const fib_prefix_t fe_prefix;
/**
* The index of the FIB table this entry is in
*/
@@ -382,7 +384,7 @@ typedef struct fib_entry_t_ {
* paint the header straight on without the need to check the packet
* type to derive the EOS bit value.
*/
- dpo_id_t fe_lb[FIB_FORW_CHAIN_MPLS_NUM];
+ dpo_id_t fe_lb; // [FIB_FORW_CHAIN_MPLS_NUM];
/**
* Vector of source infos.
* Most entries will only have 1 source. So we optimise for memory usage,
@@ -400,17 +402,11 @@ typedef struct fib_entry_t_ {
* be changed by the parent as it manages its list.
*/
u32 fe_sibling;
+
/**
- * Dependency list of covered entries.
- * these are more specific entries that are interested in changes
- * to their respective cover
- */
- fib_node_list_t fe_covered;
- /**
- * exporter
+ * A vector of delegates.
*/
- fib_node_index_t fe_export;
- fib_node_index_t fe_import;
+ fib_entry_delegate_t *fe_delegates;
} fib_entry_t;
#define FOR_EACH_FIB_ENTRY_FLAG(_item) \
diff --git a/vnet/vnet/fib/fib_entry_cover.c b/vnet/vnet/fib/fib_entry_cover.c
index 06b5b918abc..147c5daa4fd 100644
--- a/vnet/vnet/fib/fib_entry_cover.c
+++ b/vnet/vnet/fib/fib_entry_cover.c
@@ -21,16 +21,21 @@ u32
fib_entry_cover_track (fib_entry_t* cover,
fib_node_index_t covered)
{
+ fib_entry_delegate_t *fed;
+
FIB_ENTRY_DBG(cover, "cover-track %d", covered);
ASSERT(fib_entry_get_index(cover) != covered);
- if (FIB_NODE_INDEX_INVALID == cover->fe_covered)
+ fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);
+
+ if (NULL == fed)
{
- cover->fe_covered = fib_node_list_create();
+ fed = fib_entry_delegate_find_or_add(cover, FIB_ENTRY_DELEGATE_COVERED);
+ fed->fd_list = fib_node_list_create();
}
- return (fib_node_list_push_front(cover->fe_covered,
+ return (fib_node_list_push_front(fed->fd_list,
0, FIB_NODE_TYPE_ENTRY,
covered));
}
@@ -39,16 +44,21 @@ void
fib_entry_cover_untrack (fib_entry_t* cover,
u32 tracked_index)
{
+ fib_entry_delegate_t *fed;
+
FIB_ENTRY_DBG(cover, "cover-untrack @ %d", tracked_index);
- if (FIB_NODE_INDEX_INVALID == cover->fe_covered)
+ fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);
+
+ if (NULL == fed)
return;
- fib_node_list_remove(cover->fe_covered, tracked_index);
+ fib_node_list_remove(fed->fd_list, tracked_index);
- if (0 == fib_node_list_get_size(cover->fe_covered))
+ if (0 == fib_node_list_get_size(fed->fd_list))
{
- fib_node_list_destroy(&cover->fe_covered);
+ fib_node_list_destroy(&fed->fd_list);
+ fib_entry_delegate_remove(cover, FIB_ENTRY_DELEGATE_COVERED);
}
}
@@ -78,26 +88,35 @@ fib_entry_cover_walk (fib_entry_t *cover,
fib_entry_covered_walk_t walk,
void *args)
{
- if (FIB_NODE_INDEX_INVALID != cover->fe_covered)
- {
- fib_enty_cover_walk_ctx_t ctx = {
- .cover = cover,
- .walk = walk,
- .ctx = args,
- };
-
- fib_node_list_walk(cover->fe_covered,
- fib_entry_cover_walk_node_ptr,
- &ctx);
- }
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);
+
+ if (NULL == fed)
+ return;
+
+ fib_enty_cover_walk_ctx_t ctx = {
+ .cover = cover,
+ .walk = walk,
+ .ctx = args,
+ };
+
+ fib_node_list_walk(fed->fd_list,
+ fib_entry_cover_walk_node_ptr,
+ &ctx);
}
u32
fib_entry_cover_get_size (fib_entry_t *cover)
{
- if (FIB_NODE_INDEX_INVALID != cover->fe_covered)
- return (fib_node_list_get_size(cover->fe_covered));
- return (0);
+ fib_entry_delegate_t *fed;
+
+ fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);
+
+ if (NULL == fed)
+ return (0);
+
+ return (fib_node_list_get_size(fed->fd_list));
}
typedef struct fib_entry_cover_list_format_ctx_t_ {
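The covered-dependents list now lives in a FIB_ENTRY_DELEGATE_COVERED delegate: it is allocated on the first track and torn down again when the last tracker leaves, as the hunks above show. A standalone sketch of that create-on-first-use, destroy-on-empty lifecycle, with illustrative names rather than the fib_node_list API; note the toy version does not keep tracked indices stable, unlike the real list.

/* Illustrative: lazily allocate a list on first use, free it when empty. */
#include <stdlib.h>

typedef struct tracker_list_ { int *items; int n; } tracker_list_t;

static tracker_list_t *
list_track (tracker_list_t *l, int item)
{
    if (NULL == l)
        l = calloc(1, sizeof(*l));            /* first tracker: create the list */
    l->items = realloc(l->items, (l->n + 1) * sizeof(int));
    l->items[l->n++] = item;
    return (l);
}

static tracker_list_t *
list_untrack (tracker_list_t *l, int idx)
{
    if (NULL == l)
        return (NULL);
    l->items[idx] = l->items[--l->n];         /* swap-remove; order not significant here */
    if (0 == l->n)
    {
        free(l->items);
        free(l);                              /* last tracker gone: drop the list */
        return (NULL);
    }
    return (l);
}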
diff --git a/vnet/vnet/fib/fib_entry_delegate.c b/vnet/vnet/fib/fib_entry_delegate.c
new file mode 100644
index 00000000000..a0d45f970b3
--- /dev/null
+++ b/vnet/vnet/fib/fib_entry_delegate.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_entry_delegate.h>
+#include <vnet/fib/fib_entry.h>
+
+static fib_entry_delegate_t *
+fib_entry_delegate_find_i (const fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t type,
+ u32 *index)
+{
+ fib_entry_delegate_t *delegate;
+ int ii;
+
+ ii = 0;
+ vec_foreach(delegate, fib_entry->fe_delegates)
+ {
+ if (delegate->fd_type == type)
+ {
+ if (NULL != index)
+ *index = ii;
+
+ return (delegate);
+ }
+ else
+ {
+ ii++;
+ }
+ }
+
+ return (NULL);
+}
+
+fib_entry_delegate_t *
+fib_entry_delegate_get (const fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t type)
+{
+ return (fib_entry_delegate_find_i(fib_entry, type, NULL));
+}
+
+void
+fib_entry_delegate_remove (fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t type)
+{
+ fib_entry_delegate_t *fed;
+ u32 index = ~0;
+
+ fed = fib_entry_delegate_find_i(fib_entry, type, &index);
+
+ ASSERT(NULL != fed);
+
+ vec_del1(fib_entry->fe_delegates, index);
+}
+
+static int
+fib_entry_delegate_cmp_for_sort (void * v1,
+ void * v2)
+{
+ fib_entry_delegate_t *delegate1 = v1, *delegate2 = v2;
+
+ return (delegate1->fd_type - delegate2->fd_type);
+}
+
+static void
+fib_entry_delegate_init (fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t type)
+
+{
+ fib_entry_delegate_t delegate = {
+ .fd_entry_index = fib_entry_get_index(fib_entry),
+ .fd_type = type,
+ };
+
+ vec_add1(fib_entry->fe_delegates, delegate);
+ vec_sort_with_function(fib_entry->fe_delegates,
+ fib_entry_delegate_cmp_for_sort);
+}
+
+fib_entry_delegate_t *
+fib_entry_delegate_find_or_add (fib_entry_t *fib_entry,
+ fib_entry_delegate_type_t fdt)
+{
+ fib_entry_delegate_t *delegate;
+
+ delegate = fib_entry_delegate_get(fib_entry, fdt);
+
+ if (NULL == delegate)
+ {
+ fib_entry_delegate_init(fib_entry, fdt);
+ }
+
+ return (fib_entry_delegate_get(fib_entry, fdt));
+}
+
+fib_entry_delegate_type_t
+fib_entry_chain_type_to_delegate_type (fib_forward_chain_type_t fct)
+{
+ switch (fct)
+ {
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
+ return (FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4);
+ case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
+ return (FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP6);
+ case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ return (FIB_ENTRY_DELEGATE_CHAIN_MPLS_EOS);
+ case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+ return (FIB_ENTRY_DELEGATE_CHAIN_MPLS_NON_EOS);
+ case FIB_FORW_CHAIN_TYPE_ETHERNET:
+ return (FIB_ENTRY_DELEGATE_CHAIN_ETHERNET);
+ }
+ ASSERT(0);
+ return (FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4);
+}
+
+fib_forward_chain_type_t
+fib_entry_delegate_type_to_chain_type (fib_entry_delegate_type_t fdt)
+{
+ switch (fdt)
+ {
+ case FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+ case FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP6:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
+ case FIB_ENTRY_DELEGATE_CHAIN_MPLS_EOS:
+ return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
+ case FIB_ENTRY_DELEGATE_CHAIN_MPLS_NON_EOS:
+ return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
+ case FIB_ENTRY_DELEGATE_CHAIN_ETHERNET:
+ return (FIB_FORW_CHAIN_TYPE_ETHERNET);
+ case FIB_ENTRY_DELEGATE_COVERED:
+ case FIB_ENTRY_DELEGATE_ATTACHED_IMPORT:
+ case FIB_ENTRY_DELEGATE_ATTACHED_EXPORT:
+ break;
+ }
+ ASSERT(0);
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+}
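fib_entry_delegate_find_i, the init routine and fib_entry_delegate_remove together amount to a small vector that is kept sorted by delegate type and searched linearly, which is cheap because an entry rarely carries more than a couple of delegates. The same pattern in plain C, without the vppinfra vec machinery; the names are hypothetical.

/* Illustrative: keep a small array sorted by type; linear search is fine
 * because there are only ever a few elements. */
#include <stdlib.h>

typedef struct del_ { int type; int data; } del_t;
typedef struct delvec_ { del_t *v; int n; } delvec_t;

static del_t *
del_find (delvec_t *dv, int type)
{
    int i;
    for (i = 0; i < dv->n; i++)
        if (dv->v[i].type == type)
            return (&dv->v[i]);
    return (NULL);
}

static int
del_cmp (const void *a, const void *b)
{
    return (((const del_t *) a)->type - ((const del_t *) b)->type);
}

static del_t *
del_find_or_add (delvec_t *dv, int type)
{
    del_t *d = del_find(dv, type);

    if (NULL == d)
    {
        dv->v = realloc(dv->v, (dv->n + 1) * sizeof(del_t));
        dv->v[dv->n].type = type;
        dv->v[dv->n].data = 0;
        dv->n++;
        qsort(dv->v, dv->n, sizeof(del_t), del_cmp);  /* keep sorted by type */
        d = del_find(dv, type);                       /* re-find after the sort */
    }
    return (d);
}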
diff --git a/vnet/vnet/fib/fib_entry_delegate.h b/vnet/vnet/fib/fib_entry_delegate.h
new file mode 100644
index 00000000000..6d3a6549f32
--- /dev/null
+++ b/vnet/vnet/fib/fib_entry_delegate.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FIB_ENTRY_DELEGATE_T__
+#define __FIB_ENTRY_DELEGATE_T__
+
+#include <vnet/fib/fib_node.h>
+
+/**
+ * Delegate types
+ */
+typedef enum fib_entry_delegate_type_t_ {
+ /**
+ * Forwarding chain types:
+     * For the vast majority of FIB entries only one chain is required - the
+     * one that forwards traffic matching the fib_entry_t's fib_prefix_t.
+     * Those fib_entry_t that are a resolution target for other fib_entry_t
+     * will also need chains to provide forwarding for those children. We store
+     * these additional chains in delegates to save memory in the common case.
+ */
+ FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4 = FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP6 = FIB_FORW_CHAIN_TYPE_UNICAST_IP6,
+ FIB_ENTRY_DELEGATE_CHAIN_MPLS_EOS = FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ FIB_ENTRY_DELEGATE_CHAIN_MPLS_NON_EOS = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ FIB_ENTRY_DELEGATE_CHAIN_ETHERNET = FIB_FORW_CHAIN_TYPE_ETHERNET,
+ /**
+ * Dependency list of covered entries.
+ * these are more specific entries that are interested in changes
+ * to their respective cover
+ */
+ FIB_ENTRY_DELEGATE_COVERED,
+ /**
+ * Attached import/export functionality
+ */
+ FIB_ENTRY_DELEGATE_ATTACHED_IMPORT,
+ FIB_ENTRY_DELEGATE_ATTACHED_EXPORT,
+} fib_entry_delegate_type_t;
+
+#define FOR_EACH_DELEGATE_CHAIN(_entry, _fdt, _fed, _body) \
+{ \
+ for (_fdt = FIB_ENTRY_DELEGATE_CHAIN_UNICAST_IP4; \
+ _fdt <= FIB_ENTRY_DELEGATE_CHAIN_ETHERNET; \
+ _fdt++) \
+ { \
+ _fed = fib_entry_delegate_get(_entry, _fdt); \
+ if (NULL != _fed) { \
+ _body; \
+ } \
+ } \
+}
+
+/**
+ * A Delegate is a means to implement the Delegation design pattern: the extension of an
+ * object's functionality through the composition of, and delegation to, other objects.
+ * These 'other' objects are delegates. Delegates are thus attached to other FIB objects
+ * to extend their functionality.
+ */
+typedef struct fib_entry_delegate_t_
+{
+ /**
+     * The FIB entry object to which the delegate is attached
+ */
+ fib_node_index_t fd_entry_index;
+
+ /**
+     * The delegate type
+ */
+ fib_entry_delegate_type_t fd_type;
+
+ /**
+     * A union of data for the different delegate types.
+     * These delegates are stored in a sparse vector on the entry, so they
+     * must all be of the same size. We could use indirection here for all
+     * types, i.e. store an index; that's fine for large delegates, like the
+     * attached export, but for the chain delegates it's excessive.
+ */
+ union
+ {
+ /**
+ * Valid for the forwarding chain delegates. The LB that is built.
+ */
+ dpo_id_t fd_dpo;
+
+ /**
+ * Valid for the attached import cases. An index of the importer/exporter
+ */
+ fib_node_index_t fd_index;
+
+ /**
+         * For cover tracking: the node list.
+ */
+ fib_node_list_t fd_list;
+ };
+} fib_entry_delegate_t;
+
+struct fib_entry_t_;
+
+extern void fib_entry_delegate_remove(struct fib_entry_t_ *fib_entry,
+ fib_entry_delegate_type_t type);
+
+extern fib_entry_delegate_t *fib_entry_delegate_find_or_add(struct fib_entry_t_ *fib_entry,
+ fib_entry_delegate_type_t fdt);
+extern fib_entry_delegate_t *fib_entry_delegate_get(const struct fib_entry_t_ *fib_entry,
+ fib_entry_delegate_type_t type);
+
+extern fib_forward_chain_type_t fib_entry_delegate_type_to_chain_type(
+ fib_entry_delegate_type_t type);
+
+extern fib_entry_delegate_type_t fib_entry_chain_type_to_delegate_type(
+ fib_forward_chain_type_t type);
+
+#endif
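FOR_EACH_DELEGATE_CHAIN above iterates only the chain-type delegates (UNICAST_IP4 through ETHERNET) and runs the body solely for types that are actually present on the entry. A usage sketch, matching how the macro is invoked elsewhere in this patch, for example when the entry's last lock goes.

/* Sketch of intended use: fib_entry is a fib_entry_t*; the body runs once
 * per chain-type delegate that exists, here releasing each built chain. */
fib_entry_delegate_type_t fdt;
fib_entry_delegate_t *fed;

FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
{
    dpo_reset(&fed->fd_dpo);
    fib_entry_delegate_remove(fib_entry, fdt);
});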
diff --git a/vnet/vnet/fib/fib_entry_src.c b/vnet/vnet/fib/fib_entry_src.c
index 6fb2e64c287..060fac941d2 100644
--- a/vnet/vnet/fib/fib_entry_src.c
+++ b/vnet/vnet/fib/fib_entry_src.c
@@ -221,6 +221,44 @@ fib_entry_src_valid_out_label (mpls_label_t label)
MPLS_IETF_IMPLICIT_NULL_LABEL == label));
}
+/**
+ * @brief Turn the chain type requested by the client into the one they
+ * really wanted
+ */
+fib_forward_chain_type_t
+fib_entry_chain_type_fixup (const fib_entry_t *entry,
+ fib_forward_chain_type_t fct)
+{
+ ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS == fct);
+
+ /*
+ * The EOS chain is tricky since one cannot know the adjacency
+ * to link to without knowing what the packet's payload protocol
+ * will be once the label is popped.
+ */
+ fib_forward_chain_type_t dfct;
+
+ dfct = fib_entry_get_default_chain_type(entry);
+
+ if (FIB_FORW_CHAIN_TYPE_MPLS_EOS == dfct)
+ {
+ /*
+	 * If the entry being asked for is an eos-MPLS label entry,
+ * then use the payload-protocol field, that we stashed there
+ * for just this purpose
+ */
+ return (fib_forw_chain_type_from_dpo_proto(
+ entry->fe_prefix.fp_payload_proto));
+ }
+ /*
+     * else give them what this entry would be by default, i.e. if it's a v6
+     * entry, then its local label should be carrying v6 traffic.
+ * If it's a non-EOS label entry, then there are more labels and we want
+ * a non-eos chain.
+ */
+ return (dfct);
+}
+
static int
fib_entry_src_collect_forwarding (fib_node_index_t pl_index,
fib_node_index_t path_index,
@@ -255,13 +293,13 @@ fib_entry_src_collect_forwarding (fib_node_index_t pl_index,
if (NULL != path_ext &&
path_ext->fpe_path_index == path_index &&
- fib_entry_src_valid_out_label(path_ext->fpe_label))
+ fib_entry_src_valid_out_label(path_ext->fpe_label_stack[0]))
{
/*
* found a matching extension. stack it to obtain the forwarding
* info for this path.
*/
- ctx->next_hops = fib_path_ext_stack(path_ext, ctx->fct, ctx->next_hops);
+ ctx->next_hops = fib_path_ext_stack(path_ext, ctx->fib_entry, ctx->fct, ctx->next_hops);
}
else
{
@@ -299,6 +337,21 @@ fib_entry_src_collect_forwarding (fib_node_index_t pl_index,
}
break;
case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
+ {
+ /*
+ * no label. we need a chain based on the payload. fixup.
+ */
+ vec_add2(ctx->next_hops, nh, 1);
+
+ nh->path_index = path_index;
+ nh->path_weight = fib_path_get_weight(path_index);
+ fib_path_contribute_forwarding(path_index,
+ fib_entry_chain_type_fixup(ctx->fib_entry,
+ ctx->fct),
+ &nh->path_dpo);
+
+ break;
+ }
case FIB_FORW_CHAIN_TYPE_ETHERNET:
ASSERT(0);
break;
@@ -420,12 +473,12 @@ fib_entry_src_action_install (fib_entry_t *fib_entry,
* the load-balance object only needs to be added to the forwarding
* DB once, when it is created.
*/
- insert = !dpo_id_is_valid(&fib_entry->fe_lb[fct]);
+ insert = !dpo_id_is_valid(&fib_entry->fe_lb);
- fib_entry_src_mk_lb(fib_entry, esrc, fct, &fib_entry->fe_lb[fct]);
+ fib_entry_src_mk_lb(fib_entry, esrc, fct, &fib_entry->fe_lb);
- ASSERT(dpo_id_is_valid(&fib_entry->fe_lb[fct]));
- FIB_ENTRY_DBG(fib_entry, "install: %d", fib_entry->fe_lb[fct]);
+ ASSERT(dpo_id_is_valid(&fib_entry->fe_lb));
+ FIB_ENTRY_DBG(fib_entry, "install: %d", fib_entry->fe_lb);
/*
* insert the adj into the data-plane forwarding trie
@@ -434,51 +487,41 @@ fib_entry_src_action_install (fib_entry_t *fib_entry,
{
fib_table_fwding_dpo_update(fib_entry->fe_fib_index,
&fib_entry->fe_prefix,
- &fib_entry->fe_lb[fct]);
+ &fib_entry->fe_lb);
}
- if (FIB_FORW_CHAIN_TYPE_UNICAST_IP4 == fct ||
- FIB_FORW_CHAIN_TYPE_UNICAST_IP6 == fct)
+ /*
+ * if any of the other chain types are already created they will need
+ * updating too
+ */
+ fib_entry_delegate_type_t fdt;
+ fib_entry_delegate_t *fed;
+
+ FOR_EACH_DELEGATE_CHAIN(fib_entry, fdt, fed,
{
- for (fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS;
- fct <= FIB_FORW_CHAIN_TYPE_MPLS_EOS;
- fct++)
- {
- /*
- * if any of the other chain types are already created they will need
- * updating too
- */
- if (dpo_id_is_valid(&fib_entry->fe_lb[fct]))
- {
- fib_entry_src_mk_lb(fib_entry,
- esrc,
- fct,
- &fib_entry->fe_lb[fct]);
- }
- }
- }
+ fib_entry_src_mk_lb(fib_entry, esrc,
+ fib_entry_delegate_type_to_chain_type(fdt),
+ &fed->fd_dpo);
+ });
}
void
fib_entry_src_action_uninstall (fib_entry_t *fib_entry)
{
- fib_forward_chain_type_t fct;
-
- fct = fib_entry_get_default_chain_type(fib_entry);
/*
* uninstall the forwarding chain from the forwarding tables
*/
FIB_ENTRY_DBG(fib_entry, "uninstall: %d",
fib_entry->fe_adj_index);
- if (dpo_id_is_valid(&fib_entry->fe_lb[fct]))
+ if (dpo_id_is_valid(&fib_entry->fe_lb))
{
fib_table_fwding_dpo_remove(
fib_entry->fe_fib_index,
&fib_entry->fe_prefix,
- &fib_entry->fe_lb[fct]);
+ &fib_entry->fe_lb);
- dpo_reset(&fib_entry->fe_lb[fct]);
+ dpo_reset(&fib_entry->fe_lb);
}
}
@@ -965,7 +1008,7 @@ static void
fib_entry_src_path_ext_append (fib_entry_src_t *esrc,
const fib_route_path_t *rpath)
{
- if (MPLS_LABEL_INVALID != rpath->frp_label)
+ if (NULL != rpath->frp_label_stack)
{
fib_path_ext_t *path_ext;
@@ -991,7 +1034,7 @@ fib_entry_src_path_ext_insert (fib_entry_src_t *esrc,
if (0 == vec_len(esrc->fes_path_exts))
return (fib_entry_src_path_ext_append(esrc, rpath));
- if (MPLS_LABEL_INVALID != rpath->frp_label)
+ if (NULL != rpath->frp_label_stack)
{
fib_path_ext_t path_ext;
int i = 0;
@@ -1097,6 +1140,7 @@ fib_entry_src_action_path_swap (fib_entry_t *fib_entry,
fib_node_index_t old_path_list, fib_entry_index;
fib_path_list_flags_t pl_flags;
const fib_route_path_t *rpath;
+ fib_path_ext_t *path_ext;
fib_entry_src_t *esrc;
esrc = fib_entry_src_find(fib_entry, source, NULL);
@@ -1138,7 +1182,12 @@ fib_entry_src_action_path_swap (fib_entry_t *fib_entry,
pl_flags,
rpaths);
+ vec_foreach(path_ext, esrc->fes_path_exts)
+ {
+ vec_free(path_ext->fpe_label_stack);
+ }
vec_free(esrc->fes_path_exts);
+
vec_foreach(rpath, rpaths)
{
fib_entry_src_path_ext_append(esrc, rpath);
@@ -1192,6 +1241,7 @@ fib_entry_src_action_path_remove (fib_entry_t *fib_entry,
* delete the element moving the remaining elements down 1 position.
* this preserves the sorted order.
*/
+ vec_free(path_ext->fpe_label_stack);
vec_delete(esrc->fes_path_exts, 1, (path_ext - esrc->fes_path_exts));
break;
}
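A path extension now records a whole out-label stack (a vector) and is only created when the control plane supplied at least one label; as the hunk above shows, only the first label in the stack is tested for validity when collecting forwarding. A standalone restatement of that check, using the usual IETF label constants and illustrative helper names.

/* Illustrative: the first label in the stack decides whether the path
 * extension can contribute forwarding for its path. */
#include <stddef.h>

enum { LBL_IMPLICIT_NULL = 3, LBL_FIRST_UNRESERVED = 16, LBL_MAX = 0xfffff };

static int
valid_out_label (unsigned label)
{
    return ((label >= LBL_FIRST_UNRESERVED && label <= LBL_MAX) ||
            LBL_IMPLICIT_NULL == label);
}

static int
path_ext_usable (const unsigned *label_stack, size_t n_labels)
{
    /* no labels => no extension was created for this path */
    if (NULL == label_stack || 0 == n_labels)
        return (0);
    /* as in fib_entry_src_collect_forwarding: only the top label is tested */
    return (valid_out_label(label_stack[0]));
}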
diff --git a/vnet/vnet/fib/fib_entry_src.h b/vnet/vnet/fib/fib_entry_src.h
index 0b98c1c35c0..640c174db47 100644
--- a/vnet/vnet/fib/fib_entry_src.h
+++ b/vnet/vnet/fib/fib_entry_src.h
@@ -269,6 +269,9 @@ extern fib_entry_flag_t fib_entry_get_flags_i(const fib_entry_t *fib_entry);
extern fib_path_list_flags_t fib_entry_src_flags_2_path_list_flags(
fib_entry_flag_t eflags);
+extern fib_forward_chain_type_t fib_entry_chain_type_fixup(const fib_entry_t *entry,
+ fib_forward_chain_type_t fct);
+
extern void fib_entry_src_mk_lb (fib_entry_t *fib_entry,
const fib_entry_src_t *esrc,
fib_forward_chain_type_t fct,
diff --git a/vnet/vnet/fib/fib_entry_src_rr.c b/vnet/vnet/fib/fib_entry_src_rr.c
index 6d56541dec2..ff15c54e281 100644
--- a/vnet/vnet/fib/fib_entry_src_rr.c
+++ b/vnet/vnet/fib/fib_entry_src_rr.c
@@ -58,6 +58,7 @@ fib_entry_src_rr_resolve_via_connected (fib_entry_src_t *src,
vec_free(paths);
}
+
/**
* Source initialisation Function
*/
@@ -79,8 +80,18 @@ fib_entry_src_rr_activate (fib_entry_src_t *src,
/*
* find the covering prefix. become a dependent thereof.
- * there should always be a cover, though it may be the default route.
+     * For IP there should always be a cover, though it may be the default route.
+ * For MPLS there is never a cover.
*/
+ if (FIB_PROTOCOL_MPLS == fib_entry->fe_prefix.fp_proto)
+ {
+ src->fes_pl = fib_path_list_create_special(FIB_PROTOCOL_MPLS,
+ FIB_PATH_LIST_FLAG_DROP,
+ NULL);
+ fib_path_list_lock(src->fes_pl);
+ return (!0);
+ }
+
src->rr.fesr_cover = fib_table_get_less_specific(fib_entry->fe_fib_index,
&fib_entry->fe_prefix);
@@ -157,12 +168,12 @@ fib_entry_src_rr_deactivate (fib_entry_src_t *src,
/*
     * remove the dependency on the covering entry
*/
- ASSERT(FIB_NODE_INDEX_INVALID != src->rr.fesr_cover);
- cover = fib_entry_get(src->rr.fesr_cover);
-
- fib_entry_cover_untrack(cover, src->rr.fesr_sibling);
-
- src->rr.fesr_cover = FIB_NODE_INDEX_INVALID;
+ if (FIB_NODE_INDEX_INVALID != src->rr.fesr_cover)
+ {
+ cover = fib_entry_get(src->rr.fesr_cover);
+ fib_entry_cover_untrack(cover, src->rr.fesr_sibling);
+ src->rr.fesr_cover = FIB_NODE_INDEX_INVALID;
+ }
fib_path_list_unlock(src->fes_pl);
src->fes_pl = FIB_NODE_INDEX_INVALID;
diff --git a/vnet/vnet/fib/fib_internal.h b/vnet/vnet/fib/fib_internal.h
index a0238ac3cdb..2d980bcce0a 100644
--- a/vnet/vnet/fib/fib_internal.h
+++ b/vnet/vnet/fib/fib_internal.h
@@ -24,6 +24,9 @@
*/
#undef FIB_DEBUG
+extern void fib_prefix_from_mpls_label(mpls_label_t label,
+ fib_prefix_t *prf);
+
extern int fib_route_path_cmp(const fib_route_path_t *rpath1,
const fib_route_path_t *rpath2);
diff --git a/vnet/vnet/fib/fib_node.c b/vnet/vnet/fib/fib_node.c
index 35dc874effb..db3e22bb3b8 100644
--- a/vnet/vnet/fib/fib_node.c
+++ b/vnet/vnet/fib/fib_node.c
@@ -117,22 +117,6 @@ fib_node_child_add (fib_node_type_t parent_type,
index));
}
-u32
-fib_node_child_get_n_children (fib_node_type_t parent_type,
- fib_node_index_t parent_index)
-{
- fib_node_t *parent;
-
- parent = fn_vfts[parent_type].fnv_get(parent_index);
-
- if (FIB_NODE_INDEX_INVALID == parent->fn_children)
- {
- return (0);
- }
-
- return (fib_node_list_get_size(parent->fn_children));
-}
-
void
fib_node_child_remove (fib_node_type_t parent_type,
fib_node_index_t parent_index,
@@ -152,6 +136,17 @@ fib_node_child_remove (fib_node_type_t parent_type,
fib_node_unlock(parent);
}
+u32
+fib_node_get_n_children (fib_node_type_t parent_type,
+ fib_node_index_t parent_index)
+{
+ fib_node_t *parent;
+
+ parent = fn_vfts[parent_type].fnv_get(parent_index);
+
+ return (fib_node_list_get_size(parent->fn_children));
+}
+
fib_node_back_walk_rc_t
fib_node_back_walk_one (fib_node_ptr_t *ptr,
diff --git a/vnet/vnet/fib/fib_node.h b/vnet/vnet/fib/fib_node.h
index 4aabc64e288..3ad8ee95b64 100644
--- a/vnet/vnet/fib/fib_node.h
+++ b/vnet/vnet/fib/fib_node.h
@@ -35,6 +35,7 @@ typedef enum fib_node_type_t_ {
FIB_NODE_TYPE_PATH,
FIB_NODE_TYPE_ADJ,
FIB_NODE_TYPE_MPLS_ENTRY,
+ FIB_NODE_TYPE_MPLS_TUNNEL,
FIB_NODE_TYPE_LISP_GPE_FWD_ENTRY,
FIB_NODE_TYPE_LISP_ADJ,
FIB_NODE_TYPE_GRE_TUNNEL,
@@ -54,6 +55,7 @@ typedef enum fib_node_type_t_ {
[FIB_NODE_TYPE_PATH_LIST] = "path-list", \
[FIB_NODE_TYPE_PATH] = "path", \
[FIB_NODE_TYPE_MPLS_ENTRY] = "mpls-entry", \
+ [FIB_NODE_TYPE_MPLS_TUNNEL] = "mpls-tunnel", \
[FIB_NODE_TYPE_ADJ] = "adj", \
[FIB_NODE_TYPE_LISP_GPE_FWD_ENTRY] = "lisp-gpe-fwd-entry", \
[FIB_NODE_TYPE_LISP_ADJ] = "lisp-adj", \
@@ -341,8 +343,8 @@ extern void fib_node_deinit(fib_node_t *node);
extern void fib_node_lock(fib_node_t *node);
extern void fib_node_unlock(fib_node_t *node);
-extern u32 fib_node_child_get_n_children(fib_node_type_t parent_type,
- fib_node_index_t parent_index);
+extern u32 fib_node_get_n_children(fib_node_type_t parent_type,
+ fib_node_index_t parent_index);
extern u32 fib_node_child_add(fib_node_type_t parent_type,
fib_node_index_t parent_index,
fib_node_type_t child_type,
diff --git a/vnet/vnet/fib/fib_path.c b/vnet/vnet/fib/fib_path.c
index 988f689b102..809e3e166da 100644
--- a/vnet/vnet/fib/fib_path.c
+++ b/vnet/vnet/fib/fib_path.c
@@ -207,10 +207,17 @@ typedef struct fib_path_t_ {
u32 fp_interface;
} attached;
struct {
- /**
- * The next-hop
- */
- ip46_address_t fp_nh;
+ union
+ {
+ /**
+ * The next-hop
+ */
+ ip46_address_t fp_ip;
+ /**
+ * The local label to resolve through.
+ */
+ mpls_label_t fp_local_label;
+ } fp_nh;
/**
* The FIB table index in which to find the next-hop.
* This needs to be fixed. We should lookup the adjacencies in
@@ -237,7 +244,7 @@ typedef struct fib_path_t_ {
} recursive;
struct {
/**
- * The FIN index in which to perfom the next lookup
+	     * The FIB index in which to perform the next lookup
*/
fib_node_index_t fp_tbl_id;
} deag;
@@ -428,11 +435,22 @@ format_fib_path (u8 * s, va_list * args)
}
break;
case FIB_PATH_TYPE_RECURSIVE:
- s = format (s, "via %U",
- format_ip46_address,
- &path->recursive.fp_nh,
- IP46_TYPE_ANY);
- s = format (s, " in fib:%d", path->recursive.fp_tbl_id, path->fp_via_fib);
+ if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+ {
+ s = format (s, "via %U",
+ format_mpls_unicast_label,
+ path->recursive.fp_nh.fp_local_label);
+ }
+ else
+ {
+ s = format (s, "via %U",
+ format_ip46_address,
+ &path->recursive.fp_nh.fp_ip,
+ IP46_TYPE_ANY);
+ }
+ s = format (s, " in fib:%d",
+ path->recursive.fp_tbl_id,
+ path->fp_via_fib);
s = format (s, " via-fib:%d", path->fp_via_fib);
s = format (s, " via-dpo:[%U:%d]",
format_dpo_type, path->fp_dpo.dpoi_type,
@@ -677,7 +695,7 @@ fib_path_unresolve (fib_path_t *path)
{
fib_prefix_t pfx;
- fib_prefix_from_ip46_addr(&path->recursive.fp_nh, &pfx);
+ fib_entry_get_prefix(path->fp_via_fib, &pfx);
fib_entry_child_remove(path->fp_via_fib,
path->fp_sibling);
fib_table_entry_special_remove(path->recursive.fp_tbl_id,
@@ -1025,7 +1043,14 @@ fib_path_create (fib_node_index_t pl_index,
else
{
path->fp_type = FIB_PATH_TYPE_RECURSIVE;
- path->recursive.fp_nh = rpath->frp_addr;
+ if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+ {
+ path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
+ }
+ else
+ {
+ path->recursive.fp_nh.fp_ip = rpath->frp_addr;
+ }
path->recursive.fp_tbl_id = rpath->frp_fib_index;
}
}
@@ -1301,13 +1326,20 @@ fib_path_cmp_w_route_path (fib_node_index_t path_index,
rpath->frp_sw_if_index);
break;
case FIB_PATH_TYPE_RECURSIVE:
- res = ip46_address_cmp(&path->recursive.fp_nh,
- &rpath->frp_addr);
-
- if (0 == res)
- {
- res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
- }
+ if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+ {
+ res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
+ }
+ else
+ {
+ res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
+ &rpath->frp_addr);
+ }
+
+ if (0 == res)
+ {
+ res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
+ }
break;
case FIB_PATH_TYPE_DEAG:
res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
@@ -1506,7 +1538,14 @@ fib_path_resolve (fib_node_index_t path_index)
ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
- fib_prefix_from_ip46_addr(&path->recursive.fp_nh, &pfx);
+ if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+ {
+ fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label, &pfx);
+ }
+ else
+ {
+ fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
+ }
fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
&pfx,
@@ -1720,7 +1759,7 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
adj_index_t ai;
/*
- * get a MPLS link type adj.
+	     * get an appropriate link type adj.
*/
ai = fib_path_attached_next_hop_get_adj(
path,
@@ -1739,12 +1778,6 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
- /*
- * Assume that EOS and IP forwarding is the same.
- * revisit for ieBGP
- */
- dpo_copy(dpo, &path->fp_dpo);
- break;
case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
fib_path_recursive_adj_update(path, fct, dpo);
break;
@@ -1898,7 +1931,7 @@ fib_path_encode (fib_node_index_t path_list_index,
case FIB_PATH_TYPE_DEAG:
break;
case FIB_PATH_TYPE_RECURSIVE:
- api_rpath->rpath.frp_addr = path->recursive.fp_nh;
+ api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
break;
default:
break;
@@ -1906,6 +1939,16 @@ fib_path_encode (fib_node_index_t path_list_index,
return (1);
}
+fib_protocol_t
+fib_path_get_proto (fib_node_index_t path_index)
+{
+ fib_path_t *path;
+
+ path = fib_path_get(path_index);
+
+ return (path->fp_nh_proto);
+}
+
void
fib_path_module_init (void)
{
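A recursive path can now resolve via an MPLS local label as well as an IP next-hop, so the next-hop becomes a union and every comparison, format and resolve step switches on fp_nh_proto. A compact standalone sketch of that shape and its comparison, with illustrative types mirroring fib_path_cmp_w_route_path above.

/* Illustrative: the proto discriminator selects which union member is
 * meaningful, as fp_nh_proto does for fib_path_t's recursive next-hop. */
#include <string.h>

typedef enum { NH_PROTO_IP4, NH_PROTO_IP6, NH_PROTO_MPLS } nh_proto_t;

typedef struct recursive_nh_ {
    nh_proto_t proto;
    union {
        unsigned char ip[16];       /* IPv4/IPv6 next-hop address */
        unsigned int  local_label;  /* MPLS: resolve via this local label */
    } nh;
    unsigned int tbl_id;            /* table in which to resolve */
} recursive_nh_t;

static int
recursive_nh_cmp (const recursive_nh_t *a, const recursive_nh_t *b)
{
    int res;

    if (a->proto != b->proto)
        return ((int) a->proto - (int) b->proto);

    if (NH_PROTO_MPLS == a->proto)
        res = (int) (a->nh.local_label - b->nh.local_label);
    else
        res = memcmp(a->nh.ip, b->nh.ip, sizeof(a->nh.ip));

    if (0 == res)
        res = (int) (a->tbl_id - b->tbl_id);

    return (res);
}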
diff --git a/vnet/vnet/fib/fib_path.h b/vnet/vnet/fib/fib_path.h
index 4151c578aed..91f49d09234 100644
--- a/vnet/vnet/fib/fib_path.h
+++ b/vnet/vnet/fib/fib_path.h
@@ -132,6 +132,7 @@ extern int fib_path_is_recursive(fib_node_index_t path_index);
extern int fib_path_is_exclusive(fib_node_index_t path_index);
extern int fib_path_is_deag(fib_node_index_t path_index);
extern int fib_path_is_looped(fib_node_index_t path_index);
+extern fib_protocol_t fib_path_get_proto(fib_node_index_t path_index);
extern void fib_path_destroy(fib_node_index_t path_index);
extern uword fib_path_hash(fib_node_index_t path_index);
extern load_balance_path_t * fib_path_append_nh_for_multipath_hash(
diff --git a/vnet/vnet/fib/fib_path_ext.c b/vnet/vnet/fib/fib_path_ext.c
index 6603b64f02f..f75b5626c04 100644
--- a/vnet/vnet/fib/fib_path_ext.c
+++ b/vnet/vnet/fib/fib_path_ext.c
@@ -18,23 +18,28 @@
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/drop_dpo.h>
-#include "fib_path_ext.h"
-#include "fib_path.h"
-#include "fib_path_list.h"
-#include "fib_internal.h"
+#include <vnet/fib/fib_path_ext.h>
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_path.h>
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/fib/fib_internal.h>
u8 *
format_fib_path_ext (u8 * s, va_list * args)
{
fib_path_ext_t *path_ext;
+ u32 ii;
path_ext = va_arg (*args, fib_path_ext_t *);
- s = format(s, "path:%d label:%U",
- path_ext->fpe_path_index,
- format_mpls_unicast_label,
- path_ext->fpe_path.frp_label);
-
+ s = format(s, "path:%d labels:",
+ path_ext->fpe_path_index);
+ for (ii = 0; ii < vec_len(path_ext->fpe_path.frp_label_stack); ii++)
+ {
+ s = format(s, "%U ",
+ format_mpls_unicast_label,
+ path_ext->fpe_path.frp_label_stack[ii]);
+ }
return (s);
}
@@ -86,12 +91,23 @@ fib_path_ext_init (fib_path_ext_t *path_ext,
fib_path_ext_resolve(path_ext, path_list_index);
}
+/**
+ * @brief Return true if the label stack is implicit null
+ */
+static int
+fib_path_ext_is_imp_null (fib_path_ext_t *path_ext)
+{
+ return ((1 == vec_len(path_ext->fpe_label_stack)) &&
+ (MPLS_IETF_IMPLICIT_NULL_LABEL == path_ext->fpe_label_stack[0]));
+}
+
load_balance_path_t *
fib_path_ext_stack (fib_path_ext_t *path_ext,
- fib_forward_chain_type_t parent_fct,
+ const fib_entry_t *entry,
+ fib_forward_chain_type_t child_fct,
load_balance_path_t *nhs)
{
- fib_forward_chain_type_t child_fct;
+ fib_forward_chain_type_t parent_fct;
load_balance_path_t *nh;
if (!fib_path_is_resolved(path_ext->fpe_path_index))
@@ -102,33 +118,50 @@ fib_path_ext_stack (fib_path_ext_t *path_ext,
* label. From the chain type request by the child, determine what
* chain type we will request from the parent.
*/
- switch (parent_fct)
+ switch (child_fct)
{
case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
- ASSERT(0);
- return (nhs);
+ {
+ /*
+	 * The EOS chain is tricky since, when the path has an imp-null label, one cannot know
+	 * the adjacency to link to without knowing what the packet's payload protocol
+ * will be once the label is popped.
+ */
+ if (fib_path_ext_is_imp_null(path_ext))
+ {
+ parent_fct = fib_entry_chain_type_fixup(entry, child_fct);
+ }
+ else
+ {
+ /*
+ * we have a label to stack. packets will thus be labelled when
+ * they encounter the child, ergo, non-eos.
+ */
+ parent_fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS;
+ }
break;
+ }
case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
- if (MPLS_IETF_IMPLICIT_NULL_LABEL == path_ext->fpe_label)
+ if (fib_path_ext_is_imp_null(path_ext))
{
/*
* implicit-null label for the eos or IP chain, need to pick up
* the IP adj
*/
- child_fct = parent_fct;
+ parent_fct = child_fct;
}
else
{
/*
* we have a label to stack. packets will thus be labelled when
- * they encounter th child, ergo, non-eos.
+ * they encounter the child, ergo, non-eos.
*/
- child_fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS;
+ parent_fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS;
}
break;
case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
- child_fct = parent_fct;
+ parent_fct = child_fct;
break;
default:
return (nhs);
@@ -143,7 +176,7 @@ fib_path_ext_stack (fib_path_ext_t *path_ext,
* are to be sent. We stack the MPLS Label DPO on this path DPO
*/
fib_path_contribute_forwarding(path_ext->fpe_path_index,
- child_fct,
+ parent_fct,
&via_dpo);
if (dpo_is_drop(&via_dpo) ||
@@ -165,17 +198,31 @@ fib_path_ext_stack (fib_path_ext_t *path_ext,
* The label is stackable for this chain type
* construct the mpls header that will be imposed in the data-path
*/
- if (MPLS_IETF_IMPLICIT_NULL_LABEL != path_ext->fpe_label)
+ if (!fib_path_ext_is_imp_null(path_ext))
{
+ /*
+ * we use the parent protocol for the label so that
+	     * we pick up the correct MPLS imposition nodes to do
+ * ip[46] processing.
+ */
+ dpo_proto_t chain_proto;
+ mpls_eos_bit_t eos;
+ index_t mldi;
+
+ eos = (child_fct == FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS ?
+ MPLS_NON_EOS :
+ MPLS_EOS);
+ chain_proto = fib_forw_chain_type_to_dpo_proto(child_fct);
+
+ mldi = mpls_label_dpo_create(path_ext->fpe_label_stack,
+ eos, 255, 0,
+ chain_proto,
+ &nh->path_dpo);
+
dpo_set(&nh->path_dpo,
DPO_MPLS_LABEL,
- DPO_PROTO_MPLS,
- mpls_label_dpo_create(path_ext->fpe_label,
- (parent_fct == FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS ?
- MPLS_NON_EOS :
- MPLS_EOS),
- 255, 0,
- &nh->path_dpo));
+ chain_proto,
+ mldi);
}
}
dpo_reset(&via_dpo);
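The stacking decision in fib_path_ext_stack boils down to this: an EOS or IP chain whose path imposes only implicit-null is fixed up to the payload's chain type, anything that imposes a real label needs a non-EOS parent, and non-EOS requests pass straight through. A condensed, standalone decision function with an illustrative enum and names follows.

/* Illustrative restatement of the parent-chain selection above. */
typedef enum {
    CHAIN_IP4, CHAIN_IP6, CHAIN_MPLS_EOS, CHAIN_MPLS_NON_EOS
} chain_t;

static chain_t
parent_chain_for_child (chain_t child, int imposes_imp_null, chain_t payload_chain)
{
    switch (child)
    {
    case CHAIN_MPLS_EOS:
        /* imp-null pops to the payload; otherwise more labels follow */
        return (imposes_imp_null ? payload_chain : CHAIN_MPLS_NON_EOS);
    case CHAIN_IP4:
    case CHAIN_IP6:
        /* imp-null means we want the IP adjacency itself; a real label => non-eos */
        return (imposes_imp_null ? child : CHAIN_MPLS_NON_EOS);
    case CHAIN_MPLS_NON_EOS:
    default:
        return (child);
    }
}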
diff --git a/vnet/vnet/fib/fib_path_ext.h b/vnet/vnet/fib/fib_path_ext.h
index 6cb7f507ff4..cf8f8df00c6 100644
--- a/vnet/vnet/fib/fib_path_ext.h
+++ b/vnet/vnet/fib/fib_path_ext.h
@@ -24,10 +24,10 @@
* when packets are sent for that entry over that path.
*
* For example:
- * ip route add 1.1.1.1/32 via 10.10.10.10 mpls-label 100
+ * ip route add 1.1.1.1/32 via 10.10.10.10 out-label 100
*
 * The out-going MPLS label value 100 is a path-extension. It is a value specific
- * to the entry 1.1.1.1/32 and valid only went packets are sent via 10.10.10.10.
+ * to the entry 1.1.1.1/32 and valid only when packets are sent via 10.10.10.10.
*/
typedef struct fib_path_ext_t_
{
@@ -37,7 +37,7 @@ typedef struct fib_path_ext_t_
* instance of a fib_path_t that is extended
*/
fib_route_path_t fpe_path;
-#define fpe_label fpe_path.frp_label
+#define fpe_label_stack fpe_path.frp_label_stack
/**
* The index of the path. This is the global index, not the path's
@@ -46,6 +46,7 @@ typedef struct fib_path_ext_t_
fib_node_index_t fpe_path_index;
} fib_path_ext_t;
+struct fib_entry_t_;
extern u8 * format_fib_path_ext(u8 * s, va_list * args);
@@ -60,6 +61,7 @@ extern void fib_path_ext_resolve(fib_path_ext_t *path_ext,
fib_node_index_t path_list_index);
extern load_balance_path_t *fib_path_ext_stack(fib_path_ext_t *path_ext,
+ const struct fib_entry_t_ *entry,
fib_forward_chain_type_t fct,
load_balance_path_t *nhs);
diff --git a/vnet/vnet/fib/fib_path_list.c b/vnet/vnet/fib/fib_path_list.c
index 4d695d63d51..5b35e9b87e7 100644
--- a/vnet/vnet/fib/fib_path_list.c
+++ b/vnet/vnet/fib/fib_path_list.c
@@ -613,6 +613,20 @@ fib_path_list_get_resolving_interface (fib_node_index_t path_list_index)
return (sw_if_index);
}
+fib_protocol_t
+fib_path_list_get_proto (fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ /*
+ * we don't support a mix of path protocols, so we can return the proto
+ * of the first
+ */
+ return (fib_path_get_proto(path_list->fpl_paths[0]));
+}
+
int
fib_path_list_is_looped (fib_node_index_t path_list_index)
{
diff --git a/vnet/vnet/fib/fib_path_list.h b/vnet/vnet/fib/fib_path_list.h
index 852f07d427d..8bc1b20b6bf 100644
--- a/vnet/vnet/fib/fib_path_list.h
+++ b/vnet/vnet/fib/fib_path_list.h
@@ -126,6 +126,7 @@ extern int fib_path_list_recursive_loop_detect(fib_node_index_t path_list_index,
fib_node_index_t **entry_indicies);
extern u32 fib_path_list_get_resolving_interface(fib_node_index_t path_list_index);
extern int fib_path_list_is_looped(fib_node_index_t path_list_index);
+extern fib_protocol_t fib_path_list_get_proto(fib_node_index_t path_list_index);
extern u8 * fib_path_list_format(fib_node_index_t pl_index,
u8 * s);
extern u8 * fib_path_list_adjs_format(fib_node_index_t pl_index,
diff --git a/vnet/vnet/fib/fib_table.c b/vnet/vnet/fib/fib_table.c
index 54bc8081993..76db42d0ec7 100644
--- a/vnet/vnet/fib/fib_table.c
+++ b/vnet/vnet/fib/fib_table.c
@@ -493,7 +493,7 @@ fib_table_entry_path_add (u32 fib_index,
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t next_hop_label,
+ mpls_label_t *next_hop_labels,
fib_route_path_flags_t path_flags)
{
fib_route_path_t path = {
@@ -503,12 +503,11 @@ fib_table_entry_path_add (u32 fib_index,
.frp_fib_index = next_hop_fib_index,
.frp_weight = next_hop_weight,
.frp_flags = path_flags,
- .frp_label = next_hop_label,
+ .frp_label_stack = next_hop_labels,
};
fib_node_index_t fib_entry_index;
fib_route_path_t *paths = NULL;
- fib_table_route_path_fixup(prefix, &path);
vec_add1(paths, path);
fib_entry_index = fib_table_entry_path_add2(fib_index, prefix,
@@ -523,14 +522,20 @@ fib_table_entry_path_add2 (u32 fib_index,
const fib_prefix_t *prefix,
fib_source_t source,
fib_entry_flag_t flags,
- const fib_route_path_t *rpath)
+ fib_route_path_t *rpath)
{
fib_node_index_t fib_entry_index;
fib_table_t *fib_table;
+ u32 ii;
fib_table = fib_table_get(fib_index, prefix->fp_proto);
fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+ for (ii = 0; ii < vec_len(rpath); ii++)
+ {
+ fib_table_route_path_fixup(prefix, &rpath[ii]);
+ }
+
if (FIB_NODE_INDEX_INVALID == fib_entry_index)
{
fib_entry_index = fib_entry_create(fib_index, prefix,
@@ -558,9 +563,9 @@ fib_table_entry_path_add2 (u32 fib_index,
void
fib_table_entry_path_remove2 (u32 fib_index,
- const fib_prefix_t *prefix,
- fib_source_t source,
- const fib_route_path_t *rpath)
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_route_path_t *rpath)
{
/*
* 1 is it present
@@ -570,10 +575,16 @@ fib_table_entry_path_remove2 (u32 fib_index,
*/
fib_node_index_t fib_entry_index;
fib_table_t *fib_table;
+ u32 ii;
fib_table = fib_table_get(fib_index, prefix->fp_proto);
fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+ for (ii = 0; ii < vec_len(rpath); ii++)
+ {
+ fib_table_route_path_fixup(prefix, &rpath[ii]);
+ }
+
if (FIB_NODE_INDEX_INVALID == fib_entry_index)
{
/*
@@ -667,20 +678,24 @@ fib_table_entry_update (u32 fib_index,
const fib_prefix_t *prefix,
fib_source_t source,
fib_entry_flag_t flags,
- const fib_route_path_t *paths)
+ fib_route_path_t *paths)
{
fib_node_index_t fib_entry_index;
fib_table_t *fib_table;
+ u32 ii;
fib_table = fib_table_get(fib_index, prefix->fp_proto);
fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+ for (ii = 0; ii < vec_len(paths); ii++)
+ {
+ fib_table_route_path_fixup(prefix, &paths[ii]);
+ }
/*
* sort the paths provided by the control plane. this means
* the paths and the extension on the entry will be sorted.
*/
- vec_sort_with_function(((fib_route_path_t*)paths), // const cast
- fib_route_path_cmp_for_sort);
+ vec_sort_with_function(paths, fib_route_path_cmp_for_sort);
if (FIB_NODE_INDEX_INVALID == fib_entry_index)
{
@@ -717,7 +732,7 @@ fib_table_entry_update_one_path (u32 fib_index,
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t next_hop_label,
+ mpls_label_t *next_hop_labels,
fib_route_path_flags_t path_flags)
{
fib_node_index_t fib_entry_index;
@@ -728,7 +743,7 @@ fib_table_entry_update_one_path (u32 fib_index,
.frp_fib_index = next_hop_fib_index,
.frp_weight = next_hop_weight,
.frp_flags = path_flags,
- .frp_label = next_hop_label,
+ .frp_label_stack = next_hop_labels,
};
fib_route_path_t *paths = NULL;
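For callers, the mpls_label_t argument becomes a vector: pass NULL for an unlabelled path, or build the stack first. A hedged usage sketch, assuming the vppinfra vec_add1 macro and the fib_table_entry_path_add signature shown above; fib_index, prefix, next_hop_addr and next_hop_sw_if_index are placeholders assumed to be set up by the caller.

/* Sketch only: impose a two-deep out-label stack on one path; the vector
 * becomes the path extension's fpe_label_stack, and NULL means no labels. */
mpls_label_t *label_stack = NULL;

vec_add1(label_stack, 33);          /* outer (top) label */
vec_add1(label_stack, 34);          /* inner label */

fib_table_entry_path_add(fib_index, &prefix,
                         FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE,
                         FIB_PROTOCOL_IP4,
                         &next_hop_addr,
                         next_hop_sw_if_index,
                         ~0,        /* next-hop FIB index: not used here */
                         1,         /* weight */
                         label_stack,
                         FIB_ROUTE_PATH_FLAG_NONE);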
diff --git a/vnet/vnet/fib/fib_table.h b/vnet/vnet/fib/fib_table.h
index ef7599a7bf5..cfec516de1a 100644
--- a/vnet/vnet/fib/fib_table.h
+++ b/vnet/vnet/fib/fib_table.h
@@ -289,8 +289,8 @@ extern void fib_table_entry_special_remove(u32 fib_index,
* @param next_hop_weight
* [un]equal cost path weight
*
- * @param next_hop_label
- * The path's out-going label. INVALID is there is none.
+ * @param next_hop_label_stack
+ *  The path's out-going label stack. NULL if there is none.
*
* @param pf
* Flags for the path
@@ -307,7 +307,7 @@ extern fib_node_index_t fib_table_entry_path_add(u32 fib_index,
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t next_hop_label,
+ mpls_label_t *next_hop_label_stack,
fib_route_path_flags_t pf);
/**
* @brief
@@ -329,7 +329,7 @@ extern fib_node_index_t fib_table_entry_path_add(u32 fib_index,
* Flags for the entry.
*
* @param rpaths
- * A vector of paths.
+ * A vector of paths. Not const since they may be modified.
*
* @return
* the index of the fib_entry_t that is created (or existed already).
@@ -338,7 +338,7 @@ extern fib_node_index_t fib_table_entry_path_add2(u32 fib_index,
const fib_prefix_t *prefix,
fib_source_t source,
fib_entry_flag_t flags,
- const fib_route_path_t *rpath);
+ fib_route_path_t *rpath);
/**
* @brief
@@ -407,7 +407,7 @@ extern void fib_table_entry_path_remove(u32 fib_index,
extern void fib_table_entry_path_remove2(u32 fib_index,
const fib_prefix_t *prefix,
fib_source_t source,
- const fib_route_path_t *paths);
+ fib_route_path_t *paths);
/**
* @brief
@@ -426,7 +426,7 @@ extern void fib_table_entry_path_remove2(u32 fib_index,
* The ID of the client/source adding the entry.
*
* @param rpaths
- * A vector of paths.
+ * A vector of paths. Not const since they may be modified.
*
* @return
* the index of the fib_entry_t that is created (or existed already).
@@ -435,7 +435,7 @@ extern fib_node_index_t fib_table_entry_update(u32 fib_index,
const fib_prefix_t *prefix,
fib_source_t source,
fib_entry_flag_t flags,
- const fib_route_path_t *paths);
+ fib_route_path_t *paths);
/**
* @brief
@@ -472,8 +472,8 @@ extern fib_node_index_t fib_table_entry_update(u32 fib_index,
* @param next_hop_weight
* [un]equal cost path weight
*
- * @param next_hop_label
- * The path's out-going label. INVALID is there is none.
+ * @param next_hop_label_stack
+ *  The path's out-going label stack. NULL if there is none.
*
* @param pf
* Flags for the path
@@ -490,7 +490,7 @@ extern fib_node_index_t fib_table_entry_update_one_path(u32 fib_index,
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t next_hop_label,
+ mpls_label_t *next_hop_label_stack,
fib_route_path_flags_t pf);
/**
diff --git a/vnet/vnet/fib/fib_test.c b/vnet/vnet/fib/fib_test.c
index eb5253d06da..a28026c13c1 100644
--- a/vnet/vnet/fib/fib_test.c
+++ b/vnet/vnet/fib/fib_test.c
@@ -28,6 +28,7 @@
#include <vnet/mpls/mpls.h>
#include <vnet/fib/fib_path_list.h>
+#include <vnet/fib/fib_entry_src.h>
#include <vnet/fib/fib_walk.h>
#include <vnet/fib/fib_node_list.h>
#include <vnet/fib/fib_urpf_list.h>
@@ -256,6 +257,7 @@ fib_test_build_rewrite (u8 *eth_addr)
typedef enum fib_test_lb_bucket_type_t_ {
FT_LB_LABEL_O_ADJ,
+ FT_LB_LABEL_STACK_O_ADJ,
FT_LB_LABEL_O_LB,
FT_LB_O_LB,
FT_LB_SPECIAL,
@@ -277,6 +279,14 @@ typedef struct fib_test_lb_bucket_t_ {
struct
{
mpls_eos_bit_t eos;
+ mpls_label_t label_stack[8];
+ u8 label_stack_size;
+ u8 ttl;
+ adj_index_t adj;
+ } label_stack_o_adj;
+ struct
+ {
+ mpls_eos_bit_t eos;
mpls_label_t label;
u8 ttl;
index_t lb;
@@ -322,6 +332,63 @@ fib_test_validate_lb_v (const load_balance_t *lb,
switch (exp->type)
{
+ case FT_LB_LABEL_STACK_O_ADJ:
+ {
+ const mpls_label_dpo_t *mld;
+ mpls_label_t hdr;
+ u32 ii;
+
+ FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+
+ mld = mpls_label_dpo_get(dpo->dpoi_index);
+
+ FIB_TEST_LB(exp->label_stack_o_adj.label_stack_size == mld->mld_n_labels,
+ "label stack size",
+ mld->mld_n_labels);
+
+ for (ii = 0; ii < mld->mld_n_labels; ii++)
+ {
+ hdr = clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+ FIB_TEST_LB((vnet_mpls_uc_get_label(hdr) ==
+ exp->label_stack_o_adj.label_stack[ii]),
+ "bucket %d stacks on label %d",
+ bucket,
+ exp->label_stack_o_adj.label_stack[ii]);
+
+ if (ii == mld->mld_n_labels-1)
+ {
+ FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) ==
+ exp->label_o_adj.eos),
+ "bucket %d stacks on label %d %U!=%U",
+ bucket,
+ exp->label_stack_o_adj.label_stack[ii],
+ format_mpls_eos_bit, exp->label_o_adj.eos,
+ format_mpls_eos_bit, vnet_mpls_uc_get_s(hdr));
+ }
+ else
+ {
+ FIB_TEST_LB((vnet_mpls_uc_get_s(hdr) == MPLS_NON_EOS),
+ "bucket %d stacks on label %d %U",
+ bucket,
+ exp->label_stack_o_adj.label_stack[ii],
+ format_mpls_eos_bit, vnet_mpls_uc_get_s(hdr));
+ }
+ }
+
+ FIB_TEST_LB((DPO_ADJACENCY_INCOMPLETE == mld->mld_dpo.dpoi_type),
+ "bucket %d label stacks on %U",
+ bucket,
+ format_dpo_type, mld->mld_dpo.dpoi_type);
+
+ FIB_TEST_LB((exp->label_stack_o_adj.adj == mld->mld_dpo.dpoi_index),
+ "bucket %d label stacks on adj %d",
+ bucket,
+ exp->label_stack_o_adj.adj);
+ }
+ break;
case FT_LB_LABEL_O_ADJ:
{
const mpls_label_dpo_t *mld;
@@ -332,7 +399,7 @@ fib_test_validate_lb_v (const load_balance_t *lb,
format_dpo_type, dpo->dpoi_type);
mld = mpls_label_dpo_get(dpo->dpoi_index);
- hdr = clib_net_to_host_u32(mld->mld_hdr.label_exp_s_ttl);
+ hdr = clib_net_to_host_u32(mld->mld_hdr[0].label_exp_s_ttl);
FIB_TEST_LB((vnet_mpls_uc_get_label(hdr) ==
exp->label_o_adj.label),
@@ -367,10 +434,12 @@ fib_test_validate_lb_v (const load_balance_t *lb,
"bucket %d stacks on %U",
bucket,
format_dpo_type, dpo->dpoi_type);
-
+
mld = mpls_label_dpo_get(dpo->dpoi_index);
- hdr = clib_net_to_host_u32(mld->mld_hdr.label_exp_s_ttl);
+ hdr = clib_net_to_host_u32(mld->mld_hdr[0].label_exp_s_ttl);
+ FIB_TEST_LB(1 == mld->mld_n_labels, "label stack size",
+ mld->mld_n_labels);
FIB_TEST_LB((vnet_mpls_uc_get_label(hdr) ==
exp->label_o_lb.label),
"bucket %d stacks on label %d",
@@ -437,8 +506,8 @@ fib_test_validate_entry (fib_node_index_t fei,
u16 n_buckets,
...)
{
- const load_balance_t *lb;
dpo_id_t dpo = DPO_INVALID;
+ const load_balance_t *lb;
fib_prefix_t pfx;
index_t fw_lbi;
u32 fib_index;
@@ -462,34 +531,37 @@ fib_test_validate_entry (fib_node_index_t fei,
* ensure that the LB contributed by the entry is the
* same as the LB in the forwarding tables
*/
- switch (pfx.fp_proto)
+ if (fct == fib_entry_get_default_chain_type(fib_entry_get(fei)))
{
- case FIB_PROTOCOL_IP4:
- fw_lbi = ip4_fib_forwarding_lookup(fib_index, &pfx.fp_addr.ip4);
- break;
- case FIB_PROTOCOL_IP6:
- fw_lbi = ip6_fib_table_fwding_lookup(&ip6_main, fib_index, &pfx.fp_addr.ip6);
- break;
- case FIB_PROTOCOL_MPLS:
- {
- mpls_unicast_header_t hdr = {
- .label_exp_s_ttl = 0,
- };
-
- vnet_mpls_uc_set_label(&hdr.label_exp_s_ttl, pfx.fp_label);
- vnet_mpls_uc_set_s(&hdr.label_exp_s_ttl, pfx.fp_eos);
- hdr.label_exp_s_ttl = clib_host_to_net_u32(hdr.label_exp_s_ttl);
-
- fw_lbi = mpls_fib_table_forwarding_lookup(fib_index, &hdr);
- break;
- }
- default:
- fw_lbi = 0;
+ switch (pfx.fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ fw_lbi = ip4_fib_forwarding_lookup(fib_index, &pfx.fp_addr.ip4);
+ break;
+ case FIB_PROTOCOL_IP6:
+ fw_lbi = ip6_fib_table_fwding_lookup(&ip6_main, fib_index, &pfx.fp_addr.ip6);
+ break;
+ case FIB_PROTOCOL_MPLS:
+ {
+ mpls_unicast_header_t hdr = {
+ .label_exp_s_ttl = 0,
+ };
+
+ vnet_mpls_uc_set_label(&hdr.label_exp_s_ttl, pfx.fp_label);
+ vnet_mpls_uc_set_s(&hdr.label_exp_s_ttl, pfx.fp_eos);
+ hdr.label_exp_s_ttl = clib_host_to_net_u32(hdr.label_exp_s_ttl);
+
+ fw_lbi = mpls_fib_table_forwarding_lookup(fib_index, &hdr);
+ break;
+ }
+ default:
+ fw_lbi = 0;
+ }
+ FIB_TEST_LB((fw_lbi == dpo.dpoi_index),
+ "Contributed LB = FW LB: %U\n %U",
+ format_load_balance, fw_lbi, 0,
+ format_load_balance, dpo.dpoi_index, 0);
}
- FIB_TEST_LB((fw_lbi == dpo.dpoi_index),
- "Contributed LB = FW LB: %U\n %U",
- format_load_balance, fw_lbi, 0,
- format_load_balance, dpo.dpoi_index, 0);
dpo_reset(&dpo);
@@ -623,7 +695,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1, // weight
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &local_pfx);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached interface route present");
@@ -650,7 +722,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1, // weight
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &local_pfx);
FIB_TEST(((FIB_ENTRY_FLAG_LOCAL | FIB_ENTRY_FLAG_CONNECTED) ==
@@ -700,7 +772,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx);
FIB_TEST((FIB_ENTRY_FLAG_NONE == fib_entry_get_flags(fei)),
@@ -818,7 +890,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
dpo = fib_entry_contribute_ip_forwarding(fei);
@@ -884,7 +956,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST((FIB_ENTRY_FLAG_ATTACHED == fib_entry_get_flags(fei)),
"Flags set on adj-fib");
@@ -933,7 +1005,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_10_10_10_2_s_32);
@@ -970,7 +1042,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
ai = fib_entry_get_adj(fei);
@@ -1003,7 +1075,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
ai = fib_entry_get_adj(fei);
@@ -1030,7 +1102,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
dpo = fib_entry_contribute_ip_forwarding(fei);
@@ -1116,7 +1188,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index, // nexthop in same fib as route
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST_REC_FORW(&bgp_100_pfx, &pfx_1_1_1_1_s_32, 0);
@@ -1152,7 +1224,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index, // nexthop in same fib as route
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST_REC_FORW(&bgp_101_pfx, &pfx_1_1_1_1_s_32, 0);
@@ -1274,7 +1346,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index, // nexthop in same fib as route
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
@@ -1320,7 +1392,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_entry_path_add(fib_index,
&pfx_1_2_3_4_s_32,
@@ -1331,7 +1403,7 @@ fib_test_v4 (void)
tm->hw[1]->sw_if_index,
~0,
3,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "1.2.3.4/32 present");
@@ -1372,7 +1444,7 @@ fib_test_v4 (void)
tm->hw[1]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_entry_path_add(fib_index,
&pfx_1_2_3_5_s_32,
@@ -1383,7 +1455,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0,
4,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "1.2.3.5/32 present");
@@ -1455,7 +1527,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
0, // zero weight
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
@@ -1474,7 +1546,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
100,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
@@ -1556,7 +1628,7 @@ fib_test_v4 (void)
tm->hw[1]->sw_if_index,
~0, // invalid fib index
100,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_6_6_6_6_s_32);
@@ -1812,7 +1884,7 @@ fib_test_v4 (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_entry_path_add(fib_index,
&bgp_44_s_32,
@@ -1823,7 +1895,7 @@ fib_test_v4 (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST_REC_FORW(&bgp_44_s_32, &pfx_1_2_3_4_s_32, 0);
@@ -1893,7 +1965,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index, // nexthop in same fib as route
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
@@ -1936,7 +2008,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_1_1_1_0_s_24);
dpo1 = fib_entry_contribute_ip_forwarding(fei);
@@ -1994,7 +2066,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_1_1_1_0_s_28);
dpo2 = fib_entry_contribute_ip_forwarding(fei);
@@ -2097,7 +2169,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
dpo1 = fib_entry_contribute_ip_forwarding(fei);
ai = fib_entry_get_adj(fei);
@@ -2198,7 +2270,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index, // same as route
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fib_table_entry_path_add(fib_index,
&bgp_102,
@@ -2209,7 +2281,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index, // same as route's FIB
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &bgp_102);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "100.100.100.102/32 present");
@@ -2298,7 +2370,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index, // Same as route's FIB
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
/*
@@ -2345,7 +2417,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
/*
@@ -2459,7 +2531,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fib_table_entry_path_add(fib_index,
&pfx_5_5_5_6_s_32,
@@ -2470,7 +2542,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fib_table_entry_path_add(fib_index,
&pfx_5_5_5_7_s_32,
@@ -2481,7 +2553,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
/*
* +3 entries, +3 shared path-list
@@ -2520,7 +2592,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
@@ -2579,7 +2651,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_5_5_5_5_s_32);
@@ -2621,7 +2693,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_5_5_5_7_s_32);
@@ -2696,7 +2768,7 @@ fib_test_v4 (void)
~0, // no index provided.
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_5_5_5_6_s_32);
FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
@@ -2744,7 +2816,7 @@ fib_test_v4 (void)
~0, // recursive
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
dpo = fib_entry_contribute_ip_forwarding(fei);
FIB_TEST(load_balance_is_drop(dpo),
@@ -2774,7 +2846,7 @@ fib_test_v4 (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_1_s_32);
@@ -2804,7 +2876,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_28);
ai = fib_entry_get_adj(fei);
@@ -2839,7 +2911,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket(dpo1->dpoi_index, 0)),
"adj for 200.200.200.200/32 is recursive via adj for 1.1.1.1");
@@ -2863,7 +2935,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fib_table_entry_path_add(fib_index,
@@ -2875,7 +2947,7 @@ fib_test_v4 (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
@@ -2960,7 +3032,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket_i(lb, 0)),
@@ -2987,7 +3059,7 @@ fib_test_v4 (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_RESOLVE_VIA_HOST);
fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx);
@@ -3055,7 +3127,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fib_table_entry_path_remove(fib_index,
@@ -3133,7 +3205,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fib_table_entry_path_add(fib_index,
&pfx_4_4_4_4_s_32,
@@ -3144,7 +3216,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fib_table_entry_path_add(fib_index,
&pfx_4_4_4_4_s_32,
@@ -3155,7 +3227,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(FIB_NODE_INDEX_INVALID !=
fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32),
@@ -3240,7 +3312,7 @@ fib_test_v4 (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &pfx_4_4_4_4_s_32);
@@ -3300,7 +3372,7 @@ fib_test_v4 (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_entry_path_add(fib_index,
&pfx_34_1_1_1_s_32,
@@ -3311,7 +3383,7 @@ fib_test_v4 (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST_REC_FORW(&pfx_34_1_1_1_s_32, &pfx_34_34_1_1_s_32, 0);
fib_table_entry_delete_index(fei, FIB_SOURCE_API);
@@ -3389,7 +3461,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &pfx_4_1_1_1_s_32);
@@ -3443,7 +3515,7 @@ fib_test_v4 (void)
tm->hw[0]->sw_if_index,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(0, &pfx_2001_s_64);
@@ -3684,7 +3756,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
@@ -3715,7 +3787,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &local_pfx);
@@ -3760,7 +3832,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_0_0);
@@ -3881,7 +3953,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_2001_1_2_s_128);
@@ -3920,7 +3992,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_2001_1_3_s_128);
@@ -3974,7 +4046,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_2001_a_s_64);
ai = fib_entry_get_adj(fei);
@@ -3988,7 +4060,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_2001_b_s_64);
ai = fib_entry_get_adj(fei);
@@ -4023,7 +4095,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(fei == fib_table_lookup_exact_match(0, &pfx_1_1_1_1_s_32),
"1.1.1.1/32 o v6 route present");
@@ -4061,7 +4133,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &pfx_2001_c_s_64);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached route present");
@@ -4163,7 +4235,7 @@ fib_test_v6 (void)
tm->hw[1]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &connected_pfx);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached interface route present");
@@ -4182,7 +4254,7 @@ fib_test_v6 (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &connected_pfx);
@@ -4462,429 +4534,6 @@ fib_test_v6 (void)
}
/*
- * Test the recursive route route handling for GRE tunnels
- */
-static void
-fib_test_gre (void)
-{
- /* fib_node_index_t fei; */
- /* u32 fib_index = 0; */
- /* test_main_t *tm; */
- /* u32 ii; */
-
- /* tm = &test_main; */
-
- /* for (ii = 0; ii < 4; ii++) */
- /* { */
- /* ip4_main.fib_index_by_sw_if_index[tm->hw[ii]->sw_if_index] = 0; */
- /* } */
-
- /* /\* */
- /* * add interface routes. We'll assume this works. It's more rigorously */
- /* * tested elsewhere. */
- /* *\/ */
- /* fib_prefix_t local_pfx = { */
- /* .fp_len = 24, */
- /* .fp_proto = FIB_PROTOCOL_IP4, */
- /* .fp_addr = { */
- /* .ip4 = { */
- /* /\* 10.10.10.10 *\/ */
- /* .as_u32 = clib_host_to_net_u32(0x0a0a0a0a), */
- /* }, */
- /* }, */
- /* }; */
-
- /* fib_table_entry_update_one_path(fib_index, &local_pfx, */
- /* FIB_SOURCE_INTERFACE, */
- /* (FIB_ENTRY_FLAG_CONNECTED | */
- /* FIB_ENTRY_FLAG_ATTACHED), */
- /* NULL, */
- /* tm->hw[0]->sw_if_index, */
- /* ~0, */
- /* 1, */
- /* FIB_ROUTE_PATH_FLAG_NONE); */
- /* fei = fib_table_lookup_exact_match(fib_index, &local_pfx); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID != fei), */
- /* "attached interface route present"); */
-
- /* local_pfx.fp_len = 32; */
- /* fib_table_entry_update_one_path(fib_index, &local_pfx, */
- /* FIB_SOURCE_INTERFACE, */
- /* (FIB_ENTRY_FLAG_CONNECTED | */
- /* FIB_ENTRY_FLAG_LOCAL), */
- /* NULL, */
- /* tm->hw[0]->sw_if_index, */
- /* ~0, // invalid fib index */
- /* 1, */
- /* FIB_ROUTE_PATH_FLAG_NONE); */
- /* fei = fib_table_lookup_exact_match(fib_index, &local_pfx); */
-
- /* FIB_TEST((FIB_NODE_INDEX_INVALID != fei), */
- /* "local interface route present"); */
-
- /* fib_prefix_t local2_pfx = { */
- /* .fp_len = 24, */
- /* .fp_proto = FIB_PROTOCOL_IP4, */
- /* .fp_addr = { */
- /* .ip4 = { */
- /* /\* 10.10.11.11 *\/ */
- /* .as_u32 = clib_host_to_net_u32(0x0a0a0b0b), */
- /* }, */
- /* }, */
- /* }; */
-
- /* fib_table_entry_update_one_path(fib_index, &local2_pfx, */
- /* FIB_SOURCE_INTERFACE, */
- /* (FIB_ENTRY_FLAG_CONNECTED | */
- /* FIB_ENTRY_FLAG_ATTACHED), */
- /* NULL, */
- /* tm->hw[1]->sw_if_index, */
- /* ~0, */
- /* 1, */
- /* FIB_ROUTE_PATH_FLAG_NONE); */
- /* fei = fib_table_lookup_exact_match(fib_index, &local2_pfx); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID != fei), */
- /* "attached interface route present"); */
-
- /* local2_pfx.fp_len = 32; */
- /* fib_table_entry_update_one_path(fib_index, &local2_pfx, */
- /* FIB_SOURCE_INTERFACE, */
- /* (FIB_ENTRY_FLAG_CONNECTED | */
- /* FIB_ENTRY_FLAG_LOCAL), */
- /* NULL, */
- /* tm->hw[0]->sw_if_index, */
- /* ~0, // invalid fib index */
- /* 1, */
- /* FIB_ROUTE_PATH_FLAG_NONE); */
- /* fei = fib_table_lookup_exact_match(fib_index, &local2_pfx); */
-
- /* FIB_TEST((FIB_NODE_INDEX_INVALID != fei), */
- /* "local interface route present"); */
-
- /* /\* */
- /* * Add the route that will be used to resolve the tunnel's destination */
- /* *\/ */
- /* fib_prefix_t route_pfx = { */
- /* .fp_len = 24, */
- /* .fp_proto = FIB_PROTOCOL_IP4, */
- /* .fp_addr = { */
- /* .ip4 = { */
- /* /\* 1.1.1.0/24 *\/ */
- /* .as_u32 = clib_host_to_net_u32(0x01010100), */
- /* }, */
- /* }, */
- /* }; */
- /* /\* 10.10.10.2 *\/ */
- /* ip46_address_t nh_10_10_10_2 = { */
- /* .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02), */
- /* }; */
-
- /* fib_table_entry_path_add(fib_index, &route_pfx, */
- /* FIB_SOURCE_API, */
- /* FIB_ENTRY_FLAG_NONE, */
- /* &nh_10_10_10_2, */
- /* tm->hw[0]->sw_if_index, */
- /* ~0, */
- /* 1, */
- /* FIB_ROUTE_PATH_FLAG_NONE); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID != */
- /* fib_table_lookup_exact_match(fib_index, &local_pfx)), */
- /* "route present"); */
-
- /* /\* */
- /* * Add a tunnel */
- /* *\/ */
- /* /\* 1.1.1.1 *\/ */
- /* fib_prefix_t tun_dst_pfx = { */
- /* .fp_len = 32, */
- /* .fp_proto = FIB_PROTOCOL_IP4, */
- /* .fp_addr = { */
- /* .ip4.as_u32 = clib_host_to_net_u32(0x01010101), */
- /* }, */
- /* }; */
- /* /\* 10.10.10.10 *\/ */
- /* ip4_address_t tun_src = { */
- /* .as_u32 = clib_host_to_net_u32(0x0a0a0a0a), */
- /* }; */
- /* /\* 172.16.0.1 *\/ */
- /* ip4_address_t tun_itf = { */
- /* .as_u32 = clib_host_to_net_u32(0xac100001), */
- /* }; */
- /* fib_prefix_t tun_itf_pfx = { */
- /* .fp_len = 30, */
- /* .fp_proto = FIB_PROTOCOL_IP4, */
- /* .fp_addr = { */
- /* .ip4 = tun_itf, */
- /* }, */
- /* }; */
- /* u32 *encap_labels = NULL; */
- /* u32 label = 0xbaba; */
- /* u32 encap_index; */
- /* u32 tunnel_sw_if_index; */
-
- /* int rv; */
-
- /* /\* */
- /* * First we need the MPLS Encap present */
- /* * */
- /* * Pretty sure this is broken. the wiki say the 1st aparamter address */
- /* * should be the tunnel's interface address, which makes some sense. But */
- /* * the code for tunnel creation checks for the tunnel's destination */
- /* * address. curious... */
- /* *\/ */
- /* vec_add1(encap_labels, label); */
- /* rv = vnet_mpls_add_del_encap(&tun_dst_pfx.fp_addr.ip4, */
- /* 0, // inner VRF */
- /* encap_labels, */
- /* ~0, // policy_tunnel_index, */
- /* 0, // no_dst_hash, */
- /* &encap_index, */
- /* 1); // ADD */
- /* FIB_TEST((0 == rv), "MPLS encap created"); */
-
- /* /\* */
- /* * now create the tunnel */
- /* *\/ */
- /* rv = vnet_mpls_gre_add_del_tunnel(&tun_src, */
- /* &tun_dst_pfx.fp_addr.ip4, */
- /* &tun_itf_pfx.fp_addr.ip4, */
- /* tun_itf_pfx.fp_len, */
- /* 0, // inner VRF */
- /* 0, // outer VRF */
- /* &tunnel_sw_if_index, */
- /* 0, // l2 only */
- /* 1); // ADD */
- /* FIB_TEST((0 == rv), "Tunnel created"); */
-
- /* /\* */
- /* * add it again. just for giggles. */
- /* *\/ */
- /* rv = vnet_mpls_gre_add_del_tunnel(&tun_src, */
- /* &tun_dst_pfx.fp_addr.ip4, */
- /* &tun_itf_pfx.fp_addr.ip4, */
- /* tun_itf_pfx.fp_len, */
- /* 0, // inner VRF */
- /* 0, // outer VRF */
- /* &tunnel_sw_if_index, */
- /* 0, // l2 only */
- /* 1); // ADD */
- /* FIB_TEST((0 != rv), "Duplicate Tunnel not created"); */
-
- /* /\* */
- /* * Find the route added for the tunnel subnet and check that */
- /* * it has a midchin adj that is stacked on the adj used to reach the */
- /* * tunnel destination */
- /* *\/ */
- /* ip_adjacency_t *midchain_adj, *route_adj, *adjfib_adj; */
- /* adj_index_t midchain_ai, route_ai, adjfib_ai1, adjfib_ai2; */
- /* ip_lookup_main_t *lm; */
-
- /* lm = &ip4_main.lookup_main; */
-
- /* fei = fib_table_lookup_exact_match(fib_index, &tun_itf_pfx); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "tun itf route present"); */
- /* midchain_ai = fib_entry_contribute_forwarding(fei); */
- /* midchain_adj = adj_get(midchain_ai); */
-
- /* FIB_TEST((IP_LOOKUP_NEXT_MIDCHAIN == midchain_adj->lookup_next_index), */
- /* "Tunnel interface links to midchain"); */
-
- /* fei = fib_table_lookup_exact_match(fib_index, &route_pfx); */
- /* route_ai = fib_entry_contribute_forwarding(fei); */
- /* FIB_TEST((midchain_adj->sub_type.midchain.adj_index == route_ai), */
- /* "tunnel midchain it stacked on route adj"); */
-
- /* /\* */
- /* * update the route to the tunnel's destination to load-balance via */
- /* * interface 1. */
- /* *\/ */
- /* /\* 10.10.11.2 *\/ */
- /* ip46_address_t nh_10_10_11_2 = { */
- /* .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0b02), */
- /* }; */
-
- /* fib_table_entry_path_add(fib_index, &route_pfx, */
- /* FIB_SOURCE_API, */
- /* FIB_ENTRY_FLAG_NONE, */
- /* &nh_10_10_11_2, */
- /* tm->hw[1]->sw_if_index, */
- /* ~0, */
- /* 1, */
- /* FIB_ROUTE_PATH_FLAG_NONE); */
-
- /* /\* */
- /* * the tunnels midchain should have re-stacked. This tests that the */
- /* * route re-resolution backwalk works to a tunnel interface. */
- /* *\/ */
- /* fei = fib_table_lookup_exact_match(fib_index, &route_pfx); */
- /* FIB_TEST((route_ai != fib_entry_contribute_forwarding(fei)), "route changed"); */
- /* route_ai = fib_entry_contribute_forwarding(fei); */
-
- /* midchain_adj = adj_get(midchain_ai); */
-
- /* FIB_TEST((midchain_adj->sub_type.midchain.adj_index == route_ai), */
- /* "tunnel midchain has re-stacked on route adj"); */
-
- /* route_adj = adj_get(route_ai); */
-
- /* FIB_TEST((2 == route_adj->n_adj), "Route adj is multipath"); */
-
- /* /\* */
- /* * At this stage both nieghbour adjs are incomplete, so the same should */
- /* * be true of the multipath adj */
- /* *\/ */
- /* FIB_TEST((IP_LOOKUP_NEXT_ARP == route_adj->lookup_next_index), */
- /* "Adj0 is ARP: %d", route_adj->lookup_next_index); */
- /* FIB_TEST((IP_LOOKUP_NEXT_ARP == (route_adj+1)->lookup_next_index), */
- /* "Adj1 is ARP"); */
-
- /* /\* */
- /* * do the equivalent of creating an ARP entry for 10.10.10.2. */
- /* * This will complete the adj, and this */
- /* * change should be refelct in the multipath too. */
- /* *\/ */
- /* u8* rewrite = NULL, byte = 0xd; */
- /* vec_add(rewrite, &byte, 6); */
-
- /* adjfib_ai1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4, */
- /* VNET_LINK_IP4, */
- /* &nh_10_10_10_2, */
- /* tm->hw[0]->sw_if_index); */
- /* adj_nbr_update_rewrite(FIB_PROTOCOL_IP4, */
- /* adjfib_ai1, */
- /* rewrite); */
- /* adjfib_adj = adj_get(adjfib_ai1); */
- /* FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adjfib_adj->lookup_next_index), */
- /* "Adj-fib10 adj is rewrite"); */
-
- /* adjfib_ai2 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4, */
- /* VNET_LINK_IP4, */
- /* &nh_10_10_11_2, */
- /* tm->hw[1]->sw_if_index); */
- /* adj_nbr_update_rewrite(FIB_PROTOCOL_IP4, */
- /* adjfib_ai2, */
- /* rewrite); */
-
- /* adjfib_adj = adj_get(adjfib_ai2); */
-
- /* FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adjfib_adj->lookup_next_index), */
- /* "Adj-fib11 adj is rewrite"); */
-
- /* fei = fib_table_lookup_exact_match(fib_index, &route_pfx); */
- /* FIB_TEST((route_ai != fib_entry_contribute_forwarding(fei)), "route changed"); */
- /* route_ai = fib_entry_contribute_forwarding(fei); */
- /* route_adj = adj_get(route_ai); */
- /* FIB_TEST((IP_LOOKUP_NEXT_REWRITE == route_adj->lookup_next_index), */
- /* "Adj0 is rewrite"); */
- /* FIB_TEST((IP_LOOKUP_NEXT_REWRITE == (route_adj+1)->lookup_next_index), */
- /* "Adj1 is rewrite"); */
-
- /* /\* */
- /* * CLEANUP */
- /* *\/ */
- /* adj_index_t drop_ai = adj_get_special(FIB_PROTOCOL_IP4, */
- /* ADJ_SPECIAL_TYPE_DROP); */
-
- /* /\* */
- /* * remove the route that the tunnel resovles via. expect */
- /* * it to now resolve via the default route, which is drop */
- /* *\/ */
- /* fib_table_entry_path_remove(fib_index, &route_pfx, */
- /* FIB_SOURCE_API, */
- /* &nh_10_10_10_2, */
- /* tm->hw[0]->sw_if_index, */
- /* ~0, */
- /* 1, */
- /* FIB_ROUTE_PATH_FLAG_NONE); */
- /* fib_table_entry_path_remove(fib_index, &route_pfx, */
- /* FIB_SOURCE_API, */
- /* &nh_10_10_11_2, */
- /* tm->hw[1]->sw_if_index, */
- /* ~0, */
- /* 1, */
- /* FIB_ROUTE_PATH_FLAG_NONE); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID != */
- /* fib_table_lookup_exact_match(fib_index, &local_pfx)), */
- /* "route present"); */
- /* midchain_adj = adj_get(midchain_ai); */
- /* FIB_TEST((midchain_adj->sub_type.midchain.adj_index == drop_ai), */
- /* "tunnel midchain has re-stacked on drop"); */
-
- /* /\* */
- /* * remove the tunnel and its MPLS encaps */
- /* *\/ */
- /* rv = vnet_mpls_gre_add_del_tunnel(&tun_src, */
- /* &tun_dst_pfx.fp_addr.ip4, */
- /* &tun_itf_pfx.fp_addr.ip4, */
- /* tun_itf_pfx.fp_len, */
- /* 0, // inner VRF */
- /* 0, // outer VRF */
- /* &tunnel_sw_if_index, */
- /* 0, // l2 only */
- /* 0); // DEL */
- /* FIB_TEST((0 == rv), "Tunnel removed"); */
- /* rv = vnet_mpls_gre_add_del_tunnel(&tun_src, */
- /* &tun_dst_pfx.fp_addr.ip4, */
- /* &tun_itf_pfx.fp_addr.ip4, */
- /* tun_itf_pfx.fp_len, */
- /* 0, // inner VRF */
- /* 0, // outer VRF */
- /* &tunnel_sw_if_index, */
- /* 0, // l2 only */
- /* 0); // DEL */
- /* FIB_TEST((0 != rv), "No existant Tunnel not removed"); */
-
- /* rv = vnet_mpls_add_del_encap(&tun_dst_pfx.fp_addr.ip4, */
- /* 0, // inner VRF */
- /* encap_labels, */
- /* ~0, // policy_tunnel_index, */
- /* 0, // no_dst_hash, */
- /* NULL, */
- /* 0); // ADD */
- /* FIB_TEST((0 == rv), "MPLS encap deleted"); */
-
- /* vec_free(encap_labels); */
-
- /* /\* */
- /* * no more FIB entries expected */
- /* *\/ */
- /* fei = fib_table_lookup_exact_match(fib_index, &tun_itf_pfx); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "tun itf route removed"); */
- /* fei = fib_table_lookup_exact_match(fib_index, &tun_dst_pfx); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "tun dst route removed"); */
-
- /* /\* */
- /* * CLEANUP the connecteds */
- /* *\/ */
- /* local2_pfx.fp_len = 24; */
- /* fib_table_entry_delete(fib_index, &local2_pfx, */
- /* FIB_SOURCE_INTERFACE); */
- /* fei = fib_table_lookup_exact_match(fib_index, &local2_pfx); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID == fei), */
- /* "attached interface route remove"); */
-
- /* local2_pfx.fp_len = 32; */
- /* fib_table_entry_special_remove(fib_index, &local2_pfx, */
- /* FIB_SOURCE_INTERFACE); */
- /* fei = fib_table_lookup_exact_match(fib_index, &local2_pfx); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID == fei), */
- /* "local interface route removed"); */
- /* local_pfx.fp_len = 24; */
- /* fib_table_entry_delete(fib_index, &local_pfx, */
- /* FIB_SOURCE_INTERFACE); */
- /* fei = fib_table_lookup_exact_match(fib_index, &local_pfx); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID == fei), */
- /* "attached interface route remove"); */
-
- /* local_pfx.fp_len = 32; */
- /* fib_table_entry_special_remove(fib_index, &local_pfx, */
- /* FIB_SOURCE_INTERFACE); */
- /* fei = fib_table_lookup_exact_match(fib_index, &local_pfx); */
- /* FIB_TEST((FIB_NODE_INDEX_INVALID == fei), */
- /* "local interface route removed"); */
-}
-
-/*
* Test Attached Exports
*/
static void
@@ -4931,7 +4580,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
@@ -4947,7 +4596,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &local_pfx);
@@ -4976,7 +4625,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_10_10_10_1_s_32);
@@ -5003,7 +4652,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached export created");
@@ -5045,7 +4694,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_2_s_32);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib2 present");
@@ -5076,7 +4725,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(import_fib_index1, &local_pfx);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "attached export created");
@@ -5113,7 +4762,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_3_s_32);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib3 present");
@@ -5185,7 +4834,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
FIB_TEST((FIB_NODE_INDEX_INVALID == fei), "ADJ-fib1 removed from FIB1");
@@ -5208,7 +4857,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(import_fib_index1, &pfx_10_10_10_1_s_32);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "ADJ-fib1 imported in FIB1");
@@ -5240,7 +4889,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
dpo = fib_entry_contribute_ip_forwarding(fei);
@@ -5296,7 +4945,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
dpo = fib_entry_contribute_ip_forwarding(fei);
@@ -5332,7 +4981,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
@@ -5371,7 +5020,7 @@ fib_test_ae (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
@@ -5430,6 +5079,7 @@ fib_test_ae (void)
adj_nbr_db_size());
}
+
/*
* Test the recursive route handling for GRE tunnels
*/
@@ -5440,7 +5090,7 @@ fib_test_label (void)
const u32 fib_index = 0;
test_main_t *tm;
ip4_main_t *im;
- int lb_count;
+ int lb_count, ii;
lb_count = pool_elts(load_balance_pool);
tm = &test_main;
@@ -5476,7 +5126,7 @@ fib_test_label (void)
tm->hw[0]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &local0_pfx);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
@@ -5492,7 +5142,7 @@ fib_test_label (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &local0_pfx);
@@ -5522,7 +5172,7 @@ fib_test_label (void)
tm->hw[1]->sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &local1_pfx);
FIB_TEST((FIB_NODE_INDEX_INVALID != fei),
@@ -5538,7 +5188,7 @@ fib_test_label (void)
tm->hw[1]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup_exact_match(fib_index, &local1_pfx);
@@ -5608,6 +5258,9 @@ fib_test_label (void)
.eos = MPLS_NON_EOS,
},
};
+ mpls_label_t *l99 = NULL;
+ vec_add1(l99, 99);
+
fib_table_entry_update_one_path(fib_index,
&pfx_1_1_1_1_s_32,
FIB_SOURCE_API,
@@ -5617,7 +5270,7 @@ fib_test_label (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- 99,
+ l99,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
@@ -5644,6 +5297,8 @@ fib_test_label (void)
.adj = ai_mpls_10_10_11_1,
},
};
+ mpls_label_t *l_imp_null = NULL;
+ vec_add1(l_imp_null, MPLS_IETF_IMPLICIT_NULL_LABEL);
fei = fib_table_entry_path_add(fib_index,
&pfx_1_1_1_1_s_32,
@@ -5654,7 +5309,7 @@ fib_test_label (void)
tm->hw[1]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_IETF_IMPLICIT_NULL_LABEL,
+ l_imp_null,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(fib_test_validate_entry(fei,
@@ -5732,7 +5387,7 @@ fib_test_label (void)
tm->hw[1]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(fib_test_validate_entry(fei,
@@ -5800,6 +5455,8 @@ fib_test_label (void)
.eos = MPLS_EOS,
},
};
+ mpls_label_t *l1600 = NULL;
+ vec_add1(l1600, 1600);
fib_table_entry_update_one_path(fib_index,
&pfx_2_2_2_2_s_32,
@@ -5810,7 +5467,7 @@ fib_test_label (void)
~0,
fib_index,
1,
- 1600,
+ l1600,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
@@ -6063,6 +5720,9 @@ fib_test_label (void)
/*
* add back the path with the valid label
*/
+ l99 = NULL;
+ vec_add1(l99, 99);
+
fib_table_entry_path_add(fib_index,
&pfx_1_1_1_1_s_32,
FIB_SOURCE_API,
@@ -6072,7 +5732,7 @@ fib_test_label (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- 99,
+ l99,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_1_1_1_1_s_32);
@@ -6191,6 +5851,8 @@ fib_test_label (void)
.eos = MPLS_EOS,
},
};
+ mpls_label_t *l101 = NULL;
+ vec_add1(l101, 101);
fei = fib_table_entry_update_one_path(fib_index,
&pfx_1_1_1_2_s_32,
@@ -6201,7 +5863,7 @@ fib_test_label (void)
tm->hw[0]->sw_if_index,
~0, // invalid fib index
1,
- 101,
+ l101,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(fib_test_validate_entry(fei,
@@ -6229,6 +5891,9 @@ fib_test_label (void)
.eos = MPLS_EOS,
},
};
+ mpls_label_t *l1601 = NULL;
+ vec_add1(l1601, 1601);
+
l1600_eos_o_1_1_1_1.label_o_lb.lb = non_eos_1_1_1_1.dpoi_index;
fei = fib_table_entry_path_add(fib_index,
@@ -6240,7 +5905,7 @@ fib_test_label (void)
~0,
fib_index,
1,
- 1601,
+ l1601,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(fib_test_validate_entry(fei,
@@ -6256,6 +5921,9 @@ fib_test_label (void)
* update the via-entry so it no longer has an imp-null path.
* the LB for the recursive can use an imp-null
*/
+ l_imp_null = NULL;
+ vec_add1(l_imp_null, MPLS_IETF_IMPLICIT_NULL_LABEL);
+
fei = fib_table_entry_update_one_path(fib_index,
&pfx_1_1_1_2_s_32,
FIB_SOURCE_API,
@@ -6265,7 +5933,7 @@ fib_test_label (void)
tm->hw[1]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_IETF_IMPLICIT_NULL_LABEL,
+ l_imp_null,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(fib_test_validate_entry(fei,
@@ -6298,7 +5966,7 @@ fib_test_label (void)
tm->hw[1]->sw_if_index,
~0, // invalid fib index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST(fib_test_validate_entry(fei,
@@ -6340,7 +6008,7 @@ fib_test_label (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fib_entry_contribute_forwarding(fib_table_lookup(fib_index,
@@ -6384,7 +6052,7 @@ fib_test_label (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_2_2_2_4_s_32);
@@ -6398,6 +6066,54 @@ fib_test_label (void)
dpo_reset(&ip_1_1_1_1);
/*
+ * Create an entry with a deep label stack
+ */
+ fib_prefix_t pfx_2_2_5_5_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x02020505),
+ },
+ };
+ fib_test_lb_bucket_t ls_eos_o_10_10_10_1 = {
+ .type = FT_LB_LABEL_STACK_O_ADJ,
+ .label_stack_o_adj = {
+ .adj = ai_mpls_10_10_11_1,
+ .label_stack_size = 8,
+ .label_stack = {
+ 200, 201, 202, 203, 204, 205, 206, 207
+ },
+ .eos = MPLS_EOS,
+ },
+ };
+ mpls_label_t *label_stack = NULL;
+ vec_validate(label_stack, 7);
+ for (ii = 0; ii < 8; ii++)
+ {
+ label_stack[ii] = ii + 200;
+ }
+
+ fei = fib_table_entry_update_one_path(fib_index,
+ &pfx_2_2_5_5_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_11_1,
+ tm->hw[1]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ label_stack,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(fei,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ls_eos_o_10_10_10_1),
+ "2.2.5.5/32 LB 1 buckets via: "
+ "adj 10.10.11.1");
+ fib_table_entry_delete_index(fei, FIB_SOURCE_API);
+
+ /*
* cleanup
*/
fib_table_entry_delete(fib_index,
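The deep-stack test above exercises the main API change in this patch: the per-path out-label argument is now a vector of mpls_label_t (NULL meaning unlabelled) rather than a single label or MPLS_LABEL_INVALID. A minimal sketch of programming a labelled path with the vector form, using placeholder prefix, next-hop and label values:

/* Sketch only: pfx, nh and sw_if_index are placeholders. */
mpls_label_t *out_labels = NULL;

vec_add1(out_labels, 16001);
vec_add1(out_labels, 16002);

fib_table_entry_update_one_path(fib_index, &pfx,
                                FIB_SOURCE_API,
                                FIB_ENTRY_FLAG_NONE,
                                FIB_PROTOCOL_IP4,
                                &nh,
                                sw_if_index,
                                ~0,          // invalid fib index
                                1,           // weight
                                out_labels,  // out-label stack (vector)
                                FIB_ROUTE_PATH_FLAG_NONE);
/* The tests above rebuild the label vector before each add, which suggests
 * ownership passes to the FIB; this sketch assumes the same. */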
@@ -6950,6 +6666,7 @@ lfib_test_deagg (void)
lookup_dpo_t *lkd;
test_main_t *tm;
int lb_count;
+ adj_index_t ai_mpls_10_10_10_1;
tm = &test_main;
lb_count = pool_elts(load_balance_pool);
@@ -6964,6 +6681,14 @@ lfib_test_deagg (void)
tm->hw[0]->sw_if_index,
1);
+ ip46_address_t nh_10_10_10_1 = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01),
+ };
+ ai_mpls_10_10_10_1 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ VNET_LINK_MPLS,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index);
+
/*
* Test the specials stack properly.
*/
@@ -7023,7 +6748,7 @@ lfib_test_deagg (void)
~0,
fib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST((lfe == fib_table_lookup(lfib_index, &pfx)),
@@ -7074,7 +6799,7 @@ lfib_test_deagg (void)
~0,
lfib_index,
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
FIB_TEST((lfe == fib_table_lookup(lfib_index, &pfx)),
@@ -7113,30 +6838,192 @@ lfib_test_deagg (void)
format_mpls_unicast_label, deag_label,
format_mpls_eos_bit, MPLS_EOS);
+ dpo_reset(&dpo);
- mpls_sw_interface_enable_disable(&mpls_main,
- tm->hw[0]->sw_if_index,
- 0);
+ /*
+ * An MPLS x-connect
+ */
+ fib_prefix_t pfx_1200 = {
+ .fp_len = 21,
+ .fp_proto = FIB_PROTOCOL_MPLS,
+ .fp_label = 1200,
+ .fp_eos = MPLS_NON_EOS,
+ };
+ fib_test_lb_bucket_t neos_o_10_10_10_1 = {
+ .type = FT_LB_LABEL_STACK_O_ADJ,
+ .label_stack_o_adj = {
+ .adj = ai_mpls_10_10_10_1,
+ .label_stack_size = 4,
+ .label_stack = {
+ 200, 300, 400, 500,
+ },
+ .eos = MPLS_NON_EOS,
+ },
+ };
+ dpo_id_t neos_1200 = DPO_INVALID;
+ dpo_id_t ip_1200 = DPO_INVALID;
+ mpls_label_t *l200 = NULL;
+ vec_add1(l200, 200);
+ vec_add1(l200, 300);
+ vec_add1(l200, 400);
+ vec_add1(l200, 500);
+
+ lfe = fib_table_entry_update_one_path(fib_index,
+ &pfx_1200,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_10_10_10_1,
+ tm->hw[0]->sw_if_index,
+ ~0, // invalid fib index
+ 1,
+ l200,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ 1,
+ &neos_o_10_10_10_1),
+ "1200/0 LB 1 buckets via: "
+ "adj 10.10.11.1");
- dpo_reset(&dpo);
/*
- * +1 for the drop LB in the MPLS tables.
+ * A recursive route via the MPLS x-connect
*/
- FIB_TEST(lb_count+1 == pool_elts(load_balance_pool),
- "Load-balance resources freed %d of %d",
- lb_count+1, pool_elts(load_balance_pool));
-}
+ fib_prefix_t pfx_2_2_2_3_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x02020203),
+ },
+ };
+ fib_route_path_t *rpaths = NULL, rpath = {
+ .frp_proto = FIB_PROTOCOL_MPLS,
+ .frp_local_label = 1200,
+	.frp_sw_if_index = ~0, // recursive
+ .frp_fib_index = 0, // Default MPLS fib
+ .frp_weight = 1,
+ .frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
+ .frp_label_stack = NULL,
+ };
+ vec_add1(rpaths, rpath);
-static clib_error_t *
-lfib_test (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd_arg)
-{
- fib_test_mk_intf(4);
+ fib_table_entry_path_add2(fib_index,
+ &pfx_2_2_2_3_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ rpaths);
- lfib_test_deagg();
+ /*
+ * A labelled recursive route via the MPLS x-connect
+ */
+ fib_prefix_t pfx_2_2_2_4_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x02020204),
+ },
+ };
+ mpls_label_t *l999 = NULL;
+ vec_add1(l999, 999);
+    rpaths[0].frp_label_stack = l999;
+
+ fib_table_entry_path_add2(fib_index,
+ &pfx_2_2_2_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ rpaths);
+
+ fib_entry_contribute_forwarding(fib_table_lookup(fib_index, &pfx_1200),
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ &ip_1200);
+ fib_entry_contribute_forwarding(fib_table_lookup(fib_index, &pfx_1200),
+ FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
+ &neos_1200);
+
+ fib_test_lb_bucket_t ip_o_1200 = {
+ .type = FT_LB_O_LB,
+ .lb = {
+ .lb = ip_1200.dpoi_index,
+ },
+ };
+ fib_test_lb_bucket_t mpls_o_1200 = {
+ .type = FT_LB_LABEL_O_LB,
+ .label_o_lb = {
+ .lb = neos_1200.dpoi_index,
+ .label = 999,
+ .eos = MPLS_EOS,
+ },
+ };
- return (NULL);
+ lfe = fib_table_lookup(fib_index, &pfx_2_2_2_3_s_32);
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ip_o_1200),
+ "2.2.2.2.3/32 LB 1 buckets via: label 1200 EOS");
+ lfe = fib_table_lookup(fib_index, &pfx_2_2_2_4_s_32);
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &mpls_o_1200),
+ "2.2.2.2.4/32 LB 1 buckets via: label 1200 non-EOS");
+
+ fib_table_entry_delete(fib_index, &pfx_1200, FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index, &pfx_2_2_2_3_s_32, FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index, &pfx_2_2_2_4_s_32, FIB_SOURCE_API);
+
+ dpo_reset(&neos_1200);
+ dpo_reset(&ip_1200);
+
+ /*
+ * A recursive via a label that does not exist
+ */
+ fib_test_lb_bucket_t bucket_drop = {
+ .type = FT_LB_SPECIAL,
+ .special = {
+ .adj = DPO_PROTO_MPLS,
+ },
+ };
+
+ rpaths[0].frp_label_stack = NULL;
+ lfe = fib_table_entry_path_add2(fib_index,
+ &pfx_2_2_2_4_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ rpaths);
+
+ fib_entry_contribute_forwarding(fib_table_lookup(fib_index, &pfx_1200),
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ &ip_1200);
+ ip_o_1200.lb.lb = ip_1200.dpoi_index;
+
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &ip_o_1200),
+ "2.2.2.2.4/32 LB 1 buckets via: label 1200 EOS");
+ lfe = fib_table_lookup(fib_index, &pfx_1200);
+ FIB_TEST(fib_test_validate_entry(lfe,
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1,
+ &bucket_drop),
+ "2.2.2.4/32 LB 1 buckets via: ip4-DROP");
+
+ fib_table_entry_delete(fib_index, &pfx_2_2_2_4_s_32, FIB_SOURCE_API);
+
+ dpo_reset(&ip_1200);
+
+ /*
+ * cleanup
+ */
+ mpls_sw_interface_enable_disable(&mpls_main,
+ tm->hw[0]->sw_if_index,
+ 0);
+
+ FIB_TEST(lb_count == pool_elts(load_balance_pool),
+ "Load-balance resources freed %d of %d",
+ lb_count, pool_elts(load_balance_pool));
}
static clib_error_t *
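The rewritten lfib test pulls two different forwarding chains from the same non-EOS label entry: the unicast-IP4 chain, used by the unlabelled recursive child, and the MPLS non-EOS chain, used by the labelled one. A minimal sketch of that pattern, assuming the same pfx_1200 entry as in the test above:

dpo_id_t ip_fw = DPO_INVALID, neos_fw = DPO_INVALID;
fib_node_index_t fei;

fei = fib_table_lookup(fib_index, &pfx_1200);
fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &ip_fw);
fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS, &neos_fw);

/* ... recursive children stack on ip_fw or neos_fw as appropriate ... */

dpo_reset(&ip_fw);
dpo_reset(&neos_fw);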
@@ -7151,10 +7038,6 @@ fib_test (vlib_main_t * vm,
fib_test_v4();
fib_test_v6();
}
- else if (unformat (input, "gre"))
- {
- fib_test_gre();
- }
else if (unformat (input, "label"))
{
fib_test_label();
@@ -7163,6 +7046,10 @@ fib_test (vlib_main_t * vm,
{
fib_test_ae();
}
+ else if (unformat (input, "lfib"))
+ {
+ lfib_test_deagg();
+ }
else if (unformat (input, "walk"))
{
fib_test_walk();
@@ -7177,9 +7064,9 @@ fib_test (vlib_main_t * vm,
*/
fib_test_v4();
fib_test_v6();
- fib_test_gre();
fib_test_ae();
fib_test_label();
+ lfib_test_deagg();
}
return (NULL);
@@ -7191,12 +7078,6 @@ VLIB_CLI_COMMAND (test_fib_command, static) = {
.function = fib_test,
};
-VLIB_CLI_COMMAND (test_lfib_command, static) = {
- .path = "test lfib",
- .short_help = "mpls label fib unit tests - DO NOT RUN ON A LIVE SYSTEM",
- .function = lfib_test,
-};
-
clib_error_t *
fib_test_init (vlib_main_t *vm)
{
diff --git a/vnet/vnet/fib/fib_types.c b/vnet/vnet/fib/fib_types.c
index f52de7b8be0..d25a7731c64 100644
--- a/vnet/vnet/fib/fib_types.c
+++ b/vnet/vnet/fib/fib_types.c
@@ -64,6 +64,16 @@ fib_prefix_from_ip46_addr (const ip46_address_t *addr,
pfx->fp_addr = *addr;
}
+void
+fib_prefix_from_mpls_label (mpls_label_t label,
+ fib_prefix_t *pfx)
+{
+ pfx->fp_proto = FIB_PROTOCOL_MPLS;
+ pfx->fp_len = 21;
+ pfx->fp_label = label;
+ pfx->fp_eos = MPLS_NON_EOS;
+}
+
int
fib_prefix_cmp (const fib_prefix_t *p1,
const fib_prefix_t *p2)
@@ -301,11 +311,6 @@ fib_forw_chain_type_to_dpo_proto (fib_forward_chain_type_t fct)
case FIB_FORW_CHAIN_TYPE_ETHERNET:
return (DPO_PROTO_ETHERNET);
case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
- /*
- * insufficient information to to convert
- */
- ASSERT(0);
- break;
case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
return (DPO_PROTO_MPLS);
}
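fib_prefix_from_mpls_label() builds the 21-bit, non-EOS prefix used for the label entries in the tests above. A minimal usage sketch; the label value and the MPLS FIB index variable are illustrative:

fib_prefix_t pfx;
fib_node_index_t fei;

fib_prefix_from_mpls_label(1200, &pfx);
/* pfx.fp_proto == FIB_PROTOCOL_MPLS, pfx.fp_len == 21,
 * pfx.fp_label == 1200, pfx.fp_eos == MPLS_NON_EOS */
fei = fib_table_lookup(mpls_fib_index, &pfx);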
diff --git a/vnet/vnet/fib/fib_types.h b/vnet/vnet/fib/fib_types.h
index 92371e6b8ba..73eb944994b 100644
--- a/vnet/vnet/fib/fib_types.h
+++ b/vnet/vnet/fib/fib_types.h
@@ -292,13 +292,22 @@ typedef struct fib_route_path_t_ {
* zeros address is ambiguous.
*/
fib_protocol_t frp_proto;
- /**
- * The next-hop address.
- * Will be NULL for attached paths.
- * Will be all zeros for attached-next-hop paths on a p2p interface
- * Will be all zeros for a deag path.
- */
- ip46_address_t frp_addr;
+
+ union {
+ /**
+ * The next-hop address.
+ * Will be NULL for attached paths.
+ * Will be all zeros for attached-next-hop paths on a p2p interface
+ * Will be all zeros for a deag path.
+ */
+ ip46_address_t frp_addr;
+
+ /**
+	 * The MPLS local label to recursively resolve through.
+	 * This is valid only when frp_proto is FIB_PROTOCOL_MPLS.
+ */
+ mpls_label_t frp_local_label;
+ };
/**
* The interface.
* Will be invalid for recursive paths.
@@ -318,9 +327,9 @@ typedef struct fib_route_path_t_ {
*/
fib_route_path_flags_t frp_flags;
/**
- * The outgoing MPLS label. INVALID implies no label.
+     * The outgoing MPLS label stack. NULL implies no out-labels.
*/
- mpls_label_t frp_label;
+ mpls_label_t *frp_label_stack;
} fib_route_path_t;
/**
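With the new union and stack field, a fib_route_path_t is filled in one of two ways: an IP next-hop path that may carry an out-label stack, or a recursive path that resolves through an MPLS local label. A minimal sketch of both forms; nh_addr, sw_if_index and the label values are placeholders:

/* IP next-hop path with an out-label stack (values illustrative) */
fib_route_path_t ip_path = {
    .frp_proto = FIB_PROTOCOL_IP4,
    .frp_addr = nh_addr,            /* assumed ip46_address_t */
    .frp_sw_if_index = sw_if_index,
    .frp_fib_index = ~0,
    .frp_weight = 1,
    .frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
    .frp_label_stack = NULL,
};
vec_add1(ip_path.frp_label_stack, 16001);

/* Recursive path resolving through an MPLS local label */
fib_route_path_t mpls_path = {
    .frp_proto = FIB_PROTOCOL_MPLS,
    .frp_local_label = 1200,
    .frp_sw_if_index = ~0,          /* recursive: no interface */
    .frp_fib_index = 0,             /* MPLS FIB to resolve in */
    .frp_weight = 1,
    .frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
    .frp_label_stack = NULL,
};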
diff --git a/vnet/vnet/fib/fib_walk.c b/vnet/vnet/fib/fib_walk.c
index e5f9b87f927..e7376bf47a2 100644
--- a/vnet/vnet/fib/fib_walk.c
+++ b/vnet/vnet/fib/fib_walk.c
@@ -619,8 +619,8 @@ fib_walk_async (fib_node_type_t parent_type,
*/
return;
}
- if (0 == fib_node_child_get_n_children(parent_type,
- parent_index))
+ if (0 == fib_node_get_n_children(parent_type,
+ parent_index))
{
/*
* no children to walk - quit now
@@ -674,8 +674,8 @@ fib_walk_sync (fib_node_type_t parent_type,
*/
return;
}
- if (0 == fib_node_child_get_n_children(parent_type,
- parent_index))
+ if (0 == fib_node_get_n_children(parent_type,
+ parent_index))
{
/*
* no children to walk - quit now
diff --git a/vnet/vnet/fib/mpls_fib.h b/vnet/vnet/fib/mpls_fib.h
index 42c9a865276..93ae4623016 100644
--- a/vnet/vnet/fib/mpls_fib.h
+++ b/vnet/vnet/fib/mpls_fib.h
@@ -96,7 +96,7 @@ mpls_fib_table_get_index_for_sw_if_index (u32 sw_if_index)
{
mpls_main_t *mm = &mpls_main;
- ASSERT(vec_len(mm->fib_index_by_sw_if_index) < sw_if_index);
+ ASSERT(vec_len(mm->fib_index_by_sw_if_index) > sw_if_index);
return (mm->fib_index_by_sw_if_index[sw_if_index]);
}
diff --git a/vnet/vnet/gre/interface.c b/vnet/vnet/gre/interface.c
index 7adc5268446..d624587d8e9 100644
--- a/vnet/vnet/gre/interface.c
+++ b/vnet/vnet/gre/interface.c
@@ -331,6 +331,7 @@ vnet_gre_tunnel_add (vnet_gre_add_del_tunnel_args_t *a,
t->hw_if_index = hw_if_index;
t->outer_fib_index = outer_fib_index;
t->sw_if_index = sw_if_index;
+ t->l2_adj_index = ADJ_INDEX_INVALID;
vec_validate_init_empty (gm->tunnel_index_by_sw_if_index, sw_if_index, ~0);
gm->tunnel_index_by_sw_if_index[sw_if_index] = t - gm->tunnels;
@@ -416,6 +417,9 @@ vnet_gre_tunnel_delete (vnet_gre_add_del_tunnel_args_t *a,
if (GRE_TUNNEL_TYPE_TEB == t->type)
adj_unlock(t->l2_adj_index);
+ if (t->l2_adj_index != ADJ_INDEX_INVALID)
+ adj_unlock(t->l2_adj_index);
+
fib_entry_child_remove(t->fib_entry_index,
t->sibling_index);
fib_table_entry_delete_index(t->fib_entry_index,
diff --git a/vnet/vnet/ip/ip4_forward.c b/vnet/vnet/ip/ip4_forward.c
index d6fd380815b..2a6791e5055 100644
--- a/vnet/vnet/ip/ip4_forward.c
+++ b/vnet/vnet/ip/ip4_forward.c
@@ -729,7 +729,7 @@ ip4_add_interface_routes (u32 sw_if_index,
sw_if_index,
~0, // invalid FIB index
1,
- MPLS_LABEL_INVALID,
+ NULL, // no out-label stack
FIB_ROUTE_PATH_FLAG_NONE);
a->neighbor_probe_adj_index = fib_entry_get_adj(fei);
}
@@ -769,7 +769,7 @@ ip4_add_interface_routes (u32 sw_if_index,
sw_if_index,
~0, // invalid FIB index
1,
- MPLS_LABEL_INVALID,
+ NULL, // no out-label stack
FIB_ROUTE_PATH_FLAG_NONE);
}
diff --git a/vnet/vnet/ip/ip6_forward.c b/vnet/vnet/ip/ip6_forward.c
index f3cd640a841..1f40c429310 100644
--- a/vnet/vnet/ip/ip6_forward.c
+++ b/vnet/vnet/ip/ip6_forward.c
@@ -340,7 +340,7 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
sw_if_index,
~0, // invalid FIB index
1,
- MPLS_LABEL_INVALID,
+ NULL, // no label stack
FIB_ROUTE_PATH_FLAG_NONE);
a->neighbor_probe_adj_index = fib_entry_get_adj(fei);
}
@@ -378,7 +378,7 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
sw_if_index,
~0, // invalid FIB index
1,
- MPLS_LABEL_INVALID,
+ NULL,
FIB_ROUTE_PATH_FLAG_NONE);
}
diff --git a/vnet/vnet/ip/ip6_neighbor.c b/vnet/vnet/ip/ip6_neighbor.c
index cc176306969..a407978b3fa 100644
--- a/vnet/vnet/ip/ip6_neighbor.c
+++ b/vnet/vnet/ip/ip6_neighbor.c
@@ -586,7 +586,7 @@ vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm,
n->key.sw_if_index,
~0,
1,
- MPLS_LABEL_INVALID,
+ NULL, // no label stack
FIB_ROUTE_PATH_FLAG_NONE);
}
else
diff --git a/vnet/vnet/ip/lookup.c b/vnet/vnet/ip/lookup.c
index 511a5cc83ec..1a32b4a6467 100644
--- a/vnet/vnet/ip/lookup.c
+++ b/vnet/vnet/ip/lookup.c
@@ -348,8 +348,8 @@ vnet_ip_route_cmd (vlib_main_t * vm,
fib_route_path_t *rpaths = NULL, rpath;
dpo_id_t dpo = DPO_INVALID, *dpos = NULL;
fib_prefix_t *prefixs = NULL, pfx;
+ mpls_label_t out_label, via_label;
clib_error_t * error = NULL;
- mpls_label_t out_label;
u32 table_id, is_del;
vnet_main_t * vnm;
u32 fib_index;
@@ -361,6 +361,7 @@ vnet_ip_route_cmd (vlib_main_t * vm,
table_id = 0;
count = 1;
memset(&pfx, 0, sizeof(pfx));
+ out_label = via_label = MPLS_LABEL_INVALID;
/* Get a line of input. */
if (! unformat_user (main_input, unformat_line_input, line_input))
@@ -403,7 +404,16 @@ vnet_ip_route_cmd (vlib_main_t * vm,
error = clib_error_return(0 , "Paths then labels");
goto done;
}
- rpaths[vec_len(rpaths)-1].frp_label = out_label;
+ vec_add1(rpaths[vec_len(rpaths)-1].frp_label_stack, out_label);
+ }
+ else if (unformat (line_input, "via-label %U",
+ unformat_mpls_unicast_label,
+ &rpath.frp_local_label))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_MPLS;
+ rpath.frp_sw_if_index = ~0;
+ vec_add1(rpaths, rpath);
}
else if (unformat (line_input, "count %f", &count))
;
@@ -431,7 +441,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
&rpath.frp_sw_if_index,
&rpath.frp_weight))
{
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = FIB_PROTOCOL_IP4;
vec_add1(rpaths, rpath);
}
@@ -443,7 +452,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
&rpath.frp_sw_if_index,
&rpath.frp_weight))
{
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = FIB_PROTOCOL_IP6;
vec_add1(rpaths, rpath);
}
@@ -454,7 +462,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
unformat_vnet_sw_interface, vnm,
&rpath.frp_sw_if_index))
{
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_weight = 1;
rpath.frp_proto = FIB_PROTOCOL_IP4;
vec_add1(rpaths, rpath);
@@ -466,7 +473,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
unformat_vnet_sw_interface, vnm,
&rpath.frp_sw_if_index))
{
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_weight = 1;
rpath.frp_proto = FIB_PROTOCOL_IP6;
vec_add1(rpaths, rpath);
@@ -478,7 +484,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
{
rpath.frp_weight = 1;
rpath.frp_sw_if_index = ~0;
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = FIB_PROTOCOL_IP4;
vec_add1(rpaths, rpath);
}
@@ -489,7 +494,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
{
rpath.frp_weight = 1;
rpath.frp_sw_if_index = ~0;
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = FIB_PROTOCOL_IP6;
vec_add1(rpaths, rpath);
}
@@ -504,7 +508,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
rpath.frp_fib_index = table_id;
rpath.frp_weight = 1;
rpath.frp_sw_if_index = ~0;
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = FIB_PROTOCOL_IP4;
vec_add1(rpaths, rpath);
}
@@ -515,7 +518,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
rpath.frp_fib_index = table_id;
rpath.frp_weight = 1;
rpath.frp_sw_if_index = ~0;
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = FIB_PROTOCOL_IP6;
vec_add1(rpaths, rpath);
}
@@ -523,7 +525,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
"lookup in table %d",
&rpath.frp_fib_index))
{
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = pfx.fp_proto;
rpath.frp_sw_if_index = ~0;
vec_add1(rpaths, rpath);
@@ -532,7 +533,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
unformat (line_input, "via %U",
unformat_dpo, &dpo, prefixs[0].fp_proto))
{
- rpath.frp_label = MPLS_LABEL_INVALID;
vec_add1 (dpos, dpo);
}
else
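In the CLI parser above, each out-label parsed after a path appends to that path's frp_label_stack, and the new via-label keyword creates an MPLS-protocol recursive path. A minimal sketch of the accumulation; the exact "out-label" command form is assumed from the surrounding parser and shown only as illustration:

/* e.g. "ip route add 1.1.1.1/32 via 10.10.10.1 <intf>
 *          out-label 33 out-label 34"   (command form illustrative) */
vec_add1(rpaths[vec_len(rpaths)-1].frp_label_stack, 33);
vec_add1(rpaths[vec_len(rpaths)-1].frp_label_stack, 34);

/* whereas "via-label 1200" yields a recursive MPLS path instead: */
rpath.frp_proto = FIB_PROTOCOL_MPLS;
rpath.frp_local_label = 1200;
rpath.frp_sw_if_index = ~0;
rpath.frp_weight = 1;
vec_add1(rpaths, rpath);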
diff --git a/vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c b/vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c
index 1f2b2863695..26a93a87b9d 100644
--- a/vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c
+++ b/vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c
@@ -243,7 +243,6 @@ lisp_gpe_mk_fib_paths (const lisp_fwd_path_t * paths)
rpaths[ii].frp_sw_if_index = ladj->sw_if_index;
rpaths[ii].frp_weight = (paths[ii].weight ? paths[ii].weight : 1);
- rpaths[ii].frp_label = MPLS_LABEL_INVALID;
}
ASSERT (0 != vec_len (rpaths));
diff --git a/vnet/vnet/mpls/interface.c b/vnet/vnet/mpls/interface.c
index a0f6f2f2474..692a2d1eb62 100644
--- a/vnet/vnet/mpls/interface.c
+++ b/vnet/vnet/mpls/interface.c
@@ -22,243 +22,6 @@
#include <vnet/adj/adj_midchain.h>
#include <vnet/dpo/classify_dpo.h>
-/* manually added to the interface output node */
-#define MPLS_ETH_OUTPUT_NEXT_OUTPUT 1
-
-static uword
-mpls_eth_interface_tx (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- mpls_main_t * gm = &mpls_main;
- vnet_main_t * vnm = gm->vnet_main;
- u32 next_index;
- u32 * from, * to_next, n_left_from, n_left_to_next;
-
- /* Vector of buffer / pkt indices we're supposed to process */
- from = vlib_frame_vector_args (frame);
-
- /* Number of buffers / pkts */
- n_left_from = frame->n_vectors;
-
- /* Speculatively send the first buffer to the last disposition we used */
- next_index = node->cached_next_index;
-
- while (n_left_from > 0)
- {
- /* set up to enqueue to our disposition with index = next_index */
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- /*
- * As long as we have enough pkts left to process two pkts
- * and prefetch two pkts...
- */
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- vlib_buffer_t * b0, * b1;
- u32 bi0, next0, bi1, next1;
- mpls_eth_tunnel_t * t0, * t1;
- u32 sw_if_index0, sw_if_index1;
- vnet_hw_interface_t * hi0, * hi1;
- u8 * dst0, * dst1;
-
- /* Prefetch the next iteration */
- {
- vlib_buffer_t * p2, * p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- /*
- * Prefetch packet data. We expect to overwrite
- * the inbound L2 header with an ip header and a
- * gre header. Might want to prefetch the last line
- * of rewrite space as well; need profile data
- */
- CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
- }
-
- /* Pick up the next two buffer indices */
- bi0 = from[0];
- bi1 = from[1];
-
- /* Speculatively enqueue them where we sent the last buffer */
- to_next[0] = bi0;
- to_next[1] = bi1;
- from += 2;
- to_next += 2;
- n_left_to_next -= 2;
- n_left_from -= 2;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- sw_if_index0 = vnet_buffer(b0)->sw_if_index [VLIB_TX];
- sw_if_index1 = vnet_buffer(b1)->sw_if_index [VLIB_TX];
-
- /* get h/w intfcs */
- hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
- hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
-
- /* hw_instance = tunnel pool index */
- t0 = pool_elt_at_index (gm->eth_tunnels, hi0->hw_instance);
- t1 = pool_elt_at_index (gm->eth_tunnels, hi1->hw_instance);
-
- /* Apply rewrite - $$$$$ fixme don't use memcpy */
- vlib_buffer_advance (b0, -(word)vec_len(t0->rewrite_data));
- vlib_buffer_advance (b1, -(word)vec_len(t1->rewrite_data));
-
- dst0 = vlib_buffer_get_current (b0);
- dst1 = vlib_buffer_get_current (b1);
-
- clib_memcpy (dst0, t0->rewrite_data, vec_len(t0->rewrite_data));
- clib_memcpy (dst1, t1->rewrite_data, vec_len(t1->rewrite_data));
-
- /* Fix TX fib indices */
- vnet_buffer(b0)->sw_if_index [VLIB_TX] = t0->tx_sw_if_index;
- vnet_buffer(b1)->sw_if_index [VLIB_TX] = t1->tx_sw_if_index;
-
- /* mpls-post-rewrite takes it from here... */
- next0 = MPLS_ETH_OUTPUT_NEXT_OUTPUT;
- next1 = MPLS_ETH_OUTPUT_NEXT_OUTPUT;
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- mpls_eth_tx_trace_t *tr = vlib_add_trace (vm, node,
- b0, sizeof (*tr));
- tr->lookup_miss = 0;
- tr->tunnel_id = t0 - gm->eth_tunnels;
- tr->tx_sw_if_index = t0->tx_sw_if_index;
- tr->mpls_encap_index = t0->encap_index;
- tr->length = b0->current_length;
- hi0 = vnet_get_sup_hw_interface (vnm, t0->tx_sw_if_index);
- clib_memcpy (tr->dst, hi0->hw_address, sizeof (tr->dst));
- }
- if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
- {
- mpls_eth_tx_trace_t *tr = vlib_add_trace (vm, node,
- b1, sizeof (*tr));
- tr->lookup_miss = 0;
- tr->tunnel_id = t1 - gm->eth_tunnels;
- tr->tx_sw_if_index = t1->tx_sw_if_index;
- tr->mpls_encap_index = t1->encap_index;
- tr->length = b1->current_length;
- hi1 = vnet_get_sup_hw_interface (vnm, t1->tx_sw_if_index);
- clib_memcpy (tr->dst, hi1->hw_address, sizeof (tr->dst));
- }
-
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- }
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- vlib_buffer_t * b0;
- u32 bi0, next0;
- mpls_eth_tunnel_t * t0;
- u32 sw_if_index0;
- vnet_hw_interface_t * hi0;
- u8 * dst0;
-
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- sw_if_index0 = vnet_buffer(b0)->sw_if_index [VLIB_TX];
-
- hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
-
- t0 = pool_elt_at_index (gm->eth_tunnels, hi0->hw_instance);
-
- /* Apply rewrite - $$$$$ fixme don't use memcpy */
- vlib_buffer_advance (b0, -(word)vec_len(t0->rewrite_data));
-
- dst0 = vlib_buffer_get_current (b0);
-
- clib_memcpy (dst0, t0->rewrite_data, vec_len(t0->rewrite_data));
-
- /* Fix the TX interface */
- vnet_buffer(b0)->sw_if_index [VLIB_TX] = t0->tx_sw_if_index;
-
- /* Send the packet */
- next0 = MPLS_ETH_OUTPUT_NEXT_OUTPUT;
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- mpls_eth_tx_trace_t *tr = vlib_add_trace (vm, node,
- b0, sizeof (*tr));
- tr->lookup_miss = 0;
- tr->tunnel_id = t0 - gm->eth_tunnels;
- tr->tx_sw_if_index = t0->tx_sw_if_index;
- tr->mpls_encap_index = t0->encap_index;
- tr->length = b0->current_length;
- hi0 = vnet_get_sup_hw_interface (vnm, t0->tx_sw_if_index);
- clib_memcpy (tr->dst, hi0->hw_address, sizeof (tr->dst));
- }
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
-
- vlib_node_increment_counter (vm, mpls_input_node.index,
- MPLS_ERROR_PKTS_ENCAP, frame->n_vectors);
-
- return frame->n_vectors;
-}
-
-static u8 * format_mpls_eth_tunnel_name (u8 * s, va_list * args)
-{
- u32 dev_instance = va_arg (*args, u32);
- return format (s, "mpls-eth%d", dev_instance);
-}
-
-static u8 * format_mpls_eth_device (u8 * s, va_list * args)
-{
- u32 dev_instance = va_arg (*args, u32);
- CLIB_UNUSED (int verbose) = va_arg (*args, int);
-
- s = format (s, "MPLS-ETH tunnel: id %d\n", dev_instance);
- return s;
-}
-
-VNET_DEVICE_CLASS (mpls_eth_device_class) = {
- .name = "MPLS-ETH tunnel device",
- .format_device_name = format_mpls_eth_tunnel_name,
- .format_device = format_mpls_eth_device,
- .format_tx_trace = format_mpls_eth_tx_trace,
- .tx_function = mpls_eth_interface_tx,
- .no_flatten_output_chains = 1,
-#ifdef SOON
- .clear counter = 0;
- .admin_up_down_function = 0;
-#endif
-};
-
-VLIB_DEVICE_TX_FUNCTION_MULTIARCH (mpls_eth_device_class,
- mpls_eth_interface_tx)
-
-VNET_HW_INTERFACE_CLASS (mpls_eth_hw_interface_class) = {
- .name = "MPLS-ETH",
- .format_header = format_mpls_eth_header_with_length,
-#if 0
- .unformat_header = unformat_mpls_eth_header,
-#endif
- .build_rewrite = default_build_rewrite,
- .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
-};
u8
mpls_sw_interface_is_enabled (u32 sw_if_index)
@@ -308,753 +71,6 @@ mpls_sw_interface_enable_disable (mpls_main_t * mm,
}
-u8 * format_mpls_encap_index (u8 * s, va_list * args)
-{
- mpls_main_t * mm = va_arg (*args, mpls_main_t *);
- u32 entry_index = va_arg (*args, u32);
- mpls_encap_t * e;
- int i;
-
- e = pool_elt_at_index (mm->encaps, entry_index);
-
- for (i = 0; i < vec_len (e->labels); i++)
- s = format
- (s, "%d ", vnet_mpls_uc_get_label(clib_net_to_host_u32
- (e->labels[i].label_exp_s_ttl)));
-
- return s;
-}
-
-u8 * format_mpls_ethernet_tunnel (u8 * s, va_list * args)
-{
- mpls_eth_tunnel_t * t = va_arg (*args, mpls_eth_tunnel_t *);
- mpls_main_t * mm = &mpls_main;
-
- s = format (s, "[%d]: dst %U, adj %U/%d, labels %U\n",
- t - mm->eth_tunnels,
- format_ethernet_address, &t->tunnel_dst,
- format_ip4_address, &t->intfc_address,
- t->mask_width,
- format_mpls_encap_index, mm, t->encap_index);
-
-
- s = format (s, " tx on %U, rx fib index %d",
- format_vnet_sw_if_index_name, mm->vnet_main, t->tx_sw_if_index,
- t->inner_fib_index);
-
- return s;
-}
-
-static clib_error_t *
-show_mpls_tunnel_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- mpls_main_t * mm = &mpls_main;
- mpls_eth_tunnel_t * et;
-
- if (pool_elts (mm->eth_tunnels))
- {
- vlib_cli_output (vm, "MPLS-Ethernet tunnels");
- pool_foreach (et, mm->eth_tunnels,
- ({
- vlib_cli_output (vm, "%U", format_mpls_ethernet_tunnel, et);
- }));
- }
- else
- vlib_cli_output (vm, "No MPLS-Ethernet tunnels");
-
- return 0;
-}
-
-VLIB_CLI_COMMAND (show_mpls_tunnel_command, static) = {
- .path = "show mpls tunnel",
- .short_help = "show mpls tunnel",
- .function = show_mpls_tunnel_command_fn,
-};
-
-
-/* force inclusion from application's main.c */
-clib_error_t *mpls_interface_init (vlib_main_t *vm)
-{
- clib_error_t * error;
-
- if ((error = vlib_call_init_function (vm, mpls_policy_encap_init)))
- return error;
-
- return 0;
-}
-VLIB_INIT_FUNCTION(mpls_interface_init);
-
-
-static u8 * mpls_ethernet_rewrite (mpls_main_t *mm, mpls_eth_tunnel_t * t)
-{
- u8 * rewrite_data = 0;
- mpls_encap_t * e;
- mpls_unicast_header_t *lp0;
- int i;
-
- /* look up the encap label stack using the RX FIB and adjacency address*/
- e = mpls_encap_by_fib_and_dest (mm, t->inner_fib_index,
- t->intfc_address.as_u32);
-
- if (e == 0)
- {
- clib_warning ("no label for inner fib index %d, dst %U",
- t->inner_fib_index, format_ip4_address,
- &t->intfc_address);
- return 0;
- }
-
- vec_validate (rewrite_data,
- sizeof (mpls_unicast_header_t) * vec_len(e->labels) -1);
-
- /* Copy the encap label stack */
- lp0 = (mpls_unicast_header_t *) rewrite_data;
-
- for (i = 0; i < vec_len(e->labels); i++)
- lp0[i] = e->labels[i];
-
- return (rewrite_data);
-}
-
-int vnet_mpls_ethernet_add_del_tunnel (u8 *dst,
- ip4_address_t *intfc,
- u32 mask_width,
- u32 inner_fib_id,
- u32 tx_sw_if_index,
- u32 * tunnel_sw_if_index,
- u8 l2_only,
- u8 is_add)
-{
- ip4_main_t * im = &ip4_main;
- ip_lookup_main_t * lm = &im->lookup_main;
- mpls_main_t * mm = &mpls_main;
- vnet_main_t * vnm = vnet_get_main();
- mpls_eth_tunnel_t *tp;
- u32 inner_fib_index = 0;
- ip_adjacency_t adj;
- u32 adj_index;
- u8 * rewrite_data;
- int found_tunnel = 0;
- mpls_encap_t * e = 0;
- u32 hw_if_index = ~0;
- vnet_hw_interface_t * hi;
- u32 slot;
- u32 dummy;
-
- if (tunnel_sw_if_index == 0)
- tunnel_sw_if_index = &dummy;
-
- *tunnel_sw_if_index = ~0;
-
- if (inner_fib_id != (u32)~0)
- {
- uword * p;
-
- p = hash_get (im->fib_index_by_table_id, inner_fib_id);
- if (! p)
- return VNET_API_ERROR_NO_SUCH_FIB;
- inner_fib_index = p[0];
- }
-
- /* suppress duplicate mpls interface generation. */
- pool_foreach (tp, mm->eth_tunnels,
- ({
- /*
- * If we have a tunnel which matches (src, dst, intfc/mask)
- * AND the expected route is in the FIB, it's a dup
- */
- if (!memcmp (&tp->tunnel_dst, dst, sizeof (*dst))
- && !memcmp (&tp->intfc_address, intfc, sizeof (*intfc))
- && tp->inner_fib_index == inner_fib_index
- && FIB_NODE_INDEX_INVALID != tp->fei)
- {
- found_tunnel = 1;
-
- if (is_add)
- {
- if (l2_only)
- return 1;
- else
- {
- e = mpls_encap_by_fib_and_dest (mm, inner_fib_index,
- intfc->as_u32);
- if (e == 0)
- return VNET_API_ERROR_NO_SUCH_LABEL;
-
- goto reinstall_it;
- }
- }
- else
- {
- /* Delete */
- goto add_del_route;
- }
-
- }
- }));
-
- /* Delete, and we can't find the tunnel */
- if (is_add == 0 && found_tunnel == 0)
- return VNET_API_ERROR_NO_SUCH_ENTRY;
-
- e = mpls_encap_by_fib_and_dest (mm, inner_fib_index, intfc->as_u32);
- if (e == 0)
- return VNET_API_ERROR_NO_SUCH_LABEL;
-
- pool_get(mm->eth_tunnels, tp);
- memset (tp, 0, sizeof (*tp));
-
- if (vec_len (mm->free_eth_sw_if_indices) > 0)
- {
- hw_if_index =
- mm->free_eth_sw_if_indices[vec_len(mm->free_eth_sw_if_indices)-1];
- _vec_len (mm->free_eth_sw_if_indices) -= 1;
- hi = vnet_get_hw_interface (vnm, hw_if_index);
- hi->dev_instance = tp - mm->eth_tunnels;
- hi->hw_instance = tp - mm->eth_tunnels;
- }
- else
- {
- hw_if_index = vnet_register_interface
- (vnm, mpls_eth_device_class.index, tp - mm->eth_tunnels,
- mpls_eth_hw_interface_class.index,
- tp - mm->eth_tunnels);
- hi = vnet_get_hw_interface (vnm, hw_if_index);
-
- /* ... to make the IP and L2 x-connect cases identical */
- slot = vlib_node_add_named_next_with_slot
- (vnm->vlib_main, hi->tx_node_index,
- "interface-output", MPLS_ETH_OUTPUT_NEXT_OUTPUT);
-
- ASSERT (slot == MPLS_ETH_OUTPUT_NEXT_OUTPUT);
- }
-
- *tunnel_sw_if_index = hi->sw_if_index;
- vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
- VNET_SW_INTERFACE_FLAG_ADMIN_UP);
-
- tp->hw_if_index = hw_if_index;
-
- reinstall_it:
- clib_memcpy(tp->tunnel_dst, dst, sizeof (tp->tunnel_dst));
- tp->intfc_address.as_u32 = intfc->as_u32;
- tp->mask_width = mask_width;
- tp->inner_fib_index = inner_fib_index;
- tp->encap_index = e - mm->encaps;
- tp->tx_sw_if_index = tx_sw_if_index;
- tp->l2_only = l2_only;
-
- /* Create the adjacency and add to v4 fib */
- memset(&adj, 0, sizeof (adj));
- adj.lookup_next_index = IP_LOOKUP_NEXT_REWRITE;
-
- rewrite_data = mpls_ethernet_rewrite (mm, tp);
- if (rewrite_data == 0)
- {
- if (*tunnel_sw_if_index != ~0)
- {
- hi = vnet_get_hw_interface (vnm, tp->hw_if_index);
- vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
- 0 /* admin down */);
- vec_add1 (mm->free_eth_sw_if_indices, tp->hw_if_index);
- }
-
- pool_put (mm->eth_tunnels, tp);
- return VNET_API_ERROR_NO_SUCH_LABEL;
- }
-
- vnet_rewrite_for_sw_interface
- (vnm,
- VNET_LINK_MPLS,
- tx_sw_if_index,
- ip4_rewrite_node.index,
- tp->tunnel_dst,
- &adj.rewrite_header,
- sizeof (adj.rewrite_data));
-
- /*
- * Prepend the (0,1,2) VLAN tag ethernet header
- * we just built to the mpls header stack
- */
- vec_insert (rewrite_data, adj.rewrite_header.data_bytes, 0);
- clib_memcpy(rewrite_data,
- vnet_rewrite_get_data_internal(&adj.rewrite_header,
- sizeof (adj.rewrite_data)),
- adj.rewrite_header.data_bytes);
-
- vnet_rewrite_set_data_internal (&adj.rewrite_header,
- sizeof(adj.rewrite_data),
- rewrite_data,
- vec_len(rewrite_data));
-
- vec_free (tp->rewrite_data);
-
- tp->rewrite_data = rewrite_data;
-
- if (!l2_only)
- ip_add_adjacency (lm, &adj, 1 /* one adj */,
- &adj_index);
-
- add_del_route:
-
- if (!l2_only)
- {
- const fib_prefix_t pfx = {
- .fp_addr = {
- .ip4 = tp->intfc_address,
- },
- .fp_len = tp->mask_width,
- .fp_proto = FIB_PROTOCOL_IP4,
- };
- if (is_add)
- tp->fei = fib_table_entry_special_add(tp->inner_fib_index,
- &pfx,
- FIB_SOURCE_API,
- FIB_ENTRY_FLAG_NONE,
- adj_index);
- else
- {
- fib_table_entry_delete(tp->inner_fib_index, &pfx, FIB_SOURCE_API);
- tp->fei = FIB_NODE_INDEX_INVALID;
- }
- }
- if (is_add == 0 && found_tunnel)
- {
- hi = vnet_get_hw_interface (vnm, tp->hw_if_index);
- vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
- 0 /* admin down */);
- vec_add1 (mm->free_eth_sw_if_indices, tp->hw_if_index);
- vec_free (tp->rewrite_data);
- pool_put (mm->eth_tunnels, tp);
- }
-
- return 0;
-}
-
-static clib_error_t *
-create_mpls_ethernet_tunnel_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- unformat_input_t _line_input, * line_input = &_line_input;
- vnet_main_t * vnm = vnet_get_main();
- ip4_address_t intfc;
- int adj_set = 0;
- u8 dst[6];
- int dst_set = 0, intfc_set = 0;
- u32 mask_width;
- u32 inner_fib_id = (u32)~0;
- int rv;
- u8 is_del = 0;
- u8 l2_only = 0;
- u32 tx_sw_if_index;
- u32 sw_if_index = ~0;
-
- /* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
- return 0;
-
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (line_input, "dst %U",
- unformat_ethernet_address, &dst))
- dst_set = 1;
- else if (unformat (line_input, "adj %U/%d",
- unformat_ip4_address, &intfc, &mask_width))
- adj_set = 1;
- else if (unformat (line_input, "tx-intfc %U",
- unformat_vnet_sw_interface, vnm, &tx_sw_if_index))
- intfc_set = 1;
- else if (unformat (line_input, "fib-id %d", &inner_fib_id))
- ;
- else if (unformat (line_input, "l2-only"))
- l2_only = 1;
- else if (unformat (line_input, "del"))
- is_del = 1;
- else
- return clib_error_return (0, "unknown input '%U'",
- format_unformat_error, line_input);
- }
-
- if (!intfc_set)
- return clib_error_return (0, "missing tx-intfc");
-
- if (!dst_set)
- return clib_error_return (0, "missing: dst <ethernet-address>");
-
- if (!adj_set)
- return clib_error_return (0, "missing: intfc <ip-address>/<mask-width>");
-
-
- rv = vnet_mpls_ethernet_add_del_tunnel (dst, &intfc, mask_width,
- inner_fib_id, tx_sw_if_index,
- &sw_if_index,
- l2_only, !is_del);
-
- switch (rv)
- {
- case 0:
- if (!is_del)
- vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
- break;
- case VNET_API_ERROR_NO_SUCH_FIB:
- return clib_error_return (0, "rx fib ID %d doesn't exist\n",
- inner_fib_id);
-
- case VNET_API_ERROR_NO_SUCH_ENTRY:
- return clib_error_return (0, "tunnel not found\n");
-
- case VNET_API_ERROR_NO_SUCH_LABEL:
- /*
- * This happens when there's no MPLS label for the dst address
- * no need for two error messages.
- */
- return clib_error_return (0, "no label for %U in fib %d",
- format_ip4_address, &intfc, inner_fib_id);
- break;
-
- default:
- return clib_error_return (0, "vnet_mpls_ethernet_add_del_tunnel returned %d", rv);
- break;
- }
- return 0;
-}
-
-
-VLIB_CLI_COMMAND (create_mpls_ethernet_tunnel_command, static) = {
- .path = "create mpls ethernet tunnel",
- .short_help =
- "create mpls ethernet tunnel [del] dst <mac-addr> intfc <addr>/<mw>",
- .function = create_mpls_ethernet_tunnel_command_fn,
-};
-
-
-int vnet_mpls_policy_tunnel_add_rewrite (mpls_main_t * mm,
- mpls_encap_t * e,
- u32 policy_tunnel_index)
-{
- mpls_eth_tunnel_t * t;
- ip_adjacency_t adj;
- u8 * rewrite_data = 0;
- u8 * label_start;
- mpls_unicast_header_t *lp;
- int i;
-
- if (pool_is_free_index (mm->eth_tunnels, policy_tunnel_index))
- return VNET_API_ERROR_NO_SUCH_ENTRY;
-
- t = pool_elt_at_index (mm->eth_tunnels, policy_tunnel_index);
-
- memset (&adj, 0, sizeof (adj));
-
- /* Build L2 encap */
- vnet_rewrite_for_sw_interface
- (mm->vnet_main,
- VNET_LINK_MPLS,
- t->tx_sw_if_index,
- mpls_policy_encap_node.index,
- t->tunnel_dst,
- &adj.rewrite_header,
- sizeof (adj.rewrite_data));
-
- vec_validate (rewrite_data, adj.rewrite_header.data_bytes -1);
-
- clib_memcpy(rewrite_data,
- vnet_rewrite_get_data_internal(&adj.rewrite_header,
- sizeof (adj.rewrite_data)),
- adj.rewrite_header.data_bytes);
-
- /* Append the label stack */
-
- vec_add2 (rewrite_data, label_start, vec_len(e->labels) * sizeof (u32));
-
- lp = (mpls_unicast_header_t *) label_start;
-
- for (i = 0; i < vec_len(e->labels); i++)
- lp[i] = e->labels[i];
-
- /* Remember the rewrite data */
- e->rewrite = rewrite_data;
- e->output_next_index = adj.rewrite_header.next_index;
-
- return 0;
-}
-
-int vnet_mpls_ethernet_add_del_policy_tunnel (u8 *dst,
- ip4_address_t *intfc,
- u32 mask_width,
- u32 inner_fib_id,
- u32 tx_sw_if_index,
- u32 * tunnel_sw_if_index,
- u32 classify_table_index,
- u32 * new_tunnel_index,
- u8 l2_only,
- u8 is_add)
-{
- ip4_main_t * im = &ip4_main;
- mpls_main_t * mm = &mpls_main;
- vnet_main_t * vnm = vnet_get_main();
- mpls_eth_tunnel_t *tp;
- u32 inner_fib_index = 0;
- int found_tunnel = 0;
- mpls_encap_t * e = 0;
- u32 hw_if_index = ~0;
- vnet_hw_interface_t * hi;
- u32 slot;
- u32 dummy;
-
- if (tunnel_sw_if_index == 0)
- tunnel_sw_if_index = &dummy;
-
- *tunnel_sw_if_index = ~0;
-
- if (inner_fib_id != (u32)~0)
- {
- uword * p;
-
- p = hash_get (im->fib_index_by_table_id, inner_fib_id);
- if (! p)
- return VNET_API_ERROR_NO_SUCH_FIB;
- inner_fib_index = p[0];
- }
-
- /* suppress duplicate mpls interface generation. */
- pool_foreach (tp, mm->eth_tunnels,
- ({
- /*
- * If we have a tunnel which matches (src, dst, intfc/mask)
- * AND the expected route is in the FIB, it's a dup
- */
- if (!memcmp (&tp->tunnel_dst, dst, sizeof (*dst))
- && !memcmp (&tp->intfc_address, intfc, sizeof (*intfc))
- && tp->inner_fib_index == inner_fib_index
- && FIB_NODE_INDEX_INVALID != tp->fei)
- {
- found_tunnel = 1;
-
- if (is_add)
- {
- if (l2_only)
- return 1;
- else
- {
- goto reinstall_it;
- }
- }
- else
- {
- /* Delete */
- goto add_del_route;
- }
-
- }
- }));
-
- /* Delete, and we can't find the tunnel */
- if (is_add == 0 && found_tunnel == 0)
- return VNET_API_ERROR_NO_SUCH_ENTRY;
-
- pool_get(mm->eth_tunnels, tp);
- memset (tp, 0, sizeof (*tp));
-
- if (vec_len (mm->free_eth_sw_if_indices) > 0)
- {
- hw_if_index =
- mm->free_eth_sw_if_indices[vec_len(mm->free_eth_sw_if_indices)-1];
- _vec_len (mm->free_eth_sw_if_indices) -= 1;
- hi = vnet_get_hw_interface (vnm, hw_if_index);
- hi->dev_instance = tp - mm->eth_tunnels;
- hi->hw_instance = tp - mm->eth_tunnels;
- }
- else
- {
- hw_if_index = vnet_register_interface
- (vnm, mpls_eth_device_class.index, tp - mm->eth_tunnels,
- mpls_eth_hw_interface_class.index,
- tp - mm->eth_tunnels);
- hi = vnet_get_hw_interface (vnm, hw_if_index);
-
- /* ... to make the IP and L2 x-connect cases identical */
- slot = vlib_node_add_named_next_with_slot
- (vnm->vlib_main, hi->tx_node_index,
- "interface-output", MPLS_ETH_OUTPUT_NEXT_OUTPUT);
-
- ASSERT (slot == MPLS_ETH_OUTPUT_NEXT_OUTPUT);
- }
-
- *tunnel_sw_if_index = hi->sw_if_index;
- vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
- VNET_SW_INTERFACE_FLAG_ADMIN_UP);
-
- tp->hw_if_index = hw_if_index;
-
- reinstall_it:
- clib_memcpy(tp->tunnel_dst, dst, sizeof (tp->tunnel_dst));
- tp->intfc_address.as_u32 = intfc->as_u32;
- tp->mask_width = mask_width;
- tp->inner_fib_index = inner_fib_index;
- tp->encap_index = e - mm->encaps;
- tp->tx_sw_if_index = tx_sw_if_index;
- tp->l2_only = l2_only;
- tp->fei = FIB_NODE_INDEX_INVALID;
-
- if (new_tunnel_index)
- *new_tunnel_index = tp - mm->eth_tunnels;
-
- add_del_route:
-
- if (!l2_only)
- {
- const fib_prefix_t pfx = {
- .fp_addr = {
- .ip4 = tp->intfc_address,
- },
- .fp_len = tp->mask_width,
- .fp_proto = FIB_PROTOCOL_IP4,
- };
- dpo_id_t dpo = DPO_INVALID;
-
- if (is_add)
- {
- dpo_set(&dpo,
- DPO_CLASSIFY,
- DPO_PROTO_IP4,
- classify_dpo_create(DPO_PROTO_IP4,
- classify_table_index));
-
- tp->fei = fib_table_entry_special_dpo_add(tp->inner_fib_index,
- &pfx,
- FIB_SOURCE_API,
- FIB_ENTRY_FLAG_EXCLUSIVE,
- &dpo);
- dpo_reset(&dpo);
- }
- else
- {
- fib_table_entry_delete(tp->inner_fib_index, &pfx, FIB_SOURCE_API);
- tp->fei = FIB_NODE_INDEX_INVALID;
- }
- }
- if (is_add == 0 && found_tunnel)
- {
- hi = vnet_get_hw_interface (vnm, tp->hw_if_index);
- vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
- 0 /* admin down */);
- vec_add1 (mm->free_eth_sw_if_indices, tp->hw_if_index);
- pool_put (mm->eth_tunnels, tp);
- }
-
- return 0;
-}
-
-static clib_error_t *
-create_mpls_ethernet_policy_tunnel_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- unformat_input_t _line_input, * line_input = &_line_input;
- vnet_main_t * vnm = vnet_get_main();
- ip4_address_t intfc;
- int adj_set = 0;
- u8 dst[6];
- int dst_set = 0, intfc_set = 0;
- u32 mask_width;
- u32 inner_fib_id = (u32)~0;
- u32 classify_table_index = (u32)~0;
- u32 new_tunnel_index;
- int rv;
- u8 is_del = 0;
- u8 l2_only = 0;
- u32 tx_sw_if_index;
-
- /* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
- return 0;
-
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (line_input, "dst %U",
- unformat_ethernet_address, &dst))
- dst_set = 1;
- else if (unformat (line_input, "adj %U/%d",
- unformat_ip4_address, &intfc, &mask_width))
- adj_set = 1;
- else if (unformat (line_input, "tx-intfc %U",
- unformat_vnet_sw_interface, vnm, &tx_sw_if_index))
- intfc_set = 1;
- else if (unformat (line_input, "classify-table-index %d",
- &classify_table_index))
- ;
- else if (unformat (line_input, "fib-id %d", &inner_fib_id))
- ;
- else if (unformat (line_input, "l2-only"))
- l2_only = 1;
- else if (unformat (line_input, "del"))
- is_del = 1;
- else
- return clib_error_return (0, "unknown input '%U'",
- format_unformat_error, line_input);
- }
-
- if (classify_table_index == ~0)
- return clib_error_return (0, "missing classify_table_index");
-
- if (!intfc_set)
- return clib_error_return (0, "missing tx-intfc");
-
- if (!dst_set)
- return clib_error_return (0, "missing: dst <ethernet-address>");
-
- if (!adj_set)
- return clib_error_return (0, "missing: intfc <ip-address>/<mask-width>");
-
-
- rv = vnet_mpls_ethernet_add_del_policy_tunnel (dst, &intfc, mask_width,
- inner_fib_id, tx_sw_if_index,
- 0 /* tunnel sw_if_index */,
- classify_table_index,
- &new_tunnel_index,
- l2_only, !is_del);
- switch (rv)
- {
- case 0:
- if (!is_del)
- vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), new_tunnel_index);
- break;
- case VNET_API_ERROR_NO_SUCH_FIB:
- return clib_error_return (0, "rx fib ID %d doesn't exist\n",
- inner_fib_id);
-
- case VNET_API_ERROR_NO_SUCH_ENTRY:
- return clib_error_return (0, "tunnel not found\n");
-
- case VNET_API_ERROR_NO_SUCH_LABEL:
- /*
- * This happens when there's no MPLS label for the dst address
- * no need for two error messages.
- */
- return clib_error_return (0, "no label for %U in fib %d",
- format_ip4_address, &intfc, inner_fib_id);
- break;
-
- default:
- return clib_error_return (0, "vnet_mpls_ethernet_add_del_policy_tunnel returned %d", rv);
- break;
- }
-
- return 0;
-}
-
-VLIB_CLI_COMMAND (create_mpls_ethernet_policy_tunnel_command, static) = {
- .path = "create mpls ethernet policy tunnel",
- .short_help =
- "create mpls ethernet policy tunnel [del] dst <mac-addr> intfc <addr>/<mw>\n"
- " classify-table-index <nn>",
- .function = create_mpls_ethernet_policy_tunnel_command_fn,
-};
-
static clib_error_t *
mpls_interface_enable_disable (vlib_main_t * vm,
unformat_input_t * input,
@@ -1090,6 +106,14 @@ mpls_interface_enable_disable (vlib_main_t * vm,
return error;
}
+/*?
+ * This command enables an interface to accept MPLS packets
+ *
+ * @cliexpar
+ * @cliexstart{set interface mpls}
+ * set interface mpls GigEthernet0/8/0 enable
+ * @cliexend
+ ?*/
VLIB_CLI_COMMAND (set_interface_ip_table_command, static) = {
.path = "set interface mpls",
.function = mpls_interface_enable_disable,
diff --git a/vnet/vnet/mpls/mpls.c b/vnet/vnet/mpls/mpls.c
index aa1d963dca1..e6ae4980067 100644
--- a/vnet/vnet/mpls/mpls.c
+++ b/vnet/vnet/mpls/mpls.c
@@ -121,45 +121,6 @@ unformat_mpls_header (unformat_input_t * input, va_list * args)
return 1;
}
-u8 * format_mpls_eth_tx_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- mpls_eth_tx_trace_t * t = va_arg (*args, mpls_eth_tx_trace_t *);
- mpls_main_t * mm = &mpls_main;
-
- if (t->lookup_miss)
- s = format (s, "MPLS: lookup miss");
- else
- {
- s = format (s, "MPLS: tunnel %d labels %U len %d tx_sw_index %d dst %U",
- t->tunnel_id,
- format_mpls_encap_index, mm, t->mpls_encap_index,
- clib_net_to_host_u16 (t->length),
- t->tx_sw_if_index,
- format_ethernet_address, t->dst);
- }
- return s;
-}
-
-u8 * format_mpls_eth_header_with_length (u8 * s, va_list * args)
-{
- ethernet_header_t * h = va_arg (*args, ethernet_header_t *);
- mpls_unicast_header_t * m = (mpls_unicast_header_t *)(h+1);
- u32 max_header_bytes = va_arg (*args, u32);
- uword header_bytes;
-
- header_bytes = sizeof (h[0]);
- if (max_header_bytes != 0 && header_bytes > max_header_bytes)
- return format (s, "ethernet header truncated");
-
- s = format
- (s, "ETHERNET-MPLS label %d",
- vnet_mpls_uc_get_label (clib_net_to_host_u32 (m->label_exp_s_ttl)));
-
- return s;
-}
-
uword
unformat_mpls_label_net_byte_order (unformat_input_t * input,
va_list * args)
@@ -176,157 +137,6 @@ unformat_mpls_label_net_byte_order (unformat_input_t * input,
return 1;
}
-mpls_encap_t *
-mpls_encap_by_fib_and_dest (mpls_main_t * mm, u32 rx_fib, u32 dst_address)
-{
- uword * p;
- mpls_encap_t * e;
- u64 key;
-
- key = ((u64)rx_fib<<32) | ((u64) dst_address);
- p = hash_get (mm->mpls_encap_by_fib_and_dest, key);
-
- if (!p)
- return 0;
-
- e = pool_elt_at_index (mm->encaps, p[0]);
- return e;
-}
-
-int vnet_mpls_add_del_encap (ip4_address_t *dest, u32 fib_id,
- u32 *labels_host_byte_order,
- u32 policy_tunnel_index,
- int no_dst_hash, u32 * indexp, int is_add)
-{
- mpls_main_t * mm = &mpls_main;
- ip4_main_t * im = &ip4_main;
- mpls_encap_t * e;
- u32 label_net_byte_order, label_host_byte_order;
- u32 fib_index;
- u64 key;
- uword *p;
- int i;
-
- p = hash_get (im->fib_index_by_table_id, fib_id);
- if (! p)
- return VNET_API_ERROR_NO_SUCH_FIB;
-
- fib_index = p[0];
-
- key = ((u64)fib_index<<32) | ((u64) dest->as_u32);
-
- if (is_add)
- {
- pool_get (mm->encaps, e);
- memset (e, 0, sizeof (*e));
-
- for (i = 0; i < vec_len (labels_host_byte_order); i++)
- {
- mpls_unicast_header_t h;
- label_host_byte_order = labels_host_byte_order[i];
-
- /* Reformat label into mpls_unicast_header_t */
- label_host_byte_order <<= 12;
- // FIXME NEOS AND EOS
- //if (i == vec_len(labels_host_byte_order) - 1)
- // label_host_byte_order |= 1<<8; /* S=1 */
- label_host_byte_order |= 0xff; /* TTL=FF */
- label_net_byte_order = clib_host_to_net_u32 (label_host_byte_order);
- h.label_exp_s_ttl = label_net_byte_order;
- vec_add1 (e->labels, h);
- }
- if (no_dst_hash == 0)
- hash_set (mm->mpls_encap_by_fib_and_dest, key, e - mm->encaps);
- if (indexp)
- *indexp = e - mm->encaps;
- if (policy_tunnel_index != ~0)
- return vnet_mpls_policy_tunnel_add_rewrite (mm, e, policy_tunnel_index);
- }
- else
- {
- p = hash_get (mm->mpls_encap_by_fib_and_dest, key);
- if (!p)
- return VNET_API_ERROR_NO_SUCH_LABEL;
-
- e = pool_elt_at_index (mm->encaps, p[0]);
-
- vec_free (e->labels);
- vec_free (e->rewrite);
- pool_put(mm->encaps, e);
-
- if (no_dst_hash == 0)
- hash_unset (mm->mpls_encap_by_fib_and_dest, key);
- }
- return 0;
-}
-
-static clib_error_t *
-mpls_add_encap_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- u32 fib_id;
- u32 *labels = 0;
- u32 this_label;
- ip4_address_t dest;
- u32 policy_tunnel_index = ~0;
- int no_dst_hash = 0;
- int rv;
- int fib_set = 0;
- int dest_set = 0;
-
- while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (input, "fib %d", &fib_id))
- fib_set = 1;
- else if (unformat (input, "dest %U", unformat_ip4_address, &dest))
- dest_set = 1;
- else if (unformat (input, "no-dst-hash"))
- no_dst_hash = 1;
- else if (unformat (input, "label %d", &this_label))
- vec_add1 (labels, this_label);
- else if (unformat (input, "policy-tunnel %d", &policy_tunnel_index))
- ;
- else
- break;
- }
-
- if (fib_set == 0)
- return clib_error_return (0, "fib-id missing");
- if (dest_set == 0)
- return clib_error_return (0, "destination IP address missing");
- if (vec_len (labels) == 0)
- return clib_error_return (0, "label stack missing");
-
- rv = vnet_mpls_add_del_encap (&dest, fib_id, labels,
- policy_tunnel_index,
- no_dst_hash, 0 /* indexp */,
- 1 /* is_add */);
- vec_free (labels);
-
- switch (rv)
- {
- case 0:
- break;
-
- case VNET_API_ERROR_NO_SUCH_FIB:
- return clib_error_return (0, "fib id %d unknown", fib_id);
-
- default:
- return clib_error_return (0, "vnet_mpls_add_del_encap returned %d",
- rv);
- }
-
- return 0;
-}
-
-VLIB_CLI_COMMAND (mpls_add_encap_command, static) = {
- .path = "mpls encap add",
- .short_help =
- "mpls encap add label <label> ... fib <id> dest <ip4-address>",
- .function = mpls_add_encap_command_fn,
-};
-
u8 * format_mpls_unicast_header_host_byte_order (u8 * s, va_list * args)
{
mpls_unicast_header_t *h = va_arg(*args, mpls_unicast_header_t *);
@@ -351,46 +161,6 @@ u8 * format_mpls_unicast_header_net_byte_order (u8 * s, va_list * args)
&h_host);
}
-static clib_error_t *
-mpls_del_encap_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- u32 fib_id;
- ip4_address_t dest;
- int rv;
-
- if (unformat (input, "fib %d dest %U", &fib_id,
- unformat_ip4_address, &dest))
- {
- rv = vnet_mpls_add_del_encap (&dest, fib_id, 0 /* labels */,
- ~0 /* policy_tunnel_index */,
- 0 /* no_dst_hash */,
- 0 /* indexp */,
- 0 /* is_add */);
- switch (rv)
- {
- case VNET_API_ERROR_NO_SUCH_FIB:
- return clib_error_return (0, "fib id %d unknown", fib_id);
- case VNET_API_ERROR_NO_SUCH_ENTRY:
- return clib_error_return (0, "dest %U not in fib %d",
- format_ip4_address, &dest, fib_id);
- default:
- break;
- }
- return 0;
- }
- else
- return clib_error_return (0, "unknown input `%U'",
- format_unformat_error, input);
-}
-
-VLIB_CLI_COMMAND (mpls_del_encap_command, static) = {
- .path = "mpls encap delete",
- .short_help = "mpls encap delete fib <id> dest <ip4-address>",
- .function = mpls_del_encap_command_fn,
-};
-
int
mpls_dest_cmp(void * a1, void * a2)
{
@@ -419,65 +189,22 @@ mpls_label_cmp(void * a1, void * a2)
}
static clib_error_t *
-show_mpls_fib_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- u64 key;
- u32 value;
- show_mpls_fib_t *records = 0;
- show_mpls_fib_t *s;
- mpls_main_t * mm = &mpls_main;
- ip4_fib_t * rx_fib;
-
- hash_foreach (key, value, mm->mpls_encap_by_fib_and_dest,
- ({
- vec_add2 (records, s, 1);
- s->fib_index = (u32)(key>>32);
- s->dest = (u32)(key & 0xFFFFFFFF);
- s->entry_index = (u32) value;
- }));
-
- if (!vec_len(records))
- {
- vlib_cli_output (vm, "MPLS encap table empty");
- }
- /* sort output by dst address within fib */
- vec_sort_with_function (records, mpls_dest_cmp);
- vec_sort_with_function (records, mpls_fib_index_cmp);
- vlib_cli_output (vm, "MPLS encap table");
- vlib_cli_output (vm, "%=6s%=16s%=16s", "Table", "Dest address", "Labels");
- vec_foreach (s, records)
- {
- rx_fib = ip4_fib_get (s->fib_index);
- vlib_cli_output (vm, "%=6d%=16U%=16U", rx_fib->table_id,
- format_ip4_address, &s->dest,
- format_mpls_encap_index, mm, s->entry_index);
- }
-
- vec_free(records);
- return 0;
-}
-
-VLIB_CLI_COMMAND (show_mpls_fib_command, static) = {
- .path = "show mpls encap",
- .short_help = "show mpls encap",
- .function = show_mpls_fib_command_fn,
-};
-
-static clib_error_t *
vnet_mpls_local_label (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
unformat_input_t _line_input, * line_input = &_line_input;
fib_route_path_t *rpaths = NULL, rpath;
- clib_error_t * error = 0;
u32 table_id, is_del, is_ip;
- fib_prefix_t pfx;
mpls_label_t local_label;
+ mpls_label_t out_label;
+ clib_error_t * error;
mpls_eos_bit_t eos;
+ vnet_main_t * vnm;
+ fib_prefix_t pfx;
+ vnm = vnet_get_main();
+ error = NULL;
is_ip = 0;
table_id = 0;
eos = MPLS_EOS;
@@ -519,13 +246,99 @@ vnet_mpls_local_label (vlib_main_t * vm,
pfx.fp_proto = FIB_PROTOCOL_IP6;
is_ip = 1;
}
+ else if (unformat (line_input, "via %U %U weight %u",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index,
+ &rpath.frp_weight))
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1(rpaths, rpath);
+ }
+
+ else if (unformat (line_input, "via %U %U weight %u",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index,
+ &rpath.frp_weight))
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1(rpaths, rpath);
+ }
+
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1(rpaths, rpath);
+ }
+
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U next-hop-table %d",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ &rpath.frp_fib_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U next-hop-table %d",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ &rpath.frp_fib_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4))
+ {
+ /*
+ * the recursive next-hops are by default in the same table
+ * as the prefix
+ */
+ rpath.frp_fib_index = table_id;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ vec_add1(rpaths, rpath);
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6))
+ {
+ rpath.frp_fib_index = table_id;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ vec_add1(rpaths, rpath);
+ }
else if (unformat (line_input, "%d", &local_label))
;
else if (unformat (line_input,
"ip4-lookup-in-table %d",
&rpath.frp_fib_index))
{
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = FIB_PROTOCOL_IP4;
rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID;
pfx.fp_payload_proto = DPO_PROTO_IP4;
@@ -535,7 +348,6 @@ vnet_mpls_local_label (vlib_main_t * vm,
"ip6-lookup-in-table %d",
&rpath.frp_fib_index))
{
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = FIB_PROTOCOL_IP6;
rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID;
vec_add1(rpaths, rpath);
@@ -545,16 +357,26 @@ vnet_mpls_local_label (vlib_main_t * vm,
"mpls-lookup-in-table %d",
&rpath.frp_fib_index))
{
- rpath.frp_label = MPLS_LABEL_INVALID;
rpath.frp_proto = FIB_PROTOCOL_MPLS;
rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID;
pfx.fp_payload_proto = DPO_PROTO_MPLS;
vec_add1(rpaths, rpath);
}
+ else if (unformat (line_input, "out-label %U",
+ unformat_mpls_unicast_label,
+ &out_label))
+ {
+ if (vec_len(rpaths) == 0)
+ {
+ error = clib_error_return(0 , "Paths then labels");
+ goto done;
+ }
+ vec_add1(rpaths[vec_len(rpaths)-1].frp_label_stack, out_label);
+ }
else
{
error = clib_error_return (0, "unkown input: %U",
- format_unformat_error, input);
+ format_unformat_error, line_input);
goto done;
}
@@ -596,6 +418,7 @@ vnet_mpls_local_label (vlib_main_t * vm,
pfx.fp_proto = FIB_PROTOCOL_MPLS;
pfx.fp_len = 21;
pfx.fp_label = local_label;
+ pfx.fp_payload_proto = DPO_PROTO_MPLS;
if (NULL == rpaths)
{
@@ -606,17 +429,20 @@ vnet_mpls_local_label (vlib_main_t * vm,
/*
* the CLI parsing stored table Ids, swap to FIB indicies
*/
- fi = fib_table_id_find_fib_index(dpo_proto_to_fib(pfx.fp_payload_proto),
- rpaths[0].frp_fib_index);
-
- if (~0 == fi)
+ if (FIB_NODE_INDEX_INVALID == rpath.frp_sw_if_index)
{
- error = clib_error_return(0 , "%U Via table %d does not exist",
- format_fib_protocol, pfx.fp_payload_proto,
- rpaths[0].frp_fib_index);
- goto done;
+ fi = fib_table_id_find_fib_index(dpo_proto_to_fib(pfx.fp_payload_proto),
+ rpaths[0].frp_fib_index);
+
+ if (~0 == fi)
+ {
+ error = clib_error_return(0 , "%U Via table %d does not exist",
+ format_fib_protocol, pfx.fp_payload_proto,
+ rpaths[0].frp_fib_index);
+ goto done;
+ }
+ rpaths[0].frp_fib_index = fi;
}
- rpaths[0].frp_fib_index = fi;
fib_index = mpls_fib_index_from_table_id(table_id);
@@ -653,44 +479,15 @@ VLIB_CLI_COMMAND (mpls_local_label_command, static) = {
.short_help = "Create/Delete MPL local labels",
};
-int mpls_fib_reset_labels (u32 fib_id)
+int
+mpls_fib_reset_labels (u32 fib_id)
{
- u64 key;
- u32 value;
- show_mpls_fib_t *records = 0;
- show_mpls_fib_t *s;
- mpls_main_t * mm = &mpls_main;
- ip4_main_t * im = &ip4_main;
- u32 fib_index;
- uword *p;
-
- p = hash_get (im->fib_index_by_table_id, fib_id);
- if (! p)
- return VNET_API_ERROR_NO_SUCH_FIB;
-
- fib_index = p[0];
-
- hash_foreach (key, value, mm->mpls_encap_by_fib_and_dest,
- ({
- if (fib_index == (u32)(key>>32)) {
- vec_add2 (records, s, 1);
- s->dest = (u32)(key & 0xFFFFFFFF);
- s->entry_index = (u32) value;
- }
- }));
-
- vec_foreach (s, records)
- {
- key = ((u64)fib_index<<32) | ((u64) s->dest);
- hash_unset (mm->mpls_encap_by_fib_and_dest, key);
- pool_put_index (mm->encaps, s->entry_index);
- }
-
- vec_free(records);
+ // FIXME
return 0;
}
-static clib_error_t * mpls_init (vlib_main_t * vm)
+static clib_error_t *
+mpls_init (vlib_main_t * vm)
{
mpls_main_t * mm = &mpls_main;
clib_error_t * error;
@@ -701,8 +498,6 @@ static clib_error_t * mpls_init (vlib_main_t * vm)
if ((error = vlib_call_init_function (vm, ip_main_init)))
return error;
- mm->mpls_encap_by_fib_and_dest = hash_create (0, sizeof (uword));
-
return vlib_call_init_function (vm, mpls_input_init);
}
diff --git a/vnet/vnet/mpls/mpls.h b/vnet/vnet/mpls/mpls.h
index 59fc761ea2c..b6fdbce7d70 100644
--- a/vnet/vnet/mpls/mpls.h
+++ b/vnet/vnet/mpls/mpls.h
@@ -30,30 +30,6 @@ typedef enum {
MPLS_N_ERROR,
} mpls_error_t;
-/*
- * No protocol info, MPLS labels don't have a next-header field
- * presumably the label field tells all...
- */
-typedef struct {
- u8 tunnel_dst[6];
- ip4_address_t intfc_address;
- u32 tx_sw_if_index;
- u32 inner_fib_index;
- u32 mask_width;
- u32 encap_index;
- u32 hw_if_index;
- u8 * rewrite_data;
- u8 l2_only;
- fib_node_index_t fei;
-} mpls_eth_tunnel_t;
-
-typedef struct {
- mpls_unicast_header_t *labels;
- /* only for policy tunnels */
- u8 * rewrite;
- u32 output_next_index;
-} mpls_encap_t;
-
#define MPLS_FIB_DEFAULT_TABLE_ID 0
/**
@@ -94,18 +70,6 @@ typedef struct {
/** A hash table to lookup the mpls_fib by table ID */
uword *fib_index_by_table_id;
- /* pool of ethernet tunnel instances */
- mpls_eth_tunnel_t *eth_tunnels;
- u32 * free_eth_sw_if_indices;
-
- /* Encap side: map (fib, dst_address) to mpls label stack */
- mpls_encap_t * encaps;
- uword * mpls_encap_by_fib_and_dest;
-
- /* mpls-o-e policy tunnel next index for ip4/ip6-classify */
- u32 ip4_classify_mpls_policy_encap_next_index;
- u32 ip6_classify_mpls_policy_encap_next_index;
-
/* Feature arc indices */
u8 input_feature_arc_index;
u8 output_feature_arc_index;
@@ -123,7 +87,6 @@ extern mpls_main_t mpls_main;
extern clib_error_t * mpls_feature_init(vlib_main_t * vm);
format_function_t format_mpls_protocol;
-format_function_t format_mpls_eth_header_with_length;
format_function_t format_mpls_encap_index;
format_function_t format_mpls_eos_bit;
@@ -153,29 +116,8 @@ void mpls_sw_interface_enable_disable (mpls_main_t * mm,
u8 mpls_sw_interface_is_enabled (u32 sw_if_index);
-mpls_encap_t *
-mpls_encap_by_fib_and_dest (mpls_main_t * mm, u32 rx_fib, u32 dst_address);
-
-int vnet_mpls_ethernet_add_del_tunnel (u8 *dst,
- ip4_address_t *intfc,
- u32 mask_width,
- u32 inner_fib_id,
- u32 tx_sw_if_index,
- u32 * tunnel_sw_if_index,
- u8 l2_only,
- u8 is_add);
-
int mpls_fib_reset_labels (u32 fib_id);
-int vnet_mpls_add_del_encap (ip4_address_t *dest, u32 fib_id,
- u32 *labels_host_byte_order,
- u32 policy_tunnel_index,
- int no_dst_hash, u32 * indexp, int is_add);
-
-int vnet_mpls_policy_tunnel_add_rewrite (mpls_main_t * mm,
- mpls_encap_t * e,
- u32 policy_tunnel_index);
-
#define foreach_mpls_input_next \
_(DROP, "error-drop") \
_(LOOKUP, "mpls-lookup")
@@ -211,26 +153,6 @@ typedef enum {
} mpls_output_next_t;
typedef struct {
- u32 lookup_miss;
-
- /* Tunnel-id / index in tunnel vector */
- u32 tunnel_id;
-
- /* output interface */
- u32 tx_sw_if_index;
-
- /* mpls encap index */
- u32 mpls_encap_index;
-
- /* pkt length */
- u32 length;
-
- u8 dst[6];
-} mpls_eth_tx_trace_t;
-
-u8 * format_mpls_eth_tx_trace (u8 * s, va_list * args);
-
-typedef struct {
u32 fib_index;
u32 entry_index;
u32 dest;
diff --git a/vnet/vnet/mpls/mpls_lookup.c b/vnet/vnet/mpls/mpls_lookup.c
index 34ba79e40fd..2d34cbde341 100644
--- a/vnet/vnet/mpls/mpls_lookup.c
+++ b/vnet/vnet/mpls/mpls_lookup.c
@@ -170,6 +170,19 @@ mpls_lookup (vlib_main_t * vm,
vlib_buffer_length_in_chain (vm, b1));
/*
+          * before we pop the label, copy the values we need to maintain.
+          * The label header is in network byte order.
+          * The last byte is the TTL.
+          * The EXP bits are bits 1 to 3 (0-based) of the third byte.
+ */
+ vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
+ vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
+ vnet_buffer (b0)->mpls.first = 1;
+ vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
+ vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
+ vnet_buffer (b1)->mpls.first = 1;
+
+ /*
* pop the label that was just used in the lookup
*/
vlib_buffer_advance(b0, sizeof(*h0));
@@ -223,8 +236,8 @@ mpls_lookup (vlib_main_t * vm,
lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
vnet_buffer(b0)->sw_if_index[VLIB_RX]);
- lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
- lb0 = load_balance_get(lbi0);
+ lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
+ lb0 = load_balance_get(lbi0);
hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
@@ -248,6 +261,16 @@ mpls_lookup (vlib_main_t * vm,
vlib_buffer_length_in_chain (vm, b0));
/*
+          * before we pop the label, copy the values we need to maintain.
+          * The label header is in network byte order.
+          * The last byte is the TTL.
+          * The EXP bits are bits 1 to 3 (0-based) of the third byte.
+ */
+ vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
+ vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
+ vnet_buffer (b0)->mpls.first = 1;
+
+ /*
* pop the label that was just used in the lookup
*/
vlib_buffer_advance(b0, sizeof(*h0));
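
The byte-wise TTL/EXP extraction in this hunk follows the standard MPLS label
stack entry layout: a 32-bit word in network byte order holding label (20
bits), EXP (3 bits), S (1 bit) and TTL (8 bits). As an illustrative,
standalone sketch (not part of the patch), the same fields can be recovered
from the four raw bytes like this:

    #include <stdint.h>

    /* One 4-byte MPLS label stack entry as it sits on the wire. */
    typedef struct { uint8_t b[4]; } mpls_entry_bytes_t;

    static inline uint8_t  mpls_entry_ttl (const mpls_entry_bytes_t *e)
    { return e->b[3]; }                       /* last byte is the TTL      */

    static inline uint8_t  mpls_entry_exp (const mpls_entry_bytes_t *e)
    { return (e->b[2] & 0x0e) >> 1; }         /* EXP: bits 1..3 of byte 2  */

    static inline uint8_t  mpls_entry_eos (const mpls_entry_bytes_t *e)
    { return e->b[2] & 0x01; }                /* bottom-of-stack (S) bit   */

    static inline uint32_t mpls_entry_label (const mpls_entry_bytes_t *e)
    {
      /* the top 20 bits of the word hold the label value */
      return ((uint32_t) e->b[0] << 12) |
             ((uint32_t) e->b[1] << 4)  |
             ((uint32_t) e->b[2] >> 4);
    }
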
diff --git a/vnet/vnet/mpls/mpls_tunnel.c b/vnet/vnet/mpls/mpls_tunnel.c
new file mode 100644
index 00000000000..656bf330b1a
--- /dev/null
+++ b/vnet/vnet/mpls/mpls_tunnel.c
@@ -0,0 +1,779 @@
+/*
+ * mpls_tunnel.c: MPLS tunnel interfaces (i.e. for RSVP-TE)
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/mpls/mpls_tunnel.h>
+#include <vnet/ip/ip.h>
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/adj/adj_midchain.h>
+
+/**
+ * @brief pool of tunnel instances
+ */
+static mpls_tunnel_t *mpls_tunnel_pool;
+
+/**
+ * @brief Pool of free tunnel HW interface indices, i.e. recycled indices
+ */
+static u32 * mpls_tunnel_free_hw_if_indices;
+
+/**
+ * @brief DB of SW index to tunnel index
+ */
+static u32 *mpls_tunnel_db;
+
+/**
+ * @brief Get a tunnel object from a SW interface index
+ */
+static mpls_tunnel_t*
+mpls_tunnel_get_from_sw_if_index (u32 sw_if_index)
+{
+    if ((vec_len(mpls_tunnel_db) <= sw_if_index) ||
+ (~0 == mpls_tunnel_db[sw_if_index]))
+ return (NULL);
+
+ return (pool_elt_at_index(mpls_tunnel_pool,
+ mpls_tunnel_db[sw_if_index]));
+}
+
+/**
+ * @brief Select the forwarding chain type; an imp-null-only label stack needs an IP chain
+ */
+static fib_forward_chain_type_t
+mpls_tunnel_get_fwd_chain_type (const mpls_tunnel_t *mt)
+{
+ if ((1 == vec_len(mt->mt_label_stack)) &&
+ (mt->mt_label_stack[0] == MPLS_IETF_IMPLICIT_NULL_LABEL))
+ {
+ /*
+ * the only label in the label stack is implicit null
+ * we need to build an IP chain.
+ */
+ if (FIB_PROTOCOL_IP4 == fib_path_list_get_proto(mt->mt_path_list))
+ {
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+ }
+ else
+ {
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
+ }
+ }
+ else
+ {
+ return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
+ }
+}
+
+/**
+ * @brief Build a rewrite string for the MPLS tunnel.
+ *
+ * We have choices here:
+ * 1 - have an Adjacency with a zero length string and stack it on
+ * MPLS label objects
+ * 2 - put the label header rewrites in the adjacency string.
+ *
+ * We choose 2 since it results in fewer graph nodes in the egress path
+ */
+static u8*
+mpls_tunnel_build_rewrite (vnet_main_t * vnm,
+ u32 sw_if_index,
+ vnet_link_t link_type,
+ const void *dst_address)
+{
+ mpls_unicast_header_t *muh;
+ mpls_tunnel_t *mt;
+ u8 *rewrite;
+ u32 mti, ii;
+
+ rewrite = NULL;
+ mti = mpls_tunnel_db[sw_if_index];
+ mt = pool_elt_at_index(mpls_tunnel_pool, mti);
+
+ vec_validate(rewrite, (sizeof(*muh) * vec_len(mt->mt_label_stack)) - 1);
+ muh = (mpls_unicast_header_t *)rewrite;
+
+ /*
+   * The last (innermost) label in the stack may be EOS; all the rest are non-EOS
+ */
+ for (ii = 0; ii < vec_len(mt->mt_label_stack)-1; ii++)
+ {
+ vnet_mpls_uc_set_label(&muh[ii].label_exp_s_ttl, mt->mt_label_stack[ii]);
+ vnet_mpls_uc_set_ttl(&muh[ii].label_exp_s_ttl, 255);
+ vnet_mpls_uc_set_exp(&muh[ii].label_exp_s_ttl, 0);
+ vnet_mpls_uc_set_s(&muh[ii].label_exp_s_ttl, MPLS_NON_EOS);
+ muh[ii].label_exp_s_ttl = clib_host_to_net_u32(muh[ii].label_exp_s_ttl);
+ }
+
+ vnet_mpls_uc_set_label(&muh[ii].label_exp_s_ttl, mt->mt_label_stack[ii]);
+ vnet_mpls_uc_set_ttl(&muh[ii].label_exp_s_ttl, 255);
+ vnet_mpls_uc_set_exp(&muh[ii].label_exp_s_ttl, 0);
+
+ if (VNET_LINK_MPLS == link_type &&
+ mt->mt_label_stack[ii] != MPLS_IETF_IMPLICIT_NULL_LABEL)
+ vnet_mpls_uc_set_s(&muh[ii].label_exp_s_ttl, MPLS_NON_EOS);
+ else
+ vnet_mpls_uc_set_s(&muh[ii].label_exp_s_ttl, MPLS_EOS);
+
+ muh[ii].label_exp_s_ttl = clib_host_to_net_u32(muh[ii].label_exp_s_ttl);
+
+ return ((u8*)muh);
+}
+
+/**
+ * mpls_tunnel_stack
+ *
+ * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
+ */
+static void
+mpls_tunnel_stack (adj_index_t ai)
+{
+ ip_adjacency_t *adj;
+ mpls_tunnel_t *mt;
+ u32 sw_if_index;
+
+ adj = adj_get(ai);
+ sw_if_index = adj->rewrite_header.sw_if_index;
+
+ mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
+
+ if (NULL == mt)
+ return;
+
+ /*
+ * find the adjacency that is contributed by the FIB path-list
+   * that this tunnel resolves via, and use it as the next adj
+ * in the midchain
+ */
+ if (vnet_hw_interface_get_flags(vnet_get_main(),
+ mt->mt_hw_if_index) &
+ VNET_HW_INTERFACE_FLAG_LINK_UP)
+ {
+ dpo_id_t dpo = DPO_INVALID;
+
+ fib_path_list_contribute_forwarding(mt->mt_path_list,
+ mpls_tunnel_get_fwd_chain_type(mt),
+ &dpo);
+
+ if (DPO_LOAD_BALANCE == dpo.dpoi_type)
+ {
+ /*
+ * we don't support multiple paths, so no need to load-balance.
+ * pull the first and only choice and stack directly on that.
+ */
+ load_balance_t *lb;
+
+ lb = load_balance_get (dpo.dpoi_index);
+
+ ASSERT(1 == lb->lb_n_buckets);
+
+ dpo_copy(&dpo, load_balance_get_bucket_i (lb, 0));
+ }
+
+ adj_nbr_midchain_stack(ai, &dpo);
+ dpo_reset(&dpo);
+ }
+ else
+ {
+ adj_nbr_midchain_unstack(ai);
+ }
+}
+
+/**
+ * @brief Call back when restacking all adjacencies on a MPLS interface
+ */
+static adj_walk_rc_t
+mpls_adj_walk_cb (adj_index_t ai,
+ void *ctx)
+{
+ mpls_tunnel_stack(ai);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+static void
+mpls_tunnel_restack (mpls_tunnel_t *mt)
+{
+ fib_protocol_t proto;
+
+ /*
+ * walk all the adjacencies on the MPLS interface and restack them
+ */
+ FOR_EACH_FIB_PROTOCOL(proto)
+ {
+ adj_nbr_walk(mt->mt_sw_if_index,
+ proto,
+ mpls_adj_walk_cb,
+ NULL);
+ }
+}
+
+static clib_error_t *
+mpls_tunnel_admin_up_down (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 flags)
+{
+ vnet_hw_interface_t * hi;
+ mpls_tunnel_t *mt;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ mt = mpls_tunnel_get_from_sw_if_index(hi->sw_if_index);
+
+ if (NULL == mt)
+ return (NULL);
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);
+
+ mpls_tunnel_restack(mt);
+
+ return (NULL);
+}
+
+/**
+ * @brief Fixup the adj rewrite post encap. This is a no-op since the
+ * rewrite is a stack of labels.
+ */
+static void
+mpls_tunnel_fixup (vlib_main_t *vm,
+ ip_adjacency_t *adj,
+ vlib_buffer_t *b0)
+{
+}
+
+static void
+mpls_tunnel_update_adj (vnet_main_t * vnm,
+ u32 sw_if_index,
+ adj_index_t ai)
+{
+ adj_nbr_midchain_update_rewrite(
+ ai, mpls_tunnel_fixup,
+ ADJ_MIDCHAIN_FLAG_NONE,
+ mpls_tunnel_build_rewrite(vnm, sw_if_index,
+ adj_get_link_type(ai),
+ NULL));
+
+ mpls_tunnel_stack(ai);
+}
+
+static u8 *
+format_mpls_tunnel_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ return format (s, "mpls-tunnel%d", dev_instance);
+}
+
+static u8 *
+format_mpls_tunnel_device (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ CLIB_UNUSED (int verbose) = va_arg (*args, int);
+
+ return (format (s, "MPLS-tunnel: id %d\n", dev_instance));
+}
+
+/**
+ * @brief Packet trace structure
+ */
+typedef struct mpls_tunnel_trace_t_
+{
+ /**
+ * Tunnel-id / index in tunnel vector
+ */
+ u32 tunnel_id;
+} mpls_tunnel_trace_t;
+
+static u8 *
+format_mpls_tunnel_tx_trace (u8 * s,
+ va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *);
+
+ s = format (s, "MPLS: tunnel %d", t->tunnel_id);
+ return s;
+}
+
+/**
+ * @brief TX function. Only called for L2 traffic; L3 traffic uses the adj-midchains
+ */
+static uword
+mpls_tunnel_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 next_index;
+ u32 * from, * to_next, n_left_from, n_left_to_next;
+ vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
+ const mpls_tunnel_t *mt;
+
+ mt = pool_elt_at_index(mpls_tunnel_pool, rd->dev_instance);
+
+ /* Vector of buffer / pkt indices we're supposed to process */
+ from = vlib_frame_vector_args (frame);
+
+ /* Number of buffers / pkts */
+ n_left_from = frame->n_vectors;
+
+ /* Speculatively send the first buffer to the last disposition we used */
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ /* set up to enqueue to our disposition with index = next_index */
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /*
+ * FIXME DUAL LOOP
+ */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t * b0;
+ u32 bi0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer(vm, bi0);
+
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_adj;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = rd->dev_instance;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, mt->mt_l2_tx_arc);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VNET_DEVICE_CLASS (mpls_tunnel_class) = {
+ .name = "MPLS tunnel device",
+ .format_device_name = format_mpls_tunnel_name,
+ .format_device = format_mpls_tunnel_device,
+ .format_tx_trace = format_mpls_tunnel_tx_trace,
+ .tx_function = mpls_tunnel_tx,
+ .no_flatten_output_chains = 1,
+ .admin_up_down_function = mpls_tunnel_admin_up_down,
+};
+
+VNET_HW_INTERFACE_CLASS (mpls_tunnel_hw_interface_class) = {
+ .name = "MPLS-Tunnel",
+// .format_header = format_mpls_eth_header_with_length,
+// .unformat_header = unformat_mpls_eth_header,
+ .update_adjacency = mpls_tunnel_update_adj,
+ .build_rewrite = mpls_tunnel_build_rewrite,
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+
+const mpls_tunnel_t *
+mpls_tunnel_get (u32 mti)
+{
+ return (pool_elt_at_index(mpls_tunnel_pool, mti));
+}
+
+/**
+ * @brief Walk all the MPLS tunnels
+ */
+void
+mpls_tunnel_walk (mpls_tunnel_walk_cb_t cb,
+ void *ctx)
+{
+ u32 mti;
+
+ pool_foreach_index(mti, mpls_tunnel_pool,
+ ({
+ cb(mti, ctx);
+ }));
+}
+
+void
+vnet_mpls_tunnel_del (u32 sw_if_index)
+{
+ mpls_tunnel_t *mt;
+
+ mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
+
+ if (NULL == mt)
+ return;
+
+ fib_path_list_child_remove(mt->mt_path_list,
+ mt->mt_sibling_index);
+ if (ADJ_INDEX_INVALID != mt->mt_l2_adj)
+ adj_unlock(mt->mt_l2_adj);
+
+ vec_free(mt->mt_label_stack);
+
+ vec_add1 (mpls_tunnel_free_hw_if_indices, mt->mt_hw_if_index);
+ pool_put(mpls_tunnel_pool, mt);
+ mpls_tunnel_db[sw_if_index] = ~0;
+}
+
+void
+vnet_mpls_tunnel_add (fib_route_path_t *rpaths,
+ mpls_label_t *label_stack,
+ u8 l2_only,
+ u32 *sw_if_index)
+{
+ vnet_hw_interface_t * hi;
+ mpls_tunnel_t *mt;
+ vnet_main_t * vnm;
+ u32 mti;
+
+ vnm = vnet_get_main();
+ pool_get(mpls_tunnel_pool, mt);
+ memset (mt, 0, sizeof (*mt));
+ mti = mt - mpls_tunnel_pool;
+ fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL);
+ mt->mt_l2_adj = ADJ_INDEX_INVALID;
+
+ /*
+   * Create a new, or re-use an old, tunnel HW interface
+ */
+ if (vec_len (mpls_tunnel_free_hw_if_indices) > 0)
+ {
+ mt->mt_hw_if_index =
+ mpls_tunnel_free_hw_if_indices[vec_len(mpls_tunnel_free_hw_if_indices)-1];
+ _vec_len (mpls_tunnel_free_hw_if_indices) -= 1;
+ hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);
+ hi->hw_instance = mti;
+ hi->dev_instance = mti;
+ }
+ else
+ {
+ mt->mt_hw_if_index = vnet_register_interface(
+ vnm,
+ mpls_tunnel_class.index,
+ mti,
+ mpls_tunnel_hw_interface_class.index,
+ mti);
+ hi = vnet_get_hw_interface(vnm, mt->mt_hw_if_index);
+ }
+
+ /*
+ * Add the new tunnel to the tunnel DB - key:SW if index
+ */
+ mt->mt_sw_if_index = hi->sw_if_index;
+ vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0);
+ mpls_tunnel_db[mt->mt_sw_if_index] = mti;
+
+ /*
+ * construct a path-list from the path provided
+ */
+ mt->mt_path_list = fib_path_list_create(FIB_PATH_LIST_FLAG_SHARED, rpaths);
+ mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
+ FIB_NODE_TYPE_MPLS_TUNNEL,
+ mti);
+
+ mt->mt_label_stack = vec_dup(label_stack);
+
+ if (l2_only)
+ {
+ mt->mt_l2_adj =
+ adj_nbr_add_or_lock(fib_path_list_get_proto(mt->mt_path_list),
+ VNET_LINK_ETHERNET,
+ &zero_addr,
+ mt->mt_sw_if_index);
+
+ mt->mt_l2_tx_arc = vlib_node_add_named_next(vlib_get_main(),
+ hi->tx_node_index,
+ "adj-l2-midchain");
+ }
+
+ *sw_if_index = mt->mt_sw_if_index;
+}
+
+static clib_error_t *
+vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ vnet_main_t * vnm = vnet_get_main();
+ u8 is_del = 0;
+ u8 l2_only = 0;
+ fib_route_path_t rpath, *rpaths = NULL;
+ mpls_label_t out_label = MPLS_LABEL_INVALID, *labels = NULL;
+ u32 sw_if_index;
+
+ memset(&rpath, 0, sizeof(rpath));
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del %U",
+ unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ is_del = 1;
+ else if (unformat (line_input, "add"))
+ is_del = 0;
+ else if (unformat (line_input, "out-label %U",
+ unformat_mpls_unicast_label, &out_label))
+ {
+ vec_add1(labels, out_label);
+ }
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ }
+
+ else if (unformat (line_input, "via %U %U",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6,
+ unformat_vnet_sw_interface, vnm,
+ &rpath.frp_sw_if_index))
+ {
+ rpath.frp_weight = 1;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip6_address,
+ &rpath.frp_addr.ip6))
+ {
+ rpath.frp_fib_index = 0;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_ip4_address,
+ &rpath.frp_addr.ip4))
+ {
+ rpath.frp_fib_index = 0;
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ }
+ else if (unformat (line_input, "l2-only"))
+ l2_only = 1;
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, line_input);
+ }
+
+ if (is_del)
+ {
+ vnet_mpls_tunnel_del(sw_if_index);
+ }
+ else
+ {
+ if (0 == vec_len(labels))
+ return clib_error_return (0, "No Output Labels '%U'",
+ format_unformat_error, line_input);
+
+ vec_add1(rpaths, rpath);
+ vnet_mpls_tunnel_add(rpaths, labels, l2_only, &sw_if_index);
+ }
+
+ vec_free(labels);
+ vec_free(rpaths);
+
+ return (NULL);
+}
+
+/*?
+ * This command creates a uni-directional MPLS tunnel
+ *
+ * @cliexpar
+ * @cliexstart{create mpls tunnel}
+ * create mpls tunnel via 10.0.0.1 GigEthernet0/8/0 out-label 33 out-label 34
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (create_mpls_tunnel_command, static) = {
+ .path = "mpls tunnel",
+ .short_help =
+ "mpls tunnel via [addr] [interface] [out-labels]",
+ .function = vnet_create_mpls_tunnel_command_fn,
+};
+
+static u8 *
+format_mpls_tunnel (u8 * s, va_list * args)
+{
+ mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *);
+ int ii;
+
+ s = format(s, "mpls_tunnel%d: sw_if_index:%d hw_if_index:%d",
+ mt - mpls_tunnel_pool,
+ mt->mt_sw_if_index,
+ mt->mt_hw_if_index);
+ s = format(s, "\n label-stack:\n ");
+ for (ii = 0; ii < vec_len(mt->mt_label_stack); ii++)
+ {
+ s = format(s, "%d, ", mt->mt_label_stack[ii]);
+ }
+ s = format(s, "\n via:\n");
+ s = fib_path_list_format(mt->mt_path_list, s);
+ s = format(s, "\n");
+
+ return (s);
+}
+
+static clib_error_t *
+show_mpls_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ mpls_tunnel_t * mt;
+ u32 mti = ~0;
+
+ if (pool_elts (mpls_tunnel_pool) == 0)
+ vlib_cli_output (vm, "No MPLS tunnels configured...");
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &mti))
+ ;
+ else
+ break;
+ }
+
+ if (~0 == mti)
+ {
+ pool_foreach (mt, mpls_tunnel_pool,
+ ({
+ vlib_cli_output (vm, "[@%d] %U",
+ mt - mpls_tunnel_pool,
+ format_mpls_tunnel, mt);
+ }));
+ }
+ else
+ {
+ if (pool_is_free_index(mpls_tunnel_pool, mti))
+          return clib_error_return (0, "Not a tunnel index %d", mti);
+
+ mt = pool_elt_at_index(mpls_tunnel_pool, mti);
+
+ vlib_cli_output (vm, "[@%d] %U",
+ mt - mpls_tunnel_pool,
+ format_mpls_tunnel, mt);
+ }
+
+ return 0;
+}
+
+/*?
+ * This command shows MPLS tunnels
+ *
+ * @cliexpar
+ * @cliexstart{sh mpls tunnel 2}
+ * [@2] mpls_tunnel2: sw_if_index:5 hw_if_index:5
+ * label-stack:
+ * 3,
+ * via:
+ * index:26 locks:1 proto:ipv4 uPRF-list:26 len:1 itfs:[2, ]
+ * index:26 pl-index:26 ipv4 weight=1 attached-nexthop: oper-flags:resolved,
+ * 10.0.0.2 loop0
+ * [@0]: ipv4 via 10.0.0.2 loop0: IP4: de:ad:00:00:00:00 -> 00:00:11:aa:bb:cc
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (show_mpls_tunnel_command, static) = {
+ .path = "show mpls tunnel",
+ .function = show_mpls_tunnel_command_fn,
+};
+
+static mpls_tunnel_t *
+mpls_tunnel_from_fib_node (fib_node_t *node)
+{
+#if (CLIB_DEBUG > 0)
+ ASSERT(FIB_NODE_TYPE_MPLS_TUNNEL == node->fn_type);
+#endif
+ return ((mpls_tunnel_t*) (((char*)node) -
+ STRUCT_OFFSET_OF(mpls_tunnel_t, mt_node)));
+}
+
+/**
+ * Function definition to backwalk a FIB node
+ */
+static fib_node_back_walk_rc_t
+mpls_tunnel_back_walk (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ mpls_tunnel_restack(mpls_tunnel_from_fib_node(node));
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index
+ */
+static fib_node_t*
+mpls_tunnel_fib_node_get (fib_node_index_t index)
+{
+ mpls_tunnel_t * mt;
+
+ mt = pool_elt_at_index(mpls_tunnel_pool, index);
+
+ return (&mt->mt_node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+static void
+mpls_tunnel_last_lock_gone (fib_node_t *node)
+{
+ /*
+     * The MPLS tunnel is a root of the graph. As such
+ * it never has children and thus is never locked.
+ */
+ ASSERT(0);
+}
+
+/*
+ * Virtual function table registered by MPLS tunnels
+ * for participation in the FIB object graph.
+ */
+const static fib_node_vft_t mpls_vft = {
+ .fnv_get = mpls_tunnel_fib_node_get,
+ .fnv_last_lock = mpls_tunnel_last_lock_gone,
+ .fnv_back_walk = mpls_tunnel_back_walk,
+};
+
+static clib_error_t *
+mpls_tunnel_init (vlib_main_t *vm)
+{
+ fib_node_register_type(FIB_NODE_TYPE_MPLS_TUNNEL, &mpls_vft);
+
+ return 0;
+}
+VLIB_INIT_FUNCTION(mpls_tunnel_init);
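The three functions and the vft above are the standard recipe for hooking an object into the FIB control-plane graph: embed a fib_node_t in the object, recover the outer object with a STRUCT_OFFSET_OF container-of conversion, and register a three-entry virtual function table (get, last-lock, back-walk). For readers new to that pattern, a stripped-down sketch for a hypothetical object type follows; my_obj_t and FIB_NODE_TYPE_MY_OBJ are invented names, not part of this patch.

/* Sketch only: generic FIB-graph participation, mirroring the pattern above.
 * my_obj_t and FIB_NODE_TYPE_MY_OBJ are invented for illustration. */
#include <vppinfra/pool.h>
#include <vnet/fib/fib_node.h>

typedef struct my_obj_t_
{
  fib_node_t mo_node;           /* embedded node providing the graph linkage */
} my_obj_t;

static my_obj_t *my_obj_pool;

static my_obj_t *
my_obj_from_fib_node (fib_node_t * node)
{
  /* container-of: step back from the embedded node to the outer object */
  return ((my_obj_t *) (((char *) node) -
                        STRUCT_OFFSET_OF (my_obj_t, mo_node)));
}

static fib_node_t *
my_obj_fib_node_get (fib_node_index_t index)
{
  my_obj_t *mo = pool_elt_at_index (my_obj_pool, index);
  return (&mo->mo_node);
}

static fib_node_back_walk_rc_t
my_obj_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
{
  my_obj_t *mo = my_obj_from_fib_node (node);
  /* a parent (e.g. a path-list) changed; re-resolve/re-stack mo here */
  (void) mo;
  return (FIB_NODE_BACK_WALK_CONTINUE);
}

static void
my_obj_last_lock_gone (fib_node_t * node)
{
  /* a graph root is never locked by children, so this is never called */
  ASSERT (0);
}

static const fib_node_vft_t my_obj_vft = {
  .fnv_get = my_obj_fib_node_get,
  .fnv_last_lock = my_obj_last_lock_gone,
  .fnv_back_walk = my_obj_back_walk,
};

/* registered once at init time, e.g.:
 *   fib_node_register_type (FIB_NODE_TYPE_MY_OBJ, &my_obj_vft); */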
diff --git a/vnet/vnet/mpls/mpls_tunnel.h b/vnet/vnet/mpls/mpls_tunnel.h
new file mode 100644
index 00000000000..ee56c0fc8e3
--- /dev/null
+++ b/vnet/vnet/mpls/mpls_tunnel.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MPLS_TUNNEL_H__
+#define __MPLS_TUNNEL_H__
+
+#include <vnet/mpls/mpls.h>
+
+/**
+ * @brief A uni-directional MPLS tunnel
+ */
+typedef struct mpls_tunnel_t_
+{
+ /**
+ * @brief The tunnel hooks into the FIB control plane graph.
+ */
+ fib_node_t mt_node;
+
+ /**
+ * @brief If the tunnel is an L2 tunnel, this is the link type ETHERNET
+ * adjacency
+ */
+ adj_index_t mt_l2_adj;
+
+ /**
+     * @brief On an L2 tunnel this is the VLIB arc from the L2-tx to the l2-midchain
+ */
+ u32 mt_l2_tx_arc;
+
+ /**
+ * @brief The path-list over which the tunnel's destination is reachable
+ */
+ fib_node_index_t mt_path_list;
+
+ /**
+ * @brief sibling index on the path-list so notifications are received.
+ */
+ u32 mt_sibling_index;
+
+ /**
+ * @brief The Label stack to apply to egress packets
+ */
+ mpls_label_t *mt_label_stack;
+
+ /**
+     * @brief Flag to indicate the tunnel is only for L2 traffic, that is,
+     * this tunnel belongs in a bridge domain.
+ */
+ u8 mt_l2_only;
+
+ /**
+     * @brief The HW interface index of the tunnel interface
+ */
+ u32 mt_hw_if_index;
+
+ /**
+     * @brief The SW interface index of the tunnel interface
+ */
+ u32 mt_sw_if_index;
+
+} mpls_tunnel_t;
+
+/**
+ * @brief Create a new MPLS tunnel
+ */
+extern void vnet_mpls_tunnel_add (fib_route_path_t *rpath,
+ mpls_label_t *label_stack,
+ u8 l2_only,
+ u32 *sw_if_index);
+
+extern void vnet_mpls_tunnel_del (u32 sw_if_index);
+
+extern const mpls_tunnel_t *mpls_tunnel_get(u32 index);
+
+/**
+ * @brief Callback function invoked while walking MPLS tunnels
+ */
+typedef void (*mpls_tunnel_walk_cb_t)(u32 index, void *ctx);
+
+/**
+ * @brief Walk all the MPLS tunnels
+ */
+extern void mpls_tunnel_walk(mpls_tunnel_walk_cb_t cb,
+ void *ctx);
+
+#endif
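As a usage illustration of the declarations above: a caller creates a tunnel by handing vnet_mpls_tunnel_add a vector of fib_route_path_t and a label-stack vector, exactly as the CLI and binary-API handlers elsewhere in this patch do. The sketch below is not part of the patch; the next-hop address, sw_if_index and label values are assumed for illustration, only the signatures come from this header.

/* Sketch, not part of the patch: drive the mpls_tunnel.h API directly.
 * The address, sw_if_index and label values are illustrative. */
#include <vnet/mpls/mpls_tunnel.h>

static void
example_count_cb (u32 index, void *ctx)
{
  u32 *n = ctx;
  (*n)++;                       /* count tunnels as they are walked */
}

static u32
example_create_tunnel (void)
{
  fib_route_path_t rpath = {
    .frp_proto = FIB_PROTOCOL_IP4,
    .frp_sw_if_index = 1,       /* assumed egress interface */
    .frp_fib_index = 0,
    .frp_weight = 1,
  };
  fib_route_path_t *rpaths = NULL;
  mpls_label_t *labels = NULL;
  u32 sw_if_index, n_tunnels = 0;

  /* next-hop 10.0.0.1, imposing labels 33 and 34 */
  rpath.frp_addr.ip4.as_u32 = clib_host_to_net_u32 (0x0a000001);
  vec_add1 (labels, 33);
  vec_add1 (labels, 34);
  vec_add1 (rpaths, rpath);

  vnet_mpls_tunnel_add (rpaths, labels, 0 /* l2_only */ , &sw_if_index);

  /* the tunnels can then be enumerated */
  mpls_tunnel_walk (example_count_cb, &n_tunnels);

  vec_free (rpaths);
  vec_free (labels);
  return (sw_if_index);
}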
diff --git a/vnet/vnet/mpls/policy_encap.c b/vnet/vnet/mpls/policy_encap.c
deleted file mode 100644
index d48a153b37d..00000000000
--- a/vnet/vnet/mpls/policy_encap.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * policy_encap.c: mpls-o-e policy encap
- *
- * Copyright (c) 2012-2014 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <vlib/vlib.h>
-#include <vnet/pg/pg.h>
-#include <vnet/mpls/mpls.h>
-#include <vnet/classify/vnet_classify.h>
-
-typedef struct {
- u32 next_index;
- u32 encap_index;
-} mpls_policy_encap_trace_t;
-
-u8 * format_mpls_policy_encap_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- mpls_policy_encap_trace_t * t = va_arg (*args, mpls_policy_encap_trace_t *);
-
- s = format (s, "MPLS-POLICY-ENCAP: next-index %d encap-index %d",
- t->next_index, t->encap_index);
-
- return s;
-}
-
-vlib_node_registration_t mpls_policy_encap_node;
-
-#define foreach_mpls_policy_encap_next \
-_(DROP, "error-drop")
-
-typedef enum {
-#define _(s,n) MPLS_POLICY_ENCAP_NEXT_##s,
- foreach_mpls_policy_encap_next
-#undef _
- MPLS_POLICY_ENCAP_N_NEXT,
-} mpls_policy_encap_next_t;
-
-#define foreach_mpls_policy_error \
-_(PKTS_ENCAP, "mpls policy tunnel packets encapsulated")
-
-typedef enum {
-#define _(n,s) MPLS_POLICY_ENCAP_ERROR_##n,
- foreach_mpls_policy_error
- MPLS_POLICY_ENCAP_N_ERROR,
-#undef _
-} mpls_policy_encap_error_t;
-
-static char * mpls_policy_encap_error_strings[] =
- {
-#define _(n,s) s,
- foreach_mpls_policy_error
-#undef _
-};
-
-static uword
-mpls_policy_encap (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
-{
- u32 n_left_from, next_index, * from, * to_next;
- mpls_main_t * mm = &mpls_main;
-
- from = vlib_frame_vector_args (from_frame);
- n_left_from = from_frame->n_vectors;
-
- next_index = node->cached_next_index;
-
- while (n_left_from > 0)
- {
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 bi0;
- vlib_buffer_t * b0;
- u8 * h0;
- u32 encap_index0;
- u32 next0;
- mpls_encap_t * e0;
-
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- encap_index0 = vnet_buffer(b0)->l2_classify.opaque_index;
-
- e0 = pool_elt_at_index (mm->encaps, encap_index0);
-
- vlib_buffer_advance (b0, -(word)vec_len(e0->rewrite));
- h0 = vlib_buffer_get_current (b0);
- clib_memcpy (h0, e0->rewrite, vec_len(e0->rewrite));
-
- next0 = e0->output_next_index;
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- mpls_policy_encap_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->next_index = next0;
- tr->encap_index = encap_index0;
- }
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
- vlib_node_increment_counter (vm, mpls_policy_encap_node.index,
- MPLS_POLICY_ENCAP_ERROR_PKTS_ENCAP,
- from_frame->n_vectors);
- return from_frame->n_vectors;
-}
-
-VLIB_REGISTER_NODE (mpls_policy_encap_node) = {
- .function = mpls_policy_encap,
- .name = "mpls-policy-encap",
- /* Takes a vector of packets. */
- .vector_size = sizeof (u32),
-
- .runtime_data_bytes = 0,
-
- .n_errors = MPLS_POLICY_ENCAP_N_ERROR,
- .error_strings = mpls_policy_encap_error_strings,
-
- .format_trace = format_mpls_policy_encap_trace,
-
- .n_next_nodes = MPLS_POLICY_ENCAP_N_NEXT,
- .next_nodes = {
-#define _(s,n) [MPLS_POLICY_ENCAP_NEXT_##s] = n,
- foreach_mpls_policy_encap_next
-#undef _
- },
-};
-
-VLIB_NODE_FUNCTION_MULTIARCH (mpls_policy_encap_node, mpls_policy_encap)
-
-static clib_error_t *
-mpls_policy_encap_init (vlib_main_t * vm)
-{
- mpls_main_t * mm = &mpls_main;
- clib_error_t * error;
-
- if ((error = vlib_call_init_function (vm, mpls_init)))
- return error;
-
- mm->ip4_classify_mpls_policy_encap_next_index =
- vlib_node_add_next (mm->vlib_main,
- ip4_classify_node.index,
- mpls_policy_encap_node.index);
-
- mm->ip6_classify_mpls_policy_encap_next_index =
- vlib_node_add_next (mm->vlib_main,
- ip6_classify_node.index,
- mpls_policy_encap_node.index);
-
- return 0;
-}
-
-VLIB_INIT_FUNCTION (mpls_policy_encap_init);
diff --git a/vpp-api-test/vat/api_format.c b/vpp-api-test/vat/api_format.c
index 958f75f5fa0..bc4a6fa3f3a 100644
--- a/vpp-api-test/vat/api_format.c
+++ b/vpp-api-test/vat/api_format.c
@@ -1440,8 +1440,8 @@ static void vl_api_tap_delete_reply_t_handler_json
vam->result_ready = 1;
}
-static void vl_api_mpls_ethernet_add_del_tunnel_reply_t_handler
- (vl_api_mpls_ethernet_add_del_tunnel_reply_t * mp)
+static void vl_api_mpls_tunnel_add_del_reply_t_handler
+ (vl_api_mpls_tunnel_add_del_reply_t * mp)
{
vat_main_t *vam = &vat_main;
i32 retval = ntohl (mp->retval);
@@ -1456,8 +1456,8 @@ static void vl_api_mpls_ethernet_add_del_tunnel_reply_t_handler
}
}
-static void vl_api_mpls_ethernet_add_del_tunnel_reply_t_handler_json
- (vl_api_mpls_ethernet_add_del_tunnel_reply_t * mp)
+static void vl_api_mpls_tunnel_add_del_reply_t_handler_json
+ (vl_api_mpls_tunnel_add_del_reply_t * mp)
{
vat_main_t *vam = &vat_main;
vat_json_node_t node;
@@ -1465,7 +1465,7 @@ static void vl_api_mpls_ethernet_add_del_tunnel_reply_t_handler_json
vat_json_init_object (&node);
vat_json_object_add_int (&node, "retval", ntohl (mp->retval));
vat_json_object_add_uint (&node, "tunnel_sw_if_index",
- ntohl (mp->tunnel_sw_if_index));
+ ntohl (mp->sw_if_index));
vat_json_print (vam->ofp, &node);
vat_json_free (&node);
@@ -3461,8 +3461,6 @@ _(mpls_route_add_del_reply) \
_(mpls_ip_bind_unbind_reply) \
_(proxy_arp_add_del_reply) \
_(proxy_arp_intfc_enable_disable_reply) \
-_(mpls_add_del_encap_reply) \
-_(mpls_ethernet_add_del_tunnel_2_reply) \
_(sw_interface_set_unnumbered_reply) \
_(ip_neighbor_add_del_reply) \
_(reset_vrf_reply) \
@@ -3626,11 +3624,7 @@ _(MPLS_IP_BIND_UNBIND_REPLY, mpls_ip_bind_unbind_reply) \
_(PROXY_ARP_ADD_DEL_REPLY, proxy_arp_add_del_reply) \
_(PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY, \
proxy_arp_intfc_enable_disable_reply) \
-_(MPLS_ADD_DEL_ENCAP_REPLY, mpls_add_del_encap_reply) \
-_(MPLS_ETHERNET_ADD_DEL_TUNNEL_REPLY, \
- mpls_ethernet_add_del_tunnel_reply) \
-_(MPLS_ETHERNET_ADD_DEL_TUNNEL_2_REPLY, \
- mpls_ethernet_add_del_tunnel_2_reply) \
+_(MPLS_TUNNEL_ADD_DEL_REPLY, mpls_tunnel_add_del_reply) \
_(SW_INTERFACE_SET_UNNUMBERED_REPLY, \
sw_interface_set_unnumbered_reply) \
_(IP_NEIGHBOR_ADD_DEL_REPLY, ip_neighbor_add_del_reply) \
@@ -3760,8 +3754,7 @@ _(POLICER_CLASSIFY_SET_INTERFACE_REPLY, policer_classify_set_interface_reply) \
_(POLICER_CLASSIFY_DETAILS, policer_classify_details) \
_(NETMAP_CREATE_REPLY, netmap_create_reply) \
_(NETMAP_DELETE_REPLY, netmap_delete_reply) \
-_(MPLS_ETH_TUNNEL_DETAILS, mpls_eth_tunnel_details) \
-_(MPLS_FIB_ENCAP_DETAILS, mpls_fib_encap_details) \
+_(MPLS_TUNNEL_DETAILS, mpls_tunnel_details) \
_(MPLS_FIB_DETAILS, mpls_fib_details) \
_(CLASSIFY_TABLE_IDS_REPLY, classify_table_ids_reply) \
_(CLASSIFY_TABLE_BY_INTERFACE_REPLY, classify_table_by_interface_reply) \
@@ -5800,7 +5793,6 @@ api_ip_add_del_route (vat_main_t * vam)
vl_api_ip_add_del_route_t *mp;
f64 timeout;
u32 sw_if_index = ~0, vrf_id = 0;
- u8 sw_if_index_set = 0;
u8 is_ipv6 = 0;
u8 is_local = 0, is_drop = 0;
u8 is_unreach = 0, is_prohibit = 0;
@@ -5827,15 +5819,17 @@ api_ip_add_del_route (vat_main_t * vam)
u32 classify_table_index = ~0;
u8 is_classify = 0;
u8 resolve_host = 0, resolve_attached = 0;
+ mpls_label_t *next_hop_out_label_stack = NULL;
mpls_label_t next_hop_out_label = MPLS_LABEL_INVALID;
+ mpls_label_t next_hop_via_label = MPLS_LABEL_INVALID;
/* Parse args required to build the message */
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
if (unformat (i, "%U", unformat_sw_if_index, vam, &sw_if_index))
- sw_if_index_set = 1;
+ ;
else if (unformat (i, "sw_if_index %d", &sw_if_index))
- sw_if_index_set = 1;
+ ;
else if (unformat (i, "%U", unformat_ip4_address, &v4_dst_address))
{
address_set = 1;
@@ -5908,6 +5902,8 @@ api_ip_add_del_route (vat_main_t * vam)
else if (unformat (i, "next-hop-table %d", &next_hop_table_id))
;
else if (unformat (i, "out-label %d", &next_hop_out_label))
+ vec_add1 (next_hop_out_label_stack, ntohl (next_hop_out_label));
+ else if (unformat (i, "via-label %d", &next_hop_via_label))
;
else if (unformat (i, "random"))
random_add_del = 1;
@@ -5920,20 +5916,20 @@ api_ip_add_del_route (vat_main_t * vam)
}
}
- if (resolve_attempts > 0 && sw_if_index_set == 0)
- {
- errmsg ("ARP resolution needs explicit interface or sw_if_index\n");
- return -99;
- }
-
if (!next_hop_set && !is_drop && !is_local &&
- !is_classify && !is_unreach && !is_prohibit)
+ !is_classify && !is_unreach && !is_prohibit &&
+ MPLS_LABEL_INVALID == next_hop_via_label)
{
errmsg
("next hop / local / drop / unreach / prohibit / classify not set\n");
return -99;
}
+ if (next_hop_set && MPLS_LABEL_INVALID != next_hop_via_label)
+ {
+ errmsg ("next hop and next-hop via label set\n");
+ return -99;
+ }
if (address_set == 0)
{
errmsg ("missing addresses\n");
@@ -5980,15 +5976,11 @@ api_ip_add_del_route (vat_main_t * vam)
for (j = 0; j < count; j++)
{
/* Construct the API message */
- M (IP_ADD_DEL_ROUTE, ip_add_del_route);
+ M2 (IP_ADD_DEL_ROUTE, ip_add_del_route,
+ sizeof (mpls_label_t) * vec_len (next_hop_out_label_stack));
mp->next_hop_sw_if_index = ntohl (sw_if_index);
mp->table_id = ntohl (vrf_id);
- if (resolve_attempts > 0)
- {
- mp->resolve_attempts = ntohl (resolve_attempts);
- mp->resolve_if_needed = 1;
- }
mp->create_vrf_if_needed = create_vrf_if_needed;
mp->is_add = is_add;
@@ -6006,7 +5998,15 @@ api_ip_add_del_route (vat_main_t * vam)
mp->dst_address_length = dst_address_length;
mp->next_hop_table_id = ntohl (next_hop_table_id);
mp->classify_table_index = ntohl (classify_table_index);
- mp->next_hop_out_label = ntohl (next_hop_out_label);
+ mp->next_hop_via_label = ntohl (next_hop_via_label);
+ mp->next_hop_n_out_labels = vec_len (next_hop_out_label_stack);
+ if (0 != mp->next_hop_n_out_labels)
+ {
+ memcpy (mp->next_hop_out_label_stack,
+ next_hop_out_label_stack,
+ vec_len (next_hop_out_label_stack) * sizeof (mpls_label_t));
+ vec_free (next_hop_out_label_stack);
+ }
if (is_ipv6)
{
@@ -6106,9 +6106,11 @@ api_mpls_route_add_del (vat_main_t * vam)
u32 classify_table_index = ~0;
u8 is_classify = 0;
u8 resolve_host = 0, resolve_attached = 0;
+ mpls_label_t next_hop_via_label = MPLS_LABEL_INVALID;
mpls_label_t next_hop_out_label = MPLS_LABEL_INVALID;
+ mpls_label_t *next_hop_out_label_stack = NULL;
mpls_label_t local_label = MPLS_LABEL_INVALID;
- u8 is_eos = 1;
+ u8 is_eos = 0;
u8 next_hop_proto_is_ip4 = 1;
/* Parse args required to build the message */
@@ -6168,8 +6170,10 @@ api_mpls_route_add_del (vat_main_t * vam)
}
else if (unformat (i, "next-hop-table %d", &next_hop_table_id))
;
- else if (unformat (i, "out-label %d", &next_hop_out_label))
+ else if (unformat (i, "via-label %d", &next_hop_via_label))
;
+ else if (unformat (i, "out-label %d", &next_hop_out_label))
+ vec_add1 (next_hop_out_label_stack, ntohl (next_hop_out_label));
else
{
clib_warning ("parse error '%U'", format_unformat_error, i);
@@ -6200,7 +6204,8 @@ api_mpls_route_add_del (vat_main_t * vam)
for (j = 0; j < count; j++)
{
/* Construct the API message */
- M (MPLS_ROUTE_ADD_DEL, mpls_route_add_del);
+ M2 (MPLS_ROUTE_ADD_DEL, mpls_route_add_del,
+ sizeof (mpls_label_t) * vec_len (next_hop_out_label_stack));
mp->mr_next_hop_sw_if_index = ntohl (sw_if_index);
mp->mr_table_id = ntohl (table_id);
@@ -6215,10 +6220,19 @@ api_mpls_route_add_del (vat_main_t * vam)
mp->mr_next_hop_weight = next_hop_weight;
mp->mr_next_hop_table_id = ntohl (next_hop_table_id);
mp->mr_classify_table_index = ntohl (classify_table_index);
- mp->mr_next_hop_out_label = ntohl (next_hop_out_label);
+ mp->mr_next_hop_via_label = ntohl (next_hop_via_label);
mp->mr_label = ntohl (local_label);
mp->mr_eos = is_eos;
+ mp->mr_next_hop_n_out_labels = vec_len (next_hop_out_label_stack);
+ if (0 != mp->mr_next_hop_n_out_labels)
+ {
+ memcpy (mp->mr_next_hop_out_label_stack,
+ next_hop_out_label_stack,
+ vec_len (next_hop_out_label_stack) * sizeof (mpls_label_t));
+ vec_free (next_hop_out_label_stack);
+ }
+
if (next_hop_set)
{
if (next_hop_proto_is_ip4)
@@ -6464,95 +6478,49 @@ api_proxy_arp_intfc_enable_disable (vat_main_t * vam)
}
static int
-api_mpls_add_del_encap (vat_main_t * vam)
+api_mpls_tunnel_add_del (vat_main_t * vam)
{
unformat_input_t *i = vam->input;
- vl_api_mpls_add_del_encap_t *mp;
+ vl_api_mpls_tunnel_add_del_t *mp;
f64 timeout;
- u32 vrf_id = 0;
- u32 *labels = 0;
- u32 label;
- ip4_address_t dst_address;
- u8 is_add = 1;
-
- while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (i, "vrf %d", &vrf_id))
- ;
- else if (unformat (i, "label %d", &label))
- vec_add1 (labels, ntohl (label));
- else if (unformat (i, "dst %U", unformat_ip4_address, &dst_address))
- ;
- else if (unformat (i, "del"))
- is_add = 0;
- else
- {
- clib_warning ("parse error '%U'", format_unformat_error, i);
- return -99;
- }
- }
-
- if (vec_len (labels) == 0)
- {
- errmsg ("missing encap label stack\n");
- return -99;
- }
-
- M2 (MPLS_ADD_DEL_ENCAP, mpls_add_del_encap,
- sizeof (u32) * vec_len (labels));
- mp->vrf_id = ntohl (vrf_id);
- clib_memcpy (mp->dst_address, &dst_address, sizeof (dst_address));
- mp->is_add = is_add;
- mp->nlabels = vec_len (labels);
- clib_memcpy (mp->labels, labels, sizeof (u32) * mp->nlabels);
-
- vec_free (labels);
-
- S;
- W;
- /* NOTREACHED */
- return 0;
-}
-
-static int
-api_mpls_ethernet_add_del_tunnel (vat_main_t * vam)
-{
- unformat_input_t *i = vam->input;
- vl_api_mpls_ethernet_add_del_tunnel_t *mp;
- f64 timeout;
- u32 inner_vrf_id = 0;
- ip4_address_t intfc_address;
- u8 dst_mac_address[6];
- int dst_set = 1;
- u32 tmp;
- u8 intfc_address_length = 0;
u8 is_add = 1;
u8 l2_only = 0;
- u32 tx_sw_if_index;
- int tx_sw_if_index_set = 0;
+ u32 sw_if_index = ~0;
+ u32 next_hop_sw_if_index = ~0;
+ u32 next_hop_proto_is_ip4 = 1;
- /* Shut up coverity */
- memset (dst_mac_address, 0, sizeof (dst_mac_address));
+ u32 next_hop_table_id = 0;
+ ip4_address_t v4_next_hop_address = {
+ .as_u32 = 0,
+ };
+ ip6_address_t v6_next_hop_address = { {0} };
+ mpls_label_t next_hop_out_label = MPLS_LABEL_INVALID, *labels = NULL;
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (i, "vrf %d", &inner_vrf_id))
+ if (unformat (i, "add"))
+ is_add = 1;
+ else if (unformat (i, "del sw_if_index %d", &sw_if_index))
+ is_add = 0;
+ else if (unformat (i, "sw_if_index %d", &next_hop_sw_if_index))
;
- else if (unformat (i, "adj %U/%d", unformat_ip4_address,
- &intfc_address, &tmp))
- intfc_address_length = tmp;
- else if (unformat (i, "%U", unformat_sw_if_index, vam, &tx_sw_if_index))
- tx_sw_if_index_set = 1;
- else if (unformat (i, "tx_sw_if_index %d", &tx_sw_if_index))
- tx_sw_if_index_set = 1;
- else if (unformat (i, "dst %U", unformat_ethernet_address,
- dst_mac_address))
- dst_set = 1;
+ else if (unformat (i, "via %U",
+ unformat_ip4_address, &v4_next_hop_address))
+ {
+ next_hop_proto_is_ip4 = 1;
+ }
+ else if (unformat (i, "via %U",
+ unformat_ip6_address, &v6_next_hop_address))
+ {
+ next_hop_proto_is_ip4 = 0;
+ }
else if (unformat (i, "l2-only"))
l2_only = 1;
- else if (unformat (i, "del"))
- is_add = 0;
+ else if (unformat (i, "next-hop-table %d", &next_hop_table_id))
+ ;
+ else if (unformat (i, "out-label %d", &next_hop_out_label))
+ vec_add1 (labels, ntohl (next_hop_out_label));
else
{
clib_warning ("parse error '%U'", format_unformat_error, i);
@@ -6560,107 +6528,36 @@ api_mpls_ethernet_add_del_tunnel (vat_main_t * vam)
}
}
- if (!dst_set)
- {
- errmsg ("dst (mac address) not set\n");
- return -99;
- }
- if (!tx_sw_if_index_set)
- {
- errmsg ("tx-intfc not set\n");
- return -99;
- }
-
- M (MPLS_ETHERNET_ADD_DEL_TUNNEL, mpls_ethernet_add_del_tunnel);
+ M2 (MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del,
+ sizeof (mpls_label_t) * vec_len (labels));
- mp->vrf_id = ntohl (inner_vrf_id);
- clib_memcpy (mp->adj_address, &intfc_address, sizeof (intfc_address));
- mp->adj_address_length = intfc_address_length;
- clib_memcpy (mp->dst_mac_address, dst_mac_address,
- sizeof (dst_mac_address));
- mp->tx_sw_if_index = ntohl (tx_sw_if_index);
- mp->l2_only = l2_only;
- mp->is_add = is_add;
-
- S;
- W;
- /* NOTREACHED */
- return 0;
-}
+ mp->mt_next_hop_sw_if_index = ntohl (next_hop_sw_if_index);
+ mp->mt_sw_if_index = ntohl (sw_if_index);
+ mp->mt_is_add = is_add;
+ mp->mt_l2_only = l2_only;
+ mp->mt_next_hop_table_id = ntohl (next_hop_table_id);
+ mp->mt_next_hop_proto_is_ip4 = next_hop_proto_is_ip4;
-static int
-api_mpls_ethernet_add_del_tunnel_2 (vat_main_t * vam)
-{
- unformat_input_t *i = vam->input;
- vl_api_mpls_ethernet_add_del_tunnel_2_t *mp;
- f64 timeout;
- u32 inner_vrf_id = 0;
- u32 outer_vrf_id = 0;
- ip4_address_t adj_address;
- int adj_address_set = 0;
- ip4_address_t next_hop_address;
- int next_hop_address_set = 0;
- u32 tmp;
- u8 adj_address_length = 0;
- u8 l2_only = 0;
- u8 is_add = 1;
- u32 resolve_attempts = 5;
- u8 resolve_if_needed = 1;
+ mp->mt_next_hop_n_out_labels = vec_len (labels);
- while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ if (0 != mp->mt_next_hop_n_out_labels)
{
- if (unformat (i, "inner_vrf_id %d", &inner_vrf_id))
- ;
- else if (unformat (i, "outer_vrf_id %d", &outer_vrf_id))
- ;
- else if (unformat (i, "adj %U/%d", unformat_ip4_address,
- &adj_address, &tmp))
- {
- adj_address_length = tmp;
- adj_address_set = 1;
- }
- else if (unformat (i, "next-hop %U", unformat_ip4_address,
- &next_hop_address))
- next_hop_address_set = 1;
- else if (unformat (i, "resolve-attempts %d", &resolve_attempts))
- ;
- else if (unformat (i, "resolve-if-needed %d", &tmp))
- resolve_if_needed = tmp;
- else if (unformat (i, "l2-only"))
- l2_only = 1;
- else if (unformat (i, "del"))
- is_add = 0;
- else
- {
- clib_warning ("parse error '%U'", format_unformat_error, i);
- return -99;
- }
+ clib_memcpy (mp->mt_next_hop_out_label_stack, labels,
+ sizeof (mpls_label_t) * mp->mt_next_hop_n_out_labels);
+ vec_free (labels);
}
- if (!adj_address_set)
+ if (next_hop_proto_is_ip4)
{
- errmsg ("adjacency address/mask not set\n");
- return -99;
+ clib_memcpy (mp->mt_next_hop,
+ &v4_next_hop_address, sizeof (v4_next_hop_address));
}
- if (!next_hop_address_set)
+ else
{
- errmsg ("ip4 next hop address (in outer fib) not set\n");
- return -99;
+ clib_memcpy (mp->mt_next_hop,
+ &v6_next_hop_address, sizeof (v6_next_hop_address));
}
- M (MPLS_ETHERNET_ADD_DEL_TUNNEL_2, mpls_ethernet_add_del_tunnel_2);
-
- mp->inner_vrf_id = ntohl (inner_vrf_id);
- mp->outer_vrf_id = ntohl (outer_vrf_id);
- mp->resolve_attempts = ntohl (resolve_attempts);
- mp->resolve_if_needed = resolve_if_needed;
- mp->is_add = is_add;
- mp->l2_only = l2_only;
- clib_memcpy (mp->adj_address, &adj_address, sizeof (adj_address));
- mp->adj_address_length = adj_address_length;
- clib_memcpy (mp->next_hop_ip4_address_in_outer_vrf, &next_hop_address,
- sizeof (next_hop_address));
-
S;
W;
/* NOTREACHED */
@@ -14818,34 +14715,32 @@ api_netmap_delete (vat_main_t * vam)
return 0;
}
-static void vl_api_mpls_eth_tunnel_details_t_handler
- (vl_api_mpls_eth_tunnel_details_t * mp)
+static void vl_api_mpls_tunnel_details_t_handler
+ (vl_api_mpls_tunnel_details_t * mp)
{
vat_main_t *vam = &vat_main;
+ i32 len = mp->mt_next_hop_n_labels;
i32 i;
- i32 len = ntohl (mp->nlabels);
- fformat (vam->ofp, "[%d]: dst %U, adj %U/%d, labels ",
- ntohl (mp->tunnel_index),
- format_ethernet_address, &mp->tunnel_dst_mac,
- format_ip4_address, &mp->intfc_address, ntohl (mp->mask_width));
+ fformat (vam->ofp, "[%d]: via %U %d labels ",
+	   ntohl (mp->tunnel_index),
+ format_ip4_address, mp->mt_next_hop,
+ ntohl (mp->mt_next_hop_sw_if_index));
for (i = 0; i < len; i++)
{
- fformat (vam->ofp, "%u ", ntohl (mp->labels[i]));
+ fformat (vam->ofp, "%u ", ntohl (mp->mt_next_hop_out_labels[i]));
}
fformat (vam->ofp, "\n");
- fformat (vam->ofp, " tx on %d, rx fib index %d\n",
- ntohl (mp->tx_sw_if_index), ntohl (mp->inner_fib_index));
}
-static void vl_api_mpls_eth_tunnel_details_t_handler_json
- (vl_api_mpls_eth_tunnel_details_t * mp)
+static void vl_api_mpls_tunnel_details_t_handler_json
+ (vl_api_mpls_tunnel_details_t * mp)
{
vat_main_t *vam = &vat_main;
vat_json_node_t *node = NULL;
struct in_addr ip4;
i32 i;
- i32 len = ntohl (mp->nlabels);
+ i32 len = mp->mt_next_hop_n_labels;
if (VAT_JSON_ARRAY != vam->json_tree.type)
{
@@ -14856,30 +14751,23 @@ static void vl_api_mpls_eth_tunnel_details_t_handler_json
vat_json_init_object (node);
vat_json_object_add_uint (node, "tunnel_index", ntohl (mp->tunnel_index));
- clib_memcpy (&ip4, &(mp->intfc_address), sizeof (ip4));
- vat_json_object_add_ip4 (node, "intfc_address", ip4);
- vat_json_object_add_uint (node, "inner_fib_index",
- ntohl (mp->inner_fib_index));
- vat_json_object_add_uint (node, "mask_width", ntohl (mp->mask_width));
- vat_json_object_add_uint (node, "encap_index", ntohl (mp->encap_index));
- vat_json_object_add_uint (node, "hw_if_index", ntohl (mp->hw_if_index));
- vat_json_object_add_uint (node, "l2_only", ntohl (mp->l2_only));
- vat_json_object_add_string_copy (node, "tunnel_dst_mac",
- format (0, "%U", format_ethernet_address,
- &mp->tunnel_dst_mac));
- vat_json_object_add_uint (node, "tx_sw_if_index",
- ntohl (mp->tx_sw_if_index));
+ clib_memcpy (&ip4, &(mp->mt_next_hop), sizeof (ip4));
+ vat_json_object_add_ip4 (node, "next_hop", ip4);
+ vat_json_object_add_uint (node, "next_hop_sw_if_index",
+ ntohl (mp->mt_next_hop_sw_if_index));
+ vat_json_object_add_uint (node, "l2_only", ntohl (mp->mt_l2_only));
vat_json_object_add_uint (node, "label_count", len);
for (i = 0; i < len; i++)
{
- vat_json_object_add_uint (node, "label", ntohl (mp->labels[i]));
+ vat_json_object_add_uint (node, "label",
+ ntohl (mp->mt_next_hop_out_labels[i]));
}
}
static int
-api_mpls_eth_tunnel_dump (vat_main_t * vam)
+api_mpls_tunnel_dump (vat_main_t * vam)
{
- vl_api_mpls_eth_tunnel_dump_t *mp;
+ vl_api_mpls_tunnel_dump_t *mp;
f64 timeout;
i32 index = -1;
@@ -14895,7 +14783,7 @@ api_mpls_eth_tunnel_dump (vat_main_t * vam)
fformat (vam->ofp, " tunnel_index %d\n", index);
- M (MPLS_ETH_TUNNEL_DUMP, mpls_eth_tunnel_dump);
+ M (MPLS_TUNNEL_DUMP, mpls_tunnel_dump);
mp->tunnel_index = htonl (index);
S;
@@ -14908,69 +14796,6 @@ api_mpls_eth_tunnel_dump (vat_main_t * vam)
W;
}
-static void vl_api_mpls_fib_encap_details_t_handler
- (vl_api_mpls_fib_encap_details_t * mp)
-{
- vat_main_t *vam = &vat_main;
- i32 i;
- i32 len = ntohl (mp->nlabels);
-
- fformat (vam->ofp, "table %d, dest %U, label ",
- ntohl (mp->fib_index), format_ip4_address, &mp->dest, len);
- for (i = 0; i < len; i++)
- {
- fformat (vam->ofp, "%u ", ntohl (mp->labels[i]));
- }
- fformat (vam->ofp, "\n");
-}
-
-static void vl_api_mpls_fib_encap_details_t_handler_json
- (vl_api_mpls_fib_encap_details_t * mp)
-{
- vat_main_t *vam = &vat_main;
- vat_json_node_t *node = NULL;
- i32 i;
- i32 len = ntohl (mp->nlabels);
- struct in_addr ip4;
-
- if (VAT_JSON_ARRAY != vam->json_tree.type)
- {
- ASSERT (VAT_JSON_NONE == vam->json_tree.type);
- vat_json_init_array (&vam->json_tree);
- }
- node = vat_json_array_add (&vam->json_tree);
-
- vat_json_init_object (node);
- vat_json_object_add_uint (node, "table", ntohl (mp->fib_index));
- vat_json_object_add_uint (node, "entry_index", ntohl (mp->entry_index));
- clib_memcpy (&ip4, &(mp->dest), sizeof (ip4));
- vat_json_object_add_ip4 (node, "dest", ip4);
- vat_json_object_add_uint (node, "s_bit", ntohl (mp->s_bit));
- vat_json_object_add_uint (node, "label_count", len);
- for (i = 0; i < len; i++)
- {
- vat_json_object_add_uint (node, "label", ntohl (mp->labels[i]));
- }
-}
-
-static int
-api_mpls_fib_encap_dump (vat_main_t * vam)
-{
- vl_api_mpls_fib_encap_dump_t *mp;
- f64 timeout;
-
- M (MPLS_FIB_ENCAP_DUMP, mpls_fib_encap_dump);
- S;
-
- /* Use a control ping for synchronization */
- {
- vl_api_control_ping_t *mp;
- M (CONTROL_PING, control_ping);
- S;
- }
- W;
-}
-
#define vl_api_mpls_fib_details_t_endian vl_noop_handler
#define vl_api_mpls_fib_details_t_print vl_noop_handler
@@ -16895,12 +16720,13 @@ _(mpls_route_add_del, \
"[multipath] [count <n>]") \
_(mpls_ip_bind_unbind, \
"<label> <addr/len>") \
+_(mpls_tunnel_add_del, \
+ " via <addr> [table-id <n>]\n" \
+  "[sw_if_index <id>] [l2] [del]")                                      \
_(proxy_arp_add_del, \
"<lo-ip4-addr> - <hi-ip4-addr> [vrf <n>] [del]") \
_(proxy_arp_intfc_enable_disable, \
"<intfc> | sw_if_index <id> enable | disable") \
-_(mpls_add_del_encap, \
- "label <n> dst <ip4-addr> [vrf <n>] [del]") \
_(sw_interface_set_unnumbered, \
"<intfc> | sw_if_index <id> unnum_if_index <id> [del]") \
_(ip_neighbor_add_del, \
@@ -16942,12 +16768,6 @@ _(set_arp_neighbor_limit, "arp_nbr_limit <n> [ipv6]") \
_(l2_patch_add_del, \
"rx <intfc> | rx_sw_if_index <id> tx <intfc> | tx_sw_if_index <id>\n" \
"enable | disable") \
-_(mpls_ethernet_add_del_tunnel, \
- "tx <intfc> | tx_sw_if_index <n> dst <mac-addr>\n" \
- "adj <ip4-addr>/<mw> dst <mac-addr> [del]") \
-_(mpls_ethernet_add_del_tunnel_2, \
- "inner_vrf_id <n> outer_vrf_id <n> next-hop <ip4-addr>\n" \
- "resolve-attempts <n> resolve-if-needed 0 | 1 [del]") \
_(sr_tunnel_add_del, \
"[name <name>] src <ip6-addr> dst <ip6-addr>/<mw> \n" \
"(next <ip6-addr>)+ [tag <ip6-addr>]* [clean] [reroute] \n" \
@@ -17114,8 +16934,7 @@ _(policer_classify_dump, "type [ip4|ip6|l2]") \
_(netmap_create, "name <interface name> [hw-addr <mac>] [pipe] " \
"[master|slave]") \
_(netmap_delete, "name <interface name>") \
-_(mpls_eth_tunnel_dump, "tunnel_index <tunnel-id>") \
-_(mpls_fib_encap_dump, "") \
+_(mpls_tunnel_dump, "tunnel_index <tunnel-id>") \
_(mpls_fib_dump, "") \
_(classify_table_ids, "") \
_(classify_table_by_interface, "sw_if_index <sw_if_index>") \
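All of the VAT changes above follow the same pattern for carrying a variable-length label stack: the message is allocated with extra room (the extra-size argument to M2), a count field records how many labels follow, and the labels are copied, in network byte order, into a trailing array. A stand-alone sketch of that layout, using plain malloc instead of the VAT allocator and invented struct/field names, looks like this:

/* Illustrative only: a message with a trailing, variable-length label
 * stack, mirroring the next_hop_n_out_labels / next_hop_out_label_stack
 * fields used by the reworked messages. */
#include <stdint.h>
#include <stdlib.h>
#include <arpa/inet.h>

typedef struct
{
  uint8_t n_out_labels;
  uint32_t out_label_stack[0];  /* sized at allocation time */
} example_route_msg_t;

static example_route_msg_t *
example_pack_labels (const uint32_t * labels, uint8_t n_labels)
{
  /* base message plus room for the label stack */
  example_route_msg_t *mp =
    malloc (sizeof (*mp) + n_labels * sizeof (uint32_t));
  int i;

  mp->n_out_labels = n_labels;
  for (i = 0; i < n_labels; i++)
    mp->out_label_stack[i] = htonl (labels[i]);   /* network byte order */
  return mp;
}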
diff --git a/vpp/app/vpe_cli.c b/vpp/app/vpe_cli.c
index 70fe3a77ff7..a26bf71f8af 100644
--- a/vpp/app/vpe_cli.c
+++ b/vpp/app/vpe_cli.c
@@ -93,7 +93,7 @@ virtual_ip_cmd_fn_command_fn (vlib_main_t * vm,
rpath->frp_sw_if_index = sw_if_index;
rpath->frp_fib_index = ~0;
rpath->frp_weight = 1;
- rpath->frp_label = MPLS_LABEL_INVALID;
+ rpath->frp_label_stack = NULL;
}
fib_table_entry_path_add2 (0, // default FIB table
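The one-line vpe_cli.c change tracks the type change made throughout this patch: fib_route_path_t no longer carries a single frp_label but a vector of labels in frp_label_stack. A path that previously set one out-label would now build a one-element vector, roughly as in the sketch below (not part of the patch; the FIB code is assumed to take over or copy the stack, as the handlers in api.c suggest).

/* Sketch: give a path a one-label out-label stack, the replacement for
 * the old single frp_label field. The label value is illustrative. */
#include <vnet/fib/fib_types.h>

static void
example_set_label_stack (fib_route_path_t * rpath, mpls_label_t out_label)
{
  mpls_label_t *stack = NULL;

  vec_add1 (stack, out_label);
  rpath->frp_label_stack = stack;
}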
diff --git a/vpp/vpp-api/api.c b/vpp/vpp-api/api.c
index b5253726915..77cc89f4157 100644
--- a/vpp/vpp-api/api.c
+++ b/vpp/vpp-api/api.c
@@ -55,6 +55,7 @@
#include <vnet/unix/tuntap.h>
#include <vnet/unix/tapcli.h>
#include <vnet/mpls/mpls.h>
+#include <vnet/mpls/mpls_tunnel.h>
#include <vnet/dhcp/proxy.h>
#include <vnet/dhcp/client.h>
#if IPV6SR > 0
@@ -174,9 +175,7 @@ _(TAP_DELETE, tap_delete) \
_(SW_INTERFACE_TAP_DUMP, sw_interface_tap_dump) \
_(CREATE_VLAN_SUBIF, create_vlan_subif) \
_(CREATE_SUBIF, create_subif) \
-_(MPLS_ETHERNET_ADD_DEL_TUNNEL, mpls_ethernet_add_del_tunnel) \
-_(MPLS_ETHERNET_ADD_DEL_TUNNEL_2, mpls_ethernet_add_del_tunnel_2) \
-_(MPLS_ADD_DEL_ENCAP, mpls_add_del_encap) \
+_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \
_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \
_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \
_(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \
@@ -293,10 +292,8 @@ _(POLICER_CLASSIFY_SET_INTERFACE, policer_classify_set_interface) \
_(POLICER_CLASSIFY_DUMP, policer_classify_dump) \
_(NETMAP_CREATE, netmap_create) \
_(NETMAP_DELETE, netmap_delete) \
-_(MPLS_ETH_TUNNEL_DUMP, mpls_eth_tunnel_dump) \
-_(MPLS_ETH_TUNNEL_DETAILS, mpls_eth_tunnel_details) \
-_(MPLS_FIB_ENCAP_DUMP, mpls_fib_encap_dump) \
-_(MPLS_FIB_ENCAP_DETAILS, mpls_fib_encap_details) \
+_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \
+_(MPLS_TUNNEL_DETAILS, mpls_tunnel_details) \
_(MPLS_FIB_DUMP, mpls_fib_dump) \
_(MPLS_FIB_DETAILS, mpls_fib_details) \
_(CLASSIFY_TABLE_IDS,classify_table_ids) \
@@ -350,21 +347,10 @@ typedef enum
{
RESOLVE_IP4_ADD_DEL_ROUTE = 1,
RESOLVE_IP6_ADD_DEL_ROUTE,
- RESOLVE_MPLS_ETHERNET_ADD_DEL,
} resolve_t;
typedef struct
{
- u8 resolve_type;
- union
- {
- vl_api_ip_add_del_route_t r;
- vl_api_mpls_ethernet_add_del_tunnel_2_t t;
- };
-} pending_route_t;
-
-typedef struct
-{
#define _(a) uword *a##_registration_hash; \
vpe_client_registration_t * a##_registrations;
@@ -373,9 +359,6 @@ typedef struct
/* notifications happen really early in the game */
u8 link_state_process_up;
- /* ip4 and ip6 pending route adds */
- pending_route_t *pending_routes;
-
/* ip4 arp event registration pool */
vl_api_ip4_arp_event_t *arp_events;
@@ -595,9 +578,6 @@ static int ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp);
static int ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp);
-static int mpls_ethernet_add_del_tunnel_2_t_handler
- (vl_api_mpls_ethernet_add_del_tunnel_2_t * mp);
-
void
handle_ip4_arp_event (u32 pool_index)
{
@@ -703,13 +683,7 @@ resolver_process (vlib_main_t * vm,
uword event_type;
uword *event_data = 0;
f64 timeout = 100.0;
- vpe_api_main_t *vam = &vpe_api_main;
- pending_route_t *pr;
- vl_api_ip_add_del_route_t *adr;
- vl_api_mpls_ethernet_add_del_tunnel_2_t *pme;
- u32 *resolution_failures = 0;
- int i, rv;
- clib_error_t *e;
+ int i;
while (1)
{
@@ -724,58 +698,7 @@ resolver_process (vlib_main_t * vm,
break;
case RESOLUTION_EVENT:
- for (i = 0; i < vec_len (event_data); i++)
- {
- /*
- * Resolution events can occur long after the
- * original request has timed out. $$$ add a cancel
- * mechanism..
- */
- if (pool_is_free_index (vam->pending_routes, event_data[i]))
- continue;
-
- pr = pool_elt_at_index (vam->pending_routes, event_data[i]);
- adr = &pr->r;
- pme = &pr->t;
-
- switch (pr->resolve_type)
- {
- case RESOLVE_IP4_ADD_DEL_ROUTE:
- rv = ip4_add_del_route_t_handler (adr);
- clib_warning ("resolver: add %U/%d via %U %s",
- format_ip4_address,
- (ip4_address_t *) & (adr->dst_address),
- adr->dst_address_length,
- format_ip4_address,
- (ip4_address_t *) & (adr->next_hop_address),
- (rv >= 0) ? "succeeded" : "failed");
- break;
-
- case RESOLVE_IP6_ADD_DEL_ROUTE:
- rv = ip6_add_del_route_t_handler (adr);
- clib_warning ("resolver: add %U/%d via %U %s",
- format_ip6_address,
- (ip6_address_t *) & (adr->dst_address),
- adr->dst_address_length,
- format_ip6_address,
- (ip6_address_t *) & (adr->next_hop_address),
- (rv >= 0) ? "succeeded" : "failed");
- break;
-
- case RESOLVE_MPLS_ETHERNET_ADD_DEL:
- rv = mpls_ethernet_add_del_tunnel_2_t_handler (pme);
- clib_warning ("resolver: add mpls-o-e via %U %s",
- format_ip4_address,
- (ip4_address_t *) &
- (pme->next_hop_ip4_address_in_outer_vrf),
- (rv >= 0) ? "succeeded" : "failed");
- break;
-
- default:
- clib_warning ("resolver: BOGUS TYPE %d", pr->resolve_type);
- }
- pool_put (vam->pending_routes, pr);
- }
+ clib_warning ("resolver: BOGUS TYPE");
break;
case IP4_ARP_EVENT:
@@ -788,109 +711,10 @@ resolver_process (vlib_main_t * vm,
handle_ip6_nd_event (event_data[i]);
break;
- case ~0: /* timeout, retry pending resolutions */
- /* *INDENT-OFF* */
- pool_foreach (pr, vam->pending_routes,
- ({
- int is_adr = 1;
- adr = &pr->r;
- pme = &pr->t;
-
- /* May fail, e.g. due to interface down */
- switch (pr->resolve_type)
- {
- case RESOLVE_IP4_ADD_DEL_ROUTE:
- e = ip4_probe_neighbor
- (vm, (ip4_address_t *)&(adr->next_hop_address),
- ntohl(adr->next_hop_sw_if_index));
- break;
-
- case RESOLVE_IP6_ADD_DEL_ROUTE:
- e = ip6_probe_neighbor
- (vm, (ip6_address_t *)&(adr->next_hop_address),
- ntohl(adr->next_hop_sw_if_index));
- break;
-
- case RESOLVE_MPLS_ETHERNET_ADD_DEL:
- is_adr = 0;
- e = ip4_probe_neighbor
- (vm,
- (ip4_address_t *)&(pme->next_hop_ip4_address_in_outer_vrf),
- pme->resolve_opaque);
- break;
-
- default:
- e = clib_error_return (0, "resolver: BOGUS TYPE %d",
- pr->resolve_type);
- }
- if (e)
- {
- clib_error_report (e);
- if (is_adr)
- adr->resolve_attempts = 1;
- else
- pme->resolve_attempts = 1;
- }
- if (is_adr)
- {
- adr->resolve_attempts -= 1;
- if (adr->resolve_attempts == 0)
- vec_add1 (resolution_failures,
- pr - vam->pending_routes);
- }
- else
- {
- pme->resolve_attempts -= 1;
- if (pme->resolve_attempts == 0)
- vec_add1 (resolution_failures,
- pr - vam->pending_routes);
- }
- }));
- /* *INDENT-ON* */
- for (i = 0; i < vec_len (resolution_failures); i++)
- {
- pr = pool_elt_at_index (vam->pending_routes,
- resolution_failures[i]);
- adr = &pr->r;
- pme = &pr->t;
-
- switch (pr->resolve_type)
- {
- case RESOLVE_IP4_ADD_DEL_ROUTE:
- clib_warning ("resolver: add %U/%d via %U retry failure",
- format_ip4_address,
- (ip4_address_t *) & (adr->dst_address),
- adr->dst_address_length,
- format_ip4_address,
- (ip4_address_t *) & (adr->next_hop_address));
- break;
-
- case RESOLVE_IP6_ADD_DEL_ROUTE:
- clib_warning ("resolver: add %U/%d via %U retry failure",
- format_ip6_address,
- (ip6_address_t *) & (adr->dst_address),
- adr->dst_address_length,
- format_ip6_address,
- (ip6_address_t *) & (adr->next_hop_address));
- break;
-
- case RESOLVE_MPLS_ETHERNET_ADD_DEL:
- clib_warning ("resolver: add mpls-o-e via %U retry failure",
- format_ip4_address,
- (ip4_address_t *) &
- (pme->next_hop_ip4_address_in_outer_vrf));
- break;
-
- default:
- clib_warning ("BUG");
- }
- pool_put (vam->pending_routes, pr);
- }
- vec_reset_length (resolution_failures);
+ case ~0: /* timeout */
break;
}
- if (pool_elts (vam->pending_routes) == 0)
- timeout = 100.0;
+
vec_reset_length (event_data);
}
return 0; /* or not */
@@ -921,47 +745,53 @@ add_del_route_t_handler (u8 is_multipath,
const ip46_address_t * next_hop,
u32 next_hop_sw_if_index,
u8 next_hop_fib_index,
- u32 next_hop_weight, u32 next_hop_out_label)
+ u32 next_hop_weight,
+ mpls_label_t next_hop_via_label,
+ mpls_label_t * next_hop_out_label_stack)
{
vnet_classify_main_t *cm = &vnet_classify_main;
stats_main_t *sm = &stats_main;
+ fib_route_path_flags_t path_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ fib_route_path_t path = {
+ .frp_proto = (next_hop_proto_is_ip4 ?
+ FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6),
+ .frp_addr = (NULL == next_hop ? zero_addr : *next_hop),
+ .frp_sw_if_index = next_hop_sw_if_index,
+ .frp_fib_index = next_hop_fib_index,
+ .frp_weight = next_hop_weight,
+ .frp_label_stack = next_hop_out_label_stack,
+ };
+ fib_route_path_t *paths = NULL;
- if (is_multipath)
+ if (MPLS_LABEL_INVALID != next_hop_via_label)
{
- fib_route_path_flags_t path_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ path.frp_proto = FIB_PROTOCOL_MPLS;
+ path.frp_local_label = next_hop_via_label;
+ }
+ if (is_resolve_host)
+ path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_HOST;
+ if (is_resolve_attached)
+ path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED;
+
+ path.frp_flags = path_flags;
+ if (is_multipath)
+ {
dslock (sm, 1 /* release hint */ , 10 /* tag */ );
- if (is_resolve_host)
- path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_HOST;
- if (is_resolve_attached)
- path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED;
+
+ vec_add1 (paths, path);
if (is_add)
- fib_table_entry_path_add (fib_index,
- prefix,
- FIB_SOURCE_API,
- FIB_ENTRY_FLAG_NONE,
- (next_hop_proto_is_ip4 ?
- FIB_PROTOCOL_IP4 :
- FIB_PROTOCOL_IP6),
- next_hop,
- next_hop_sw_if_index,
- next_hop_fib_index,
- next_hop_weight,
- next_hop_out_label, path_flags);
+ fib_table_entry_path_add2 (fib_index,
+ prefix,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE, paths);
else
- fib_table_entry_path_remove (fib_index,
- prefix,
- FIB_SOURCE_API,
- (next_hop_proto_is_ip4 ?
- FIB_PROTOCOL_IP4 :
- FIB_PROTOCOL_IP6),
- next_hop,
- next_hop_sw_if_index,
- next_hop_fib_index,
- next_hop_weight, path_flags);
+ fib_table_entry_path_remove2 (fib_index,
+ prefix, FIB_SOURCE_API, paths);
+ vec_free (paths);
dsunlock (sm);
return 0;
}
@@ -1025,25 +855,11 @@ add_del_route_t_handler (u8 is_multipath,
{
if (is_add)
{
- fib_route_path_flags_t path_flags = FIB_ROUTE_PATH_FLAG_NONE;
-
- if (is_resolve_host)
- path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_HOST;
- if (is_resolve_attached)
- path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED;
-
- fib_table_entry_update_one_path (fib_index,
- prefix,
- FIB_SOURCE_API,
- FIB_ENTRY_FLAG_NONE,
- (next_hop_proto_is_ip4 ?
- FIB_PROTOCOL_IP4 :
- FIB_PROTOCOL_IP6),
- next_hop,
- next_hop_sw_if_index,
- next_hop_fib_index,
- next_hop_weight,
- next_hop_out_label, path_flags);
+ vec_add1 (paths, path);
+ fib_table_entry_update (fib_index,
+ prefix,
+ FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, paths);
+ vec_free (paths);
}
else
{
@@ -1117,7 +933,8 @@ static int
ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
{
u32 fib_index, next_hop_fib_index;
- int rv;
+ mpls_label_t *label_stack = NULL;
+  int rv, ii, n_labels;
rv = add_del_route_check (FIB_PROTOCOL_IP4,
mp->table_id,
@@ -1140,6 +957,18 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
memset (&nh, 0, sizeof (nh));
memcpy (&nh.ip4, mp->next_hop_address, sizeof (nh.ip4));
+ n_labels = mp->next_hop_n_out_labels;
+ if (n_labels == 0)
+ ;
+ else if (1 == n_labels)
+ vec_add1 (label_stack, ntohl (mp->next_hop_out_label_stack[0]));
+ else
+ {
+ vec_validate (label_stack, n_labels - 1);
+ for (ii = 0; ii < n_labels; ii++)
+ label_stack[ii] = ntohl (mp->next_hop_out_label_stack[ii]);
+ }
+
return (add_del_route_t_handler (mp->is_multipath,
mp->is_add,
mp->is_drop,
@@ -1155,14 +984,16 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
ntohl (mp->next_hop_sw_if_index),
next_hop_fib_index,
mp->next_hop_weight,
- ntohl (mp->next_hop_out_label)));
+ ntohl (mp->next_hop_via_label),
+ label_stack));
}
static int
ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
{
u32 fib_index, next_hop_fib_index;
- int rv;
+ mpls_label_t *label_stack = NULL;
+  int rv, ii, n_labels;
rv = add_del_route_check (FIB_PROTOCOL_IP6,
mp->table_id,
@@ -1185,6 +1016,18 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
memset (&nh, 0, sizeof (nh));
memcpy (&nh.ip6, mp->next_hop_address, sizeof (nh.ip6));
+ n_labels = mp->next_hop_n_out_labels;
+ if (n_labels == 0)
+ ;
+ else if (1 == n_labels)
+ vec_add1 (label_stack, ntohl (mp->next_hop_out_label_stack[0]));
+ else
+ {
+ vec_validate (label_stack, n_labels - 1);
+ for (ii = 0; ii < n_labels; ii++)
+ label_stack[ii] = ntohl (mp->next_hop_out_label_stack[ii]);
+ }
+
return (add_del_route_t_handler (mp->is_multipath,
mp->is_add,
mp->is_drop,
@@ -1199,7 +1042,8 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
&nh, ntohl (mp->next_hop_sw_if_index),
next_hop_fib_index,
mp->next_hop_weight,
- ntohl (mp->next_hop_out_label)));
+ ntohl (mp->next_hop_via_label),
+ label_stack));
}
static int
@@ -1207,8 +1051,8 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm,
vl_api_mpls_route_add_del_t * mp)
{
u32 fib_index, next_hop_fib_index;
-
- int rv;
+ mpls_label_t *label_stack = NULL;
+  int rv, ii, n_labels;
fib_prefix_t pfx = {
.fp_len = 21,
@@ -1251,6 +1095,18 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm,
else
memcpy (&nh.ip6, mp->mr_next_hop, sizeof (nh.ip6));
+ n_labels = mp->mr_next_hop_n_out_labels;
+ if (n_labels == 0)
+ ;
+ else if (1 == n_labels)
+ vec_add1 (label_stack, ntohl (mp->mr_next_hop_out_label_stack[0]));
+ else
+ {
+ vec_validate (label_stack, n_labels - 1);
+ for (ii = 0; ii < n_labels; ii++)
+ label_stack[ii] = ntohl (mp->mr_next_hop_out_label_stack[ii]);
+ }
+
return (add_del_route_t_handler (mp->mr_is_multipath, mp->mr_is_add, 0, // mp->is_drop,
0, // mp->is_unreach,
0, // mp->is_prohibit,
@@ -1264,7 +1120,8 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm,
&nh, ntohl (mp->mr_next_hop_sw_if_index),
next_hop_fib_index,
mp->mr_next_hop_weight,
- ntohl (mp->mr_next_hop_out_label)));
+ ntohl (mp->mr_next_hop_via_label),
+ label_stack));
}
void
@@ -2246,169 +2103,60 @@ out:
}
static void
- vl_api_mpls_ethernet_add_del_tunnel_t_handler
- (vl_api_mpls_ethernet_add_del_tunnel_t * mp)
+vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp)
{
- vl_api_mpls_ethernet_add_del_tunnel_reply_t *rmp;
+ vl_api_mpls_tunnel_add_del_reply_t *rmp;
int rv = 0;
stats_main_t *sm = &stats_main;
u32 tunnel_sw_if_index;
+ int ii;
dslock (sm, 1 /* release hint */ , 5 /* tag */ );
- rv = vnet_mpls_ethernet_add_del_tunnel
- (mp->dst_mac_address, (ip4_address_t *) (mp->adj_address),
- (u32) (mp->adj_address_length), ntohl (mp->vrf_id),
- ntohl (mp->tx_sw_if_index),
- &tunnel_sw_if_index, mp->l2_only, mp->is_add);
-
- dsunlock (sm);
+ if (mp->mt_is_add)
+ {
+ fib_route_path_t rpath, *rpaths = NULL;
+ mpls_label_t *label_stack = NULL;
- /* *INDENT-OFF* */
- REPLY_MACRO2(VL_API_MPLS_ETHERNET_ADD_DEL_TUNNEL_REPLY,
- ({
- rmp->tunnel_sw_if_index = ntohl(tunnel_sw_if_index);
- }));
- /* *INDENT-ON* */
-}
+ memset (&rpath, 0, sizeof (rpath));
-/*
- * This piece of misery brought to you because the control-plane
- * can't figure out the tx interface + dst-mac address all by itself
- */
-static int mpls_ethernet_add_del_tunnel_2_t_handler
- (vl_api_mpls_ethernet_add_del_tunnel_2_t * mp)
-{
- pending_route_t *pr;
- vl_api_mpls_ethernet_add_del_tunnel_2_t *pme;
- vnet_main_t *vnm = vnet_get_main ();
- vlib_main_t *vm = vlib_get_main ();
- stats_main_t *sm = &stats_main;
- vpe_api_main_t *vam = &vpe_api_main;
- u32 inner_fib_index, outer_fib_index;
- ip4_main_t *im = &ip4_main;
- ip_lookup_main_t *lm = &im->lookup_main;
- ip_adjacency_t *adj = 0;
- u32 lookup_result;
- u32 tx_sw_if_index;
- u8 *dst_mac_address;
- clib_error_t *e;
- uword *p;
- int rv;
- u32 tunnel_sw_if_index;
+ if (mp->mt_next_hop_proto_is_ip4)
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP4;
+ clib_memcpy (&rpath.frp_addr.ip4,
+ mp->mt_next_hop, sizeof (rpath.frp_addr.ip4));
+ }
+ else
+ {
+ rpath.frp_proto = FIB_PROTOCOL_IP6;
+ clib_memcpy (&rpath.frp_addr.ip6,
+ mp->mt_next_hop, sizeof (rpath.frp_addr.ip6));
+ }
+ rpath.frp_sw_if_index = ntohl (mp->mt_next_hop_sw_if_index);
- p = hash_get (im->fib_index_by_table_id, ntohl (mp->outer_vrf_id));
- if (!p)
- return VNET_API_ERROR_NO_SUCH_FIB;
- else
- outer_fib_index = p[0];
+ for (ii = 0; ii < mp->mt_next_hop_n_out_labels; ii++)
+ vec_add1 (label_stack, ntohl (mp->mt_next_hop_out_label_stack[ii]));
+ vec_add1 (rpaths, rpath);
- p = hash_get (im->fib_index_by_table_id, ntohl (mp->inner_vrf_id));
- if (!p)
- return VNET_API_ERROR_NO_SUCH_INNER_FIB;
+ vnet_mpls_tunnel_add (rpaths, label_stack,
+ mp->mt_l2_only, &tunnel_sw_if_index);
+ vec_free (rpaths);
+ vec_free (label_stack);
+ }
else
- inner_fib_index = p[0];
-
- if (inner_fib_index == outer_fib_index)
- return VNET_API_ERROR_INVALID_VALUE;
-
- // FIXME not an ADJ
- lookup_result = ip4_fib_table_lookup_lb (ip4_fib_get (outer_fib_index),
- (ip4_address_t *)
- mp->next_hop_ip4_address_in_outer_vrf);
-
- adj = ip_get_adjacency (lm, lookup_result);
- tx_sw_if_index = adj->rewrite_header.sw_if_index;
-
- if (mp->is_add && mp->resolve_if_needed)
{
- if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP)
- {
- pool_get (vam->pending_routes, pr);
- pr->resolve_type = RESOLVE_MPLS_ETHERNET_ADD_DEL;
- pme = &pr->t;
- clib_memcpy (pme, mp, sizeof (*pme));
- /* recursion block, "just in case" */
- pme->resolve_if_needed = 0;
- pme->resolve_attempts = ntohl (mp->resolve_attempts);
- pme->resolve_opaque = tx_sw_if_index;
- vnet_register_ip4_arp_resolution_event
- (vnm,
- (ip4_address_t *) & (pme->next_hop_ip4_address_in_outer_vrf),
- vpe_resolver_process_node.index,
- RESOLUTION_EVENT, pr - vam->pending_routes);
-
- vlib_process_signal_event
- (vm, vpe_resolver_process_node.index,
- RESOLUTION_PENDING_EVENT, 0 /* data */ );
-
- /* The interface may be down, etc. */
- e = ip4_probe_neighbor
- (vm, (ip4_address_t *) & (mp->next_hop_ip4_address_in_outer_vrf),
- tx_sw_if_index);
-
- if (e)
- clib_error_report (e);
-
- return VNET_API_ERROR_IN_PROGRESS;
- }
+ vnet_mpls_tunnel_del (ntohl (mp->mt_sw_if_index));
}
- if (adj->lookup_next_index != IP_LOOKUP_NEXT_REWRITE)
- return VNET_API_ERROR_NEXT_HOP_NOT_IN_FIB;
-
- dst_mac_address =
- vnet_rewrite_get_data_internal
- (&adj->rewrite_header, sizeof (adj->rewrite_data));
-
- dslock (sm, 1 /* release hint */ , 10 /* tag */ );
-
- rv = vnet_mpls_ethernet_add_del_tunnel
- (dst_mac_address, (ip4_address_t *) (mp->adj_address),
- (u32) (mp->adj_address_length), ntohl (mp->inner_vrf_id),
- tx_sw_if_index, &tunnel_sw_if_index, mp->l2_only, mp->is_add);
-
dsunlock (sm);
- return rv;
-}
-
-static void
- vl_api_mpls_ethernet_add_del_tunnel_2_t_handler
- (vl_api_mpls_ethernet_add_del_tunnel_2_t * mp)
-{
- vl_api_mpls_ethernet_add_del_tunnel_reply_t *rmp;
- int rv = 0;
-
- rv = mpls_ethernet_add_del_tunnel_2_t_handler (mp);
-
- REPLY_MACRO (VL_API_MPLS_ETHERNET_ADD_DEL_TUNNEL_2_REPLY);
-}
-
-
-static void
-vl_api_mpls_add_del_encap_t_handler (vl_api_mpls_add_del_encap_t * mp)
-{
- vl_api_mpls_add_del_encap_reply_t *rmp;
- int rv;
- static u32 *labels;
- int i;
-
- vec_reset_length (labels);
-
- for (i = 0; i < mp->nlabels; i++)
- vec_add1 (labels, ntohl (mp->labels[i]));
-
- /* $$$$ fixme */
- rv = vnet_mpls_add_del_encap ((ip4_address_t *) mp->dst_address,
- ntohl (mp->vrf_id), labels,
- ~0 /* policy_tunnel_index */ ,
- 0 /* no_dst_hash */ ,
- 0 /* indexp */ ,
- mp->is_add);
-
- REPLY_MACRO (VL_API_MPLS_ADD_DEL_ENCAP_REPLY);
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_MPLS_TUNNEL_ADD_DEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl(tunnel_sw_if_index);
+ }));
+ /* *INDENT-ON* */
}
static void
@@ -4084,6 +3832,7 @@ vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t * mp)
rv = vhost_user_modify_if (vnm, vm, (char *) mp->sock_filename,
mp->is_server, sw_if_index, (u64) ~ 0,
mp->renumber, ntohl (mp->custom_dev_instance));
+
REPLY_MACRO (VL_API_MODIFY_VHOST_USER_IF_REPLY);
}
@@ -7433,84 +7182,63 @@ vl_api_netmap_delete_t_handler (vl_api_netmap_delete_t * mp)
}
static void
-vl_api_mpls_eth_tunnel_details_t_handler (vl_api_mpls_eth_tunnel_details_t *
- mp)
+vl_api_mpls_tunnel_details_t_handler (vl_api_mpls_tunnel_details_t * mp)
{
clib_warning ("BUG");
}
+typedef struct mpls_tunnel_send_walk_ctx_t_
+{
+ unix_shared_memory_queue_t *q;
+ u32 index;
+ u32 context;
+} mpls_tunnel_send_walk_ctx_t;
+
static void
-send_mpls_eth_tunnel_entry (vpe_api_main_t * am,
- unix_shared_memory_queue_t * q,
- mpls_eth_tunnel_t * et, u32 index, u32 context)
+send_mpls_tunnel_entry (u32 mti, void *arg)
{
- mpls_main_t *mm = &mpls_main;
- mpls_encap_t *e;
- int i;
+ mpls_tunnel_send_walk_ctx_t *ctx;
+ vl_api_mpls_tunnel_details_t *mp;
+ const mpls_tunnel_t *mt;
u32 nlabels;
- vl_api_mpls_eth_tunnel_details_t *mp;
- e = pool_elt_at_index (mm->encaps, et->encap_index);
- nlabels = vec_len (e->labels);
+ ctx = arg;
+
+ if (~0 != ctx->index && mti != ctx->index)
+ return;
+
+ mt = mpls_tunnel_get (mti);
+ nlabels = vec_len (mt->mt_label_stack);
mp = vl_msg_api_alloc (sizeof (*mp) + nlabels * sizeof (u32));
memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_MPLS_ETH_TUNNEL_DETAILS);
- mp->context = context;
+ mp->_vl_msg_id = ntohs (VL_API_MPLS_TUNNEL_DETAILS);
+ mp->context = ctx->context;
- mp->tunnel_index = htonl (index);
- memcpy (mp->tunnel_dst_mac, et->tunnel_dst, 6);
- mp->intfc_address = et->intfc_address.as_u32;
- mp->tx_sw_if_index = htonl (et->tx_sw_if_index);
- mp->inner_fib_index = htonl (et->inner_fib_index);
- mp->mask_width = htonl (et->mask_width);
- mp->encap_index = htonl (et->encap_index);
- mp->hw_if_index = htonl (et->hw_if_index);
- mp->l2_only = htonl (et->l2_only);
- mp->nlabels = htonl (nlabels);
+ mp->tunnel_index = ntohl (mti);
+ memcpy (mp->mt_next_hop_out_labels,
+ mt->mt_label_stack, nlabels * sizeof (u32));
- for (i = 0; i < nlabels; i++)
- {
- mp->labels[i] =
- htonl (vnet_mpls_uc_get_label
- (clib_host_to_net_u32 (e->labels[i].label_exp_s_ttl)));
- }
+ // FIXME
- vl_msg_api_send_shmem (q, (u8 *) & mp);
+ vl_msg_api_send_shmem (ctx->q, (u8 *) & mp);
}
static void
-vl_api_mpls_eth_tunnel_dump_t_handler (vl_api_mpls_eth_tunnel_dump_t * mp)
+vl_api_mpls_tunnel_dump_t_handler (vl_api_mpls_tunnel_dump_t * mp)
{
- vpe_api_main_t *am = &vpe_api_main;
unix_shared_memory_queue_t *q;
- mpls_main_t *mm = &mpls_main;
- mpls_eth_tunnel_t *et;
- u32 index = ntohl (mp->tunnel_index);
q = vl_api_client_index_to_input_queue (mp->client_index);
if (q == 0)
return;
- if (index != ~0)
- {
- if (!pool_is_free_index (mm->eth_tunnels, index))
- {
- et = pool_elt_at_index (mm->eth_tunnels, index);
- send_mpls_eth_tunnel_entry (am, q, et, et - mm->eth_tunnels,
- mp->context);
- }
- }
- else
- {
- /* *INDENT-OFF* */
- pool_foreach (et, mm->eth_tunnels,
- ({
- send_mpls_eth_tunnel_entry (am, q, et, et - mm->eth_tunnels,
- mp->context);
- }));
- /* *INDENT-ON* */
- }
+ mpls_tunnel_send_walk_ctx_t ctx = {
+ .q = q,
+ .index = ntohl (mp->tunnel_index),
+ .context = mp->context,
+ };
+ mpls_tunnel_walk (send_mpls_tunnel_entry, &ctx);
}
static void
@@ -7930,99 +7658,6 @@ vl_api_ip6_fib_dump_t_handler (vl_api_ip6_fib_dump_t * mp)
}
static void
-vl_api_mpls_fib_encap_details_t_handler (vl_api_mpls_fib_encap_details_t * mp)
-{
- clib_warning ("BUG");
-}
-
-static void
-send_mpls_fib_encap_details (vpe_api_main_t * am,
- unix_shared_memory_queue_t * q,
- show_mpls_fib_t * s, u32 context)
-{
- vl_api_mpls_fib_encap_details_t *mp;
- mpls_main_t *mm = &mpls_main;
- mpls_encap_t *e;
- int i;
- u32 nlabels;
-
- e = pool_elt_at_index (mm->encaps, s->entry_index);
- nlabels = vec_len (e->labels);
-
- mp = vl_msg_api_alloc (sizeof (*mp) + nlabels * sizeof (u32));
- memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_MPLS_FIB_ENCAP_DETAILS);
- mp->context = context;
-
- mp->fib_index = htonl (s->fib_index);
- mp->entry_index = htonl (s->entry_index);
- mp->dest = s->dest;
- mp->s_bit = htonl (s->s_bit);
-
- mp->nlabels = htonl (nlabels);
-
- for (i = 0; i < nlabels; i++)
- {
- mp->labels[i] =
- htonl (vnet_mpls_uc_get_label
- (clib_host_to_net_u32 (e->labels[i].label_exp_s_ttl)));
- }
-
- vl_msg_api_send_shmem (q, (u8 *) & mp);
-}
-
-static void
-vl_api_mpls_fib_encap_dump_t_handler (vl_api_mpls_fib_encap_dump_t * mp)
-{
- vpe_api_main_t *am = &vpe_api_main;
- unix_shared_memory_queue_t *q;
- vlib_main_t *vm = &vlib_global_main;
- u64 key;
- u32 value;
- show_mpls_fib_t *records = 0;
- show_mpls_fib_t *s;
- mpls_main_t *mm = &mpls_main;
- ip4_fib_t *rx_fib;
-
- q = vl_api_client_index_to_input_queue (mp->client_index);
- if (q == 0)
- return;
-
- /* *INDENT-OFF* */
- hash_foreach (key, value, mm->mpls_encap_by_fib_and_dest,
- ({
- vec_add2 (records, s, 1);
- s->fib_index = (u32)(key>>32);
- s->dest = (u32)(key & 0xFFFFFFFF);
- s->entry_index = (u32) value;
- }));
- /* *INDENT-ON* */
-
- if (0 == vec_len (records))
- {
- vlib_cli_output (vm, "MPLS encap table empty");
- goto out;
- }
-
- /* sort output by dst address within fib */
- vec_sort_with_function (records, mpls_dest_cmp);
- vec_sort_with_function (records, mpls_fib_index_cmp);
- vlib_cli_output (vm, "MPLS encap table");
- vlib_cli_output (vm, "%=6s%=16s%=16s", "Table", "Dest address", "Labels");
- vec_foreach (s, records)
- {
- rx_fib = ip4_fib_get (s->fib_index);
- vlib_cli_output (vm, "%=6d%=16U%=16U", rx_fib->table_id,
- format_ip4_address, &s->dest, format_mpls_encap_index,
- mm, s->entry_index);
- send_mpls_fib_encap_details (am, q, s, mp->context);
- }
-
-out:
- vec_free (records);
-}
-
-static void
vl_api_classify_table_ids_t_handler (vl_api_classify_table_ids_t * mp)
{
unix_shared_memory_queue_t *q;
@@ -9259,7 +8894,7 @@ vpe_api_hookup (vlib_main_t * vm)
/*
* Trace space for 8 MPLS encap labels, classifier mask+match
*/
- am->api_trace_cfg[VL_API_MPLS_ADD_DEL_ENCAP].size += 8 * sizeof (u32);
+ am->api_trace_cfg[VL_API_MPLS_TUNNEL_ADD_DEL].size += 8 * sizeof (u32);
am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_TABLE].size += 5 * sizeof (u32x4);
am->api_trace_cfg[VL_API_CLASSIFY_ADD_DEL_SESSION].size
+= 5 * sizeof (u32x4);
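The ip4, ip6 and mpls route handlers above each repeat the same unpacking of the on-wire label stack into a host-order vector. A small helper capturing that pattern could look like the sketch below; it is not in the patch and the name is invented, but it uses only calls already present in this file.

/* Hypothetical helper, not in the patch: convert an on-wire label stack
 * (network byte order, n_labels entries) into a host-order vector, as the
 * ip4/ip6/mpls route handlers do inline. */
static mpls_label_t *
example_unpack_label_stack (const u32 * wire_labels, u8 n_labels)
{
  mpls_label_t *label_stack = NULL;
  int ii;

  if (n_labels > 0)
    {
      vec_validate (label_stack, n_labels - 1);
      for (ii = 0; ii < n_labels; ii++)
        label_stack[ii] = ntohl (wire_labels[ii]);
    }
  return (label_stack);         /* NULL when there are no out-labels */
}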
diff --git a/vpp/vpp-api/custom_dump.c b/vpp/vpp-api/custom_dump.c
index a36a8a43af0..691defaaffe 100644
--- a/vpp/vpp-api/custom_dump.c
+++ b/vpp/vpp-api/custom_dump.c
@@ -519,9 +519,6 @@ static void *vl_api_ip_add_del_route_t_print
if (mp->create_vrf_if_needed)
s = format (s, "create-vrf ");
- if (mp->resolve_attempts != 0)
- s = format (s, "resolve-attempts %d ", ntohl (mp->resolve_attempts));
-
if (mp->next_hop_weight != 1)
s = format (s, "weight %d ", mp->next_hop_weight);
@@ -573,77 +570,25 @@ static void *vl_api_proxy_arp_intfc_enable_disable_t_print
FINISH;
}
-static void *vl_api_mpls_add_del_encap_t_print
- (vl_api_mpls_add_del_encap_t * mp, void *handle)
-{
- u8 *s;
- int i;
-
- s = format (0, "SCRIPT: mpls_add_del_encap ");
-
- s = format (s, "vrf_id %d ", ntohl (mp->vrf_id));
-
- s = format (s, "dst %U ", format_ip4_address, mp->dst_address);
-
- for (i = 0; i < mp->nlabels; i++)
- s = format (s, "label %d ", ntohl (mp->labels[i]));
-
- if (mp->is_add == 0)
- s = format (s, "del ");
-
- FINISH;
-}
-
-static void *vl_api_mpls_ethernet_add_del_tunnel_t_print
- (vl_api_mpls_ethernet_add_del_tunnel_t * mp, void *handle)
-{
- u8 *s;
-
- s = format (0, "SCRIPT: mpls_ethernet_add_del_tunnel ");
-
- s = format (s, "tx_sw_if_index %d ", ntohl (mp->tx_sw_if_index));
-
- s = format (s, "dst %U", format_ethernet_address, mp->dst_mac_address);
-
- s = format (s, "adj %U/%d ", format_ip4_address,
- (ip4_address_t *) mp->adj_address, mp->adj_address_length);
-
- s = format (s, "vrf_id %d ", ntohl (mp->vrf_id));
-
- if (mp->l2_only)
- s = format (s, "l2-only ");
-
- if (mp->is_add == 0)
- s = format (s, "del ");
-
- FINISH;
-}
-
-static void *vl_api_mpls_ethernet_add_del_tunnel_2_t_print
- (vl_api_mpls_ethernet_add_del_tunnel_2_t * mp, void *handle)
+static void *vl_api_mpls_tunnel_add_del_t_print
+ (vl_api_mpls_tunnel_add_del_t * mp, void *handle)
{
u8 *s;
- s = format (0, "SCRIPT: mpls_ethernet_add_del_tunnel_2 ");
-
- s = format (s, "adj %U/%d ", format_ip4_address,
- (ip4_address_t *) mp->adj_address, mp->adj_address_length);
+ s = format (0, "SCRIPT: mpls_tunnel_add_del ");
- s = format (s, "next-hop %U ", format_ip4_address,
- (ip4_address_t *) mp->next_hop_ip4_address_in_outer_vrf);
+ if (mp->mt_next_hop_sw_if_index)
+ s = format (s, "sw_if_index %d ", ntohl (mp->mt_next_hop_sw_if_index));
- s = format (s, "inner_vrf_id %d ", ntohl (mp->inner_vrf_id));
-
- s = format (s, "outer_vrf_id %d ", ntohl (mp->outer_vrf_id));
-
- s = format (s, "resolve-if-needed %d ", mp->resolve_if_needed);
-
- s = format (s, "resolve-attempts %d ", ntohl (mp->resolve_attempts));
+ if (mp->mt_next_hop_proto_is_ip4)
+ s = format (s, "%U ", format_ip4_address, mp->mt_next_hop);
+ else
+ s = format (s, "%U ", format_ip6_address, mp->mt_next_hop);
- if (mp->l2_only)
+ if (mp->mt_l2_only)
s = format (s, "l2-only ");
- if (mp->is_add == 0)
+ if (mp->mt_is_add == 0)
s = format (s, "del ");
FINISH;
@@ -2053,28 +1998,18 @@ static void *vl_api_sw_interface_clear_stats_t_print
FINISH;
}
-static void *vl_api_mpls_eth_tunnel_dump_t_print
- (vl_api_mpls_eth_tunnel_dump_t * mp, void *handle)
+static void *vl_api_mpls_tunnel_dump_t_print
+ (vl_api_mpls_tunnel_dump_t * mp, void *handle)
{
u8 *s;
- s = format (0, "SCRIPT: mpls_eth_tunnel_dump ");
+ s = format (0, "SCRIPT: mpls_tunnel_dump ");
s = format (s, "tunnel_index %d ", ntohl (mp->tunnel_index));
FINISH;
}
-static void *vl_api_mpls_fib_encap_dump_t_print
- (vl_api_mpls_fib_encap_dump_t * mp, void *handle)
-{
- u8 *s;
-
- s = format (0, "SCRIPT: mpls_fib_encap_dump ");
-
- FINISH;
-}
-
static void *vl_api_mpls_fib_dump_t_print
(vl_api_mpls_fib_dump_t * mp, void *handle)
{
@@ -2975,9 +2910,7 @@ _(SW_INTERFACE_TAP_DUMP, sw_interface_tap_dump) \
_(IP_ADD_DEL_ROUTE, ip_add_del_route) \
_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \
_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \
-_(MPLS_ADD_DEL_ENCAP, mpls_add_del_encap) \
-_(MPLS_ETHERNET_ADD_DEL_TUNNEL, mpls_ethernet_add_del_tunnel) \
-_(MPLS_ETHERNET_ADD_DEL_TUNNEL_2, mpls_ethernet_add_del_tunnel_2) \
+_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \
_(SW_INTERFACE_SET_UNNUMBERED, sw_interface_set_unnumbered) \
_(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \
_(RESET_VRF, reset_vrf) \
@@ -3053,9 +2986,8 @@ _(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \
_(AF_PACKET_CREATE, af_packet_create) \
_(AF_PACKET_DELETE, af_packet_delete) \
_(SW_INTERFACE_CLEAR_STATS, sw_interface_clear_stats) \
-_(MPLS_ETH_TUNNEL_DUMP, mpls_eth_tunnel_dump) \
-_(MPLS_FIB_ENCAP_DUMP, mpls_fib_encap_dump) \
_(MPLS_FIB_DUMP, mpls_fib_dump) \
+_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \
_(CLASSIFY_TABLE_IDS,classify_table_ids) \
_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \
_(CLASSIFY_TABLE_INFO,classify_table_info) \
diff --git a/vpp/vpp-api/test_client.c b/vpp/vpp-api/test_client.c
index cb265ee5c69..df608d4f9d0 100644
--- a/vpp/vpp-api/test_client.c
+++ b/vpp/vpp-api/test_client.c
@@ -224,13 +224,6 @@ vl_api_create_vlan_subif_reply_t_handler (vl_api_create_vlan_subif_reply_t *
ntohl (mp->retval), ntohl (mp->sw_if_index));
}
-static void
-vl_api_mpls_add_del_encap_reply_t_handler (vl_api_mpls_add_del_encap_reply_t *
- mp)
-{
- fformat (stdout, "add del mpls label reply %d\n", ntohl (mp->retval));
-}
-
static void vl_api_proxy_arp_add_del_reply_t_handler
(vl_api_proxy_arp_add_del_reply_t * mp)
{
@@ -590,7 +583,6 @@ _(SW_INTERFACE_ADD_DEL_ADDRESS_REPLY, sw_interface_add_del_address_reply) \
_(SW_INTERFACE_SET_TABLE_REPLY, sw_interface_set_table_reply) \
_(TAP_CONNECT_REPLY, tap_connect_reply) \
_(CREATE_VLAN_SUBIF_REPLY, create_vlan_subif_reply) \
-_(MPLS_ADD_DEL_ENCAP_REPLY, mpls_add_del_encap_reply) \
_(PROXY_ARP_ADD_DEL_REPLY, proxy_arp_add_del_reply) \
_(PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY, proxy_arp_intfc_enable_disable_reply) \
_(IP_NEIGHBOR_ADD_DEL_REPLY, ip_neighbor_add_del_reply) \
@@ -737,9 +729,6 @@ add_del_ip4_route (test_main_t * tm, int enable_disable)
mp->context = 0xdeadbeef;
mp->table_id = ntohl (0);
mp->create_vrf_if_needed = 1;
- /* Arp, please, if needed */
- mp->resolve_if_needed = 1;
- mp->resolve_attempts = ntohl (10);
mp->next_hop_sw_if_index = ntohl (5);
mp->is_add = enable_disable;
diff --git a/vpp/vpp-api/vpe.api b/vpp/vpp-api/vpe.api
index bcc3f4ec1b3..2da94215f1e 100644
--- a/vpp/vpp-api/vpe.api
+++ b/vpp/vpp-api/vpe.api
@@ -361,7 +361,9 @@ define create_vlan_subif_reply
 @param mr_next_hop[16] - the next-hop address
@param mr_next_hop_sw_if_index - the next-hop SW interface
@param mr_next_hop_table_id - the next-hop table-id (if appropriate)
- @param mr_next_hop_out_label - the next-hop output label
+ @param mr_next_hop_n_out_labels - the number of labels in the label stack
+ @param mr_next_hop_out_label_stack - the next-hop output label stack, outermost first
+ @param mr_next_hop_via_label - the next-hop is resolved via a local label
*/
define mpls_route_add_del
{
@@ -380,9 +382,11 @@ define mpls_route_add_del
u8 mr_next_hop_proto_is_ip4;
u8 mr_next_hop_weight;
u8 mr_next_hop[16];
+ u8 mr_next_hop_n_out_labels;
u32 mr_next_hop_sw_if_index;
u32 mr_next_hop_table_id;
- u32 mr_next_hop_out_label;
+ u32 mr_next_hop_via_label;
+ u32 mr_next_hop_out_label_stack[mr_next_hop_n_out_labels];
};
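For reference, a client fills the new variable-length label stack roughly as follows (a minimal C sketch, assuming the API-compiler-generated vl_api_mpls_route_add_del_t type and a hypothetical msg_alloc() helper; only the next-hop fields from this hunk are shown, and u32 fields travel in network byte order as elsewhere in the binary API):

/* Sketch: a path via 10.1.1.1 out sw_if_index 5, pushing label 33 over 34.
 * msg_alloc() is a hypothetical stand-in for the client's message
 * allocator, sized here for the trailing variable-length label array. */
u8 nh[4] = { 10, 1, 1, 1 };
u8 n_labels = 2;
vl_api_mpls_route_add_del_t *mp =
    msg_alloc (sizeof (*mp) + n_labels * sizeof (u32));

mp->mr_next_hop_proto_is_ip4 = 1;
mp->mr_next_hop_weight = 1;
memcpy (mp->mr_next_hop, nh, 4);                  /* IPv4 in the first 4 bytes */
mp->mr_next_hop_sw_if_index = htonl (5);
mp->mr_next_hop_table_id = htonl (0);
mp->mr_next_hop_n_out_labels = n_labels;
mp->mr_next_hop_out_label_stack[0] = htonl (33);  /* outermost label */
mp->mr_next_hop_out_label_stack[1] = htonl (34);

mr_next_hop_via_label is the alternative to an adjacent next-hop: set to a local label, the route recurses via that label (its "unset" sentinel is not shown here).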
/** \brief Reply for MPLS route add / del request
@@ -532,16 +536,92 @@ define mpls_ip_bind_unbind_reply
i32 retval;
};
+/** \brief MPLS tunnel add / del request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mt_is_add - Is this a tunnel add or delete
+ @param mt_sw_if_index - The SW interface index of the tunnel to delete
+ @param mt_next_hop_proto_is_ip4 - The next-hop is IPV4
+ @param mt_next_hop_weight - The weight, for UCMP
+ @param mt_next_hop[16] - the next-hop address
+ @param mt_next_hop_sw_if_index - the next-hop SW interface
+ @param mt_next_hop_table_id - the next-hop table-id (if appropriate)
+ @param mt_next_hop_n_out_labels - the number of next-hop output labels
+ @param mt_next_hop_out_label_stack - the next-hop output label stack, outermost first
+*/
+define mpls_tunnel_add_del
+{
+ u32 client_index;
+ u32 context;
+ u32 mt_sw_if_index;
+ u8 mt_is_add;
+ u8 mt_l2_only;
+ u8 mt_next_hop_proto_is_ip4;
+ u8 mt_next_hop_weight;
+ u8 mt_next_hop[16];
+ u8 mt_next_hop_n_out_labels;
+ u32 mt_next_hop_sw_if_index;
+ u32 mt_next_hop_table_id;
+ u32 mt_next_hop_out_label_stack[mt_next_hop_n_out_labels];
+};
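The same trailing-array convention applies to the tunnel request: mt_next_hop_n_out_labels u32 labels follow the fixed fields, outermost first. A minimal C sketch of an add, assuming the generated vl_api_mpls_tunnel_add_del_t type and a zero-initialised message from the same hypothetical msg_alloc() helper:

/* Sketch: create an MPLS tunnel via next-hop 10.1.1.2 out sw_if_index 1,
 * imposing labels 44 (outermost) and 46. */
u8 nh[4] = { 10, 1, 1, 2 };
u8 n_labels = 2;
vl_api_mpls_tunnel_add_del_t *mp =
    msg_alloc (sizeof (*mp) + n_labels * sizeof (u32));

mp->mt_is_add = 1;           /* on delete, mt_sw_if_index selects the tunnel instead */
mp->mt_l2_only = 0;
mp->mt_next_hop_proto_is_ip4 = 1;
mp->mt_next_hop_weight = 1;
memcpy (mp->mt_next_hop, nh, 4);
mp->mt_next_hop_sw_if_index = htonl (1);
mp->mt_next_hop_table_id = htonl (0);
mp->mt_next_hop_n_out_labels = n_labels;
mp->mt_next_hop_out_label_stack[0] = htonl (44);
mp->mt_next_hop_out_label_stack[1] = htonl (46);

The reply defined next returns the sw_if_index of the tunnel interface that was created.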
+
+/** \brief Reply for MPLS tunnel add / del request
+ @param context - returned sender context, to match reply w/ request
+ @param retval - return code
+ @param sw_if_index - SW interface index of the tunnel created
+*/
+define mpls_tunnel_add_del_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Dump mpls tunnel table
+ @param client_index - opaque cookie to identify the sender
+ @param tunnel_index - tunnel identifier or -1 in case of all tunnels
+*/
+define mpls_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+ i32 tunnel_index;
+};
+
+/** \brief mpls tunnel operational state response
+ @param tunnel_index - tunnel identifier
+ @param mt_l2_only - the tunnel is used for L2 payloads only
+ @param mt_sw_if_index - SW interface index of the tunnel
+ @param mt_next_hop_proto_is_ip4 - the next-hop is IPv4
+ @param mt_next_hop[16] - the next-hop address
+ @param mt_next_hop_sw_if_index - the next-hop SW interface
+ @param mt_next_hop_table_id - the next-hop table-id (if appropriate)
+ @param mt_next_hop_n_labels - number of resolved labels
+ @param mt_next_hop_out_labels - the resolved label stack, outermost first
+*/
+define mpls_tunnel_details
+{
+ u32 context;
+ u32 tunnel_index;
+ u8 mt_l2_only;
+ u32 mt_sw_if_index;
+ u8 mt_next_hop_proto_is_ip4;
+ u8 mt_next_hop[16];
+ u32 mt_next_hop_sw_if_index;
+ u32 mt_next_hop_table_id;
+ u32 mt_next_hop_n_labels;
+ u32 mt_next_hop_out_labels[mt_next_hop_n_labels];
+};
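A dump request selects one tunnel, or all of them with a tunnel_index of -1; one mpls_tunnel_details reply is expected per tunnel, with the resolved labels in a trailing array of mt_next_hop_n_labels entries, outermost first. A minimal C sketch of the request, again assuming the generated type and the hypothetical msg_alloc() helper:

/* Sketch: ask for details of every MPLS tunnel. */
vl_api_mpls_tunnel_dump_t *mp = msg_alloc (sizeof (*mp));

mp->tunnel_index = htonl (~0);   /* -1 in network byte order: all tunnels */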
+
/** \brief Add / del route request
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
@param sw_if_index - software index of the new vlan's parent interface
@param vrf_id - fib table /vrf associated with the route
@param lookup_in_vrf -
- @param resolve_attempts -
@param classify_table_index -
@param create_vrf_if_needed -
- @param resolve_if_needed -
@param is_add - 1 if adding the route, 0 if deleting
@param is_drop - Drop the packet
@param is_unreach - Drop the packet and rate limit send ICMP unreachable
@@ -555,6 +635,9 @@ define mpls_ip_bind_unbind_reply
@param dst_address_length -
@param dst_address[16] -
@param next_hop_address[16] -
+ @param next_hop_n_out_labels - the number of labels in the label stack
+ @param next_hop_out_label_stack - the next-hop output label stack, outermost first
+ @param next_hop_via_label - the next-hop is resolved via a local label
*/
define ip_add_del_route
{
@@ -562,12 +645,9 @@ define ip_add_del_route
u32 context;
u32 next_hop_sw_if_index;
u32 table_id;
- u32 resolve_attempts;
u32 classify_table_index;
- u32 next_hop_out_label;
u32 next_hop_table_id;
u8 create_vrf_if_needed;
- u8 resolve_if_needed;
u8 is_add;
u8 is_drop;
u8 is_unreach;
@@ -584,6 +664,9 @@ define ip_add_del_route
u8 dst_address_length;
u8 dst_address[16];
u8 next_hop_address[16];
+ u8 next_hop_n_out_labels;
+ u32 next_hop_via_label;
+ u32 next_hop_out_label_stack[next_hop_n_out_labels];
};
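IP routes gain the same trailing label stack: next_hop_n_out_labels u32 labels follow the fixed fields, outermost first, and next_hop_via_label offers recursion via a local label instead of an IP next-hop. A minimal C sketch of an add, assuming the generated vl_api_ip_add_del_route_t type and the hypothetical msg_alloc() helper; only fields visible in the definition above are filled:

/* Sketch: 10.0.0.0/24 via 192.168.1.1 out sw_if_index 5, pushing label 99. */
u8 dst[4] = { 10, 0, 0, 0 };
u8 nh[4] = { 192, 168, 1, 1 };
u8 n_labels = 1;
vl_api_ip_add_del_route_t *mp =
    msg_alloc (sizeof (*mp) + n_labels * sizeof (u32));

mp->is_add = 1;
mp->table_id = htonl (0);
mp->next_hop_sw_if_index = htonl (5);
mp->dst_address_length = 24;
memcpy (mp->dst_address, dst, 4);        /* IPv4 occupies the first 4 bytes */
memcpy (mp->next_hop_address, nh, 4);
mp->next_hop_n_out_labels = n_labels;
mp->next_hop_out_label_stack[0] = htonl (99);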
/** \brief Reply for add / del route request
@@ -596,37 +679,6 @@ define ip_add_del_route_reply
i32 retval;
};
-/** \brief Add / del MPLS encapsulation request
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param vrf_id - vrf id
- @param dst_address[4] -
- @param is_add - 1 if adding the encap, 0 if deleting
- @param nlabels - number of labels
- @param labels - array of labels
-*/
-define mpls_add_del_encap
-{
- u32 client_index;
- u32 context;
- u32 vrf_id;
- u8 dst_address[4];
- /* 1 = add, 0 = delete */
- u8 is_add;
- u8 nlabels;
- u32 labels[0];
-};
-
-/** \brief Reply for add / del encapsulation request
- @param context - returned sender context, to match reply w/ request
- @param retval - return code
-*/
-define mpls_add_del_encap_reply
-{
- u32 context;
- i32 retval;
-};
-
/** \brief Proxy ARP add / del request
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
@@ -1508,78 +1560,6 @@ define sw_interface_set_vxlan_bypass_reply
i32 retval;
};
-/** \brief MPLS Ethernet add / del tunnel request
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param vrf_id - vrf_id, only for IP4
- @param sw_if_index - interface used to reach neighbor
- @param is_add - add if set, tunnel delete if 0
- @param dst_mac_address -
- @param adj_address -
- @param adj_address_length -
-*/
-define mpls_ethernet_add_del_tunnel
-{
- u32 client_index;
- u32 context;
- u32 vrf_id;
- u32 tx_sw_if_index;
- u8 is_add;
- u8 l2_only;
- u8 dst_mac_address[6];
- u8 adj_address[4];
- u8 adj_address_length;
-};
-
-/** \brief Reply for MPLS Ethernet add / delete tunnel request
- @param context - sender context, to match reply w/ request
- @param retval - return code for the request
-*/
-define mpls_ethernet_add_del_tunnel_reply
-{
- u32 context;
- i32 retval;
- u32 tunnel_sw_if_index;
-};
-/** \brief MPLS Ethernet add/ del tunnel 2
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param inner_vrf_id -
- @param outer_vrf_id -
- @param resolve_attempts -
- @param resolve_opaque -
- @param resolve_if_needed -
- @param is_add -
- @param adj_address -
- @param adj_address_length -
- @param next_hop_ip4_address_in_outer_vrf -
-*/
-define mpls_ethernet_add_del_tunnel_2
-{
- u32 client_index;
- u32 context;
- u32 inner_vrf_id;
- u32 outer_vrf_id;
- u32 resolve_attempts;
- u32 resolve_opaque; /* no need to set this */
- u8 resolve_if_needed;
- u8 is_add;
- u8 l2_only;
- u8 adj_address[4];
- u8 adj_address_length;
- u8 next_hop_ip4_address_in_outer_vrf[4];
-};
-
-/** \brief MPLS Ethernet add/ del tunnel 2
- @param context - sender context, to match reply w/ request
- @param retval - return code for add /del request
-*/
-define mpls_ethernet_add_del_tunnel_2_reply
-{
- u32 context;
- i32 retval;
-};
-
/** \brief Set L2 XConnect between two interfaces request
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
@@ -4567,118 +4547,6 @@ define netmap_delete_reply
i32 retval;
};
-/** \brief Dump mpls gre tunnel table
- @param client_index - opaque cookie to identify the sender
- @param tunnel_index - gre tunnel identifier or -1 in case of all tunnels
-*/
-define mpls_gre_tunnel_dump
-{
- u32 client_index;
- u32 context;
- i32 tunnel_index;
-};
-
-/** \brief mpls gre tunnel operational state response
- @param tunnel_index - gre tunnel identifier
- @param intfc_address - interface ipv4 addr
- @param mask_width - interface ipv4 addr mask
- @param hw_if_index - interface id
- @param l2_only -
- @param tunnel_src - tunnel source ipv4 addr
- @param tunnel_dst - tunnel destination ipv4 addr
- @param outer_fib_index - gre tunnel identifier
- @param encap_index - reference to mpls label table
- @param nlabels - number of resolved labels
- @param labels - resolved labels
-*/
-define mpls_gre_tunnel_details
-{
- u32 context;
- u32 tunnel_index;
-
- u32 intfc_address;
- u32 inner_fib_index;
- u32 mask_width;
- u32 encap_index;
- u32 hw_if_index;
- u8 l2_only;
- u32 tunnel_src;
- u32 tunnel_dst;
- u32 outer_fib_index;
- u32 nlabels;
- u32 labels[nlabels];
-};
-
-/** \brief Dump mpls eth tunnel table
- @param client_index - opaque cookie to identify the sender
- @param tunnel_index - eth tunnel identifier or -1 in case of all tunnels
-*/
-define mpls_eth_tunnel_dump
-{
- u32 client_index;
- u32 context;
- i32 tunnel_index;
-};
-
-/** \brief mpls eth tunnel operational state response
- @param tunnel_index - eth tunnel identifier
- @param intfc_address - interface ipv4 addr
- @param mask_width - interface ipv4 addr mask
- @param hw_if_index - interface id
- @param l2_only -
- @param tunnel_dst_mac -
- @param tx_sw_if_index -
- @param encap_index - reference to mpls label table
- @param nlabels - number of resolved labels
- @param labels - resolved labels
-*/
-define mpls_eth_tunnel_details
-{
- u32 context;
- u32 tunnel_index;
-
- u32 intfc_address;
- u32 inner_fib_index;
- u32 mask_width;
- u32 encap_index;
- u32 hw_if_index;
- u8 l2_only;
- u8 tunnel_dst_mac[6];
- u32 tx_sw_if_index;
- u32 nlabels;
- u32 labels[nlabels];
-};
-
-/** \brief Dump mpls fib table
- @param client_index - opaque cookie to identify the sender
- @param fib_index - mpls fib entry identifier or -1 in case of all entries
-*/
-define mpls_fib_encap_dump
-{
- u32 client_index;
- u32 context;
-};
-
-/** \brief mpls fib encap table response
- @param fib_index - fib table id
- @param dest - destination ipv4 addr
- @param s_bit -
- @param entry_index - reference to mpls label table
- @param nlabels - number of resolved labels
- @param labels - resolved labels
-*/
-define mpls_fib_encap_details
-{
- u32 context;
-
- u32 fib_index;
- u32 entry_index;
- u32 dest;
- u32 s_bit;
- u32 nlabels;
- u32 labels[nlabels];
-};
-
/** \brief Classify get table IDs request
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request