author     Neale Ranns <nranns@cisco.com>    2018-08-30 06:12:27 -0700
committer  Neale Ranns <nranns@cisco.com>    2018-08-31 09:03:07 +0000
commit     2da975c4dde45b1421998e54c107cb24d01d10d9 (patch)
tree       2b26f835004209c611a846223d341a78d053a0e8
parent     fe47e29fc6ddcb664f76f4ebbfd2593cb0282e43 (diff)
SR-MPLS: fixes and tests
- the FIB path takes a vector of type fib_mpls_label_t, not u32, so the
  un-type-safe vec_add did not work
- write some SR-MPLS tests
- allow an MPLS tunnel to resolve through an SR BSID

Change-Id: I2a18b9a9bf43584100ac269c4ebc286c9e3b3ea5
Signed-off-by: Neale Ranns <nranns@cisco.com>
(cherry picked from commit 7c922dc404c2c0a2d67d53ca05db1c1ae1598f44)
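A note on the first fix: VPP's vec_add() appends N elements whose size is taken from the destination vector's element type, so handing it a u32 label array when the destination holds fib_mpls_label_t entries misreads the source data. The following is a minimal stand-alone sketch of that pitfall and of the explicit per-element conversion the patch adopts; the struct, plain arrays and names used here are simplified stand-ins for illustration, not the real VPP types or vec.h macros.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* illustrative stand-in for fib_mpls_label_t (the real type, with its
     * exp/ttl/mode fields, lives in the VPP FIB headers) */
    typedef struct
    {
      uint32_t fml_value;
      uint8_t fml_ttl;
      uint8_t fml_exp;
    } label_entry_t;

    int
    main (void)
    {
      /* sl[0] becomes the path's local label; sl[1..3] should become the
       * out-label stack.  The trailing zeros only keep the "broken" raw
       * copy below inside the array bounds for this demo. */
      uint32_t sl[] = { 16, 32, 33, 34, 0, 0, 0, 0 };
      label_entry_t stack[3];
      int ii;

      /* Broken pattern (what the untyped vec_add amounted to): copy three
       * elements of the *destination* element size from a u32 source, so
       * label values land in the wrong fields/entries. */
      memcpy (stack, sl + 1, 3 * sizeof (label_entry_t));
      printf ("raw copy:   %u %u %u\n", stack[0].fml_value,
              stack[1].fml_value, stack[2].fml_value);

      /* Fixed pattern, mirroring the loop the patch adds to create_sl():
       * build each typed entry explicitly from the u32 label value. */
      for (ii = 0; ii < 3; ii++)
        {
          memset (&stack[ii], 0, sizeof (stack[ii]));
          stack[ii].fml_value = sl[ii + 1];
        }
      printf ("typed copy: %u %u %u\n", stack[0].fml_value,
              stack[1].fml_value, stack[2].fml_value);

      return 0;
    }

Building each fib_mpls_label_t explicitly also leaves room to set per-label TTL/EXP/mode fields, which a raw byte copy of u32 values never could.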
-rw-r--r--   src/vnet/dpo/mpls_label_dpo.c         2
-rw-r--r--   src/vnet/mpls/mpls.api                1
-rw-r--r--   src/vnet/mpls/mpls_api.c              9
-rwxr-xr-x   src/vnet/srmpls/sr_mpls_policy.c     21
-rw-r--r--   test/test_srmpls.py                 271
-rw-r--r--   test/vpp_mpls_tunnel_interface.py     2
-rw-r--r--   test/vpp_papi_provider.py            13
7 files changed, 316 insertions, 3 deletions
diff --git a/src/vnet/dpo/mpls_label_dpo.c b/src/vnet/dpo/mpls_label_dpo.c
index 0a7063cc99a..bee155b9035 100644
--- a/src/vnet/dpo/mpls_label_dpo.c
+++ b/src/vnet/dpo/mpls_label_dpo.c
@@ -205,7 +205,7 @@ format_mpls_label_dpo (u8 *s, va_list *args)
     }
   mld = mpls_label_dpo_get(index);
-  s = format(s, "mpls-label[%U%d]:",
+  s = format(s, "mpls-label[%U@%d]:",
              format_mpls_label_dpo_flags,
              (int) mld->mld_flags, index);
diff --git a/src/vnet/mpls/mpls.api b/src/vnet/mpls/mpls.api
index 6047d255aa7..7ab0f3750cc 100644
--- a/src/vnet/mpls/mpls.api
+++ b/src/vnet/mpls/mpls.api
@@ -54,6 +54,7 @@ define mpls_tunnel_add_del
   u8 mt_next_hop_preference;
   u8 mt_next_hop[16];
   u8 mt_next_hop_n_out_labels;
+  u32 mt_next_hop_via_label;
   u32 mt_next_hop_sw_if_index;
   u32 mt_next_hop_table_id;
   vl_api_fib_mpls_label_t mt_next_hop_out_label_stack[mt_next_hop_n_out_labels];
diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c
index 9f5100a7f83..47a3408520e 100644
--- a/src/vnet/mpls/mpls_api.c
+++ b/src/vnet/mpls/mpls_api.c
@@ -306,6 +306,7 @@ vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp)
   u32 tunnel_sw_if_index;
   int ii;
   fib_route_path_t rpath, *rpaths = NULL;
+  u32 next_hop_via_label;
   memset (&rpath, 0, sizeof (rpath));
@@ -326,6 +327,14 @@ vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp)
   rpath.frp_sw_if_index = ntohl (mp->mt_next_hop_sw_if_index);
   rpath.frp_weight = 1;
+  next_hop_via_label = ntohl (mp->mt_next_hop_via_label);
+  if ((MPLS_LABEL_INVALID != next_hop_via_label) && (0 != next_hop_via_label))
+    {
+      rpath.frp_proto = DPO_PROTO_MPLS;
+      rpath.frp_local_label = next_hop_via_label;
+      rpath.frp_eos = MPLS_NON_EOS;
+    }
+
   if (mp->mt_is_add)
     {
       for (ii = 0; ii < mp->mt_next_hop_n_out_labels; ii++)
diff --git a/src/vnet/srmpls/sr_mpls_policy.c b/src/vnet/srmpls/sr_mpls_policy.c
index 5bb7fb2bff5..86cd169716a 100755
--- a/src/vnet/srmpls/sr_mpls_policy.c
+++ b/src/vnet/srmpls/sr_mpls_policy.c
@@ -61,6 +61,7 @@ create_sl (mpls_sr_policy_t * sr_policy, mpls_label_t * sl, u32 weight)
 {
   mpls_sr_main_t *sm = &sr_mpls_main;
   mpls_sr_sl_t *segment_list;
+  u32 ii;
   pool_get (sm->sid_lists, segment_list);
   memset (segment_list, 0, sizeof (*segment_list));
@@ -85,8 +86,24 @@ create_sl (mpls_sr_policy_t * sr_policy, mpls_label_t * sl, u32 weight)
     .frp_local_label = sl[0],
   };
-  if (vec_len (sl) - 1)
-    vec_add (path.frp_label_stack, sl + 1, vec_len (sl) - 1);
+  if (vec_len (sl) > 1)
+    {
+      vec_validate (path.frp_label_stack, vec_len (sl) - 2);
+      for (ii = 1; ii < vec_len (sl); ii++)
+        {
+          path.frp_label_stack[ii - 1].fml_value = sl[ii];
+        }
+    }
+  else
+    {
+      /*
+       * add an implicit NULL label to allow non-eos recursion
+       */
+      fib_mpls_label_t lbl = {
+        .fml_value = MPLS_IETF_IMPLICIT_NULL_LABEL,
+      };
+      vec_add1 (path.frp_label_stack, lbl);
+    }
   fib_route_path_t *paths = NULL;
   vec_add1 (paths, path);
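To make the create_sl() branches above concrete: segments after the first become entries in the path's out-label stack, while a single-segment binding SID gets one implicit-NULL entry (MPLS label 3, RFC 3032) so that an MPLS tunnel recursing through the non-eos BSID label still finds a label entry to resolve through. Below is a small stand-alone sketch of that stack construction; the helper, struct and fixed-size array are illustrative stand-ins, not the VPP fib_route_path_t/vec.h code.

    #include <stdint.h>
    #include <stdio.h>

    #define IMPLICIT_NULL_LABEL 3   /* RFC 3032 reserved label value */

    /* illustrative stand-in for fib_mpls_label_t */
    typedef struct
    {
      uint32_t fml_value;
    } label_entry_t;

    /*
     * Mirror of the create_sl() logic above: segments[0] is consumed as
     * the path's local (via) label, the remainder become the out-label
     * stack.  A single-segment SID gets an implicit-NULL entry so that
     * non-eos recursion through the BSID still has a stack entry.
     */
    static int
    build_stack (const uint32_t * segments, int n_segments,
                 label_entry_t * stack)
    {
      int ii, n_stack = 0;

      if (n_segments > 1)
        for (ii = 1; ii < n_segments; ii++)
          stack[n_stack++].fml_value = segments[ii];
      else
        stack[n_stack++].fml_value = IMPLICIT_NULL_LABEL;

      return n_stack;
    }

    int
    main (void)
    {
      uint32_t multi[] = { 32, 33, 34 }, single[] = { 32 };
      label_entry_t stack[8];
      int n, ii;

      n = build_stack (multi, 3, stack);
      printf ("segments {32,33,34}: via 32, stack =");
      for (ii = 0; ii < n; ii++)
        printf (" %u", stack[ii].fml_value);
      printf ("\n");                /* expect: 33 34 */

      n = build_stack (single, 1, stack);
      printf ("segments {32}: via 32, stack =");
      for (ii = 0; ii < n; ii++)
        printf (" %u", stack[ii].fml_value);
      printf ("\n");                /* expect: 3 (implicit NULL) */

      return 0;
    }

With the test's BSID of {32, 33, 34} this yields a via label of 32 and a stack of {33, 34}, which is why the captures in the test below expect labels 32/33/34 beneath the payload labels.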
diff --git a/test/test_srmpls.py b/test/test_srmpls.py
new file mode 100644
index 00000000000..ded4a71fa40
--- /dev/null
+++ b/test/test_srmpls.py
@@ -0,0 +1,271 @@
+#!/usr/bin/env python
+
+import unittest
+import socket
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
+    VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
+    MRouteItfFlags, MRouteEntryFlags, DpoProto, VppIpTable, VppMplsTable, \
+    VppMplsLabel, MplsLspMode
+from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
+from scapy.contrib.mpls import MPLS
+
+
+def verify_filter(capture, sent):
+    if not len(capture) == len(sent):
+        # filter out any IPv6 RAs from the capture
+        for p in capture:
+            if p.haslayer(IPv6):
+                capture.remove(p)
+    return capture
+
+
+def verify_mpls_stack(tst, rx, mpls_labels):
+    # the rx'd packet has the MPLS label popped
+    eth = rx[Ether]
+    tst.assertEqual(eth.type, 0x8847)
+
+    rx_mpls = rx[MPLS]
+
+    for ii in range(len(mpls_labels)):
+        tst.assertEqual(rx_mpls.label, mpls_labels[ii].value)
+        tst.assertEqual(rx_mpls.cos, mpls_labels[ii].exp)
+        tst.assertEqual(rx_mpls.ttl, mpls_labels[ii].ttl)
+
+        if ii == len(mpls_labels) - 1:
+            tst.assertEqual(rx_mpls.s, 1)
+        else:
+            # not end of stack
+            tst.assertEqual(rx_mpls.s, 0)
+            # pop the label to expose the next
+            rx_mpls = rx_mpls[MPLS].payload
+
+
+class TestSRMPLS(VppTestCase):
+    """ SR-MPLS Test Case """
+
+    def setUp(self):
+        super(TestSRMPLS, self).setUp()
+
+        # create 2 pg interfaces
+        self.create_pg_interfaces(range(4))
+
+        # setup both interfaces
+        # assign them different tables.
+        table_id = 0
+        self.tables = []
+
+        tbl = VppMplsTable(self, 0)
+        tbl.add_vpp_config()
+        self.tables.append(tbl)
+
+        for i in self.pg_interfaces:
+            i.admin_up()
+            i.config_ip4()
+            i.resolve_arp()
+            i.config_ip6()
+            i.resolve_ndp()
+            i.enable_mpls()
+
+    def tearDown(self):
+        for i in self.pg_interfaces:
+            i.unconfig_ip4()
+            i.unconfig_ip6()
+            i.ip6_disable()
+            i.disable_mpls()
+            i.admin_down()
+        super(TestSRMPLS, self).tearDown()
+
+    def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
+        self.reset_packet_infos()
+        pkts = []
+        for i in range(0, 257):
+            info = self.create_packet_info(src_if, src_if)
+            payload = self.info_to_payload(info)
+            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+                 IP(src=src_if.remote_ip4, dst=dst_ip,
+                    ttl=ip_ttl, tos=ip_dscp) /
+                 UDP(sport=1234, dport=1234) /
+                 Raw(payload))
+            info.data = p.copy()
+            pkts.append(p)
+        return pkts
+
+    def verify_capture_labelled_ip4(self, src_if, capture, sent,
+                                    mpls_labels, ip_ttl=None):
+        try:
+            capture = verify_filter(capture, sent)
+
+            self.assertEqual(len(capture), len(sent))
+
+            for i in range(len(capture)):
+                tx = sent[i]
+                rx = capture[i]
+                tx_ip = tx[IP]
+                rx_ip = rx[IP]
+
+                verify_mpls_stack(self, rx, mpls_labels)
+
+                self.assertEqual(rx_ip.src, tx_ip.src)
+                self.assertEqual(rx_ip.dst, tx_ip.dst)
+                if not ip_ttl:
+                    # IP processing post pop has decremented the TTL
+                    self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+                else:
+                    self.assertEqual(rx_ip.ttl, ip_ttl)
+
+        except:
+            raise
+
+    def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels):
+        try:
+            capture = verify_filter(capture, sent)
+
+            self.assertEqual(len(capture), len(sent))
+
+            for i in range(len(capture)):
+                tx = sent[i]
+                rx = capture[i]
+                tx_ip = tx[IP]
+                rx_ip = rx[IP]
+
+                verify_mpls_stack(self, rx, mpls_labels)
+
+                self.assertEqual(rx_ip.src, tx_ip.src)
+                self.assertEqual(rx_ip.dst, tx_ip.dst)
+                # IP processing post pop has decremented the TTL
+                self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+
+        except:
+            raise
+
+    def test_sr_mpls(self):
+        """ SR MPLS """
+
+        #
+        # A simple MPLS xconnect - neos label in label out
+        #
+        route_32_eos = VppMplsRoute(self, 32, 0,
+                                    [VppRoutePath(self.pg0.remote_ip4,
+                                                  self.pg0.sw_if_index,
+                                                  labels=[VppMplsLabel(32)])])
+        route_32_eos.add_vpp_config()
+
+        #
+        # A binding SID with only one label
+        #
+        self.vapi.sr_mpls_policy_add(999, 1, 0, [32])
+
+        #
+        # A labeled IP route that resolves thru the binding SID
+        #
+        ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
+                                 [VppRoutePath("0.0.0.0",
+                                               0xffffffff,
+                                               nh_via_label=999,
+                                               labels=[VppMplsLabel(55)])])
+        ip_10_0_0_1.add_vpp_config()
+
+        tx = self.create_stream_ip4(self.pg1, "10.0.0.1")
+        rx = self.send_and_expect(self.pg1, tx, self.pg0)
+        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+                                         [VppMplsLabel(32),
+                                          VppMplsLabel(55)])
+
+        #
+        # An unlabeled IP route that resolves thru the binding SID
+        #
+        ip_10_0_0_1 = VppIpRoute(self, "10.0.0.2", 32,
+                                 [VppRoutePath("0.0.0.0",
+                                               0xffffffff,
+                                               nh_via_label=999)])
+        ip_10_0_0_1.add_vpp_config()
+
+        tx = self.create_stream_ip4(self.pg1, "10.0.0.2")
+        rx = self.send_and_expect(self.pg1, tx, self.pg0)
+        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+                                         [VppMplsLabel(32)])
+
+        self.vapi.sr_mpls_policy_del(999)
+
+        #
+        # this time the SID has many labels pushed
+        #
+        self.vapi.sr_mpls_policy_add(999, 1, 0, [32, 33, 34])
+
+        tx = self.create_stream_ip4(self.pg1, "10.0.0.1")
+        rx = self.send_and_expect(self.pg1, tx, self.pg0)
+        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+                                         [VppMplsLabel(32),
+                                          VppMplsLabel(33),
+                                          VppMplsLabel(34),
+                                          VppMplsLabel(55)])
+        tx = self.create_stream_ip4(self.pg1, "10.0.0.2")
+        rx = self.send_and_expect(self.pg1, tx, self.pg0)
+        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+                                         [VppMplsLabel(32),
+                                          VppMplsLabel(33),
+                                          VppMplsLabel(34)])
+
+        #
+        # Resolve an MPLS tunnel via the SID
+        #
+        mpls_tun = VppMPLSTunnelInterface(
+            self,
+            [VppRoutePath("0.0.0.0",
+                          0xffffffff,
+                          nh_via_label=999,
+                          labels=[VppMplsLabel(44),
+                                  VppMplsLabel(46)])])
+        mpls_tun.add_vpp_config()
+        mpls_tun.admin_up()
+
+        #
+        # add an unlabelled route through the new tunnel
+        #
+        route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
+                                    [VppRoutePath("0.0.0.0",
+                                                  mpls_tun._sw_if_index)])
+        route_10_0_0_3.add_vpp_config()
+        self.logger.info(self.vapi.cli("sh mpls tun 0"))
+        self.logger.info(self.vapi.cli("sh adj 21"))
+
+        tx = self.create_stream_ip4(self.pg1, "10.0.0.3")
+        rx = self.send_and_expect(self.pg1, tx, self.pg0)
+        self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
+                                         [VppMplsLabel(32),
+                                          VppMplsLabel(33),
+                                          VppMplsLabel(34),
+                                          VppMplsLabel(44),
+                                          VppMplsLabel(46)])
+
+        #
+        # add a labelled route through the new tunnel
+        #
+        route_10_0_0_3 = VppIpRoute(self, "10.0.0.4", 32,
+                                    [VppRoutePath("0.0.0.0",
+                                                  mpls_tun._sw_if_index,
+                                                  labels=[VppMplsLabel(55)])])
+        route_10_0_0_3.add_vpp_config()
+
+        tx = self.create_stream_ip4(self.pg1, "10.0.0.4")
+        rx = self.send_and_expect(self.pg1, tx, self.pg0)
+        self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
+                                         [VppMplsLabel(32),
+                                          VppMplsLabel(33),
+                                          VppMplsLabel(34),
+                                          VppMplsLabel(44),
+                                          VppMplsLabel(46),
+                                          VppMplsLabel(55)])
+
+        self.vapi.sr_mpls_policy_del(999)
+
+
+if __name__ == '__main__':
+    unittest.main(testRunner=VppTestRunner)
diff --git a/test/vpp_mpls_tunnel_interface.py b/test/vpp_mpls_tunnel_interface.py
index 995ffb7dd54..b125f3c7277 100644
--- a/test/vpp_mpls_tunnel_interface.py
+++ b/test/vpp_mpls_tunnel_interface.py
@@ -26,6 +26,7 @@ class VppMPLSTunnelInterface(VppInterface):
                 path.nh_itf,
                 path.nh_table_id,
                 path.weight,
+                next_hop_via_label=path.nh_via_label,
                 next_hop_out_label_stack=lstack,
                 next_hop_n_out_labels=len(lstack),
                 is_multicast=self.is_multicast,
@@ -42,6 +43,7 @@ class VppMPLSTunnelInterface(VppInterface):
                 path.nh_itf,
                 path.nh_table_id,
                 path.weight,
+                next_hop_via_label=path.nh_via_label,
                 next_hop_out_label_stack=path.nh_labels,
                 next_hop_n_out_labels=len(path.nh_labels),
                 is_add=0)
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index 8bfc18e5a5c..30777d3d495 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -1329,6 +1329,7 @@ class VppPapiProvider(object):
              'mt_next_hop_n_out_labels': next_hop_n_out_labels,
              'mt_next_hop_sw_if_index': next_hop_sw_if_index,
              'mt_next_hop_table_id': next_hop_table_id,
+             'mt_next_hop_via_label': next_hop_via_label,
              'mt_next_hop_out_label_stack': next_hop_out_label_stack})
     def nat44_interface_add_del_feature(
@@ -2695,6 +2696,18 @@ class VppPapiProvider(object):
              'decap_vrf_id': decap_vrf_id,
              'client_mac': client_mac})
+    def sr_mpls_policy_add(self, bsid, weight, type, segments):
+        return self.api(self.papi.sr_mpls_policy_add,
+                        {'bsid': bsid,
+                         'weight': weight,
+                         'type': type,
+                         'n_segments': len(segments),
+                         'segments': segments})
+
+    def sr_mpls_policy_del(self, bsid):
+        return self.api(self.papi.sr_mpls_policy_del,
+                        {'bsid': bsid})
+
     def sr_localsid_add_del(self,
                             localsid,
                             behavior,