 src/vnet/fib/fib_entry_src.c      |  19
 src/vnet/fib/fib_entry_src_mpls.c |   2
 src/vnet/fib/fib_path.c           |  13
 src/vnet/fib/fib_path_ext.c       |   9
 src/vnet/fib/fib_test.c           |  94
 src/vnet/fib/fib_test.h           |   1
 test/test_dvr.py                  |   4
 test/test_ip4.py                  |   1
 test/test_ip6.py                  |   4
 test/test_mpls.py                 | 114
 10 files changed, 222 insertions(+), 39 deletions(-)
diff --git a/src/vnet/fib/fib_entry_src.c b/src/vnet/fib/fib_entry_src.c
index 667aa485f7c..214dafe9b8d 100644
--- a/src/vnet/fib/fib_entry_src.c
+++ b/src/vnet/fib/fib_entry_src.c
@@ -259,6 +259,23 @@ fib_entry_chain_type_fixup (const fib_entry_t *entry,
return (dfct);
}
+static dpo_proto_t
+fib_prefix_get_payload_proto (const fib_prefix_t *pfx)
+{
+ switch (pfx->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (DPO_PROTO_IP4);
+ case FIB_PROTOCOL_IP6:
+ return (DPO_PROTO_IP6);
+ case FIB_PROTOCOL_MPLS:
+ return (pfx->fp_payload_proto);
+ }
+
+ ASSERT(0);
+ return (DPO_PROTO_IP4);
+}
+
static void
fib_entry_src_get_path_forwarding (fib_node_index_t path_index,
fib_entry_src_collect_forwarding_ctx_t *ctx)
@@ -313,7 +330,7 @@ fib_entry_src_get_path_forwarding (fib_node_index_t path_index,
ctx->fct),
&nh->path_dpo);
fib_path_stack_mpls_disp(path_index,
- ctx->fib_entry->fe_prefix.fp_payload_proto,
+ fib_prefix_get_payload_proto(&ctx->fib_entry->fe_prefix),
&nh->path_dpo);
break;
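Note: the new helper centralises how the disposition's payload protocol is chosen. IPv4 and IPv6 prefixes imply it directly, while an MPLS prefix carries it in fp_payload_proto; previously fp_payload_proto was read unconditionally, which is only meaningful for MPLS prefixes. Below is a minimal standalone sketch of that mapping, using toy stand-ins for the VPP enums and struct (the real fib_prefix_t has more fields); it is an illustration, not VPP source.

#include <assert.h>
#include <stdio.h>

/* Toy stand-ins for the VPP types named in the diff; illustration only. */
typedef enum { FIB_PROTOCOL_IP4, FIB_PROTOCOL_IP6, FIB_PROTOCOL_MPLS } fib_protocol_t;
typedef enum { DPO_PROTO_IP4, DPO_PROTO_IP6 } dpo_proto_t;
typedef struct {
    fib_protocol_t fp_proto;
    dpo_proto_t fp_payload_proto;   /* only meaningful for MPLS prefixes */
} fib_prefix_t;

static dpo_proto_t
fib_prefix_get_payload_proto (const fib_prefix_t *pfx)
{
    switch (pfx->fp_proto)
    {
    case FIB_PROTOCOL_IP4:  return DPO_PROTO_IP4;          /* implied by the prefix */
    case FIB_PROTOCOL_IP6:  return DPO_PROTO_IP6;
    case FIB_PROTOCOL_MPLS: return pfx->fp_payload_proto;  /* carried in the prefix */
    }
    assert(0);
    return DPO_PROTO_IP4;
}

int main (void)
{
    /* An MPLS local-label prefix whose payload is IPv6 */
    fib_prefix_t pfx = { FIB_PROTOCOL_MPLS, DPO_PROTO_IP6 };
    printf("disposition payload proto: %s\n",
           DPO_PROTO_IP6 == fib_prefix_get_payload_proto(&pfx) ? "IP6" : "IP4");
    return 0;
}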
diff --git a/src/vnet/fib/fib_entry_src_mpls.c b/src/vnet/fib/fib_entry_src_mpls.c
index 6fdd5c0ac66..f80d42afbb0 100644
--- a/src/vnet/fib/fib_entry_src_mpls.c
+++ b/src/vnet/fib/fib_entry_src_mpls.c
@@ -170,7 +170,7 @@ static u8*
fib_entry_src_mpls_format (fib_entry_src_t *src,
u8* s)
{
- return (format(s, "MPLS local-label:%d", src->mpls.fesm_label));
+ return (format(s, " local-label:%d", src->mpls.fesm_label));
}
const static fib_entry_src_vft_t mpls_src_vft = {
diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c
index 3e031929419..8dabfdf9674 100644
--- a/src/vnet/fib/fib_path.c
+++ b/src/vnet/fib/fib_path.c
@@ -2259,6 +2259,18 @@ fib_path_stack_mpls_disp (fib_node_index_t path_index,
switch (path->fp_type)
{
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ {
+ dpo_id_t tmp = DPO_INVALID;
+
+ dpo_copy(&tmp, dpo);
+ dpo_set(dpo,
+ DPO_MPLS_DISPOSITION,
+ payload_proto,
+ mpls_disp_dpo_create(payload_proto, ~0, &tmp));
+ dpo_reset(&tmp);
+ break;
+ }
case FIB_PATH_TYPE_DEAG:
{
dpo_id_t tmp = DPO_INVALID;
@@ -2275,7 +2287,6 @@ fib_path_stack_mpls_disp (fib_node_index_t path_index,
}
case FIB_PATH_TYPE_RECEIVE:
case FIB_PATH_TYPE_ATTACHED:
- case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
case FIB_PATH_TYPE_RECURSIVE:
case FIB_PATH_TYPE_INTF_RX:
case FIB_PATH_TYPE_UDP_ENCAP:
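Note: with FIB_PATH_TYPE_ATTACHED_NEXT_HOP moved out of the no-op group, an MPLS EOS entry whose path resolves directly via an adjacency now gets an mpls-disposition DPO stacked on that adjacency, so the payload protocol is identified and TTL/hop-limit handling happens at the tail (this is what the new ICMPv6 time-exceeded checks in test_mpls.py exercise). The dpo_copy/dpo_set/dpo_reset sequence replaces the path's DPO in place while keeping the original adjacency referenced as the disposition's child. A standalone sketch of that re-parenting pattern follows, with invented toy types rather than the real dpo_id_t API.

#include <stdio.h>
#include <string.h>

/* Toy stand-in for a DPO slot; illustrates the in-place re-parenting
 * pattern only, not the real dpo_copy()/dpo_set()/dpo_reset() semantics. */
typedef struct { char desc[96]; } dpo_t;

static void dpo_copy (dpo_t *dst, const dpo_t *src) { *dst = *src; }
static void dpo_reset (dpo_t *d) { memset(d, 0, sizeof(*d)); }

/* Overwrite the slot with a disposition whose child is the slot's old value.
 * Copying first lets us overwrite the slot without losing the adjacency. */
static void
stack_mpls_disp (dpo_t *slot)
{
    dpo_t tmp;
    dpo_copy(&tmp, slot);                            /* keep the adjacency alive */
    snprintf(slot->desc, sizeof(slot->desc),
             "mpls-disposition -> %s", tmp.desc);    /* new parent written in place */
    dpo_reset(&tmp);                                 /* drop the temporary copy */
}

int main (void)
{
    dpo_t path_dpo;
    snprintf(path_dpo.desc, sizeof(path_dpo.desc), "adj 10.10.11.1");
    stack_mpls_disp(&path_dpo);
    puts(path_dpo.desc);   /* prints: mpls-disposition -> adj 10.10.11.1 */
    return 0;
}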
diff --git a/src/vnet/fib/fib_path_ext.c b/src/vnet/fib/fib_path_ext.c
index 4438671b4a0..a285ba07f7c 100644
--- a/src/vnet/fib/fib_path_ext.c
+++ b/src/vnet/fib/fib_path_ext.c
@@ -255,6 +255,15 @@ fib_path_ext_stack (fib_path_ext_t *path_ext,
chain_proto,
mldi);
}
+ else if (child_fct == FIB_FORW_CHAIN_TYPE_MPLS_EOS)
+ {
+ /*
+ * MPLS EOS packets using an imp-null. Insert the disposition.
+ */
+ fib_path_stack_mpls_disp(nh->path_index,
+ fib_forw_chain_type_to_dpo_proto(parent_fct),
+ &nh->path_dpo);
+ }
}
dpo_reset(&via_dpo);
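Note: fib_path_ext_stack() already builds a label sub-chain when there are out-labels to impose; the new branch covers imp-null (label 3) on an EOS chain, where no label is written and the packet would otherwise be handed straight to the path, so a disposition is inserted instead. The sketch below illustrates that decision only; the enum, label constant and function are invented for illustration and assume the elided surrounding condition distinguishes "labels to impose" from imp-null, as the added comment suggests.

#include <stdio.h>

#define MPLS_LABEL_IMPLICIT_NULL 3

typedef enum { CHAIN_MPLS_EOS, CHAIN_MPLS_NON_EOS } chain_t;

/* Return a description of the forwarding built for one path of the entry. */
static const char *
eos_forwarding (unsigned out_label, chain_t child_fct)
{
    if (out_label != MPLS_LABEL_IMPLICIT_NULL)
        return "mpls-label-imposition -> path";   /* push the out-label */
    if (CHAIN_MPLS_EOS == child_fct)
        return "mpls-disposition -> path";        /* imp-null at EOS: dispose */
    return "path";                                 /* imp-null, non-EOS */
}

int main (void)
{
    printf("label 99, EOS     : %s\n", eos_forwarding(99, CHAIN_MPLS_EOS));
    printf("imp-null, EOS     : %s\n", eos_forwarding(MPLS_LABEL_IMPLICIT_NULL, CHAIN_MPLS_EOS));
    printf("imp-null, non-EOS : %s\n", eos_forwarding(MPLS_LABEL_IMPLICIT_NULL, CHAIN_MPLS_NON_EOS));
    return 0;
}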
diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c
index 2658eb27811..b74ec337c1f 100644
--- a/src/vnet/fib/fib_test.c
+++ b/src/vnet/fib/fib_test.c
@@ -29,6 +29,7 @@
#include <vnet/dpo/interface_rx_dpo.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/dpo/l2_bridge_dpo.h>
+#include <vnet/dpo/mpls_disposition.h>
#include <vnet/mpls/mpls.h>
@@ -514,6 +515,30 @@ fib_test_validate_lb_v (const load_balance_t *lb,
bucket,
exp->adj.adj);
break;
+ case FT_LB_MPLS_DISP_O_ADJ:
+ {
+ const mpls_disp_dpo_t *mdd;
+
+ FIB_TEST_I((DPO_MPLS_DISPOSITION == dpo->dpoi_type),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+
+ mdd = mpls_disp_dpo_get(dpo->dpoi_index);
+
+ dpo = &mdd->mdd_dpo;
+
+ FIB_TEST_I(((DPO_ADJACENCY == dpo->dpoi_type) ||
+ (DPO_ADJACENCY_INCOMPLETE == dpo->dpoi_type)),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+ FIB_TEST_LB((exp->adj.adj == dpo->dpoi_index),
+ "bucket %d stacks on adj %d",
+ bucket,
+ exp->adj.adj);
+ break;
+ }
case FT_LB_INTF:
FIB_TEST_I((DPO_INTERFACE_RX == dpo->dpoi_type),
"bucket %d stacks on %U",
@@ -6380,6 +6405,12 @@ fib_test_label (void)
.fp_label = 24001,
.fp_eos = MPLS_NON_EOS,
};
+ fib_test_lb_bucket_t disp_o_10_10_11_1 = {
+ .type = FT_LB_MPLS_DISP_O_ADJ,
+ .adj = {
+ .adj = ai_v4_10_10_11_1,
+ },
+ };
/*
* The EOS entry should link to both the paths,
@@ -6393,10 +6424,10 @@ fib_test_label (void)
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
&l99_eos_o_10_10_10_1,
- &a_o_10_10_11_1),
+ &disp_o_10_10_11_1),
"24001/eos LB 2 buckets via: "
"label 99 over 10.10.10.1, "
- "adj over 10.10.11.1");
+ "mpls disp adj over 10.10.11.1");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
@@ -6419,6 +6450,13 @@ fib_test_label (void)
.adj = ai_v4_10_10_11_2,
},
};
+ fib_test_lb_bucket_t disp_o_10_10_11_2 = {
+ .type = FT_LB_MPLS_DISP_O_ADJ,
+ .adj = {
+ .adj = ai_v4_10_10_11_2,
+ },
+ };
+
fei = fib_table_entry_path_add(fib_index,
&pfx_1_1_1_1_s_32,
@@ -6567,11 +6605,11 @@ fib_test_label (void)
FIB_TEST(fib_test_validate_entry(fei,
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
- &a_o_10_10_11_1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_2),
"24001/eos LB 2 buckets via: "
- "adj over 10.10.11.1, ",
- "adj-v4 over 10.10.11.2");
+ "mpls-disp adj over 10.10.11.1, ",
+ "mpls-disp adj-v4 over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
@@ -6644,20 +6682,20 @@ fib_test_label (void)
&l99_eos_o_10_10_10_1,
&l99_eos_o_10_10_10_1,
&l99_eos_o_10_10_10_1,
- &a_o_10_10_11_1,
- &a_o_10_10_11_1,
- &a_o_10_10_11_1,
- &a_o_10_10_11_1,
- &a_o_10_10_11_1,
- &adj_o_10_10_11_2,
- &adj_o_10_10_11_2,
- &adj_o_10_10_11_2,
- &adj_o_10_10_11_2,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_2,
+ &disp_o_10_10_11_2,
+ &disp_o_10_10_11_2,
+ &disp_o_10_10_11_2,
+ &disp_o_10_10_11_2),
"24001/eos LB 16 buckets via: "
"label 99 over 10.10.10.1, "
- "adj over 10.10.11.1",
- "adj-v4 over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.1",
+ "MPLS disp adj-v4 over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
@@ -6698,11 +6736,11 @@ fib_test_label (void)
FIB_TEST(fib_test_validate_entry(fei,
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
- &a_o_10_10_11_1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_2),
"24001/eos LB 2 buckets via: "
- "adj over 10.10.11.1, "
- "adj-v4 over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.1, "
+ "MPLS disp adj-v4 over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
@@ -6750,9 +6788,9 @@ fib_test_label (void)
FIB_TEST(fib_test_validate_entry(fei,
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_2),
"24001/eos LB 1 buckets via: "
- "adj over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
@@ -6796,10 +6834,10 @@ fib_test_label (void)
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
&l99_eos_o_10_10_10_1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_2),
"24001/eos LB 2 buckets via: "
"label 99 over 10.10.10.1, "
- "adj over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
@@ -6841,10 +6879,10 @@ fib_test_label (void)
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
&l99_eos_o_10_10_10_1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_2),
"25005/eos LB 2 buckets via: "
"label 99 over 10.10.10.1, "
- "adj over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_25005_neos);
diff --git a/src/vnet/fib/fib_test.h b/src/vnet/fib/fib_test.h
index f3d8346aab3..53697cb62ae 100644
--- a/src/vnet/fib/fib_test.h
+++ b/src/vnet/fib/fib_test.h
@@ -29,6 +29,7 @@ typedef enum fib_test_lb_bucket_type_t_ {
FT_LB_LABEL_STACK_O_ADJ,
FT_LB_LABEL_O_LB,
FT_LB_O_LB,
+ FT_LB_MPLS_DISP_O_ADJ,
FT_LB_INTF,
FT_LB_L2,
FT_LB_BIER_TABLE,
diff --git a/test/test_dvr.py b/test/test_dvr.py
index 27522a54eea..f5d5e54a15f 100644
--- a/test/test_dvr.py
+++ b/test/test_dvr.py
@@ -15,7 +15,7 @@ from util import ppp
class TestDVR(VppTestCase):
- """ IPv4 Load-Balancing """
+ """ Distributed Virtual Router """
def setUp(self):
super(TestDVR, self).setUp()
@@ -83,8 +83,6 @@ class TestDVR(VppTestCase):
L2_VTR_OP.L2_POP_1,
93)
- self.logger.error(self.vapi.ppcli("show bridge-domain 1 detail"))
-
#
# Add routes to bridge the traffic via a tagged and a non-tagged interface
#
diff --git a/test/test_ip4.py b/test/test_ip4.py
index b05635f95ee..12fbced1c26 100644
--- a/test/test_ip4.py
+++ b/test/test_ip4.py
@@ -1360,7 +1360,6 @@ class TestIPInput(VppTestCase):
self.assertEqual(icmp.src, self.pg0.remote_ip4)
self.assertEqual(icmp.dst, self.pg1.remote_ip4)
- self.logger.error(self.vapi.cli("sh error"))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_ip6.py b/test/test_ip6.py
index 0a0d56cbb8d..684eff5546e 100644
--- a/test/test_ip6.py
+++ b/test/test_ip6.py
@@ -1278,6 +1278,7 @@ class TestIP6LoadBalance(VppTestCase):
super(TestIP6LoadBalance, self).tearDown()
def send_and_expect_load_balancing(self, input, pkts, outputs):
+ self.vapi.cli("clear trace")
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
@@ -1286,6 +1287,7 @@ class TestIP6LoadBalance(VppTestCase):
self.assertNotEqual(0, len(rx))
def send_and_expect_one_itf(self, input, pkts, itf):
+ self.vapi.cli("clear trace")
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
@@ -1691,8 +1693,6 @@ class TestIP6Input(VppTestCase):
# 0: "hop limit exceeded in transit",
self.assertEqual(icmp.code, 0)
- self.logger.error(self.vapi.cli("sh error"))
-
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_mpls.py b/test/test_mpls.py
index d265e85ed5e..9590519e237 100644
--- a/test/test_mpls.py
+++ b/test/test_mpls.py
@@ -12,7 +12,7 @@ from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
-from scapy.layers.inet6 import IPv6
+from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
from scapy.contrib.mpls import MPLS
@@ -288,6 +288,32 @@ class TestMPLS(VppTestCase):
except:
raise
+ def verify_capture_ip6_icmp(self, src_if, capture, sent):
+ try:
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+
+ # the rx'd packet has the MPLS label popped
+ eth = rx[Ether]
+ self.assertEqual(eth.type, 0x86DD)
+
+ tx_ip = tx[IPv6]
+ rx_ip = rx[IPv6]
+
+ self.assertEqual(rx_ip.dst, tx_ip.src)
+ # ICMP sourced from the interface's address
+ self.assertEqual(rx_ip.src, src_if.local_ip6)
+ # hop-limit reset to 255 for the ICMP packet (254 on the wire after the forwarding decrement)
+ self.assertEqual(rx_ip.hlim, 254)
+
+ icmp = rx[ICMPv6TimeExceeded]
+
+ except:
+ raise
+
def send_and_assert_no_replies(self, intf, pkts, remark):
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
@@ -364,6 +390,91 @@ class TestMPLS(VppTestCase):
self.verify_capture_ip4(self.pg0, rx, tx)
#
+ # disposed packets have an invalid IPv4 checksum
+ #
+ tx = self.create_stream_labelled_ip4(self.pg0, [33],
+ dst_ip=self.pg0.remote_ip4,
+ n=65,
+ chksum=1)
+ self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")
+
+ #
+ # An MPLS xconnect - EOS label in IPv6 out
+ #
+ route_333_eos = VppMplsRoute(
+ self, 333, 1,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ labels=[],
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route_333_eos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg0, [333], 64)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip6(self.pg0, rx, tx)
+
+ #
+ # disposed packets have an expired TTL
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg0, [333], 64,
+ dst_ip=self.pg1.remote_ip6,
+ hlim=0)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip6_icmp(self.pg0, rx, tx)
+
+ #
+ # An MPLS xconnect - EOS label in IPv6 out w imp-null
+ #
+ route_334_eos = VppMplsRoute(
+ self, 334, 1,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ labels=[3],
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route_334_eos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg0, [334], 64)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip6(self.pg0, rx, tx)
+
+ #
+ # disposed packets have an expired TTL
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg0, [334], 64,
+ dst_ip=self.pg1.remote_ip6,
+ hlim=0)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip6_icmp(self.pg0, rx, tx)
+
+ #
# An MPLS xconnect - non-EOS label in IP out - an invalid configuration
# so this traffic should be dropped.
#
@@ -1043,7 +1154,6 @@ class TestMPLS(VppTestCase):
tx = self.create_stream_labelled_ip4(self.pg0, [34],
dst_ip="232.1.1.1")
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
- self.logger.error(self.vapi.cli("sh error"))
def test_mcast_ip6_tail(self):
""" MPLS IPv6 Multicast Tail """