-rw-r--r--  src/vnet/buffer.c            |   3
-rw-r--r--  src/vnet/buffer.h            |   4
-rw-r--r--  src/vnet/dpo/lookup_dpo.c    |  85
-rw-r--r--  src/vnet/mpls/mpls_lookup.c  |   2
-rw-r--r--  src/vnet/mpls/mpls_lookup.h  |   8
-rw-r--r--  test/test_ip4.py             |  18
-rw-r--r--  test/test_ip6.py             | 121
7 files changed, 237 insertions(+), 4 deletions(-)
diff --git a/src/vnet/buffer.c b/src/vnet/buffer.c
index f793fb4fc03..de1c73597e3 100644
--- a/src/vnet/buffer.c
+++ b/src/vnet/buffer.c
@@ -42,6 +42,9 @@ format_vnet_buffer (u8 * s, va_list * args)
a = format (a, "qos %d.%d ",
vnet_buffer2 (b)->qos.bits, vnet_buffer2 (b)->qos.source);
+ if (b->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)
+ a = format (a, "loop-counter %d ", vnet_buffer2 (b)->loop_counter);
+
s = format (s, "%U", format_vlib_buffer, b);
if (a)
s = format (s, "\n%U%v", format_white_space, indent, a);
diff --git a/src/vnet/buffer.h b/src/vnet/buffer.h
index b4ce9c79c87..e4167eae6ae 100644
--- a/src/vnet/buffer.h
+++ b/src/vnet/buffer.h
@@ -51,6 +51,7 @@
_( 3, VLAN_2_DEEP, "vlan-2-deep") \
_( 4, VLAN_1_DEEP, "vlan-1-deep") \
_( 5, SPAN_CLONE, "span-clone") \
+ _( 6, LOOP_COUNTER_VALID, 0) \
_( 7, LOCALLY_ORIGINATED, "local") \
_( 8, IS_IP4, "ip4") \
_( 9, IS_IP6, "ip6") \
@@ -357,7 +358,8 @@ typedef struct
u8 source;
} qos;
- u8 __unused[2];
+ u8 loop_counter;
+ u8 __unused[1];
/* Group Based Policy */
struct
diff --git a/src/vnet/dpo/lookup_dpo.c b/src/vnet/dpo/lookup_dpo.c
index 059fa3fd869..ebdbc127cc6 100644
--- a/src/vnet/dpo/lookup_dpo.c
+++ b/src/vnet/dpo/lookup_dpo.c
@@ -29,6 +29,12 @@ static const char *const lookup_input_names[] = LOOKUP_INPUTS;
static const char *const lookup_cast_names[] = LOOKUP_CASTS;
/**
+ * If a packet encounters a lookup DPO more than this many times
+ * then we assume there is a loop in the forwarding graph and drop the packet
+ */
+#define MAX_LUKPS_PER_PACKET 4
+
+/**
* @brief Enumeration of the lookup subtypes
*/
typedef enum lookup_sub_type_t_
@@ -471,6 +477,23 @@ lookup_dpo_ip4_inline (vlib_main_t * vm,
(cm, thread_index, lbi1, 1,
vlib_buffer_length_in_chain (vm, b1));
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+ if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b1)->loop_counter = 0;
+ b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+ vnet_buffer2(b1)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+ if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next1 = IP_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
lookup_trace_t *tr = vlib_add_trace (vm, node,
@@ -572,6 +595,16 @@ lookup_dpo_ip4_inline (vlib_main_t * vm,
(cm, thread_index, lbi0, 1,
vlib_buffer_length_in_chain (vm, b0));
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
lookup_trace_t *tr = vlib_add_trace (vm, node,
@@ -780,6 +813,23 @@ lookup_dpo_ip6_inline (vlib_main_t * vm,
hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+ if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b1)->loop_counter = 0;
+ b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+ vnet_buffer2(b1)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+ if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next1 = IP_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
{
flow_hash_config0 = lb0->lb_hash_config;
@@ -910,6 +960,16 @@ lookup_dpo_ip6_inline (vlib_main_t * vm,
next0 = dpo0->dpoi_next_node;
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+
vlib_increment_combined_counter
(cm, thread_index, lbi0, 1,
vlib_buffer_length_in_chain (vm, b0));
@@ -1085,11 +1145,21 @@ lookup_dpo_mpls_inline (vlib_main_t * vm,
vlib_buffer_length_in_chain (vm, b0));
}
- vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
+ vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
vnet_buffer (b0)->mpls.exp = (((char*)hdr0)[2] & 0xe) >> 1;
vnet_buffer (b0)->mpls.first = 1;
vlib_buffer_advance(b0, sizeof(*hdr0));
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = MPLS_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
lookup_trace_t *tr = vlib_add_trace (vm, node,
@@ -1163,6 +1233,7 @@ VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
typedef enum lookup_ip_dst_mcast_next_t_ {
+ LOOKUP_IP_DST_MCAST_NEXT_DROP,
LOOKUP_IP_DST_MCAST_NEXT_RPF,
LOOKUP_IP_DST_MCAST_N_NEXT,
} mfib_forward_lookup_next_t;
@@ -1249,6 +1320,16 @@ lookup_dpo_ip_dst_mcast_inline (vlib_main_t * vm,
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = LOOKUP_IP_DST_MCAST_NEXT_DROP;
+
vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
}
@@ -1273,6 +1354,7 @@ VLIB_REGISTER_NODE (lookup_ip4_dst_mcast_node) = {
.format_trace = format_lookup_trace,
.n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
.next_nodes = {
+ [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip4-drop",
[LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",
},
};
@@ -1295,6 +1377,7 @@ VLIB_REGISTER_NODE (lookup_ip6_dst_mcast_node) = {
.format_trace = format_lookup_trace,
.n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
.next_nodes = {
+ [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip6-drop",
[LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip6-mfib-forward-rpf",
},
};
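
Every lookup node touched above repeats the same guard: validate and zero the per-buffer loop counter on first use, increment it, and steer the packet to the drop next-node once the count exceeds MAX_LUKPS_PER_PACKET. Below is a minimal, self-contained sketch of that logic; the demo_* names and types are stand-ins for illustration only, not the real vlib_buffer_t / vnet_buffer2 macros and not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the vnet buffer fields used by the patch;
 * these are not the real VPP structures or macros. */
#define DEMO_BUF_F_LOOP_COUNTER_VALID (1 << 6)
#define DEMO_MAX_LOOKUPS_PER_PACKET   4   /* mirrors MAX_LUKPS_PER_PACKET */

typedef struct
{
  uint32_t flags;
  uint8_t loop_counter;
} demo_buffer_t;

/* Returns 1 when the packet has passed through a lookup DPO more than
 * DEMO_MAX_LOOKUPS_PER_PACKET times, i.e. a forwarding loop is suspected
 * and the caller should select the drop next-index. */
static int
demo_loop_check (demo_buffer_t *b)
{
  if (!(b->flags & DEMO_BUF_F_LOOP_COUNTER_VALID))
    {
      b->loop_counter = 0;
      b->flags |= DEMO_BUF_F_LOOP_COUNTER_VALID;
    }
  b->loop_counter++;
  return b->loop_counter > DEMO_MAX_LOOKUPS_PER_PACKET;
}

int
main (void)
{
  demo_buffer_t b = { 0 };
  /* The first four passes forward; the fifth trips the loop check. */
  for (int pass = 1; pass <= 5; pass++)
    printf ("pass %d: %s\n", pass, demo_loop_check (&b) ? "drop" : "forward");
  return 0;
}
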
diff --git a/src/vnet/mpls/mpls_lookup.c b/src/vnet/mpls/mpls_lookup.c
index 04a983b9ff0..56492f1368a 100644
--- a/src/vnet/mpls/mpls_lookup.c
+++ b/src/vnet/mpls/mpls_lookup.c
@@ -696,7 +696,7 @@ VLIB_REGISTER_NODE (mpls_load_balance_node) = {
.n_next_nodes = 1,
.next_nodes =
{
- [0] = "mpls-drop",
+ [MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
},
};
diff --git a/src/vnet/mpls/mpls_lookup.h b/src/vnet/mpls/mpls_lookup.h
index 28c9124f9bf..4311dc053ec 100644
--- a/src/vnet/mpls/mpls_lookup.h
+++ b/src/vnet/mpls/mpls_lookup.h
@@ -24,6 +24,14 @@
*/
u32 mpls_lookup_to_replicate_edge;
+/**
+ * Enum of statically configured MPLS lookup next nodes
+ */
+typedef enum mpls_lookup_next_t_
+{
+ MPLS_LOOKUP_NEXT_DROP = 0,
+} mpls_lookup_next_t;
+
/*
* Compute flow hash.
* We'll use it to select which adjacency to use for this flow. And other things.
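
The new enum gives the mpls-load-balance registration a named index for its drop arc; the value written to next0 in the data path has to match the designated-initializer index used in next_nodes. A toy, self-contained illustration of that pairing follows; the demo_* names are stand-ins, not the real vlib node registration types.

#include <stdio.h>

/* Illustrative only: a toy version of how a graph node's next-index enum
 * pairs with its next_nodes[] registration. */
typedef enum
{
  DEMO_MPLS_LOOKUP_NEXT_DROP = 0,
  DEMO_MPLS_LOOKUP_N_NEXT,
} demo_mpls_lookup_next_t;

/* Using the enum as the designated-initializer index (as the patch now does
 * for "mpls-drop") keeps the data path and the registration in sync. */
static const char *demo_next_nodes[DEMO_MPLS_LOOKUP_N_NEXT] = {
  [DEMO_MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
};

int
main (void)
{
  int next0 = DEMO_MPLS_LOOKUP_NEXT_DROP;   /* what the data path would set */
  printf ("next0 %d -> %s\n", next0, demo_next_nodes[next0]);
  return 0;
}
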
diff --git a/test/test_ip4.py b/test/test_ip4.py
index e501bff3fec..d15453d4f33 100644
--- a/test/test_ip4.py
+++ b/test/test_ip4.py
@@ -1246,6 +1246,24 @@ class TestIPDeag(VppTestCase):
route_in_src.add_vpp_config()
self.send_and_expect(self.pg0, pkts_src, self.pg2)
+ #
+ # loop in the lookup DP
+ #
+ route_loop = VppIpRoute(self, "2.2.2.3", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_table_id=0)])
+ route_loop.add_vpp_config()
+
+ p_l = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src="2.2.2.4", dst="2.2.2.3") /
+ TCP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, p_l * 257,
+ "IP lookup loop")
+
class TestIPInput(VppTestCase):
""" IPv4 Input Exceptions """
diff --git a/test/test_ip6.py b/test/test_ip6.py
index 2f19bcf1547..9a0c752ebfd 100644
--- a/test/test_ip6.py
+++ b/test/test_ip6.py
@@ -9,7 +9,7 @@ from vpp_sub_interface import VppSubInterface, VppDot1QSubint
from vpp_pg_interface import is_ipv6_misc
from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, VppIpMRoute, \
VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind, \
- VppMplsRoute, DpoProto, VppMplsTable
+ VppMplsRoute, DpoProto, VppMplsTable, VppIpTable
from vpp_neighbor import find_nbr, VppNeighbor
from scapy.packet import Raw
@@ -1957,6 +1957,125 @@ class TestIP6Punt(VppTestCase):
is_ip6=1)
+class TestIPDeag(VppTestCase):
+ """ IPv6 Deaggregate Routes """
+
+ def setUp(self):
+ super(TestIPDeag, self).setUp()
+
+ self.create_pg_interfaces(range(3))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip6()
+ i.resolve_ndp()
+
+ def tearDown(self):
+ super(TestIPDeag, self).tearDown()
+ for i in self.pg_interfaces:
+ i.unconfig_ip6()
+ i.admin_down()
+
+ def test_ip_deag(self):
+ """ IP Deag Routes """
+
+ #
+ # Create a table to be used for:
+ # 1 - another destination address lookup
+ # 2 - a source address lookup
+ #
+ table_dst = VppIpTable(self, 1, is_ip6=1)
+ table_src = VppIpTable(self, 2, is_ip6=1)
+ table_dst.add_vpp_config()
+ table_src.add_vpp_config()
+
+ #
+ # Add a route in the default table to point to a deag/
+ # second lookup in each of these tables
+ #
+ route_to_dst = VppIpRoute(self, "1::1", 128,
+ [VppRoutePath("::",
+ 0xffffffff,
+ nh_table_id=1,
+ proto=DpoProto.DPO_PROTO_IP6)],
+ is_ip6=1)
+ route_to_src = VppIpRoute(self, "1::2", 128,
+ [VppRoutePath("::",
+ 0xffffffff,
+ nh_table_id=2,
+ is_source_lookup=1,
+ proto=DpoProto.DPO_PROTO_IP6)],
+ is_ip6=1)
+ route_to_dst.add_vpp_config()
+ route_to_src.add_vpp_config()
+
+ #
+ # packets to these destinations are dropped, since they'll
+ # hit the respective default routes in the second table
+ #
+ p_dst = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IPv6(src="5::5", dst="1::1") /
+ TCP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ p_src = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IPv6(src="2::2", dst="1::2") /
+ TCP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ pkts_dst = p_dst * 257
+ pkts_src = p_src * 257
+
+ self.send_and_assert_no_replies(self.pg0, pkts_dst,
+ "IP in dst table")
+ self.send_and_assert_no_replies(self.pg0, pkts_src,
+ "IP in src table")
+
+ #
+ # add a route in the dst table to forward via pg1
+ #
+ route_in_dst = VppIpRoute(self, "1::1", 128,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)],
+ is_ip6=1,
+ table_id=1)
+ route_in_dst.add_vpp_config()
+
+ self.send_and_expect(self.pg0, pkts_dst, self.pg1)
+
+ #
+ # add a route in the src table to forward via pg2
+ #
+ route_in_src = VppIpRoute(self, "2::2", 128,
+ [VppRoutePath(self.pg2.remote_ip6,
+ self.pg2.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)],
+ is_ip6=1,
+ table_id=2)
+ route_in_src.add_vpp_config()
+ self.send_and_expect(self.pg0, pkts_src, self.pg2)
+
+ #
+ # loop in the lookup DP
+ #
+ route_loop = VppIpRoute(self, "3::3", 128,
+ [VppRoutePath("::",
+ 0xffffffff,
+ proto=DpoProto.DPO_PROTO_IP6)],
+ is_ip6=1)
+ route_loop.add_vpp_config()
+
+ p_l = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IPv6(src="3::4", dst="3::3") /
+ TCP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, p_l * 257,
+ "IP lookup loop")
+
+
class TestIP6Input(VppTestCase):
""" IPv6 Input Exceptions """