summaryrefslogtreecommitdiffstats
path: root/src/vnet
diff options
context:
space:
mode:
authorNeale Ranns <neale.ranns@cisco.com>2018-08-01 12:53:17 -0700
committerDave Barach <openvpp@barachs.net>2018-08-03 11:49:39 +0000
commitce9e0b4d48705d693f4e03093e3f506bdecaf141 (patch)
treec7b59ffe466f8fce19fb2a536b16958a16efc9b9 /src/vnet
parent40f92469c6c1b7145ad752475350f71d752d8033 (diff)
loop counter to prevent infinite number of lookups per-packet
Change-Id: I59235d11baac18785a4c90cdaf14e8f3ddf06dab Signed-off-by: Neale Ranns <neale.ranns@cisco.com>
Diffstat (limited to 'src/vnet')
-rw-r--r--src/vnet/buffer.c3
-rw-r--r--src/vnet/buffer.h4
-rw-r--r--src/vnet/dpo/lookup_dpo.c85
-rw-r--r--src/vnet/mpls/mpls_lookup.c2
-rw-r--r--src/vnet/mpls/mpls_lookup.h8
5 files changed, 99 insertions, 3 deletions
diff --git a/src/vnet/buffer.c b/src/vnet/buffer.c
index f793fb4fc03..de1c73597e3 100644
--- a/src/vnet/buffer.c
+++ b/src/vnet/buffer.c
@@ -42,6 +42,9 @@ format_vnet_buffer (u8 * s, va_list * args)
a = format (a, "qos %d.%d ",
vnet_buffer2 (b)->qos.bits, vnet_buffer2 (b)->qos.source);
+ if (b->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)
+ a = format (a, "loop-counter %d ", vnet_buffer2 (b)->loop_counter);
+
s = format (s, "%U", format_vlib_buffer, b);
if (a)
s = format (s, "\n%U%v", format_white_space, indent, a);
diff --git a/src/vnet/buffer.h b/src/vnet/buffer.h
index b4ce9c79c87..e4167eae6ae 100644
--- a/src/vnet/buffer.h
+++ b/src/vnet/buffer.h
@@ -51,6 +51,7 @@
_( 3, VLAN_2_DEEP, "vlan-2-deep") \
_( 4, VLAN_1_DEEP, "vlan-1-deep") \
_( 5, SPAN_CLONE, "span-clone") \
+ _( 6, LOOP_COUNTER_VALID, 0) \
_( 7, LOCALLY_ORIGINATED, "local") \
_( 8, IS_IP4, "ip4") \
_( 9, IS_IP6, "ip6") \
@@ -357,7 +358,8 @@ typedef struct
u8 source;
} qos;
- u8 __unused[2];
+ u8 loop_counter;
+ u8 __unused[1];
/* Group Based Policy */
struct
diff --git a/src/vnet/dpo/lookup_dpo.c b/src/vnet/dpo/lookup_dpo.c
index 059fa3fd869..ebdbc127cc6 100644
--- a/src/vnet/dpo/lookup_dpo.c
+++ b/src/vnet/dpo/lookup_dpo.c
@@ -29,6 +29,12 @@ static const char *const lookup_input_names[] = LOOKUP_INPUTS;
static const char *const lookup_cast_names[] = LOOKUP_CASTS;
/**
+ * If a packet encounters a lookup DPO more than this many times
+ * then we assume there is a loop in the forwarding graph and drop the packet
+ */
+#define MAX_LUKPS_PER_PACKET 4
+
+/**
* @brief Enumeration of the lookup subtypes
*/
typedef enum lookup_sub_type_t_
@@ -471,6 +477,23 @@ lookup_dpo_ip4_inline (vlib_main_t * vm,
(cm, thread_index, lbi1, 1,
vlib_buffer_length_in_chain (vm, b1));
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+ if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b1)->loop_counter = 0;
+ b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+ vnet_buffer2(b1)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+ if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next1 = IP_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
lookup_trace_t *tr = vlib_add_trace (vm, node,
@@ -572,6 +595,16 @@ lookup_dpo_ip4_inline (vlib_main_t * vm,
(cm, thread_index, lbi0, 1,
vlib_buffer_length_in_chain (vm, b0));
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
lookup_trace_t *tr = vlib_add_trace (vm, node,
@@ -780,6 +813,23 @@ lookup_dpo_ip6_inline (vlib_main_t * vm,
hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+ if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b1)->loop_counter = 0;
+ b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+ vnet_buffer2(b1)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+ if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next1 = IP_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
{
flow_hash_config0 = lb0->lb_hash_config;
@@ -910,6 +960,16 @@ lookup_dpo_ip6_inline (vlib_main_t * vm,
next0 = dpo0->dpoi_next_node;
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+
vlib_increment_combined_counter
(cm, thread_index, lbi0, 1,
vlib_buffer_length_in_chain (vm, b0));
@@ -1085,11 +1145,21 @@ lookup_dpo_mpls_inline (vlib_main_t * vm,
vlib_buffer_length_in_chain (vm, b0));
}
- vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
+ vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
vnet_buffer (b0)->mpls.exp = (((char*)hdr0)[2] & 0xe) >> 1;
vnet_buffer (b0)->mpls.first = 1;
vlib_buffer_advance(b0, sizeof(*hdr0));
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = MPLS_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
lookup_trace_t *tr = vlib_add_trace (vm, node,
@@ -1163,6 +1233,7 @@ VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
typedef enum lookup_ip_dst_mcast_next_t_ {
+ LOOKUP_IP_DST_MCAST_NEXT_DROP,
LOOKUP_IP_DST_MCAST_NEXT_RPF,
LOOKUP_IP_DST_MCAST_N_NEXT,
} mfib_forward_lookup_next_t;
@@ -1249,6 +1320,16 @@ lookup_dpo_ip_dst_mcast_inline (vlib_main_t * vm,
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = LOOKUP_IP_DST_MCAST_NEXT_DROP;
+
vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
}
@@ -1273,6 +1354,7 @@ VLIB_REGISTER_NODE (lookup_ip4_dst_mcast_node) = {
.format_trace = format_lookup_trace,
.n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
.next_nodes = {
+ [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip4-drop",
[LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",
},
};
@@ -1295,6 +1377,7 @@ VLIB_REGISTER_NODE (lookup_ip6_dst_mcast_node) = {
.format_trace = format_lookup_trace,
.n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
.next_nodes = {
+ [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip6-drop",
[LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip6-mfib-forward-rpf",
},
};
diff --git a/src/vnet/mpls/mpls_lookup.c b/src/vnet/mpls/mpls_lookup.c
index 04a983b9ff0..56492f1368a 100644
--- a/src/vnet/mpls/mpls_lookup.c
+++ b/src/vnet/mpls/mpls_lookup.c
@@ -696,7 +696,7 @@ VLIB_REGISTER_NODE (mpls_load_balance_node) = {
.n_next_nodes = 1,
.next_nodes =
{
- [0] = "mpls-drop",
+ [MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
},
};
diff --git a/src/vnet/mpls/mpls_lookup.h b/src/vnet/mpls/mpls_lookup.h
index 28c9124f9bf..4311dc053ec 100644
--- a/src/vnet/mpls/mpls_lookup.h
+++ b/src/vnet/mpls/mpls_lookup.h
@@ -24,6 +24,14 @@
*/
u32 mpls_lookup_to_replicate_edge;
+/**
+ * Enum of statically configred MPLS lookup next nodes
+ */
+typedef enum mpls_lookup_next_t_
+{
+ MPLS_LOOKUP_NEXT_DROP = 0,
+} mpls_lookup_next_t;
+
/*
* Compute flow hash.
* We'll use it to select which adjacency to use for this flow. And other things.