| author | Neale Ranns <neale.ranns@cisco.com> | 2018-08-01 12:53:17 -0700 |
|---|---|---|
| committer | Dave Barach <openvpp@barachs.net> | 2018-08-03 11:49:39 +0000 |
| commit | ce9e0b4d48705d693f4e03093e3f506bdecaf141 (patch) | |
| tree | c7b59ffe466f8fce19fb2a536b16958a16efc9b9 /src/vnet/dpo/lookup_dpo.c | |
| parent | 40f92469c6c1b7145ad752475350f71d752d8033 (diff) | |
loop counter to prevent an infinite number of lookups per-packet
Change-Id: I59235d11baac18785a4c90cdaf14e8f3ddf06dab
Signed-off-by: Neale Ranns <neale.ranns@cisco.com>
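
The change applies the same guard at every lookup DPO node: lazily mark a per-buffer loop counter as valid the first time a packet hits a lookup, bump it on each pass, and steer the packet to the drop next-node once it exceeds MAX_LUKPS_PER_PACKET. Below is a minimal standalone sketch of that pattern; the buffer struct, flag value and next-node constants are simplified stand-ins for the real vlib/vnet types, not the VPP API.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for vlib_buffer_t metadata (illustrative only). */
#define BUFFER_F_LOOP_COUNTER_VALID (1 << 0)
#define MAX_LUKPS_PER_PACKET 4          /* same bound as the patch */

enum { NEXT_LOOKUP, NEXT_DROP };

typedef struct {
    uint32_t flags;
    uint8_t  loop_counter;
} buffer_t;

/* Apply the per-packet loop guard; returns the (possibly overridden) next node. */
static int
loop_guard (buffer_t *b, int next)
{
    /* First time this packet hits a lookup DPO: start counting. */
    if (!(b->flags & BUFFER_F_LOOP_COUNTER_VALID)) {
        b->loop_counter = 0;
        b->flags |= BUFFER_F_LOOP_COUNTER_VALID;
    }

    b->loop_counter++;

    /* Too many recursive lookups: assume a loop in the forwarding graph and drop. */
    if (b->loop_counter > MAX_LUKPS_PER_PACKET)
        next = NEXT_DROP;

    return next;
}

int
main (void)
{
    buffer_t b = { 0 };
    /* Simulate a packet re-entering the lookup node repeatedly. */
    for (int pass = 1; pass <= 6; pass++) {
        int next = loop_guard (&b, NEXT_LOOKUP);
        printf ("pass %d -> %s\n", pass, next == NEXT_DROP ? "drop" : "lookup");
    }
    return 0;
}
```

With the bound of 4, the first four passes continue through the lookup path and the fifth and later passes are diverted to drop, which is the behaviour the patch adds to the IPv4, IPv6, MPLS and multicast lookup nodes below.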
Diffstat (limited to 'src/vnet/dpo/lookup_dpo.c')
-rw-r--r-- | src/vnet/dpo/lookup_dpo.c | 85 |
1 file changed, 84 insertions(+), 1 deletion(-)
diff --git a/src/vnet/dpo/lookup_dpo.c b/src/vnet/dpo/lookup_dpo.c
index 059fa3fd869..ebdbc127cc6 100644
--- a/src/vnet/dpo/lookup_dpo.c
+++ b/src/vnet/dpo/lookup_dpo.c
@@ -29,6 +29,12 @@ static const char *const lookup_input_names[] = LOOKUP_INPUTS;
 static const char *const lookup_cast_names[] = LOOKUP_CASTS;
 
 /**
+ * If a packet encounters a lookup DPO more than the many times
+ * then we assume there is a loop in the forward graph and drop the packet
+ */
+#define MAX_LUKPS_PER_PACKET 4
+
+/**
  * @brief Enumeration of the lookup subtypes
  */
 typedef enum lookup_sub_type_t_
@@ -471,6 +477,23 @@ lookup_dpo_ip4_inline (vlib_main_t * vm,
                 (cm, thread_index, lbi1, 1,
                  vlib_buffer_length_in_chain (vm, b1));
 
+            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+                vnet_buffer2(b0)->loop_counter = 0;
+                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+            }
+            if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+                vnet_buffer2(b1)->loop_counter = 0;
+                b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+            }
+
+            vnet_buffer2(b0)->loop_counter++;
+            vnet_buffer2(b1)->loop_counter++;
+
+            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+                next0 = IP_LOOKUP_NEXT_DROP;
+            if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
+                next1 = IP_LOOKUP_NEXT_DROP;
+
             if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
             {
                 lookup_trace_t *tr = vlib_add_trace (vm, node,
@@ -572,6 +595,16 @@ lookup_dpo_ip4_inline (vlib_main_t * vm,
                 (cm, thread_index, lbi0, 1,
                  vlib_buffer_length_in_chain (vm, b0));
 
+            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+                vnet_buffer2(b0)->loop_counter = 0;
+                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+            }
+
+            vnet_buffer2(b0)->loop_counter++;
+
+            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+                next0 = IP_LOOKUP_NEXT_DROP;
+
             if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
             {
                 lookup_trace_t *tr = vlib_add_trace (vm, node,
@@ -780,6 +813,23 @@ lookup_dpo_ip6_inline (vlib_main_t * vm,
             hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
             hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
 
+            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+                vnet_buffer2(b0)->loop_counter = 0;
+                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+            }
+            if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+                vnet_buffer2(b1)->loop_counter = 0;
+                b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+            }
+
+            vnet_buffer2(b0)->loop_counter++;
+            vnet_buffer2(b1)->loop_counter++;
+
+            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+                next0 = IP_LOOKUP_NEXT_DROP;
+            if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
+                next1 = IP_LOOKUP_NEXT_DROP;
+
             if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
             {
                 flow_hash_config0 = lb0->lb_hash_config;
@@ -910,6 +960,16 @@ lookup_dpo_ip6_inline (vlib_main_t * vm,
             next0 = dpo0->dpoi_next_node;
             vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
 
+            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+                vnet_buffer2(b0)->loop_counter = 0;
+                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+            }
+
+            vnet_buffer2(b0)->loop_counter++;
+
+            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+                next0 = IP_LOOKUP_NEXT_DROP;
+
             vlib_increment_combined_counter
                 (cm, thread_index, lbi0, 1,
                  vlib_buffer_length_in_chain (vm, b0));
@@ -1085,11 +1145,21 @@ lookup_dpo_mpls_inline (vlib_main_t * vm,
                      vlib_buffer_length_in_chain (vm, b0));
             }
 
-            vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
+            vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
             vnet_buffer (b0)->mpls.exp = (((char*)hdr0)[2] & 0xe) >> 1;
             vnet_buffer (b0)->mpls.first = 1;
             vlib_buffer_advance(b0, sizeof(*hdr0));
 
+            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+                vnet_buffer2(b0)->loop_counter = 0;
+                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+            }
+
+            vnet_buffer2(b0)->loop_counter++;
+
+            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+                next0 = MPLS_LOOKUP_NEXT_DROP;
+
             if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
             {
                 lookup_trace_t *tr = vlib_add_trace (vm, node,
@@ -1163,6 +1233,7 @@ VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
 VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
 
 typedef enum lookup_ip_dst_mcast_next_t_ {
+    LOOKUP_IP_DST_MCAST_NEXT_DROP,
     LOOKUP_IP_DST_MCAST_NEXT_RPF,
     LOOKUP_IP_DST_MCAST_N_NEXT,
 } mfib_forward_lookup_next_t;
@@ -1249,6 +1320,16 @@ lookup_dpo_ip_dst_mcast_inline (vlib_main_t * vm,
             vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;
 
+            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+                vnet_buffer2(b0)->loop_counter = 0;
+                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+            }
+
+            vnet_buffer2(b0)->loop_counter++;
+
+            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+                next0 = LOOKUP_IP_DST_MCAST_NEXT_DROP;
+
             vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                             to_next, n_left_to_next,
                                             bi0, next0);
         }
@@ -1273,6 +1354,7 @@ VLIB_REGISTER_NODE (lookup_ip4_dst_mcast_node) = {
     .format_trace = format_lookup_trace,
     .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
     .next_nodes = {
+        [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip4-drop",
         [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",
     },
 };
@@ -1295,6 +1377,7 @@ VLIB_REGISTER_NODE (lookup_ip6_dst_mcast_node) = {
     .format_trace = format_lookup_trace,
     .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
     .next_nodes = {
+        [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip6-drop",
         [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip6-mfib-forward-rpf",
     },
 };