From 2b600184f3f43e740b54a1c51d3a35f8c1a77868 Mon Sep 17 00:00:00 2001
From: Neale Ranns
Date: Fri, 29 Mar 2019 05:08:27 -0700
Subject: GBP: iVXLAN reflection check

packets should not egress on an iVXLAN tunnel if they arrived on one.

Change-Id: I9adca30252364b4878f99e254aebc73b70a5d4d6
Signed-off-by: Neale Ranns
---
 src/plugins/gbp/gbp_policy_dpo.c      | 17 ++++++++++++++++
 src/plugins/gbp/gbp_policy_node.c     | 37 +++++++++++++++++++++++------------
 src/vnet/vxlan-gbp/decap.c            | 11 ++++++++---
 src/vnet/vxlan-gbp/vxlan_gbp.h        | 11 +++++++++++
 src/vnet/vxlan-gbp/vxlan_gbp_packet.h |  8 +++++++-
 test/test_gbp.py                      | 15 ++++++++++++++
 6 files changed, 83 insertions(+), 16 deletions(-)

diff --git a/src/plugins/gbp/gbp_policy_dpo.c b/src/plugins/gbp/gbp_policy_dpo.c
index 5fb04ff4df5..c3a51a46236 100644
--- a/src/plugins/gbp/gbp_policy_dpo.c
+++ b/src/plugins/gbp/gbp_policy_dpo.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
 #include
 #include
 
@@ -153,6 +154,13 @@ gbp_policy_dpo_interpose (const dpo_id_t * original,
   gpd_clone->gpd_sclass = gpd->gpd_sclass;
   gpd_clone->gpd_sw_if_index = gpd->gpd_sw_if_index;
 
+  /*
+   * if no interface is provided, grab one from the parent
+   * on which we stack
+   */
+  if (~0 == gpd_clone->gpd_sw_if_index)
+    gpd_clone->gpd_sw_if_index = dpo_get_urpf (parent);
+
   dpo_stack (gbp_policy_dpo_type, gpd_clone->gpd_proto,
              &gpd_clone->gpd_dpo, parent);
 
@@ -286,6 +294,15 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
       gpd0 = gbp_policy_dpo_get (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
       vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;
 
+      /*
+       * Reflection check; in and out on an ivxlan tunnel
+       */
+      if ((~0 != vxlan_gbp_tunnel_by_sw_if_index (gpd0->gpd_sw_if_index))
+          && (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_R))
+        {
+          goto trace;
+        }
+
       if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
         {
           next0 = gpd0->gpd_dpo.dpoi_next_node;
diff --git a/src/plugins/gbp/gbp_policy_node.c b/src/plugins/gbp/gbp_policy_node.c
index ff21e7d0e2e..1f2ac4310e0 100644
--- a/src/plugins/gbp/gbp_policy_node.c
+++ b/src/plugins/gbp/gbp_policy_node.c
@@ -17,9 +17,11 @@
 
 #include
 #include
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
 
 #define foreach_gbp_policy                      \
-  _(DENY,    "deny")
+  _(DENY,    "deny")                            \
+  _(REFLECTION, "reflection")
 
 typedef enum
 {
@@ -37,10 +39,8 @@ static char *gbp_policy_error_strings[] = {
 
 typedef enum
 {
-#define _(sym,str) GBP_POLICY_NEXT_##sym,
-  foreach_gbp_policy
-#undef _
-    GBP_POLICY_N_NEXT,
+  GBP_POLICY_NEXT_DROP,
+  GBP_POLICY_N_NEXT,
 } gbp_policy_next_t;
 
 /**
@@ -53,6 +53,7 @@ typedef struct gbp_policy_trace_t_
   u32 dst_epg;
   u32 acl_index;
   u32 allowed;
+  u32 flags;
 } gbp_policy_trace_t;
 
 always_inline dpo_proto_t
@@ -138,7 +139,7 @@ gbp_policy_inline (vlib_main_t * vm,
       index_t gci0;
 
       gc0 = NULL;
-      next0 = GBP_POLICY_NEXT_DENY;
+      next0 = GBP_POLICY_NEXT_DROP;
       bi0 = from[0];
       to_next[0] = bi0;
       from += 1;
@@ -151,7 +152,16 @@
       sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
 
       /*
-       * If the A0bit is set then policy has already been applied
+       * Reflection check; in and out on an ivxlan tunnel
+       */
+      if ((~0 != vxlan_gbp_tunnel_by_sw_if_index (sw_if_index0)) &&
+          (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_R))
+        {
+          goto trace;
+        }
+
+      /*
+       * If the A-bit is set then policy has already been applied
        * and we skip enforcement here.
       */
      if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
@@ -165,6 +175,7 @@ gbp_policy_inline (vlib_main_t * vm,
          key0.as_u32 = ~0;
          goto trace;
        }
+
      /*
       * determine the src and dst EPG
       */
@@ -307,8 +318,9 @@ gbp_policy_inline (vlib_main_t * vm,
          vlib_add_trace (vm, node, b0, sizeof (*t));
          t->sclass = key0.gck_src;
          t->dst_epg = key0.gck_dst;
-         t->acl_index = (gc0 ? gc0->gc_acl_index : ~0),
-         t->allowed = (next0 != GBP_POLICY_NEXT_DENY);
+         t->acl_index = (gc0 ? gc0->gc_acl_index : ~0);
+         t->allowed = (next0 != GBP_POLICY_NEXT_DROP);
+         t->flags = vnet_buffer2 (b0)->gbp.flags;
        }
 
      /* verify speculative enqueue, maybe switch current next frame */
@@ -346,8 +358,9 @@ format_gbp_policy_trace (u8 * s, va_list * args)
   gbp_policy_trace_t *t = va_arg (*args, gbp_policy_trace_t *);
 
   s =
-    format (s, "sclass:%d, dst:%d, acl:%d allowed:%d",
-            t->sclass, t->dst_epg, t->acl_index, t->allowed);
+    format (s, "sclass:%d, dst:%d, acl:%d allowed:%d flags:%U",
+            t->sclass, t->dst_epg, t->acl_index, t->allowed,
+            format_vxlan_gbp_header_gpflags, t->flags);
 
   return s;
 }
@@ -365,7 +378,7 @@ VLIB_REGISTER_NODE (gbp_policy_port_node) = {
 
   .n_next_nodes = GBP_POLICY_N_NEXT,
   .next_nodes = {
-    [GBP_POLICY_NEXT_DENY] = "error-drop",
+    [GBP_POLICY_NEXT_DROP] = "error-drop",
   },
 };
 
diff --git a/src/vnet/vxlan-gbp/decap.c b/src/vnet/vxlan-gbp/decap.c
index c520e25dcf2..23995c49fcb 100644
--- a/src/vnet/vxlan-gbp/decap.c
+++ b/src/vnet/vxlan-gbp/decap.c
@@ -330,7 +330,8 @@ vxlan_gbp_input (vlib_main_t * vm,
          pkts_decapsulated++;
        }
 
-      vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+      vnet_buffer2 (b0)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp0) |
+                                      VXLAN_GBP_GPFLAGS_R);
 
      vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
 
@@ -370,7 +371,9 @@ vxlan_gbp_input (vlib_main_t * vm,
          (rx_counter, thread_index, t1->sw_if_index, 1, len1);
        }
 
-      vnet_buffer2 (b1)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
+      vnet_buffer2 (b1)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp1) |
+                                      VXLAN_GBP_GPFLAGS_R);
+
      vnet_buffer2 (b1)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
 
      vnet_update_l2_len (b0);
@@ -476,7 +479,9 @@ vxlan_gbp_input (vlib_main_t * vm,
        vlib_increment_combined_counter
          (rx_counter, thread_index, t0->sw_if_index, 1, len0);
      }
 
-    vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+    vnet_buffer2 (b0)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp0) |
+                                    VXLAN_GBP_GPFLAGS_R);
+
    vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
 
    /* Required to make the l2 tag push / pop code work on l2 subifs */
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.h b/src/vnet/vxlan-gbp/vxlan_gbp.h
index 6580f38e119..af2e1809099 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.h
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.h
@@ -226,6 +226,17 @@ int vnet_vxlan_gbp_tunnel_del (u32 sw_if_indexp);
 void vnet_int_vxlan_gbp_bypass_mode (u32 sw_if_index, u8 is_ip6,
                                      u8 is_enable);
 
+always_inline u32
+vxlan_gbp_tunnel_by_sw_if_index (u32 sw_if_index)
+{
+  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+
+  if (sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index))
+    return ~0;
+
+  return (vxm->tunnel_index_by_sw_if_index[sw_if_index]);
+}
+
 #endif /* included_vnet_vxlan_gbp_h */
 
 /*
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_packet.h b/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
index 33bccd6aed6..58fe00323b0 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
@@ -63,6 +63,11 @@
  * Group membership being encapsulated by VXLAN. Its value is source
  * class id.
 *
+ * FOR INTERNAL USE ONLY
+ * R bit: Bit 12 of the initial word is defined as the reflection bit.
+ *        Set on packet rx, checked on tx and dropped if set. This prevents
+ *        packets received on an iVXLAN tunnel from being reflected back out
+ *        of another.
 */

 typedef struct
@@ -103,7 +108,8 @@ typedef enum
 _ (0x40, D) \
 _ (0x20, E) \
 _ (0x10, S) \
-_ (0x08, A)
+_ (0x08, A) \
+_ (0x04, R)
 
 typedef enum
 {
diff --git a/test/test_gbp.py b/test/test_gbp.py
index 17e9f93e938..7f9032970d5 100644
--- a/test/test_gbp.py
+++ b/test/test_gbp.py
@@ -3561,6 +3561,21 @@ class TestGBP(VppTestCase):
         rxs = self.send_and_expect(self.pg7, p * 3, self.pg0)
         self.assertFalse(find_gbp_endpoint(self, ip="10.222.0.1"))
 
+        #
+        # ping from host in remote to remote external subnets
+        # this is dropped by the reflection check.
+        #
+        p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+             IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+             UDP(sport=1234, dport=48879) /
+             VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+             Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+             IP(src="10.222.0.1", dst="10.222.0.2") /
+             UDP(sport=1234, dport=1234) /
+             Raw('\xa5' * 100))
+
+        rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
+
         #
         # cleanup
         #
-- 
cgit 1.2.3-korg
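
The reflection check above works in two halves: vxlan_gbp_input() marks every
decapsulated packet with VXLAN_GBP_GPFLAGS_R, and both policy paths
(gbp_policy_node.c and gbp_policy_dpo.c) drop a packet whose egress interface
is itself an iVXLAN tunnel while that mark is still set. The standalone C
sketch below models that round trip; gbp_meta_t, is_ivxlan_tunnel() and the
GPFLAG_* constants are assumed, illustrative stand-ins for
vnet_buffer2()->gbp, vxlan_gbp_tunnel_by_sw_if_index() and VXLAN_GBP_GPFLAGS_*,
so treat it as a simplified model of the logic rather than the plugin code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GPFLAG_A 0x08  /* policy already applied */
#define GPFLAG_R 0x04  /* packet arrived on an iVXLAN tunnel (internal only) */

typedef struct
{
  uint8_t flags;
  uint16_t sclass;
} gbp_meta_t;

/* Stand-in for vxlan_gbp_tunnel_by_sw_if_index(): returns true when the
 * egress interface is an iVXLAN tunnel.  Interface 7 is the pretend tunnel. */
static bool
is_ivxlan_tunnel (uint32_t sw_if_index)
{
  return (sw_if_index == 7);
}

/* Decap side: copy the GP flags from the header and mark reflection. */
static void
decap (gbp_meta_t * m, uint8_t hdr_gpflags, uint16_t hdr_sclass)
{
  m->flags = (uint8_t) (hdr_gpflags | GPFLAG_R);
  m->sclass = hdr_sclass;
}

/* Policy/egress side: drop if the packet arrived on an iVXLAN tunnel and is
 * about to leave on one; otherwise the normal policy lookup would continue. */
static bool
policy_allows_tx (const gbp_meta_t * m, uint32_t tx_sw_if_index)
{
  if (is_ivxlan_tunnel (tx_sw_if_index) && (m->flags & GPFLAG_R))
    return false;
  return true;
}

int
main (void)
{
  gbp_meta_t m = { 0, 0 };

  decap (&m, GPFLAG_A, 4222);   /* packet arrived over the tunnel */

  printf ("egress on tunnel:     %s\n",
          policy_allows_tx (&m, 7) ? "allowed" : "dropped");
  printf ("egress on local port: %s\n",
          policy_allows_tx (&m, 1) ? "allowed" : "dropped");
  return 0;
}

With interface 7 standing in for the tunnel, the first lookup is dropped
(tunnel in, tunnel out) while the second is allowed, which mirrors what the
new test_gbp.py case asserts with send_and_assert_no_replies().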