diff options
author | Mauro Sardara <msardara@cisco.com> | 2022-03-22 17:53:46 +0000 |
---|---|---|
committer | Neale Ranns <neale@graphiant.com> | 2022-03-30 17:51:33 +0000 |
commit | 9539647b895c456ca53892a9259e3127c6b92d35 (patch) | |
tree | f4c0a345a2dc27eb5aff092e3b6cdc1be2d66dfe /src/vnet/vxlan-gpe/encap.c | |
parent | 591efc2f573baed38d79a0b9937ca4ff50732c1b (diff) |
udp: fix inner packet checksum calculation in udp-encap
When computing the inner packet checksum, the code wrongly
assumes that the IP version of the inner packet is the
same as that of the outer one. On the contrary, it is perfectly
possible to encapsulate v6 packets into v4 and vice versa,
so we need to check the IP version of the inner header before
calling vnet_calc_checksums_inline.
Ticket: VPP-2020
Type: fix
Signed-off-by: Mauro Sardara <msardara@cisco.com>
Change-Id: Ia4515563c164f6dd5096832c831a48cb0a29b3ad
Signed-off-by: Mauro Sardara <msardara@cisco.com>
Diffstat (limited to 'src/vnet/vxlan-gpe/encap.c')
-rw-r--r-- | src/vnet/vxlan-gpe/encap.c | 49 |
1 file changed, 27 insertions, 22 deletions
diff --git a/src/vnet/vxlan-gpe/encap.c b/src/vnet/vxlan-gpe/encap.c index daa0381c4bb..35a5529e80b 100644 --- a/src/vnet/vxlan-gpe/encap.c +++ b/src/vnet/vxlan-gpe/encap.c @@ -88,13 +88,15 @@ format_vxlan_gpe_encap_trace (u8 * s, va_list * args) * */ always_inline void -vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0, - vxlan_gpe_tunnel_t * t0, u32 * next0, u8 is_v4) +vxlan_gpe_encap_one_inline (vxlan_gpe_main_t *ngm, vlib_buffer_t *b0, + vxlan_gpe_tunnel_t *t0, u32 *next0, + ip_address_family_t af) { ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36); ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56); - ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4); + ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, af, + N_AF); next0[0] = t0->encap_next_node; } @@ -112,16 +114,18 @@ vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0, * */ always_inline void -vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0, - vlib_buffer_t * b1, vxlan_gpe_tunnel_t * t0, - vxlan_gpe_tunnel_t * t1, u32 * next0, - u32 * next1, u8 is_v4) +vxlan_gpe_encap_two_inline (vxlan_gpe_main_t *ngm, vlib_buffer_t *b0, + vlib_buffer_t *b1, vxlan_gpe_tunnel_t *t0, + vxlan_gpe_tunnel_t *t1, u32 *next0, u32 *next1, + ip_address_family_t af) { ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36); ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56); - ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4); - ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, is_v4); + ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, af, + N_AF); + ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, af, + N_AF); next0[0] = next1[0] = t0->encap_next_node; } @@ -170,7 +174,7 @@ vxlan_gpe_encap (vlib_main_t * vm, u32 sw_if_index0 = ~0, sw_if_index1 = ~0, len0, len1; vnet_hw_interface_t *hi0, *hi1; vxlan_gpe_tunnel_t *t0 = NULL, *t1 = NULL; - u8 is_ip4_0 = 0, is_ip4_1 
= 0; + ip_address_family_t af_0 = AF_IP4, af_1 = AF_IP4; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); @@ -201,7 +205,7 @@ vxlan_gpe_encap (vlib_main_t * vm, n_left_to_next -= 2; n_left_from -= 2; - /* get the flag "is_ip4" */ + /* get "af_0" */ if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX]) { sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX]; @@ -210,10 +214,10 @@ vxlan_gpe_encap (vlib_main_t * vm, vnet_buffer (b[0])->sw_if_index [VLIB_TX]); t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance); - is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4); + af_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6); } - /* get the flag "is_ip4" */ + /* get "af_1" */ if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX]) { if (sw_if_index0 == vnet_buffer (b[1])->sw_if_index[VLIB_TX]) @@ -221,7 +225,7 @@ vxlan_gpe_encap (vlib_main_t * vm, sw_if_index1 = sw_if_index0; hi1 = hi0; t1 = t0; - is_ip4_1 = is_ip4_0; + af_1 = af_0; } else { @@ -231,19 +235,20 @@ vxlan_gpe_encap (vlib_main_t * vm, vnet_buffer (b[1])->sw_if_index [VLIB_TX]); t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance); - is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4); + af_1 = + (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? 
AF_IP4 : AF_IP6); } } - if (PREDICT_TRUE (is_ip4_0 == is_ip4_1)) + if (PREDICT_TRUE (af_0 == af_1)) { vxlan_gpe_encap_two_inline (ngm, b[0], b[1], t0, t1, &next0, - &next1, is_ip4_0); + &next1, af_0); } else { - vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, is_ip4_0); - vxlan_gpe_encap_one_inline (ngm, b[1], t1, &next1, is_ip4_1); + vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, af_0); + vxlan_gpe_encap_one_inline (ngm, b[1], t1, &next1, af_1); } /* Reset to look up tunnel partner in the configured FIB */ @@ -325,7 +330,7 @@ vxlan_gpe_encap (vlib_main_t * vm, n_left_from -= 1; n_left_to_next -= 1; - /* get the flag "is_ip4" */ + /* get "af_0" */ if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX]) { sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX]; @@ -336,10 +341,10 @@ vxlan_gpe_encap (vlib_main_t * vm, t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance); - is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4); + af_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6); } - vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, is_ip4_0); + vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, af_0); /* Reset to look up tunnel partner in the configured FIB */ vnet_buffer (b[0])->sw_if_index[VLIB_TX] = t0->encap_fib_index; |