| author | Benoît Ganne <bganne@cisco.com> | 2019-03-01 14:14:10 +0100 |
|---|---|---|
| committer | Damjan Marion <dmarion@me.com> | 2019-03-26 10:06:57 +0000 |
| commit | 4af1a7f938207f713c249270ed98a0c12f72cd3f (patch) | |
| tree | 42c93b22e20228641de844d553819f400676cc5e /src/vnet/vxlan-gbp/encap.c | |
| parent | 50d5069a80fd763f4270d61d991febbdfa88aed3 (diff) | |
Simplify adjacency rewrite code
Use memcpy instead of complex, rewrite-specific copy logic. This simplifies
the implementation and also improves performance slightly.
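
As a rough illustration of that trade-off (hypothetical helper names; in VPP the roles are played by the vnet_rewrite macros and the clib_memcpy_fast calls seen in the diff below), a word-chunked copy needs extra handling for lengths that are not a multiple of the word size, while a plain memcpy does not:

```c
/* Hypothetical sketch only -- not the actual vnet_rewrite implementation. */
#include <stdint.h>
#include <string.h>

/* Old style: copy the rewrite string in fixed 8-byte (uword) chunks. A
 * 36-byte rewrite copies only 4 full chunks (32 bytes) unless additional
 * special-case code deals with the 4 remaining bytes. */
static inline void
rewrite_copy_chunked_sketch (void *dst, const void *src, int n_bytes)
{
  uint64_t *d = (uint64_t *) dst;
  const uint64_t *s = (const uint64_t *) src;
  for (int i = 0; i < n_bytes / 8; i++)
    d[i] = s[i];
}

/* New style: one memcpy of the exact length; no tail special cases, and the
 * compiler/libc already generate near-optimal code for small copies. */
static inline void
rewrite_copy_simple_sketch (void *dst, const void *src, int n_bytes)
{
  memcpy (dst, src, (size_t) n_bytes);
}
```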
Also move the adjacency data from the tail to the head of the buffer, which
improves cache locality (header and data share the same cacheline).
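
A simplified sketch of that layout change, assuming 64-byte cachelines and made-up field names (the real structures in vnet/rewrite.h are more involved). Note how the tail placement forces every user to recompute a start offset, which is exactly what the removed rw_hdr_offset in the diff below did:

```c
#include <stdint.h>

#define REWRITE_BYTES 60	/* example capacity of the rewrite string buffer */

typedef struct
{
  uint16_t data_bytes;		/* length of the rewrite string stored in data[] */
  uint8_t data[REWRITE_BYTES];
} rewrite_sketch_t;

/* Before: tail placement. The string is right-aligned in data[], so the
 * metadata and the bytes actually copied can land in different cachelines. */
static inline uint8_t *
rewrite_start_tail (rewrite_sketch_t * rw)
{
  return rw->data + REWRITE_BYTES - rw->data_bytes;
}

/* After: head placement. The string starts at data[0], right after the
 * metadata, so for typical encap headers both share one cacheline. */
static inline uint8_t *
rewrite_start_head (rewrite_sketch_t * rw)
{
  return rw->data;
}
```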
Finally, fix VxLAN, which used to work around the vnet_rewrite logic.
Change-Id: I770ddad9846f7ee505aa99ad417e6a61d5cbbefa
Signed-off-by: Benoît Ganne <bganne@cisco.com>
Diffstat (limited to 'src/vnet/vxlan-gbp/encap.c')
| -rw-r--r-- | src/vnet/vxlan-gbp/encap.c | 23 |

1 file changed, 5 insertions, 18 deletions
diff --git a/src/vnet/vxlan-gbp/encap.c b/src/vnet/vxlan-gbp/encap.c
index 2fe3fa8d437..b687cbf1cfe 100644
--- a/src/vnet/vxlan-gbp/encap.c
+++ b/src/vnet/vxlan-gbp/encap.c
@@ -97,7 +97,6 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
   u8 const underlay_hdr_len = is_ip4 ?
     sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
-  u8 const rw_hdr_offset = sizeof t0->rewrite_data - underlay_hdr_len;
   u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
   u32 const csum_flags = is_ip4 ? VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
     VNET_BUFFER_F_IS_IP4 |
@@ -176,6 +175,9 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
       ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
       ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
 
+      vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b0),
+                                vlib_buffer_get_current (b1),
+                                underlay_hdr_len);
       vlib_buffer_advance (b0, -underlay_hdr_len);
       vlib_buffer_advance (b1, -underlay_hdr_len);
@@ -188,16 +190,6 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
       void *underlay0 = vlib_buffer_get_current (b0);
       void *underlay1 = vlib_buffer_get_current (b1);
 
-      /* vnet_rewrite_two_header writes only in (uword) 8 bytes chunks
-       * and discards the first 4 bytes of the (36 bytes ip4 underlay) rewrite
-       * use memcpy as a workaround */
-      clib_memcpy_fast (underlay0,
-                        t0->rewrite_header.data + rw_hdr_offset,
-                        underlay_hdr_len);
-      clib_memcpy_fast (underlay1,
-                        t1->rewrite_header.data + rw_hdr_offset,
-                        underlay_hdr_len);
-
       ip4_header_t *ip4_0, *ip4_1;
       qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
       ip6_header_t *ip6_0, *ip6_1;
@@ -370,17 +362,12 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
       vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
 
       ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
+      vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b0),
+                               underlay_hdr_len);
       vlib_buffer_advance (b0, -underlay_hdr_len);
       void *underlay0 = vlib_buffer_get_current (b0);
 
-      /* vnet_rewrite_one_header writes only in (uword) 8 bytes chunks
-       * and discards the first 4 bytes of the (36 bytes ip4 underlay) rewrite
-       * use memcpy as a workaround */
-      clib_memcpy_fast (underlay0,
-                        t0->rewrite_header.data + rw_hdr_offset,
-                        underlay_hdr_len);
-
       u32 len0 = vlib_buffer_length_in_chain (vm, b0);
       u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
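
The comments removed above also explain why the VxLAN-GBP workaround existed in the first place: the old chunked copy moved the 36-byte IPv4 underlay only in 8-byte words. A tiny standalone illustration of that arithmetic (the 36-byte figure comes from the removed comment, not from measuring the structures here):

```c
#include <stdio.h>

int
main (void)
{
  const unsigned underlay_hdr_len = 36;	/* "36 bytes ip4 underlay" per the old comment */
  const unsigned word = 8;		/* sizeof (uword) on 64-bit hosts */
  unsigned copied = (underlay_hdr_len / word) * word;

  printf ("chunked copy: %u of %u bytes copied, %u bytes dropped\n",
	  copied, underlay_hdr_len, underlay_hdr_len - copied);
  /* prints: chunked copy: 32 of 36 bytes copied, 4 bytes dropped */
  return 0;
}
```

With the rewrite helpers now memcpy-based, vnet_rewrite_one_header and vnet_rewrite_two_headers copy the full underlay, so encap.c no longer needs its private clib_memcpy_fast path.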