diff options
author    : Zhiyong Yang <zhiyong.yang@intel.com>   2020-03-27 13:04:38 +0000
committer : Damjan Marion <dmarion@me.com>          2020-03-30 12:59:09 +0000
commit    : 102dd1cfa792f3e70679463df141fe21fc67a8ea (patch)
tree      : d67f4d43a9e44a33fbe5ff3d045e31e2f840b33c /src
parent    : 0c7aa7ab54dd6c9074079d589d0c32873a1085bb (diff)
vxlan: leverage vlib_get_buffers in vxlan_encap_inline
vlib_get_buffers can save about 1.2 clocks per packet for vxlan encap
graph node on Skylake.
Type: improvement
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
Change-Id: I9cad3211883de117c1b84324e8dfad38879de2d2
Diffstat (limited to 'src')
 -rw-r--r--  src/vnet/vxlan/encap.c | 26 ++++++++++++--------------
 1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/src/vnet/vxlan/encap.c b/src/vnet/vxlan/encap.c
index da890b2d5fb..2426a8ccaf0 100644
--- a/src/vnet/vxlan/encap.c
+++ b/src/vnet/vxlan/encap.c
@@ -81,6 +81,8 @@ vxlan_encap_inline (vlib_main_t * vm,
   u32 next0 = 0, next1 = 0;
   vxlan_tunnel_t * t0 = NULL, * t1 = NULL;
   index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
+  vlib_buffer_t **b = bufs;
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
@@ -96,6 +98,7 @@ vxlan_encap_inline (vlib_main_t * vm,
   u32 const csum_flags = is_ip4 ? VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
     VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM :
     VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+  vlib_get_buffers (vm, from, bufs, n_left_from);
 
   while (n_left_from > 0)
     {
@@ -108,16 +111,11 @@ vxlan_encap_inline (vlib_main_t * vm,
 	{
 	  /* Prefetch next iteration. */
 	  {
-	    vlib_buffer_t * p2, * p3;
+	    vlib_prefetch_buffer_header (b[2], LOAD);
+	    vlib_prefetch_buffer_header (b[3], LOAD);
 
-	    p2 = vlib_get_buffer (vm, from[2]);
-	    p3 = vlib_get_buffer (vm, from[3]);
-
-	    vlib_prefetch_buffer_header (p2, LOAD);
-	    vlib_prefetch_buffer_header (p3, LOAD);
-
-	    CLIB_PREFETCH (p2->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
-	    CLIB_PREFETCH (p3->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	    CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+	    CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
 	  }
 
 	  u32 bi0 = to_next[0] = from[0];
 	  u32 bi1 = to_next[1] = from[1];
@@ -127,8 +125,10 @@ vxlan_encap_inline (vlib_main_t * vm,
 	  to_next += 2;
 	  n_left_to_next -= 2;
 	  n_left_from -= 2;
 
-	  vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
-	  vlib_buffer_t * b1 = vlib_get_buffer (vm, bi1);
+	  vlib_buffer_t * b0 = b[0];
+	  vlib_buffer_t * b1 = b[1];
+	  b += 2;
+
 	  u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
 	  u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);
@@ -334,7 +334,9 @@ vxlan_encap_inline (vlib_main_t * vm,
 	  n_left_from -= 1;
 	  n_left_to_next -= 1;
 
-	  vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
+	  vlib_buffer_t * b0 = b[0];
+	  b += 1;
+
 	  u32 flow_hash0 = vnet_l2_compute_flow_hash(b0);
 
 	  /* Get next node index and adj index from tunnel next_dpo */