author    Neale Ranns <nranns@cisco.com>    2020-03-31 09:21:29 -0400
committer Damjan Marion <dmarion@me.com>    2020-05-04 17:09:34 +0000
commit    4ec36c5535849a4e456ed99b57968d54d5e03b62 (patch)
tree      47c807c525858db02f7d1e0e4df32b14441ed5c8 /src/vnet/ip
parent    b723ccf95ffd8581be15e0752eac2c5f7233b340 (diff)
fib: midchain adjacency optimisations

Type: improvement

- inline some common encap fixup functions into the midchain rewrite
  node so we don't incur the cost of the virtual function call
- change the copy 'guess' from ethernet_header (which will never happen)
  to an ip4 header
- add adj-midchain-tx to multiarch sources
- don't run adj-midchain-tx as a feature; instead put this node as the
  adj's next and at the end of the feature arc
- cache the feature arc config index (to save the cache miss going to
  fetch it)
- don't check if features are enabled when taking the arc (since we know
  they are)

The last two changes will also benefit normal adjacencies taking the arc
(i.e. for NAT, ACLs, etc).

For IPSec:
- don't run esp_encrypt as a feature; instead, when required, insert this
  node into the adj's next and into the end of the feature arc. This
  implies that encrypt is always 'the last feature' run, which is
  symmetric with decrypt always being the first.
- esp_encrypt for tunnels has adj-midchain-tx as the next node

Change-Id: Ida0af56a704302cf2d7797ded5f118a781e8acb7
Signed-off-by: Neale Ranns <nranns@cisco.com>
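The data-plane intent behind "inline the fixup functions" and "cache the
feature arc config index" can be shown with a short, self-contained sketch.
All names below are hypothetical stand-ins, not the VPP sources; it only
illustrates the pattern the commit message describes.

    /* Illustrative sketch only -- hypothetical names, not the VPP code. */
    #include <stdint.h>

    typedef struct buffer_t_ buffer_t;	/* stand-in for a packet buffer */

    typedef enum
    {
      FIXUP_NONE = 0,
      FIXUP_GRE,		/* e.g. patch length/checksum of a GRE+IP encap */
      FIXUP_VXLAN_GPE,		/* e.g. patch length of a VXLAN-GPE encap */
    } fixup_type_t;

    typedef struct
    {
      /* before: a per-packet indirect call through this pointer */
      void (*fixup_func) (buffer_t * b, const void *data);
      const void *fixup_data;

      /* after: a small type tag the rewrite node can switch on inline */
      fixup_type_t fixup_type;

      /* after: the feature-arc config index, cached when the rewrite is
       * stacked, so the per-packet cache miss to fetch it goes away */
      uint32_t cached_cfg_index;
    } adjacency_t;

    static inline void fixup_gre (buffer_t * b) { /* patch the encap just written */ }
    static inline void fixup_vxlan_gpe (buffer_t * b) { /* likewise */ }

    /* before: one hard-to-predict indirect call per midchain packet */
    static inline void
    midchain_fixup_indirect (adjacency_t * adj, buffer_t * b)
    {
      if (adj->fixup_func)
        adj->fixup_func (b, adj->fixup_data);
    }

    /* after: the common fixups are inlined behind a direct switch, so the
     * virtual-function-call cost the commit message mentions disappears */
    static inline void
    midchain_fixup_inline (adjacency_t * adj, buffer_t * b)
    {
      switch (adj->fixup_type)
        {
        case FIXUP_GRE:
          fixup_gre (b);
          break;
        case FIXUP_VXLAN_GPE:
          fixup_vxlan_gpe (b);
          break;
        case FIXUP_NONE:
        default:
          break;
        }
    }

    /* after: the arc is started with the index cached on the adjacency and
     * the "are features enabled?" check is skipped -- the rewrite node only
     * takes the arc when the adjacency is flagged as having features */
    static inline uint32_t
    feature_arc_start_cfg_index (const adjacency_t * adj)
    {
      return adj->cached_cfg_index;	/* no per-packet config lookup */
    }

In the diff below, the inlined call shows up as adj_midchain_fixup() and the
cached index as adj0->ia_cfg_index passed to
vnet_feature_arc_start_w_cfg_index().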
Diffstat (limited to 'src/vnet/ip')
-rw-r--r--  src/vnet/ip/ip4_forward.c  86
-rw-r--r--  src/vnet/ip/ip6_forward.c  37
2 files changed, 79 insertions(+), 44 deletions(-)
diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c
index acff66d994b..ea78d550789 100644
--- a/src/vnet/ip/ip4_forward.c
+++ b/src/vnet/ip/ip4_forward.c
@@ -54,6 +54,7 @@
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/classify_dpo.h>
#include <vnet/mfib/mfib_table.h> /* for mFIB table and entry creation */
+#include <vnet/adj/adj_dp.h>
#include <vnet/ip/ip4_forward.h>
#include <vnet/interface_output.h>
@@ -2222,8 +2223,11 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
if (PREDICT_FALSE
(adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index0, &next_index, b[0]);
+ vnet_feature_arc_start_w_cfg_index (lm->output_feature_arc_index,
+ tx_sw_if_index0,
+ &next_index, b[0],
+ adj0->ia_cfg_index);
+
next[0] = next_index;
if (is_midchain)
vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
@@ -2246,8 +2250,10 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
if (PREDICT_FALSE
(adj1[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index1, &next_index, b[1]);
+ vnet_feature_arc_start_w_cfg_index (lm->output_feature_arc_index,
+ tx_sw_if_index1,
+ &next_index, b[1],
+ adj1->ia_cfg_index);
next[1] = next_index;
if (is_midchain)
vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
@@ -2261,9 +2267,14 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
ip4_ttl_inc (b[1], ip1);
}
- /* Guess we are only writing on simple Ethernet header. */
- vnet_rewrite_two_headers (adj0[0], adj1[0],
- ip0, ip1, sizeof (ethernet_header_t));
+ if (is_midchain)
+ /* Guess we are only writing on ipv4 header. */
+ vnet_rewrite_two_headers (adj0[0], adj1[0],
+ ip0, ip1, sizeof (ip4_header_t));
+ else
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_two_headers (adj0[0], adj1[0],
+ ip0, ip1, sizeof (ethernet_header_t));
if (do_counters)
{
@@ -2284,12 +2295,10 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
if (is_midchain)
{
- if (error0 == IP4_ERROR_NONE && adj0->sub_type.midchain.fixup_func)
- adj0->sub_type.midchain.fixup_func
- (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
- if (error1 == IP4_ERROR_NONE && adj1->sub_type.midchain.fixup_func)
- adj1->sub_type.midchain.fixup_func
- (vm, adj1, b[1], adj1->sub_type.midchain.fixup_data);
+ if (error0 == IP4_ERROR_NONE)
+ adj_midchain_fixup (vm, adj0, b[0]);
+ if (error1 == IP4_ERROR_NONE)
+ adj_midchain_fixup (vm, adj1, b[1]);
}
if (is_mcast)
@@ -2391,17 +2400,25 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
if (PREDICT_FALSE
(adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index0, &next_index, b[0]);
+ vnet_feature_arc_start_w_cfg_index (lm->output_feature_arc_index,
+ tx_sw_if_index0,
+ &next_index, b[0],
+ adj0->ia_cfg_index);
next[0] = next_index;
if (is_midchain)
- vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
- 0 /* is_ip6 */ ,
- 0 /* with gso */ );
+ {
+ vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
+ 0 /* is_ip6 */ ,
+ 0 /* with gso */ );
- /* Guess we are only writing on simple Ethernet header. */
- vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
+ /* Guess we are only writing on ipv4 header. */
+ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip4_header_t));
+ }
+ else
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_one_header (adj0[0], ip0,
+ sizeof (ethernet_header_t));
/*
* Bump the per-adjacency counters
@@ -2413,9 +2430,8 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
adj_index0, 1, vlib_buffer_length_in_chain (vm,
b[0]) + rw_len0);
- if (is_midchain && adj0->sub_type.midchain.fixup_func)
- adj0->sub_type.midchain.fixup_func
- (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
+ if (is_midchain)
+ adj_midchain_fixup (vm, adj0, b[0]);
if (is_mcast)
/* copy bytes from the IP address into the MAC rewrite */
@@ -2491,18 +2507,26 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
if (PREDICT_FALSE
(adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index0, &next_index, b[0]);
+ vnet_feature_arc_start_w_cfg_index (lm->output_feature_arc_index,
+ tx_sw_if_index0,
+ &next_index, b[0],
+ adj0->ia_cfg_index);
next[0] = next_index;
if (is_midchain)
- /* this acts on the packet that is about to be encapped */
- vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
- 0 /* is_ip6 */ ,
- 0 /* with gso */ );
+ {
+ /* this acts on the packet that is about to be encapped */
+ vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
+ 0 /* is_ip6 */ ,
+ 0 /* with gso */ );
- /* Guess we are only writing on simple Ethernet header. */
- vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
+ /* Guess we are only writing on ipv4 header. */
+ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip4_header_t));
+ }
+ else
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_one_header (adj0[0], ip0,
+ sizeof (ethernet_header_t));
if (do_counters)
vlib_increment_combined_counter
diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c
index 0a455351ad2..1d6c1b7f105 100644
--- a/src/vnet/ip/ip6_forward.c
+++ b/src/vnet/ip/ip6_forward.c
@@ -1893,8 +1893,9 @@ ip6_rewrite_inline_with_gso (vlib_main_t * vm,
if (PREDICT_FALSE
(adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index0, &next0, p0);
+ vnet_feature_arc_start_w_cfg_index
+ (lm->output_feature_arc_index, tx_sw_if_index0, &next0, p0,
+ adj0->ia_cfg_index);
}
else
{
@@ -1911,8 +1912,9 @@ ip6_rewrite_inline_with_gso (vlib_main_t * vm,
if (PREDICT_FALSE
(adj1[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index1, &next1, p1);
+ vnet_feature_arc_start_w_cfg_index
+ (lm->output_feature_arc_index, tx_sw_if_index1, &next1, p1,
+ adj1->ia_cfg_index);
}
else
{
@@ -1929,11 +1931,15 @@ ip6_rewrite_inline_with_gso (vlib_main_t * vm,
vnet_calc_checksums_inline (vm, p1, 0 /* is_ip4 */ ,
1 /* is_ip6 */ ,
0 /* with gso */ );
- }
- /* Guess we are only writing on simple Ethernet header. */
- vnet_rewrite_two_headers (adj0[0], adj1[0],
- ip0, ip1, sizeof (ethernet_header_t));
+ /* Guess we are only writing on ipv6 header. */
+ vnet_rewrite_two_headers (adj0[0], adj1[0],
+ ip0, ip1, sizeof (ip6_header_t));
+ }
+ else
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_two_headers (adj0[0], adj1[0],
+ ip0, ip1, sizeof (ethernet_header_t));
if (is_midchain)
{
@@ -2022,10 +2028,14 @@ ip6_rewrite_inline_with_gso (vlib_main_t * vm,
vnet_calc_checksums_inline (vm, p0, 0 /* is_ip4 */ ,
1 /* is_ip6 */ ,
0 /* with gso */ );
- }
- /* Guess we are only writing on simple Ethernet header. */
- vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
+ /* Guess we are only writing on ip6 header. */
+ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip6_header_t));
+ }
+ else
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_one_header (adj0[0], ip0,
+ sizeof (ethernet_header_t));
/* Update packet buffer attributes/set output interface. */
rw_len0 = adj0[0].rewrite_header.data_bytes;
@@ -2065,8 +2075,9 @@ ip6_rewrite_inline_with_gso (vlib_main_t * vm,
if (PREDICT_FALSE
(adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index0, &next0, p0);
+ vnet_feature_arc_start_w_cfg_index
+ (lm->output_feature_arc_index, tx_sw_if_index0, &next0, p0,
+ adj0->ia_cfg_index);
}
else
{