| author | John Lo <loj@cisco.com> | 2018-02-13 17:15:23 -0500 |
|---|---|---|
| committer | Neale Ranns <nranns@cisco.com> | 2018-02-15 11:07:56 +0000 |
| commit | a43ccaefc3bd50c03c90f7c3bee02eac9709df56 (patch) | |
| tree | bd2820c08864b8c3094cc6f6dddb279926c625cc /src/vnet/gre/node.c | |
| parent | 5fda7a3925be145f0c326d0aecc36d883cbcb2ee (diff) | |
Optimize GRE Tunnel and add support for ERSPAN encap
Change GRE tunnel to use the interface type where the same encap
node is used as the output node for all GRE tunnels, instead of
having dedicated output and tx nodes for each tunnel. This allows
for more efficient tunnel creation and deletion at scale, tested
with thousands of GRE tunnels.
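A minimal sketch of the shared-output-node pattern follows. It uses generic vnet helpers (vnet_register_interface, vnet_set_interface_output_node) and the GRE device/hw interface classes purely as an illustration; it is not the literal code in src/vnet/gre/interface.c, and the helper name is an assumption.

```c
#include <vnet/vnet.h>
#include <vnet/gre/gre.h>

/*
 * Sketch: every GRE tunnel gets its own sw/hw interface, but all of
 * them transmit through one shared encap node, so creating or deleting
 * a tunnel no longer adds or removes graph nodes.  Illustration only;
 * the real registration code lives in src/vnet/gre/interface.c.
 */
static u32
gre_register_tunnel_interface_sketch (vnet_main_t * vnm,
				      u32 dev_instance,
				      u32 encap_node_index)
{
  /* One hw interface per tunnel, named by the GRE device class. */
  u32 hw_if_index = vnet_register_interface (vnm,
					     gre_device_class.index,
					     dev_instance,
					     gre_hw_interface_class.index,
					     dev_instance);

  /* All tunnels share the same encap node as their output node. */
  vnet_set_interface_output_node (vnm, hw_if_index, encap_node_index);
  return hw_if_index;
}
```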
Add support for ERSPAN encap as another tunnel type, in addition
to the existing L3 and TEB types. The GRE ERSPAN encap supported
is type 2, so the GRE encap needs to include a sequence number,
and a GRE-ERSPAN tunnel can be created with a user-specified
ERSPAN session ID. The GRE tunnel lookup hash key is updated to
include the tunnel type and session ID, in addition to SIP/DIP
and FIB index.
Thus, a GRE-ERSPAN tunnel can be created, with the appropriate
session ID, to be used as the output interface for a SPAN config
to send mirrored packets.
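For reference, a hedged sketch of what the updated IPv4 lookup key carries. Field names, widths and packing are illustrative (the real definition is in src/vnet/gre/gre.h and is built by gre_mk_key4()/gre_mk_key6(), which take the extra tunnel-type and session-ID arguments visible in the diff below).

```c
#include <stdint.h>

/*
 * Sketch of the IPv4 GRE tunnel lookup key after this change.  The
 * point is that tunnel type and ERSPAN session ID now disambiguate
 * tunnels that share the same SIP/DIP and FIB index.
 */
typedef struct
{
  uint32_t src;          /* outer IPv4 source address (SIP) */
  uint32_t dst;          /* outer IPv4 destination address (DIP) */
  uint32_t fib_index;    /* FIB index of the outer header lookup */
  uint8_t tunnel_type;   /* L3, TEB or ERSPAN */
  uint16_t session_id;   /* ERSPAN session ID, 0 for L3/TEB tunnels */
} gre_tunnel_key4_sketch_t;
```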
Change interface naming so that all GRE tunnels, irrespective of
tunnel type, use "greN" where N is the instance number. Remove
interface reuse on tunnel creation and deletion to enable a
unified tunnel interface name.
Add support for a user-specified instance on GRE tunnel creation.
Thus, N in the "greN" interface name can optionally be specified
by the user via CLI/API.
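A minimal sketch of the unified naming with an optional user instance; the helper name, the ~0 sentinel and the fallback rule are assumptions, and the real logic lives in src/vnet/gre/interface.c.

```c
#include <stdio.h>
#include <stdint.h>

/* Sentinel meaning "no instance given by the user" (assumption). */
#define GRE_INSTANCE_UNSET ((uint32_t) ~0)

/*
 * Sketch: pick the "N" in the unified "greN" interface name.  The same
 * name format is used for L3, TEB and ERSPAN tunnels alike.
 */
static void
gre_if_name_sketch (char *buf, size_t len,
		    uint32_t user_instance, uint32_t auto_instance)
{
  /* Use the user-supplied N when given, else an auto-assigned index
   * (e.g. the tunnel's pool index). */
  uint32_t n = (user_instance != GRE_INSTANCE_UNSET) ?
    user_instance : auto_instance;
  snprintf (buf, len, "gre%u", n);
}
```

So requesting instance 7 at creation yields gre7 regardless of tunnel type, while omitting the instance falls back to an auto-assigned number.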
Optimize GRE tunnel encap DPO stacking to bypass the load-balance
DPO node, since packets output on a GRE tunnel always belong to
the same flow after the 5-tuple hash.
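One way to realize this, sketched with VPP's generic FIB/DPO helpers. The helper name and the single-bucket check are assumptions, not the literal restack code in the GRE source; the idea is simply that one tunnel maps to one bucket, so the load-balance DPO adds no value.

```c
#include <vnet/dpo/load_balance.h>
#include <vnet/fib/fib_entry.h>

/*
 * Sketch of the encap DPO restack optimization.  Because every packet
 * sent on a given GRE tunnel carries the same outer header, the 5-tuple
 * hash always selects the same load-balance bucket, so the tunnel can
 * stack directly on that bucket's DPO and skip the load-balance node.
 */
static void
gre_tunnel_restack_sketch (u32 child_node_index,
			   fib_node_index_t fib_entry_index,
			   dpo_id_t * tunnel_dpo)
{
  dpo_id_t fwd = DPO_INVALID;

  /* Forwarding contribution of the route to the tunnel destination. */
  fib_entry_contribute_forwarding (fib_entry_index,
				   FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &fwd);

  if (DPO_LOAD_BALANCE == fwd.dpoi_type)
    {
      const load_balance_t *lb = load_balance_get (fwd.dpoi_index);
      if (1 == lb->lb_n_buckets)
	/* Single choice: bypass the load-balance DPO entirely. */
	dpo_copy (&fwd, load_balance_get_bucket_i (lb, 0));
    }

  dpo_stack_from_node (child_node_index, tunnel_dpo, &fwd);
  dpo_reset (&fwd);
}
```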
Change-Id: Ifa83915744a1a88045c998604777cc3583f4da52
Signed-off-by: John Lo <loj@cisco.com>
Diffstat (limited to 'src/vnet/gre/node.c')
-rw-r--r-- | src/vnet/gre/node.c | 83 |
1 file changed, 44 insertions(+), 39 deletions(-)
diff --git a/src/vnet/gre/node.c b/src/vnet/gre/node.c
index 7223b017df7..ee32e602ce9 100644
--- a/src/vnet/gre/node.c
+++ b/src/vnet/gre/node.c
@@ -164,8 +164,10 @@ gre_input (vlib_main_t * vm,
 	  protocol1 = h1->protocol;
 	  sparse_vec_index2 (gm->next_by_protocol, protocol0, protocol1,
 			     &i0, &i1);
-	  next0 = vec_elt (gm->next_by_protocol, i0);
-	  next1 = vec_elt (gm->next_by_protocol, i1);
+	  next0 = vec_elt (gm->next_by_protocol, i0).next_index;
+	  next1 = vec_elt (gm->next_by_protocol, i1).next_index;
+	  u8 ttype0 = vec_elt (gm->next_by_protocol, i0).tunnel_type;
+	  u8 ttype1 = vec_elt (gm->next_by_protocol, i1).tunnel_type;
 	  b0->error =
 	    node->errors[i0 ==
@@ -190,22 +192,21 @@ gre_input (vlib_main_t * vm,
 	  /* RPF check for ip4/ip6 input */
-	  if (PREDICT_TRUE (next0 == GRE_INPUT_NEXT_IP4_INPUT
-			    || next0 == GRE_INPUT_NEXT_IP6_INPUT
-			    || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT
-			    || next0 == GRE_INPUT_NEXT_MPLS_INPUT))
+	  if (PREDICT_TRUE (next0 > GRE_INPUT_NEXT_DROP))
 	    {
 	      if (is_ipv6)
 		{
 		  gre_mk_key6 (&ip6_0->dst_address,
			       &ip6_0->src_address,
-			       vnet_buffer (b0)->ip.fib_index, &key0.gtk_v6);
+			       vnet_buffer (b0)->ip.fib_index,
+			       ttype0, 0, &key0.gtk_v6);
 		}
 	      else
 		{
-		  gre_mk_key4 (&ip4_0->dst_address,
-			       &ip4_0->src_address,
-			       vnet_buffer (b0)->ip.fib_index, &key0.gtk_v4);
+		  gre_mk_key4 (ip4_0->dst_address,
+			       ip4_0->src_address,
+			       vnet_buffer (b0)->ip.fib_index,
+			       ttype0, 0, &key0.gtk_v4);
 		}
 	      if ((!is_ipv6 && !gre_match_key4 (&cached_tunnel_key.gtk_v4,
@@ -264,22 +265,21 @@ gre_input (vlib_main_t * vm,
 	      vnet_buffer (b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
 	    drop0:
-	  if (PREDICT_TRUE (next1 == GRE_INPUT_NEXT_IP4_INPUT
-			    || next1 == GRE_INPUT_NEXT_IP6_INPUT
-			    || next1 == GRE_INPUT_NEXT_ETHERNET_INPUT
-			    || next1 == GRE_INPUT_NEXT_MPLS_INPUT))
+	  if (PREDICT_TRUE (next1 > GRE_INPUT_NEXT_DROP))
 	    {
 	      if (is_ipv6)
 		{
 		  gre_mk_key6 (&ip6_1->dst_address,
			       &ip6_1->src_address,
-			       vnet_buffer (b1)->ip.fib_index, &key1.gtk_v6);
+			       vnet_buffer (b1)->ip.fib_index,
+			       ttype1, 0, &key1.gtk_v6);
 		}
 	      else
 		{
-		  gre_mk_key4 (&ip4_1->dst_address,
-			       &ip4_1->src_address,
-			       vnet_buffer (b1)->ip.fib_index, &key1.gtk_v4);
+		  gre_mk_key4 (ip4_1->dst_address,
+			       ip4_1->src_address,
+			       vnet_buffer (b1)->ip.fib_index,
+			       ttype1, 0, &key1.gtk_v4);
 		}
 	      if ((!is_ipv6 && !gre_match_key4 (&cached_tunnel_key.gtk_v4,
@@ -423,7 +423,8 @@ gre_input (vlib_main_t * vm,
 	  h0 = vlib_buffer_get_current (b0);
 	  i0 = sparse_vec_index (gm->next_by_protocol, h0->protocol);
-	  next0 = vec_elt (gm->next_by_protocol, i0);
+	  next0 = vec_elt (gm->next_by_protocol, i0).next_index;
+	  u8 ttype0 = vec_elt (gm->next_by_protocol, i0).tunnel_type;
 	  b0->error = node->errors[i0 == SPARSE_VEC_INVALID_INDEX
@@ -440,22 +441,21 @@ gre_input (vlib_main_t * vm,
 	     so we can increase counters and help forward node to
 	     pick right FIB */
 	  /* RPF check for ip4/ip6 input */
-	  if (PREDICT_TRUE (next0 == GRE_INPUT_NEXT_IP4_INPUT
-			    || next0 == GRE_INPUT_NEXT_IP6_INPUT
-			    || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT
-			    || next0 == GRE_INPUT_NEXT_MPLS_INPUT))
+	  if (PREDICT_TRUE (next0 > GRE_INPUT_NEXT_DROP))
 	    {
 	      if (is_ipv6)
 		{
 		  gre_mk_key6 (&ip6_0->dst_address,
			       &ip6_0->src_address,
-			       vnet_buffer (b0)->ip.fib_index, &key0.gtk_v6);
+			       vnet_buffer (b0)->ip.fib_index,
+			       ttype0, 0, &key0.gtk_v6);
 		}
 	      else
 		{
-		  gre_mk_key4 (&ip4_0->dst_address,
-			       &ip4_0->src_address,
-			       vnet_buffer (b0)->ip.fib_index, &key0.gtk_v4);
+		  gre_mk_key4 (ip4_0->dst_address,
+			       ip4_0->src_address,
+			       vnet_buffer (b0)->ip.fib_index,
+			       ttype0, 0, &key0.gtk_v4);
 		}
 	      if ((!is_ipv6 && !gre_match_key4 (&cached_tunnel_key.gtk_v4,
@@ -592,9 +592,7 @@ VLIB_REGISTER_NODE (gre4_input_node) = {
   .format_trace = format_gre_rx_trace,
   .unformat_buffer = unformat_gre_header,
 };
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (gre6_input_node) = {
   .function = gre6_input,
   .name = "gre6-input",
@@ -617,17 +615,19 @@ VLIB_REGISTER_NODE (gre6_input_node) = {
   .format_trace = format_gre_rx_trace,
   .unformat_buffer = unformat_gre_header,
 };
-/* *INDENT-ON* */
 VLIB_NODE_FUNCTION_MULTIARCH (gre4_input_node, gre4_input)
 VLIB_NODE_FUNCTION_MULTIARCH (gre6_input_node, gre6_input)
-     void
-       gre_register_input_protocol (vlib_main_t * vm,
-				    gre_protocol_t protocol, u32 node_index)
+/* *INDENT-ON* */
+
+void
+gre_register_input_protocol (vlib_main_t * vm,
+			     gre_protocol_t protocol, u32 node_index,
+			     gre_tunnel_type_t tunnel_type)
 {
   gre_main_t *em = &gre_main;
   gre_protocol_info_t *pi;
-  u16 *n;
+  next_info_t *n;
   u32 i;
   {
@@ -638,6 +638,7 @@ VLIB_NODE_FUNCTION_MULTIARCH (gre6_input_node, gre6_input)
   pi = gre_get_protocol_info (em, protocol);
   pi->node_index = node_index;
+  pi->tunnel_type = tunnel_type;
   pi->next_index = vlib_node_add_next (vm, gre4_input_node.index, node_index);
   i = vlib_node_add_next (vm, gre6_input_node.index, node_index);
   ASSERT (i == pi->next_index);
@@ -645,7 +646,8 @@ VLIB_NODE_FUNCTION_MULTIARCH (gre6_input_node, gre6_input)
   /* Setup gre protocol -> next index sparse vector mapping. */
   n = sparse_vec_validate (em->next_by_protocol,
			   clib_host_to_net_u16 (protocol));
-  n[0] = pi->next_index;
+  n->next_index = pi->next_index;
+  n->tunnel_type = tunnel_type;
 }
 static void
@@ -689,14 +691,17 @@ gre_input_init (vlib_main_t * vm)
   mpls_unicast_input = vlib_get_node_by_name (vm, (u8 *) "mpls-input");
   ASSERT (mpls_unicast_input);
-  gre_register_input_protocol (vm, GRE_PROTOCOL_teb, ethernet_input->index);
+  gre_register_input_protocol (vm, GRE_PROTOCOL_teb,
+			       ethernet_input->index, GRE_TUNNEL_TYPE_TEB);
-  gre_register_input_protocol (vm, GRE_PROTOCOL_ip4, ip4_input->index);
+  gre_register_input_protocol (vm, GRE_PROTOCOL_ip4,
+			       ip4_input->index, GRE_TUNNEL_TYPE_L3);
-  gre_register_input_protocol (vm, GRE_PROTOCOL_ip6, ip6_input->index);
+  gre_register_input_protocol (vm, GRE_PROTOCOL_ip6,
+			       ip6_input->index, GRE_TUNNEL_TYPE_L3);
   gre_register_input_protocol (vm, GRE_PROTOCOL_mpls_unicast,
-			       mpls_unicast_input->index);
+			       mpls_unicast_input->index, GRE_TUNNEL_TYPE_L3);
   ip4_register_protocol (IP_PROTOCOL_GRE, gre4_input_node.index);
   ip6_register_protocol (IP_PROTOCOL_GRE, gre6_input_node.index);
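For context when reading the registration hunks above: gre_register_input_protocol() now stores both a next-node index and a tunnel type per GRE protocol in the sparse vector. A hedged sketch of the element type, consistent with the .next_index/.tunnel_type accesses in the diff (the real definition is in src/vnet/gre/gre.h; field widths are assumptions):

```c
#include <stdint.h>

/*
 * Sketch of the entry type stored in gre_main.next_by_protocol after
 * this change: the input path reads both members per element.
 */
typedef struct
{
  uint16_t next_index;   /* next node after gre4-input / gre6-input */
  uint8_t tunnel_type;   /* GRE_TUNNEL_TYPE_L3 / _TEB / _ERSPAN */
} next_info_t;
```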