author     John Lo <loj@cisco.com>          2017-01-30 13:12:10 -0500
committer  Neale Ranns <nranns@cisco.com>   2017-01-31 08:38:56 +0000
commit     2b81eb830393fe88c8247cb9015bdaf4060f6ceb (patch)
tree       1ff66d4856414902c71bb789e3d824d080790b57 /src/vnet
parent     61459c9be0f620f738cf049b1b33e1a2d13dc9a6 (diff)
Add vxlan-bypass feature to IP6 forwarding path
Add vxlan-bypass feature which can be enabled on the IP6 underlay
interface which receives VXLAN packets, to accelerate VXLAN decap
processing. The CLI to enable/disable it is:

    set interface ip6 vxlan-bypass <interface> [del]

The vxlan-bypass feature is already supported on the IP4 underlay
interface. The CLI to enable/disable it is:

    set interface ip vxlan-bypass <interface> [del]

Move vxlan-bypass API/CLI support code from decap.c to vxlan.c.

Also fixed two issues in the VXLAN decap path in the vxlan-input node:
1. Add verification of the VXLAN packet FIB index against the
   encap-vrf-id of the VXLAN tunnel.
2. Fix checking of a VXLANoIPv6 packet's mcast DIP against that of the
   IP6 mcast VXLAN tunnel.

Change-Id: I2bad4074a468c48fbb8bb5ac64f6437190756ed2
Signed-off-by: John Lo <loj@cisco.com>
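For illustration, fix 1 in isolation: a received VXLAN packet is accepted
only when the FIB (VRF) it arrived in matches the tunnel's configured
encap-vrf-id; otherwise it is dropped as "no such tunnel". A minimal
standalone sketch of that check (plain C, not VPP code; all names here
are hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    typedef struct { uint32_t encap_fib_index; } tunnel_t;

    /* Accept the packet only when its RX FIB matches the FIB the
       tunnel was created with; otherwise treat it as "no such
       tunnel" and drop. */
    static int
    validate_fib (uint32_t pkt_rx_fib_index, const tunnel_t *t)
    {
      return pkt_rx_fib_index == t->encap_fib_index;
    }

    int
    main (void)
    {
      tunnel_t t = { .encap_fib_index = 5 };
      printf ("rx fib 5: %s\n", validate_fib (5, &t) ? "accept" : "drop");
      printf ("rx fib 7: %s\n", validate_fib (7, &t) ? "accept" : "drop");
      return 0;
    }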
Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/ip/ip6.h             4
-rw-r--r--  src/vnet/ip/ip6_forward.c     9
-rw-r--r--  src/vnet/vxlan/decap.c      455
-rw-r--r--  src/vnet/vxlan/vxlan.c      171
-rw-r--r--  src/vnet/vxlan/vxlan.h        2
-rw-r--r--  src/vnet/vxlan/vxlan_api.c    9
6 files changed, 468 insertions, 182 deletions
diff --git a/src/vnet/ip/ip6.h b/src/vnet/ip/ip6.h
index 6fecd42dc61..152698733e8 100644
--- a/src/vnet/ip/ip6.h
+++ b/src/vnet/ip/ip6.h
@@ -426,6 +426,10 @@ int vnet_ip6_nd_term (vlib_main_t * vm,
ip6_header_t * ip,
u32 sw_if_index, u16 bd_index, u8 shg);
+u8 *format_ip6_forward_next_trace (u8 * s, va_list * args);
+
+u32 ip6_tcp_udp_icmp_validate_checksum (vlib_main_t * vm, vlib_buffer_t * p0);
+
int vnet_set_ip6_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
u32 table_index);
extern vlib_node_registration_t ip6_lookup_node;
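(The two declarations exported above let the new ip6-vxlan-bypass node in
decap.c reuse existing IP6 code: format_ip6_forward_next_trace as the
node's trace formatter, and ip6_tcp_udp_icmp_validate_checksum for
slow-path UDP checksum validation, as seen in the decap.c hunks below.)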
diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c
index ac47b3ad1fb..50951c27495 100644
--- a/src/vnet/ip/ip6_forward.c
+++ b/src/vnet/ip/ip6_forward.c
@@ -588,6 +588,13 @@ VNET_FEATURE_INIT (ip6_vpath, static) =
{
.arc_name = "ip6-unicast",
.node_name = "vpath-input-ip6",
+ .runs_before = VNET_FEATURES ("ip6-vxlan-bypass"),
+};
+
+VNET_FEATURE_INIT (ip6_vxlan_bypass, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-vxlan-bypass",
.runs_before = VNET_FEATURES ("ip6-lookup"),
};
@@ -921,7 +928,7 @@ typedef struct
}
ip6_forward_next_trace_t;
-static u8 *
+u8 *
format_ip6_forward_next_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
diff --git a/src/vnet/vxlan/decap.c b/src/vnet/vxlan/decap.c
index 73e50ffe31f..d5a9442f514 100644
--- a/src/vnet/vxlan/decap.c
+++ b/src/vnet/vxlan/decap.c
@@ -48,6 +48,25 @@ static u8 * format_vxlan_rx_trace (u8 * s, va_list * args)
return s;
}
+always_inline u32
+validate_vxlan_fib (vlib_buffer_t *b, vxlan_tunnel_t *t, u32 is_ip4)
+{
+ u32 fib_index, sw_if_index;
+
+ sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
+
+ if (is_ip4)
+ fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
+ vec_elt (ip4_main.fib_index_by_sw_if_index, sw_if_index) :
+ vnet_buffer (b)->sw_if_index[VLIB_TX];
+ else
+ fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
+ vec_elt (ip6_main.fib_index_by_sw_if_index, sw_if_index) :
+ vnet_buffer (b)->sw_if_index[VLIB_TX];
+
+ return (fib_index == t->encap_fib_index);
+}
+
always_inline uword
vxlan_input (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -191,7 +210,15 @@ vxlan_input (vlib_main_t * vm,
tunnel_index0 = last_tunnel_index;
t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
- /* Validate VXLAN tunnel SIP against packet DIP */
+ /* Validate VXLAN tunnel encap-fib index against packet */
+ if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace0;
+ }
+
+ /* Validate VXLAN tunnel SIP against packet DIP */
if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
goto next0; /* valid packet */
if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
@@ -232,6 +259,14 @@ vxlan_input (vlib_main_t * vm,
tunnel_index0 = last_tunnel_index;
t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
+ /* Validate VXLAN tunnel encap-fib index against packet */
+ if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace0;
+ }
+
/* Validate VXLAN tunnel SIP against packet DIP */
if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
&t0->src.ip6)))
@@ -241,7 +276,7 @@ vxlan_input (vlib_main_t * vm,
key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
key6_0.vni = vxlan0->vni_reserved;
- p0 = hash_get (vxm->vxlan6_tunnel_by_key, &key6_0);
+ p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
if (PREDICT_TRUE (p0 != NULL))
{
mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
@@ -327,6 +362,14 @@ vxlan_input (vlib_main_t * vm,
tunnel_index1 = last_tunnel_index;
t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
+ /* Validate VXLAN tunnel encap-fib index against packet */
+ if (PREDICT_FALSE (validate_vxlan_fib (b1, t1, is_ip4) == 0))
+ {
+ error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_INPUT_NEXT_DROP;
+ goto trace1;
+ }
+
/* Validate VXLAN tunnel SIP against packet DIP */
if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
goto next1; /* valid packet */
@@ -370,6 +413,14 @@ vxlan_input (vlib_main_t * vm,
tunnel_index1 = last_tunnel_index;
t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
+ /* Validate VXLAN tunnel encap-fib index against packet */
+ if (PREDICT_FALSE (validate_vxlan_fib (b1, t1, is_ip4) == 0))
+ {
+ error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_INPUT_NEXT_DROP;
+ goto trace1;
+ }
+
/* Validate VXLAN tunnel SIP against packet DIP */
if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
&t1->src.ip6)))
@@ -379,7 +430,7 @@ vxlan_input (vlib_main_t * vm,
key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
key6_1.vni = vxlan1->vni_reserved;
- p1 = hash_get (vxm->vxlan6_tunnel_by_key, &key6_1);
+ p1 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
if (PREDICT_TRUE (p1 != NULL))
{
mt1 = pool_elt_at_index (vxm->tunnels, p1[0]);
@@ -519,6 +570,14 @@ vxlan_input (vlib_main_t * vm,
tunnel_index0 = last_tunnel_index;
t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
+ /* Validate VXLAN tunnel encap-fib index against packet */
+ if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+
/* Validate VXLAN tunnel SIP against packet DIP */
if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
goto next00; /* valid packet */
@@ -560,6 +619,14 @@ vxlan_input (vlib_main_t * vm,
tunnel_index0 = last_tunnel_index;
t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
+ /* Validate VXLAN tunnel encap-fib index against packet */
+ if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+
/* Validate VXLAN tunnel SIP against packet DIP */
if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
&t0->src.ip6)))
@@ -569,7 +636,7 @@ vxlan_input (vlib_main_t * vm,
key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
key6_0.vni = vxlan0->vni_reserved;
- p0 = hash_get (vxm->vxlan6_tunnel_by_key, &key6_0);
+ p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
if (PREDICT_TRUE (p0 != NULL))
{
mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
@@ -729,9 +796,9 @@ typedef enum {
always_inline uword
ip_vxlan_bypass_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame,
- u32 is_ip4)
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ u32 is_ip4)
{
vxlan_main_t * vxm = &vxlan_main;
u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
@@ -756,7 +823,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
while (n_left_from >= 4 && n_left_to_next >= 2)
{
vlib_buffer_t * b0, * b1;
- ip4_header_t * ip0, * ip1;
+ ip4_header_t * ip40, * ip41;
+ ip6_header_t * ip60, * ip61;
udp_header_t * udp0, * udp1;
u32 bi0, ip_len0, udp_len0, flags0, next0;
u32 bi1, ip_len1, udp_len1, flags1, next1;
@@ -787,39 +855,65 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
- ip0 = vlib_buffer_get_current (b0);
- ip1 = vlib_buffer_get_current (b1);
+ if (is_ip4)
+ {
+ ip40 = vlib_buffer_get_current (b0);
+ ip41 = vlib_buffer_get_current (b1);
+ }
+ else
+ {
+ ip60 = vlib_buffer_get_current (b0);
+ ip61 = vlib_buffer_get_current (b1);
+ }
/* Setup packet for next IP feature */
vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
vnet_feature_next(vnet_buffer(b1)->sw_if_index[VLIB_RX], &next1, b1);
- /* Treat IP frag packets as "experimental" protocol for now
- until support of IP frag reassembly is implemented */
- proto0 = ip4_is_fragment(ip0) ? 0xfe : ip0->protocol;
- proto1 = ip4_is_fragment(ip1) ? 0xfe : ip1->protocol;
+ if (is_ip4)
+ {
+ /* Treat IP frag packets as "experimental" protocol for now
+ until support of IP frag reassembly is implemented */
+ proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
+ proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
+ }
+ else
+ {
+ proto0 = ip60->protocol;
+ proto1 = ip61->protocol;
+ }
/* Process packet 0 */
if (proto0 != IP_PROTOCOL_UDP)
goto exit0; /* not UDP packet */
- udp0 = ip4_next_header (ip0);
+ if (is_ip4)
+ udp0 = ip4_next_header (ip40);
+ else
+ udp0 = ip6_next_header (ip60);
+
if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
goto exit0; /* not VXLAN packet */
+ /* Validate DIP against VTEPs */
if (is_ip4)
{
- if (addr4.as_u32 != ip0->dst_address.as_u32)
+ if (addr4.as_u32 != ip40->dst_address.as_u32)
{
- if (!hash_get (vxm->vtep4, ip0->dst_address.as_u32))
- goto exit0; /* no local VTEP for VXLAN packet */
- addr4 = ip0->dst_address;
+ if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
+ goto exit0; /* no local VTEP for VXLAN packet */
+ addr4 = ip40->dst_address;
+ }
+ }
+ else
+ {
+ if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
+ {
+ if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
+ goto exit0; /* no local VTEP for VXLAN packet */
+ addr6 = ip60->dst_address;
}
}
- else goto exit0; /* IP6 VXLAN bypass not yet supported */
-
- /* vxlan-input node expect current at VXLAN header */
- vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
flags0 = b0->flags;
good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
@@ -828,49 +922,80 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
good_udp0 |= udp0->checksum == 0;
/* Verify UDP length */
- ip_len0 = clib_net_to_host_u16 (ip0->length);
+ if (is_ip4)
+ ip_len0 = clib_net_to_host_u16 (ip40->length);
+ else
+ ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
udp_len0 = clib_net_to_host_u16 (udp0->length);
-
len_diff0 = ip_len0 - udp_len0;
/* Verify UDP checksum */
if (PREDICT_FALSE (!good_udp0))
{
- if (!(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
- flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
- good_udp0 =
- (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ if ((flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED) == 0)
+ {
+ if (is_ip4)
+ flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
+ else
+ flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
+ good_udp0 =
+ (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ }
}
- error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
- error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
+ if (is_ip4)
+ {
+ error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
+ }
+ else
+ {
+ error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
+ }
next0 = error0 ?
IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
b0->error = error0 ? error_node->errors[error0] : 0;
+ /* vxlan-input node expects current at VXLAN header */
+ if (is_ip4)
+ vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
+ else
+ vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
+
exit0:
/* Process packet 1 */
if (proto1 != IP_PROTOCOL_UDP)
goto exit1; /* not UDP packet */
- udp1 = ip4_next_header (ip1);
+ if (is_ip4)
+ udp1 = ip4_next_header (ip41);
+ else
+ udp1 = ip6_next_header (ip61);
+
if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
goto exit1; /* not VXLAN packet */
+ /* Validate DIP against VTEPs */
if (is_ip4)
{
- if (addr4.as_u32 != ip1->dst_address.as_u32)
+ if (addr4.as_u32 != ip41->dst_address.as_u32)
{
- if (!hash_get (vxm->vtep4, ip1->dst_address.as_u32))
- goto exit1; /* no local VTEP for VXLAN packet */
- addr4 = ip1->dst_address;
- }
+ if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
+ goto exit1; /* no local VTEP for VXLAN packet */
+ addr4 = ip41->dst_address;
+ }
+ }
+ else
+ {
+ if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
+ {
+ if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
+ goto exit1; /* no local VTEP for VXLAN packet */
+ addr6 = ip61->dst_address;
+ }
}
- else goto exit1; /* IP6 VXLAN bypass not yet supported */
-
- /* vxlan-input node expect current at VXLAN header */
- vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
flags1 = b1->flags;
good_udp1 = (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
@@ -879,27 +1004,48 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
good_udp1 |= udp1->checksum == 0;
/* Verify UDP length */
- ip_len1 = clib_net_to_host_u16 (ip1->length);
+ if (is_ip4)
+ ip_len1 = clib_net_to_host_u16 (ip41->length);
+ else
+ ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
udp_len1 = clib_net_to_host_u16 (udp1->length);
-
len_diff1 = ip_len1 - udp_len1;
/* Verify UDP checksum */
if (PREDICT_FALSE (!good_udp1))
{
- if (!(flags1 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
- flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
- good_udp1 =
- (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ if ((flags1 & IP_BUFFER_L4_CHECKSUM_COMPUTED) == 0)
+ {
+ if (is_ip4)
+ flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
+ else
+ flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
+ good_udp1 =
+ (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ }
}
- error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
- error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
+ if (is_ip4)
+ {
+ error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
+ error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
+ }
+ else
+ {
+ error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
+ error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
+ }
next1 = error1 ?
IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
b1->error = error1 ? error_node->errors[error1] : 0;
+ /* vxlan-input node expects current at VXLAN header */
+ if (is_ip4)
+ vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
+ else
+ vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));
+
exit1:
vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
to_next, n_left_to_next,
@@ -909,7 +1055,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
while (n_left_from > 0 && n_left_to_next > 0)
{
vlib_buffer_t * b0;
- ip4_header_t * ip0;
+ ip4_header_t * ip40;
+ ip6_header_t * ip60;
udp_header_t * udp0;
u32 bi0, ip_len0, udp_len0, flags0, next0;
i32 len_diff0;
@@ -922,35 +1069,51 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
n_left_to_next -= 1;
b0 = vlib_get_buffer (vm, bi0);
- ip0 = vlib_buffer_get_current (b0);
+ if (is_ip4)
+ ip40 = vlib_buffer_get_current (b0);
+ else
+ ip60 = vlib_buffer_get_current (b0);
/* Setup packet for next IP feature */
vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
- /* Treat IP frag packets as "experimental" protocol for now
- until support of IP frag reassembly is implemented */
- proto0 = ip4_is_fragment(ip0) ? 0xfe : ip0->protocol;
+ if (is_ip4)
+ /* Treat IP4 frag packets as "experimental" protocol for now
+ until support of IP frag reassembly is implemented */
+ proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
+ else
+ proto0 = ip60->protocol;
if (proto0 != IP_PROTOCOL_UDP)
goto exit; /* not UDP packet */
- udp0 = ip4_next_header (ip0);
+ if (is_ip4)
+ udp0 = ip4_next_header (ip40);
+ else
+ udp0 = ip6_next_header (ip60);
+
if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
goto exit; /* not VXLAN packet */
+ /* Validate DIP against VTEPs */
if (is_ip4)
{
- if (addr4.as_u32 != ip0->dst_address.as_u32)
+ if (addr4.as_u32 != ip40->dst_address.as_u32)
{
- if (!hash_get (vxm->vtep4, ip0->dst_address.as_u32))
- goto exit; /* no local VTEP for VXLAN packet */
- addr4 = ip0->dst_address;
- }
+ if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
+ goto exit; /* no local VTEP for VXLAN packet */
+ addr4 = ip40->dst_address;
+ }
+ }
+ else
+ {
+ if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
+ {
+ if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
+ goto exit; /* no local VTEP for VXLAN packet */
+ addr6 = ip60->dst_address;
+ }
}
- else goto exit; /* IP6 VXLAN bypass not yet supported */
-
- /* vxlan-input node expect current at VXLAN header */
- vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
flags0 = b0->flags;
good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
@@ -959,27 +1122,48 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
good_udp0 |= udp0->checksum == 0;
/* Verify UDP length */
- ip_len0 = clib_net_to_host_u16 (ip0->length);
+ if (is_ip4)
+ ip_len0 = clib_net_to_host_u16 (ip40->length);
+ else
+ ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
udp_len0 = clib_net_to_host_u16 (udp0->length);
-
len_diff0 = ip_len0 - udp_len0;
/* Verify UDP checksum */
if (PREDICT_FALSE (!good_udp0))
{
- if (!(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
- flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
- good_udp0 =
- (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ if ((flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED) == 0)
+ {
+ if (is_ip4)
+ flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
+ else
+ flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
+ good_udp0 =
+ (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
+ }
}
- error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
- error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
+ if (is_ip4)
+ {
+ error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
+ }
+ else
+ {
+ error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
+ }
next0 = error0 ?
IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
b0->error = error0 ? error_node->errors[error0] : 0;
+ /* vxlan-input node expects current at VXLAN header */
+ if (is_ip4)
+ vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
+ else
+ vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
+
exit:
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
@@ -1017,114 +1201,39 @@ VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) = {
VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_bypass_node,ip4_vxlan_bypass)
+/* Dummy init function to get us linked in. */
+clib_error_t * ip4_vxlan_bypass_init (vlib_main_t * vm)
+{ return 0; }
-static clib_error_t *
-set_ip_vxlan_bypass (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- unformat_input_t _line_input, * line_input = &_line_input;
- vnet_main_t * vnm = vnet_get_main();
- clib_error_t * error = 0;
- u32 sw_if_index, is_del;
-
- sw_if_index = ~0;
- is_del = 0;
-
- if (! unformat_user (input, unformat_line_input, line_input))
- return 0;
+VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat_user (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
- ;
- else if (unformat (line_input, "del"))
- is_del = 1;
- else
- {
- error = unformat_parse_error (line_input);
- goto done;
- }
- }
+static uword
+ip6_vxlan_bypass (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
+}
- if (~0 == sw_if_index)
- {
- error = clib_error_return (0, "unknown interface `%U'",
- format_unformat_error, line_input);
- goto done;
- }
+VLIB_REGISTER_NODE (ip6_vxlan_bypass_node) = {
+ .function = ip6_vxlan_bypass,
+ .name = "ip6-vxlan-bypass",
+ .vector_size = sizeof (u32),
- vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-bypass", sw_if_index,
- is_del == 0, 0, 0);
- done:
- return error;
-}
+ .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
+ .next_nodes = {
+ [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
+ [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
+ },
-/*?
- * This command adds the 'ip4-vxlan-bypass' graph node for a given interface.
- * By adding the IPv4 vxlan-bypass graph node to an interface, the node checks
- * for and validate input vxlan packet and bypass ip4-lookup, ip4-local,
- * ip4-udp-lookup nodes to speedup vxlan packet forwarding. This node will
- * cause extra overhead to for non-vxlan packets which is kept at a minimum.
- *
- * @cliexpar
- * @parblock
- * Example of graph node before ip4-vxlan-bypass is enabled:
- * @cliexstart{show vlib graph ip4-vxlan-bypass}
- * Name Next Previous
- * ip4-vxlan-bypass error-drop [0]
- * vxlan4-input [1]
- * ip4-lookup [2]
- * @cliexend
- *
- * Example of how to enable ip4-vxlan-bypass on an interface:
- * @cliexcmd{set interface ip vxlan-bypass GigabitEthernet2/0/0}
- *
- * Example of graph node after ip4-vxlan-bypass is enabled:
- * @cliexstart{show vlib graph ip4-vxlan-bypass}
- * Name Next Previous
- * ip4-vxlan-bypass error-drop [0] ip4-input
- * vxlan4-input [1] ip4-input-no-checksum
- * ip4-lookup [2]
- * @cliexend
- *
- * Example of how to display the feature enabed on an interface:
- * @cliexstart{show ip interface features GigabitEthernet2/0/0}
- * IP feature paths configured on GigabitEthernet2/0/0...
- *
- * ipv4 unicast:
- * ip4-vxlan-bypass
- * ip4-lookup
- *
- * ipv4 multicast:
- * ip4-lookup-multicast
- *
- * ipv4 multicast:
- * interface-output
- *
- * ipv6 unicast:
- * ip6-lookup
- *
- * ipv6 multicast:
- * ip6-lookup
- *
- * ipv6 multicast:
- * interface-output
- * @cliexend
- *
- * Example of how to disable unicast source checking on an interface:
- * @cliexcmd{set interface ip vxlan-bypass GigabitEthernet2/0/0 del}
- * @endparblock
-?*/
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (set_interface_ip_vxlan_bypass_command, static) = {
- .path = "set interface ip vxlan-bypass",
- .function = set_ip_vxlan_bypass,
- .short_help = "set interface ip vxlan-bypass <interface> [del]",
+ .format_buffer = format_ip6_header,
+ .format_trace = format_ip6_forward_next_trace,
};
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_bypass_node,ip6_vxlan_bypass)
+
/* Dummy init function to get us linked in. */
-clib_error_t * ip4_vxlan_bypass_init (vlib_main_t * vm)
+clib_error_t * ip6_vxlan_bypass_init (vlib_main_t * vm)
{ return 0; }
-VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);
+VLIB_INIT_FUNCTION (ip6_vxlan_bypass_init);
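The hash_get to hash_get_mem changes above are fix 2 from the commit
message: vxlan6_tunnel_by_key is keyed by a multi-word IPv6 key structure,
so the table stores pointers to keys and must be searched with
hash_get_mem, which hashes the key contents; hash_get hashes its scalar
argument directly (here, the pointer value) and would never match the IP6
mcast tunnel entry. A minimal sketch of the distinction, assuming VPP's
vppinfra hash API (key6_like_t is a hypothetical stand-in for the real
vxlan6 key):

    #include <vppinfra/hash.h>

    typedef struct { u64 src[2]; u32 vni; } key6_like_t;

    static uword *
    make_table (void)
    {
      /* mem-keyed table: keys are key6_like_t structs, values are uwords */
      return hash_create_mem (0, sizeof (key6_like_t), sizeof (uword));
    }

    static uword
    lookup_tunnel (uword * h, key6_like_t * k)
    {
      /* hash_get_mem hashes *k (the key bytes); hash_get (h, k) would
         hash the pointer value itself and never find the entry */
      uword *p = hash_get_mem (h, k);
      return p ? p[0] : ~0;
    }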
diff --git a/src/vnet/vxlan/vxlan.c b/src/vnet/vxlan/vxlan.c
index 9ed42875a35..02465eb42c4 100644
--- a/src/vnet/vxlan/vxlan.c
+++ b/src/vnet/vxlan/vxlan.c
@@ -921,6 +921,177 @@ VLIB_CLI_COMMAND (show_vxlan_tunnel_command, static) = {
/* *INDENT-ON* */
+void vnet_int_vxlan_bypass_mode (u32 sw_if_index,
+ u8 is_ip6,
+ u8 is_enable)
+{
+ if (is_ip6)
+ vnet_feature_enable_disable ("ip6-unicast", "ip6-vxlan-bypass",
+ sw_if_index, is_enable, 0, 0);
+ else
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-bypass",
+ sw_if_index, is_enable, 0, 0);
+}
+
+
+static clib_error_t *
+set_ip_vxlan_bypass (u32 is_ip6,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ vnet_main_t * vnm = vnet_get_main();
+ clib_error_t * error = 0;
+ u32 sw_if_index, is_enable;
+
+ sw_if_index = ~0;
+ is_enable = 1;
+
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat_user (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else if (unformat (line_input, "del"))
+ is_enable = 0;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (~0 == sw_if_index)
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+
+ vnet_int_vxlan_bypass_mode (sw_if_index, is_ip6, is_enable);
+
+ done:
+ return error;
+}
+
+static clib_error_t *
+set_ip4_vxlan_bypass (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ return set_ip_vxlan_bypass (0, input, cmd);
+}
+
+/*?
+ * This command adds the 'ip4-vxlan-bypass' graph node for a given interface.
+ * By adding the IPv4 vxlan-bypass graph node to an interface, the node checks
+ * for and validates incoming VXLAN packets and bypasses the ip4-lookup,
+ * ip4-local and ip4-udp-lookup nodes to speed up VXLAN packet forwarding.
+ * The extra overhead this node adds for non-VXLAN packets is kept to a minimum.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before ip4-vxlan-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan-bypass}
+ * Name Next Previous
+ * ip4-vxlan-bypass error-drop [0]
+ * vxlan4-input [1]
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to enable ip4-vxlan-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-bypass GigabitEthernet2/0/0}
+ *
+ * Example of graph node after ip4-vxlan-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan-bypass}
+ * Name Next Previous
+ * ip4-vxlan-bypass error-drop [0] ip4-input
+ * vxlan4-input [1] ip4-input-no-checksum
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ * ...
+ * ipv4 unicast:
+ * ip4-vxlan-bypass
+ * ip4-lookup
+ * ...
+ * @cliexend
+ *
+ * Example of how to disable ip4-vxlan-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-bypass GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip_vxlan_bypass_command, static) = {
+ .path = "set interface ip vxlan-bypass",
+ .function = set_ip4_vxlan_bypass,
+ .short_help = "set interface ip vxlan-bypass <interface> [del]",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_ip6_vxlan_bypass (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ return set_ip_vxlan_bypass (1, input, cmd);
+}
+
+/*?
+ * This command adds the 'ip6-vxlan-bypass' graph node for a given interface.
+ * By adding the IPv6 vxlan-bypass graph node to an interface, the node checks
+ * for and validates incoming VXLAN packets and bypasses the ip6-lookup,
+ * ip6-local and ip6-udp-lookup nodes to speed up VXLAN packet forwarding.
+ * The extra overhead this node adds for non-VXLAN packets is kept to a minimum.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before ip6-vxlan-bypass is enabled:
+ * @cliexstart{show vlib graph ip6-vxlan-bypass}
+ * Name Next Previous
+ * ip6-vxlan-bypass error-drop [0]
+ * vxlan6-input [1]
+ * ip6-lookup [2]
+ * @cliexend
+ *
+ * Example of how to enable ip6-vxlan-bypass on an interface:
+ * @cliexcmd{set interface ip6 vxlan-bypass GigabitEthernet2/0/0}
+ *
+ * Example of graph node after ip6-vxlan-bypass is enabled:
+ * @cliexstart{show vlib graph ip6-vxlan-bypass}
+ * Name Next Previous
+ * ip6-vxlan-bypass error-drop [0] ip6-input
+ * vxlan6-input [1]
+ * ip6-lookup [2]
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ * ...
+ * ipv6 unicast:
+ * ip6-vxlan-bypass
+ * ip6-lookup
+ * ...
+ * @cliexend
+ *
+ * Example of how to disable ip6-vxlan-bypass on an interface:
+ * @cliexcmd{set interface ip6 vxlan-bypass GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip6_vxlan_bypass_command, static) = {
+ .path = "set interface ip6 vxlan-bypass",
+ .function = set_ip6_vxlan_bypass,
+ .short_help = "set interface ip vxlan-bypass <interface> [del]",
+};
+/* *INDENT-ON* */
+
clib_error_t *vxlan_init (vlib_main_t *vm)
{
vxlan_main_t * vxm = &vxlan_main;
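The new vnet_int_vxlan_bypass_mode() above gives the CLI commands and the
binary API handler a single entry point for toggling the feature. A usage
sketch, assuming the VPP headers in this tree (example_set_ip6_vxlan_bypass
is a hypothetical caller):

    #include <vnet/vxlan/vxlan.h>

    /* Enable or disable IPv6 vxlan-bypass on an interface; this is
       what "set interface ip6 vxlan-bypass <interface> [del]" does. */
    static void
    example_set_ip6_vxlan_bypass (u32 sw_if_index, int enable)
    {
      vnet_int_vxlan_bypass_mode (sw_if_index, 1 /* is_ip6 */,
                                  enable /* is_enable */);
    }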
diff --git a/src/vnet/vxlan/vxlan.h b/src/vnet/vxlan/vxlan.h
index ab9b2333a20..adfa3a8e3c8 100644
--- a/src/vnet/vxlan/vxlan.h
+++ b/src/vnet/vxlan/vxlan.h
@@ -183,4 +183,6 @@ typedef struct {
int vnet_vxlan_add_del_tunnel
(vnet_vxlan_add_del_tunnel_args_t *a, u32 * sw_if_indexp);
+void vnet_int_vxlan_bypass_mode
+(u32 sw_if_index, u8 is_ip6, u8 is_enable);
#endif /* included_vnet_vxlan_h */
diff --git a/src/vnet/vxlan/vxlan_api.c b/src/vnet/vxlan/vxlan_api.c
index 6c9cbd79764..c726c7c5b6a 100644
--- a/src/vnet/vxlan/vxlan_api.c
+++ b/src/vnet/vxlan/vxlan_api.c
@@ -59,14 +59,7 @@ static void
VALIDATE_SW_IF_INDEX (mp);
- if (mp->is_ipv6)
- {
- /* not yet implemented */
- }
- else
- vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-bypass",
- sw_if_index, mp->enable, 0, 0);
-
+ vnet_int_vxlan_bypass_mode (sw_if_index, mp->is_ipv6, mp->enable);
BAD_SW_IF_INDEX_LABEL;
REPLY_MACRO (VL_API_SW_INTERFACE_SET_VXLAN_BYPASS_REPLY);
/span> } esp_encrypt_post_trace_t; /* packet trace format function */ static u8 * format_esp_encrypt_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *); s = format (s, "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s", t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi, format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg, t->integ_alg, t->udp_encap ? " udp-encap-enabled" : ""); return s; } static u8 * format_esp_post_encrypt_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *); s = format (s, "esp-post: next node index %u", t->next_index); return s; } /* pad packet in input buffer */ static_always_inline u8 * esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last, u8 block_size, u8 icv_sz, u16 * next, vlib_node_runtime_t * node, u16 buffer_data_size, uword total_len) { static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00, }; u16 min_length = total_len + sizeof (esp_footer_t); u16 new_length = round_pow2 (min_length, block_size); u8 pad_bytes = new_length - min_length; esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) + last[0]->current_length + pad_bytes); u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz; if (last[0]->current_length + tail_sz > buffer_data_size) { u32 tmp_bi = 0; if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1) return 0; vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi); last[0]->next_buffer = tmp_bi; last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT; f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes); tmp->current_length += tail_sz; last[0] = tmp; } else last[0]->current_length += tail_sz; f->pad_length = pad_bytes; if (pad_bytes) { ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE); pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes); clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes); } return &f->next_header; } static_always_inline void esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp) { ip_csum_t sum; u16 old_len; len = clib_net_to_host_u16 (len); old_len = ip4->length; if (is_transport) { u8 prot = is_udp ? 
IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP; sum = ip_csum_update (ip4->checksum, ip4->protocol, prot, ip4_header_t, protocol); ip4->protocol = prot; sum = ip_csum_update (sum, old_len, len, ip4_header_t, length); } else sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length); ip4->length = len; ip4->checksum = ip_csum_fold (sum); } static_always_inline void esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len) { clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t)); udp->length = clib_net_to_host_u16 (len); } static_always_inline u8 ext_hdr_is_pre_esp (u8 nexthdr) { #ifdef CLIB_HAVE_VEC128 static const u8x16 ext_hdr_types = { IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS, IP_PROTOCOL_IPV6_ROUTE, IP_PROTOCOL_IPV6_FRAGMENTATION, }; return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr)); #else return ((nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) | (nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) | (nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0); #endif } static_always_inline u8 esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr) { /* this code assumes that HbH, route and frag headers will be before others, if that is not the case, they will end up encrypted */ u8 len = sizeof (ip6_header_t); ip6_ext_header_t *p; /* if next packet doesn't have ext header */ if (ext_hdr_is_pre_esp (ip6->protocol) == 0) { *ext_hdr = NULL; return len; } p = (void *) (ip6 + 1); len += ip6_ext_header_len (p); while (ext_hdr_is_pre_esp (p->next_hdr)) { len += ip6_ext_header_len (p); p = ip6_ext_next_header (p); } *ext_hdr = p; return len; } static_always_inline void esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node, vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts, vnet_crypto_op_chunk_t * chunks) { u32 n_fail, n_ops = vec_len (ops); vnet_crypto_op_t *op = ops; if (n_ops == 0) return; n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops); while (n_fail) { ASSERT (op - ops < n_ops); if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED) { u32 bi = op->user_data; b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR]; nexts[bi] = ESP_ENCRYPT_NEXT_DROP; n_fail--; } op++; } } static_always_inline void esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node, vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts) { u32 n_fail, n_ops = vec_len (ops); vnet_crypto_op_t *op = ops; if (n_ops == 0) return; n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops); while (n_fail) { ASSERT (op - ops < n_ops); if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED) { u32 bi = op->user_data; b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR]; nexts[bi] = ESP_ENCRYPT_NEXT_DROP; n_fail--; } op++; } } typedef struct { u32 salt; u64 iv; } __clib_packed esp_gcm_nonce_t; STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12); static_always_inline u32 esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, ipsec_sa_t * sa0, vlib_buffer_t * b, vlib_buffer_t * lb, u8 icv_sz, u8 * start, u32 start_len, u16 * n_ch) { vnet_crypto_op_chunk_t *ch; vlib_buffer_t *cb = b; u32 n_chunks = 1; u32 total_len; vec_add2 (ptd->chunks, ch, 1); total_len = ch->len = start_len; ch->src = ch->dst = start; cb = vlib_get_buffer (vm, cb->next_buffer); while (1) { vec_add2 (ptd->chunks, ch, 1); n_chunks += 1; if (lb == cb) total_len += ch->len = cb->current_length - icv_sz; else total_len += ch->len = cb->current_length; ch->src = ch->dst = vlib_buffer_get_current (cb); if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT)) break; cb = vlib_get_buffer (vm, 
cb->next_buffer); } if (n_ch) *n_ch = n_chunks; return total_len; } static_always_inline u32 esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, ipsec_sa_t * sa0, vlib_buffer_t * b, vlib_buffer_t * lb, u8 icv_sz, u8 * start, u32 start_len, u8 * digest, u16 * n_ch) { vnet_crypto_op_chunk_t *ch; vlib_buffer_t *cb = b; u32 n_chunks = 1; u32 total_len; vec_add2 (ptd->chunks, ch, 1); total_len = ch->len = start_len; ch->src = start; cb = vlib_get_buffer (vm, cb->next_buffer); while (1) { vec_add2 (ptd->chunks, ch, 1); n_chunks += 1; if (lb == cb) { total_len += ch->len = cb->current_length - icv_sz; if (ipsec_sa_is_set_USE_ESN (sa0)) { u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi); clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi)); ch->len += sizeof (seq_hi); total_len += sizeof (seq_hi); } } else total_len += ch->len = cb->current_length; ch->src = vlib_buffer_get_current (cb); if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT)) break; cb = vlib_get_buffer (vm, cb->next_buffer); } if (n_ch) *n_ch = n_chunks; return total_len; } always_inline void esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, vnet_crypto_op_t ** crypto_ops, vnet_crypto_op_t ** integ_ops, ipsec_sa_t * sa0, u8 * payload, u16 payload_len, u8 iv_sz, u8 icv_sz, vlib_buffer_t ** bufs, vlib_buffer_t ** b, vlib_buffer_t * lb, u32 hdr_len, esp_header_t * esp, esp_gcm_nonce_t * nonce) { if (sa0->crypto_enc_op_id) { vnet_crypto_op_t *op; vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES); vnet_crypto_op_init (op, sa0->crypto_enc_op_id); op->src = op->dst = payload; op->key_index = sa0->crypto_key_index; op->len = payload_len - icv_sz; op->user_data = b - bufs; if (ipsec_sa_is_set_IS_AEAD (sa0)) { /* * construct the AAD in a scratch space in front * of the IP header. 
*/ op->aad = payload - hdr_len - sizeof (esp_aead_t); op->aad_len = esp_aad_fill (op->aad, esp, sa0); op->tag = payload + op->len; op->tag_len = 16; u64 *iv = (u64 *) (payload - iv_sz); nonce->salt = sa0->salt; nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++); op->iv = (u8 *) nonce; } else { op->iv = payload - iv_sz; op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV; } if (lb != b[0]) { /* is chained */ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; op->chunk_index = vec_len (ptd->chunks); op->tag = vlib_buffer_get_tail (lb) - icv_sz; esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload, payload_len, &op->n_chunks); } } if (sa0->integ_op_id) { vnet_crypto_op_t *op; vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES); vnet_crypto_op_init (op, sa0->integ_op_id); op->src = payload - iv_sz - sizeof (esp_header_t); op->digest = payload + payload_len - icv_sz; op->key_index = sa0->integ_key_index; op->digest_len = icv_sz; op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t); op->user_data = b - bufs; if (lb != b[0]) { /* is chained */ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; op->chunk_index = vec_len (ptd->chunks); op->digest = vlib_buffer_get_tail (lb) - icv_sz; esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz, payload - iv_sz - sizeof (esp_header_t), payload_len + iv_sz + sizeof (esp_header_t), op->digest, &op->n_chunks); } else if (ipsec_sa_is_set_USE_ESN (sa0)) { u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi); clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi)); op->len += sizeof (seq_hi); } } } static_always_inline int esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, vnet_crypto_async_frame_t ** async_frame, ipsec_sa_t * sa, vlib_buffer_t * b, esp_header_t * esp, u8 * payload, u32 payload_len, u8 iv_sz, u8 icv_sz, u32 bi, u16 * next, u32 hdr_len, u16 async_next, vlib_buffer_t * lb) { esp_post_data_t *post = esp_post_data (b); u8 *tag, *iv, *aad = 0; u8 flag = 0; u32 key_index; i16 crypto_start_offset, integ_start_offset = 0; u16 crypto_total_len, integ_total_len; post->next_index = next[0]; next[0] = ESP_ENCRYPT_NEXT_PENDING; /* crypto */ crypto_start_offset = payload - b->data; crypto_total_len = integ_total_len = payload_len - icv_sz; tag = payload + crypto_total_len; /* aead */ if (ipsec_sa_is_set_IS_AEAD (sa)) { esp_gcm_nonce_t *nonce; u64 *pkt_iv = (u64 *) (payload - iv_sz); aad = payload - hdr_len - sizeof (esp_aead_t); esp_aad_fill (aad, esp, sa); nonce = (esp_gcm_nonce_t *) (aad - sizeof (*nonce)); nonce->salt = sa->salt; nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->gcm_iv_counter++); iv = (u8 *) nonce; key_index = sa->crypto_key_index; if (lb != b) { /* chain */ flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; tag = vlib_buffer_get_tail (lb) - icv_sz; crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz, payload, payload_len, 0); } goto out; } /* cipher then hash */ iv = payload - iv_sz; integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t); integ_total_len += iv_sz + sizeof (esp_header_t); flag |= VNET_CRYPTO_OP_FLAG_INIT_IV; key_index = sa->linked_key_index; if (b != lb) { flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz, payload, payload_len, 0); tag = vlib_buffer_get_tail (lb) - icv_sz; integ_total_len = esp_encrypt_chain_integ (vm, ptd, sa, b, lb, icv_sz, payload - iv_sz - sizeof (esp_header_t), payload_len + iv_sz + sizeof (esp_header_t), tag, 0); } else if (ipsec_sa_is_set_USE_ESN (sa) 
&& !ipsec_sa_is_set_IS_AEAD (sa)) { u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi); clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi)); integ_total_len += sizeof (seq_hi); } out: return vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len, integ_total_len - crypto_total_len, crypto_start_offset, integ_start_offset, bi, async_next, iv, tag, aad, flag); } /* when submitting a frame is failed, drop all buffers in the frame */ static_always_inline void esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f, vlib_buffer_t ** b, u16 * next) { u32 n_drop = f->n_elts; while (--n_drop) { (b - n_drop)[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR; (next - n_drop)[0] = ESP_ENCRYPT_NEXT_DROP; } vnet_crypto_async_reset_frame (f); } always_inline uword esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip6, int is_tun, u16 async_next) { ipsec_main_t *im = &ipsec_main; ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index); u32 *from = vlib_frame_vector_args (frame); u32 n_left = frame->n_vectors; vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; u16 nexts[VLIB_FRAME_SIZE], *next = nexts; esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces; u32 thread_index = vm->thread_index; u16 buffer_data_size = vlib_buffer_get_default_data_size (vm); u32 current_sa_index = ~0, current_sa_packets = 0; u32 current_sa_bytes = 0, spi = 0; u8 block_sz = 0, iv_sz = 0, icv_sz = 0; ipsec_sa_t *sa0 = 0; vlib_buffer_t *lb; vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops; vnet_crypto_op_t **integ_ops = &ptd->integ_ops; vnet_crypto_async_frame_t *async_frame = 0; int is_async = im->async_mode; vnet_crypto_async_op_id_t last_async_op = ~0; vlib_get_buffers (vm, from, b, n_left); if (!is_async) { vec_reset_length (ptd->crypto_ops); vec_reset_length (ptd->integ_ops); vec_reset_length (ptd->chained_crypto_ops); vec_reset_length (ptd->chained_integ_ops); } vec_reset_length (ptd->chunks); while (n_left > 0) { u32 sa_index0; dpo_id_t *dpo; esp_header_t *esp; u8 *payload, *next_hdr_ptr; u16 payload_len, payload_len_total, n_bufs; u32 hdr_len; if (n_left > 2) { u8 *p; vlib_prefetch_buffer_header (b[2], LOAD); p = vlib_buffer_get_current (b[1]); CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD); p -= CLIB_CACHE_LINE_BYTES; CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD); } if (is_tun) { /* we are on a ipsec tunnel's feature arc */ vnet_buffer (b[0])->ipsec.sad_index = sa_index0 = ipsec_tun_protect_get_sa_out (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]); } else sa_index0 = vnet_buffer (b[0])->ipsec.sad_index; if (sa_index0 != current_sa_index) { if (current_sa_packets) vlib_increment_combined_counter (&ipsec_sa_counters, thread_index, current_sa_index, current_sa_packets, current_sa_bytes); current_sa_packets = current_sa_bytes = 0; sa0 = pool_elt_at_index (im->sad, sa_index0); current_sa_index = sa_index0; spi = clib_net_to_host_u32 (sa0->spi); block_sz = sa0->crypto_block_size; icv_sz = sa0->integ_icv_size; iv_sz = sa0->crypto_iv_size; /* submit frame when op_id is different then the old one */ if (is_async && sa0->crypto_async_enc_op_id != last_async_op) { if (async_frame && async_frame->n_elts) { if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0) esp_async_recycle_failed_submit (async_frame, b, next); } async_frame = vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id); last_async_op = sa0->crypto_async_enc_op_id; } } if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index)) { /* this is the first packet to use this SA, 
claim the SA * for this thread. this could happen simultaneously on * another thread */ clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0, ipsec_sa_assign_thread (thread_index)); } if (PREDICT_TRUE (thread_index != sa0->encrypt_thread_index)) { next[0] = ESP_ENCRYPT_NEXT_HANDOFF; goto trace; } lb = b[0]; n_bufs = vlib_buffer_chain_linearize (vm, b[0]); if (n_bufs == 0) { b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS]; next[0] = ESP_ENCRYPT_NEXT_DROP; goto trace; } if (n_bufs > 1) { crypto_ops = &ptd->chained_crypto_ops; integ_ops = &ptd->chained_integ_ops; /* find last buffer in the chain */ while (lb->flags & VLIB_BUFFER_NEXT_PRESENT) lb = vlib_get_buffer (vm, lb->next_buffer); } else { crypto_ops = &ptd->crypto_ops; integ_ops = &ptd->integ_ops; } if (PREDICT_FALSE (esp_seq_advance (sa0))) { b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED]; next[0] = ESP_ENCRYPT_NEXT_DROP; goto trace; } /* space for IV */ hdr_len = iv_sz; if (ipsec_sa_is_set_IS_TUNNEL (sa0)) { payload = vlib_buffer_get_current (b[0]); next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, block_sz, icv_sz, next, node, buffer_data_size, vlib_buffer_length_in_chain (vm, b[0])); if (!next_hdr_ptr) { b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS]; next[0] = ESP_ENCRYPT_NEXT_DROP; goto trace; } b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID; payload_len = b[0]->current_length; payload_len_total = vlib_buffer_length_in_chain (vm, b[0]); /* ESP header */ hdr_len += sizeof (*esp); esp = (esp_header_t *) (payload - hdr_len); /* optional UDP header */ if (ipsec_sa_is_set_UDP_ENCAP (sa0)) { hdr_len += sizeof (udp_header_t); esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len), payload_len_total + hdr_len); } /* IP header */ if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)) { ip6_header_t *ip6; u16 len = sizeof (ip6_header_t); hdr_len += len; ip6 = (ip6_header_t *) (payload - hdr_len); clib_memcpy_fast (ip6, &sa0->ip6_hdr, len); *next_hdr_ptr = (is_ip6 ? IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); len = payload_len_total + hdr_len - len; ip6->payload_length = clib_net_to_host_u16 (len); } else { ip4_header_t *ip4; u16 len = sizeof (ip4_header_t); hdr_len += len; ip4 = (ip4_header_t *) (payload - hdr_len); clib_memcpy_fast (ip4, &sa0->ip4_hdr, len); *next_hdr_ptr = (is_ip6 ? IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); len = payload_len_total + hdr_len; esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0); } dpo = &sa0->dpo; if (!is_tun) { next[0] = dpo->dpoi_next_node; vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index; } else next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT; } else /* transport mode */ { u8 *l2_hdr, l2_len, *ip_hdr, ip_len; ip6_ext_header_t *ext_hdr; udp_header_t *udp = 0; u16 udp_len = 0; u8 *old_ip_hdr = vlib_buffer_get_current (b[0]); ip_len = is_ip6 ? 
esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) : ip4_header_bytes ((ip4_header_t *) old_ip_hdr); vlib_buffer_advance (b[0], ip_len); payload = vlib_buffer_get_current (b[0]); next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, block_sz, icv_sz, next, node, buffer_data_size, vlib_buffer_length_in_chain (vm, b[0])); if (!next_hdr_ptr) goto trace; b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID; payload_len = b[0]->current_length; payload_len_total = vlib_buffer_length_in_chain (vm, b[0]); /* ESP header */ hdr_len += sizeof (*esp); esp = (esp_header_t *) (payload - hdr_len); /* optional UDP header */ if (ipsec_sa_is_set_UDP_ENCAP (sa0)) { hdr_len += sizeof (udp_header_t); udp = (udp_header_t *) (payload - hdr_len); } /* IP header */ hdr_len += ip_len; ip_hdr = payload - hdr_len; /* L2 header */ if (!is_tun) { l2_len = vnet_buffer (b[0])->ip.save_rewrite_length; hdr_len += l2_len; l2_hdr = payload - hdr_len; /* copy l2 and ip header */ clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len); } else l2_len = 0; if (is_ip6) { ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr); if (PREDICT_TRUE (NULL == ext_hdr)) { *next_hdr_ptr = ip6->protocol; ip6->protocol = IP_PROTOCOL_IPSEC_ESP; } else { *next_hdr_ptr = ext_hdr->next_hdr; ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP; } ip6->payload_length = clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len - sizeof (ip6_header_t)); } else { u16 len; ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr); *next_hdr_ptr = ip4->protocol; len = payload_len_total + hdr_len - l2_len; if (udp) { esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1); udp_len = len - ip_len; } else esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0); } clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len); if (udp) { esp_fill_udp_hdr (sa0, udp, udp_len); } next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT; } esp->spi = spi; esp->seq = clib_net_to_host_u32 (sa0->seq); if (is_async) { if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0)) goto trace; if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp, payload, payload_len, iv_sz, icv_sz, from[b - bufs], next, hdr_len, async_next, lb)) { esp_async_recycle_failed_submit (async_frame, b, next); goto trace; } } else { esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload, payload_len, iv_sz, icv_sz, bufs, b, lb, hdr_len, esp, nonce++); } vlib_buffer_advance (b[0], 0LL - hdr_len); current_sa_packets += 1; current_sa_bytes += payload_len_total; trace: if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED)) { esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr)); tr->sa_index = sa_index0; tr->spi = sa0->spi; tr->seq = sa0->seq; tr->sa_seq_hi = sa0->seq_hi; tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0); tr->crypto_alg = sa0->crypto_alg; tr->integ_alg = sa0->integ_alg; } /* next */ n_left -= 1; next += 1; b += 1; } vlib_increment_combined_counter (&ipsec_sa_counters, thread_index, current_sa_index, current_sa_packets, current_sa_bytes); if (!is_async) { esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts); esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts, ptd->chunks); esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts); esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts, ptd->chunks); } else if (async_frame && async_frame->n_elts) { if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0) esp_async_recycle_failed_submit (async_frame, b, next); } vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors); 
vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); return frame->n_vectors; } always_inline uword esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; u16 nexts[VLIB_FRAME_SIZE], *next = nexts; u32 *from = vlib_frame_vector_args (frame); u32 n_left = frame->n_vectors; vlib_get_buffers (vm, from, b, n_left); if (n_left >= 4) { vlib_prefetch_buffer_header (b[0], LOAD); vlib_prefetch_buffer_header (b[1], LOAD); vlib_prefetch_buffer_header (b[2], LOAD); vlib_prefetch_buffer_header (b[3], LOAD); } while (n_left > 8) { vlib_prefetch_buffer_header (b[4], LOAD); vlib_prefetch_buffer_header (b[5], LOAD); vlib_prefetch_buffer_header (b[6], LOAD); vlib_prefetch_buffer_header (b[7], LOAD); next[0] = (esp_post_data (b[0]))->next_index; next[1] = (esp_post_data (b[1]))->next_index; next[2] = (esp_post_data (b[2]))->next_index; next[3] = (esp_post_data (b[3]))->next_index; if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE)) { if (b[0]->flags & VLIB_BUFFER_IS_TRACED) { esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr)); tr->next_index = next[0]; } if (b[1]->flags & VLIB_BUFFER_IS_TRACED) { esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1], sizeof (*tr)); tr->next_index = next[1]; } if (b[2]->flags & VLIB_BUFFER_IS_TRACED) { esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2], sizeof (*tr)); tr->next_index = next[2]; } if (b[3]->flags & VLIB_BUFFER_IS_TRACED) { esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3], sizeof (*tr)); tr->next_index = next[3]; } } b += 4; next += 4; n_left -= 4; } while (n_left > 0) { next[0] = (esp_post_data (b[0]))->next_index; if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED)) { esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr)); tr->next_index = next[0]; } b += 1; next += 1; n_left -= 1; } vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_POST_RX_PKTS, frame->n_vectors); vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); return frame->n_vectors; } VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0, esp_encrypt_async_next.esp4_post_next); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp4_encrypt_node) = { .name = "esp4-encrypt", .vector_size = sizeof (u32), .format_trace = format_esp_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN(esp_encrypt_error_strings), .error_strings = esp_encrypt_error_strings, .n_next_nodes = ESP_ENCRYPT_N_NEXT, .next_nodes = { [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop", [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-handoff", [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output", [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending", }, }; /* *INDENT-ON* */ VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return esp_encrypt_post_inline (vm, node, from_frame); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp4_encrypt_post_node) = { .name = "esp4-encrypt-post", .vector_size = sizeof (u32), .format_trace = format_esp_post_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, .sibling_of = "esp4-encrypt", .n_errors = ARRAY_LEN(esp_encrypt_error_strings), .error_strings = esp_encrypt_error_strings, }; /* *INDENT-ON* */ VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { 
return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0, esp_encrypt_async_next.esp6_post_next); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp6_encrypt_node) = { .name = "esp6-encrypt", .vector_size = sizeof (u32), .format_trace = format_esp_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN(esp_encrypt_error_strings), .error_strings = esp_encrypt_error_strings, .n_next_nodes = ESP_ENCRYPT_N_NEXT, .next_nodes = { [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop", [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-handoff", [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output", [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending", }, }; /* *INDENT-ON* */ VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return esp_encrypt_post_inline (vm, node, from_frame); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp6_encrypt_post_node) = { .name = "esp6-encrypt-post", .vector_size = sizeof (u32), .format_trace = format_esp_post_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, .sibling_of = "esp6-encrypt", .n_errors = ARRAY_LEN(esp_encrypt_error_strings), .error_strings = esp_encrypt_error_strings, }; /* *INDENT-ON* */ VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1, esp_encrypt_async_next.esp4_tun_post_next); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = { .name = "esp4-encrypt-tun", .vector_size = sizeof (u32), .format_trace = format_esp_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN(esp_encrypt_error_strings), .error_strings = esp_encrypt_error_strings, .n_next_nodes = ESP_ENCRYPT_N_NEXT, .next_nodes = { [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop", [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-tun-handoff", [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx", [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending", }, }; VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return esp_encrypt_post_inline (vm, node, from_frame); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = { .name = "esp4-encrypt-tun-post", .vector_size = sizeof (u32), .format_trace = format_esp_post_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, .sibling_of = "esp4-encrypt-tun", .n_errors = ARRAY_LEN(esp_encrypt_error_strings), .error_strings = esp_encrypt_error_strings, }; /* *INDENT-ON* */ VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1, esp_encrypt_async_next.esp6_tun_post_next); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = { .name = "esp6-encrypt-tun", .vector_size = sizeof (u32), .format_trace = format_esp_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN(esp_encrypt_error_strings), .error_strings = esp_encrypt_error_strings, .n_next_nodes = ESP_ENCRYPT_N_NEXT, .next_nodes = { [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop", [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-tun-handoff", [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending", [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx", }, }; /* *INDENT-ON* */ VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return esp_encrypt_post_inline (vm, node, from_frame); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) 
= { .name = "esp6-encrypt-tun-post", .vector_size = sizeof (u32), .format_trace = format_esp_post_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, .sibling_of = "esp6-encrypt-tun", .n_errors = ARRAY_LEN(esp_encrypt_error_strings), .error_strings = esp_encrypt_error_strings, }; /* *INDENT-ON* */ typedef struct { u32 sa_index; } esp_no_crypto_trace_t; static u8 * format_esp_no_crypto_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *); s = format (s, "esp-no-crypto: sa-index %u", t->sa_index); return s; } enum { ESP_NO_CRYPTO_NEXT_DROP, ESP_NO_CRYPTO_N_NEXT, }; enum { ESP_NO_CRYPTO_ERROR_RX_PKTS, }; static char *esp_no_crypto_error_strings[] = { "Outbound ESP packets received", }; always_inline uword esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; u32 *from = vlib_frame_vector_args (frame); u32 n_left = frame->n_vectors; vlib_get_buffers (vm, from, b, n_left); while (n_left > 0) { u32 sa_index0; /* packets are always going to be dropped, but get the sa_index */ sa_index0 = ipsec_tun_protect_get_sa_out (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]); if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED)) { esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr)); tr->sa_index = sa_index0; } n_left -= 1; b += 1; } vlib_node_increment_counter (vm, node->node_index, ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors); vlib_buffer_enqueue_to_single_next (vm, node, from, ESP_NO_CRYPTO_NEXT_DROP, frame->n_vectors); return frame->n_vectors; } VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return esp_no_crypto_inline (vm, node, from_frame); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) = { .name = "esp4-no-crypto", .vector_size = sizeof (u32), .format_trace = format_esp_no_crypto_trace, .n_errors = ARRAY_LEN(esp_no_crypto_error_strings), .error_strings = esp_no_crypto_error_strings, .n_next_nodes = ESP_NO_CRYPTO_N_NEXT, .next_nodes = { [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop", }, }; VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return esp_no_crypto_inline (vm, node, from_frame); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) = { .name = "esp6-no-crypto", .vector_size = sizeof (u32), .format_trace = format_esp_no_crypto_trace, .n_errors = ARRAY_LEN(esp_no_crypto_error_strings), .error_strings = esp_no_crypto_error_strings, .n_next_nodes = ESP_NO_CRYPTO_N_NEXT, .next_nodes = { [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop", }, }; /* *INDENT-ON* */ VLIB_NODE_FN (esp_encrypt_pending_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { return from_frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp_encrypt_pending_node) = { .name = "esp-encrypt-pending", .vector_size = sizeof (u32), .type = VLIB_NODE_TYPE_INTERNAL, .n_next_nodes = 0 }; /* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */