-rw-r--r--  src/vnet/vxlan/encap.c        | 505
-rw-r--r--  src/vnet/vxlan/vxlan_packet.h |  33
2 files changed, 282 insertions(+), 256 deletions(-)
diff --git a/src/vnet/vxlan/encap.c b/src/vnet/vxlan/encap.c
index 2426a8ccaf0..2bc221adaff 100644
--- a/src/vnet/vxlan/encap.c
+++ b/src/vnet/vxlan/encap.c
@@ -26,38 +26,41 @@
#define foreach_vxlan_encap_error \
_(ENCAPSULATED, "good packets encapsulated")
-static char * vxlan_encap_error_strings[] = {
+static char *vxlan_encap_error_strings[] = {
#define _(sym,string) string,
foreach_vxlan_encap_error
#undef _
};
-typedef enum {
+typedef enum
+{
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
- foreach_vxlan_encap_error
+ foreach_vxlan_encap_error
#undef _
VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;
-typedef enum {
- VXLAN_ENCAP_NEXT_DROP,
- VXLAN_ENCAP_N_NEXT,
+typedef enum
+{
+ VXLAN_ENCAP_NEXT_DROP,
+ VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;
-typedef struct {
+typedef struct
+{
u32 tunnel_index;
u32 vni;
} vxlan_encap_trace_t;
#ifndef CLIB_MARCH_VARIANT
-u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
+u8 *
+format_vxlan_encap_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- vxlan_encap_trace_t * t
- = va_arg (*args, vxlan_encap_trace_t *);
+ vxlan_encap_trace_t *t = va_arg (*args, vxlan_encap_trace_t *);
- s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
+ s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
t->tunnel_index, t->vni);
return s;
}
@@ -66,20 +69,19 @@ u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
always_inline uword
vxlan_encap_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
- vlib_frame_t * from_frame,
- u8 is_ip4, u8 csum_offload)
+ vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
- u32 n_left_from, next_index, * from, * to_next;
- vxlan_main_t * vxm = &vxlan_main;
- vnet_main_t * vnm = vxm->vnet_main;
- vnet_interface_main_t * im = &vnm->interface_main;
- vlib_combined_counter_main_t * tx_counter =
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
+ u32 n_left_from, next_index, *from, *to_next;
+ vxlan_main_t *vxm = &vxlan_main;
+ vnet_main_t *vnm = vxm->vnet_main;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vlib_combined_counter_main_t *tx_counter =
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
u32 pkts_encapsulated = 0;
- u32 thread_index = vlib_get_thread_index();
+ u32 thread_index = vlib_get_thread_index ();
u32 sw_if_index0 = 0, sw_if_index1 = 0;
u32 next0 = 0, next1 = 0;
- vxlan_tunnel_t * t0 = NULL, * t1 = NULL;
+ vxlan_tunnel_t *t0 = NULL, *t1 = NULL;
index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
vlib_buffer_t **b = bufs;
@@ -89,23 +91,22 @@ vxlan_encap_inline (vlib_main_t * vm,
next_index = node->cached_next_index;
- STATIC_ASSERT_SIZEOF(ip6_vxlan_header_t, 56);
- STATIC_ASSERT_SIZEOF(ip4_vxlan_header_t, 36);
+ STATIC_ASSERT_SIZEOF (ip6_vxlan_header_t, 56);
+ STATIC_ASSERT_SIZEOF (ip4_vxlan_header_t, 36);
u8 const underlay_hdr_len = is_ip4 ?
- sizeof(ip4_vxlan_header_t) : sizeof(ip6_vxlan_header_t);
- u16 const l3_len = is_ip4 ? sizeof(ip4_header_t) : sizeof(ip6_header_t);
+ sizeof (ip4_vxlan_header_t) : sizeof (ip6_vxlan_header_t);
+ u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
u32 const csum_flags = is_ip4 ? VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
- VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM :
- VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+ VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM :
+ VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
vlib_get_buffers (vm, from, bufs, n_left_from);
while (n_left_from > 0)
{
u32 n_left_to_next;
- vlib_get_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_left_from >= 4 && n_left_to_next >= 2)
{
@@ -114,8 +115,10 @@ vxlan_encap_inline (vlib_main_t * vm,
vlib_prefetch_buffer_header (b[2], LOAD);
vlib_prefetch_buffer_header (b[3], LOAD);
- CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
+ 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
+ 2 * CLIB_CACHE_LINE_BYTES, LOAD);
}
u32 bi0 = to_next[0] = from[0];
@@ -125,19 +128,19 @@ vxlan_encap_inline (vlib_main_t * vm,
n_left_to_next -= 2;
n_left_from -= 2;
- vlib_buffer_t * b0 = b[0];
- vlib_buffer_t * b1 = b[1];
+ vlib_buffer_t *b0 = b[0];
+ vlib_buffer_t *b1 = b[1];
b += 2;
- u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
- u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);
+ u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
+ u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);
/* Get next node index and adj index from tunnel next_dpo */
- if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
+ if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
{
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
- vnet_hw_interface_t *hi0 =
- vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ vnet_hw_interface_t *hi0 =
+ vnet_get_sup_hw_interface (vnm, sw_if_index0);
t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
next0 = t0->next_dpo.dpoi_next_node;
@@ -145,181 +148,183 @@ vxlan_encap_inline (vlib_main_t * vm,
}
/* Get next node index and adj index from tunnel next_dpo */
- if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
+ if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
{
- if (sw_if_index0 == vnet_buffer(b1)->sw_if_index[VLIB_TX])
- {
+ if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
+ {
sw_if_index1 = sw_if_index0;
t1 = t0;
next1 = next0;
dpoi_idx1 = dpoi_idx0;
- }
+ }
else
- {
- sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
- vnet_hw_interface_t *hi1 =
- vnet_get_sup_hw_interface (vnm, sw_if_index1);
+ {
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+ vnet_hw_interface_t *hi1 =
+ vnet_get_sup_hw_interface (vnm, sw_if_index1);
t1 = &vxm->tunnels[hi1->dev_instance];
	      /* Note: change to always set next1 if it may be set to drop */
next1 = t1->next_dpo.dpoi_next_node;
dpoi_idx1 = t1->next_dpo.dpoi_index;
- }
+ }
}
- vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
- vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
+ vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;
- ASSERT(t0->rewrite_header.data_bytes == underlay_hdr_len);
- ASSERT(t1->rewrite_header.data_bytes == underlay_hdr_len);
- vnet_rewrite_two_headers(*t0, *t1, vlib_buffer_get_current(b0), vlib_buffer_get_current(b1), underlay_hdr_len);
+ ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
+ ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
+ vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b0),
+ vlib_buffer_get_current (b1),
+ underlay_hdr_len);
- vlib_buffer_advance (b0, -underlay_hdr_len);
- vlib_buffer_advance (b1, -underlay_hdr_len);
+ vlib_buffer_advance (b0, -underlay_hdr_len);
+ vlib_buffer_advance (b1, -underlay_hdr_len);
- u32 len0 = vlib_buffer_length_in_chain (vm, b0);
- u32 len1 = vlib_buffer_length_in_chain (vm, b1);
- u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
- u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);
+ u32 len0 = vlib_buffer_length_in_chain (vm, b0);
+ u32 len1 = vlib_buffer_length_in_chain (vm, b1);
+ u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
+ u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);
- void * underlay0 = vlib_buffer_get_current(b0);
- void * underlay1 = vlib_buffer_get_current(b1);
+ void *underlay0 = vlib_buffer_get_current (b0);
+ void *underlay1 = vlib_buffer_get_current (b1);
- ip4_header_t * ip4_0, * ip4_1;
+ ip4_header_t *ip4_0, *ip4_1;
qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
- ip6_header_t * ip6_0, * ip6_1;
- udp_header_t * udp0, * udp1;
- u8 * l3_0, * l3_1;
+ ip6_header_t *ip6_0, *ip6_1;
+ udp_header_t *udp0, *udp1;
+ u8 *l3_0, *l3_1;
if (is_ip4)
{
- ip4_vxlan_header_t * hdr0 = underlay0;
- ip4_vxlan_header_t * hdr1 = underlay1;
+ ip4_vxlan_header_t *hdr0 = underlay0;
+ ip4_vxlan_header_t *hdr1 = underlay1;
/* Fix the IP4 checksum and length */
ip4_0 = &hdr0->ip4;
ip4_1 = &hdr1->ip4;
- ip4_0->length = clib_host_to_net_u16 (len0);
- ip4_1->length = clib_host_to_net_u16 (len1);
+ ip4_0->length = clib_host_to_net_u16 (len0);
+ ip4_1->length = clib_host_to_net_u16 (len1);
if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
- {
+ {
ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
ip4_0->tos = ip4_0_tos;
}
if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
- {
+ {
ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
ip4_1->tos = ip4_1_tos;
}
- l3_0 = (u8 *)ip4_0;
- l3_1 = (u8 *)ip4_1;
+ l3_0 = (u8 *) ip4_0;
+ l3_1 = (u8 *) ip4_1;
udp0 = &hdr0->udp;
udp1 = &hdr1->udp;
}
- else /* ipv6 */
+ else /* ipv6 */
{
- ip6_vxlan_header_t * hdr0 = underlay0;
- ip6_vxlan_header_t * hdr1 = underlay1;
+ ip6_vxlan_header_t *hdr0 = underlay0;
+ ip6_vxlan_header_t *hdr1 = underlay1;
/* Fix IP6 payload length */
- ip6_0 = &hdr0->ip6;
- ip6_1 = &hdr1->ip6;
+ ip6_0 = &hdr0->ip6;
+ ip6_1 = &hdr1->ip6;
ip6_0->payload_length = payload_l0;
ip6_1->payload_length = payload_l1;
- l3_0 = (u8 *)ip6_0;
- l3_1 = (u8 *)ip6_1;
- udp0 = &hdr0->udp;
- udp1 = &hdr1->udp;
+ l3_0 = (u8 *) ip6_0;
+ l3_1 = (u8 *) ip6_1;
+ udp0 = &hdr0->udp;
+ udp1 = &hdr1->udp;
}
- /* Fix UDP length and set source port */
- udp0->length = payload_l0;
- udp0->src_port = flow_hash0;
- udp1->length = payload_l1;
- udp1->src_port = flow_hash1;
-
- if (csum_offload)
- {
- b0->flags |= csum_flags;
- vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
- vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
- b1->flags |= csum_flags;
- vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
- vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
- }
- /* IPv4 UDP checksum only if checksum offload is used */
- else if (is_ip4)
- {
- ip_csum_t sum0 = ip4_0->checksum;
- sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
- length /* changed member */);
+ /* Fix UDP length and set source port */
+ udp0->length = payload_l0;
+ udp0->src_port = flow_hash0;
+ udp1->length = payload_l1;
+ udp1->src_port = flow_hash1;
+
+ if (csum_offload)
+ {
+ b0->flags |= csum_flags;
+ vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
+ vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
+ b1->flags |= csum_flags;
+ vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
+ vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
+ }
+ /* IPv4 UDP checksum only if checksum offload is used */
+ else if (is_ip4)
+ {
+ ip_csum_t sum0 = ip4_0->checksum;
+ sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
+ length /* changed member */ );
if (PREDICT_FALSE (ip4_0_tos))
- {
+ {
sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
- tos /* changed member */);
+ tos /* changed member */ );
}
- ip4_0->checksum = ip_csum_fold (sum0);
- ip_csum_t sum1 = ip4_1->checksum;
- sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
- length /* changed member */);
+ ip4_0->checksum = ip_csum_fold (sum0);
+ ip_csum_t sum1 = ip4_1->checksum;
+ sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
+ length /* changed member */ );
if (PREDICT_FALSE (ip4_1_tos))
- {
+ {
sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
- tos /* changed member */);
+ tos /* changed member */ );
}
- ip4_1->checksum = ip_csum_fold (sum1);
- }
- /* IPv6 UDP checksum is mandatory */
- else
- {
- int bogus = 0;
-
- udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
- (vm, b0, ip6_0, &bogus);
- ASSERT(bogus == 0);
- if (udp0->checksum == 0)
- udp0->checksum = 0xffff;
- udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
- (vm, b1, ip6_1, &bogus);
- ASSERT(bogus == 0);
- if (udp1->checksum == 0)
- udp1->checksum = 0xffff;
- }
-
- /* save inner packet flow_hash for load-balance node */
- vnet_buffer (b0)->ip.flow_hash = flow_hash0;
- vnet_buffer (b1)->ip.flow_hash = flow_hash1;
-
- if (sw_if_index0 == sw_if_index1)
- {
- vlib_increment_combined_counter (tx_counter, thread_index,
- sw_if_index0, 2, len0 + len1);
- }
- else
- {
- vlib_increment_combined_counter (tx_counter, thread_index,
- sw_if_index0, 1, len0);
- vlib_increment_combined_counter (tx_counter, thread_index,
- sw_if_index1, 1, len1);
- }
- pkts_encapsulated += 2;
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- vxlan_encap_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->tunnel_index = t0 - vxm->tunnels;
- tr->vni = t0->vni;
- }
-
- if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
- {
- vxlan_encap_trace_t *tr =
- vlib_add_trace (vm, node, b1, sizeof (*tr));
- tr->tunnel_index = t1 - vxm->tunnels;
- tr->vni = t1->vni;
- }
+ ip4_1->checksum = ip_csum_fold (sum1);
+ }
+ /* IPv6 UDP checksum is mandatory */
+ else
+ {
+ int bogus = 0;
+
+ udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
+ (vm, b0, ip6_0, &bogus);
+ ASSERT (bogus == 0);
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
+ (vm, b1, ip6_1, &bogus);
+ ASSERT (bogus == 0);
+ if (udp1->checksum == 0)
+ udp1->checksum = 0xffff;
+ }
+
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+ vnet_buffer (b1)->ip.flow_hash = flow_hash1;
+
+ if (sw_if_index0 == sw_if_index1)
+ {
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index0, 2, len0 + len1);
+ }
+ else
+ {
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index0, 1, len0);
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index1, 1, len1);
+ }
+ pkts_encapsulated += 2;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->tunnel_index = t0 - vxm->tunnels;
+ tr->vni = t0->vni;
+ }
+
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->tunnel_index = t1 - vxm->tunnels;
+ tr->vni = t1->vni;
+ }
vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
to_next, n_left_to_next,
@@ -334,116 +339,117 @@ vxlan_encap_inline (vlib_main_t * vm,
n_left_from -= 1;
n_left_to_next -= 1;
- vlib_buffer_t * b0 = b[0];
+ vlib_buffer_t *b0 = b[0];
b += 1;
- u32 flow_hash0 = vnet_l2_compute_flow_hash(b0);
+ u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
/* Get next node index and adj index from tunnel next_dpo */
- if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
+ if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
{
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
- vnet_hw_interface_t *hi0 =
- vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ vnet_hw_interface_t *hi0 =
+ vnet_get_sup_hw_interface (vnm, sw_if_index0);
t0 = &vxm->tunnels[hi0->dev_instance];
/* Note: change to always set next0 if it may be set to drop */
next0 = t0->next_dpo.dpoi_next_node;
dpoi_idx0 = t0->next_dpo.dpoi_index;
}
- vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
- ASSERT(t0->rewrite_header.data_bytes == underlay_hdr_len);
- vnet_rewrite_one_header(*t0, vlib_buffer_get_current(b0), underlay_hdr_len);
+ ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
+ vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b0),
+ underlay_hdr_len);
- vlib_buffer_advance (b0, -underlay_hdr_len);
- void * underlay0 = vlib_buffer_get_current(b0);
+ vlib_buffer_advance (b0, -underlay_hdr_len);
+ void *underlay0 = vlib_buffer_get_current (b0);
- u32 len0 = vlib_buffer_length_in_chain (vm, b0);
- u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
+ u32 len0 = vlib_buffer_length_in_chain (vm, b0);
+ u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
- udp_header_t * udp0;
- ip4_header_t * ip4_0;
+ udp_header_t *udp0;
+ ip4_header_t *ip4_0;
qos_bits_t ip4_0_tos = 0;
- ip6_header_t * ip6_0;
- u8 * l3_0;
+ ip6_header_t *ip6_0;
+ u8 *l3_0;
if (is_ip4)
{
- ip4_vxlan_header_t * hdr = underlay0;
+ ip4_vxlan_header_t *hdr = underlay0;
/* Fix the IP4 checksum and length */
- ip4_0 = &hdr->ip4;
- ip4_0->length = clib_host_to_net_u16 (len0);
+ ip4_0 = &hdr->ip4;
+ ip4_0->length = clib_host_to_net_u16 (len0);
if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
- {
+ {
ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
ip4_0->tos = ip4_0_tos;
}
- l3_0 = (u8*)ip4_0;
+ l3_0 = (u8 *) ip4_0;
udp0 = &hdr->udp;
}
- else /* ip6 path */
+ else /* ip6 path */
{
- ip6_vxlan_header_t * hdr = underlay0;
+ ip6_vxlan_header_t *hdr = underlay0;
/* Fix IP6 payload length */
- ip6_0 = &hdr->ip6;
+ ip6_0 = &hdr->ip6;
ip6_0->payload_length = payload_l0;
- l3_0 = (u8 *)ip6_0;
- udp0 = &hdr->udp;
+ l3_0 = (u8 *) ip6_0;
+ udp0 = &hdr->udp;
}
- /* Fix UDP length and set source port */
- udp0->length = payload_l0;
- udp0->src_port = flow_hash0;
-
- if (csum_offload)
- {
- b0->flags |= csum_flags;
- vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
- vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
- }
- /* IPv4 UDP checksum only if checksum offload is used */
- else if (is_ip4)
- {
- ip_csum_t sum0 = ip4_0->checksum;
- sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
- length /* changed member */);
+ /* Fix UDP length and set source port */
+ udp0->length = payload_l0;
+ udp0->src_port = flow_hash0;
+
+ if (csum_offload)
+ {
+ b0->flags |= csum_flags;
+ vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
+ vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
+ }
+ /* IPv4 UDP checksum only if checksum offload is used */
+ else if (is_ip4)
+ {
+ ip_csum_t sum0 = ip4_0->checksum;
+ sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
+ length /* changed member */ );
if (PREDICT_FALSE (ip4_0_tos))
- {
+ {
sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
- tos /* changed member */);
+ tos /* changed member */ );
}
- ip4_0->checksum = ip_csum_fold (sum0);
- }
- /* IPv6 UDP checksum is mandatory */
- else
- {
- int bogus = 0;
-
- udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
- (vm, b0, ip6_0, &bogus);
- ASSERT(bogus == 0);
- if (udp0->checksum == 0)
- udp0->checksum = 0xffff;
- }
-
- /* reuse inner packet flow_hash for load-balance node */
- vnet_buffer (b0)->ip.flow_hash = flow_hash0;
-
- vlib_increment_combined_counter (tx_counter, thread_index,
- sw_if_index0, 1, len0);
- pkts_encapsulated ++;
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- vxlan_encap_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->tunnel_index = t0 - vxm->tunnels;
- tr->vni = t0->vni;
- }
+ ip4_0->checksum = ip_csum_fold (sum0);
+ }
+ /* IPv6 UDP checksum is mandatory */
+ else
+ {
+ int bogus = 0;
+
+ udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
+ (vm, b0, ip6_0, &bogus);
+ ASSERT (bogus == 0);
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ }
+
+ /* reuse inner packet flow_hash for load-balance node */
+ vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index0, 1, len0);
+ pkts_encapsulated++;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->tunnel_index = t0 - vxm->tunnels;
+ tr->vni = t0->vni;
+ }
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
bi0, next0);
@@ -453,32 +459,33 @@ vxlan_encap_inline (vlib_main_t * vm,
}
/* Do we still need this now that tunnel tx stats is kept? */
- vlib_node_increment_counter (vm, node->node_index,
- VXLAN_ENCAP_ERROR_ENCAPSULATED,
- pkts_encapsulated);
+ vlib_node_increment_counter (vm, node->node_index,
+ VXLAN_ENCAP_ERROR_ENCAPSULATED,
+ pkts_encapsulated);
return from_frame->n_vectors;
}
VLIB_NODE_FN (vxlan4_encap_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
  /* Disable checksum offload: the setup overhead in the tx node is not
     worthwhile for the ip4 header checksum alone, unless the udp checksum
     is also required */
- return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
+ return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
/* csum_offload */ 0);
}
VLIB_NODE_FN (vxlan6_encap_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as udp checksum is mandatory */
- return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
+ return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
/* csum_offload */ 1);
}
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_encap_node) = {
.name = "vxlan4-encap",
.vector_size = sizeof (u32),
@@ -504,4 +511,12 @@ VLIB_REGISTER_NODE (vxlan6_encap_node) = {
[VXLAN_ENCAP_NEXT_DROP] = "error-drop",
},
};
+/* *INDENT-ON* */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
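
Two checksum rules are easy to lose in the reindented hunks above. The
non-offload IPv4 branch patches the header checksum incrementally with
ip_csum_update()/ip_csum_fold() instead of re-summing the header, and the
IPv6 branch substitutes 0xffff when the computed UDP checksum comes out 0,
because a zero UDP checksum over IPv6 means "no checksum" and is not
permitted. A minimal stand-alone sketch of the incremental update
(csum_update16() is a hypothetical helper written for illustration, not
the VPP macro, and the example values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* RFC 1624 eqn. 3: HC' = ~(~HC + ~m + m'), folding end-around carries. */
static uint16_t
csum_update16 (uint16_t csum, uint16_t old_val, uint16_t new_val)
{
  uint32_t sum = (uint16_t) ~csum + (uint16_t) ~old_val + new_val;
  sum = (sum & 0xffff) + (sum >> 16);   /* fold the carry back in */
  sum = (sum & 0xffff) + (sum >> 16);   /* second fold covers the edge case */
  return (uint16_t) ~sum;
}

int
main (void)
{
  /* e.g. ip4->length growing from 0 (rewrite template) to the real total
     length, as in ip_csum_update (sum0, 0, ip4_0->length, ...) above */
  printf ("0x%04x\n", csum_update16 (0xb1e6, 0, 1450));
  return 0;
}

With old_val == 0 this reduces to folding the new value into the
complemented sum, which matches the fix-up the encap node performs on
ip4->length and, when QoS marking is active, on the tos byte.
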
diff --git a/src/vnet/vxlan/vxlan_packet.h b/src/vnet/vxlan/vxlan_packet.h
index e172b6f4eb9..d1d1ed813e5 100644
--- a/src/vnet/vxlan/vxlan_packet.h
+++ b/src/vnet/vxlan/vxlan_packet.h
@@ -15,33 +15,34 @@
#ifndef __included_vxlan_packet_h__
#define __included_vxlan_packet_h__ 1
-/*
+/*
* From RFC-7348
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |R|R|R|R|I|R|R|R| Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | VXLAN Network Identifier (VNI) | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
+ *
* VXLAN Header: This is an 8-byte field that has:
- *
+ *
* - Flags (8 bits): where the I flag MUST be set to 1 for a valid
* VXLAN Network ID (VNI). The other 7 bits (designated "R") are
* reserved fields and MUST be set to zero on transmission and
* ignored on receipt.
- *
+ *
* - VXLAN Segment ID/VXLAN Network Identifier (VNI): this is a
* 24-bit value used to designate the individual VXLAN overlay
* network on which the communicating VMs are situated. VMs in
* different VXLAN overlay networks cannot communicate with each
* other.
- *
+ *
* - Reserved fields (24 bits and 8 bits): MUST be set to zero on
* transmission and ignored on receipt.
- *
+ *
*/
-typedef struct {
+typedef struct
+{
u8 flags;
u8 res1;
u8 res2;
@@ -51,7 +52,8 @@ typedef struct {
#define VXLAN_FLAGS_I 0x08
-static inline u32 vnet_get_vni (vxlan_header_t * h)
+static inline u32
+vnet_get_vni (vxlan_header_t * h)
{
u32 vni_reserved_host_byte_order;
@@ -59,11 +61,20 @@ static inline u32 vnet_get_vni (vxlan_header_t * h)
return vni_reserved_host_byte_order >> 8;
}
-static inline void vnet_set_vni_and_flags (vxlan_header_t * h, u32 vni)
+static inline void
+vnet_set_vni_and_flags (vxlan_header_t * h, u32 vni)
{
- h->vni_reserved = clib_host_to_net_u32 (vni<<8);
- * (u32 *) h = 0;
+ h->vni_reserved = clib_host_to_net_u32 (vni << 8);
+ *(u32 *) h = 0;
h->flags = VXLAN_FLAGS_I;
}
#endif /* __included_vxlan_packet_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
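
For reference, the byte layout behind the accessors restyled above: the
24-bit VNI occupies the upper three bytes of the second header word, so a
writer shifts it left by 8 before the host-to-network swap and a reader
shifts right by 8 after swapping back. A self-contained sketch under that
assumption (vxlan_header_demo_t, set_vni_and_flags() and get_vni() are
hypothetical mirrors of the real definitions, with htonl()/ntohl() standing
in for clib_host_to_net_u32()/clib_net_to_host_u32()):

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

#define VXLAN_FLAGS_I 0x08      /* I bit MUST be set for a valid VNI */

typedef struct
{
  uint8_t flags, res1, res2, res3;
  uint32_t vni_reserved;        /* VNI in upper 24 bits, low byte reserved */
} vxlan_header_demo_t;

static void
set_vni_and_flags (vxlan_header_demo_t * h, uint32_t vni)
{
  h->vni_reserved = htonl (vni << 8);   /* VNI << 8, network byte order */
  h->res1 = h->res2 = h->res3 = 0;      /* reserved: zero on transmission */
  h->flags = VXLAN_FLAGS_I;
}

static uint32_t
get_vni (const vxlan_header_demo_t * h)
{
  return ntohl (h->vni_reserved) >> 8;  /* drop the reserved low byte */
}

int
main (void)
{
  vxlan_header_demo_t h;
  set_vni_and_flags (&h, 0xabcdef);
  assert (get_vni (&h) == 0xabcdef);
  assert (h.flags == VXLAN_FLAGS_I);
  return 0;
}

One ordering subtlety: the real vnet_set_vni_and_flags() zeroes the whole
first word and only then sets the I flag; the demo sidesteps that by
clearing the reserved bytes individually.
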