Diffstat (limited to 'vnet')
-rw-r--r--vnet/vnet/api_errno.h3
-rw-r--r--vnet/vnet/ip/udp.h1
-rw-r--r--vnet/vnet/vxlan/decap.c241
-rw-r--r--vnet/vnet/vxlan/encap.c282
-rw-r--r--vnet/vnet/vxlan/vxlan.c209
-rw-r--r--vnet/vnet/vxlan/vxlan.h58
6 files changed, 628 insertions, 166 deletions
diff --git a/vnet/vnet/api_errno.h b/vnet/vnet/api_errno.h
index 77c14a4a..04779f0b 100644
--- a/vnet/vnet/api_errno.h
+++ b/vnet/vnet/api_errno.h
@@ -71,7 +71,8 @@ _(RESPONSE_NOT_READY, -77, "Response not ready") \
_(NOT_CONNECTED, -78, "Not connected to the data plane") \
_(IF_ALREADY_EXISTS, -79, "Interface already exists") \
_(BOND_SLAVE_NOT_ALLOWED, -80, "Operation not allowed on slave of BondEthernet") \
-_(VALUE_EXIST, -81, "Value already exists")
+_(VALUE_EXIST, -81, "Value already exists") \
+_(SAME_SRC_DST, -82, "Source and destination are the same")
typedef enum {
#define _(a,b,c) VNET_API_ERROR_##a = (b),
diff --git a/vnet/vnet/ip/udp.h b/vnet/vnet/ip/udp.h
index 26056bd6..26576a45 100644
--- a/vnet/vnet/ip/udp.h
+++ b/vnet/vnet/ip/udp.h
@@ -41,6 +41,7 @@ _ (4341, lisp_gpe) \
_ (4342, lisp_cp) \
_ (4739, ipfix) \
_ (4789, vxlan) \
+_ (4789, vxlan6) \
_ (4790, vxlan_gpe) \
_ (6633, vpath_3)
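For reference, each `_ (port, name)` entry in the list above is expanded elsewhere in udp.h into a `UDP_DST_PORT_<name>` constant, which is how the `UDP_DST_PORT_vxlan6` symbol used by `vxlan_init()` further down comes into existence. A minimal, self-contained sketch of that X-macro pattern; the `example_`/`EXAMPLE_` names are illustrative, not taken from the tree:

```c
#include <stdio.h>

/* Illustrative port list mirroring the udp.h entries touched above. */
#define foreach_example_udp_dst_port \
_ (4789, vxlan)                      \
_ (4789, vxlan6)                     \
_ (4790, vxlan_gpe)

/* Assumed expansion pattern: one enum constant per (port, name) pair.
 * Two names may share the same port value, as vxlan/vxlan6 do here. */
typedef enum
{
#define _(n, s) EXAMPLE_UDP_DST_PORT_##s = n,
  foreach_example_udp_dst_port
#undef _
} example_udp_dst_port_t;

int
main (void)
{
  printf ("vxlan=%d vxlan6=%d\n",
          EXAMPLE_UDP_DST_PORT_vxlan, EXAMPLE_UDP_DST_PORT_vxlan6);
  return 0;
}
```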
diff --git a/vnet/vnet/vxlan/decap.c b/vnet/vnet/vxlan/decap.c
index 7789bed9..bd61f4bb 100644
--- a/vnet/vnet/vxlan/decap.c
+++ b/vnet/vnet/vxlan/decap.c
@@ -47,22 +47,24 @@ static u8 * format_vxlan_rx_trace (u8 * s, va_list * args)
return s;
}
-static uword
+always_inline uword
vxlan_input (vlib_main_t * vm,
vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+ vlib_frame_t * from_frame,
+ char is_ip4)
{
u32 n_left_from, next_index, * from, * to_next;
vxlan_main_t * vxm = &vxlan_main;
vnet_main_t * vnm = vxm->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 last_tunnel_index = ~0;
- vxlan_tunnel_key_t last_key;
+ vxlan4_tunnel_key_t last_key4;
+ vxlan6_tunnel_key_t last_key6;
u32 pkts_decapsulated = 0;
u32 cpu_index = os_get_cpu_number();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
- last_key.as_u64 = ~0;
+ last_key4.as_u64 = ~0;
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
@@ -82,12 +84,14 @@ vxlan_input (vlib_main_t * vm,
u32 bi0, bi1;
vlib_buffer_t * b0, * b1;
u32 next0, next1;
- ip4_header_t * ip0, * ip1;
+ ip4_header_t * ip4_0, * ip4_1;
+ ip6_header_t * ip6_0, * ip6_1;
vxlan_header_t * vxlan0, * vxlan1;
uword * p0, * p1;
u32 tunnel_index0, tunnel_index1;
vxlan_tunnel_t * t0, * t1;
- vxlan_tunnel_key_t key0, key1;
+ vxlan4_tunnel_key_t key4_0, key4_1;
+ vxlan6_tunnel_key_t key6_0, key6_1;
u32 error0, error1;
u32 sw_if_index0, sw_if_index1, len0, len1;
@@ -121,18 +125,34 @@ vxlan_input (vlib_main_t * vm,
vxlan0 = vlib_buffer_get_current (b0);
vxlan1 = vlib_buffer_get_current (b1);
+ if (is_ip4) {
vlib_buffer_advance
(b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
vlib_buffer_advance
(b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
- ip0 = vlib_buffer_get_current (b0);
- ip1 = vlib_buffer_get_current (b1);
+ ip4_0 = vlib_buffer_get_current (b0);
+ ip4_1 = vlib_buffer_get_current (b1);
+ } else {
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
+ vlib_buffer_advance
+ (b1, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
+ ip6_0 = vlib_buffer_get_current (b0);
+ ip6_1 = vlib_buffer_get_current (b1);
+ }
/* pop (ip, udp, vxlan) */
+ if (is_ip4) {
+ vlib_buffer_advance
+ (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
vlib_buffer_advance
- (b0, sizeof(*ip0)+sizeof(udp_header_t)+sizeof(*vxlan0));
+ (b1, sizeof(*ip4_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
+ } else {
vlib_buffer_advance
- (b1, sizeof(*ip1)+sizeof(udp_header_t)+sizeof(*vxlan1));
+ (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
+ vlib_buffer_advance
+ (b1, sizeof(*ip6_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
+ }
tunnel_index0 = ~0;
error0 = 0;
@@ -140,12 +160,34 @@ vxlan_input (vlib_main_t * vm,
tunnel_index1 = ~0;
error1 = 0;
- key0.src = ip0->src_address.as_u32;
- key0.vni = vxlan0->vni_reserved;
-
- if (PREDICT_FALSE (key0.as_u64 != last_key.as_u64))
+ if (is_ip4) {
+ key4_0.src = ip4_0->src_address.as_u32;
+ key4_0.vni = vxlan0->vni_reserved;
+
+ if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
+ {
+ p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
+
+ if (p0 == 0)
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace0;
+ }
+
+ last_key4.as_u64 = key4_0.as_u64;
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ } else /* !is_ip4 */ {
+ key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
+ key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
+ key6_0.vni = vxlan0->vni_reserved;
+
+ if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
{
- p0 = hash_get (vxm->vxlan_tunnel_by_key, key0.as_u64);
+ p0 = hash_get (vxm->vxlan6_tunnel_by_key, pointer_to_uword(&key6_0));
if (p0 == 0)
{
@@ -154,27 +196,29 @@ vxlan_input (vlib_main_t * vm,
goto trace0;
}
- last_key.as_u64 = key0.as_u64;
+ last_key6 = key6_0;
tunnel_index0 = last_tunnel_index = p0[0];
}
else
tunnel_index0 = last_tunnel_index;
+ }
t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
next0 = t0->decap_next_index;
- sw_if_index0 = t0->sw_if_index;
- len0 = vlib_buffer_length_in_chain (vm, b0);
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
/* Required to make the l2 tag push / pop code work on l2 subifs */
- vnet_update_l2_len (b0);
+ if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
+ vnet_update_l2_len (b0);
/* Set input sw_if_index to VXLAN tunnel for learning */
vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
pkts_decapsulated ++;
- stats_n_packets += 1;
- stats_n_bytes += len0;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
/* Batch stats increment on the same vxlan tunnel so counter
is not incremented per packet */
@@ -205,12 +249,35 @@ vxlan_input (vlib_main_t * vm,
tr->vni = vnet_get_vni (vxlan0);
}
- key1.src = ip1->src_address.as_u32;
- key1.vni = vxlan1->vni_reserved;
- if (PREDICT_FALSE (key1.as_u64 != last_key.as_u64))
+ if (is_ip4) {
+ key4_1.src = ip4_1->src_address.as_u32;
+ key4_1.vni = vxlan1->vni_reserved;
+
+ if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
+ {
+ p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
+
+ if (p1 == 0)
+ {
+ error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_INPUT_NEXT_DROP;
+ goto trace1;
+ }
+
+ last_key4.as_u64 = key4_1.as_u64;
+ tunnel_index1 = last_tunnel_index = p1[0];
+ }
+ else
+ tunnel_index1 = last_tunnel_index;
+ } else /* !is_ip4 */ {
+ key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
+ key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
+ key6_1.vni = vxlan1->vni_reserved;
+
+ if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
{
- p1 = hash_get (vxm->vxlan_tunnel_by_key, key1.as_u64);
+ p1 = hash_get (vxm->vxlan6_tunnel_by_key, pointer_to_uword(&key6_1));
if (p1 == 0)
{
@@ -219,27 +286,29 @@ vxlan_input (vlib_main_t * vm,
goto trace1;
}
- last_key.as_u64 = key1.as_u64;
+ last_key6 = key6_1;
tunnel_index1 = last_tunnel_index = p1[0];
}
else
tunnel_index1 = last_tunnel_index;
+ }
t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
next1 = t1->decap_next_index;
- sw_if_index1 = t1->sw_if_index;
- len1 = vlib_buffer_length_in_chain (vm, b1);
+ sw_if_index1 = t1->sw_if_index;
+ len1 = vlib_buffer_length_in_chain (vm, b1);
/* Required to make the l2 tag push / pop code work on l2 subifs */
- vnet_update_l2_len (b1);
+ if (PREDICT_TRUE(next1 == VXLAN_INPUT_NEXT_L2_INPUT))
+ vnet_update_l2_len (b1);
/* Set input sw_if_index to VXLAN tunnel for learning */
vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
pkts_decapsulated ++;
- stats_n_packets += 1;
- stats_n_bytes += len1;
+ stats_n_packets += 1;
+ stats_n_bytes += len1;
/* Batch stats increment on the same vxlan tunnel so counter
is not incremented per packet */
@@ -280,12 +349,14 @@ vxlan_input (vlib_main_t * vm,
u32 bi0;
vlib_buffer_t * b0;
u32 next0;
- ip4_header_t * ip0;
+ ip4_header_t * ip4_0;
+ ip6_header_t * ip6_0;
vxlan_header_t * vxlan0;
uword * p0;
u32 tunnel_index0;
vxlan_tunnel_t * t0;
- vxlan_tunnel_key_t key0;
+ vxlan4_tunnel_key_t key4_0;
+ vxlan6_tunnel_key_t key6_0;
u32 error0;
u32 sw_if_index0, len0;
@@ -301,23 +372,56 @@ vxlan_input (vlib_main_t * vm,
/* udp leaves current_data pointing at the vxlan header */
vxlan0 = vlib_buffer_get_current (b0);
+ if (is_ip4) {
vlib_buffer_advance
(b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
- ip0 = vlib_buffer_get_current (b0);
+ ip4_0 = vlib_buffer_get_current (b0);
+ } else {
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
+ ip6_0 = vlib_buffer_get_current (b0);
+ }
/* pop (ip, udp, vxlan) */
+ if (is_ip4) {
+ vlib_buffer_advance
+ (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
+ } else {
vlib_buffer_advance
- (b0, sizeof(*ip0)+sizeof(udp_header_t)+sizeof(*vxlan0));
+ (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
+ }
tunnel_index0 = ~0;
error0 = 0;
- key0.src = ip0->src_address.as_u32;
- key0.vni = vxlan0->vni_reserved;
-
- if (PREDICT_FALSE (key0.as_u64 != last_key.as_u64))
+ if (is_ip4) {
+ key4_0.src = ip4_0->src_address.as_u32;
+ key4_0.vni = vxlan0->vni_reserved;
+
+ if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
+ {
+ p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
+
+ if (p0 == 0)
+ {
+ error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+
+ last_key4.as_u64 = key4_0.as_u64;
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ } else /* !is_ip4 */ {
+ key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
+ key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
+ key6_0.vni = vxlan0->vni_reserved;
+
+ if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
{
- p0 = hash_get (vxm->vxlan_tunnel_by_key, key0.as_u64);
+ p0 = hash_get (vxm->vxlan6_tunnel_by_key, pointer_to_uword(&key6_0));
if (p0 == 0)
{
@@ -326,27 +430,29 @@ vxlan_input (vlib_main_t * vm,
goto trace00;
}
- last_key.as_u64 = key0.as_u64;
+ last_key6 = key6_0;
tunnel_index0 = last_tunnel_index = p0[0];
}
else
tunnel_index0 = last_tunnel_index;
+ }
t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
next0 = t0->decap_next_index;
- sw_if_index0 = t0->sw_if_index;
- len0 = vlib_buffer_length_in_chain (vm, b0);
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
/* Required to make the l2 tag push / pop code work on l2 subifs */
- vnet_update_l2_len (b0);
+ if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
+ vnet_update_l2_len (b0);
/* Set input sw_if_index to VXLAN tunnel for learning */
vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
pkts_decapsulated ++;
- stats_n_packets += 1;
- stats_n_bytes += len0;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
/* Batch stats increment on the same vxlan tunnel so counter
is not incremented per packet */
@@ -400,6 +506,22 @@ vxlan_input (vlib_main_t * vm,
return from_frame->n_vectors;
}
+static uword
+vxlan4_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_input(vm, node, from_frame, /* is_ip4 */ 1);
+}
+
+static uword
+vxlan6_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_input(vm, node, from_frame, /* is_ip4 */ 0);
+}
+
static char * vxlan_error_strings[] = {
#define vxlan_error(n,s) s,
#include <vnet/vxlan/vxlan_error.def>
@@ -407,9 +529,30 @@ static char * vxlan_error_strings[] = {
#undef _
};
-VLIB_REGISTER_NODE (vxlan_input_node) = {
- .function = vxlan_input,
- .name = "vxlan-input",
+VLIB_REGISTER_NODE (vxlan4_input_node) = {
+ .function = vxlan4_input,
+ .name = "vxlan4-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .n_errors = VXLAN_N_ERROR,
+ .error_strings = vxlan_error_strings,
+
+ .n_next_nodes = VXLAN_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
+ foreach_vxlan_input_next
+#undef _
+ },
+
+//temp .format_buffer = format_vxlan_header,
+ .format_trace = format_vxlan_rx_trace,
+ // $$$$ .unformat_buffer = unformat_vxlan_header,
+};
+
+VLIB_REGISTER_NODE (vxlan6_input_node) = {
+ .function = vxlan6_input,
+ .name = "vxlan6-input",
/* Takes a vector of packets. */
.vector_size = sizeof (u32),
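On the IPv4 decap path above, the per-packet tunnel lookup stays cheap because the (source address, VNI) pair is packed into a single u64 and compared against the previous packet's key before falling back to `hash_get`. A small self-contained sketch of that packing, mirroring the `vxlan4_tunnel_key_t` union declared in vxlan.h; the `example_` type and the literal field values are illustrative only:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors vxlan4_tunnel_key_t: two 32-bit fields that can also be read
 * as one 64-bit word, so "same tunnel as the previous packet?" is a
 * single integer comparison on the decap fast path. */
typedef union
{
  struct
  {
    uint32_t src;  /* outer IPv4 source address, network byte order */
    uint32_t vni;  /* VNI << 8 (vni_reserved field), network byte order */
  };
  uint64_t as_u64;
} example_vxlan4_key_t;

int
main (void)
{
  example_vxlan4_key_t last_key = { .as_u64 = ~0ULL };
  example_vxlan4_key_t key = { .src = 0x0a000001u, .vni = 13u << 8 };

  if (key.as_u64 != last_key.as_u64)
    printf ("key changed: do the vxlan4_tunnel_by_key hash lookup\n");
  else
    printf ("same key: reuse last_tunnel_index\n");
  return 0;
}
```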
diff --git a/vnet/vnet/vxlan/encap.c b/vnet/vnet/vxlan/encap.c
index 90854ad0..ee5c7d5e 100644
--- a/vnet/vnet/vxlan/encap.c
+++ b/vnet/vnet/vxlan/encap.c
@@ -39,6 +39,7 @@ typedef enum {
typedef enum {
VXLAN_ENCAP_NEXT_IP4_LOOKUP,
+ VXLAN_ENCAP_NEXT_IP6_LOOKUP,
VXLAN_ENCAP_NEXT_DROP,
VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;
@@ -59,9 +60,13 @@ u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
return s;
}
-#define foreach_fixed_header_offset \
+
+#define foreach_fixed_header4_offset \
_(0) _(1) _(2) _(3)
+#define foreach_fixed_header6_offset \
+ _(0) _(1) _(2) _(3) _(4) _(5) _(6)
+
static uword
vxlan_encap (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -99,7 +104,8 @@ vxlan_encap (vlib_main_t * vm,
u32 next1 = VXLAN_ENCAP_NEXT_IP4_LOOKUP;
u32 sw_if_index0, sw_if_index1, len0, len1;
vnet_hw_interface_t * hi0, * hi1;
- ip4_header_t * ip0, * ip1;
+ ip4_header_t * ip4_0, * ip4_1;
+ ip6_header_t * ip6_0, * ip6_1;
udp_header_t * udp0, * udp1;
u64 * copy_src0, * copy_dst0;
u64 * copy_src1, * copy_dst1;
@@ -108,6 +114,7 @@ vxlan_encap (vlib_main_t * vm,
vxlan_tunnel_t * t0, * t1;
u16 new_l0, new_l1;
ip_csum_t sum0, sum1;
+ u8 is_ip4_0, is_ip4_1;
/* Prefetch next iteration. */
{
@@ -147,14 +154,22 @@ vxlan_encap (vlib_main_t * vm,
t0 = &vxm->tunnels[hi0->dev_instance];
t1 = &vxm->tunnels[hi1->dev_instance];
+ is_ip4_0 = (t0->flags & VXLAN_TUNNEL_IS_IPV4);
+ is_ip4_1 = (t1->flags & VXLAN_TUNNEL_IS_IPV4);
+
+ if (PREDICT_FALSE(!is_ip4_0)) next0 = VXLAN_ENCAP_NEXT_IP6_LOOKUP;
+ if (PREDICT_FALSE(!is_ip4_1)) next1 = VXLAN_ENCAP_NEXT_IP6_LOOKUP;
+
/* Check rewrite string and drop packet if tunnel is deleted */
- if (PREDICT_FALSE(t0->rewrite == vxlan_dummy_rewrite))
+ if (PREDICT_FALSE(t0->rewrite == vxlan4_dummy_rewrite ||
+ t0->rewrite == vxlan6_dummy_rewrite))
{
next0 = VXLAN_ENCAP_NEXT_DROP;
b0->error = node->errors[VXLAN_ENCAP_ERROR_DEL_TUNNEL];
pkts_encapsulated --;
} /* Still go through normal encap with dummy rewrite */
- if (PREDICT_FALSE(t1->rewrite == vxlan_dummy_rewrite))
+ if (PREDICT_FALSE(t1->rewrite == vxlan4_dummy_rewrite ||
+ t1->rewrite == vxlan6_dummy_rewrite))
{
next1 = VXLAN_ENCAP_NEXT_DROP;
b1->error = node->errors[VXLAN_ENCAP_ERROR_DEL_TUNNEL];
@@ -162,66 +177,117 @@ vxlan_encap (vlib_main_t * vm,
} /* Still go through normal encap with dummy rewrite */
/* IP4 VXLAN header sizeof(ip4_vxlan_header_t) should be 36 octets */
- ASSERT(vec_len(t0->rewrite) == 36);
- ASSERT(vec_len(t1->rewrite) == 36);
+ /* IP6 VXLAN header sizeof(ip6_vxlan_header_t) should be 56 octets */
+ if (PREDICT_TRUE(is_ip4_0))
+ ASSERT(vec_len(t0->rewrite) == 36);
+ else
+ ASSERT(vec_len(t0->rewrite) == 56);
+ if (PREDICT_TRUE(is_ip4_1))
+ ASSERT(vec_len(t1->rewrite) == 36);
+ else
+ ASSERT(vec_len(t1->rewrite) == 56);
/* Apply the rewrite string. $$$$ vnet_rewrite? */
vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
- ip0 = vlib_buffer_get_current(b0);
- ip1 = vlib_buffer_get_current(b1);
- /* Copy the fixed header */
- copy_dst0 = (u64 *) ip0;
+ /* assign both v4 and v6; avoid a branch, optimizer will help us */
+ ip4_0 = vlib_buffer_get_current(b0);
+ ip6_0 = (void *)ip4_0;
+ ip4_1 = vlib_buffer_get_current(b1);
+ ip6_1 = (void *)ip4_1;
+
+ /* Copy the fixed header (v4 and v6 variables point to the same
+ * place at this point)
+ */
+ copy_dst0 = (u64 *) ip4_0;
copy_src0 = (u64 *) t0->rewrite;
- copy_dst1 = (u64 *) ip1;
+
+ copy_dst1 = (u64 *) ip4_1;
copy_src1 = (u64 *) t1->rewrite;
- /* Copy first 32 octets 8-bytes at a time */
+ /* Copy first 32 (ip4)/56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
- foreach_fixed_header_offset;
+ if (PREDICT_TRUE(is_ip4_0)) {
+ foreach_fixed_header4_offset;
+ } else {
+ foreach_fixed_header6_offset;
+ }
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
- foreach_fixed_header_offset;
+ if (PREDICT_TRUE(is_ip4_1)) {
+ foreach_fixed_header4_offset;
+ } else {
+ foreach_fixed_header6_offset;
+ }
#undef _
-
/* Last 4 octets. Hopefully gcc will be our friend */
- copy_dst_last0 = (u32 *)(&copy_dst0[4]);
- copy_src_last0 = (u32 *)(&copy_src0[4]);
- copy_dst_last1 = (u32 *)(&copy_dst1[4]);
- copy_src_last1 = (u32 *)(&copy_src1[4]);
-
- copy_dst_last0[0] = copy_src_last0[0];
- copy_dst_last1[0] = copy_src_last1[0];
-
- /* fix the <bleep>ing outer-IP checksum */
- sum0 = ip0->checksum;
- /* old_l0 always 0, see the rewrite setup */
- new_l0 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
-
- sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
- length /* changed member */);
- ip0->checksum = ip_csum_fold (sum0);
- ip0->length = new_l0;
-
- sum1 = ip1->checksum;
- /* old_l1 always 0, see the rewrite setup */
- new_l1 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
-
- sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
- length /* changed member */);
- ip1->checksum = ip_csum_fold (sum1);
- ip1->length = new_l1;
+ if (PREDICT_TRUE(is_ip4_0)) {
+ copy_dst_last0 = (u32 *)(&copy_dst0[4]);
+ copy_src_last0 = (u32 *)(&copy_src0[4]);
+ copy_dst_last0[0] = copy_src_last0[0];
+ }
+ if (PREDICT_TRUE(is_ip4_1)) {
+ copy_dst_last1 = (u32 *)(&copy_dst1[4]);
+ copy_src_last1 = (u32 *)(&copy_src1[4]);
+ copy_dst_last1[0] = copy_src_last1[0];
+ }
+
+ if (PREDICT_TRUE(is_ip4_0)) {
+ /* fix the <bleep>ing outer-IP checksum */
+ sum0 = ip4_0->checksum;
+
+ /* old_l0 always 0, see the rewrite setup */
+ new_l0 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ length /* changed member */);
+ ip4_0->checksum = ip_csum_fold (sum0);
+ ip4_0->length = new_l0;
+ } else {
+ new_l0 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof(*ip6_0));
+ ip6_0->payload_length = new_l0;
+ }
+
+ if (PREDICT_TRUE(is_ip4_1)) {
+ /* fix the <bleep>ing outer-IP checksum */
+ sum1 = ip4_1->checksum;
+
+ /* old_l1 always 0, see the rewrite setup */
+ new_l1 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
+ sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
+ length /* changed member */);
+ ip4_1->checksum = ip_csum_fold (sum1);
+ ip4_1->length = new_l1;
+ } else {
+ new_l1 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
+ - sizeof(*ip6_1));
+ ip6_1->payload_length = new_l1;
+ }
/* Fix UDP length */
- udp0 = (udp_header_t *)(ip0+1);
- new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
- - sizeof (*ip0));
- udp1 = (udp_header_t *)(ip1+1);
- new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
- - sizeof (*ip1));
+ if (PREDICT_TRUE(is_ip4_0)) {
+ udp0 = (udp_header_t *)(ip4_0+1);
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof (*ip4_0));
+ } else {
+ udp0 = (udp_header_t *)(ip6_0+1);
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof (*ip6_0));
+ }
+ if (PREDICT_TRUE(is_ip4_1)) {
+ udp1 = (udp_header_t *)(ip4_1+1);
+ new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
+ - sizeof (*ip4_1));
+ } else {
+ udp1 = (udp_header_t *)(ip6_1+1);
+ new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
+ - sizeof (*ip6_1));
+ }
udp0->length = new_l0;
udp0->src_port = flow_hash0;
@@ -229,6 +295,26 @@ vxlan_encap (vlib_main_t * vm,
udp1->length = new_l1;
udp1->src_port = flow_hash1;
+ if (PREDICT_FALSE(!is_ip4_0)) {
+ int bogus = 0;
+ /* IPv6 UDP checksum is mandatory */
+ udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
+ ip6_0, &bogus);
+ ASSERT(bogus == 0);
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ }
+
+ if (PREDICT_FALSE(!is_ip4_1)) {
+ int bogus = 0;
+ /* IPv6 UDP checksum is mandatory */
+ udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
+ ip6_1, &bogus);
+ ASSERT(bogus == 0);
+ if (udp1->checksum == 0)
+ udp1->checksum = 0xffff;
+ }
+
/* Reset to look up tunnel partner in the configured FIB */
vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
@@ -301,13 +387,15 @@ vxlan_encap (vlib_main_t * vm,
u32 next0 = VXLAN_ENCAP_NEXT_IP4_LOOKUP;
u32 sw_if_index0, len0;
vnet_hw_interface_t * hi0;
- ip4_header_t * ip0;
+ ip4_header_t * ip4_0;
+ ip6_header_t * ip6_0;
udp_header_t * udp0;
u64 * copy_src0, * copy_dst0;
u32 * copy_src_last0, * copy_dst_last0;
vxlan_tunnel_t * t0;
u16 new_l0;
ip_csum_t sum0;
+ u8 is_ip4_0;
bi0 = from[0];
to_next[0] = bi0;
@@ -326,54 +414,101 @@ vxlan_encap (vlib_main_t * vm,
t0 = &vxm->tunnels[hi0->dev_instance];
+ is_ip4_0 = (t0->flags & VXLAN_TUNNEL_IS_IPV4);
+
+ if (PREDICT_FALSE(!is_ip4_0)) next0 = VXLAN_ENCAP_NEXT_IP6_LOOKUP;
+
/* Check rewrite string and drop packet if tunnel is deleted */
- if (PREDICT_FALSE(t0->rewrite == vxlan_dummy_rewrite))
+ if (PREDICT_FALSE(t0->rewrite == vxlan4_dummy_rewrite ||
+ t0->rewrite == vxlan6_dummy_rewrite))
{
next0 = VXLAN_ENCAP_NEXT_DROP;
b0->error = node->errors[VXLAN_ENCAP_ERROR_DEL_TUNNEL];
pkts_encapsulated --;
} /* Still go through normal encap with dummy rewrite */
- /* IP4 VXLAN header sizeof(ip4_vxlan_header_t) should be 36 octects */
- ASSERT(vec_len(t0->rewrite) == 36);
+
+ /* IP4 VXLAN header sizeof(ip4_vxlan_header_t) should be 36 octets */
+ /* IP6 VXLAN header sizeof(ip6_vxlan_header_t) should be 56 octets */
+ if (PREDICT_TRUE(is_ip4_0))
+ ASSERT(vec_len(t0->rewrite) == 36);
+ else
+ ASSERT(vec_len(t0->rewrite) == 56);
/* Apply the rewrite string. $$$$ vnet_rewrite? */
vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
- ip0 = vlib_buffer_get_current(b0);
- /* Copy the fixed header */
- copy_dst0 = (u64 *) ip0;
+ /* assign both v4 and v6; avoid a branch, optimizer will help us */
+ ip4_0 = vlib_buffer_get_current(b0);
+ ip6_0 = (void *)ip4_0;
+
+ /* Copy the fixed header (v4 and v6 variables point to the same
+ * place at this point)
+ */
+ copy_dst0 = (u64 *) ip4_0;
copy_src0 = (u64 *) t0->rewrite;
/* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
- foreach_fixed_header_offset;
+ if (PREDICT_TRUE(is_ip4_0)) {
+ foreach_fixed_header4_offset;
+ } else {
+ foreach_fixed_header6_offset;
+ }
#undef _
- /* Last 4 octets. Hopefully gcc will be our friend */
- copy_dst_last0 = (u32 *)(&copy_dst0[4]);
- copy_src_last0 = (u32 *)(&copy_src0[4]);
+ if (PREDICT_TRUE(is_ip4_0)) {
+ /* Last 4 octets. Hopefully gcc will be our friend */
+ copy_dst_last0 = (u32 *)(&copy_dst0[4]);
+ copy_src_last0 = (u32 *)(&copy_src0[4]);
- copy_dst_last0[0] = copy_src_last0[0];
+ copy_dst_last0[0] = copy_src_last0[0];
+ }
- /* fix the <bleep>ing outer-IP checksum */
- sum0 = ip0->checksum;
- /* old_l0 always 0, see the rewrite setup */
- new_l0 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
-
- sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+ if (PREDICT_TRUE(is_ip4_0)) {
+ /* fix the <bleep>ing outer-IP checksum */
+ sum0 = ip4_0->checksum;
+
+ /* old_l0 always 0, see the rewrite setup */
+ new_l0 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
length /* changed member */);
- ip0->checksum = ip_csum_fold (sum0);
- ip0->length = new_l0;
+ ip4_0->checksum = ip_csum_fold (sum0);
+ ip4_0->length = new_l0;
+ } else {
+ new_l0 =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof(*ip6_0));
+ ip6_0->payload_length = new_l0;
+ }
/* Fix UDP length */
- udp0 = (udp_header_t *)(ip0+1);
- new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
- - sizeof (*ip0));
+ if (PREDICT_TRUE(is_ip4_0)) {
+ udp0 = (udp_header_t *)(ip4_0+1);
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof (*ip4_0));
+ } else {
+ udp0 = (udp_header_t *)(ip6_0+1);
+ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+ - sizeof (*ip6_0));
+ }
udp0->length = new_l0;
udp0->src_port = flow_hash0;
+ if (PREDICT_FALSE(!is_ip4_0)) {
+ int bogus = 0;
+ /* IPv6 UDP checksum is mandatory */
+ udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
+ ip6_0, &bogus);
+ ASSERT(bogus == 0);
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ }
+
+
+ /* vnet_update_l2_len (b0); do we need this? cluke */
+
/* Reset to look up tunnel partner in the configured FIB */
vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
@@ -447,6 +582,7 @@ VLIB_REGISTER_NODE (vxlan_encap_node) = {
.next_nodes = {
[VXLAN_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [VXLAN_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
[VXLAN_ENCAP_NEXT_DROP] = "error-drop",
},
};
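The encap path above asserts rewrite lengths of 36 bytes (IPv4) and 56 bytes (IPv6) and copies them as whole 8-byte words: four words plus a trailing 4-byte word for IPv4, exactly seven words for IPv6. A self-contained check of that arithmetic; the header sizes are the protocol-fixed ones and the `EX_` names are illustrative:

```c
#include <assert.h>

enum
{
  EX_IP4_HDR = 20,   /* ip4_header_t without options */
  EX_IP6_HDR = 40,   /* fixed ip6_header_t */
  EX_UDP_HDR = 8,
  EX_VXLAN_HDR = 8,
};

int
main (void)
{
  /* IPv4 rewrite: 36 bytes = foreach_fixed_header4_offset _(0).._(3)
   * (four u64 copies) plus the single trailing u32 copy. */
  assert (EX_IP4_HDR + EX_UDP_HDR + EX_VXLAN_HDR == 36);
  assert (4 * 8 + 4 == 36);

  /* IPv6 rewrite: 56 bytes = foreach_fixed_header6_offset _(0).._(6)
   * (seven u64 copies), no trailing partial word needed. */
  assert (EX_IP6_HDR + EX_UDP_HDR + EX_VXLAN_HDR == 56);
  assert (7 * 8 == 56);
  return 0;
}
```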
diff --git a/vnet/vnet/vxlan/vxlan.c b/vnet/vnet/vxlan/vxlan.c
index 6e384288..89e56e78 100644
--- a/vnet/vnet/vxlan/vxlan.c
+++ b/vnet/vnet/vxlan/vxlan.c
@@ -13,6 +13,7 @@
* limitations under the License.
*/
#include <vnet/vxlan/vxlan.h>
+#include <vnet/ip/format.h>
vxlan_main_t vxlan_main;
@@ -44,8 +45,8 @@ u8 * format_vxlan_tunnel (u8 * s, va_list * args)
s = format (s,
"[%d] %U (src) %U (dst) vni %d encap_fib_index %d",
t - ngm->tunnels,
- format_ip4_address, &t->src,
- format_ip4_address, &t->dst,
+ format_ip46_address, &t->src,
+ format_ip46_address, &t->dst,
t->vni,
t->encap_fib_index);
s = format (s, " decap_next %U\n", format_decap_next, t->decap_next_index);
@@ -109,13 +110,23 @@ VNET_HW_INTERFACE_CLASS (vxlan_hw_class) = {
};
#define foreach_copy_field \
-_(src.as_u32) \
-_(dst.as_u32) \
_(vni) \
_(encap_fib_index) \
_(decap_next_index)
-static int vxlan_rewrite (vxlan_tunnel_t * t)
+#define foreach_copy_ipv4 { \
+ _(src.ip4.as_u32) \
+ _(dst.ip4.as_u32) \
+}
+
+#define foreach_copy_ipv6 { \
+ _(src.ip6.as_u64[0]) \
+ _(src.ip6.as_u64[1]) \
+ _(dst.ip6.as_u64[0]) \
+ _(dst.ip6.as_u64[1]) \
+}
+
+static int vxlan4_rewrite (vxlan_tunnel_t * t)
{
u8 *rw = 0;
ip4_header_t * ip0;
@@ -133,8 +144,8 @@ static int vxlan_rewrite (vxlan_tunnel_t * t)
ip0->protocol = IP_PROTOCOL_UDP;
/* we fix up the ip4 header length and checksum after-the-fact */
- ip0->src_address.as_u32 = t->src.as_u32;
- ip0->dst_address.as_u32 = t->dst.as_u32;
+ ip0->src_address.as_u32 = t->src.ip4.as_u32;
+ ip0->dst_address.as_u32 = t->dst.ip4.as_u32;
ip0->checksum = ip4_header_checksum (ip0);
/* UDP header, randomize src port on something, maybe? */
@@ -148,6 +159,39 @@ static int vxlan_rewrite (vxlan_tunnel_t * t)
return (0);
}
+static int vxlan6_rewrite (vxlan_tunnel_t * t)
+{
+ u8 *rw = 0;
+ ip6_header_t * ip0;
+ ip6_vxlan_header_t * h0;
+ int len = sizeof (*h0);
+
+ vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+
+ h0 = (ip6_vxlan_header_t *) rw;
+
+ /* Fixed portion of the (outer) ip6 header */
+ ip0 = &h0->ip6;
+ ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(6 << 28);
+ ip0->hop_limit = 255;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ ip0->src_address.as_u64[0] = t->src.ip6.as_u64[0];
+ ip0->src_address.as_u64[1] = t->src.ip6.as_u64[1];
+ ip0->dst_address.as_u64[0] = t->dst.ip6.as_u64[0];
+ ip0->dst_address.as_u64[1] = t->dst.ip6.as_u64[1];
+
+ /* UDP header, randomize src port on something, maybe? */
+ h0->udp.src_port = clib_host_to_net_u16 (4789);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan);
+
+ /* VXLAN header */
+ vnet_set_vni_and_flags(&h0->vxlan, t->vni);
+
+ t->rewrite = rw;
+ return (0);
+}
+
int vnet_vxlan_add_del_tunnel
(vnet_vxlan_add_del_tunnel_args_t *a, u32 * sw_if_indexp)
{
@@ -155,17 +199,27 @@ int vnet_vxlan_add_del_tunnel
vxlan_tunnel_t *t = 0;
vnet_main_t * vnm = vxm->vnet_main;
ip4_main_t * im4 = &ip4_main;
+ ip6_main_t * im6 = &ip6_main;
vnet_hw_interface_t * hi;
uword * p;
u32 hw_if_index = ~0;
u32 sw_if_index = ~0;
int rv;
- vxlan_tunnel_key_t key;
+ vxlan4_tunnel_key_t key4;
+ vxlan6_tunnel_key_t key6;
+
+ if (!a->is_ip6) {
+ key4.src = a->dst.ip4.as_u32; /* decap src in key is encap dst in config */
+ key4.vni = clib_host_to_net_u32 (a->vni << 8);
- key.src = a->dst.as_u32; /* decap src in key is encap dst in config */
- key.vni = clib_host_to_net_u32 (a->vni << 8);
+ p = hash_get (vxm->vxlan4_tunnel_by_key, key4.as_u64);
+ } else {
+ key6.src.as_u64[0] = a->dst.ip6.as_u64[0];
+ key6.src.as_u64[1] = a->dst.ip6.as_u64[1];
+ key6.vni = clib_host_to_net_u32 (a->vni << 8);
- p = hash_get (vxm->vxlan_tunnel_by_key, key.as_u64);
+ p = hash_get (vxm->vxlan6_tunnel_by_key, pointer_to_uword(&key6));
+ }
if (a->is_add)
{
@@ -185,9 +239,22 @@ int vnet_vxlan_add_del_tunnel
/* copy from arg structure */
#define _(x) t->x = a->x;
foreach_copy_field;
+ if (!a->is_ip6) foreach_copy_ipv4
+ else foreach_copy_ipv6
#undef _
- rv = vxlan_rewrite (t);
+ if (a->is_ip6) {
+ /* copy the key */
+ t->key6 = key6;
+ }
+
+ if (!a->is_ip6) t->flags |= VXLAN_TUNNEL_IS_IPV4;
+
+ if (!a->is_ip6) {
+ rv = vxlan4_rewrite (t);
+ } else {
+ rv = vxlan6_rewrite (t);
+ }
if (rv)
{
@@ -195,7 +262,10 @@ int vnet_vxlan_add_del_tunnel
return rv;
}
- hash_set (vxm->vxlan_tunnel_by_key, key.as_u64, t - vxm->tunnels);
+ if (!a->is_ip6)
+ hash_set (vxm->vxlan4_tunnel_by_key, key4.as_u64, t - vxm->tunnels);
+ else
+ hash_set (vxm->vxlan6_tunnel_by_key, pointer_to_uword(&t->key6), t - vxm->tunnels);
if (vec_len (vxm->free_vxlan_tunnel_hw_if_indices) > 0)
{
@@ -244,8 +314,13 @@ int vnet_vxlan_add_del_tunnel
}
vnet_sw_interface_set_flags (vnm, sw_if_index,
VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ if (!a->is_ip6) {
vec_validate (im4->fib_index_by_sw_if_index, sw_if_index);
im4->fib_index_by_sw_if_index[sw_if_index] = t->encap_fib_index;
+ } else {
+ vec_validate (im6->fib_index_by_sw_if_index, sw_if_index);
+ im6->fib_index_by_sw_if_index[sw_if_index] = t->encap_fib_index;
+ }
}
else
{
@@ -262,10 +337,17 @@ int vnet_vxlan_add_del_tunnel
vxm->tunnel_index_by_sw_if_index[t->sw_if_index] = ~0;
- hash_unset (vxm->vxlan_tunnel_by_key, key.as_u64);
+ if (!a->is_ip6)
+ hash_unset (vxm->vxlan4_tunnel_by_key, key4.as_u64);
+ else
+ hash_unset (vxm->vxlan6_tunnel_by_key, pointer_to_uword(&key6));
vec_free (t->rewrite);
- t->rewrite = vxlan_dummy_rewrite;
+ if (!a->is_ip6) {
+ t->rewrite = vxlan4_dummy_rewrite;
+ } else {
+ t->rewrite = vxlan6_dummy_rewrite;
+ }
pool_put (vxm->tunnels, t);
}
@@ -275,7 +357,7 @@ int vnet_vxlan_add_del_tunnel
return 0;
}
-static u32 fib_index_from_fib_id (u32 fib_id)
+static u32 fib4_index_from_fib_id (u32 fib_id)
{
ip4_main_t * im = &ip4_main;
uword * p;
@@ -287,6 +369,18 @@ static u32 fib_index_from_fib_id (u32 fib_id)
return p[0];
}
+static u32 fib6_index_from_fib_id (u32 fib_id)
+{
+ ip6_main_t * im = &ip6_main;
+ uword * p;
+
+ p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ return ~0;
+
+ return p[0];
+}
+
static uword unformat_decap_next (unformat_input_t * input, va_list * args)
{
u32 * result = va_arg (*args, u32 *);
@@ -313,10 +407,12 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm,
vlib_cli_command_t * cmd)
{
unformat_input_t _line_input, * line_input = &_line_input;
- ip4_address_t src, dst;
+ ip46_address_t src, dst;
u8 is_add = 1;
u8 src_set = 0;
u8 dst_set = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
u32 encap_fib_index = 0;
u32 decap_next_index = ~0;
u32 vni = 0;
@@ -330,16 +426,39 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm,
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
if (unformat (line_input, "del"))
- is_add = 0;
+ {
+ is_add = 0;
+ }
+ else if (unformat (line_input, "src %U",
+ unformat_ip4_address, &src.ip4))
+ {
+ src_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "dst %U",
+ unformat_ip4_address, &dst.ip4))
+ {
+ dst_set = 1;
+ ipv4_set = 1;
+ }
else if (unformat (line_input, "src %U",
- unformat_ip4_address, &src))
- src_set = 1;
+ unformat_ip6_address, &src.ip6))
+ {
+ src_set = 1;
+ ipv6_set = 1;
+ }
else if (unformat (line_input, "dst %U",
- unformat_ip4_address, &dst))
- dst_set = 1;
+ unformat_ip6_address, &dst.ip6))
+ {
+ dst_set = 1;
+ ipv6_set = 1;
+ }
else if (unformat (line_input, "encap-vrf-id %d", &tmp))
{
- encap_fib_index = fib_index_from_fib_id (tmp);
+ if (ipv6_set)
+ encap_fib_index = fib6_index_from_fib_id (tmp);
+ else
+ encap_fib_index = fib4_index_from_fib_id (tmp);
if (encap_fib_index == ~0)
return clib_error_return (0, "nonexistent encap-vrf-id %d", tmp);
}
@@ -364,15 +483,25 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm,
if (dst_set == 0)
return clib_error_return (0, "tunnel dst address not specified");
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+
+ if ((ipv4_set && memcmp(&src.ip4, &dst.ip4, sizeof(src.ip4)) == 0) ||
+ (ipv6_set && memcmp(&src.ip6, &dst.ip6, sizeof(src.ip6)) == 0))
+ return clib_error_return (0, "src and dst addresses are identical");
+
if (vni == 0)
return clib_error_return (0, "vni not specified");
memset (a, 0, sizeof (*a));
a->is_add = is_add;
+ a->is_ip6 = ipv6_set;
#define _(x) a->x = x;
foreach_copy_field;
+ if (ipv4_set) foreach_copy_ipv4
+ else foreach_copy_ipv6
#undef _
rv = vnet_vxlan_add_del_tunnel (a, 0 /* hw_if_indexp */);
@@ -430,27 +559,43 @@ VLIB_CLI_COMMAND (show_vxlan_tunnel_command, static) = {
.function = show_vxlan_tunnel_command_fn,
};
+
clib_error_t *vxlan_init (vlib_main_t *vm)
{
vxlan_main_t * vxm = &vxlan_main;
- ip4_vxlan_header_t * hdr;
- ip4_header_t * ip;
+ ip4_vxlan_header_t * hdr4;
+ ip4_header_t * ip4;
+ ip6_vxlan_header_t * hdr6;
+ ip6_header_t * ip6;
vxm->vnet_main = vnet_get_main();
vxm->vlib_main = vm;
+ /* initialize the ip6 hash */
+ vxm->vxlan6_tunnel_by_key = hash_create_mem(0,
+ sizeof(vxlan6_tunnel_key_t),
+ sizeof(uword));
+
/* init dummy rewrite string for deleted vxlan tunnels */
- _vec_len(vxlan_dummy_rewrite) = sizeof(ip4_vxlan_header_t);
- hdr = (ip4_vxlan_header_t *) vxlan_dummy_rewrite;
- ip = &hdr->ip4;
+ _vec_len(vxlan4_dummy_rewrite) = sizeof(ip4_vxlan_header_t);
+ hdr4 = (ip4_vxlan_header_t *) vxlan4_dummy_rewrite;
+ ip4 = &hdr4->ip4;
/* minimal rewrite setup, see vxlan_rewrite() above as reference */
- ip->ip_version_and_header_length = 0x45;
- ip->checksum = ip4_header_checksum (ip);
+ ip4->ip_version_and_header_length = 0x45;
+ ip4->checksum = ip4_header_checksum (ip4);
+
+ /* Same again for IPv6 */
+ _vec_len(vxlan6_dummy_rewrite) = sizeof(ip6_vxlan_header_t);
+ hdr6 = (ip6_vxlan_header_t *) vxlan6_dummy_rewrite;
+ ip6 = &hdr6->ip6;
+ /* minimal rewrite setup, see vxlan6_rewrite() above as reference */
+ ip6->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(6 << 28);
udp_register_dst_port (vm, UDP_DST_PORT_vxlan,
- vxlan_input_node.index, 1 /* is_ip4 */);
+ vxlan4_input_node.index, /* is_ip4 */ 1);
+ udp_register_dst_port (vm, UDP_DST_PORT_vxlan6,
+ vxlan6_input_node.index, /* is_ip4 */ 0);
return 0;
}
VLIB_INIT_FUNCTION(vxlan_init);
-
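For completeness, a minimal usage sketch of the extended add/del entry point with an IPv6 tunnel, based only on the `vnet_vxlan_add_del_tunnel_args_t` fields visible in this patch; the address literals and the chosen decap-next value are assumptions, not values taken from the tree:

```c
#include <vnet/vxlan/vxlan.h>

/* Hypothetical helper: create a VXLAN-over-IPv6 tunnel programmatically,
 * filling the args structure much like the CLI handler does. */
int
example_create_vxlan6_tunnel (void)
{
  vnet_vxlan_add_del_tunnel_args_t a;
  u32 sw_if_index = ~0;

  memset (&a, 0, sizeof (a));
  a.is_add = 1;
  a.is_ip6 = 1;                     /* selects vxlan6_rewrite + the v6 key hash */
  /* placeholder addresses: 2001:db8::1 -> 2001:db8::2 */
  a.src.ip6.as_u64[0] = clib_host_to_net_u64 (0x20010db800000000ULL);
  a.src.ip6.as_u64[1] = clib_host_to_net_u64 (0x0000000000000001ULL);
  a.dst.ip6.as_u64[0] = clib_host_to_net_u64 (0x20010db800000000ULL);
  a.dst.ip6.as_u64[1] = clib_host_to_net_u64 (0x0000000000000002ULL);
  a.vni = 13;                       /* HOST byte order, per vxlan.h */
  a.encap_fib_index = 0;            /* default table */
  a.decap_next_index = VXLAN_INPUT_NEXT_L2_INPUT; /* assumed sensible L2 decap-next */

  return vnet_vxlan_add_del_tunnel (&a, &sw_if_index);
}
```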
diff --git a/vnet/vnet/vxlan/vxlan.h b/vnet/vnet/vxlan/vxlan.h
index 2454e4c5..866d7ffd 100644
--- a/vnet/vnet/vxlan/vxlan.h
+++ b/vnet/vnet/vxlan/vxlan.h
@@ -24,6 +24,7 @@
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan_packet.h>
#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/udp.h>
typedef CLIB_PACKED (struct {
@@ -32,6 +33,12 @@ typedef CLIB_PACKED (struct {
vxlan_header_t vxlan; /* 8 bytes */
}) ip4_vxlan_header_t;
+typedef CLIB_PACKED (struct {
+ ip6_header_t ip6; /* 40 bytes */
+ udp_header_t udp; /* 8 bytes */
+ vxlan_header_t vxlan; /* 8 bytes */
+}) ip6_vxlan_header_t;
+
typedef CLIB_PACKED(struct {
/*
* Key fields: ip src and vxlan vni on incoming VXLAN packet
@@ -44,30 +51,50 @@ typedef CLIB_PACKED(struct {
};
u64 as_u64;
};
-}) vxlan_tunnel_key_t;
+}) vxlan4_tunnel_key_t;
+
+typedef CLIB_PACKED(struct {
+ /*
+ * Key fields: ip src and vxlan vni on incoming VXLAN packet
+ * all fields in NET byte order
+ */
+ ip6_address_t src;
+ u32 vni; /* shifted left 8 bits */
+}) vxlan6_tunnel_key_t;
typedef struct {
/* Rewrite string. $$$$ embed vnet_rewrite header */
u8 * rewrite;
- /* decap next index */
- u32 decap_next_index;
-
/* tunnel src and dst addresses */
- ip4_address_t src;
- ip4_address_t dst;
+ ip46_address_t src;
+ ip46_address_t dst;
/* vxlan VNI in HOST byte order */
u32 vni;
+ /* decap next index */
+ u32 decap_next_index;
+
/* L3 FIB index and L2 BD ID */
u16 encap_fib_index; /* tunnel partner IP lookup here */
/* vnet intfc hw/sw_if_index */
u16 hw_if_index;
u32 sw_if_index;
+
+ union { /* storage for the hash key */
+ vxlan4_tunnel_key_t key4;
+ vxlan6_tunnel_key_t key6;
+ };
+
+ /* flags */
+ u32 flags;
} vxlan_tunnel_t;
+/* Flags for vxlan_tunnel_t.flags */
+#define VXLAN_TUNNEL_IS_IPV4 1
+
#define foreach_vxlan_input_next \
_(DROP, "error-drop") \
_(L2_INPUT, "l2-input") \
@@ -93,14 +120,18 @@ typedef struct {
vxlan_tunnel_t *tunnels;
/* lookup tunnel by key */
- uword * vxlan_tunnel_by_key;
+ uword * vxlan4_tunnel_by_key; /* keyed on ipv4.dst + vni */
+ uword * vxlan6_tunnel_by_key; /* keyed on ipv6.dst + vni */
/* Free vlib hw_if_indices */
u32 * free_vxlan_tunnel_hw_if_indices;
/* Dummy rewrite for deleted vxlan_tunnels with hw_if_indices as above */
- u64 dummy_str [sizeof(ip4_vxlan_header_t)/sizeof(u64) + 2];
-#define vxlan_dummy_rewrite ((u8 *) &vxlan_main.dummy_str[1])
+ u64 dummy4_str [sizeof(ip4_vxlan_header_t)/sizeof(u64) + 2];
+#define vxlan4_dummy_rewrite ((u8 *) &vxlan_main.dummy4_str[1])
+
+ u64 dummy6_str [sizeof(ip6_vxlan_header_t)/sizeof(u64) + 2];
+#define vxlan6_dummy_rewrite ((u8 *) &vxlan_main.dummy6_str[1])
/* Mapping from sw_if_index to tunnel index */
u32 * tunnel_index_by_sw_if_index;
@@ -112,14 +143,19 @@ typedef struct {
vxlan_main_t vxlan_main;
-extern vlib_node_registration_t vxlan_input_node;
+extern vlib_node_registration_t vxlan4_input_node;
+extern vlib_node_registration_t vxlan6_input_node;
extern vlib_node_registration_t vxlan_encap_node;
u8 * format_vxlan_encap_trace (u8 * s, va_list * args);
typedef struct {
u8 is_add;
- ip4_address_t src, dst;
+
+ /* we normally use is_ip4, but since this adds to the
+ * structure, this seems less of a breaking change */
+ u8 is_ip6;
+ ip46_address_t src, dst;
u32 encap_fib_index;
u32 decap_next_index;
u32 vni;