author     Hongjun Ni <hongjun.ni@intel.com>       2016-05-25 01:16:19 +0800
committer  Florin Coras <florin.coras@gmail.com>   2016-05-25 10:42:43 +0000
commit     df921cc65a25f6fb71b1169db6ff004b4e45430e
tree       f45fb2fe3641c59db8f89acba0bf51cba95ad8b2
parent     c4cb44c05d121da2e0f0ccd39d5e1bf470731a85
Add Vxlan-Gpe over IPv6
PatchSet4: consolidate code as per comments.
PatchSet3: simplify the code using ip_udp_encap_one/two.
PatchSet2: consolidate comments and indent style.

Change-Id: Ia8b43f854a46d77e838e198566200ad28fd72472
Signed-off-by: Hongjun Ni <hongjun.ni@intel.com>
-rw-r--r--   vnet/vnet/ip/udp.h                 1
-rw-r--r--   vnet/vnet/vxlan-gpe/decap.c      824
-rw-r--r--   vnet/vnet/vxlan-gpe/encap.c      559
-rw-r--r--   vnet/vnet/vxlan-gpe/vxlan_gpe.c  238
-rw-r--r--   vnet/vnet/vxlan-gpe/vxlan_gpe.h   45
-rw-r--r--   vpp-api-test/vat/api_format.c     43
-rw-r--r--   vpp/api/api.c                     16
-rw-r--r--   vpp/api/custom_dump.c              5
-rw-r--r--   vpp/api/vpe.api                    5

9 files changed, 1021 insertions(+), 715 deletions(-)
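For illustration, the extended CLI now takes either address family for the tunnel endpoints. A hypothetical IPv6 invocation (addresses and VNI are examples, not values from the patch); note that encap-vrf-id/decap-vrf-id are resolved against the IPv4 or IPv6 FIB according to the address family parsed so far:

create vxlan-gpe tunnel local 2001:db8::1 remote 2001:db8::2 vni 13 next-ip4 encap-vrf-id 0 decap-vrf-id 0
create vxlan-gpe tunnel local 2001:db8::1 remote 2001:db8::2 vni 13 del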
diff --git a/vnet/vnet/ip/udp.h b/vnet/vnet/ip/udp.h
index a6991487507..1cf525c6093 100644
--- a/vnet/vnet/ip/udp.h
+++ b/vnet/vnet/ip/udp.h
@@ -51,6 +51,7 @@ _ (547, dhcpv6_to_server) \
_ (546, dhcpv6_to_client) \
_ (4341, lisp_gpe6) \
_ (4342, lisp_cp6) \
+_ (4790, vxlan6_gpe) \
_ (6633, vpath6_3)
typedef enum {
diff --git a/vnet/vnet/vxlan-gpe/decap.c b/vnet/vnet/vxlan-gpe/decap.c
index 870871e7de6..34bcccd9333 100644
--- a/vnet/vnet/vxlan-gpe/decap.c
+++ b/vnet/vnet/vxlan-gpe/decap.c
@@ -19,6 +19,8 @@
#include <vnet/pg/pg.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>
+vlib_node_registration_t vxlan4_gpe_input_node;
+vlib_node_registration_t vxlan6_gpe_input_node;
+
typedef struct {
u32 next_index;
u32 tunnel_index;
@@ -32,15 +34,15 @@ static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
vxlan_gpe_rx_trace_t * t = va_arg (*args, vxlan_gpe_rx_trace_t *);
if (t->tunnel_index != ~0)
- {
- s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
- t->next_index, t->error);
- }
+ {
+ s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
+ t->next_index, t->error);
+ }
else
- {
- s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
- t->error);
- }
+ {
+ s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
+ t->error);
+ }
return s;
}
@@ -54,22 +56,24 @@ static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args)
return s;
}
-static uword
+always_inline uword
vxlan_gpe_input (vlib_main_t * vm,
vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+ vlib_frame_t * from_frame,
+ u8 is_ip4)
{
- u32 n_left_from, next_index, * from, * to_next;
+ u32 n_left_from, next_index, *from, *to_next;
vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
vnet_main_t * vnm = ngm->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 last_tunnel_index = ~0;
- vxlan_gpe_tunnel_key_t last_key;
+ vxlan4_gpe_tunnel_key_t last_key4;
+ vxlan6_gpe_tunnel_key_t last_key6;
u32 pkts_decapsulated = 0;
- u32 cpu_index = os_get_cpu_number();
+ u32 cpu_index = os_get_cpu_number ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
- memset (&last_key, 0xff, sizeof (last_key));
+ memset (&last_key4, 0xff, sizeof(last_key4));
+ memset (&last_key6, 0xff, sizeof(last_key6));
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
@@ -79,339 +83,458 @@ vxlan_gpe_input (vlib_main_t * vm,
stats_n_packets = stats_n_bytes = 0;
while (n_left_from > 0)
- {
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- u32 bi0, bi1;
- vlib_buffer_t * b0, * b1;
- u32 next0, next1;
- ip4_vxlan_gpe_header_t * iuvn0, * iuvn1;
- uword * p0, * p1;
- u32 tunnel_index0, tunnel_index1;
- vxlan_gpe_tunnel_t * t0, * t1;
- vxlan_gpe_tunnel_key_t key0, key1;
- u32 error0, error1;
- u32 sw_if_index0, sw_if_index1, len0, len1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t * p2, * p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
- }
-
- bi0 = from[0];
- bi1 = from[1];
- to_next[0] = bi0;
- to_next[1] = bi1;
- from += 2;
- to_next += 2;
- n_left_to_next -= 2;
- n_left_from -= 2;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- /* udp leaves current_data pointing at the vxlan header */
- vlib_buffer_advance
- (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
- vlib_buffer_advance
- (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
-
- iuvn0 = vlib_buffer_get_current (b0);
- iuvn1 = vlib_buffer_get_current (b1);
-
- /* pop (ip, udp, vxlan) */
- vlib_buffer_advance (b0, sizeof (*iuvn0));
- vlib_buffer_advance (b1, sizeof (*iuvn1));
-
- tunnel_index0 = ~0;
- tunnel_index1 = ~0;
- error0 = 0;
- error1 = 0;
-
- next0 = (iuvn0->vxlan.protocol < node->n_next_nodes) ? iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
- next1 = (iuvn1->vxlan.protocol < node->n_next_nodes) ? iuvn1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
-
-
-
-
- key0.local = iuvn0->ip4.dst_address.as_u32;
- key1.local = iuvn1->ip4.dst_address.as_u32;
-
- key0.remote = iuvn0->ip4.src_address.as_u32;
- key1.remote = iuvn1->ip4.src_address.as_u32;
-
- key0.vni = iuvn0->vxlan.vni_res;
- key1.vni = iuvn1->vxlan.vni_res;
-
- key0.pad = 0;
- key1.pad = 0;
-
- /* Processing for key0 */
- if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
- || (key0.as_u64[1] != last_key.as_u64[1])))
- {
- p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0);
-
- if (p0 == 0)
- {
- error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
- goto trace0;
- }
+ {
+ u32 n_left_to_next;
- last_key.as_u64[0] = key0.as_u64[0];
- last_key.as_u64[1] = key0.as_u64[1];
- tunnel_index0 = last_tunnel_index = p0[0];
- }
- else
- tunnel_index0 = last_tunnel_index;
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
- t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, *b1;
+ u32 next0, next1;
+ ip4_vxlan_gpe_header_t * iuvn4_0, *iuvn4_1;
+ ip6_vxlan_gpe_header_t * iuvn6_0, *iuvn6_1;
+ uword * p0, *p1;
+ u32 tunnel_index0, tunnel_index1;
+ vxlan_gpe_tunnel_t * t0, *t1;
+ vxlan4_gpe_tunnel_key_t key4_0, key4_1;
+ vxlan6_gpe_tunnel_key_t key6_0, key6_1;
+ u32 error0, error1;
+ u32 sw_if_index0, sw_if_index1, len0, len1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header(p2, LOAD);
+ vlib_prefetch_buffer_header(p3, LOAD);
+
+ CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ if (is_ip4)
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (b0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
+ vlib_buffer_advance (b1, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
+
+ iuvn4_0 = vlib_buffer_get_current (b0);
+ iuvn4_1 = vlib_buffer_get_current (b1);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof(*iuvn4_0));
+ vlib_buffer_advance (b1, sizeof(*iuvn4_1));
+ }
+ else
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (b0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
+ vlib_buffer_advance (b1, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
+
+ iuvn6_0 = vlib_buffer_get_current (b0);
+ iuvn6_1 = vlib_buffer_get_current (b1);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof(*iuvn6_0));
+ vlib_buffer_advance (b1, sizeof(*iuvn6_1));
+ }
+
+ tunnel_index0 = ~0;
+ tunnel_index1 = ~0;
+ error0 = 0;
+ error1 = 0;
+
+ if (is_ip4)
+ {
+ next0 = (iuvn4_0->vxlan.protocol < node->n_next_nodes) ?
+ iuvn4_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+ next1 = (iuvn4_1->vxlan.protocol < node->n_next_nodes) ?
+ iuvn4_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+
+ key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
+ key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
+
+ key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
+ key4_1.remote = iuvn4_1->ip4.src_address.as_u32;
+
+ key4_0.vni = iuvn4_0->vxlan.vni_res;
+ key4_1.vni = iuvn4_1->vxlan.vni_res;
+
+ key4_0.pad = 0;
+ key4_1.pad = 0;
+
+ /* Processing for key4_0 */
+ if (PREDICT_FALSE((key4_0.as_u64[0] != last_key4.as_u64[0])
+ || (key4_0.as_u64[1] != last_key4.as_u64[1])))
+ {
+ p0 = hash_get_mem(ngm->vxlan4_gpe_tunnel_by_key, &key4_0);
+
+ if (p0 == 0)
+ {
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace0;
+ }
- next0 = t0->protocol;
-
- sw_if_index0 = t0->sw_if_index;
- len0 = vlib_buffer_length_in_chain(vm, b0);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- vnet_update_l2_len (b0);
-
- /*
- * ip[46] lookup in the configured FIB
- */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+ last_key4.as_u64[0] = key4_0.as_u64[0];
+ last_key4.as_u64[1] = key4_0.as_u64[1];
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ }
+ else /* is_ip6 */
+ {
+ next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
+ iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+ next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
+ iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+
+ key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
+ key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
+ key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
+ key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
+
+ key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
+ key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
+ key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
+ key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
+
+ key6_0.vni = iuvn6_0->vxlan.vni_res;
+ key6_1.vni = iuvn6_1->vxlan.vni_res;
+
+ /* Processing for key6_0 */
+ if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof(last_key6)) != 0))
+ {
+ p0 = hash_get_mem(ngm->vxlan6_gpe_tunnel_by_key, &key6_0);
+
+ if (p0 == 0)
+ {
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace0;
+ }
- pkts_decapsulated++;
- stats_n_packets += 1;
- stats_n_bytes += len0;
+ memcpy (&last_key6, &key6_0, sizeof(key6_0));
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ }
+
+ t0 = pool_elt_at_index(ngm->tunnels, tunnel_index0);
+
+ next0 = t0->protocol;
+
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+
+ pkts_decapsulated++;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+
+ trace0: b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = tunnel_index0;
+ }
+
+ /* Process packet 1 */
+ if (is_ip4)
+ {
+ /* Processing for key4_1 */
+ if (PREDICT_FALSE(
+ (key4_1.as_u64[0] != last_key4.as_u64[0])
+ || (key4_1.as_u64[1] != last_key4.as_u64[1])))
+ {
+ p1 = hash_get_mem(ngm->vxlan4_gpe_tunnel_by_key, &key4_1);
+
+ if (p1 == 0)
+ {
+ error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace1;
+ }
- if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+ last_key4.as_u64[0] = key4_1.as_u64[0];
+ last_key4.as_u64[1] = key4_1.as_u64[1];
+ tunnel_index1 = last_tunnel_index = p1[0];
+ }
+ else
+ tunnel_index1 = last_tunnel_index;
+ }
+ else /* is_ip6 */
+ {
+ /* Processing for key6_1 */
+ if (PREDICT_FALSE(memcmp (&key6_1, &last_key6, sizeof(last_key6)) != 0))
+ {
+ p1 = hash_get_mem(ngm->vxlan6_gpe_tunnel_by_key, &key6_1);
+
+ if (p1 == 0)
{
- stats_n_packets -= 1;
- stats_n_bytes -= len0;
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = 1;
- stats_n_bytes = len0;
- stats_sw_if_index = sw_if_index0;
+ error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace1;
}
- trace0:
- b0->error = error0 ? node->errors[error0] : 0;
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- vxlan_gpe_rx_trace_t *tr
- = vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->next_index = next0;
- tr->error = error0;
- tr->tunnel_index = tunnel_index0;
- }
-
-
- /* Processing for key1 */
- if (PREDICT_FALSE ((key1.as_u64[0] != last_key.as_u64[0])
- || (key1.as_u64[1] != last_key.as_u64[1])))
- {
- p1 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key1);
-
- if (p1 == 0)
- {
- error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
- goto trace1;
- }
-
- last_key.as_u64[0] = key1.as_u64[0];
- last_key.as_u64[1] = key1.as_u64[1];
- tunnel_index1 = last_tunnel_index = p1[0];
- }
- else
- tunnel_index1 = last_tunnel_index;
-
- t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1);
-
- next1 = t1->protocol;
- sw_if_index1 = t1->sw_if_index;
- len1 = vlib_buffer_length_in_chain(vm, b1);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- vnet_update_l2_len (b1);
-
- /*
- * ip[46] lookup in the configured FIB
- */
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
-
-
- pkts_decapsulated++;
- stats_n_packets += 1;
- stats_n_bytes += len1;
-
- /* Batch stats increment on the same vxlan tunnel so counter
- is not incremented per packet */
- if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
+ memcpy (&last_key6, &key6_1, sizeof(key6_1));
+ tunnel_index1 = last_tunnel_index = p1[0];
+ }
+ else
+ tunnel_index1 = last_tunnel_index;
+ }
+
+ t1 = pool_elt_at_index(ngm->tunnels, tunnel_index1);
+
+ next1 = t1->protocol;
+ sw_if_index1 = t1->sw_if_index;
+ len1 = vlib_buffer_length_in_chain (vm, b1);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b1);
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
+
+ pkts_decapsulated++;
+ stats_n_packets += 1;
+ stats_n_bytes += len1;
+
+ /* Batch stats increment on the same vxlan tunnel so counter
+ is not incremented per packet */
+ if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len1;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len1;
+ stats_sw_if_index = sw_if_index1;
+ }
+
+ trace1: b1->error = error1 ? node->errors[error1] : 0;
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof(*tr));
+ tr->next_index = next1;
+ tr->error = error1;
+ tr->tunnel_index = tunnel_index1;
+ }
+
+ vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0;
+ ip4_vxlan_gpe_header_t * iuvn4_0;
+ ip6_vxlan_gpe_header_t * iuvn6_0;
+ uword * p0;
+ u32 tunnel_index0;
+ vxlan_gpe_tunnel_t * t0;
+ vxlan4_gpe_tunnel_key_t key4_0;
+ vxlan6_gpe_tunnel_key_t key6_0;
+ u32 error0;
+ u32 sw_if_index0, len0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (is_ip4)
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (
+ b0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
+
+ iuvn4_0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof(*iuvn4_0));
+ }
+ else
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (
+ b0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
+
+ iuvn6_0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof(*iuvn6_0));
+ }
+
+ tunnel_index0 = ~0;
+ error0 = 0;
+
+ if (is_ip4)
+ {
+ next0 =
+ (iuvn4_0->vxlan.protocol < node->n_next_nodes) ?
+ iuvn4_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+
+ key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
+ key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
+ key4_0.vni = iuvn4_0->vxlan.vni_res;
+ key4_0.pad = 0;
+
+ /* Processing for key4_0 */
+ if (PREDICT_FALSE(
+ (key4_0.as_u64[0] != last_key4.as_u64[0])
+ || (key4_0.as_u64[1] != last_key4.as_u64[1])))
+ {
+ p0 = hash_get_mem(ngm->vxlan4_gpe_tunnel_by_key, &key4_0);
+
+ if (p0 == 0)
{
- stats_n_packets -= 1;
- stats_n_bytes -= len1;
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = 1;
- stats_n_bytes = len1;
- stats_sw_if_index = sw_if_index1;
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace00;
}
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
-
- trace1:
- b1->error = error1 ? node->errors[error1] : 0;
-
- if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
- {
- vxlan_gpe_rx_trace_t *tr
- = vlib_add_trace (vm, node, b1, sizeof (*tr));
- tr->next_index = next1;
- tr->error = error1;
- tr->tunnel_index = tunnel_index1;
- }
-
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 bi0;
- vlib_buffer_t * b0;
- u32 next0;
- ip4_vxlan_gpe_header_t * iuvn0;
- uword * p0;
- u32 tunnel_index0;
- vxlan_gpe_tunnel_t * t0;
- vxlan_gpe_tunnel_key_t key0;
- u32 error0;
- u32 sw_if_index0, len0;
-
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- /* udp leaves current_data pointing at the vxlan header */
- vlib_buffer_advance
- (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
-
- iuvn0 = vlib_buffer_get_current (b0);
-
- /* pop (ip, udp, vxlan) */
- vlib_buffer_advance (b0, sizeof (*iuvn0));
-
- tunnel_index0 = ~0;
- error0 = 0;
- next0 = (iuvn0->vxlan.protocol < node->n_next_nodes) ? iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
-
- key0.local = iuvn0->ip4.dst_address.as_u32;
- key0.remote = iuvn0->ip4.src_address.as_u32;
- key0.vni = iuvn0->vxlan.vni_res;
- key0.pad = 0;
-
- if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
- || (key0.as_u64[1] != last_key.as_u64[1])))
- {
- p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0);
-
- if (p0 == 0)
- {
- error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
- goto trace00;
- }
-
- last_key.as_u64[0] = key0.as_u64[0];
- last_key.as_u64[1] = key0.as_u64[1];
- tunnel_index0 = last_tunnel_index = p0[0];
- }
- else
- tunnel_index0 = last_tunnel_index;
-
- t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
-
- next0 = t0->protocol;
-
- sw_if_index0 = t0->sw_if_index;
- len0 = vlib_buffer_length_in_chain(vm, b0);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- vnet_update_l2_len (b0);
-
- /*
- * ip[46] lookup in the configured FIB
- */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
-
- pkts_decapsulated ++;
- stats_n_packets += 1;
- stats_n_bytes += len0;
-
- /* Batch stats increment on the same vxlan-gpe tunnel so counter
- is not incremented per packet */
- if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+
+ last_key4.as_u64[0] = key4_0.as_u64[0];
+ last_key4.as_u64[1] = key4_0.as_u64[1];
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ }
+ else /* is_ip6 */
+ {
+ next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
+ iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+
+ key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
+ key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
+ key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
+ key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
+ key6_0.vni = iuvn6_0->vxlan.vni_res;
+
+ /* Processing for key6_0 */
+ if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof(last_key6)) != 0))
+ {
+ p0 = hash_get_mem(ngm->vxlan6_gpe_tunnel_by_key, &key6_0);
+
+ if (p0 == 0)
{
- stats_n_packets -= 1;
- stats_n_bytes -= len0;
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = 1;
- stats_n_bytes = len0;
- stats_sw_if_index = sw_if_index0;
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+ goto trace00;
}
- trace00:
- b0->error = error0 ? node->errors[error0] : 0;
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- vxlan_gpe_rx_trace_t *tr
- = vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->next_index = next0;
- tr->error = error0;
- tr->tunnel_index = tunnel_index0;
- }
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ memcpy (&last_key6, &key6_0, sizeof(key6_0));
+ tunnel_index0 = last_tunnel_index = p0[0];
+ }
+ else
+ tunnel_index0 = last_tunnel_index;
+ }
+
+ t0 = pool_elt_at_index(ngm->tunnels, tunnel_index0);
+
+ next0 = t0->protocol;
+
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+
+ pkts_decapsulated++;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan-gpe tunnel so counter
+ is not incremented per packet */
+ if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+
+ trace00: b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = tunnel_index0;
+ }
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
}
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
- vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
- VXLAN_GPE_ERROR_DECAPSULATED,
- pkts_decapsulated);
+ vlib_node_increment_counter (vm, is_ip4 ? vxlan4_gpe_input_node.index
+                              : vxlan6_gpe_input_node.index,
+                              VXLAN_GPE_ERROR_DECAPSULATED, pkts_decapsulated);
/* Increment any remaining batch stats */
if (stats_n_packets)
{
- vlib_increment_combined_counter(
+ vlib_increment_combined_counter (
im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index,
stats_sw_if_index, stats_n_packets, stats_n_bytes);
node->runtime_data[0] = stats_sw_if_index;
@@ -419,6 +542,20 @@ vxlan_gpe_input (vlib_main_t * vm,
return from_frame->n_vectors;
}
+static uword
+vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */1);
+}
+
+static uword
+vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */0);
+}
+
static char * vxlan_gpe_error_strings[] = {
#define vxlan_gpe_error(n,s) s,
#include <vnet/vxlan-gpe/vxlan_gpe_error.def>
@@ -426,9 +563,9 @@ static char * vxlan_gpe_error_strings[] = {
#undef _
};
-VLIB_REGISTER_NODE (vxlan_gpe_input_node) = {
- .function = vxlan_gpe_input,
- .name = "vxlan-gpe-input",
+VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
+ .function = vxlan4_gpe_input,
+ .name = "vxlan4-gpe-input",
/* Takes a vector of packets. */
.vector_size = sizeof (u32),
.type = VLIB_NODE_TYPE_INTERNAL,
@@ -447,4 +584,27 @@ VLIB_REGISTER_NODE (vxlan_gpe_input_node) = {
// $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
+VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input)
+
+VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
+ .function = vxlan6_gpe_input,
+ .name = "vxlan6-gpe-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
+ .error_strings = vxlan_gpe_error_strings,
+
+ .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
+ foreach_vxlan_gpe_input_next
+#undef _
+ },
+
+ .format_buffer = format_vxlan_gpe_with_length,
+ .format_trace = format_vxlan_gpe_rx_trace,
+ // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
+};
+VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input)
diff --git a/vnet/vnet/vxlan-gpe/encap.c b/vnet/vnet/vxlan-gpe/encap.c
index 3ffe2a62db2..ba0eca2a0a1 100644
--- a/vnet/vnet/vxlan-gpe/encap.c
+++ b/vnet/vnet/vxlan-gpe/encap.c
@@ -38,6 +38,7 @@ typedef enum {
typedef enum {
VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
+ VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP,
VXLAN_GPE_ENCAP_NEXT_DROP,
VXLAN_GPE_ENCAP_N_NEXT
} vxlan_gpe_encap_next_t;
@@ -58,21 +59,59 @@ u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
return s;
}
-#define foreach_fixed_header_offset \
-_(0) _(1) _(2) _(3) _(4) _(5) _(6)
+always_inline void
+vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
+ vxlan_gpe_tunnel_t * t0, u32 * next0, u8 is_v4)
+{
+ ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
+ ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);
+
+ if (is_v4)
+ {
+ ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 36, 1);
+ next0[0] = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+
+ }
+ else
+ {
+ ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 56, 0);
+ next0[0] = VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP;
+ }
+}
+
+always_inline void
+vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0, vlib_buffer_t * b1,
+ vxlan_gpe_tunnel_t * t0, vxlan_gpe_tunnel_t * t1, u32 * next0,
+ u32 * next1, u8 is_v4)
+{
+ ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
+ ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);
+
+ if (is_v4)
+ {
+ ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 36, 1);
+ ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, 36, 1);
+ next0[0] = next1[0] = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+ }
+ else
+ {
+ ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 56, 0);
+ ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, 56, 0);
+ next0[0] = next1[0] = VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP;
+ }
+}
static uword
vxlan_gpe_encap (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- u32 n_left_from, next_index, * from, * to_next;
+ u32 n_left_from, next_index, *from, *to_next;
vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
vnet_main_t * vnm = ngm->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_encapsulated = 0;
- u16 old_l0 = 0, old_l1 = 0;
- u32 cpu_index = os_get_cpu_number();
+ u32 cpu_index = os_get_cpu_number ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
from = vlib_frame_vector_args (from_frame);
@@ -83,324 +122,207 @@ vxlan_gpe_encap (vlib_main_t * vm,
stats_n_packets = stats_n_bytes = 0;
while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
{
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- u32 bi0, bi1;
- vlib_buffer_t * b0, * b1;
- u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
- u32 next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
- u32 sw_if_index0, sw_if_index1, len0, len1;
- vnet_hw_interface_t * hi0, * hi1;
- ip4_header_t * ip0, * ip1;
- udp_header_t * udp0, * udp1;
- u64 * copy_src0, * copy_dst0;
- u64 * copy_src1, * copy_dst1;
- u32 * copy_src_last0, * copy_dst_last0;
- u32 * copy_src_last1, * copy_dst_last1;
- vxlan_gpe_tunnel_t * t0, * t1;
- u16 new_l0, new_l1;
- ip_csum_t sum0, sum1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t * p2, * p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
- }
-
- bi0 = from[0];
- bi1 = from[1];
- to_next[0] = bi0;
- to_next[1] = bi1;
- from += 2;
- to_next += 2;
- n_left_to_next -= 2;
- n_left_from -= 2;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- /* 1-wide cache? */
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
- sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
- hi0 = vnet_get_sup_hw_interface
- (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
- hi1 = vnet_get_sup_hw_interface
- (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);
-
- t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
- t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);
-
- ASSERT(vec_len(t0->rewrite) >= 24);
- ASSERT(vec_len(t1->rewrite) >= 24);
-
- /* Apply the rewrite string. $$$$ vnet_rewrite? */
- vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
- vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
-
- ip0 = vlib_buffer_get_current(b0);
- ip1 = vlib_buffer_get_current(b1);
- /* Copy the fixed header */
- copy_dst0 = (u64 *) ip0;
- copy_src0 = (u64 *) t0->rewrite;
- copy_dst1 = (u64 *) ip1;
- copy_src1 = (u64 *) t1->rewrite;
-
- ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
-
- /* Copy first 36 octets 8-bytes at a time */
-#define _(offs) copy_dst0[offs] = copy_src0[offs];
- foreach_fixed_header_offset;
-#undef _
-#define _(offs) copy_dst1[offs] = copy_src1[offs];
- foreach_fixed_header_offset;
-#undef _
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, *b1;
+ u32 next0, next1;
+ u32 sw_if_index0, sw_if_index1, len0, len1;
+ vnet_hw_interface_t * hi0, *hi1;
+ vxlan_gpe_tunnel_t * t0, *t1;
+ u8 is_ip4_0, is_ip4_1;
+
+ next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header(p2, LOAD);
+ vlib_prefetch_buffer_header(p3, LOAD);
+
+ CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* 1-wide cache? */
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
+ hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
+ hi1 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);
+
+ t0 = pool_elt_at_index(ngm->tunnels, hi0->dev_instance);
+ t1 = pool_elt_at_index(ngm->tunnels, hi1->dev_instance);
+
+ is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+ is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+ if (PREDICT_TRUE(is_ip4_0 == is_ip4_1))
+ {
+ vxlan_gpe_encap_two_inline (ngm, b0, b1, t0, t1, &next0, &next1, is_ip4_0);
+ }
+ else
+ {
+ vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
+ vxlan_gpe_encap_one_inline (ngm, b1, t1, &next1, is_ip4_1);
+ }
+
+ /* Reset to look up tunnel partner in the configured FIB */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+ vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
+ pkts_encapsulated += 2;
+
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+ len1 = vlib_buffer_length_in_chain (vm, b1);
+ stats_n_packets += 2;
+ stats_n_bytes += len0 + len1;
+
+ /* Batch stats increment on the same vxlan tunnel so counter is not
+ incremented per packet. Note stats are still incremented for deleted
+ and admin-down tunnel where packets are dropped. It is not worthwhile
+ to check for this rare case and affect normal path performance. */
+ if (PREDICT_FALSE((sw_if_index0 != stats_sw_if_index)
+ || (sw_if_index1 != stats_sw_if_index)))
+ {
+ stats_n_packets -= 2;
+ stats_n_bytes -= len0 + len1;
+ if (sw_if_index0 == sw_if_index1)
+ {
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_sw_if_index = sw_if_index0;
+ stats_n_packets = 2;
+ stats_n_bytes = len0 + len1;
+ }
+ else
+ {
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, sw_if_index0, 1, len0);
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, sw_if_index1, 1, len1);
+ }
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
+ tr->tunnel_index = t0 - ngm->tunnels;
+ }
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b1,
+ sizeof(*tr));
+ tr->tunnel_index = t1 - ngm->tunnels;
+ }
+
+ vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0, next1);
+ }
- /* Last 4 octets. Hopefully gcc will be our friend */
- copy_dst_last0 = (u32 *)(&copy_dst0[7]);
- copy_src_last0 = (u32 *)(&copy_src0[7]);
- copy_dst_last1 = (u32 *)(&copy_dst1[7]);
- copy_src_last1 = (u32 *)(&copy_src1[7]);
-
- copy_dst_last0[0] = copy_src_last0[0];
- copy_dst_last1[0] = copy_src_last1[0];
-
- /* If there are TLVs to copy, do so */
- if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64))
- clib_memcpy (&copy_dst0[3], t0->rewrite + 64 ,
- _vec_len (t0->rewrite)-64);
-
- if (PREDICT_FALSE (_vec_len(t1->rewrite) > 64))
- clib_memcpy (&copy_dst0[3], t1->rewrite + 64 ,
- _vec_len (t1->rewrite)-64);
-
- /* fix the <bleep>ing outer-IP checksum */
- sum0 = ip0->checksum;
- /* old_l0 always 0, see the rewrite setup */
- new_l0 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
-
- sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
- length /* changed member */);
- ip0->checksum = ip_csum_fold (sum0);
- ip0->length = new_l0;
-
- sum1 = ip1->checksum;
- /* old_l1 always 0, see the rewrite setup */
- new_l1 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
-
- sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
- length /* changed member */);
- ip1->checksum = ip_csum_fold (sum1);
- ip1->length = new_l1;
-
- /* Fix UDP length */
- udp0 = (udp_header_t *)(ip0+1);
- new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
- - sizeof (*ip0));
- udp1 = (udp_header_t *)(ip1+1);
- new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
- - sizeof (*ip1));
-
- udp0->length = new_l0;
- udp1->length = new_l1;
-
- /* Reset to look up tunnel partner in the configured FIB */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
- vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
- pkts_encapsulated += 2;
-
- len0 = vlib_buffer_length_in_chain(vm, b0);
- len1 = vlib_buffer_length_in_chain(vm, b0);
- stats_n_packets += 2;
- stats_n_bytes += len0 + len1;
-
- /* Batch stats increment on the same vxlan tunnel so counter is not
- incremented per packet. Note stats are still incremented for deleted
- and admin-down tunnel where packets are dropped. It is not worthwhile
- to check for this rare case and affect normal path performance. */
- if (PREDICT_FALSE(
- (sw_if_index0 != stats_sw_if_index)
- || (sw_if_index1 != stats_sw_if_index))) {
- stats_n_packets -= 2;
- stats_n_bytes -= len0 + len1;
- if (sw_if_index0 == sw_if_index1) {
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_sw_if_index = sw_if_index0;
- stats_n_packets = 2;
- stats_n_bytes = len0 + len1;
- } else {
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, sw_if_index0, 1, len0);
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, sw_if_index1, 1, len1);
- }
- }
-
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- vxlan_gpe_encap_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->tunnel_index = t0 - ngm->tunnels;
- }
-
- if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
- {
- vxlan_gpe_encap_trace_t *tr =
- vlib_add_trace (vm, node, b1, sizeof (*tr));
- tr->tunnel_index = t1 - ngm->tunnels;
- }
-
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 bi0;
- vlib_buffer_t * b0;
- u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
- u32 sw_if_index0, len0;
- vnet_hw_interface_t * hi0;
- ip4_header_t * ip0;
- udp_header_t * udp0;
- u64 * copy_src0, * copy_dst0;
- u32 * copy_src_last0, * copy_dst_last0;
- vxlan_gpe_tunnel_t * t0;
- u16 new_l0;
- ip_csum_t sum0;
-
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- /* 1-wide cache? */
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
- hi0 = vnet_get_sup_hw_interface
- (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
-
- t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
-
- ASSERT(vec_len(t0->rewrite) >= 24);
-
- /* Apply the rewrite string. $$$$ vnet_rewrite? */
- vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
-
- ip0 = vlib_buffer_get_current(b0);
- /* Copy the fixed header */
- copy_dst0 = (u64 *) ip0;
- copy_src0 = (u64 *) t0->rewrite;
-
- ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
-
- /* Copy first 36 octets 8-bytes at a time */
-#define _(offs) copy_dst0[offs] = copy_src0[offs];
- foreach_fixed_header_offset;
-#undef _
- /* Last 4 octets. Hopefully gcc will be our friend */
- copy_dst_last0 = (u32 *)(&copy_dst0[7]);
- copy_src_last0 = (u32 *)(&copy_src0[7]);
-
- copy_dst_last0[0] = copy_src_last0[0];
-
- /* If there are TLVs to copy, do so */
- if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64))
- clib_memcpy (&copy_dst0[3], t0->rewrite + 64 ,
- _vec_len (t0->rewrite)-64);
-
- /* fix the <bleep>ing outer-IP checksum */
- sum0 = ip0->checksum;
- /* old_l0 always 0, see the rewrite setup */
- new_l0 =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
-
- sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
- length /* changed member */);
- ip0->checksum = ip_csum_fold (sum0);
- ip0->length = new_l0;
-
- /* Fix UDP length */
- udp0 = (udp_header_t *)(ip0+1);
- new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
- - sizeof (*ip0));
-
- udp0->length = new_l0;
-
- /* Reset to look up tunnel partner in the configured FIB */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
- pkts_encapsulated ++;
-
- len0 = vlib_buffer_length_in_chain(vm, b0);
- stats_n_packets += 1;
- stats_n_bytes += len0;
-
- /* Batch stats increment on the same vxlan tunnel so counter is not
- * incremented per packet. Note stats are still incremented for deleted
- * and admin-down tunnel where packets are dropped. It is not worthwhile
- * to check for this rare case and affect normal path performance. */
- if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
- {
- stats_n_packets -= 1;
- stats_n_bytes -= len0;
- if (stats_n_packets)
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
- cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = 1;
- stats_n_bytes = len0;
- stats_sw_if_index = sw_if_index0;
- }
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- vxlan_gpe_encap_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
- tr->tunnel_index = t0 - ngm->tunnels;
- }
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+ u32 sw_if_index0, len0;
+ vnet_hw_interface_t * hi0;
+ vxlan_gpe_tunnel_t * t0;
+ u8 is_ip4_0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* 1-wide cache? */
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
+
+ t0 = pool_elt_at_index(ngm->tunnels, hi0->dev_instance);
+
+ is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+ vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
+
+ /* Reset to look up tunnel partner in the configured FIB */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+ pkts_encapsulated++;
+
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan tunnel so counter is not
+ * incremented per packet. Note stats are still incremented for deleted
+ * and admin-down tunnel where packets are dropped. It is not worthwhile
+ * to check for this rare case and affect normal path performance. */
+ if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0,
+ sizeof(*tr));
+ tr->tunnel_index = t0 - ngm->tunnels;
+ }
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
}
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
vlib_node_increment_counter (vm, node->node_index,
VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
pkts_encapsulated);
/* Increment any remaining batch stats */
- if (stats_n_packets) {
- vlib_increment_combined_counter(
- im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
- stats_sw_if_index, stats_n_packets, stats_n_bytes);
- node->runtime_data[0] = stats_sw_if_index;
- }
+ if (stats_n_packets)
+ {
+ vlib_increment_combined_counter (
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
+ stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
return from_frame->n_vectors;
}
@@ -418,8 +340,9 @@ VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
.n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,
.next_nodes = {
- [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
- [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
+ [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
},
};
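(As an aside on the magic numbers in the new encap inlines: the rewrite lengths handed to ip_udp_encap_one follow directly from the packed header layouts, IPv4 20 + UDP 8 + VXLAN-GPE 8 = 36 octets and IPv6 40 + UDP 8 + VXLAN-GPE 8 = 56 octets, which is exactly what the two ASSERTs verify.)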
diff --git a/vnet/vnet/vxlan-gpe/vxlan_gpe.c b/vnet/vnet/vxlan-gpe/vxlan_gpe.c
index 13eda440ed1..bf1623af372 100644
--- a/vnet/vnet/vxlan-gpe/vxlan_gpe.c
+++ b/vnet/vnet/vxlan-gpe/vxlan_gpe.c
@@ -13,6 +13,7 @@
* limitations under the License.
*/
#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/ip/format.h>
vxlan_gpe_main_t vxlan_gpe_main;
@@ -23,8 +24,10 @@ u8 * format_vxlan_gpe_tunnel (u8 * s, va_list * args)
s = format (s, "[%d] local: %U remote: %U ",
t - gm->tunnels,
- format_ip4_address, &t->local,
- format_ip4_address, &t->remote);
+ format_ip46_address, &t->local, IP46_TYPE_ANY,
+ format_ip46_address, &t->remote, IP46_TYPE_ANY);
+
+ s = format (s, " vxlan VNI %d ", t->vni);
switch (t->protocol)
{
@@ -48,8 +51,6 @@ u8 * format_vxlan_gpe_tunnel (u8 * s, va_list * args)
t->encap_fib_index,
t->decap_fib_index);
- s = format (s, " vxlan VNI %d ", t->vni);
-
return s;
}
@@ -121,24 +122,26 @@ VNET_HW_INTERFACE_CLASS (vxlan_gpe_hw_class) = {
#define foreach_gpe_copy_field \
-_(local.as_u32) \
-_(remote.as_u32) \
_(vni) \
_(protocol) \
_(encap_fib_index) \
-_(decap_fib_index)
-
-#define foreach_copy_field \
-_(src.as_u32) \
-_(dst.as_u32) \
-_(vni) \
-_(encap_fib_index) \
_(decap_fib_index) \
_(decap_next_index)
+#define foreach_copy_ipv4 { \
+ _(local.ip4.as_u32) \
+ _(remote.ip4.as_u32) \
+}
+#define foreach_copy_ipv6 { \
+ _(local.ip6.as_u64[0]) \
+ _(local.ip6.as_u64[1]) \
+ _(remote.ip6.as_u64[0]) \
+ _(remote.ip6.as_u64[1]) \
+}
-static int vxlan_gpe_rewrite (vxlan_gpe_tunnel_t * t)
+
+static int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t)
{
u8 *rw = 0;
ip4_header_t * ip0;
@@ -158,8 +161,8 @@ static int vxlan_gpe_rewrite (vxlan_gpe_tunnel_t * t)
ip0->protocol = IP_PROTOCOL_UDP;
/* we fix up the ip4 header length and checksum after-the-fact */
- ip0->src_address.as_u32 = t->local.as_u32;
- ip0->dst_address.as_u32 = t->remote.as_u32;
+ ip0->src_address.as_u32 = t->local.ip4.as_u32;
+ ip0->dst_address.as_u32 = t->remote.ip4.as_u32;
ip0->checksum = ip4_header_checksum (ip0);
/* UDP header, randomize src port on something, maybe? */
@@ -176,6 +179,44 @@ static int vxlan_gpe_rewrite (vxlan_gpe_tunnel_t * t)
return (0);
}
+static int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t)
+{
+ u8 *rw = 0;
+ ip6_header_t * ip0;
+ ip6_vxlan_gpe_header_t * h0;
+ int len;
+
+ len = sizeof (*h0);
+
+ vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+
+ h0 = (ip6_vxlan_gpe_header_t *) rw;
+
+ /* Fixed portion of the (outer) ip6 header */
+ ip0 = &h0->ip6;
+ ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(6 << 28);
+ ip0->hop_limit = 255;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ ip0->src_address.as_u64[0] = t->local.ip6.as_u64[0];
+ ip0->src_address.as_u64[1] = t->local.ip6.as_u64[1];
+ ip0->dst_address.as_u64[0] = t->remote.ip6.as_u64[0];
+ ip0->dst_address.as_u64[1] = t->remote.ip6.as_u64[1];
+
+ /* UDP header, randomize src port on something, maybe? */
+ h0->udp.src_port = clib_host_to_net_u16 (4790);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gpe);
+
+ /* VXLAN header. Are we having fun yet? */
+ h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
+ h0->vxlan.ver_res = VXLAN_GPE_VERSION;
+ h0->vxlan.protocol = t->protocol;
+ h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8);
+
+ t->rewrite = rw;
+ return (0);
+}
+
int vnet_vxlan_gpe_add_del_tunnel
(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp)
{
@@ -187,15 +228,29 @@ int vnet_vxlan_gpe_add_del_tunnel
u32 hw_if_index = ~0;
u32 sw_if_index = ~0;
int rv;
- vxlan_gpe_tunnel_key_t key, *key_copy;
+ vxlan4_gpe_tunnel_key_t key4, *key4_copy;
+ vxlan6_gpe_tunnel_key_t key6, *key6_copy;
hash_pair_t *hp;
- key.local = a->local.as_u32;
- key.remote = a->remote.as_u32;
- key.vni = clib_host_to_net_u32 (a->vni << 8);
- key.pad = 0;
-
- p = hash_get_mem (gm->vxlan_gpe_tunnel_by_key, &key);
+ if (!a->is_ip6)
+ {
+ key4.local = a->local.ip4.as_u32;
+ key4.remote = a->remote.ip4.as_u32;
+ key4.vni = clib_host_to_net_u32 (a->vni << 8);
+ key4.pad = 0;
+
+ p = hash_get_mem(gm->vxlan4_gpe_tunnel_by_key, &key4);
+ }
+ else
+ {
+ key6.local.as_u64[0] = a->local.ip6.as_u64[0];
+ key6.local.as_u64[1] = a->local.ip6.as_u64[1];
+ key6.remote.as_u64[0] = a->remote.ip6.as_u64[0];
+ key6.remote.as_u64[1] = a->remote.ip6.as_u64[1];
+ key6.vni = clib_host_to_net_u32 (a->vni << 8);
+
+ p = hash_get_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
+ }
if (a->is_add)
{
@@ -212,21 +267,43 @@ int vnet_vxlan_gpe_add_del_tunnel
/* copy from arg structure */
#define _(x) t->x = a->x;
foreach_gpe_copy_field;
+ if (!a->is_ip6) foreach_copy_ipv4
+ else foreach_copy_ipv6
#undef _
- rv = vxlan_gpe_rewrite (t);
+ if (a->is_ip6) {
+ /* copy the key */
+ t->key6 = key6;
+ }
+
+ if (!a->is_ip6) t->flags |= VXLAN_GPE_TUNNEL_IS_IPV4;
+
+ if (!a->is_ip6) {
+ rv = vxlan4_gpe_rewrite (t);
+ } else {
+ rv = vxlan6_gpe_rewrite (t);
+ }
if (rv)
- {
+ {
pool_put (gm->tunnels, t);
return rv;
- }
-
- key_copy = clib_mem_alloc (sizeof (*key_copy));
- clib_memcpy (key_copy, &key, sizeof (*key_copy));
+ }
- hash_set_mem (gm->vxlan_gpe_tunnel_by_key, key_copy,
- t - gm->tunnels);
+ if (!a->is_ip6)
+ {
+ key4_copy = clib_mem_alloc (sizeof (*key4_copy));
+ clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
+ hash_set_mem (gm->vxlan4_gpe_tunnel_by_key, key4_copy,
+ t - gm->tunnels);
+ }
+ else
+ {
+ key6_copy = clib_mem_alloc (sizeof (*key6_copy));
+ clib_memcpy (key6_copy, &key6, sizeof (*key6_copy));
+ hash_set_mem (gm->vxlan6_gpe_tunnel_by_key, key6_copy,
+ t - gm->tunnels);
+ }
if (vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices) > 0)
{
@@ -264,10 +341,20 @@ int vnet_vxlan_gpe_add_del_tunnel
vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */);
vec_add1 (gm->free_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index);
- hp = hash_get_pair (gm->vxlan_gpe_tunnel_by_key, &key);
- key_copy = (void *)(hp->key);
- hash_unset_mem (gm->vxlan_gpe_tunnel_by_key, &key);
- clib_mem_free (key_copy);
+ if (!a->is_ip6)
+ {
+ hp = hash_get_pair (gm->vxlan4_gpe_tunnel_by_key, &key4);
+ key4_copy = (void *)(hp->key);
+ hash_unset_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
+ clib_mem_free (key4_copy);
+ }
+ else
+ {
+ hp = hash_get_pair (gm->vxlan6_gpe_tunnel_by_key, &key6);
+ key6_copy = (void *)(hp->key);
+ hash_unset_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
+ clib_mem_free (key6_copy);
+ }
vec_free (t->rewrite);
pool_put (gm->tunnels, t);
@@ -279,7 +366,7 @@ int vnet_vxlan_gpe_add_del_tunnel
return 0;
}
-static u32 fib_index_from_fib_id (u32 fib_id)
+static u32 fib4_index_from_fib_id (u32 fib_id)
{
ip4_main_t * im = &ip4_main;
uword * p;
@@ -291,6 +378,18 @@ static u32 fib_index_from_fib_id (u32 fib_id)
return p[0];
}
+static u32 fib6_index_from_fib_id (u32 fib_id)
+{
+ ip6_main_t * im = &ip6_main;
+ uword * p;
+
+ p = hash_get (im->fib_index_by_table_id, fib_id);
+ if (!p)
+ return ~0;
+
+ return p[0];
+}
+
static uword unformat_gpe_decap_next (unformat_input_t * input, va_list * args)
{
u32 * result = va_arg (*args, u32 *);
@@ -318,9 +417,11 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
{
unformat_input_t _line_input, * line_input = &_line_input;
u8 is_add = 1;
- ip4_address_t local, remote;
+ ip46_address_t local, remote;
u8 local_set = 0;
u8 remote_set = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
u32 encap_fib_index = 0;
u32 decap_fib_index = 0;
u8 protocol = VXLAN_GPE_PROTOCOL_IP4;
@@ -340,20 +441,47 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
if (unformat (line_input, "del"))
is_add = 0;
else if (unformat (line_input, "local %U",
- unformat_ip4_address, &local))
+ unformat_ip4_address, &local.ip4))
+ {
local_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "remote %U",
+ unformat_ip4_address, &remote.ip4))
+ {
+ remote_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "local %U",
+ unformat_ip6_address, &local.ip6))
+ {
+ local_set = 1;
+ ipv6_set = 1;
+ }
else if (unformat (line_input, "remote %U",
- unformat_ip4_address, &remote))
+ unformat_ip6_address, &remote.ip6))
+ {
remote_set = 1;
+ ipv6_set = 1;
+ }
else if (unformat (line_input, "encap-vrf-id %d", &tmp))
{
- encap_fib_index = fib_index_from_fib_id (tmp);
+ if (ipv6_set)
+ encap_fib_index = fib6_index_from_fib_id (tmp);
+ else
+ encap_fib_index = fib4_index_from_fib_id (tmp);
+
if (encap_fib_index == ~0)
return clib_error_return (0, "nonexistent encap fib id %d", tmp);
}
else if (unformat (line_input, "decap-vrf-id %d", &tmp))
{
- decap_fib_index = fib_index_from_fib_id (tmp);
+
+ if (ipv6_set)
+ decap_fib_index = fib6_index_from_fib_id (tmp);
+ else
+ decap_fib_index = fib4_index_from_fib_id (tmp);
+
if (decap_fib_index == ~0)
return clib_error_return (0, "nonexistent decap fib id %d", tmp);
}
@@ -383,15 +511,25 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
if (remote_set == 0)
return clib_error_return (0, "tunnel remote address not specified");
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+
+ if ((ipv4_set && memcmp(&local.ip4, &remote.ip4, sizeof(local.ip4)) == 0) ||
+ (ipv6_set && memcmp(&local.ip6, &remote.ip6, sizeof(local.ip6)) == 0))
+ return clib_error_return (0, "src and dst addresses are identical");
+
if (vni_set == 0)
return clib_error_return (0, "vni not specified");
memset (a, 0, sizeof (*a));
a->is_add = is_add;
+ a->is_ip6 = ipv6_set;
#define _(x) a->x = x;
foreach_gpe_copy_field;
+ if (ipv4_set) foreach_copy_ipv4
+ else foreach_copy_ipv6
#undef _
rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
@@ -421,7 +559,7 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
VLIB_CLI_COMMAND (create_vxlan_gpe_tunnel_command, static) = {
.path = "create vxlan-gpe tunnel",
.short_help =
- "create vxlan-gpe tunnel local <ip4-addr> remote <ip4-addr>"
+ "create vxlan-gpe tunnel local <local-addr> remote <remote-addr>"
" vni <nn> [next-ip4][next-ip6][next-ethernet][next-nsh]"
" [encap-vrf-id <nn>] [decap-vrf-id <nn>]"
" [del]\n",
@@ -458,12 +596,18 @@ clib_error_t *vxlan_gpe_init (vlib_main_t *vm)
gm->vnet_main = vnet_get_main();
gm->vlib_main = vm;
-
- gm->vxlan_gpe_tunnel_by_key
- = hash_create_mem (0, sizeof(vxlan_gpe_tunnel_key_t), sizeof (uword));
- udp_register_dst_port (vm, UDP_DST_PORT_vxlan_gpe,
- vxlan_gpe_input_node.index, 1 /* is_ip4 */);
+ gm->vxlan4_gpe_tunnel_by_key
+ = hash_create_mem (0, sizeof(vxlan4_gpe_tunnel_key_t), sizeof (uword));
+
+ gm->vxlan6_gpe_tunnel_by_key
+ = hash_create_mem (0, sizeof(vxlan6_gpe_tunnel_key_t), sizeof (uword));
+
+
+ udp_register_dst_port (vm, UDP_DST_PORT_vxlan_gpe,
+ vxlan4_gpe_input_node.index, 1 /* is_ip4 */);
+ udp_register_dst_port (vm, UDP_DST_PORT_vxlan6_gpe,
+ vxlan6_gpe_input_node.index, 0 /* is_ip4 */);
return 0;
}
diff --git a/vnet/vnet/vxlan-gpe/vxlan_gpe.h b/vnet/vnet/vxlan-gpe/vxlan_gpe.h
index a51e0b31e3e..730abe70253 100644
--- a/vnet/vnet/vxlan-gpe/vxlan_gpe.h
+++ b/vnet/vnet/vxlan-gpe/vxlan_gpe.h
@@ -23,6 +23,7 @@
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/udp.h>
@@ -32,6 +33,12 @@ typedef CLIB_PACKED (struct {
vxlan_gpe_header_t vxlan; /* 8 bytes */
}) ip4_vxlan_gpe_header_t;
+typedef CLIB_PACKED (struct {
+ ip6_header_t ip6; /* 40 bytes */
+ udp_header_t udp; /* 8 bytes */
+ vxlan_gpe_header_t vxlan; /* 8 bytes */
+}) ip6_vxlan_gpe_header_t;
+
typedef CLIB_PACKED(struct {
/*
* Key fields: local remote, vni
@@ -46,7 +53,17 @@ typedef CLIB_PACKED(struct {
};
u64 as_u64[2];
};
-}) vxlan_gpe_tunnel_key_t;
+}) vxlan4_gpe_tunnel_key_t;
+
+typedef CLIB_PACKED(struct {
+ /*
+ * Key fields: local remote, vni
+ * all fields in NET byte order
+ */
+ ip6_address_t local;
+ ip6_address_t remote;
+ u32 vni; /* shifted 8 bits */
+}) vxlan6_gpe_tunnel_key_t;
typedef struct {
/* Rewrite string. $$$$ embed vnet_rewrite header */
@@ -56,8 +73,8 @@ typedef struct {
u8 protocol;
/* tunnel src and dst addresses */
- ip4_address_t local;
- ip4_address_t remote;
+ ip46_address_t local;
+ ip46_address_t remote;
/* FIB indices */
u32 encap_fib_index; /* tunnel partner lookup here */
@@ -66,12 +83,25 @@ typedef struct {
/* vxlan VNI in HOST byte order, shifted left 8 bits */
u32 vni;
+ /*decap next index*/
+ u32 decap_next_index;
+
/* vnet intfc hw/sw_if_index */
u32 hw_if_index;
u32 sw_if_index;
+ union { /* storage for the hash key */
+ vxlan4_gpe_tunnel_key_t key4;
+ vxlan6_gpe_tunnel_key_t key6;
+ };
+
+ /* flags */
+ u32 flags;
} vxlan_gpe_tunnel_t;
+/* Flags for vxlan_gpe_tunnel_t.flags */
+#define VXLAN_GPE_TUNNEL_IS_IPV4 1
+
#define foreach_vxlan_gpe_input_next \
_(DROP, "error-drop") \
_(IP4_INPUT, "ip4-input") \
@@ -97,7 +127,8 @@ typedef struct {
vxlan_gpe_tunnel_t *tunnels;
/* lookup tunnel by key */
- uword * vxlan_gpe_tunnel_by_key;
+ uword * vxlan4_gpe_tunnel_by_key;
+ uword * vxlan6_gpe_tunnel_by_key;
/* Free vlib hw_if_indices */
u32 * free_vxlan_gpe_tunnel_hw_if_indices;
@@ -113,13 +144,15 @@ typedef struct {
vxlan_gpe_main_t vxlan_gpe_main;
extern vlib_node_registration_t vxlan_gpe_encap_node;
-extern vlib_node_registration_t vxlan_gpe_input_node;
+extern vlib_node_registration_t vxlan4_gpe_input_node;
+extern vlib_node_registration_t vxlan6_gpe_input_node;
u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args);
typedef struct {
u8 is_add;
- ip4_address_t local, remote;
+ u8 is_ip6;
+ ip46_address_t local, remote;
u8 protocol;
u32 encap_fib_index;
u32 decap_fib_index;
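A rough sketch of how a caller drives the reworked args structure for an IPv6 tunnel; everything below (addresses, VNI, FIB indices, decap next) is an illustrative assumption, not taken from the patch:

  vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a;
  u32 sw_if_index = ~0;
  int rv;

  memset (a, 0, sizeof (*a));
  a->is_add = 1;
  a->is_ip6 = 1;                          /* selects the key6 / vxlan6 code paths */
  a->local.ip6.as_u64[0] = clib_host_to_net_u64 (0x20010db800000000ULL);  /* 2001:db8::1 */
  a->local.ip6.as_u64[1] = clib_host_to_net_u64 (0x0000000000000001ULL);
  a->remote.ip6.as_u64[0] = clib_host_to_net_u64 (0x20010db800000000ULL); /* 2001:db8::2 */
  a->remote.ip6.as_u64[1] = clib_host_to_net_u64 (0x0000000000000002ULL);
  a->protocol = VXLAN_GPE_PROTOCOL_IP4;   /* inner payload type */
  a->vni = 13;                            /* host byte order; shifted/swapped when the key is built */
  a->encap_fib_index = 0;                 /* assumes FIB table 0 exists */
  a->decap_fib_index = 0;
  a->decap_next_index = VXLAN_GPE_INPUT_NEXT_IP4_INPUT;
  rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
  /* rv == 0 on success; sw_if_index then identifies the new tunnel interface */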
diff --git a/vpp-api-test/vat/api_format.c b/vpp-api-test/vat/api_format.c
index 3fc56d700e6..59da077e053 100644
--- a/vpp-api-test/vat/api_format.c
+++ b/vpp-api-test/vat/api_format.c
@@ -8062,8 +8062,10 @@ static int api_vxlan_gpe_add_del_tunnel (vat_main_t * vam)
unformat_input_t * line_input = vam->input;
vl_api_vxlan_gpe_add_del_tunnel_t *mp;
f64 timeout;
- ip4_address_t local, remote;
+ ip4_address_t local4, remote4;
+ ip6_address_t local6, remote6;
u8 is_add = 1;
+ u8 ipv4_set = 0, ipv6_set = 0;
u8 local_set = 0;
u8 remote_set = 0;
u32 encap_vrf_id = 0;
@@ -8076,11 +8078,29 @@ static int api_vxlan_gpe_add_del_tunnel (vat_main_t * vam)
if (unformat (line_input, "del"))
is_add = 0;
else if (unformat (line_input, "local %U",
- unformat_ip4_address, &local))
+ unformat_ip4_address, &local4))
+ {
+ local_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "remote %U",
+ unformat_ip4_address, &remote4))
+ {
+ remote_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "local %U",
+ unformat_ip6_address, &local6))
+ {
local_set = 1;
+ ipv6_set = 1;
+ }
else if (unformat (line_input, "remote %U",
- unformat_ip4_address, &remote))
+ unformat_ip6_address, &remote6))
+ {
remote_set = 1;
+ ipv6_set = 1;
+ }
else if (unformat (line_input, "encap-vrf-id %d", &encap_vrf_id))
;
else if (unformat (line_input, "decap-vrf-id %d", &decap_vrf_id))
@@ -8109,6 +8129,10 @@ static int api_vxlan_gpe_add_del_tunnel (vat_main_t * vam)
errmsg ("tunnel remote address not specified\n");
return -99;
}
+ if (ipv4_set && ipv6_set) {
+ errmsg ("both IPv4 and IPv6 addresses specified");
+ return -99;
+ }
if (vni_set == 0) {
errmsg ("vni not specified\n");
@@ -8117,14 +8141,21 @@ static int api_vxlan_gpe_add_del_tunnel (vat_main_t * vam)
M(VXLAN_GPE_ADD_DEL_TUNNEL, vxlan_gpe_add_del_tunnel);
- mp->local = local.as_u32;
- mp->remote = remote.as_u32;
+
+ if (ipv6_set) {
+ clib_memcpy(&mp->local, &local6, sizeof(local6));
+ clib_memcpy(&mp->remote, &remote6, sizeof(remote6));
+ } else {
+ clib_memcpy(&mp->local, &local4, sizeof(local4));
+ clib_memcpy(&mp->remote, &remote4, sizeof(remote4));
+ }
+
mp->encap_vrf_id = ntohl(encap_vrf_id);
mp->decap_vrf_id = ntohl(decap_vrf_id);
mp->protocol = ntohl(protocol);
mp->vni = ntohl(vni);
mp->is_add = is_add;
-
+ mp->is_ipv6 = ipv6_set;
S; W;
/* NOTREACHED */
diff --git a/vpp/api/api.c b/vpp/api/api.c
index 3bf2ed47088..8b801e5a5c0 100644
--- a/vpp/api/api.c
+++ b/vpp/api/api.c
@@ -4568,12 +4568,24 @@ vl_api_vxlan_gpe_add_del_tunnel_t_handler
decap_fib_index = ntohl(mp->decap_vrf_id);
}
+ /* Check src & dst are different */
+ if ((mp->is_ipv6 && memcmp(mp->local, mp->remote, 16) == 0) ||
+     (!mp->is_ipv6 && memcmp(mp->local, mp->remote, 4) == 0)) {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
memset (a, 0, sizeof (*a));
a->is_add = mp->is_add;
+ a->is_ip6 = mp->is_ipv6;
/* ip addresses sent in network byte order */
- a->local.as_u32 = ntohl(mp->local);
- a->remote.as_u32 = ntohl(mp->remote);
+ if (a->is_ip6) {
+ clib_memcpy(&(a->local.ip6), mp->local, 16);
+ clib_memcpy(&(a->remote.ip6), mp->remote, 16);
+ } else {
+ clib_memcpy(&(a->local.ip4), mp->local, 4);
+ clib_memcpy(&(a->remote.ip4), mp->remote, 4);
+ }
a->encap_fib_index = encap_fib_index;
a->decap_fib_index = decap_fib_index;
a->protocol = protocol;
diff --git a/vpp/api/custom_dump.c b/vpp/api/custom_dump.c
index 47c9f662517..ba4cf538d2f 100644
--- a/vpp/api/custom_dump.c
+++ b/vpp/api/custom_dump.c
@@ -1580,8 +1580,9 @@ static void *vl_api_vxlan_gpe_add_del_tunnel_t_print
s = format (0, "SCRIPT: vxlan_gpe_add_del_tunnel ");
- s = format (s, "local %U remote %U ", format_ip4_address, &mp->local,
- format_ip4_address, &mp->remote);
+ s = format (s, "local %U ", format_ip46_address, &mp->local, mp->is_ipv6);
+
+ s = format (s, "remote %U ", format_ip46_address, &mp->remote, mp->is_ipv6);
s = format (s, "protocol %d ", ntohl(mp->protocol));
diff --git a/vpp/api/vpe.api b/vpp/api/vpe.api
index 2a827d12188..9c90c338e73 100644
--- a/vpp/api/vpe.api
+++ b/vpp/api/vpe.api
@@ -2113,8 +2113,9 @@ manual_java define l2_fib_table_dump {
define vxlan_gpe_add_del_tunnel {
u32 client_index;
u32 context;
- u32 local;
- u32 remote;
+ u8 is_ipv6;
+ u8 local[16];
+ u8 remote[16];
u32 encap_vrf_id;
u32 decap_vrf_id;
u8 protocol;