Diffstat (limited to 'src/vnet/vxlan-gpe/vxlan_gpe.c')
-rw-r--r--  src/vnet/vxlan-gpe/vxlan_gpe.c | 688
1 file changed, 622 insertions(+), 66 deletions(-)
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.c b/src/vnet/vxlan-gpe/vxlan_gpe.c
index 1e674085880..3a92c88fbb6 100644
--- a/src/vnet/vxlan-gpe/vxlan_gpe.c
+++ b/src/vnet/vxlan-gpe/vxlan_gpe.c
@@ -20,6 +20,27 @@
#include <vnet/vxlan-gpe/vxlan_gpe.h>
#include <vnet/fib/fib.h>
#include <vnet/ip/format.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/mfib/mfib_table.h>
+#include <vnet/adj/adj_mcast.h>
+#include <vnet/interface.h>
+#include <vlib/vlib.h>
+
+/**
+ * @file
+ * @brief VXLAN-GPE.
+ *
+ * VXLAN-GPE provides the features needed to allow L2 bridge domains (BDs)
+ * to span multiple servers. This is done by building an L2 overlay on
+ * top of an L3 network underlay using VXLAN-GPE tunnels.
+ *
+ * This makes it possible for servers to be co-located in the same data
+ * center or be separated geographically as long as they are reachable
+ * through the underlay L3 network.
+ *
+ * You can refer to this kind of L2 overlay bridge domain as a VXLAN-GPE segment.
+ */
vxlan_gpe_main_t vxlan_gpe_main;
@@ -35,10 +56,10 @@ vxlan_gpe_main_t vxlan_gpe_main;
u8 * format_vxlan_gpe_tunnel (u8 * s, va_list * args)
{
vxlan_gpe_tunnel_t * t = va_arg (*args, vxlan_gpe_tunnel_t *);
- vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+ vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
s = format (s, "[%d] local: %U remote: %U ",
- t - gm->tunnels,
+ t - ngm->tunnels,
format_ip46_address, &t->local, IP46_TYPE_ANY,
format_ip46_address, &t->remote, IP46_TYPE_ANY);
@@ -105,10 +126,9 @@ static uword dummy_interface_tx (vlib_main_t * vm,
static clib_error_t *
vxlan_gpe_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
- if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
- vnet_hw_interface_set_flags (vnm, hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP);
- else
- vnet_hw_interface_set_flags (vnm, hw_if_index, 0);
+ u32 hw_flags = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
+ VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
return 0;
}
@@ -142,13 +162,84 @@ VNET_HW_INTERFACE_CLASS (vxlan_gpe_hw_class) = {
.name = "VXLAN_GPE",
.format_header = format_vxlan_gpe_header_with_length,
.build_rewrite = default_build_rewrite,
- .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
};
+static void
+vxlan_gpe_tunnel_restack_dpo(vxlan_gpe_tunnel_t * t)
+{
+ dpo_id_t dpo = DPO_INVALID;
+ u32 encap_index = vxlan_gpe_encap_node.index;
+ fib_forward_chain_type_t forw_type = ip46_address_is_ip4(&t->remote) ?
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4 : FIB_FORW_CHAIN_TYPE_UNICAST_IP6;
+
+ fib_entry_contribute_forwarding (t->fib_entry_index, forw_type, &dpo);
+ dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
+ dpo_reset(&dpo);
+}
+
+static vxlan_gpe_tunnel_t *
+vxlan_gpe_tunnel_from_fib_node (fib_node_t *node)
+{
+#if (CLIB_DEBUG > 0)
+ ASSERT(FIB_NODE_TYPE_VXLAN_GPE_TUNNEL == node->fn_type);
+#endif
+ return ((vxlan_gpe_tunnel_t*) (((char*)node) -
+ STRUCT_OFFSET_OF(vxlan_gpe_tunnel_t, node)));
+}
+
+/**
+ * Function definition to backwalk a FIB node -
+ * here we restack the tunnel's DPO on the updated forwarding of the
+ * VXLAN_GPE destination IP, feeding the encap node.
+ */
+static fib_node_back_walk_rc_t
+vxlan_gpe_tunnel_back_walk (fib_node_t *node,
+ fib_node_back_walk_ctx_t *ctx)
+{
+ vxlan_gpe_tunnel_restack_dpo(vxlan_gpe_tunnel_from_fib_node(node));
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
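For orientation: this callback fires because each unicast tunnel registers
itself as a child of the FIB entry for its remote address further down in
this diff, so any change to that entry's forwarding walks back to the tunnel
and triggers the restack above. The pairing, as it appears in
vnet_vxlan_gpe_add_del_tunnel below:

    t->fib_entry_index = fib_table_entry_special_add
        (t->encap_fib_index, &tun_remote_pfx, FIB_SOURCE_RR,
         FIB_ENTRY_FLAG_NONE);
    t->sibling_index = fib_entry_child_add
        (t->fib_entry_index, FIB_NODE_TYPE_VXLAN_GPE_TUNNEL, t - ngm->tunnels);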
+/**
+ * Function definition to get a FIB node from its index
+ */
+static fib_node_t*
+vxlan_gpe_tunnel_fib_node_get (fib_node_index_t index)
+{
+ vxlan_gpe_tunnel_t * t;
+ vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
+
+ t = pool_elt_at_index(ngm->tunnels, index);
+
+ return (&t->node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+static void
+vxlan_gpe_tunnel_last_lock_gone (fib_node_t *node)
+{
+ /*
+ * The VXLAN_GPE tunnel is a root of the graph. As such
+ * it never has children and thus is never locked.
+ */
+ ASSERT(0);
+}
+
+/*
+ * Virtual function table registered by VXLAN_GPE tunnels
+ * for participation in the FIB object graph.
+ */
+const static fib_node_vft_t vxlan_gpe_vft = {
+ .fnv_get = vxlan_gpe_tunnel_fib_node_get,
+ .fnv_last_lock = vxlan_gpe_tunnel_last_lock_gone,
+ .fnv_back_walk = vxlan_gpe_tunnel_back_walk,
+};
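Registering this VFT is what binds the three callbacks to
FIB_NODE_TYPE_VXLAN_GPE_TUNNEL; the diff performs the registration in
vxlan_gpe_init() at the end of the file:

    fib_node_register_type (FIB_NODE_TYPE_VXLAN_GPE_TUNNEL, &vxlan_gpe_vft);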
#define foreach_gpe_copy_field \
_(vni) \
-_(protocol) \
+_(protocol) \
+_(mcast_sw_if_index) \
_(encap_fib_index) \
_(decap_fib_index)
@@ -173,7 +264,7 @@ _(decap_fib_index)
* @return rc
*
*/
-int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
u8 protocol_override, uword encap_next_node)
{
u8 *rw = 0;
@@ -201,7 +292,7 @@ int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
/* UDP header, randomize src port on something, maybe? */
h0->udp.src_port = clib_host_to_net_u16 (4790);
- h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gpe);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE);
/* VXLAN header. Are we having fun yet? */
h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
@@ -230,7 +321,7 @@ int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
* @return rc
*
*/
-int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
u8 protocol_override, uword encap_next_node)
{
u8 *rw = 0;
@@ -258,7 +349,7 @@ int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
/* UDP header, randomize src port on something, maybe? */
h0->udp.src_port = clib_host_to_net_u16 (4790);
- h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gpe);
+ h0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE);
/* VXLAN header. Are we having fun yet? */
h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
@@ -279,6 +370,100 @@ int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
return (0);
}
+static void
+hash_set_key_copy (uword ** h, void * key, uword v) {
+ size_t ksz = hash_header(*h)->user;
+ void * copy = clib_mem_alloc (ksz);
+ clib_memcpy (copy, key, ksz);
+ hash_set_mem (*h, copy, v);
+}
+
+static void
+hash_unset_key_free (uword ** h, void * key) {
+ hash_pair_t * hp = hash_get_pair_mem (*h, key);
+ ASSERT (hp);
+ key = uword_to_pointer (hp->key, void *);
+ hash_unset_mem (*h, key);
+ clib_mem_free (key);
+}
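These two helpers exist because mem-hashes store a pointer to the key rather
than the key bytes themselves, so every entry must own a heap copy of its
key. A minimal sketch of the pairing, using the IPv6 VTEP table this diff
introduces (the address value is illustrative):

    ip46_address_t a = { 0 };
    a.ip6.as_u64[0] = clib_host_to_net_u64 (0x20010db800000000ULL);

    /* insert: allocates a private copy of the key; the table owns it */
    hash_set_key_copy (&vxlan_gpe_main.vtep6, &a.ip6, 1);
    /* remove: finds the stored key pointer and frees that same copy */
    hash_unset_key_free (&vxlan_gpe_main.vtep6, &a.ip6);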
+
+static uword
+vtep_addr_ref(ip46_address_t *ip)
+{
+ uword *vtep = ip46_address_is_ip4(ip) ?
+ hash_get (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
+ hash_get_mem (vxlan_gpe_main.vtep6, &ip->ip6);
+ if (vtep)
+ return ++(*vtep);
+ ip46_address_is_ip4(ip) ?
+ hash_set (vxlan_gpe_main.vtep4, ip->ip4.as_u32, 1) :
+ hash_set_key_copy (&vxlan_gpe_main.vtep6, &ip->ip6, 1);
+ return 1;
+}
+
+static uword
+vtep_addr_unref(ip46_address_t *ip)
+{
+ uword *vtep = ip46_address_is_ip4(ip) ?
+ hash_get (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
+ hash_get_mem (vxlan_gpe_main.vtep6, &ip->ip6);
+ ASSERT(vtep);
+ if (--(*vtep) != 0)
+ return *vtep;
+ ip46_address_is_ip4(ip) ?
+ hash_unset (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
+ hash_unset_key_free (&vxlan_gpe_main.vtep6, &ip->ip6);
+ return 0;
+}
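The return values double as first/last-reference sentinels; a minimal sketch
of how the add and delete paths below use them:

    if (vtep_addr_ref (&t->remote) == 1)
      {
        /* first tunnel towards this VTEP: build the shared mcast state */
      }
    /* ... and on tunnel deletion ... */
    if (vtep_addr_unref (&t->remote) == 0)
      {
        /* last tunnel towards this VTEP: tear the shared state down */
      }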
+
+typedef CLIB_PACKED(union {
+ struct {
+ fib_node_index_t mfib_entry_index;
+ adj_index_t mcast_adj_index;
+ };
+ u64 as_u64;
+}) mcast_shared_t;
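The packed union lets the (mfib entry, adjacency) pair travel as a single
uword-sized hash value; a minimal round-trip sketch of what mcast_shared_add()
stores and mcast_shared_get() rebuilds (assuming, as in VPP, that
fib_node_index_t and adj_index_t are both 32 bits wide):

    mcast_shared_t in = { .mfib_entry_index = 5, .mcast_adj_index = 7 };
    u64 stored = in.as_u64;                     /* value placed in the hash */
    mcast_shared_t out = { .as_u64 = stored };  /* value rebuilt on lookup */
    ASSERT (out.mfib_entry_index == 5 && out.mcast_adj_index == 7);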
+
+static inline mcast_shared_t
+mcast_shared_get(ip46_address_t * ip)
+{
+ ASSERT(ip46_address_is_multicast(ip));
+ uword * p = hash_get_mem (vxlan_gpe_main.mcast_shared, ip);
+ ASSERT(p);
+ return (mcast_shared_t) { .as_u64 = *p };
+}
+
+static inline void
+mcast_shared_add(ip46_address_t *remote,
+ fib_node_index_t mfei,
+ adj_index_t ai)
+{
+ mcast_shared_t new_ep = {
+ .mcast_adj_index = ai,
+ .mfib_entry_index = mfei,
+ };
+
+ hash_set_key_copy (&vxlan_gpe_main.mcast_shared, remote, new_ep.as_u64);
+}
+
+static inline void
+mcast_shared_remove(ip46_address_t *remote)
+{
+ mcast_shared_t ep = mcast_shared_get(remote);
+
+ adj_unlock(ep.mcast_adj_index);
+ mfib_table_entry_delete_index(ep.mfib_entry_index,
+ MFIB_SOURCE_VXLAN_GPE);
+
+ hash_unset_key_free (&vxlan_gpe_main.mcast_shared, remote);
+}
+
+static inline fib_protocol_t
+fib_ip_proto(bool is_ip6)
+{
+ return (is_ip6) ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4;
+}
+
/**
* @brief Add or Del a VXLAN GPE tunnel
*
@@ -291,9 +476,9 @@ int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
int vnet_vxlan_gpe_add_del_tunnel
(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp)
{
- vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+ vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
vxlan_gpe_tunnel_t *t = 0;
- vnet_main_t * vnm = gm->vnet_main;
+ vnet_main_t * vnm = ngm->vnet_main;
vnet_hw_interface_t * hi;
uword * p;
u32 hw_if_index = ~0;
@@ -301,16 +486,16 @@ int vnet_vxlan_gpe_add_del_tunnel
int rv;
vxlan4_gpe_tunnel_key_t key4, *key4_copy;
vxlan6_gpe_tunnel_key_t key6, *key6_copy;
- hash_pair_t *hp;
+ u32 is_ip6 = a->is_ip6;
- if (!a->is_ip6)
+ if (!is_ip6)
{
key4.local = a->local.ip4.as_u32;
key4.remote = a->remote.ip4.as_u32;
key4.vni = clib_host_to_net_u32 (a->vni << 8);
key4.pad = 0;
- p = hash_get_mem(gm->vxlan4_gpe_tunnel_by_key, &key4);
+ p = hash_get_mem(ngm->vxlan4_gpe_tunnel_by_key, &key4);
}
else
{
@@ -320,16 +505,18 @@ int vnet_vxlan_gpe_add_del_tunnel
key6.remote.as_u64[1] = a->remote.ip6.as_u64[1];
key6.vni = clib_host_to_net_u32 (a->vni << 8);
- p = hash_get_mem(gm->vxlan6_gpe_tunnel_by_key, &key6);
+ p = hash_get_mem(ngm->vxlan6_gpe_tunnel_by_key, &key6);
}
if (a->is_add)
{
+ l2input_main_t * l2im = &l2input_main;
+
/* adding a tunnel: tunnel must not already exist */
if (p)
- return VNET_API_ERROR_INVALID_VALUE;
+ return VNET_API_ERROR_TUNNEL_EXIST;
- pool_get_aligned (gm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+ pool_get_aligned (ngm->tunnels, t, CLIB_CACHE_LINE_BYTES);
memset (t, 0, sizeof (*t));
/* copy from arg structure */
@@ -349,51 +536,167 @@ int vnet_vxlan_gpe_add_del_tunnel
if (rv)
{
- pool_put (gm->tunnels, t);
+ pool_put (ngm->tunnels, t);
return rv;
}
- if (!a->is_ip6)
+ if (!is_ip6)
{
key4_copy = clib_mem_alloc (sizeof (*key4_copy));
clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
- hash_set_mem (gm->vxlan4_gpe_tunnel_by_key, key4_copy,
- t - gm->tunnels);
+ hash_set_mem (ngm->vxlan4_gpe_tunnel_by_key, key4_copy,
+ t - ngm->tunnels);
}
else
{
key6_copy = clib_mem_alloc (sizeof (*key6_copy));
clib_memcpy (key6_copy, &key6, sizeof (*key6_copy));
- hash_set_mem (gm->vxlan6_gpe_tunnel_by_key, key6_copy,
- t - gm->tunnels);
+ hash_set_mem (ngm->vxlan6_gpe_tunnel_by_key, key6_copy,
+ t - ngm->tunnels);
}
- if (vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices) > 0)
+ if (vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices) > 0)
{
- hw_if_index = gm->free_vxlan_gpe_tunnel_hw_if_indices
- [vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices)-1];
- _vec_len (gm->free_vxlan_gpe_tunnel_hw_if_indices) -= 1;
+ vnet_interface_main_t * im = &vnm->interface_main;
+ hw_if_index = ngm->free_vxlan_gpe_tunnel_hw_if_indices
+ [vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices)-1];
+ _vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices) -= 1;
hi = vnet_get_hw_interface (vnm, hw_if_index);
- hi->dev_instance = t - gm->tunnels;
+ hi->dev_instance = t - ngm->tunnels;
hi->hw_instance = hi->dev_instance;
+ /* clear old stats of freed tunnel before reuse */
+ sw_if_index = hi->sw_if_index;
+ vnet_interface_counter_lock(im);
+ vlib_zero_combined_counter
+ (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX], sw_if_index);
+ vlib_zero_combined_counter
+ (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX], sw_if_index);
+ vlib_zero_simple_counter
+ (&im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], sw_if_index);
+ vnet_interface_counter_unlock(im);
}
else
{
hw_if_index = vnet_register_interface
- (vnm, vxlan_gpe_device_class.index, t - gm->tunnels,
- vxlan_gpe_hw_class.index, t - gm->tunnels);
+ (vnm, vxlan_gpe_device_class.index, t - ngm->tunnels,
+ vxlan_gpe_hw_class.index, t - ngm->tunnels);
hi = vnet_get_hw_interface (vnm, hw_if_index);
hi->output_node_index = vxlan_gpe_encap_node.index;
}
t->hw_if_index = hw_if_index;
t->sw_if_index = sw_if_index = hi->sw_if_index;
- vec_validate_init_empty (gm->tunnel_index_by_sw_if_index, sw_if_index, ~0);
- gm->tunnel_index_by_sw_if_index[sw_if_index] = t - gm->tunnels;
+ vec_validate_init_empty (ngm->tunnel_index_by_sw_if_index, sw_if_index, ~0);
+ ngm->tunnel_index_by_sw_if_index[sw_if_index] = t - ngm->tunnels;
+
+ /* setup l2 input config with l2 feature and bd 0 to drop packet */
+ vec_validate (l2im->configs, sw_if_index);
+ l2im->configs[sw_if_index].feature_bitmap = L2INPUT_FEAT_DROP;
+ l2im->configs[sw_if_index].bd_index = 0;
+ vnet_sw_interface_t * si = vnet_get_sw_interface (vnm, sw_if_index);
+ si->flags &= ~VNET_SW_INTERFACE_FLAG_HIDDEN;
vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ fib_node_init(&t->node, FIB_NODE_TYPE_VXLAN_GPE_TUNNEL);
+ fib_prefix_t tun_remote_pfx;
+ u32 encap_index = vxlan_gpe_encap_node.index;
+ vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL;
+
+ fib_prefix_from_ip46_addr(&t->remote, &tun_remote_pfx);
+ if (!ip46_address_is_multicast(&t->remote))
+ {
+ /* Unicast tunnel -
+ * source the FIB entry for the tunnel's destination
+ * and become a child thereof. The tunnel will then get poked
+ * when the forwarding for the entry updates, and the tunnel can
+ * re-stack accordingly
+ */
+ vtep_addr_ref(&t->local);
+ t->fib_entry_index = fib_table_entry_special_add
+ (t->encap_fib_index, &tun_remote_pfx, FIB_SOURCE_RR,
+ FIB_ENTRY_FLAG_NONE);
+ t->sibling_index = fib_entry_child_add
+ (t->fib_entry_index, FIB_NODE_TYPE_VXLAN_GPE_TUNNEL, t - ngm->tunnels);
+ vxlan_gpe_tunnel_restack_dpo(t);
+ }
+ else
+ {
+ /* Multicast tunnel -
+ * as the same mcast group can be used for multiple mcast tunnels
+ * with different VNIs, create the output fib adjacency only if
+ * it does not already exist
+ */
+ fib_protocol_t fp = fib_ip_proto(is_ip6);
+
+ if (vtep_addr_ref(&t->remote) == 1)
+ {
+ fib_node_index_t mfei;
+ adj_index_t ai;
+ fib_route_path_t path = {
+ .frp_proto = fp,
+ .frp_addr = zero_addr,
+ .frp_sw_if_index = 0xffffffff,
+ .frp_fib_index = ~0,
+ .frp_weight = 0,
+ .frp_flags = FIB_ROUTE_PATH_LOCAL,
+ };
+ const mfib_prefix_t mpfx = {
+ .fp_proto = fp,
+ .fp_len = (is_ip6 ? 128 : 32),
+ .fp_grp_addr = tun_remote_pfx.fp_addr,
+ };
+
+ /*
+ * Setup the (*,G) to receive traffic on the mcast group
+ * - the forwarding interface is for-us
+ * - the accepting interface is that from the API
+ */
+ mfib_table_entry_path_update(t->encap_fib_index,
+ &mpfx,
+ MFIB_SOURCE_VXLAN_GPE,
+ &path,
+ MFIB_ITF_FLAG_FORWARD);
+
+ path.frp_sw_if_index = a->mcast_sw_if_index;
+ path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ mfei = mfib_table_entry_path_update(t->encap_fib_index,
+ &mpfx,
+ MFIB_SOURCE_VXLAN_GPE,
+ &path,
+ MFIB_ITF_FLAG_ACCEPT);
+
+ /*
+ * Create the mcast adjacency to send traffic to the group
+ */
+ ai = adj_mcast_add_or_lock(fp,
+ fib_proto_to_link(fp),
+ a->mcast_sw_if_index);
+
+ /*
+ * create a new end-point
+ */
+ mcast_shared_add(&t->remote, mfei, ai);
+ }
+
+ dpo_id_t dpo = DPO_INVALID;
+ mcast_shared_t ep = mcast_shared_get(&t->remote);
+
+ /* Stack shared mcast remote mac addr rewrite on encap */
+ dpo_set (&dpo, DPO_ADJACENCY_MCAST,
+ fib_proto_to_dpo(fp),
+ ep.mcast_adj_index);
+
+ dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
+ dpo_reset (&dpo);
+ flood_class = VNET_FLOOD_CLASS_TUNNEL_MASTER;
+ }
+
+ /* Set vxlan tunnel output node */
+ hi->output_node_index = encap_index;
+
+ vnet_get_sw_interface (vnet_get_main(), sw_if_index)->flood_class = flood_class;
}
else
{
@@ -401,30 +704,36 @@ int vnet_vxlan_gpe_add_del_tunnel
if (!p)
return VNET_API_ERROR_NO_SUCH_ENTRY;
- t = pool_elt_at_index (gm->tunnels, p[0]);
+ t = pool_elt_at_index (ngm->tunnels, p[0]);
+ sw_if_index = t->sw_if_index;
vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */);
- vec_add1 (gm->free_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index);
+ vnet_sw_interface_t * si = vnet_get_sw_interface (vnm, t->sw_if_index);
+ si->flags |= VNET_SW_INTERFACE_FLAG_HIDDEN;
+ set_int_l2_mode(ngm->vlib_main, vnm, MODE_L3, t->sw_if_index, 0, 0, 0, 0);
+ vec_add1 (ngm->free_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index);
- gm->tunnel_index_by_sw_if_index[t->sw_if_index] = ~0;
+ ngm->tunnel_index_by_sw_if_index[t->sw_if_index] = ~0;
- if (!a->is_ip6)
- {
- hp = hash_get_pair (gm->vxlan4_gpe_tunnel_by_key, &key4);
- key4_copy = (void *)(hp->key);
- hash_unset_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
- clib_mem_free (key4_copy);
- }
+ if (!is_ip6)
+ hash_unset (ngm->vxlan4_gpe_tunnel_by_key, key4.as_u64);
else
- {
- hp = hash_get_pair (gm->vxlan6_gpe_tunnel_by_key, &key6);
- key6_copy = (void *)(hp->key);
- hash_unset_mem (gm->vxlan4_gpe_tunnel_by_key, &key6);
- clib_mem_free (key6_copy);
- }
+ hash_unset_key_free (&ngm->vxlan6_gpe_tunnel_by_key, &key6);
+
+ if (!ip46_address_is_multicast(&t->remote))
+ {
+ vtep_addr_unref(&t->local);
+ fib_entry_child_remove(t->fib_entry_index, t->sibling_index);
+ fib_table_entry_delete_index(t->fib_entry_index, FIB_SOURCE_RR);
+ }
+ else if (vtep_addr_unref(&t->remote) == 0)
+ {
+ mcast_shared_remove(&t->remote);
+ }
+ fib_node_deinit(&t->node);
vec_free (t->rewrite);
- pool_put (gm->tunnels, t);
+ pool_put (ngm->tunnels, t);
}
if (sw_if_indexp)
@@ -443,8 +752,10 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
ip46_address_t local, remote;
u8 local_set = 0;
u8 remote_set = 0;
+ u8 grp_set = 0;
u8 ipv4_set = 0;
u8 ipv6_set = 0;
+ u32 mcast_sw_if_index = ~0;
u32 encap_fib_index = 0;
u32 decap_fib_index = 0;
u8 protocol = VXLAN_GPE_PROTOCOL_IP4;
@@ -487,6 +798,22 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
remote_set = 1;
ipv6_set = 1;
}
+ else if (unformat (line_input, "group %U %U",
+ unformat_ip4_address, &remote.ip4,
+ unformat_vnet_sw_interface,
+ vnet_get_main(), &mcast_sw_if_index))
+ {
+ grp_set = remote_set = 1;
+ ipv4_set = 1;
+ }
+ else if (unformat (line_input, "group %U %U",
+ unformat_ip6_address, &remote.ip6,
+ unformat_vnet_sw_interface,
+ vnet_get_main(), &mcast_sw_if_index))
+ {
+ grp_set = remote_set = 1;
+ ipv6_set = 1;
+ }
else if (unformat (line_input, "encap-vrf-id %d", &tmp))
{
if (ipv6_set)
@@ -543,6 +870,23 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
goto done;
}
+ if (grp_set && !ip46_address_is_multicast(&remote))
+ {
+ error = clib_error_return (0, "tunnel group address not multicast");
+ goto done;
+ }
+
+ if (grp_set == 0 && ip46_address_is_multicast(&remote))
+ {
+ error = clib_error_return (0, "remote address must be unicast");
+ goto done;
+ }
+
+ if (grp_set && mcast_sw_if_index == ~0)
+ {
+ error = clib_error_return (0, "tunnel nonexistent multicast device");
+ goto done;
+ }
if (ipv4_set && ipv6_set)
{
error = clib_error_return (0, "both IPv4 and IPv6 addresses specified");
@@ -552,7 +896,7 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
if ((ipv4_set && memcmp(&local.ip4, &remote.ip4, sizeof(local.ip4)) == 0) ||
(ipv6_set && memcmp(&local.ip6, &remote.ip6, sizeof(local.ip6)) == 0))
{
- error = clib_error_return (0, "src and dst addresses are identical");
+ error = clib_error_return (0, "local and remote addresses are identical");
goto done;
}
@@ -604,15 +948,36 @@ done:
return error;
}
+/*?
+ * Add or delete a VXLAN-GPE Tunnel.
+ *
+ * VXLAN-GPE provides the features needed to allow L2 bridge domains (BDs)
+ * to span multiple servers. This is done by building an L2 overlay on
+ * top of an L3 network underlay using VXLAN-GPE tunnels.
+ *
+ * This makes it possible for servers to be co-located in the same data
+ * center or be separated geographically as long as they are reachable
+ * through the underlay L3 network.
+ *
+ * You can refer to this kind of L2 overlay bridge domain as a VXLAN-GPE segment.
+ *
+ * @cliexpar
+ * Example of how to create a VXLAN-GPE Tunnel:
+ * @cliexcmd{create vxlan-gpe tunnel local 10.0.3.1 remote 10.0.3.3 vni 13 encap-vrf-id 7}
+ * Example of how to delete a VXLAN-GPE Tunnel:
+ * @cliexcmd{create vxlan-gpe tunnel local 10.0.3.1 remote 10.0.3.3 vni 13 del}
+ ?*/
+/* *INDENT-OFF* */
VLIB_CLI_COMMAND (create_vxlan_gpe_tunnel_command, static) = {
.path = "create vxlan-gpe tunnel",
.short_help =
- "create vxlan-gpe tunnel local <local-addr> remote <remote-addr>"
+ "create vxlan-gpe tunnel local <local-addr> "
+ " {remote <remote-addr>|group <mcast-addr> <intf-name>}"
" vni <nn> [next-ip4][next-ip6][next-ethernet][next-nsh]"
- " [encap-vrf-id <nn>] [decap-vrf-id <nn>]"
- " [del]\n",
+ " [encap-vrf-id <nn>] [decap-vrf-id <nn>] [del]\n",
.function = vxlan_gpe_add_del_tunnel_command_fn,
};
+/* *INDENT-ON* */
/**
* @brief CLI function for showing VXLAN GPE tunnels
@@ -629,13 +994,13 @@ show_vxlan_gpe_tunnel_command_fn (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
- vxlan_gpe_main_t * gm = &vxlan_gpe_main;
+ vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
vxlan_gpe_tunnel_t * t;
- if (pool_elts (gm->tunnels) == 0)
+ if (pool_elts (ngm->tunnels) == 0)
vlib_cli_output (vm, "No vxlan-gpe tunnels configured.");
- pool_foreach (t, gm->tunnels,
+ pool_foreach (t, ngm->tunnels,
({
vlib_cli_output (vm, "%U", format_vxlan_gpe_tunnel, t);
}));
@@ -643,10 +1008,194 @@ show_vxlan_gpe_tunnel_command_fn (vlib_main_t * vm,
return 0;
}
+/*?
+ * Display all the VXLAN-GPE Tunnel entries.
+ *
+ * @cliexpar
+ * Example of how to display the VXLAN-GPE Tunnel entries:
+ * @cliexstart{show vxlan-gpe tunnel}
+ * [0] local 10.0.3.1 remote 10.0.3.3 vni 13 encap_fib_index 0 sw_if_index 5 decap_next l2
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_vxlan_gpe_tunnel_command, static) = {
.path = "show vxlan-gpe",
.function = show_vxlan_gpe_tunnel_command_fn,
};
+/* *INDENT-ON* */
+
+void vnet_int_vxlan_gpe_bypass_mode (u32 sw_if_index,
+ u8 is_ip6,
+ u8 is_enable)
+{
+ if (is_ip6)
+ vnet_feature_enable_disable ("ip6-unicast", "ip6-vxlan-gpe-bypass",
+ sw_if_index, is_enable, 0, 0);
+ else
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-gpe-bypass",
+ sw_if_index, is_enable, 0, 0);
+}
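A minimal usage sketch (the sw_if_index value is hypothetical):

    /* enable the IPv4 bypass feature on interface 1 ... */
    vnet_int_vxlan_gpe_bypass_mode (1 /* sw_if_index */, 0 /* is_ip6 */,
                                    1 /* enable */);
    /* ... and later remove it again */
    vnet_int_vxlan_gpe_bypass_mode (1 /* sw_if_index */, 0 /* is_ip6 */,
                                    0 /* disable */);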
+
+
+static clib_error_t *
+set_ip_vxlan_gpe_bypass (u32 is_ip6,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ vnet_main_t * vnm = vnet_get_main();
+ clib_error_t * error = 0;
+ u32 sw_if_index, is_enable;
+
+ sw_if_index = ~0;
+ is_enable = 1;
+
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat_user (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else if (unformat (line_input, "del"))
+ is_enable = 0;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (~0 == sw_if_index)
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+
+ vnet_int_vxlan_gpe_bypass_mode (sw_if_index, is_ip6, is_enable);
+
+ done:
+ unformat_free (line_input);
+
+ return error;
+}
+
+static clib_error_t *
+set_ip4_vxlan_gpe_bypass (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ return set_ip_vxlan_gpe_bypass (0, input, cmd);
+}
+
+/*?
+ * This command adds the 'ip4-vxlan-gpe-bypass' graph node for a given interface.
+ * By adding the IPv4 vxlan-gpe-bypass graph node to an interface, the node
+ * checks for and validates incoming vxlan_gpe packets and bypasses the
+ * ip4-lookup, ip4-local and ip4-udp-lookup nodes to speed up vxlan_gpe
+ * packet forwarding. The node adds a small amount of overhead to
+ * non-vxlan_gpe packets, which is kept to a minimum.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before ip4-vxlan-gpe-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan-gpe-bypass}
+ * Name Next Previous
+ * ip4-vxlan-gpe-bypass error-drop [0]
+ * vxlan4-gpe-input [1]
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to enable ip4-vxlan-gpe-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-gpe-bypass GigabitEthernet2/0/0}
+ *
+ * Example of graph node after ip4-vxlan-gpe-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan-gpe-bypass}
+ * Name Next Previous
+ * ip4-vxlan-gpe-bypass error-drop [0] ip4-input
+ * vxlan4-gpe-input [1] ip4-input-no-checksum
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ * ...
+ * ipv4 unicast:
+ * ip4-vxlan-gpe-bypass
+ * ip4-lookup
+ * ...
+ * @cliexend
+ *
+ * Example of how to disable ip4-vxlan-gpe-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-gpe-bypass GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip_vxlan_gpe_bypass_command, static) = {
+ .path = "set interface ip vxlan-gpe-bypass",
+ .function = set_ip4_vxlan_gpe_bypass,
+ .short_help = "set interface ip vxlan-gpe-bypass <interface> [del]",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_ip6_vxlan_gpe_bypass (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ return set_ip_vxlan_gpe_bypass (1, input, cmd);
+}
+
+/*?
+ * This command adds the 'ip6-vxlan-gpe-bypass' graph node for a given interface.
+ * By adding the IPv6 vxlan-gpe-bypass graph node to an interface, the node
+ * checks for and validates incoming vxlan_gpe packets and bypasses the
+ * ip6-lookup, ip6-local and ip6-udp-lookup nodes to speed up vxlan_gpe
+ * packet forwarding. The node adds a small amount of overhead to
+ * non-vxlan_gpe packets, which is kept to a minimum.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before ip6-vxlan-gpe-bypass is enabled:
+ * @cliexstart{show vlib graph ip6-vxlan-gpe-bypass}
+ * Name Next Previous
+ * ip6-vxlan-gpe-bypass error-drop [0]
+ * vxlan6-gpe-input [1]
+ * ip6-lookup [2]
+ * @cliexend
+ *
+ * Example of how to enable ip6-vxlan-gpe-bypass on an interface:
+ * @cliexcmd{set interface ip6 vxlan-gpe-bypass GigabitEthernet2/0/0}
+ *
+ * Example of graph node after ip6-vxlan-gpe-bypass is enabled:
+ * @cliexstart{show vlib graph ip6-vxlan-gpe-bypass}
+ * Name Next Previous
+ * ip6-vxlan-gpe-bypass error-drop [0] ip6-input
+ * vxlan6-gpe-input [1]
+ * ip6-lookup [2]
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ * ...
+ * ipv6 unicast:
+ * ip6-vxlan-gpe-bypass
+ * ip6-lookup
+ * ...
+ * @cliexend
+ *
+ * Example of how to disable ip6-vxlan-gpe-bypass on an interface:
+ * @cliexcmd{set interface ip6 vxlan-gpe-bypass GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip6_vxlan_gpe_bypass_command, static) = {
+ .path = "set interface ip6 vxlan-gpe-bypass",
+ .function = set_ip6_vxlan_gpe_bypass,
+ .short_help = "set interface ip6 vxlan-gpe-bypass <interface> [del]",
+};
+/* *INDENT-ON* */
/**
* @brief Feature init function for VXLAN GPE
@@ -658,21 +1207,25 @@ VLIB_CLI_COMMAND (show_vxlan_gpe_tunnel_command, static) = {
*/
clib_error_t *vxlan_gpe_init (vlib_main_t *vm)
{
- vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+ vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
- gm->vnet_main = vnet_get_main();
- gm->vlib_main = vm;
+ ngm->vnet_main = vnet_get_main();
+ ngm->vlib_main = vm;
- gm->vxlan4_gpe_tunnel_by_key
+ ngm->vxlan4_gpe_tunnel_by_key
= hash_create_mem (0, sizeof(vxlan4_gpe_tunnel_key_t), sizeof (uword));
- gm->vxlan6_gpe_tunnel_by_key
+ ngm->vxlan6_gpe_tunnel_by_key
= hash_create_mem (0, sizeof(vxlan6_gpe_tunnel_key_t), sizeof (uword));
- udp_register_dst_port (vm, UDP_DST_PORT_vxlan_gpe,
+ ngm->mcast_shared = hash_create_mem(0,
+ sizeof(ip46_address_t),
+ sizeof(mcast_shared_t));
+
+ udp_register_dst_port (vm, UDP_DST_PORT_VXLAN_GPE,
vxlan4_gpe_input_node.index, 1 /* is_ip4 */);
- udp_register_dst_port (vm, UDP_DST_PORT_vxlan6_gpe,
+ udp_register_dst_port (vm, UDP_DST_PORT_VXLAN6_GPE,
vxlan6_gpe_input_node.index, 0 /* is_ip4 */);
/* Register the list of standard decap protocols supported */
@@ -682,6 +1235,9 @@ clib_error_t *vxlan_gpe_init (vlib_main_t *vm)
VXLAN_GPE_INPUT_NEXT_IP6_INPUT);
vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_ETHERNET,
VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT);
+
+ fib_node_register_type(FIB_NODE_TYPE_VXLAN_GPE_TUNNEL, &vxlan_gpe_vft);
+
return 0;
}