author    Neale Ranns <neale.ranns@cisco.com>  2018-10-10 07:22:51 -0700
committer Damjan Marion <dmarion@me.com>       2018-11-07 12:00:10 +0000
commit    93cc3ee3b3a9c9224a1446625882205f3282a949 (patch)
tree      077421ee51238c22181a3b3f4871b648bb1299d3 /src/vnet/vxlan-gbp
parent    c3df1e9a0ab79c1fe254394748ef441ffe224c43 (diff)
GBP Endpoint Learning
Learning GBP endpoints over vxlan-gbp tunnels

Change-Id: I1db9fda5a16802d9ad8b4efd4e475614f3b21502
Signed-off-by: Neale Ranns <neale.ranns@cisco.com>
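What gets learned is driven by the policy fields carried in the VXLAN-GBP header (draft-smith-vxlan-group-policy): a 16-bit source class (sclass / EPG) and a byte of group-policy flags alongside the 24-bit VNI. A minimal standalone sketch of pulling those fields out of a received header, using plain C stand-in types rather than the VPP structs (the names below are illustrative, not VPP symbols):

#include <stdint.h>
#include <arpa/inet.h>

/* Simplified stand-in for vxlan_gbp_header_t */
typedef struct {
  uint8_t  flag_g_i;      /* G and I flag bits */
  uint8_t  gpflags;       /* group-policy flags (D ... A) */
  uint16_t sclass;        /* source class / EPG, network order */
  uint32_t vni_reserved;  /* VNI in the top 24 bits, network order */
} gbp_hdr_t;

static inline uint32_t gbp_vni (const gbp_hdr_t *h)
{ return ntohl (h->vni_reserved) >> 8; }

static inline uint16_t gbp_sclass (const gbp_hdr_t *h)
{ return ntohs (h->sclass); }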
Diffstat (limited to 'src/vnet/vxlan-gbp')
-rw-r--r--  src/vnet/vxlan-gbp/decap.c             127
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp.api         2
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp.c          99
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp.h          40
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp_api.c       3
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp_packet.c   60
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp_packet.h   25
7 files changed, 252 insertions, 104 deletions
diff --git a/src/vnet/vxlan-gbp/decap.c b/src/vnet/vxlan-gbp/decap.c
index 1602e940f3f..0d361a37751 100644
--- a/src/vnet/vxlan-gbp/decap.c
+++ b/src/vnet/vxlan-gbp/decap.c
@@ -29,6 +29,7 @@ typedef struct
u32 error;
u32 vni;
u16 sclass;
+ u8 flags;
} vxlan_gbp_rx_trace_t;
static u8 *
@@ -44,8 +45,10 @@ format_vxlan_gbp_rx_trace (u8 * s, va_list * args)
t->vni);
return format (s,
"VXLAN_GBP decap from vxlan_gbp_tunnel%d vni %d sclass %d"
- " next %d error %d",
- t->tunnel_index, t->vni, t->sclass, t->next_index, t->error);
+ " flags %U next %d error %d",
+ t->tunnel_index, t->vni, t->sclass,
+ format_vxlan_gbp_header_gpflags, t->flags,
+ t->next_index, t->error);
}
always_inline u32
@@ -161,10 +164,34 @@ vxlan6_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache6 * cache,
return t0;
}
+always_inline vxlan_gbp_input_next_t
+vxlan_gbp_tunnel_get_next (const vxlan_gbp_tunnel_t * t, vlib_buffer_t * b0)
+{
+ if (VXLAN_GBP_TUNNEL_MODE_L2 == t->mode)
+ return (VXLAN_GBP_INPUT_NEXT_L2_INPUT);
+ else
+ {
+ ethernet_header_t *e0;
+ u16 type0;
+
+ e0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, sizeof (*e0));
+ type0 = clib_net_to_host_u16 (e0->type);
+ switch (type0)
+ {
+ case ETHERNET_TYPE_IP4:
+ return (VXLAN_GBP_INPUT_NEXT_IP4_INPUT);
+ case ETHERNET_TYPE_IP6:
+ return (VXLAN_GBP_INPUT_NEXT_IP6_INPUT);
+ }
+ }
+ return (VXLAN_GBP_INPUT_NEXT_DROP);
+}
+
always_inline uword
vxlan_gbp_input (vlib_main_t * vm,
vlib_node_runtime_t * node,
- vlib_frame_t * from_frame, u32 is_ip4)
+ vlib_frame_t * from_frame, u8 is_ip4)
{
vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
vnet_main_t *vnm = vxm->vnet_main;
@@ -239,10 +266,6 @@ vxlan_gbp_input (vlib_main_t * vm,
ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
}
- /* pop vxlan_gbp */
- vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
- vlib_buffer_advance (b1, sizeof *vxlan_gbp1);
-
u32 fi0 = buf_fib_index (b0, is_ip4);
u32 fi1 = buf_fib_index (b1, is_ip4);
@@ -270,16 +293,19 @@ vxlan_gbp_input (vlib_main_t * vm,
u32 len0 = vlib_buffer_length_in_chain (vm, b0);
u32 len1 = vlib_buffer_length_in_chain (vm, b1);
- u32 next0, next1;
+ vxlan_gbp_input_next_t next0, next1;
u8 error0 = 0, error1 = 0;
u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
u8 flags1 = vxlan_gbp_get_flags (vxlan_gbp1);
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ /* pop vxlan_gbp */
+ vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
+ vlib_buffer_advance (b1, sizeof *vxlan_gbp1);
+
/* Validate VXLAN_GBP tunnel encap-fib index against packet */
if (PREDICT_FALSE
(t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
{
- next0 = VXLAN_GBP_INPUT_NEXT_DROP;
-
if (t0 != 0
&& flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
{
@@ -287,22 +313,18 @@ vxlan_gbp_input (vlib_main_t * vm,
vlib_increment_combined_counter
(drop_counter, thread_index, stats_t0->sw_if_index, 1,
len0);
+ next0 = VXLAN_GBP_INPUT_NEXT_DROP;
}
else
- error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ {
+ error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
+ }
b0->error = node->errors[error0];
}
else
{
- next0 = t0->decap_next_index;
- vnet_buffer2 (b0)->gbp.flags =
- vxlan_gbp_get_gpflags (vxlan_gbp0);
- vnet_buffer2 (b0)->gbp.src_epg =
- vxlan_gbp_get_sclass (vxlan_gbp0);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
- vnet_update_l2_len (b0);
+ next0 = vxlan_gbp_tunnel_get_next (t0, b0);
/* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
@@ -311,12 +333,13 @@ vxlan_gbp_input (vlib_main_t * vm,
pkts_decapsulated++;
}
- /* Validate VXLAN_GBP tunnel encap-fib index against packet */
+ vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+ vnet_buffer2 (b0)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp0);
+
+
if (PREDICT_FALSE
(t1 == 0 || flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
{
- next1 = VXLAN_GBP_INPUT_NEXT_DROP;
-
if (t1 != 0
&& flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
{
@@ -324,22 +347,18 @@ vxlan_gbp_input (vlib_main_t * vm,
vlib_increment_combined_counter
(drop_counter, thread_index, stats_t1->sw_if_index, 1,
len1);
+ next1 = VXLAN_GBP_INPUT_NEXT_DROP;
}
else
- error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ {
+ error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
+ }
b1->error = node->errors[error1];
}
else
{
- next1 = t1->decap_next_index;
- vnet_buffer2 (b1)->gbp.flags =
- vxlan_gbp_get_gpflags (vxlan_gbp1);
- vnet_buffer2 (b1)->gbp.src_epg =
- vxlan_gbp_get_sclass (vxlan_gbp1);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- if (PREDICT_TRUE (next1 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
- vnet_update_l2_len (b1);
+ next1 = vxlan_gbp_tunnel_get_next (t1, b1);
/* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
@@ -349,6 +368,12 @@ vxlan_gbp_input (vlib_main_t * vm,
(rx_counter, thread_index, stats_t1->sw_if_index, 1, len1);
}
+ vnet_buffer2 (b1)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
+ vnet_buffer2 (b1)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp1);
+
+ vnet_update_l2_len (b0);
+ vnet_update_l2_len (b1);
+
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
vxlan_gbp_rx_trace_t *tr =
@@ -358,6 +383,7 @@ vxlan_gbp_input (vlib_main_t * vm,
tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
}
if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -368,6 +394,7 @@ vxlan_gbp_input (vlib_main_t * vm,
tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels;
tr->vni = vxlan_gbp_get_vni (vxlan_gbp1);
tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
}
vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
@@ -395,9 +422,6 @@ vxlan_gbp_input (vlib_main_t * vm,
else
ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
- /* pop (ip, udp, vxlan_gbp) */
- vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));
-
u32 fi0 = buf_fib_index (b0, is_ip4);
vxlan_gbp_tunnel_t *t0, *stats_t0 = 0;
@@ -412,15 +436,16 @@ vxlan_gbp_input (vlib_main_t * vm,
uword len0 = vlib_buffer_length_in_chain (vm, b0);
- u32 next0;
+ vxlan_gbp_input_next_t next0;
u8 error0 = 0;
u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
+
+ /* pop (ip, udp, vxlan_gbp) */
+ vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));
/* Validate VXLAN_GBP tunnel encap-fib index against packet */
if (PREDICT_FALSE
(t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
{
- next0 = VXLAN_GBP_INPUT_NEXT_DROP;
-
if (t0 != 0
&& flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
{
@@ -428,24 +453,18 @@ vxlan_gbp_input (vlib_main_t * vm,
vlib_increment_combined_counter
(drop_counter, thread_index, stats_t0->sw_if_index, 1,
len0);
+ next0 = VXLAN_GBP_INPUT_NEXT_DROP;
}
else
- error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ {
+ error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
+ }
b0->error = node->errors[error0];
}
else
{
- next0 = t0->decap_next_index;
- vnet_buffer2 (b0)->gbp.flags =
- vxlan_gbp_get_gpflags (vxlan_gbp0);
- vnet_buffer2 (b0)->gbp.src_epg =
- vxlan_gbp_get_sclass (vxlan_gbp0);
-
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
- vnet_update_l2_len (b0);
-
+ next0 = vxlan_gbp_tunnel_get_next (t0, b0);
/* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
pkts_decapsulated++;
@@ -453,6 +472,11 @@ vxlan_gbp_input (vlib_main_t * vm,
vlib_increment_combined_counter
(rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
}
+ vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+ vnet_buffer2 (b0)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -463,6 +487,7 @@ vxlan_gbp_input (vlib_main_t * vm,
tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
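Once decapped, the policy fields travel with the buffer, so nodes further down the graph (e.g. the GBP learning path) can pick them up without re-parsing the header. A hedged sketch of such a consumer, using the metadata fields set in the hunks above:

  /* Hypothetical downstream consumer of the decap metadata */
  u16 sclass  = vnet_buffer2 (b0)->gbp.src_epg;  /* source EPG / sclass */
  u8  gpflags = vnet_buffer2 (b0)->gbp.flags;    /* group-policy flags */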
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.api b/src/vnet/vxlan-gbp/vxlan_gbp.api
index 6e41ec8b2b9..3e213ddc563 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.api
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.api
@@ -23,7 +23,6 @@ import "vnet/ip/ip_types.api";
@param dst - Destination IP address, can be multicast
@param mcast_sw_if_index - Interface for multicast destination
@param encap_table_id - Encap route table
- @param decap_next_index - Name of decap next graph node
@param vni - The VXLAN Network Identifier, uint24
@param sw_ifindex - Ignored in add message, set in details
*/
@@ -34,7 +33,6 @@ typedef vxlan_gbp_tunnel
vl_api_address_t dst;
u32 mcast_sw_if_index;
u32 encap_table_id;
- u32 decap_next_index;
u32 vni;
u32 sw_if_index;
};
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.c b/src/vnet/vxlan-gbp/vxlan_gbp.c
index ec4f923bdbc..691cc762549 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.c
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.c
@@ -32,16 +32,21 @@
vxlan_gbp_main_t vxlan_gbp_main;
-static u8 *
-format_decap_next (u8 * s, va_list * args)
+u8 *
+format_vxlan_gbp_tunnel_mode (u8 * s, va_list * args)
{
- u32 next_index = va_arg (*args, u32);
+ vxlan_gbp_tunnel_mode_t mode = va_arg (*args, vxlan_gbp_tunnel_mode_t);
- if (next_index == VXLAN_GBP_INPUT_NEXT_DROP)
- return format (s, "drop");
- else
- return format (s, "index %d", next_index);
- return s;
+ switch (mode)
+ {
+ case VXLAN_GBP_TUNNEL_MODE_L2:
+ s = format (s, "L2");
+ break;
+ case VXLAN_GBP_TUNNEL_MODE_L3:
+ s = format (s, "L3");
+ break;
+ }
+ return (s);
}
u8 *
@@ -51,17 +56,15 @@ format_vxlan_gbp_tunnel (u8 * s, va_list * args)
s = format (s,
"[%d] instance %d src %U dst %U vni %d fib-idx %d"
- " sw-if-idx %d ",
+ " sw-if-idx %d mode %U ",
t->dev_instance, t->user_instance,
format_ip46_address, &t->src, IP46_TYPE_ANY,
format_ip46_address, &t->dst, IP46_TYPE_ANY,
- t->vni, t->encap_fib_index, t->sw_if_index);
+ t->vni, t->encap_fib_index, t->sw_if_index,
+ format_vxlan_gbp_tunnel_mode, t->mode);
s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index);
- if (PREDICT_FALSE (t->decap_next_index != VXLAN_GBP_INPUT_NEXT_L2_INPUT))
- s = format (s, "decap-next-%U ", format_decap_next, t->decap_next_index);
-
if (PREDICT_FALSE (ip46_address_is_multicast (&t->dst)))
s = format (s, "mcast-sw-if-idx %d ", t->mcast_sw_if_index);
@@ -210,9 +213,9 @@ const static fib_node_vft_t vxlan_gbp_vft = {
#define foreach_copy_field \
_(vni) \
+_(mode) \
_(mcast_sw_if_index) \
_(encap_fib_index) \
-_(decap_next_index) \
_(src) \
_(dst)
@@ -267,18 +270,6 @@ vxlan_gbp_rewrite (vxlan_gbp_tunnel_t * t, bool is_ip6)
vnet_rewrite_set_data (*t, &h, len);
}
-static bool
-vxlan_gbp_decap_next_is_valid (vxlan_gbp_main_t * vxm, u32 is_ip6,
- u32 decap_next_index)
-{
- vlib_main_t *vm = vxm->vlib_main;
- u32 input_idx = (!is_ip6) ?
- vxlan4_gbp_input_node.index : vxlan6_gbp_input_node.index;
- vlib_node_runtime_t *r = vlib_node_get_runtime (vm, input_idx);
-
- return decap_next_index < r->n_next_nodes;
-}
-
static uword
vtep_addr_ref (ip46_address_t * ip)
{
@@ -434,14 +425,11 @@ int vnet_vxlan_gbp_tunnel_add_del
/* adding a tunnel: tunnel must not already exist */
if (p)
- return VNET_API_ERROR_TUNNEL_EXIST;
-
- /* if not set explicitly, default to l2 */
- if (a->decap_next_index == ~0)
- a->decap_next_index = VXLAN_GBP_INPUT_NEXT_L2_INPUT;
- if (!vxlan_gbp_decap_next_is_valid (vxm, is_ip6, a->decap_next_index))
- return VNET_API_ERROR_INVALID_DECAP_NEXT;
-
+ {
+ t = pool_elt_at_index (vxm->tunnels, *p);
+ *sw_if_indexp = t->sw_if_index;
+ return VNET_API_ERROR_TUNNEL_EXIST;
+ }
pool_get_aligned (vxm->tunnels, t, CLIB_CACHE_LINE_BYTES);
clib_memset (t, 0, sizeof (*t));
dev_instance = t - vxm->tunnels;
@@ -505,6 +493,12 @@ int vnet_vxlan_gbp_tunnel_add_del
t->sw_if_index = sw_if_index = hi->sw_if_index;
+ if (VXLAN_GBP_TUNNEL_MODE_L3 == t->mode)
+ {
+ ip4_sw_interface_enable_disable (t->sw_if_index, 1);
+ ip6_sw_interface_enable_disable (t->sw_if_index, 1);
+ }
+
vec_validate_init_empty (vxm->tunnel_index_by_sw_if_index, sw_if_index,
~0);
vxm->tunnel_index_by_sw_if_index[sw_if_index] = dev_instance;
@@ -626,6 +620,12 @@ int vnet_vxlan_gbp_tunnel_add_del
sw_if_index = t->sw_if_index;
vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */ );
+ if (VXLAN_GBP_TUNNEL_MODE_L3 == t->mode)
+ {
+ ip4_sw_interface_enable_disable (t->sw_if_index, 0);
+ ip6_sw_interface_enable_disable (t->sw_if_index, 0);
+ }
+
vxm->tunnel_index_by_sw_if_index[sw_if_index] = ~0;
if (!is_ip6)
@@ -660,6 +660,36 @@ int vnet_vxlan_gbp_tunnel_add_del
return 0;
}
+int
+vnet_vxlan_gbp_tunnel_del (u32 sw_if_index)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vxlan_gbp_tunnel_t *t = 0;
+ u32 ti;
+
+ if (sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index))
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ ti = vxm->tunnel_index_by_sw_if_index[sw_if_index];
+ if (~0 != ti)
+ {
+ t = pool_elt_at_index (vxm->tunnels, ti);
+
+ vnet_vxlan_gbp_tunnel_add_del_args_t args = {
+ .is_add = 0,
+ .is_ip6 = !ip46_address_is_ip4 (&t->src),
+ .vni = t->vni,
+ .src = t->src,
+ .dst = t->dst,
+ .instance = ~0,
+ };
+
+ return (vnet_vxlan_gbp_tunnel_add_del (&args, NULL));
+ }
+
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+}
+
static uword
get_decap_next_for_node (u32 node_index, u32 ipv4_set)
{
@@ -700,6 +730,7 @@ vxlan_gbp_tunnel_add_del_command_fn (vlib_main_t * vm,
unformat_input_t _line_input, *line_input = &_line_input;
ip46_address_t src = ip46_address_initializer, dst =
ip46_address_initializer;
+ vxlan_gbp_tunnel_mode_t mode = VXLAN_GBP_TUNNEL_MODE_L2;
u8 is_add = 1;
u8 src_set = 0;
u8 dst_set = 0;
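Taken together with the new delete-by-interface helper, the reworked control-plane calls look roughly like the sketch below. This is a hedged illustration: the field values are made up, and src/dst would be filled with real ip46_address_t values.

  /* Hedged sketch: create an L3-mode tunnel, then tear it down by sw_if_index */
  vnet_vxlan_gbp_tunnel_add_del_args_t a = {
    .is_add = 1,
    .is_ip6 = 0,
    .instance = ~0,                    /* auto-select instance */
    .mode = VXLAN_GBP_TUNNEL_MODE_L3,  /* replaces the removed decap_next_index */
    .vni = 100,
    .mcast_sw_if_index = ~0,
    .encap_fib_index = 0,
    /* .src, .dst set to real addresses here */
  };
  u32 sw_if_index;
  int rv = vnet_vxlan_gbp_tunnel_add_del (&a, &sw_if_index);

  if (0 == rv)
    rv = vnet_vxlan_gbp_tunnel_del (sw_if_index);  /* new helper added above */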
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.h b/src/vnet/vxlan-gbp/vxlan_gbp.h
index f9edcdcbd93..66f0cffd772 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.h
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.h
@@ -59,6 +59,14 @@ typedef clib_bihash_kv_16_8_t vxlan4_gbp_tunnel_key_t;
*/
typedef clib_bihash_kv_24_8_t vxlan6_gbp_tunnel_key_t;
+typedef enum vxlan_gbp_tunnel_mode_t_
+{
+ VXLAN_GBP_TUNNEL_MODE_L2,
+ VXLAN_GBP_TUNNEL_MODE_L3,
+} vxlan_gbp_tunnel_mode_t;
+
+extern u8 *format_vxlan_gbp_tunnel_mode (u8 * s, va_list * args);
+
typedef struct
{
/* Required for pool_get_aligned */
@@ -67,9 +75,6 @@ typedef struct
/* FIB DPO for IP forwarding of VXLAN encap packet */
dpo_id_t next_dpo;
- /* Group Policy ID */
- u16 sclass;
-
/* flags */
u16 flags;
@@ -83,9 +88,6 @@ typedef struct
/* mcast packet output intfc index (used only if dst is mcast) */
u32 mcast_sw_if_index;
- /* decap next index */
- u32 decap_next_index;
-
/* The FIB index for src/dst addresses */
u32 encap_fib_index;
@@ -97,6 +99,12 @@ typedef struct
uword encap_next_node;
/**
+ * Tunnel mode.
+ * L2 tunnels decap to the L2 path, L3 tunnels to the L3 path
+ */
+ vxlan_gbp_tunnel_mode_t mode;
+
+ /**
* Linkage into the FIB object graph
*/
fib_node_t node;
@@ -122,9 +130,12 @@ typedef struct
vnet_declare_rewrite (VLIB_BUFFER_PRE_DATA_SIZE);
} vxlan_gbp_tunnel_t;
-#define foreach_vxlan_gbp_input_next \
-_(DROP, "error-drop") \
-_(L2_INPUT, "l2-input")
+#define foreach_vxlan_gbp_input_next \
+ _(DROP, "error-drop") \
+ _(NO_TUNNEL, "error-punt") \
+ _(L2_INPUT, "l2-input") \
+ _(IP4_INPUT, "ip4-input") \
+ _(IP6_INPUT, "ip6-input")
typedef enum
{
@@ -142,6 +153,13 @@ typedef enum
VXLAN_GBP_N_ERROR,
} vxlan_gbp_input_error_t;
+/**
+ * Callback function for packets that do not match a configured tunnel
+ */
+typedef vxlan_gbp_input_next_t (*vxlan_gbp_no_tunnel_t) (vlib_buffer_t * b,
+ u32 thread_index,
+ u8 is_ip6);
+
typedef struct
{
/* vector of encap tunnel instances */
@@ -189,20 +207,22 @@ typedef struct
u8 is_add;
u8 is_ip6;
u32 instance;
+ vxlan_gbp_tunnel_mode_t mode;
ip46_address_t src, dst;
u32 mcast_sw_if_index;
u32 encap_fib_index;
- u32 decap_next_index;
u32 vni;
} vnet_vxlan_gbp_tunnel_add_del_args_t;
int vnet_vxlan_gbp_tunnel_add_del
(vnet_vxlan_gbp_tunnel_add_del_args_t * a, u32 * sw_if_indexp);
+int vnet_vxlan_gbp_tunnel_del (u32 sw_if_index);
void vnet_int_vxlan_gbp_bypass_mode (u32 sw_if_index, u8 is_ip6,
u8 is_enable);
u32 vnet_vxlan_gbp_get_tunnel_index (u32 sw_if_index);
+
#endif /* included_vnet_vxlan_gbp_h */
/*
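For reference, the enum built from the expanded foreach_vxlan_gbp_input_next (the typedef enum following the macro has its body outside this hunk) comes out roughly as below; the trailing sentinel name is an assumption:

  typedef enum
  {
    VXLAN_GBP_INPUT_NEXT_DROP,       /* error-drop */
    VXLAN_GBP_INPUT_NEXT_NO_TUNNEL,  /* error-punt */
    VXLAN_GBP_INPUT_NEXT_L2_INPUT,   /* l2-input */
    VXLAN_GBP_INPUT_NEXT_IP4_INPUT,  /* ip4-input */
    VXLAN_GBP_INPUT_NEXT_IP6_INPUT,  /* ip6-input */
    VXLAN_GBP_INPUT_N_NEXT,          /* assumed sentinel */
  } vxlan_gbp_input_next_t;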
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_api.c b/src/vnet/vxlan-gbp/vxlan_gbp_api.c
index b7e6935b2f8..f5e97e5a364 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp_api.c
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_api.c
@@ -92,10 +92,10 @@ static void vl_api_vxlan_gbp_tunnel_add_del_t_handler
.instance = ntohl (mp->tunnel.instance),
.mcast_sw_if_index = ntohl (mp->tunnel.mcast_sw_if_index),
.encap_fib_index = fib_index,
- .decap_next_index = ntohl (mp->tunnel.decap_next_index),
.vni = ntohl (mp->tunnel.vni),
.dst = dst,
.src = src,
+ .mode = VXLAN_GBP_TUNNEL_MODE_L2,
};
/* Check src & dst are different */
@@ -142,7 +142,6 @@ static void send_vxlan_gbp_tunnel_details
rmp->tunnel.instance = htonl (t->user_instance);
rmp->tunnel.mcast_sw_if_index = htonl (t->mcast_sw_if_index);
rmp->tunnel.vni = htonl (t->vni);
- rmp->tunnel.decap_next_index = htonl (t->decap_next_index);
rmp->tunnel.sw_if_index = htonl (t->sw_if_index);
rmp->context = context;
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_packet.c b/src/vnet/vxlan-gbp/vxlan_gbp_packet.c
new file mode 100644
index 00000000000..01c7a19bfb9
--- /dev/null
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_packet.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+u8 *
+format_vxlan_gbp_header_flags (u8 * s, va_list * args)
+{
+ vxlan_gbp_flags_t flags = va_arg (*args, int);
+
+ if (VXLAN_GBP_FLAGS_NONE == flags)
+ {
+ s = format (s, "None");
+ }
+#define _(n,f) { \
+ if (VXLAN_GBP_FLAGS_##f & flags) \
+ s = format (s, #f); \
+ }
+ foreach_vxlan_gbp_flags
+#undef _
+ return (s);
+}
+
+u8 *
+format_vxlan_gbp_header_gpflags (u8 * s, va_list * args)
+{
+ vxlan_gbp_gpflags_t flags = va_arg (*args, int);
+
+ if (VXLAN_GBP_GPFLAGS_NONE == flags)
+ {
+ s = format (s, "None");
+ }
+#define _(n,f) { \
+ if (VXLAN_GBP_GPFLAGS_##f & flags) \
+ s = format (s, #f); \
+ }
+ foreach_vxlan_gbp_gpflags
+#undef _
+ return (s);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
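These formatters follow VPP's %U convention, so they can be dropped into any format() call; a hedged usage sketch (hdr is assumed to point at a received vxlan_gbp_header_t):

  /* Render the group-policy flags into a vector string */
  u8 *s = format (0, "gpflags: %U",
                  format_vxlan_gbp_header_gpflags, vxlan_gbp_get_gpflags (hdr));
  /* ... print or log s, then vec_free (s); */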
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_packet.h b/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
index e1674a0dba8..33bccd6aed6 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
@@ -15,6 +15,8 @@
#ifndef __included_vxlan_gbp_packet_h__
#define __included_vxlan_gbp_packet_h__ 1
+#include <vlib/vlib.h>
+
/*
* From draft-smith-vxlan-group-policy-04.txt
*
@@ -85,8 +87,17 @@ typedef struct
u32 vni_reserved;
} vxlan_gbp_header_t;
-#define VXLAN_GBP_FLAGS_G 0x80
-#define VXLAN_GBP_FLAGS_I 0x08
+#define foreach_vxlan_gbp_flags \
+ _ (0x80, G) \
+ _ (0x08, I)
+
+typedef enum
+{
+ VXLAN_GBP_FLAGS_NONE = 0,
+#define _(n,f) VXLAN_GBP_FLAGS_##f = n,
+ foreach_vxlan_gbp_flags
+#undef _
+} __attribute__ ((packed)) vxlan_gbp_flags_t;
#define foreach_vxlan_gbp_gpflags \
_ (0x40, D) \
@@ -96,10 +107,11 @@ _ (0x08, A)
typedef enum
{
+ VXLAN_GBP_GPFLAGS_NONE = 0,
#define _(n,f) VXLAN_GBP_GPFLAGS_##f = n,
foreach_vxlan_gbp_gpflags
#undef _
-} vxlan_gbp_gpflag_t;
+} __attribute__ ((packed)) vxlan_gbp_gpflags_t;
static inline u32
vxlan_gbp_get_vni (vxlan_gbp_header_t * h)
@@ -119,13 +131,13 @@ vxlan_gbp_get_sclass (vxlan_gbp_header_t * h)
return sclass_host_byte_order;
}
-static inline u8
+static inline vxlan_gbp_gpflags_t
vxlan_gbp_get_gpflags (vxlan_gbp_header_t * h)
{
return h->gpflags;
}
-static inline u8
+static inline vxlan_gbp_flags_t
vxlan_gbp_get_flags (vxlan_gbp_header_t * h)
{
return h->flag_g_i;
@@ -139,6 +151,9 @@ vxlan_gbp_set_header (vxlan_gbp_header_t * h, u32 vni)
h->flag_g_i = VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G;
}
+extern u8 *format_vxlan_gbp_header_flags (u8 * s, va_list * args);
+extern u8 *format_vxlan_gbp_header_gpflags (u8 * s, va_list * args);
+
#endif /* __included_vxlan_gbp_packet_h__ */
/*