author    Neale Ranns <nranns@cisco.com>      2019-01-21 23:34:18 -0800
committer Damjan Marion <dmarion@me.com>      2019-01-22 14:37:51 +0000
commit    879d11c250a4710759ddefe22afd7fc05bda2946 (patch)
tree      86f1984cf7324e05d8b100f9201ea69df83edc56 /src
parent    03ae24b97498bcc9265d4650efe3ddaa5e1d3255 (diff)
GBP: Sclass to src-epg conversions
Change-Id: Ica88268fd6a6ee01da7e9219bb4e81f22ed2fd4b
Signed-off-by: Neale Ranns <nranns@cisco.com>
Diffstat (limited to 'src')
-rw-r--r--  src/plugins/gbp/CMakeLists.txt        |   1
-rw-r--r--  src/plugins/gbp/gbp.api               |   2
-rw-r--r--  src/plugins/gbp/gbp_api.c             |   6
-rw-r--r--  src/plugins/gbp/gbp_bridge_domain.c   |  48
-rw-r--r--  src/plugins/gbp/gbp_bridge_domain.h   |   9
-rw-r--r--  src/plugins/gbp/gbp_endpoint_group.c  |  19
-rw-r--r--  src/plugins/gbp/gbp_endpoint_group.h  |  33
-rw-r--r--  src/plugins/gbp/gbp_learn.c           |   8
-rw-r--r--  src/plugins/gbp/gbp_route_domain.c    |   5
-rw-r--r--  src/plugins/gbp/gbp_sclass.c          | 386
-rw-r--r--  src/plugins/gbp/gbp_sclass.h          |  34
-rw-r--r--  src/plugins/gbp/gbp_types.h           |   1
-rw-r--r--  src/plugins/gbp/gbp_vxlan.c           |  35
-rw-r--r--  src/vnet/buffer.h                     |   6
-rw-r--r--  src/vnet/l2/l2_input.h                |   1
-rw-r--r--  src/vnet/l2/l2_output.h               |   1
-rw-r--r--  src/vnet/vxlan-gbp/decap.c            |   6
-rw-r--r--  src/vnet/vxlan-gbp/encap.c            |  12
18 files changed, 580 insertions(+), 33 deletions(-)
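
Before the per-file hunks, a quick orientation: the patch adds a gbp_sclass feature (gbp_sclass.c/.h) that converts between the sclass carried in the VXLAN-GBP header and the locally significant source EPG id, gives the endpoint group an sclass attribute, and gives the GBP bridge domain a broadcast/multicast flood interface. A rough CLI sketch of the new knobs (only the "bvi", "uu-fwd", "bm-flood", "epg", "sclass", "bd" and "rd" tokens are taken from the unformat strings in this patch; the command paths and the interface names are illustrative assumptions, not verified against this tree):

    gbp bridge-domain add bd 1 bvi loop0 uu-fwd vxlan-gbp-tunnel-0 bm-flood vxlan-gbp-tunnel-1
    gbp endpoint-group add epg 220 sclass 1234 bd 1 rd 1
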
diff --git a/src/plugins/gbp/CMakeLists.txt b/src/plugins/gbp/CMakeLists.txt
index 4b511413b82..ef254024d43 100644
--- a/src/plugins/gbp/CMakeLists.txt
+++ b/src/plugins/gbp/CMakeLists.txt
@@ -29,6 +29,7 @@ add_vpp_plugin(gbp
gbp_recirc.c
gbp_route_domain.c
gbp_scanner.c
+ gbp_sclass.c
gbp_subnet.c
gbp_vxlan.c
diff --git a/src/plugins/gbp/gbp.api b/src/plugins/gbp/gbp.api
index e96cb508387..a7a9a7e8dc8 100644
--- a/src/plugins/gbp/gbp.api
+++ b/src/plugins/gbp/gbp.api
@@ -31,6 +31,7 @@ typedef gbp_bridge_domain
vl_api_gbp_bridge_domain_flags_t flags;
u32 bvi_sw_if_index;
u32 uu_fwd_sw_if_index;
+ u32 bm_flood_sw_if_index;
};
autoreply define gbp_bridge_domain_add
@@ -159,6 +160,7 @@ define gbp_endpoint_details
typeonly define gbp_endpoint_group
{
u16 epg_id;
+ u16 sclass;
u32 bd_id;
u32 rd_id;
u32 uplink_sw_if_index;
diff --git a/src/plugins/gbp/gbp_api.c b/src/plugins/gbp/gbp_api.c
index 8d80365c55d..665d97ea0b1 100644
--- a/src/plugins/gbp/gbp_api.c
+++ b/src/plugins/gbp/gbp_api.c
@@ -298,6 +298,7 @@ static void
int rv = 0;
rv = gbp_endpoint_group_add_and_lock (ntohs (mp->epg.epg_id),
+ ntohs (mp->epg.sclass),
ntohl (mp->epg.bd_id),
ntohl (mp->epg.rd_id),
ntohl (mp->epg.uplink_sw_if_index));
@@ -341,7 +342,8 @@ vl_api_gbp_bridge_domain_add_t_handler (vl_api_gbp_bridge_domain_add_t * mp)
gbp_bridge_domain_flags_from_api
(mp->bd.flags),
ntohl (mp->bd.bvi_sw_if_index),
- ntohl (mp->bd.uu_fwd_sw_if_index));
+ ntohl (mp->bd.uu_fwd_sw_if_index),
+ ntohl (mp->bd.bm_flood_sw_if_index));
REPLY_MACRO (VL_API_GBP_BRIDGE_DOMAIN_ADD_REPLY + GBP_MSG_BASE);
}
@@ -523,6 +525,7 @@ gbp_endpoint_group_send_details (gbp_endpoint_group_t * gg, void *args)
mp->epg.uplink_sw_if_index = ntohl (gg->gg_uplink_sw_if_index);
mp->epg.epg_id = ntohs (gg->gg_id);
+ mp->epg.sclass = ntohs (gg->gg_sclass);
mp->epg.bd_id = ntohl (gbp_endpoint_group_get_bd_id (gg));
mp->epg.rd_id = ntohl (gbp_route_domain_get_rd_id (gg->gg_rd));
@@ -567,6 +570,7 @@ gbp_bridge_domain_send_details (gbp_bridge_domain_t * gb, void *args)
mp->bd.bd_id = ntohl (gb->gb_bd_id);
mp->bd.bvi_sw_if_index = ntohl (gb->gb_bvi_sw_if_index);
mp->bd.uu_fwd_sw_if_index = ntohl (gb->gb_uu_fwd_sw_if_index);
+ mp->bd.bm_flood_sw_if_index = ntohl (gb->gb_bm_flood_sw_if_index);
vl_api_send_msg (ctx->reg, (u8 *) mp);
diff --git a/src/plugins/gbp/gbp_bridge_domain.c b/src/plugins/gbp/gbp_bridge_domain.c
index 21ffe9cc314..049c89bebab 100644
--- a/src/plugins/gbp/gbp_bridge_domain.c
+++ b/src/plugins/gbp/gbp_bridge_domain.c
@@ -15,6 +15,7 @@
#include <plugins/gbp/gbp_bridge_domain.h>
#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_sclass.h>
#include <vnet/dpo/dvr_dpo.h>
#include <vnet/fib/fib_table.h>
@@ -147,7 +148,9 @@ format_gbp_bridge_domain (u8 * s, va_list * args)
int
gbp_bridge_domain_add_and_lock (u32 bd_id,
gbp_bridge_domain_flags_t flags,
- u32 bvi_sw_if_index, u32 uu_fwd_sw_if_index)
+ u32 bvi_sw_if_index,
+ u32 uu_fwd_sw_if_index,
+ u32 bm_flood_sw_if_index)
{
gbp_bridge_domain_t *gb;
index_t gbi;
@@ -175,6 +178,7 @@ gbp_bridge_domain_add_and_lock (u32 bd_id,
gb->gb_bd_index = bd_index;
gb->gb_uu_fwd_sw_if_index = uu_fwd_sw_if_index;
gb->gb_bvi_sw_if_index = bvi_sw_if_index;
+ gb->gb_bm_flood_sw_if_index = bm_flood_sw_if_index;
gb->gb_locks = 1;
gb->gb_flags = flags;
@@ -185,9 +189,19 @@ gbp_bridge_domain_add_and_lock (u32 bd_id,
MODE_L2_BRIDGE, gb->gb_bvi_sw_if_index,
bd_index, L2_BD_PORT_TYPE_BVI, 0, 0);
if (~0 != gb->gb_uu_fwd_sw_if_index)
- set_int_l2_mode (vlib_get_main (), vnet_get_main (),
- MODE_L2_BRIDGE, gb->gb_uu_fwd_sw_if_index,
- bd_index, L2_BD_PORT_TYPE_UU_FWD, 0, 0);
+ {
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L2_BRIDGE, gb->gb_uu_fwd_sw_if_index,
+ bd_index, L2_BD_PORT_TYPE_UU_FWD, 0, 0);
+ gbp_sclass_enable_l2 (gb->gb_uu_fwd_sw_if_index);
+ }
+ if (~0 != gb->gb_bm_flood_sw_if_index)
+ {
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L2_BRIDGE, gb->gb_bm_flood_sw_if_index,
+ bd_index, L2_BD_PORT_TYPE_NORMAL, 0, 0);
+ gbp_sclass_enable_l2 (gb->gb_bm_flood_sw_if_index);
+ }
/*
* Add the BVI's MAC to the L2FIB
@@ -232,9 +246,19 @@ gbp_bridge_domain_unlock (index_t index)
MODE_L3, gb->gb_bvi_sw_if_index,
gb->gb_bd_index, L2_BD_PORT_TYPE_BVI, 0, 0);
if (~0 != gb->gb_uu_fwd_sw_if_index)
- set_int_l2_mode (vlib_get_main (), vnet_get_main (),
- MODE_L3, gb->gb_uu_fwd_sw_if_index,
- gb->gb_bd_index, L2_BD_PORT_TYPE_UU_FWD, 0, 0);
+ {
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L3, gb->gb_uu_fwd_sw_if_index,
+ gb->gb_bd_index, L2_BD_PORT_TYPE_UU_FWD, 0, 0);
+ gbp_sclass_disable_l2 (gb->gb_uu_fwd_sw_if_index);
+ }
+ if (~0 != gb->gb_bm_flood_sw_if_index)
+ {
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L3, gb->gb_bm_flood_sw_if_index,
+ gb->gb_bd_index, L2_BD_PORT_TYPE_NORMAL, 0, 0);
+ gbp_sclass_disable_l2 (gb->gb_bm_flood_sw_if_index);
+ }
gbp_bridge_domain_db_remove (gb);
@@ -280,6 +304,7 @@ gbp_bridge_domain_cli (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
vnet_main_t *vnm = vnet_get_main ();
+ u32 bm_flood_sw_if_index = ~0;
u32 uu_fwd_sw_if_index = ~0;
u32 bvi_sw_if_index = ~0;
u32 bd_id = ~0;
@@ -290,9 +315,12 @@ gbp_bridge_domain_cli (vlib_main_t * vm,
if (unformat (input, "bvi %U", unformat_vnet_sw_interface,
vnm, &bvi_sw_if_index))
;
- else if (unformat (input, "uu-flood %U", unformat_vnet_sw_interface,
+ else if (unformat (input, "uu-fwd %U", unformat_vnet_sw_interface,
vnm, &uu_fwd_sw_if_index))
;
+ else if (unformat (input, "bm-flood %U", unformat_vnet_sw_interface,
+ vnm, &bm_flood_sw_if_index))
+ ;
else if (unformat (input, "add"))
add = 1;
else if (unformat (input, "del"))
@@ -312,7 +340,9 @@ gbp_bridge_domain_cli (vlib_main_t * vm,
return clib_error_return (0, "interface must be specified");
gbp_bridge_domain_add_and_lock (bd_id, GBP_BD_FLAG_NONE,
- bvi_sw_if_index, uu_fwd_sw_if_index);
+ bvi_sw_if_index,
+ uu_fwd_sw_if_index,
+ bm_flood_sw_if_index);
}
else
gbp_bridge_domain_delete (bd_id);
diff --git a/src/plugins/gbp/gbp_bridge_domain.h b/src/plugins/gbp/gbp_bridge_domain.h
index 65f133c84da..95b53dc2088 100644
--- a/src/plugins/gbp/gbp_bridge_domain.h
+++ b/src/plugins/gbp/gbp_bridge_domain.h
@@ -58,6 +58,11 @@ typedef struct gbp_bridge_domain_t_
u32 gb_uu_fwd_sw_if_index;
/**
+ * The BD's interface to send broadcast and multicast packets
+ */
+ u32 gb_bm_flood_sw_if_index;
+
+ /**
* The BD's VNI interface on which packets from unknown endpoints
* arrive
*/
@@ -73,7 +78,9 @@ typedef struct gbp_bridge_domain_t_
extern int gbp_bridge_domain_add_and_lock (u32 bd_id,
gbp_bridge_domain_flags_t flags,
u32 bvi_sw_if_index,
- u32 uu_fwd_sw_if_index);
+ u32 uu_fwd_sw_if_index,
+ u32 bm_flood_sw_if_index);
+
extern void gbp_bridge_domain_unlock (index_t gbi);
extern index_t gbp_bridge_domain_find_and_lock (u32 bd_id);
extern int gbp_bridge_domain_delete (u32 bd_id);
diff --git a/src/plugins/gbp/gbp_endpoint_group.c b/src/plugins/gbp/gbp_endpoint_group.c
index 834f865bc92..cefdbea3652 100644
--- a/src/plugins/gbp/gbp_endpoint_group.c
+++ b/src/plugins/gbp/gbp_endpoint_group.c
@@ -33,6 +33,12 @@ gbp_endpoint_group_t *gbp_endpoint_group_pool;
* DB of endpoint_groups
*/
gbp_endpoint_group_db_t gbp_endpoint_group_db;
+
+/**
+ * Map sclass to EPG
+ */
+uword *gbp_epg_sclass_db;
+
vlib_log_class_t gg_logger;
#define GBP_EPG_DBG(...) \
@@ -68,6 +74,7 @@ gbp_endpoint_group_find (epg_id_t epg_id)
int
gbp_endpoint_group_add_and_lock (epg_id_t epg_id,
+ u16 sclass,
u32 bd_id, u32 rd_id, u32 uplink_sw_if_index)
{
gbp_endpoint_group_t *gg;
@@ -105,6 +112,10 @@ gbp_endpoint_group_add_and_lock (epg_id_t epg_id,
gg->gg_uplink_sw_if_index = uplink_sw_if_index;
gg->gg_locks = 1;
+ gg->gg_sclass = sclass;
+
+ if (SCLASS_INVALID != gg->gg_sclass)
+ hash_set (gbp_epg_sclass_db, gg->gg_sclass, gg->gg_id);
/*
* an egress DVR dpo for internal subnets to use when sending
@@ -179,6 +190,8 @@ gbp_endpoint_group_unlock (index_t ggi)
gbp_bridge_domain_unlock (gg->gg_gbd);
gbp_route_domain_unlock (gg->gg_rd);
+ if (SCLASS_INVALID != gg->gg_sclass)
+ hash_unset (gbp_epg_sclass_db, gg->gg_sclass);
hash_unset (gbp_endpoint_group_db.gg_hash, gg->gg_id);
pool_put (gbp_endpoint_group_pool, gg);
@@ -243,8 +256,8 @@ static clib_error_t *
gbp_endpoint_group_cli (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
+ epg_id_t epg_id = EPG_INVALID, sclass = SCLASS_INVALID;
vnet_main_t *vnm = vnet_get_main ();
- epg_id_t epg_id = EPG_INVALID;
u32 uplink_sw_if_index = ~0;
u32 bd_id = ~0;
u32 rd_id = ~0;
@@ -261,6 +274,8 @@ gbp_endpoint_group_cli (vlib_main_t * vm,
add = 0;
else if (unformat (input, "epg %d", &epg_id))
;
+ else if (unformat (input, "sclass %d", &sclass))
+ ;
else if (unformat (input, "bd %d", &bd_id))
;
else if (unformat (input, "rd %d", &rd_id))
@@ -281,7 +296,7 @@ gbp_endpoint_group_cli (vlib_main_t * vm,
if (~0 == rd_id)
return clib_error_return (0, "route-domain must be specified");
- gbp_endpoint_group_add_and_lock (epg_id, bd_id, rd_id,
+ gbp_endpoint_group_add_and_lock (epg_id, sclass, bd_id, rd_id,
uplink_sw_if_index);
}
else
diff --git a/src/plugins/gbp/gbp_endpoint_group.h b/src/plugins/gbp/gbp_endpoint_group.h
index 763a80e4d87..123954f63ea 100644
--- a/src/plugins/gbp/gbp_endpoint_group.h
+++ b/src/plugins/gbp/gbp_endpoint_group.h
@@ -31,6 +31,11 @@ typedef struct gpb_endpoint_group_t_
epg_id_t gg_id;
/**
+ * Sclass. May be unset (SCLASS_INVALID, i.e. ~0)
+ */
+ u16 gg_sclass;
+
+ /**
* Bridge-domain ID the EPG is in
*/
index_t gg_gbd;
@@ -71,6 +76,7 @@ typedef struct gbp_endpoint_group_db_t_
} gbp_endpoint_group_db_t;
extern int gbp_endpoint_group_add_and_lock (epg_id_t epg_id,
+ u16 sclass,
u32 bd_id,
u32 rd_id,
u32 uplink_sw_if_index);
@@ -96,6 +102,19 @@ extern u8 *format_gbp_endpoint_group (u8 * s, va_list * args);
*/
extern gbp_endpoint_group_db_t gbp_endpoint_group_db;
extern gbp_endpoint_group_t *gbp_endpoint_group_pool;
+extern uword *gbp_epg_sclass_db;
+
+always_inline gbp_endpoint_group_t *
+gbp_epg_get (epg_id_t epg)
+{
+ uword *p;
+
+ p = hash_get (gbp_endpoint_group_db.gg_hash, epg);
+
+ if (NULL != p)
+ return (pool_elt_at_index (gbp_endpoint_group_pool, p[0]));
+ return (NULL);
+}
always_inline u32
gbp_epg_itf_lookup (epg_id_t epg)
@@ -114,6 +133,20 @@ gbp_epg_itf_lookup (epg_id_t epg)
return (~0);
}
+always_inline epg_id_t
+gbp_epg_sclass_2_id (u16 sclass)
+{
+ uword *p;
+
+ p = hash_get (gbp_epg_sclass_db, sclass);
+
+ if (NULL != p)
+ {
+ return (p[0]);
+ }
+ return (EPG_INVALID);
+}
+
always_inline const dpo_id_t *
gbp_epg_dpo_lookup (epg_id_t epg, fib_protocol_t fproto)
{
diff --git a/src/plugins/gbp/gbp_learn.c b/src/plugins/gbp/gbp_learn.c
index 762b463223e..514aca26ef9 100644
--- a/src/plugins/gbp/gbp_learn.c
+++ b/src/plugins/gbp/gbp_learn.c
@@ -706,7 +706,9 @@ void
gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode)
{
if (GBP_LEARN_MODE_L2 == mode)
- l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 1);
+ {
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 1);
+ }
else
{
vnet_feature_enable_disable ("ip4-unicast",
@@ -720,7 +722,9 @@ void
gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode)
{
if (GBP_LEARN_MODE_L2 == mode)
- l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 0);
+ {
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 0);
+ }
else
{
vnet_feature_enable_disable ("ip4-unicast",
diff --git a/src/plugins/gbp/gbp_route_domain.c b/src/plugins/gbp/gbp_route_domain.c
index 67b6915b463..6a3f4fa7f1e 100644
--- a/src/plugins/gbp/gbp_route_domain.c
+++ b/src/plugins/gbp/gbp_route_domain.c
@@ -15,6 +15,7 @@
#include <plugins/gbp/gbp_route_domain.h>
#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_sclass.h>
#include <vnet/dpo/dvr_dpo.h>
#include <vnet/fib/fib_table.h>
@@ -182,6 +183,8 @@ gbp_route_domain_add_and_lock (u32 rd_id,
&ADJ_BCAST_ADDR,
grd->grd_uu_sw_if_index[fproto],
rewrite);
+
+ gbp_sclass_enable_ip (grd->grd_uu_sw_if_index[fproto]);
}
else
{
@@ -223,6 +226,8 @@ gbp_route_domain_unlock (index_t index)
fproto, FIB_SOURCE_PLUGIN_HI);
if (INDEX_INVALID != grd->grd_adj[fproto])
adj_unlock (grd->grd_adj[fproto]);
+ if (~0 != grd->grd_uu_sw_if_index[fproto])
+ gbp_sclass_disable_ip (grd->grd_uu_sw_if_index[fproto]);
}
gbp_route_domain_db_remove (grd);
diff --git a/src/plugins/gbp/gbp_sclass.c b/src/plugins/gbp/gbp_sclass.c
new file mode 100644
index 00000000000..10ecf1fa42e
--- /dev/null
+++ b/src/plugins/gbp/gbp_sclass.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+
+/**
+ * Grouping of global data for the GBP source EPG classification feature
+ */
+typedef struct gbp_sclass_main_t_
+{
+ /**
+ * Next nodes for L2 output features
+ */
+ u32 gel_l2_input_feat_next[32];
+ u32 gel_l2_output_feat_next[32];
+} gbp_sclass_main_t;
+
+static gbp_sclass_main_t gbp_sclass_main;
+
+#define foreach_gbp_sclass \
+ _(DROP, "drop")
+
+
+typedef enum
+{
+#define _(sym,str) GBP_SCLASS_NEXT_##sym,
+ foreach_gbp_sclass
+#undef _
+ GBP_SCLASS_N_NEXT,
+} gbp_sclass_next_t;
+
+typedef struct gbp_sclass_trace_t_
+{
+ /* per-pkt trace data */
+ u32 epg;
+ u32 sclass;
+} gbp_sclass_trace_t;
+
+always_inline uword
+gbp_sclass_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_id_2_sclass, int is_l2)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+ gbp_sclass_main_t *glm;
+
+ glm = &gbp_sclass_main;
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ gbp_sclass_next_t next0;
+ vlib_buffer_t *b0;
+ epg_id_t epg0;
+ u16 sclass0;
+ u32 bi0;
+
+ next0 = GBP_SCLASS_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (is_id_2_sclass)
+ {
+ // output direction - convert from the SRC-EPG to the sclass
+ gbp_endpoint_group_t *gg;
+
+ epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+ gg = gbp_epg_get (epg0);
+
+ if (NULL != gg)
+ {
+ sclass0 = vnet_buffer2 (b0)->gbp.sclass = gg->gg_sclass;
+ if (is_l2)
+ next0 =
+ vnet_l2_feature_next (b0, glm->gel_l2_output_feat_next,
+ L2OUTPUT_FEAT_GBP_ID_2_SCLASS);
+ else
+ vnet_feature_next (&next0, b0);
+ }
+ else
+ sclass0 = 0;
+ }
+ else
+ {
+ /* input direction - convert from the sclass to the SRC-EPG */
+ sclass0 = vnet_buffer2 (b0)->gbp.sclass;
+ vnet_buffer2 (b0)->gbp.src_epg =
+ gbp_epg_sclass_2_id (vnet_buffer2 (b0)->gbp.sclass);
+ epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+
+ if (EPG_INVALID != epg0)
+ {
+ if (is_l2)
+ next0 =
+ vnet_l2_feature_next (b0, glm->gel_l2_input_feat_next,
+ L2INPUT_FEAT_GBP_SCLASS_2_ID);
+ else
+ vnet_feature_next (&next0, b0);
+ }
+ }
+
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_sclass_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->epg = epg0;
+ t->sclass = sclass0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+always_inline uword
+l2_gbp_id_2_sclass (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 1, 1));
+}
+
+always_inline uword
+l2_gbp_sclass_2_id (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 0, 1));
+}
+
+always_inline uword
+ip4_gbp_id_2_sclass (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 1, 0));
+}
+
+always_inline uword
+ip4_gbp_sclass_2_id (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 0, 0));
+}
+
+always_inline uword
+ip6_gbp_id_2_sclass (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 1, 0));
+}
+
+always_inline uword
+ip6_gbp_sclass_2_id (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 0, 0));
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_sclass_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_sclass_trace_t *t = va_arg (*args, gbp_sclass_trace_t *);
+
+ s = format (s, "epg:%d sclass:%d", t->epg, t->sclass);
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_gbp_id_2_sclass_node) = {
+ .function = l2_gbp_id_2_sclass,
+ .name = "l2-gbp-id-2-sclass",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+VLIB_REGISTER_NODE (l2_gbp_sclass_2_id_node) = {
+ .function = l2_gbp_sclass_2_id,
+ .name = "l2-gbp-sclass-2-id",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (ip4_gbp_id_2_sclass_node) = {
+ .function = ip4_gbp_id_2_sclass,
+ .name = "ip4-gbp-id-2-sclass",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+VLIB_REGISTER_NODE (ip4_gbp_sclass_2_id_node) = {
+ .function = ip4_gbp_sclass_2_id,
+ .name = "ip4-gbp-sclass-2-id",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (ip6_gbp_id_2_sclass_node) = {
+ .function = ip6_gbp_id_2_sclass,
+ .name = "ip6-gbp-id-2-sclass",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+VLIB_REGISTER_NODE (ip6_gbp_sclass_2_id_node) = {
+ .function = ip6_gbp_sclass_2_id,
+ .name = "ip6-gbp-sclass-2-id",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (l2_gbp_id_2_sclass_node, l2_gbp_id_2_sclass);
+VLIB_NODE_FUNCTION_MULTIARCH (l2_gbp_sclass_2_id_node, l2_gbp_sclass_2_id);
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_id_2_sclass_node, ip4_gbp_id_2_sclass);
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_sclass_2_id_node, ip4_gbp_sclass_2_id);
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_id_2_sclass_node, ip6_gbp_id_2_sclass);
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_sclass_2_id_node, ip6_gbp_sclass_2_id);
+
+VNET_FEATURE_INIT (ip4_gbp_sclass_2_id_feat, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-gbp-sclass-2-id",
+ .runs_before = VNET_FEATURES ("gbp-learn-ip4"),
+};
+VNET_FEATURE_INIT (ip6_gbp_sclass_2_id_feat, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-gbp-sclass-2-id",
+ .runs_before = VNET_FEATURES ("gbp-learn-ip6"),
+};
+VNET_FEATURE_INIT (ip4_gbp_id_2_sclass_feat, static) =
+{
+ .arc_name = "ip4-output",
+ .node_name = "ip4-gbp-id-2-sclass",
+};
+VNET_FEATURE_INIT (ip6_gbp_id_2_sclass_feat, static) =
+{
+ .arc_name = "ip6-output",
+ .node_name = "ip6-gbp-id-2-sclass",
+};
+/* *INDENT-ON* */
+
+void
+gbp_sclass_enable_l2 (u32 sw_if_index)
+{
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SCLASS_2_ID, 1);
+ l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_ID_2_SCLASS, 1);
+}
+
+void
+gbp_sclass_disable_l2 (u32 sw_if_index)
+{
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SCLASS_2_ID, 0);
+ l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_ID_2_SCLASS, 0);
+}
+
+void
+gbp_sclass_enable_ip (u32 sw_if_index)
+{
+ vnet_feature_enable_disable ("ip4-unicast",
+ "ip4-gbp-sclass-2-id", sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "ip6-gbp-sclass-2-id", sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip4-output",
+ "ip4-gbp-id-2-sclass", sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-output",
+ "ip6-gbp-id-2-sclass", sw_if_index, 1, 0, 0);
+}
+
+void
+gbp_sclass_disable_ip (u32 sw_if_index)
+{
+ vnet_feature_enable_disable ("ip4-unicast",
+ "ip4-gbp-sclass-2-id", sw_if_index, 0, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "ip6-gbp-sclass-2-id", sw_if_index, 0, 0, 0);
+ vnet_feature_enable_disable ("ip4-output",
+ "ip4-gbp-id-2-sclass", sw_if_index, 0, 0, 0);
+ vnet_feature_enable_disable ("ip6-output",
+ "ip6-gbp-id-2-sclass", sw_if_index, 0, 0, 0);
+}
+
+static clib_error_t *
+gbp_sclass_init (vlib_main_t * vm)
+{
+ gbp_sclass_main_t *glm = &gbp_sclass_main;
+
+ /* Initialize the feature next-node indices */
+ feat_bitmap_init_next_nodes (vm,
+ l2_gbp_sclass_2_id_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ glm->gel_l2_input_feat_next);
+ feat_bitmap_init_next_nodes (vm,
+ l2_gbp_id_2_sclass_node.index,
+ L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ glm->gel_l2_output_feat_next);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_sclass_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_sclass.h b/src/plugins/gbp/gbp_sclass.h
new file mode 100644
index 00000000000..07c5fffcc96
--- /dev/null
+++ b/src/plugins/gbp/gbp_sclass.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_SCLASS_H__
+#define __GBP_SCLASS_H__
+
+#include <plugins/gbp/gbp.h>
+
+extern void gbp_sclass_enable_ip (u32 sw_if_index);
+extern void gbp_sclass_enable_l2 (u32 sw_if_index);
+extern void gbp_sclass_disable_ip (u32 sw_if_index);
+extern void gbp_sclass_disable_l2 (u32 sw_if_index);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_types.h b/src/plugins/gbp/gbp_types.h
index afb17e3a5d5..0faa74b694e 100644
--- a/src/plugins/gbp/gbp_types.h
+++ b/src/plugins/gbp/gbp_types.h
@@ -20,6 +20,7 @@
typedef u16 epg_id_t;
#define EPG_INVALID ((u16)~0)
+#define SCLASS_INVALID ((u16)~0)
#endif
diff --git a/src/plugins/gbp/gbp_vxlan.c b/src/plugins/gbp/gbp_vxlan.c
index 2b264f813ed..7fbd7e9e7d5 100644
--- a/src/plugins/gbp/gbp_vxlan.c
+++ b/src/plugins/gbp/gbp_vxlan.c
@@ -18,6 +18,7 @@
#include <plugins/gbp/gbp_learn.h>
#include <plugins/gbp/gbp_bridge_domain.h>
#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_sclass.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vlibmemory/api.h>
@@ -106,13 +107,12 @@ format_vxlan_tunnel_ref (u8 * s, va_list * args)
static u32
gdb_vxlan_dep_add (gbp_vxlan_tunnel_t * gt,
- u32 vni,
const ip46_address_t * src, const ip46_address_t * dst)
{
vnet_vxlan_gbp_tunnel_add_del_args_t args = {
.is_add = 1,
.is_ip6 = !ip46_address_is_ip4 (src),
- .vni = vni,
+ .vni = gt->gt_vni,
.src = *src,
.dst = *dst,
.instance = ~0,
@@ -140,7 +140,7 @@ gdb_vxlan_dep_add (gbp_vxlan_tunnel_t * gt,
GBP_VXLAN_TUN_DBG ("add-dep:%U %U %U %d", format_vnet_sw_if_index_name,
vnet_get_main (), sw_if_index,
format_ip46_address, src, IP46_TYPE_ANY,
- format_ip46_address, dst, IP46_TYPE_ANY, vni);
+ format_ip46_address, dst, IP46_TYPE_ANY, gt->gt_vni);
pool_get_zero (vxlan_tunnel_ref_pool, vxr);
@@ -161,13 +161,25 @@ gdb_vxlan_dep_add (gbp_vxlan_tunnel_t * gt,
if (GBP_VXLAN_TUN_L2 == vxr->vxr_layer)
{
+ l2output_feat_masks_t ofeat;
+ l2input_feat_masks_t ifeat;
+ gbp_bridge_domain_t *gbd;
+
+ gbd = gbp_bridge_domain_get (gt->gt_gbd);
vxr->vxr_itf = gbp_itf_add_and_lock (vxr->vxr_sw_if_index,
gt->gt_bd_index);
- gbp_itf_set_l2_output_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
- L2OUTPUT_FEAT_GBP_POLICY_MAC);
- gbp_itf_set_l2_input_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
- L2INPUT_FEAT_GBP_LEARN);
+ ofeat = (L2OUTPUT_FEAT_GBP_POLICY_MAC |
+ L2OUTPUT_FEAT_GBP_ID_2_SCLASS);
+ ifeat = L2INPUT_FEAT_GBP_SCLASS_2_ID;
+
+ if (!(gbd->gb_flags & GBP_BD_FLAG_DO_NOT_LEARN))
+ ifeat |= L2INPUT_FEAT_GBP_LEARN;
+
+ gbp_itf_set_l2_output_feature (vxr->vxr_itf,
+ vxr->vxr_sw_if_index, ofeat);
+ gbp_itf_set_l2_input_feature (vxr->vxr_itf,
+ vxr->vxr_sw_if_index, ifeat);
}
else
{
@@ -181,6 +193,7 @@ gdb_vxlan_dep_add (gbp_vxlan_tunnel_t * gt,
grd->grd_table_id[fproto], 1);
gbp_learn_enable (vxr->vxr_sw_if_index, GBP_LEARN_MODE_L3);
+ gbp_sclass_enable_ip (vxr->vxr_sw_if_index);
}
}
@@ -235,7 +248,7 @@ gbp_vxlan_tunnel_clone_and_lock (u32 sw_if_index,
gt = pool_elt_at_index (gbp_vxlan_tunnel_pool, gti);
- return (gdb_vxlan_dep_add (gt, gt->gt_vni, src, dst));
+ return (gdb_vxlan_dep_add (gt, src, dst));
}
static void
@@ -270,6 +283,8 @@ gdb_vxlan_dep_del (index_t vxri)
FOR_EACH_FIB_IP_PROTOCOL (fproto)
ip_table_bind (fproto, vxr->vxr_sw_if_index, 0, 0);
+ gbp_sclass_disable_ip (vxr->vxr_sw_if_index);
+ gbp_learn_disable (vxr->vxr_sw_if_index, GBP_LEARN_MODE_L3);
}
vnet_vxlan_gbp_tunnel_del (vxr->vxr_sw_if_index);
@@ -712,6 +727,7 @@ gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
gt->gt_itf = gbp_itf_add_and_lock (gt->gt_sw_if_index,
gt->gt_bd_index);
gbp_learn_enable (gt->gt_sw_if_index, GBP_LEARN_MODE_L2);
+ gbp_sclass_enable_l2 (gt->gt_sw_if_index);
}
else
{
@@ -724,6 +740,7 @@ gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
grd->grd_vni_sw_if_index = gt->gt_sw_if_index;
gbp_learn_enable (gt->gt_sw_if_index, GBP_LEARN_MODE_L3);
+ gbp_sclass_enable_ip (gt->gt_sw_if_index);
ip4_sw_interface_enable_disable (gt->gt_sw_if_index, 1);
ip6_sw_interface_enable_disable (gt->gt_sw_if_index, 1);
@@ -788,6 +805,7 @@ gbp_vxlan_tunnel_del (u32 vni)
if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
{
gbp_learn_disable (gt->gt_sw_if_index, GBP_LEARN_MODE_L2);
+ gbp_sclass_disable_l2 (gt->gt_sw_if_index);
gbp_itf_unlock (gt->gt_itf);
gbp_bridge_domain_unlock (gt->gt_gbd);
}
@@ -802,6 +820,7 @@ gbp_vxlan_tunnel_del (u32 vni)
ip6_sw_interface_enable_disable (gt->gt_sw_if_index, 0);
gbp_learn_disable (gt->gt_sw_if_index, GBP_LEARN_MODE_L3);
+ gbp_sclass_disable_ip (gt->gt_sw_if_index);
gbp_route_domain_unlock (gt->gt_grd);
}
diff --git a/src/vnet/buffer.h b/src/vnet/buffer.h
index 89dd84567bc..06696515ecd 100644
--- a/src/vnet/buffer.h
+++ b/src/vnet/buffer.h
@@ -389,7 +389,11 @@ typedef struct
{
u8 __unused;
u8 flags;
- u16 src_epg;
+ union
+ {
+ u16 src_epg;
+ u16 sclass;
+ };
} gbp;
union
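
The buffer.h hunk above makes src_epg and sclass two names for the same 16-bit metadata field; that is what lets vxlan-gbp decap store the wire sclass and the new sclass-2-id node later overwrite the same field with the resolved EPG id. A minimal standalone sketch of the aliasing (plain C for illustration only, not VPP code; the struct merely mirrors the layout shown above):

#include <assert.h>
#include <stdint.h>

typedef uint8_t u8;
typedef uint16_t u16;

/* stand-in for the gbp member of the vnet_buffer2 metadata */
typedef struct
{
  u8 pad;			/* "__unused" in buffer.h */
  u8 flags;
  union
  {
    u16 src_epg;		/* locally significant source EPG id */
    u16 sclass;			/* sclass as carried on the VXLAN-GBP wire */
  };
} gbp_meta_t;

int
main (void)
{
  gbp_meta_t m = { 0 };

  m.sclass = 1234;		/* what the decap path now stores */
  assert (m.src_epg == 1234);	/* same storage, readable under either name */
  return 0;
}
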
diff --git a/src/vnet/l2/l2_input.h b/src/vnet/l2/l2_input.h
index 93da1277e67..12e7e54038c 100644
--- a/src/vnet/l2/l2_input.h
+++ b/src/vnet/l2/l2_input.h
@@ -114,6 +114,7 @@ l2input_bd_config (u32 bd_index)
_(GBP_NULL_CLASSIFY, "gbp-null-classify") \
_(GBP_SRC_CLASSIFY, "gbp-src-classify") \
_(GBP_LPM_CLASSIFY, "l2-gbp-lpm-classify") \
+ _(GBP_SCLASS_2_ID, "l2-gbp-sclass-2-id") \
_(VTR, "l2-input-vtr") \
_(L2_IP_QOS_RECORD, "l2-ip-qos-record") \
_(VPATH, "vpath-input-l2") \
diff --git a/src/vnet/l2/l2_output.h b/src/vnet/l2/l2_output.h
index 74d2829839f..fdb6167155f 100644
--- a/src/vnet/l2/l2_output.h
+++ b/src/vnet/l2/l2_output.h
@@ -81,6 +81,7 @@ extern vlib_node_registration_t l2output_node;
#define foreach_l2output_feat \
_(OUTPUT, "interface-output") \
_(SPAN, "span-l2-output") \
+ _(GBP_ID_2_SCLASS, "l2-gbp-id-2-sclass") \
_(GBP_POLICY_PORT, "gbp-policy-port") \
_(GBP_POLICY_MAC, "gbp-policy-mac") \
_(CFM, "feature-bitmap-drop") \
diff --git a/src/vnet/vxlan-gbp/decap.c b/src/vnet/vxlan-gbp/decap.c
index 613cb012319..6c14ef79531 100644
--- a/src/vnet/vxlan-gbp/decap.c
+++ b/src/vnet/vxlan-gbp/decap.c
@@ -334,7 +334,7 @@ vxlan_gbp_input (vlib_main_t * vm,
}
vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
- vnet_buffer2 (b0)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp0);
+ vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
if (PREDICT_FALSE
@@ -369,7 +369,7 @@ vxlan_gbp_input (vlib_main_t * vm,
}
vnet_buffer2 (b1)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
- vnet_buffer2 (b1)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp1);
+ vnet_buffer2 (b1)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
vnet_update_l2_len (b0);
vnet_update_l2_len (b1);
@@ -473,7 +473,7 @@ vxlan_gbp_input (vlib_main_t * vm,
(rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
}
vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
- vnet_buffer2 (b0)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp0);
+ vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
/* Required to make the l2 tag push / pop code work on l2 subifs */
vnet_update_l2_len (b0);
diff --git a/src/vnet/vxlan-gbp/encap.c b/src/vnet/vxlan-gbp/encap.c
index f1b839ce316..f8fc9b4b998 100644
--- a/src/vnet/vxlan-gbp/encap.c
+++ b/src/vnet/vxlan-gbp/encap.c
@@ -260,9 +260,9 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
vxlan_gbp1->gpflags = vnet_buffer2 (b1)->gbp.flags;
vxlan_gbp0->sclass =
- clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.src_epg);
+ clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);
vxlan_gbp1->sclass =
- clib_host_to_net_u16 (vnet_buffer2 (b1)->gbp.src_epg);
+ clib_host_to_net_u16 (vnet_buffer2 (b1)->gbp.sclass);
if (csum_offload)
{
@@ -324,7 +324,7 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->tunnel_index = t0 - vxm->tunnels;
tr->vni = t0->vni;
- tr->sclass = vnet_buffer2 (b0)->gbp.src_epg;
+ tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
tr->flags = vnet_buffer2 (b0)->gbp.flags;
}
@@ -334,7 +334,7 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
vlib_add_trace (vm, node, b1, sizeof (*tr));
tr->tunnel_index = t1 - vxm->tunnels;
tr->vni = t1->vni;
- tr->sclass = vnet_buffer2 (b1)->gbp.src_epg;
+ tr->sclass = vnet_buffer2 (b1)->gbp.sclass;
tr->flags = vnet_buffer2 (b1)->gbp.flags;
}
@@ -426,7 +426,7 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
/* set source class and gpflags */
vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
vxlan_gbp0->sclass =
- clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.src_epg);
+ clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);
if (csum_offload)
{
@@ -469,7 +469,7 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->tunnel_index = t0 - vxm->tunnels;
tr->vni = t0->vni;
- tr->sclass = vnet_buffer2 (b0)->gbp.src_epg;
+ tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
tr->flags = vnet_buffer2 (b0)->gbp.flags;
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,