author     Neale Ranns <nranns@cisco.com>   2019-02-28 11:11:39 +0000
committer  Neale Ranns <nranns@cisco.com>   2019-03-06 12:15:10 +0000
commit     4ba67723d716660c56326ce498b99a060a9471b1 (patch)
tree       10f2fc773e660bad99ee6b7ae7845b1f23102bb8 /src/plugins/gbp/gbp_classify_node.c
parent     6955595a577e1b7d316b5b69267bf1d1d951a4ab (diff)
GBP: use sclass in the DP for policy
Change-Id: I154e18f22ec7708127b8ade98e80546ab1dcd05b
Signed-off-by: Neale Ranns <nranns@cisco.com>
Diffstat (limited to 'src/plugins/gbp/gbp_classify_node.c')
-rw-r--r--  src/plugins/gbp/gbp_classify_node.c  31
1 file changed, 16 insertions, 15 deletions
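
The diff below replaces the endpoint group id (epg_id_t src_epg) carried in the per-packet GBP metadata and in the classify trace records with the source class (sclass_t sclass), so the data-plane policy nodes match on sclass directly. As a rough, self-contained sketch of that pattern (not the actual VPP code: gbp_buffer_meta_t, classify_one and the SCLASS_INVALID value here are simplified stand-ins inferred from the identifiers visible in the diff), a classify step copies the endpoint's sclass into the buffer metadata and, when tracing, into the trace record:

/* Minimal sketch of the pattern this commit applies: the classifier
 * stamps a source class (sclass) into per-packet metadata instead of
 * a source EPG id.  Types and helpers are simplified stand-ins, not
 * the real VPP definitions. */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t sclass_t;              /* assumed to be a 16-bit id */
#define SCLASS_INVALID ((sclass_t) ~0)  /* assumed sentinel, mirrors EPG_INVALID */

/* stand-in for the gbp fields reached via vnet_buffer2() */
typedef struct
{
  sclass_t sclass;
} gbp_buffer_meta_t;

/* stand-in for the per-packet trace record in gbp_classify_node.c */
typedef struct
{
  sclass_t sclass;
} gbp_classify_trace_t;

/* stand-in endpoint: only the forwarding sclass matters here */
typedef struct
{
  sclass_t gef_sclass;
} gbp_endpoint_t;

/* classify one packet: copy the endpoint's sclass (or the invalid
 * sentinel) into the buffer metadata, and into the trace if tracing */
static void
classify_one (const gbp_endpoint_t * ge, gbp_buffer_meta_t * meta,
	      gbp_classify_trace_t * trace)
{
  sclass_t sclass = (ge != NULL) ? ge->gef_sclass : SCLASS_INVALID;

  meta->sclass = sclass;
  if (trace != NULL)
    trace->sclass = sclass;
}

int
main (void)
{
  gbp_endpoint_t ep = {.gef_sclass = 42 };
  gbp_buffer_meta_t meta;
  gbp_classify_trace_t trace;

  classify_one (&ep, &meta, &trace);
  printf ("sclass:%d\n", trace.sclass);   /* same "sclass:%d" shape as the trace format */

  classify_one (NULL, &meta, NULL);       /* no endpoint -> SCLASS_INVALID */
  printf ("sclass:%d\n", meta.sclass);
  return 0;
}

The real node does the equivalent work per buffer inside its dual/single loop, as shown in the hunks that follow.
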
diff --git a/src/plugins/gbp/gbp_classify_node.c b/src/plugins/gbp/gbp_classify_node.c
index 43fea769cce..1b2cb0a2bc7 100644
--- a/src/plugins/gbp/gbp_classify_node.c
+++ b/src/plugins/gbp/gbp_classify_node.c
@@ -33,7 +33,7 @@
typedef struct gbp_classify_trace_t_
{
/* per-pkt trace data */
- epg_id_t src_epg;
+ sclass_t sclass;
} gbp_classify_trace_t;
/*
@@ -61,9 +61,10 @@ gbp_classify_inline (vlib_main_t * vm,
while (n_left_from > 0 && n_left_to_next > 0)
{
- u32 next0, bi0, src_epg, sw_if_index0;
+ u32 next0, bi0, sw_if_index0;
const gbp_endpoint_t *ge0;
vlib_buffer_t *b0;
+ sclass_t sclass0;
bi0 = from[0];
to_next[0] = bi0;
@@ -79,7 +80,7 @@ gbp_classify_inline (vlib_main_t * vm,
if (GBP_SRC_CLASSIFY_NULL == type)
{
- src_epg = EPG_INVALID;
+ sclass0 = SCLASS_INVALID;
next0 =
vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
L2INPUT_FEAT_GBP_NULL_CLASSIFY);
@@ -139,18 +140,18 @@ gbp_classify_inline (vlib_main_t * vm,
}
if (PREDICT_TRUE (NULL != ge0))
- src_epg = ge0->ge_fwd.gef_epg_id;
+ sclass0 = ge0->ge_fwd.gef_sclass;
else
- src_epg = EPG_INVALID;
+ sclass0 = SCLASS_INVALID;
}
- vnet_buffer2 (b0)->gbp.src_epg = src_epg;
+ vnet_buffer2 (b0)->gbp.sclass = sclass0;
if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
{
gbp_classify_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
- t->src_epg = src_epg;
+ t->sclass = sclass0;
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
@@ -205,7 +206,7 @@ format_gbp_classify_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
- s = format (s, "src-epg:%d", t->src_epg);
+ s = format (s, "sclass:%d", t->sclass);
return s;
}
@@ -342,7 +343,7 @@ gbp_lpm_classify_inline (vlib_main_t * vm,
ip4_header_t *ip4_0;
ip6_header_t *ip6_0;
vlib_buffer_t *b0;
- epg_id_t src_epg0;
+ sclass_t sclass0;
bi0 = from[0];
to_next[0] = bi0;
@@ -383,7 +384,7 @@ gbp_lpm_classify_inline (vlib_main_t * vm,
break;
default:
/* not IP so no LPM classify possible */
- src_epg0 = EPG_INVALID;
+ sclass0 = SCLASS_INVALID;
goto trace;
}
}
@@ -418,7 +419,7 @@ gbp_lpm_classify_inline (vlib_main_t * vm,
else
{
/* not IP so no LPM classify possible */
- src_epg0 = EPG_INVALID;
+ sclass0 = SCLASS_INVALID;
goto trace;
}
lb0 = load_balance_get (lbi0);
@@ -427,23 +428,23 @@ gbp_lpm_classify_inline (vlib_main_t * vm,
if (gbp_policy_dpo_type == dpo0->dpoi_type)
{
gpd0 = gbp_policy_dpo_get (dpo0->dpoi_index);
- src_epg0 = gpd0->gpd_epg;
+ sclass0 = gpd0->gpd_sclass;
}
else
{
/* could not classify => drop */
- src_epg0 = EPG_INVALID;
+ sclass0 = SCLASS_INVALID;
next0 = GPB_LPM_CLASSIFY_DROP;
}
trace:
- vnet_buffer2 (b0)->gbp.src_epg = src_epg0;
+ vnet_buffer2 (b0)->gbp.sclass = sclass0;
if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
{
gbp_classify_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
- t->src_epg = src_epg0;
+ t->sclass = sclass0;
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,