Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/gbp/CMakeLists.txt       |  14
-rw-r--r--  src/plugins/gbp/gbp_classify.c       | 537
-rw-r--r--  src/plugins/gbp/gbp_classify.h       |  53
-rw-r--r--  src/plugins/gbp/gbp_classify_node.c  | 542
-rw-r--r--  src/plugins/gbp/gbp_fwd.c            |  34
-rw-r--r--  src/plugins/gbp/gbp_learn.c          | 686
-rw-r--r--  src/plugins/gbp/gbp_learn.h          |  29
-rw-r--r--  src/plugins/gbp/gbp_learn_node.c     | 673
-rw-r--r--  src/plugins/gbp/gbp_policy.c         | 363
-rw-r--r--  src/plugins/gbp/gbp_policy_node.c    | 388
-rw-r--r--  src/plugins/gbp/gbp_sclass.c         | 290
-rw-r--r--  src/plugins/gbp/gbp_sclass.h         |  20
-rw-r--r--  src/plugins/gbp/gbp_sclass_node.c    | 296
13 files changed, 2036 insertions, 1889 deletions
diff --git a/src/plugins/gbp/CMakeLists.txt b/src/plugins/gbp/CMakeLists.txt
index 140a456bfd0..02c4c17beb2 100644
--- a/src/plugins/gbp/CMakeLists.txt
+++ b/src/plugins/gbp/CMakeLists.txt
@@ -16,6 +16,7 @@ add_vpp_plugin(gbp
gbp_api.c
gbp_bridge_domain.c
gbp_classify.c
+ gbp_classify_node.c
gbp_contract.c
gbp_endpoint.c
gbp_endpoint_group.c
@@ -25,25 +26,28 @@ add_vpp_plugin(gbp
gbp_fwd_node.c
gbp_itf.c
gbp_learn.c
+ gbp_learn_node.c
gbp_policy.c
gbp_policy_dpo.c
+ gbp_policy_node.c
gbp_recirc.c
gbp_route_domain.c
gbp_scanner.c
gbp_sclass.c
+ gbp_sclass_node.c
gbp_subnet.c
gbp_vxlan.c
gbp_vxlan_node.c
MULTIARCH_SOURCES
+ gbp_classify_node.c
+ gbp_fwd_dpo.c
gbp_fwd_node.c
- gbp_policy.c
- gbp_learn.c
+ gbp_learn_node.c
gbp_policy_dpo.c
- gbp_fwd_dpo.c
+ gbp_policy_node.c
gbp_vxlan_node.c
- gbp_sclass.c
- gbp_classify.c
+ gbp_sclass_node.c
API_FILES
gbp.api
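The build change above is the point of the whole patch: the packet-processing code is split out into *_node.c files and those files are listed under MULTIARCH_SOURCES, so only they are compiled once per supported CPU micro-architecture while the control-plane files build once. A minimal sketch of the node pattern those files follow (a hypothetical node for illustration, not code from this patch; VLIB_NODE_FN is what lets the build emit per-arch variants of the dispatch function and select the best one at runtime):

#include <vlib/vlib.h>

VLIB_NODE_FN (example_node) (vlib_main_t * vm,
			     vlib_node_runtime_t * node,
			     vlib_frame_t * frame)
{
  /* a real node would classify/rewrite the buffers in this frame */
  return frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (example_node) = {
  .name = "example-node",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
};
/* *INDENT-ON* */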
diff --git a/src/plugins/gbp/gbp_classify.c b/src/plugins/gbp/gbp_classify.c
index 2eaa7bb44b6..5735f359af0 100644
--- a/src/plugins/gbp/gbp_classify.c
+++ b/src/plugins/gbp/gbp_classify.c
@@ -16,543 +16,10 @@
*/
#include <plugins/gbp/gbp.h>
-#include <plugins/gbp/gbp_policy_dpo.h>
-#include <plugins/gbp/gbp_ext_itf.h>
-#include <vnet/fib/ip4_fib.h>
-#include <vnet/fib/ip6_fib.h>
-#include <vnet/dpo/load_balance.h>
+#include <plugins/gbp/gbp_classify.h>
#include <vnet/l2/l2_input.h>
-#include <vnet/l2/feat_bitmap.h>
-#include <vnet/fib/fib_table.h>
-#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
-typedef enum gbp_src_classify_type_t_
-{
- GBP_SRC_CLASSIFY_NULL,
- GBP_SRC_CLASSIFY_PORT,
- GBP_SRC_CLASSIFY_LPM,
-} gbp_src_classify_type_t;
-
-#define GBP_SRC_N_CLASSIFY (GBP_SRC_CLASSIFY_LPM + 1)
-
-/**
- * Grouping of global data for the GBP source EPG classification feature
- */
-typedef struct gbp_src_classify_main_t_
-{
- /**
- * Next nodes for L2 output features
- */
- u32 l2_input_feat_next[GBP_SRC_N_CLASSIFY][32];
-} gbp_src_classify_main_t;
-
-static gbp_src_classify_main_t gbp_src_classify_main;
-
-/**
- * per-packet trace data
- */
-typedef struct gbp_classify_trace_t_
-{
- /* per-pkt trace data */
- epg_id_t src_epg;
-} gbp_classify_trace_t;
-
-/*
- * determine the SRC EPG form the input port
- */
-always_inline uword
-gbp_classify_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame,
- gbp_src_classify_type_t type, dpo_proto_t dproto)
-{
- gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
- u32 n_left_from, *from, *to_next;
- u32 next_index;
-
- next_index = 0;
- n_left_from = frame->n_vectors;
- from = vlib_frame_vector_args (frame);
-
- while (n_left_from > 0)
- {
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 next0, bi0, src_epg, sw_if_index0;
- const gbp_endpoint_t *ge0;
- vlib_buffer_t *b0;
-
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
-
- if (GBP_SRC_CLASSIFY_NULL == type)
- {
- src_epg = EPG_INVALID;
- next0 =
- vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
- L2INPUT_FEAT_GBP_NULL_CLASSIFY);
- }
- else
- {
- if (DPO_PROTO_ETHERNET == dproto)
- {
- const ethernet_header_t *h0;
-
- h0 = vlib_buffer_get_current (b0);
- next0 =
- vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
- L2INPUT_FEAT_GBP_SRC_CLASSIFY);
- ge0 = gbp_endpoint_find_mac (h0->src_address,
- vnet_buffer (b0)->l2.bd_index);
- }
- else if (DPO_PROTO_IP4 == dproto)
- {
- const ip4_header_t *h0;
-
- h0 = vlib_buffer_get_current (b0);
-
- ge0 = gbp_endpoint_find_ip4
- (&h0->src_address,
- fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
- sw_if_index0));
-
-
- /*
- * Go straight to looukp, do not pass go, do not collect $200
- */
- next0 = 0;
- }
- else if (DPO_PROTO_IP6 == dproto)
- {
- const ip6_header_t *h0;
-
- h0 = vlib_buffer_get_current (b0);
-
- ge0 = gbp_endpoint_find_ip6
- (&h0->src_address,
- fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
- sw_if_index0));
-
-
- /*
- * Go straight to lookup, do not pass go, do not collect $200
- */
- next0 = 0;
- }
- else
- {
- ge0 = NULL;
- next0 = 0;
- ASSERT (0);
- }
-
- if (PREDICT_TRUE (NULL != ge0))
- src_epg = ge0->ge_fwd.gef_epg_id;
- else
- src_epg = EPG_INVALID;
- }
-
- vnet_buffer2 (b0)->gbp.src_epg = src_epg;
-
- if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- gbp_classify_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->src_epg = src_epg;
- }
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
-
- return frame->n_vectors;
-}
-
-VLIB_NODE_FN (gbp_src_classify_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_classify_inline (vm, node, frame,
- GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET));
-}
-
-VLIB_NODE_FN (gbp_null_classify_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_classify_inline (vm, node, frame,
- GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET));
-}
-
-VLIB_NODE_FN (gbp_ip4_src_classify_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_classify_inline (vm, node, frame,
- GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4));
-}
-
-VLIB_NODE_FN (gbp_ip6_src_classify_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_classify_inline (vm, node, frame,
- GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6));
-}
-
-
-/* packet trace format function */
-static u8 *
-format_gbp_classify_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
-
- s = format (s, "src-epg:%d", t->src_epg);
-
- return s;
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (gbp_null_classify_node) = {
- .name = "gbp-null-classify",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_classify_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = 0,
- .n_next_nodes = 0,
-};
-
-VLIB_REGISTER_NODE (gbp_src_classify_node) = {
- .name = "gbp-src-classify",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_classify_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = 0,
- .n_next_nodes = 0,
-};
-
-VLIB_REGISTER_NODE (gbp_ip4_src_classify_node) = {
- .name = "ip4-gbp-src-classify",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_classify_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = 0,
- .n_next_nodes = 1,
- .next_nodes = {
- [0] = "ip4-lookup"
- },
-};
-
-VLIB_REGISTER_NODE (gbp_ip6_src_classify_node) = {
- .name = "ip6-gbp-src-classify",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_classify_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = 0,
- .n_next_nodes = 1,
- .next_nodes = {
- [0] = "ip6-lookup"
- },
-};
-
-VNET_FEATURE_INIT (gbp_ip4_src_classify_feat_node, static) =
-{
- .arc_name = "ip4-unicast",
- .node_name = "ip4-gbp-src-classify",
- .runs_before = VNET_FEATURES ("nat44-out2in"),
-};
-VNET_FEATURE_INIT (gbp_ip6_src_classify_feat_node, static) =
-{
- .arc_name = "ip6-unicast",
- .node_name = "ip6-gbp-src-classify",
- .runs_before = VNET_FEATURES ("nat66-out2in"),
-};
-
-/* *INDENT-ON* */
-
-typedef enum gbp_lpm_classify_next_t_
-{
- GPB_LPM_CLASSIFY_DROP,
-} gbp_lpm_classify_next_t;
-
-always_inline dpo_proto_t
-ethertype_to_dpo_proto (const ethernet_header_t * eh0)
-{
- u16 etype = clib_net_to_host_u16 (eh0->type);
-
- switch (etype)
- {
- case ETHERNET_TYPE_IP4:
- return (DPO_PROTO_IP4);
- case ETHERNET_TYPE_IP6:
- return (DPO_PROTO_IP6);
- case ETHERNET_TYPE_VLAN:
- {
- ethernet_vlan_header_t *vh0;
-
- vh0 = (ethernet_vlan_header_t *) (eh0 + 1);
-
- switch (clib_net_to_host_u16 (vh0->type))
- {
- case ETHERNET_TYPE_IP4:
- return (DPO_PROTO_IP4);
- case ETHERNET_TYPE_IP6:
- return (DPO_PROTO_IP6);
- }
- }
- }
-
- return (DPO_PROTO_NONE);
-}
-
-/*
- * Determine the SRC EPG from a LPM
- */
-always_inline uword
-gbp_lpm_classify_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame,
- dpo_proto_t dproto, u8 is_recirc)
-{
- gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
- u32 n_left_from, *from, *to_next;
- u32 next_index;
-
- next_index = 0;
- n_left_from = frame->n_vectors;
- from = vlib_frame_vector_args (frame);
-
- while (n_left_from > 0)
- {
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 bi0, sw_if_index0, fib_index0, lbi0;
- gbp_lpm_classify_next_t next0;
- const gbp_policy_dpo_t *gpd0;
- const gbp_ext_itf_t *gx0;
- const gbp_recirc_t *gr0;
- const dpo_id_t *dpo0;
- load_balance_t *lb0;
- ip4_header_t *ip4_0;
- ip6_header_t *ip6_0;
- vlib_buffer_t *b0;
- epg_id_t src_epg0;
-
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
- ip4_0 = NULL;
- ip6_0 = NULL;
- next0 = GPB_LPM_CLASSIFY_DROP;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
-
- if (DPO_PROTO_IP4 == dproto)
- ip4_0 = vlib_buffer_get_current (b0);
- else if (DPO_PROTO_IP6 == dproto)
- ip6_0 = vlib_buffer_get_current (b0);
- else if (DPO_PROTO_ETHERNET == dproto)
- {
- const ethernet_header_t *eh0;
-
- eh0 = vlib_buffer_get_current (b0);
-
- dproto = ethertype_to_dpo_proto (eh0);
-
- switch (dproto)
- {
- case DPO_PROTO_IP4:
- ip4_0 = (vlib_buffer_get_current (b0) +
- vnet_buffer (b0)->l2.l2_len);
- break;
- case DPO_PROTO_IP6:
- ip6_0 = (vlib_buffer_get_current (b0) +
- vnet_buffer (b0)->l2.l2_len);
- break;
- default:
- /* not IP so no LPM classify possible */
- src_epg0 = EPG_INVALID;
- goto trace;
- }
- }
-
- if (is_recirc)
- {
- gr0 = gbp_recirc_get (sw_if_index0);
- fib_index0 = gr0->gr_fib_index[dproto];
-
- vnet_feature_next (&next0, b0);
- }
- else
- {
- gx0 = gbp_ext_itf_get (sw_if_index0);
- fib_index0 = gx0->gx_fib_index[dproto];
-
- next0 = vnet_l2_feature_next
- (b0, gscm->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM],
- L2INPUT_FEAT_GBP_LPM_CLASSIFY);
- }
-
- if (DPO_PROTO_IP4 == dproto)
- {
- lbi0 = ip4_fib_forwarding_lookup (fib_index0,
- &ip4_0->src_address);
- }
- else if (DPO_PROTO_IP6 == dproto)
- {
- lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0,
- &ip6_0->src_address);
- }
- else
- {
- /* not IP so no LPM classify possible */
- src_epg0 = EPG_INVALID;
- goto trace;
- }
- lb0 = load_balance_get (lbi0);
- dpo0 = load_balance_get_bucket_i (lb0, 0);
-
- if (gbp_policy_dpo_type == dpo0->dpoi_type)
- {
- gpd0 = gbp_policy_dpo_get (dpo0->dpoi_index);
- src_epg0 = gpd0->gpd_epg;
- }
- else
- {
- /* could not classify => drop */
- src_epg0 = EPG_INVALID;
- next0 = GPB_LPM_CLASSIFY_DROP;
- }
-
- trace:
- vnet_buffer2 (b0)->gbp.src_epg = src_epg0;
-
- if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- gbp_classify_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->src_epg = src_epg0;
- }
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
-
- return frame->n_vectors;
-}
-
-VLIB_NODE_FN (gbp_ip4_lpm_classify_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP4, 1));
-}
-
-VLIB_NODE_FN (gbp_ip6_lpm_classify_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP6, 1));
-}
-
-VLIB_NODE_FN (gbp_l2_lpm_classify_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_ETHERNET, 0));
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
- .name = "ip4-gbp-lpm-classify",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_classify_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = 0,
- .n_next_nodes = 1,
- .next_nodes = {
- [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
- },
-};
-
-VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
- .name = "ip6-gbp-lpm-classify",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_classify_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = 0,
- .n_next_nodes = 1,
- .next_nodes = {
- [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
- },
-};
-
-VLIB_REGISTER_NODE (gbp_l2_lpm_classify_node) = {
- .name = "l2-gbp-lpm-classify",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_classify_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = 0,
- .n_next_nodes = 1,
- .next_nodes = {
- [GPB_LPM_CLASSIFY_DROP] = "error-drop"
- },
-};
-
-VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
-{
- .arc_name = "ip4-unicast",
- .node_name = "ip4-gbp-lpm-classify",
- .runs_before = VNET_FEATURES ("nat44-out2in"),
-};
-VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
-{
- .arc_name = "ip6-unicast",
- .node_name = "ip6-gbp-lpm-classify",
- .runs_before = VNET_FEATURES ("nat66-out2in"),
-};
-
-/* *INDENT-ON* */
+gbp_src_classify_main_t gbp_src_classify_main;
static clib_error_t *
gbp_src_classify_init (vlib_main_t * vm)
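Note what remains of gbp_classify.c after the move: the single definition of the shared global plus its init function. The split follows the usual C pattern of one definition, an extern declaration in the header, and references from the other translation unit (a sketch using the names from this patch):

/* gbp_classify.h: declaration, visible to every includer */
extern gbp_src_classify_main_t gbp_src_classify_main;

/* gbp_classify.c: exactly one definition for the whole plugin */
gbp_src_classify_main_t gbp_src_classify_main;

/* gbp_classify_node.c: the node file reaches the same shared state */
gbp_src_classify_main_t *gscm = &gbp_src_classify_main;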
diff --git a/src/plugins/gbp/gbp_classify.h b/src/plugins/gbp/gbp_classify.h
new file mode 100644
index 00000000000..c0c1fd53dc5
--- /dev/null
+++ b/src/plugins/gbp/gbp_classify.h
@@ -0,0 +1,53 @@
+/*
+ * gbp_classify.h : Group Based Policy
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_CLASSIFY_H__
+#define __GBP_CLASSIFY_H__
+
+#include <plugins/gbp/gbp.h>
+
+typedef enum gbp_src_classify_type_t_
+{
+ GBP_SRC_CLASSIFY_NULL,
+ GBP_SRC_CLASSIFY_PORT,
+ GBP_SRC_CLASSIFY_LPM,
+} gbp_src_classify_type_t;
+
+#define GBP_SRC_N_CLASSIFY (GBP_SRC_CLASSIFY_LPM + 1)
+
+/**
+ * Grouping of global data for the GBP source EPG classification feature
+ */
+typedef struct gbp_src_classify_main_t_
+{
+ /**
+ * Next nodes for L2 input features
+ */
+ u32 l2_input_feat_next[GBP_SRC_N_CLASSIFY][32];
+} gbp_src_classify_main_t;
+
+extern gbp_src_classify_main_t gbp_src_classify_main;
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
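The enum above doubles as the row index into the feature-next table, with GBP_SRC_N_CLASSIFY derived from the last enumerator to size it. A hypothetical helper, not part of the patch, that makes the indexing explicit:

#include <plugins/gbp/gbp_classify.h>

static inline u32 *
example_feat_next_row (gbp_src_classify_type_t type)
{
  /* one row of 32 next-node indices per classify type */
  ASSERT (type < GBP_SRC_N_CLASSIFY);
  return gbp_src_classify_main.l2_input_feat_next[type];
}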
diff --git a/src/plugins/gbp/gbp_classify_node.c b/src/plugins/gbp/gbp_classify_node.c
new file mode 100644
index 00000000000..43fea769cce
--- /dev/null
+++ b/src/plugins/gbp/gbp_classify_node.c
@@ -0,0 +1,542 @@
+/*
+ * gbp_classify_node.c : Group Based Policy
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_classify.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_ext_itf.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_classify_trace_t_
+{
+ /* per-pkt trace data */
+ epg_id_t src_epg;
+} gbp_classify_trace_t;
+
+/*
+ * determine the SRC EPG from the input port
+ */
+always_inline uword
+gbp_classify_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ gbp_src_classify_type_t type, dpo_proto_t dproto)
+{
+ gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
+ u32 n_left_from, *from, *to_next;
+ u32 next_index;
+
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 next0, bi0, src_epg, sw_if_index0;
+ const gbp_endpoint_t *ge0;
+ vlib_buffer_t *b0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
+
+ if (GBP_SRC_CLASSIFY_NULL == type)
+ {
+ src_epg = EPG_INVALID;
+ next0 =
+ vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
+ L2INPUT_FEAT_GBP_NULL_CLASSIFY);
+ }
+ else
+ {
+ if (DPO_PROTO_ETHERNET == dproto)
+ {
+ const ethernet_header_t *h0;
+
+ h0 = vlib_buffer_get_current (b0);
+ next0 =
+ vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
+ L2INPUT_FEAT_GBP_SRC_CLASSIFY);
+ ge0 = gbp_endpoint_find_mac (h0->src_address,
+ vnet_buffer (b0)->l2.bd_index);
+ }
+ else if (DPO_PROTO_IP4 == dproto)
+ {
+ const ip4_header_t *h0;
+
+ h0 = vlib_buffer_get_current (b0);
+
+ ge0 = gbp_endpoint_find_ip4
+ (&h0->src_address,
+ fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+ sw_if_index0));
+
+
+ /*
+ * Go straight to lookup, do not pass go, do not collect $200
+ */
+ next0 = 0;
+ }
+ else if (DPO_PROTO_IP6 == dproto)
+ {
+ const ip6_header_t *h0;
+
+ h0 = vlib_buffer_get_current (b0);
+
+ ge0 = gbp_endpoint_find_ip6
+ (&h0->src_address,
+ fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
+ sw_if_index0));
+
+
+ /*
+ * Go straight to lookup, do not pass go, do not collect $200
+ */
+ next0 = 0;
+ }
+ else
+ {
+ ge0 = NULL;
+ next0 = 0;
+ ASSERT (0);
+ }
+
+ if (PREDICT_TRUE (NULL != ge0))
+ src_epg = ge0->ge_fwd.gef_epg_id;
+ else
+ src_epg = EPG_INVALID;
+ }
+
+ vnet_buffer2 (b0)->gbp.src_epg = src_epg;
+
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->src_epg = src_epg;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (gbp_src_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET));
+}
+
+VLIB_NODE_FN (gbp_null_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET));
+}
+
+VLIB_NODE_FN (gbp_ip4_src_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4));
+}
+
+VLIB_NODE_FN (gbp_ip6_src_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6));
+}
+
+
+/* packet trace format function */
+static u8 *
+format_gbp_classify_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
+
+ s = format (s, "src-epg:%d", t->src_epg);
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_null_classify_node) = {
+ .name = "gbp-null-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 0,
+};
+
+VLIB_REGISTER_NODE (gbp_src_classify_node) = {
+ .name = "gbp-src-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 0,
+};
+
+VLIB_REGISTER_NODE (gbp_ip4_src_classify_node) = {
+ .name = "ip4-gbp-src-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip4-lookup"
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_ip6_src_classify_node) = {
+ .name = "ip6-gbp-src-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip6-lookup"
+ },
+};
+
+VNET_FEATURE_INIT (gbp_ip4_src_classify_feat_node, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-gbp-src-classify",
+ .runs_before = VNET_FEATURES ("nat44-out2in"),
+};
+VNET_FEATURE_INIT (gbp_ip6_src_classify_feat_node, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-gbp-src-classify",
+ .runs_before = VNET_FEATURES ("nat66-out2in"),
+};
+
+/* *INDENT-ON* */
+
+typedef enum gbp_lpm_classify_next_t_
+{
+ GPB_LPM_CLASSIFY_DROP,
+} gbp_lpm_classify_next_t;
+
+always_inline dpo_proto_t
+ethertype_to_dpo_proto (const ethernet_header_t * eh0)
+{
+ u16 etype = clib_net_to_host_u16 (eh0->type);
+
+ switch (etype)
+ {
+ case ETHERNET_TYPE_IP4:
+ return (DPO_PROTO_IP4);
+ case ETHERNET_TYPE_IP6:
+ return (DPO_PROTO_IP6);
+ case ETHERNET_TYPE_VLAN:
+ {
+ ethernet_vlan_header_t *vh0;
+
+ vh0 = (ethernet_vlan_header_t *) (eh0 + 1);
+
+ switch (clib_net_to_host_u16 (vh0->type))
+ {
+ case ETHERNET_TYPE_IP4:
+ return (DPO_PROTO_IP4);
+ case ETHERNET_TYPE_IP6:
+ return (DPO_PROTO_IP6);
+ }
+ }
+ }
+
+ return (DPO_PROTO_NONE);
+}
+
+/*
+ * Determine the SRC EPG from a LPM
+ */
+always_inline uword
+gbp_lpm_classify_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ dpo_proto_t dproto, u8 is_recirc)
+{
+ gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
+ u32 n_left_from, *from, *to_next;
+ u32 next_index;
+
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sw_if_index0, fib_index0, lbi0;
+ gbp_lpm_classify_next_t next0;
+ const gbp_policy_dpo_t *gpd0;
+ const gbp_ext_itf_t *gx0;
+ const gbp_recirc_t *gr0;
+ const dpo_id_t *dpo0;
+ load_balance_t *lb0;
+ ip4_header_t *ip4_0;
+ ip6_header_t *ip6_0;
+ vlib_buffer_t *b0;
+ epg_id_t src_epg0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ ip4_0 = NULL;
+ ip6_0 = NULL;
+ next0 = GPB_LPM_CLASSIFY_DROP;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
+
+ if (DPO_PROTO_IP4 == dproto)
+ ip4_0 = vlib_buffer_get_current (b0);
+ else if (DPO_PROTO_IP6 == dproto)
+ ip6_0 = vlib_buffer_get_current (b0);
+ else if (DPO_PROTO_ETHERNET == dproto)
+ {
+ const ethernet_header_t *eh0;
+
+ eh0 = vlib_buffer_get_current (b0);
+
+ dproto = ethertype_to_dpo_proto (eh0);
+
+ switch (dproto)
+ {
+ case DPO_PROTO_IP4:
+ ip4_0 = (vlib_buffer_get_current (b0) +
+ vnet_buffer (b0)->l2.l2_len);
+ break;
+ case DPO_PROTO_IP6:
+ ip6_0 = (vlib_buffer_get_current (b0) +
+ vnet_buffer (b0)->l2.l2_len);
+ break;
+ default:
+ /* not IP so no LPM classify possible */
+ src_epg0 = EPG_INVALID;
+ goto trace;
+ }
+ }
+
+ if (is_recirc)
+ {
+ gr0 = gbp_recirc_get (sw_if_index0);
+ fib_index0 = gr0->gr_fib_index[dproto];
+
+ vnet_feature_next (&next0, b0);
+ }
+ else
+ {
+ gx0 = gbp_ext_itf_get (sw_if_index0);
+ fib_index0 = gx0->gx_fib_index[dproto];
+
+ next0 = vnet_l2_feature_next
+ (b0, gscm->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM],
+ L2INPUT_FEAT_GBP_LPM_CLASSIFY);
+ }
+
+ if (DPO_PROTO_IP4 == dproto)
+ {
+ lbi0 = ip4_fib_forwarding_lookup (fib_index0,
+ &ip4_0->src_address);
+ }
+ else if (DPO_PROTO_IP6 == dproto)
+ {
+ lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0,
+ &ip6_0->src_address);
+ }
+ else
+ {
+ /* not IP so no LPM classify possible */
+ src_epg0 = EPG_INVALID;
+ goto trace;
+ }
+ lb0 = load_balance_get (lbi0);
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+
+ if (gbp_policy_dpo_type == dpo0->dpoi_type)
+ {
+ gpd0 = gbp_policy_dpo_get (dpo0->dpoi_index);
+ src_epg0 = gpd0->gpd_epg;
+ }
+ else
+ {
+ /* could not classify => drop */
+ src_epg0 = EPG_INVALID;
+ next0 = GPB_LPM_CLASSIFY_DROP;
+ }
+
+ trace:
+ vnet_buffer2 (b0)->gbp.src_epg = src_epg0;
+
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->src_epg = src_epg0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (gbp_ip4_lpm_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP4, 1));
+}
+
+VLIB_NODE_FN (gbp_ip6_lpm_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP6, 1));
+}
+
+VLIB_NODE_FN (gbp_l2_lpm_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_ETHERNET, 0));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
+ .name = "ip4-gbp-lpm-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
+ .name = "ip6-gbp-lpm-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_l2_lpm_classify_node) = {
+ .name = "l2-gbp-lpm-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [GPB_LPM_CLASSIFY_DROP] = "error-drop"
+ },
+};
+
+VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-gbp-lpm-classify",
+ .runs_before = VNET_FEATURES ("nat44-out2in"),
+};
+VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-gbp-lpm-classify",
+ .runs_before = VNET_FEATURES ("nat66-out2in"),
+};
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
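Both classify loops in the new file share the canonical single-buffer VLIB dispatch shape; stripped of the GBP specifics it reduces to the skeleton below (a sketch for orientation only, not additional code from the patch):

#include <vlib/vlib.h>

static uword
example_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		     vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next, next_index = 0;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = from[0], next0 = 0;

	  /* speculatively enqueue b0 to the current next frame */
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  /* per-buffer work goes here: classify, pick next0, trace */

	  /* fix up the enqueue if next0 differs from next_index */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}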
diff --git a/src/plugins/gbp/gbp_fwd.c b/src/plugins/gbp/gbp_fwd.c
index 8d98f1cd365..dd393980038 100644
--- a/src/plugins/gbp/gbp_fwd.c
+++ b/src/plugins/gbp/gbp_fwd.c
@@ -29,40 +29,6 @@ typedef struct gbp_fwd_main_t_
static gbp_fwd_main_t gbp_fwd_main;
-gbp_policy_main_t gbp_policy_main;
-
-void
-gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode)
-{
- if (GBP_LEARN_MODE_L2 == mode)
- {
- l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 1);
- }
- else
- {
- vnet_feature_enable_disable ("ip4-unicast",
- "gbp-learn-ip4", sw_if_index, 1, 0, 0);
- vnet_feature_enable_disable ("ip6-unicast",
- "gbp-learn-ip6", sw_if_index, 1, 0, 0);
- }
-}
-
-void
-gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode)
-{
- if (GBP_LEARN_MODE_L2 == mode)
- {
- l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 0);
- }
- else
- {
- vnet_feature_enable_disable ("ip4-unicast",
- "gbp-learn-ip4", sw_if_index, 0, 0, 0);
- vnet_feature_enable_disable ("ip6-unicast",
- "gbp-learn-ip6", sw_if_index, 0, 0, 0);
- }
-}
-
static clib_error_t *
gbp_fwd_init (vlib_main_t * vm)
{
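With the hunk above, gbp_learn_enable/disable move out of gbp_fwd.c and into gbp_learn.c beside the rest of the learning code (see the next diff). A hypothetical caller, for orientation; the sw_if_index is assumed to exist:

#include <plugins/gbp/gbp_learn.h>

static void
example_toggle_learning (u32 sw_if_index)
{
  /* L2 mode flips the L2INPUT_FEAT_GBP_LEARN bit on the interface */
  gbp_learn_enable (sw_if_index, GBP_LEARN_MODE_L2);

  /* L3 mode drives the gbp-learn-ip4/ip6 features on the unicast arcs */
  gbp_learn_disable (sw_if_index, GBP_LEARN_MODE_L3);
}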
diff --git a/src/plugins/gbp/gbp_learn.c b/src/plugins/gbp/gbp_learn.c
index 156ea358044..612af981248 100644
--- a/src/plugins/gbp/gbp_learn.c
+++ b/src/plugins/gbp/gbp_learn.c
@@ -16,683 +16,43 @@
#include <plugins/gbp/gbp.h>
#include <plugins/gbp/gbp_learn.h>
#include <plugins/gbp/gbp_bridge_domain.h>
-#include <vlibmemory/api.h>
-#include <vnet/util/throttle.h>
#include <vnet/l2/l2_input.h>
-#include <vnet/fib/fib_table.h>
-#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
-/**
- * Grouping of global data for the GBP source EPG classification feature
- */
-typedef struct gbp_learn_main_t_
-{
- /**
- * Next nodes for L2 output features
- */
- u32 gl_l2_input_feat_next[32];
-
- /**
- * logger - VLIB log class
- */
- vlib_log_class_t gl_logger;
-
- /**
- * throttles for the DP leanring
- */
- throttle_t gl_l2_throttle;
- throttle_t gl_l3_throttle;
-} gbp_learn_main_t;
-
-/**
- * The maximum learning rate per-hashed EP
- */
-#define GBP_ENDPOINT_HASH_LEARN_RATE (1e-2)
-
-static gbp_learn_main_t gbp_learn_main;
-
-#define GBP_LEARN_DBG(...) \
- vlib_log_debug (gbp_learn_main.gl_logger, __VA_ARGS__);
-
-#define foreach_gbp_learn \
- _(DROP, "drop")
-
-typedef enum
-{
-#define _(sym,str) GBP_LEARN_ERROR_##sym,
- foreach_gbp_learn
-#undef _
- GBP_LEARN_N_ERROR,
-} gbp_learn_error_t;
-
-static char *gbp_learn_error_strings[] = {
-#define _(sym,string) string,
- foreach_gbp_learn
-#undef _
-};
-
-typedef enum
-{
-#define _(sym,str) GBP_LEARN_NEXT_##sym,
- foreach_gbp_learn
-#undef _
- GBP_LEARN_N_NEXT,
-} gbp_learn_next_t;
-
-typedef struct gbp_learn_l2_t_
-{
- ip46_address_t ip;
- mac_address_t mac;
- u32 sw_if_index;
- u32 bd_index;
- epg_id_t epg;
- ip46_address_t outer_src;
- ip46_address_t outer_dst;
-} gbp_learn_l2_t;
-
-
-static void
-gbp_learn_l2_cp (const gbp_learn_l2_t * gl2)
-{
- ip46_address_t *ips = NULL;
-
- GBP_LEARN_DBG ("L2 EP: %U %U, %d",
- format_mac_address_t, &gl2->mac,
- format_ip46_address, &gl2->ip, IP46_TYPE_ANY, gl2->epg);
-
- vec_add1 (ips, gl2->ip);
-
- ASSERT (!ip46_address_is_zero (&gl2->outer_src));
- ASSERT (!ip46_address_is_zero (&gl2->outer_dst));
-
- /*
- * flip the source and dst, since that's how it was received, this API
- * takes how it's sent
- */
- gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
- gl2->sw_if_index, ips,
- &gl2->mac, INDEX_INVALID,
- INDEX_INVALID, gl2->epg,
- (GBP_ENDPOINT_FLAG_LEARNT |
- GBP_ENDPOINT_FLAG_REMOTE),
- &gl2->outer_dst, &gl2->outer_src, NULL);
- vec_free (ips);
-}
-
-static void
-gbp_learn_l2_ip4_dp (const u8 * mac, const ip4_address_t * ip,
- u32 bd_index, u32 sw_if_index, epg_id_t epg,
- const ip4_address_t * outer_src,
- const ip4_address_t * outer_dst)
-{
- gbp_learn_l2_t gl2 = {
- .sw_if_index = sw_if_index,
- .bd_index = bd_index,
- .epg = epg,
- .ip.ip4 = *ip,
- .outer_src.ip4 = *outer_src,
- .outer_dst.ip4 = *outer_dst,
- };
- mac_address_from_bytes (&gl2.mac, mac);
-
- ASSERT (!ip46_address_is_zero (&gl2.outer_src));
- ASSERT (!ip46_address_is_zero (&gl2.outer_dst));
+gbp_learn_main_t gbp_learn_main;
- vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
-}
-
-static void
-gbp_learn_l2_ip6_dp (const u8 * mac, const ip6_address_t * ip,
- u32 bd_index, u32 sw_if_index, epg_id_t epg,
- const ip4_address_t * outer_src,
- const ip4_address_t * outer_dst)
+void
+gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode)
{
- gbp_learn_l2_t gl2 = {
- .sw_if_index = sw_if_index,
- .bd_index = bd_index,
- .epg = epg,
- .ip.ip6 = *ip,
- .outer_src.ip4 = *outer_src,
- .outer_dst.ip4 = *outer_dst,
- };
- mac_address_from_bytes (&gl2.mac, mac);
-
- vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
-}
-
-static void
-gbp_learn_l2_dp (const u8 * mac, u32 bd_index, u32 sw_if_index,
- epg_id_t epg,
- const ip4_address_t * outer_src,
- const ip4_address_t * outer_dst)
-{
- gbp_learn_l2_t gl2 = {
- .sw_if_index = sw_if_index,
- .bd_index = bd_index,
- .epg = epg,
- .outer_src.ip4 = *outer_src,
- .outer_dst.ip4 = *outer_dst,
- };
- mac_address_from_bytes (&gl2.mac, mac);
-
- vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
-}
-
-/**
- * per-packet trace data
- */
-typedef struct gbp_learn_l2_trace_t_
-{
- /* per-pkt trace data */
- mac_address_t mac;
- u32 sw_if_index;
- u32 new;
- u32 throttled;
- u32 epg;
- u32 d_bit;
-} gbp_learn_l2_trace_t;
-
-always_inline void
-gbp_learn_get_outer (const ethernet_header_t * eh0,
- ip4_address_t * outer_src, ip4_address_t * outer_dst)
-{
- ip4_header_t *ip0;
- u8 *buff;
-
- /* rewind back to the ivxlan header */
- buff = (u8 *) eh0;
- buff -= (sizeof (vxlan_gbp_header_t) +
- sizeof (udp_header_t) + sizeof (ip4_header_t));
-
- ip0 = (ip4_header_t *) buff;
-
- *outer_src = ip0->src_address;
- *outer_dst = ip0->dst_address;
-}
-
-VLIB_NODE_FN (gbp_learn_l2_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
- gbp_learn_main_t *glm;
- f64 time_now;
-
- glm = &gbp_learn_main;
- next_index = 0;
- n_left_from = frame->n_vectors;
- from = vlib_frame_vector_args (frame);
- time_now = vlib_time_now (vm);
- thread_index = vm->thread_index;
-
- seed = throttle_seed (&glm->gl_l2_throttle, thread_index, time_now);
-
- while (n_left_from > 0)
+ if (GBP_LEARN_MODE_L2 == mode)
{
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- ip4_address_t outer_src, outer_dst;
- u32 bi0, sw_if_index0, t0, epg0;
- const ethernet_header_t *eh0;
- gbp_bridge_domain_t *gb0;
- gbp_learn_next_t next0;
- gbp_endpoint_t *ge0;
- vlib_buffer_t *b0;
-
- next0 = GBP_LEARN_NEXT_DROP;
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-
- eh0 = vlib_buffer_get_current (b0);
- epg0 = vnet_buffer2 (b0)->gbp.src_epg;
-
- next0 = vnet_l2_feature_next (b0, glm->gl_l2_input_feat_next,
- L2INPUT_FEAT_GBP_LEARN);
-
- ge0 = gbp_endpoint_find_mac (eh0->src_address,
- vnet_buffer (b0)->l2.bd_index);
- gb0 =
- gbp_bridge_domain_get_by_bd_index (vnet_buffer (b0)->l2.bd_index);
-
- if ((vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D) ||
- (gb0->gb_flags & GBP_BD_FLAG_DO_NOT_LEARN))
- {
- t0 = 1;
- goto trace;
- }
-
- /*
- * check for new EP or a moved EP
- */
- if (NULL == ge0 || ge0->ge_fwd.gef_itf != sw_if_index0)
-
- {
- /*
- * use the last 4 bytes of the mac address as the hash for the EP
- */
- t0 = throttle_check (&glm->gl_l2_throttle, thread_index,
- *((u32 *) (eh0->src_address + 2)), seed);
- if (!t0)
- {
- gbp_learn_get_outer (eh0, &outer_src, &outer_dst);
-
- switch (clib_net_to_host_u16 (eh0->type))
- {
- case ETHERNET_TYPE_IP4:
- {
- const ip4_header_t *ip0;
-
- ip0 = (ip4_header_t *) (eh0 + 1);
-
- gbp_learn_l2_ip4_dp (eh0->src_address,
- &ip0->src_address,
- vnet_buffer (b0)->l2.bd_index,
- sw_if_index0, epg0,
- &outer_src, &outer_dst);
-
- break;
- }
- case ETHERNET_TYPE_IP6:
- {
- const ip6_header_t *ip0;
-
- ip0 = (ip6_header_t *) (eh0 + 1);
-
- gbp_learn_l2_ip6_dp (eh0->src_address,
- &ip0->src_address,
- vnet_buffer (b0)->l2.bd_index,
- sw_if_index0, epg0,
- &outer_src, &outer_dst);
-
- break;
- }
- default:
- gbp_learn_l2_dp (eh0->src_address,
- vnet_buffer (b0)->l2.bd_index,
- sw_if_index0, epg0,
- &outer_src, &outer_dst);
- break;
- }
- }
- }
- else
- {
- /*
- * this update could happen simultaneoulsy from multiple workers
- * but that's ok we are not interested in being very accurate.
- */
- t0 = 0;
- ge0->ge_last_time = time_now;
- }
- trace:
- if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- gbp_learn_l2_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- clib_memcpy_fast (t->mac.bytes, eh0->src_address, 6);
- t->new = (NULL == ge0);
- t->throttled = t0;
- t->sw_if_index = sw_if_index0;
- t->epg = epg0;
- t->d_bit = ! !(vnet_buffer2 (b0)->gbp.flags &
- VXLAN_GBP_GPFLAGS_D);
- }
-
- /* verify speculative enqueue, maybe switch current next frame */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 1);
}
-
- return frame->n_vectors;
-}
-
-/* packet trace format function */
-static u8 *
-format_gbp_learn_l2_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- gbp_learn_l2_trace_t *t = va_arg (*args, gbp_learn_l2_trace_t *);
-
- s = format (s, "new:%d throttled:%d d-bit:%d mac:%U itf:%d epg:%d",
- t->new, t->throttled, t->d_bit,
- format_mac_address_t, &t->mac, t->sw_if_index, t->epg);
-
- return s;
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (gbp_learn_l2_node) = {
- .name = "gbp-learn-l2",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_learn_l2_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = ARRAY_LEN(gbp_learn_error_strings),
- .error_strings = gbp_learn_error_strings,
-
- .n_next_nodes = GBP_LEARN_N_NEXT,
-
- .next_nodes = {
- [GBP_LEARN_NEXT_DROP] = "error-drop",
- },
-};
-/* *INDENT-ON* */
-
-typedef struct gbp_learn_l3_t_
-{
- ip46_address_t ip;
- u32 fib_index;
- u32 sw_if_index;
- epg_id_t epg;
- ip46_address_t outer_src;
- ip46_address_t outer_dst;
-} gbp_learn_l3_t;
-
-static void
-gbp_learn_l3_cp (const gbp_learn_l3_t * gl3)
-{
- ip46_address_t *ips = NULL;
-
- GBP_LEARN_DBG ("L3 EP: %U, %d", format_ip46_address, &gl3->ip,
- IP46_TYPE_ANY, gl3->epg);
-
- vec_add1 (ips, gl3->ip);
-
- gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
- gl3->sw_if_index, ips, NULL,
- INDEX_INVALID, INDEX_INVALID, gl3->epg,
- (GBP_ENDPOINT_FLAG_REMOTE |
- GBP_ENDPOINT_FLAG_LEARNT),
- &gl3->outer_dst, &gl3->outer_src, NULL);
- vec_free (ips);
-}
-
-static void
-gbp_learn_ip4_dp (const ip4_address_t * ip,
- u32 fib_index, u32 sw_if_index, epg_id_t epg,
- const ip4_address_t * outer_src,
- const ip4_address_t * outer_dst)
-{
- /* *INDENT-OFF* */
- gbp_learn_l3_t gl3 = {
- .ip = {
- .ip4 = *ip,
- },
- .sw_if_index = sw_if_index,
- .fib_index = fib_index,
- .epg = epg,
- .outer_src.ip4 = *outer_src,
- .outer_dst.ip4 = *outer_dst,
- };
- /* *INDENT-ON* */
-
- vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
-}
-
-static void
-gbp_learn_ip6_dp (const ip6_address_t * ip,
- u32 fib_index, u32 sw_if_index, epg_id_t epg,
- const ip4_address_t * outer_src,
- const ip4_address_t * outer_dst)
-{
- /* *INDENT-OFF* */
- gbp_learn_l3_t gl3 = {
- .ip = {
- .ip6 = *ip,
- },
- .sw_if_index = sw_if_index,
- .fib_index = fib_index,
- .epg = epg,
- .outer_src.ip4 = *outer_src,
- .outer_dst.ip4 = *outer_dst,
- };
- /* *INDENT-ON* */
-
- vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
-}
-
-/**
- * per-packet trace data
- */
-typedef struct gbp_learn_l3_trace_t_
-{
- /* per-pkt trace data */
- ip46_address_t ip;
- u32 sw_if_index;
- u32 new;
- u32 throttled;
- u32 epg;
-} gbp_learn_l3_trace_t;
-
-static uword
-gbp_learn_l3 (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame,
- fib_protocol_t fproto)
-{
- u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
- gbp_learn_main_t *glm;
- f64 time_now;
-
- glm = &gbp_learn_main;
- next_index = 0;
- n_left_from = frame->n_vectors;
- from = vlib_frame_vector_args (frame);
- time_now = vlib_time_now (vm);
- thread_index = vm->thread_index;
-
- seed = throttle_seed (&glm->gl_l3_throttle, thread_index, time_now);
-
- while (n_left_from > 0)
+ else
{
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 bi0, sw_if_index0, t0, epg0, fib_index0;
- CLIB_UNUSED (const ip4_header_t *) ip4_0;
- CLIB_UNUSED (const ip6_header_t *) ip6_0;
- ip4_address_t outer_src, outer_dst;
- ethernet_header_t *eth0;
- gbp_learn_next_t next0;
- gbp_endpoint_t *ge0;
- vlib_buffer_t *b0;
-
- next0 = GBP_LEARN_NEXT_DROP;
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- epg0 = vnet_buffer2 (b0)->gbp.src_epg;
- ip6_0 = NULL;
- ip4_0 = NULL;
-
- vnet_feature_next (&next0, b0);
-
- if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D)
- {
- t0 = 1;
- ge0 = NULL;
- goto trace;
- }
-
- fib_index0 = fib_table_get_index_for_sw_if_index (fproto,
- sw_if_index0);
-
- if (FIB_PROTOCOL_IP6 == fproto)
- {
- ip6_0 = vlib_buffer_get_current (b0);
- eth0 = (ethernet_header_t *) (((u8 *) ip6_0) - sizeof (*eth0));
-
- gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
-
- ge0 = gbp_endpoint_find_ip6 (&ip6_0->src_address, fib_index0);
-
- if (NULL == ge0)
- {
- t0 = throttle_check (&glm->gl_l3_throttle,
- thread_index,
- ip6_address_hash_to_u32
- (&ip6_0->src_address), seed);
-
- if (!t0)
- {
- gbp_learn_ip6_dp (&ip6_0->src_address,
- fib_index0, sw_if_index0, epg0,
- &outer_src, &outer_dst);
- }
- }
- else
- {
- /*
- * this update could happen simultaneoulsy from multiple
- * workers but that's ok we are not interested in being
- * very accurate.
- */
- t0 = 0;
- ge0->ge_last_time = time_now;
- }
- }
- else
- {
- ip4_0 = vlib_buffer_get_current (b0);
- eth0 = (ethernet_header_t *) (((u8 *) ip4_0) - sizeof (*eth0));
-
- gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
- ge0 = gbp_endpoint_find_ip4 (&ip4_0->src_address, fib_index0);
-
- if (NULL == ge0)
- {
- t0 = throttle_check (&glm->gl_l3_throttle, thread_index,
- ip4_0->src_address.as_u32, seed);
-
- if (!t0)
- {
- gbp_learn_ip4_dp (&ip4_0->src_address,
- fib_index0, sw_if_index0, epg0,
- &outer_src, &outer_dst);
- }
- }
- else
- {
- /*
- * this update could happen simultaneoulsy from multiple
- * workers but that's ok we are not interested in being
- * very accurate.
- */
- t0 = 0;
- ge0->ge_last_time = time_now;
- }
- }
- trace:
- if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- gbp_learn_l3_trace_t *t;
-
- t = vlib_add_trace (vm, node, b0, sizeof (*t));
- if (FIB_PROTOCOL_IP6 == fproto && ip6_0)
- ip46_address_set_ip6 (&t->ip, &ip6_0->src_address);
- if (FIB_PROTOCOL_IP4 == fproto && ip4_0)
- ip46_address_set_ip4 (&t->ip, &ip4_0->src_address);
- t->new = (NULL == ge0);
- t->throttled = t0;
- t->sw_if_index = sw_if_index0;
- t->epg = epg0;
- }
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vnet_feature_enable_disable ("ip4-unicast",
+ "gbp-learn-ip4", sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "gbp-learn-ip6", sw_if_index, 1, 0, 0);
}
-
- return frame->n_vectors;
}
-/* packet trace format function */
-static u8 *
-format_gbp_learn_l3_trace (u8 * s, va_list * args)
+void
+gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode)
{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- gbp_learn_l3_trace_t *t = va_arg (*args, gbp_learn_l3_trace_t *);
-
- s = format (s, "new:%d throttled:%d ip:%U itf:%d epg:%d",
- t->new, t->throttled,
- format_ip46_address, &t->ip, IP46_TYPE_ANY, t->sw_if_index,
- t->epg);
-
- return s;
-}
-
-VLIB_NODE_FN (gbp_learn_ip4_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP4));
-}
-
-VLIB_NODE_FN (gbp_learn_ip6_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP6));
+ if (GBP_LEARN_MODE_L2 == mode)
+ {
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 0);
+ }
+ else
+ {
+ vnet_feature_enable_disable ("ip4-unicast",
+ "gbp-learn-ip4", sw_if_index, 0, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "gbp-learn-ip6", sw_if_index, 0, 0, 0);
+ }
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (gbp_learn_ip4_node) = {
- .name = "gbp-learn-ip4",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_learn_l3_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-};
-
-VNET_FEATURE_INIT (gbp_learn_ip4, static) =
-{
- .arc_name = "ip4-unicast",
- .node_name = "gbp-learn-ip4",
-};
-
-VLIB_REGISTER_NODE (gbp_learn_ip6_node) = {
- .name = "gbp-learn-ip6",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_learn_l3_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-};
-
-VNET_FEATURE_INIT (gbp_learn_ip6, static) =
-{
- .arc_name = "ip6-unicast",
- .node_name = "gbp-learn-ip6",
-};
-
-/* *INDENT-ON* */
-
static clib_error_t *
gbp_learn_init (vlib_main_t * vm)
{
diff --git a/src/plugins/gbp/gbp_learn.h b/src/plugins/gbp/gbp_learn.h
index 836daf80886..c40196ef259 100644
--- a/src/plugins/gbp/gbp_learn.h
+++ b/src/plugins/gbp/gbp_learn.h
@@ -18,12 +18,41 @@
#include <plugins/gbp/gbp.h>
+/**
+ * The maximum learning rate per-hashed EP
+ */
+#define GBP_ENDPOINT_HASH_LEARN_RATE (1e-2)
+
typedef enum gbp_learn_mode_t_
{
GBP_LEARN_MODE_L2,
GBP_LEARN_MODE_L3,
} gbb_learn_mode_t;
+/**
+ * Grouping of global data for the GBP endpoint learning feature
+ */
+typedef struct gbp_learn_main_t_
+{
+ /**
+ * Next nodes for L2 output features
+ */
+ u32 gl_l2_input_feat_next[32];
+
+ /**
+ * logger - VLIB log class
+ */
+ vlib_log_class_t gl_logger;
+
+ /**
+ * throttles for the DP learning
+ */
+ throttle_t gl_l2_throttle;
+ throttle_t gl_l3_throttle;
+} gbp_learn_main_t;
+
+extern gbp_learn_main_t gbp_learn_main;
+
extern void gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode);
extern void gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode);
diff --git a/src/plugins/gbp/gbp_learn_node.c b/src/plugins/gbp/gbp_learn_node.c
new file mode 100644
index 00000000000..d7a70bb1991
--- /dev/null
+++ b/src/plugins/gbp/gbp_learn_node.c
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/util/throttle.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+#define GBP_LEARN_DBG(...) \
+ vlib_log_debug (gbp_learn_main.gl_logger, __VA_ARGS__);
+
+#define foreach_gbp_learn \
+ _(DROP, "drop")
+
+typedef enum
+{
+#define _(sym,str) GBP_LEARN_ERROR_##sym,
+ foreach_gbp_learn
+#undef _
+ GBP_LEARN_N_ERROR,
+} gbp_learn_error_t;
+
+static char *gbp_learn_error_strings[] = {
+#define _(sym,string) string,
+ foreach_gbp_learn
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) GBP_LEARN_NEXT_##sym,
+ foreach_gbp_learn
+#undef _
+ GBP_LEARN_N_NEXT,
+} gbp_learn_next_t;
+
+typedef struct gbp_learn_l2_t_
+{
+ ip46_address_t ip;
+ mac_address_t mac;
+ u32 sw_if_index;
+ u32 bd_index;
+ epg_id_t epg;
+ ip46_address_t outer_src;
+ ip46_address_t outer_dst;
+} gbp_learn_l2_t;
+
+
+static void
+gbp_learn_l2_cp (const gbp_learn_l2_t * gl2)
+{
+ ip46_address_t *ips = NULL;
+
+ GBP_LEARN_DBG ("L2 EP: %U %U, %d",
+ format_mac_address_t, &gl2->mac,
+ format_ip46_address, &gl2->ip, IP46_TYPE_ANY, gl2->epg);
+
+ vec_add1 (ips, gl2->ip);
+
+ ASSERT (!ip46_address_is_zero (&gl2->outer_src));
+ ASSERT (!ip46_address_is_zero (&gl2->outer_dst));
+
+ /*
+ * flip the source and dst: the addresses are from the packet as
+ * received, but this API takes them in the direction of sending
+ */
+ gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
+ gl2->sw_if_index, ips,
+ &gl2->mac, INDEX_INVALID,
+ INDEX_INVALID, gl2->epg,
+ (GBP_ENDPOINT_FLAG_LEARNT |
+ GBP_ENDPOINT_FLAG_REMOTE),
+ &gl2->outer_dst, &gl2->outer_src, NULL);
+ vec_free (ips);
+}
+
+static void
+gbp_learn_l2_ip4_dp (const u8 * mac, const ip4_address_t * ip,
+ u32 bd_index, u32 sw_if_index, epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ gbp_learn_l2_t gl2 = {
+ .sw_if_index = sw_if_index,
+ .bd_index = bd_index,
+ .epg = epg,
+ .ip.ip4 = *ip,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ mac_address_from_bytes (&gl2.mac, mac);
+
+ ASSERT (!ip46_address_is_zero (&gl2.outer_src));
+ ASSERT (!ip46_address_is_zero (&gl2.outer_dst));
+
+ vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+static void
+gbp_learn_l2_ip6_dp (const u8 * mac, const ip6_address_t * ip,
+ u32 bd_index, u32 sw_if_index, epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ gbp_learn_l2_t gl2 = {
+ .sw_if_index = sw_if_index,
+ .bd_index = bd_index,
+ .epg = epg,
+ .ip.ip6 = *ip,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ mac_address_from_bytes (&gl2.mac, mac);
+
+ vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+static void
+gbp_learn_l2_dp (const u8 * mac, u32 bd_index, u32 sw_if_index,
+ epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ gbp_learn_l2_t gl2 = {
+ .sw_if_index = sw_if_index,
+ .bd_index = bd_index,
+ .epg = epg,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ mac_address_from_bytes (&gl2.mac, mac);
+
+ vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_learn_l2_trace_t_
+{
+ /* per-pkt trace data */
+ mac_address_t mac;
+ u32 sw_if_index;
+ u32 new;
+ u32 throttled;
+ u32 epg;
+ u32 d_bit;
+} gbp_learn_l2_trace_t;
+
+always_inline void
+gbp_learn_get_outer (const ethernet_header_t * eh0,
+ ip4_address_t * outer_src, ip4_address_t * outer_dst)
+{
+ ip4_header_t *ip0;
+ u8 *buff;
+
+ /* rewind back to the vxlan-gbp header */
+ buff = (u8 *) eh0;
+ buff -= (sizeof (vxlan_gbp_header_t) +
+ sizeof (udp_header_t) + sizeof (ip4_header_t));
+
+ ip0 = (ip4_header_t *) buff;
+
+ *outer_src = ip0->src_address;
+ *outer_dst = ip0->dst_address;
+}
+
+VLIB_NODE_FN (gbp_learn_l2_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
+ gbp_learn_main_t *glm;
+ f64 time_now;
+
+ glm = &gbp_learn_main;
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+ time_now = vlib_time_now (vm);
+ thread_index = vm->thread_index;
+
+ seed = throttle_seed (&glm->gl_l2_throttle, thread_index, time_now);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip4_address_t outer_src, outer_dst;
+ u32 bi0, sw_if_index0, t0, epg0;
+ const ethernet_header_t *eh0;
+ gbp_bridge_domain_t *gb0;
+ gbp_learn_next_t next0;
+ gbp_endpoint_t *ge0;
+ vlib_buffer_t *b0;
+
+ next0 = GBP_LEARN_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ eh0 = vlib_buffer_get_current (b0);
+ epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+
+ next0 = vnet_l2_feature_next (b0, glm->gl_l2_input_feat_next,
+ L2INPUT_FEAT_GBP_LEARN);
+
+ ge0 = gbp_endpoint_find_mac (eh0->src_address,
+ vnet_buffer (b0)->l2.bd_index);
+ gb0 =
+ gbp_bridge_domain_get_by_bd_index (vnet_buffer (b0)->l2.bd_index);
+
+ if ((vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D) ||
+ (gb0->gb_flags & GBP_BD_FLAG_DO_NOT_LEARN))
+ {
+ t0 = 1;
+ goto trace;
+ }
+
+ /*
+ * check for new EP or a moved EP
+ */
+ if (NULL == ge0 || ge0->ge_fwd.gef_itf != sw_if_index0)
+
+ {
+ /*
+ * use the last 4 bytes of the mac address as the hash for the EP
+ */
+ t0 = throttle_check (&glm->gl_l2_throttle, thread_index,
+ *((u32 *) (eh0->src_address + 2)), seed);
+ if (!t0)
+ {
+ gbp_learn_get_outer (eh0, &outer_src, &outer_dst);
+
+ switch (clib_net_to_host_u16 (eh0->type))
+ {
+ case ETHERNET_TYPE_IP4:
+ {
+ const ip4_header_t *ip0;
+
+ ip0 = (ip4_header_t *) (eh0 + 1);
+
+ gbp_learn_l2_ip4_dp (eh0->src_address,
+ &ip0->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+
+ break;
+ }
+ case ETHERNET_TYPE_IP6:
+ {
+ const ip6_header_t *ip0;
+
+ ip0 = (ip6_header_t *) (eh0 + 1);
+
+ gbp_learn_l2_ip6_dp (eh0->src_address,
+ &ip0->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+
+ break;
+ }
+ default:
+ gbp_learn_l2_dp (eh0->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+ break;
+ }
+ }
+ }
+ else
+ {
+ /*
+ * this update could happen simultaneously from multiple workers,
+ * but that's ok; we are not interested in being very accurate.
+ */
+ t0 = 0;
+ ge0->ge_last_time = time_now;
+ }
+ trace:
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_learn_l2_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ clib_memcpy_fast (t->mac.bytes, eh0->src_address, 6);
+ t->new = (NULL == ge0);
+ t->throttled = t0;
+ t->sw_if_index = sw_if_index0;
+ t->epg = epg0;
+ t->d_bit = ! !(vnet_buffer2 (b0)->gbp.flags &
+ VXLAN_GBP_GPFLAGS_D);
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_learn_l2_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_learn_l2_trace_t *t = va_arg (*args, gbp_learn_l2_trace_t *);
+
+ s = format (s, "new:%d throttled:%d d-bit:%d mac:%U itf:%d epg:%d",
+ t->new, t->throttled, t->d_bit,
+ format_mac_address_t, &t->mac, t->sw_if_index, t->epg);
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_learn_l2_node) = {
+ .name = "gbp-learn-l2",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_learn_l2_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(gbp_learn_error_strings),
+ .error_strings = gbp_learn_error_strings,
+
+ .n_next_nodes = GBP_LEARN_N_NEXT,
+
+ .next_nodes = {
+ [GBP_LEARN_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+typedef struct gbp_learn_l3_t_
+{
+ ip46_address_t ip;
+ u32 fib_index;
+ u32 sw_if_index;
+ epg_id_t epg;
+ ip46_address_t outer_src;
+ ip46_address_t outer_dst;
+} gbp_learn_l3_t;
+
+static void
+gbp_learn_l3_cp (const gbp_learn_l3_t * gl3)
+{
+ ip46_address_t *ips = NULL;
+
+ GBP_LEARN_DBG ("L3 EP: %U, %d", format_ip46_address, &gl3->ip,
+ IP46_TYPE_ANY, gl3->epg);
+
+ vec_add1 (ips, gl3->ip);
+
+ gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
+ gl3->sw_if_index, ips, NULL,
+ INDEX_INVALID, INDEX_INVALID, gl3->epg,
+ (GBP_ENDPOINT_FLAG_REMOTE |
+ GBP_ENDPOINT_FLAG_LEARNT),
+ &gl3->outer_dst, &gl3->outer_src, NULL);
+ vec_free (ips);
+}
+
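+/*
+ * learning happens on worker threads, but the endpoint DB is updated
+ * from the main thread: marshal the details into a gbp_learn_l3_t and
+ * post an RPC. The outer (tunnel) addresses are always IPv4 here.
+ */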
+static void
+gbp_learn_ip4_dp (const ip4_address_t * ip,
+ u32 fib_index, u32 sw_if_index, epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ /* *INDENT-OFF* */
+ gbp_learn_l3_t gl3 = {
+ .ip = {
+ .ip4 = *ip,
+ },
+ .sw_if_index = sw_if_index,
+ .fib_index = fib_index,
+ .epg = epg,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ /* *INDENT-ON* */
+
+ vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
+}
+
+static void
+gbp_learn_ip6_dp (const ip6_address_t * ip,
+ u32 fib_index, u32 sw_if_index, epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ /* *INDENT-OFF* */
+ gbp_learn_l3_t gl3 = {
+ .ip = {
+ .ip6 = *ip,
+ },
+ .sw_if_index = sw_if_index,
+ .fib_index = fib_index,
+ .epg = epg,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ /* *INDENT-ON* */
+
+ vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
+}
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_learn_l3_trace_t_
+{
+ /* per-pkt trace data */
+ ip46_address_t ip;
+ u32 sw_if_index;
+ u32 new;
+ u32 throttled;
+ u32 epg;
+} gbp_learn_l3_trace_t;
+
+static uword
+gbp_learn_l3 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame,
+ fib_protocol_t fproto)
+{
+ u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
+ gbp_learn_main_t *glm;
+ f64 time_now;
+
+ glm = &gbp_learn_main;
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+ time_now = vlib_time_now (vm);
+ thread_index = vm->thread_index;
+
+ seed = throttle_seed (&glm->gl_l3_throttle, thread_index, time_now);
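+  /* per-interval seed for the throttle that suppresses duplicate learns */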
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sw_if_index0, t0, epg0, fib_index0;
+ CLIB_UNUSED (const ip4_header_t *) ip4_0;
+ CLIB_UNUSED (const ip6_header_t *) ip6_0;
+ ip4_address_t outer_src, outer_dst;
+ ethernet_header_t *eth0;
+ gbp_learn_next_t next0;
+ gbp_endpoint_t *ge0;
+ vlib_buffer_t *b0;
+
+ next0 = GBP_LEARN_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+ ip6_0 = NULL;
+ ip4_0 = NULL;
+
+ vnet_feature_next (&next0, b0);
+
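+	  /* do not learn from packets carrying the D (do-not-learn) bit */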
+ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D)
+ {
+ t0 = 1;
+ ge0 = NULL;
+ goto trace;
+ }
+
+ fib_index0 = fib_table_get_index_for_sw_if_index (fproto,
+ sw_if_index0);
+
+ if (FIB_PROTOCOL_IP6 == fproto)
+ {
+ ip6_0 = vlib_buffer_get_current (b0);
+ eth0 = (ethernet_header_t *) (((u8 *) ip6_0) - sizeof (*eth0));
+
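+	      /*
+	       * the outer (tunnel) addresses recovered here become the
+	       * learnt remote EP's tunnel endpoints
+	       */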
+ gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
+
+ ge0 = gbp_endpoint_find_ip6 (&ip6_0->src_address, fib_index0);
+
+ if (NULL == ge0)
+ {
+ t0 = throttle_check (&glm->gl_l3_throttle,
+ thread_index,
+ ip6_address_hash_to_u32
+ (&ip6_0->src_address), seed);
+
+ if (!t0)
+ {
+ gbp_learn_ip6_dp (&ip6_0->src_address,
+ fib_index0, sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+ }
+ }
+ else
+ {
+ /*
+	       * this update could happen simultaneously from multiple
+	       * workers, but that's OK; we do not need it to be very
+	       * accurate.
+ */
+ t0 = 0;
+ ge0->ge_last_time = time_now;
+ }
+ }
+ else
+ {
+ ip4_0 = vlib_buffer_get_current (b0);
+ eth0 = (ethernet_header_t *) (((u8 *) ip4_0) - sizeof (*eth0));
+
+ gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
+ ge0 = gbp_endpoint_find_ip4 (&ip4_0->src_address, fib_index0);
+
+ if (NULL == ge0)
+ {
+ t0 = throttle_check (&glm->gl_l3_throttle, thread_index,
+ ip4_0->src_address.as_u32, seed);
+
+ if (!t0)
+ {
+ gbp_learn_ip4_dp (&ip4_0->src_address,
+ fib_index0, sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+ }
+ }
+ else
+ {
+ /*
+	       * this update could happen simultaneously from multiple
+	       * workers, but that's OK; we do not need it to be very
+	       * accurate.
+ */
+ t0 = 0;
+ ge0->ge_last_time = time_now;
+ }
+ }
+ trace:
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_learn_l3_trace_t *t;
+
+ t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ if (FIB_PROTOCOL_IP6 == fproto && ip6_0)
+ ip46_address_set_ip6 (&t->ip, &ip6_0->src_address);
+ if (FIB_PROTOCOL_IP4 == fproto && ip4_0)
+ ip46_address_set_ip4 (&t->ip, &ip4_0->src_address);
+ t->new = (NULL == ge0);
+ t->throttled = t0;
+ t->sw_if_index = sw_if_index0;
+ t->epg = epg0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_learn_l3_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_learn_l3_trace_t *t = va_arg (*args, gbp_learn_l3_trace_t *);
+
+ s = format (s, "new:%d throttled:%d ip:%U itf:%d epg:%d",
+ t->new, t->throttled,
+ format_ip46_address, &t->ip, IP46_TYPE_ANY, t->sw_if_index,
+ t->epg);
+
+ return s;
+}
+
+VLIB_NODE_FN (gbp_learn_ip4_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP4));
+}
+
+VLIB_NODE_FN (gbp_learn_ip6_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP6));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_learn_ip4_node) = {
+ .name = "gbp-learn-ip4",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_learn_l3_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+};
+
+VNET_FEATURE_INIT (gbp_learn_ip4, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "gbp-learn-ip4",
+};
+
+VLIB_REGISTER_NODE (gbp_learn_ip6_node) = {
+ .name = "gbp-learn-ip6",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_learn_l3_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+};
+
+VNET_FEATURE_INIT (gbp_learn_ip6, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "gbp-learn-ip6",
+};
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_policy.c b/src/plugins/gbp/gbp_policy.c
index 5d5dcd0befc..fbdf3946d1d 100644
--- a/src/plugins/gbp/gbp_policy.c
+++ b/src/plugins/gbp/gbp_policy.c
@@ -16,368 +16,7 @@
#include <plugins/gbp/gbp.h>
#include <plugins/gbp/gbp_policy_dpo.h>
-#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
-
-#define foreach_gbp_policy \
- _(DENY, "deny")
-
-typedef enum
-{
-#define _(sym,str) GBP_ERROR_##sym,
- foreach_gbp_policy
-#undef _
- GBP_POLICY_N_ERROR,
-} gbp_policy_error_t;
-
-static char *gbp_policy_error_strings[] = {
-#define _(sym,string) string,
- foreach_gbp_policy
-#undef _
-};
-
-typedef enum
-{
-#define _(sym,str) GBP_POLICY_NEXT_##sym,
- foreach_gbp_policy
-#undef _
- GBP_POLICY_N_NEXT,
-} gbp_policy_next_t;
-
-/**
- * per-packet trace data
- */
-typedef struct gbp_policy_trace_t_
-{
- /* per-pkt trace data */
- u32 src_epg;
- u32 dst_epg;
- u32 acl_index;
- u32 allowed;
-} gbp_policy_trace_t;
-
-always_inline dpo_proto_t
-ethertype_to_dpo_proto (u16 etype)
-{
- etype = clib_net_to_host_u16 (etype);
-
- switch (etype)
- {
- case ETHERNET_TYPE_IP4:
- return (DPO_PROTO_IP4);
- case ETHERNET_TYPE_IP6:
- return (DPO_PROTO_IP6);
- }
-
- return (DPO_PROTO_NONE);
-}
-
-always_inline u32
-gbp_rule_l2_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0)
-{
- const ethernet_header_t *eth0;
- const dpo_id_t *dpo;
- dpo_proto_t dproto;
-
- eth0 = vlib_buffer_get_current (b0);
- /* pop the ethernet header to prepare for L3 rewrite */
- vlib_buffer_advance (b0, vnet_buffer (b0)->l2.l2_len);
-
- dproto = ethertype_to_dpo_proto (eth0->type);
- dpo = &gu->gu_dpo[GBP_POLICY_NODE_L2][dproto];
-
- /* save the LB index for the next node and reset the IP flow hash
- * so it's recalculated */
- vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
- vnet_buffer (b0)->ip.flow_hash = 0;
-
- return (dpo->dpoi_next_node);
-}
-
-always_inline u8
-gbp_policy_is_ethertype_allowed (const gbp_contract_t * gc0, u16 ethertype)
-{
- u16 *et;
-
- vec_foreach (et, gc0->gc_allowed_ethertypes)
- {
- if (*et == ethertype)
- return (1);
- }
- return (0);
-}
-
-static uword
-gbp_policy_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, u8 is_port_based)
-{
- gbp_main_t *gm = &gbp_main;
- gbp_policy_main_t *gpm = &gbp_policy_main;
- u32 n_left_from, *from, *to_next;
- u32 next_index;
-
- next_index = 0;
- n_left_from = frame->n_vectors;
- from = vlib_frame_vector_args (frame);
-
- while (n_left_from > 0)
- {
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- const ethernet_header_t *h0;
- const gbp_endpoint_t *ge0;
- const gbp_contract_t *gc0;
- gbp_policy_next_t next0;
- gbp_contract_key_t key0;
- u32 bi0, sw_if_index0;
- vlib_buffer_t *b0;
- index_t gci0;
-
- gc0 = NULL;
- next0 = GBP_POLICY_NEXT_DENY;
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
- h0 = vlib_buffer_get_current (b0);
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
-
- /*
- * If the A0bit is set then policy has already been applied
- * and we skip enforcement here.
- */
- if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
- {
- next0 = vnet_l2_feature_next (b0,
- gpm->l2_output_feat_next
- [is_port_based],
- (is_port_based ?
- L2OUTPUT_FEAT_GBP_POLICY_PORT :
- L2OUTPUT_FEAT_GBP_POLICY_MAC));
- key0.as_u32 = ~0;
- goto trace;
- }
- /*
- * determine the src and dst EPG
- */
- if (is_port_based)
- ge0 = gbp_endpoint_find_itf (sw_if_index0);
- else
- ge0 = gbp_endpoint_find_mac (h0->dst_address,
- vnet_buffer (b0)->l2.bd_index);
-
- if (NULL != ge0)
- key0.gck_dst = ge0->ge_fwd.gef_epg_id;
- else
- /* If you cannot determine the destination EP then drop */
- goto trace;
-
- key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
-
- if (EPG_INVALID != key0.gck_src)
- {
- if (PREDICT_FALSE (key0.gck_src == key0.gck_dst))
- {
- /*
- * intra-epg allowed
- */
- next0 =
- vnet_l2_feature_next (b0,
- gpm->l2_output_feat_next
- [is_port_based],
- (is_port_based ?
- L2OUTPUT_FEAT_GBP_POLICY_PORT :
- L2OUTPUT_FEAT_GBP_POLICY_MAC));
- vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
- }
- else
- {
- gci0 = gbp_contract_find (&key0);
-
- if (INDEX_INVALID != gci0)
- {
- u32 rule_match_p0, trace_bitmap0;
- fa_5tuple_opaque_t pkt_5tuple0;
- u32 acl_pos_p0, acl_match_p0;
- u8 is_ip60, l2_len0, action0;
- const gbp_rule_t *gu;
- u16 ether_type0;
- const u8 *h0;
-
- action0 = 0;
- gc0 = gbp_contract_get (gci0);
- l2_len0 = vnet_buffer (b0)->l2.l2_len;
- h0 = vlib_buffer_get_current (b0);
-
- ether_type0 = *(u16 *) (h0 + l2_len0 - 2);
-
- if (!gbp_policy_is_ethertype_allowed (gc0, ether_type0))
- {
- /*
- * black list model so drop
- */
- goto trace;
- }
-
- if ((ether_type0 ==
- clib_net_to_host_u16 (ETHERNET_TYPE_IP6))
- || (ether_type0 ==
- clib_net_to_host_u16 (ETHERNET_TYPE_IP4)))
- {
- is_ip60 =
- (ether_type0 ==
- clib_net_to_host_u16 (ETHERNET_TYPE_IP6)) ? 1 :
- 0;
- /*
- * tests against the ACL
- */
- acl_plugin_fill_5tuple_inline (gm->
- acl_plugin.p_acl_main,
- gc0->gc_lc_index, b0,
- is_ip60,
- /* is_input */ 0,
- /* is_l2_path */ 1,
- &pkt_5tuple0);
- acl_plugin_match_5tuple_inline (gm->
- acl_plugin.p_acl_main,
- gc0->gc_lc_index,
- &pkt_5tuple0,
- is_ip60, &action0,
- &acl_pos_p0,
- &acl_match_p0,
- &rule_match_p0,
- &trace_bitmap0);
-
- if (action0 > 0)
- {
- vnet_buffer2 (b0)->gbp.flags |=
- VXLAN_GBP_GPFLAGS_A;
- gu =
- gbp_rule_get (gc0->gc_rules[rule_match_p0]);
-
- switch (gu->gu_action)
- {
- case GBP_RULE_PERMIT:
- next0 = vnet_l2_feature_next
- (b0,
- gpm->l2_output_feat_next
- [is_port_based],
- (is_port_based ?
- L2OUTPUT_FEAT_GBP_POLICY_PORT :
- L2OUTPUT_FEAT_GBP_POLICY_MAC));
- break;
- case GBP_RULE_DENY:
- next0 = 0;
- break;
- case GBP_RULE_REDIRECT:
- next0 = gbp_rule_l2_redirect (gu, b0);
- break;
- }
- }
- }
- }
- }
- }
- else
- {
- /*
- * the src EPG is not set when the packet arrives on an EPG
- * uplink interface and we do not need to apply policy
- */
- next0 =
- vnet_l2_feature_next (b0,
- gpm->l2_output_feat_next[is_port_based],
- (is_port_based ?
- L2OUTPUT_FEAT_GBP_POLICY_PORT :
- L2OUTPUT_FEAT_GBP_POLICY_MAC));
- }
-
- trace:
- if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- gbp_policy_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->src_epg = key0.gck_src;
- t->dst_epg = key0.gck_dst;
- t->acl_index = (gc0 ? gc0->gc_acl_index : ~0),
- t->allowed = (next0 != GBP_POLICY_NEXT_DENY);
- }
-
- /* verify speculative enqueue, maybe switch current next frame */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
-
- return frame->n_vectors;
-}
-
-VLIB_NODE_FN (gbp_policy_port_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_policy_inline (vm, node, frame, 1));
-}
-
-VLIB_NODE_FN (gbp_policy_mac_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_policy_inline (vm, node, frame, 0));
-}
-
-/* packet trace format function */
-static u8 *
-format_gbp_policy_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- gbp_policy_trace_t *t = va_arg (*args, gbp_policy_trace_t *);
-
- s =
- format (s, "src:%d, dst:%d, acl:%d allowed:%d",
- t->src_epg, t->dst_epg, t->acl_index, t->allowed);
-
- return s;
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (gbp_policy_port_node) = {
- .name = "gbp-policy-port",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_policy_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_errors = ARRAY_LEN(gbp_policy_error_strings),
- .error_strings = gbp_policy_error_strings,
-
- .n_next_nodes = GBP_POLICY_N_NEXT,
-
- .next_nodes = {
- [GBP_POLICY_NEXT_DENY] = "error-drop",
- },
-};
-
-VLIB_REGISTER_NODE (gbp_policy_mac_node) = {
- .name = "gbp-policy-mac",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_policy_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
- .sibling_of = "gbp-policy-port",
-};
-
-/* *INDENT-ON* */
+gbp_policy_main_t gbp_policy_main;
static clib_error_t *
gbp_policy_init (vlib_main_t * vm)
diff --git a/src/plugins/gbp/gbp_policy_node.c b/src/plugins/gbp/gbp_policy_node.c
new file mode 100644
index 00000000000..aaf362d2c9a
--- /dev/null
+++ b/src/plugins/gbp/gbp_policy_node.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+#define foreach_gbp_policy \
+ _(DENY, "deny")
+
+typedef enum
+{
+#define _(sym,str) GBP_ERROR_##sym,
+ foreach_gbp_policy
+#undef _
+ GBP_POLICY_N_ERROR,
+} gbp_policy_error_t;
+
+static char *gbp_policy_error_strings[] = {
+#define _(sym,string) string,
+ foreach_gbp_policy
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) GBP_POLICY_NEXT_##sym,
+ foreach_gbp_policy
+#undef _
+ GBP_POLICY_N_NEXT,
+} gbp_policy_next_t;
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_policy_trace_t_
+{
+ /* per-pkt trace data */
+ u32 src_epg;
+ u32 dst_epg;
+ u32 acl_index;
+ u32 allowed;
+} gbp_policy_trace_t;
+
+always_inline dpo_proto_t
+ethertype_to_dpo_proto (u16 etype)
+{
+ etype = clib_net_to_host_u16 (etype);
+
+ switch (etype)
+ {
+ case ETHERNET_TYPE_IP4:
+ return (DPO_PROTO_IP4);
+ case ETHERNET_TYPE_IP6:
+ return (DPO_PROTO_IP6);
+ }
+
+ return (DPO_PROTO_NONE);
+}
+
+always_inline u32
+gbp_rule_l2_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0)
+{
+ const ethernet_header_t *eth0;
+ const dpo_id_t *dpo;
+ dpo_proto_t dproto;
+
+ eth0 = vlib_buffer_get_current (b0);
+ /* pop the ethernet header to prepare for L3 rewrite */
+ vlib_buffer_advance (b0, vnet_buffer (b0)->l2.l2_len);
+
+ dproto = ethertype_to_dpo_proto (eth0->type);
+ dpo = &gu->gu_dpo[GBP_POLICY_NODE_L2][dproto];
+
+ /* save the LB index for the next node and reset the IP flow hash
+ * so it's recalculated */
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
+ vnet_buffer (b0)->ip.flow_hash = 0;
+
+ return (dpo->dpoi_next_node);
+}
+
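+/* contracts carry an allow-list of ethertypes; anything else is dropped */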
+always_inline u8
+gbp_policy_is_ethertype_allowed (const gbp_contract_t * gc0, u16 ethertype)
+{
+ u16 *et;
+
+ vec_foreach (et, gc0->gc_allowed_ethertypes)
+ {
+ if (*et == ethertype)
+ return (1);
+ }
+ return (0);
+}
+
+static uword
+gbp_policy_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, u8 is_port_based)
+{
+ gbp_main_t *gm = &gbp_main;
+ gbp_policy_main_t *gpm = &gbp_policy_main;
+ u32 n_left_from, *from, *to_next;
+ u32 next_index;
+
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ const ethernet_header_t *h0;
+ const gbp_endpoint_t *ge0;
+ const gbp_contract_t *gc0;
+ gbp_policy_next_t next0;
+ gbp_contract_key_t key0;
+ u32 bi0, sw_if_index0;
+ vlib_buffer_t *b0;
+ index_t gci0;
+
+ gc0 = NULL;
+ next0 = GBP_POLICY_NEXT_DENY;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+ /*
+	   * If the A-bit is set then policy has already been applied
+ * and we skip enforcement here.
+ */
+ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
+ {
+ next0 = vnet_l2_feature_next (b0,
+ gpm->l2_output_feat_next
+ [is_port_based],
+ (is_port_based ?
+ L2OUTPUT_FEAT_GBP_POLICY_PORT :
+ L2OUTPUT_FEAT_GBP_POLICY_MAC));
+ key0.as_u32 = ~0;
+ goto trace;
+ }
+ /*
+ * determine the src and dst EPG
+ */
+ if (is_port_based)
+ ge0 = gbp_endpoint_find_itf (sw_if_index0);
+ else
+ ge0 = gbp_endpoint_find_mac (h0->dst_address,
+ vnet_buffer (b0)->l2.bd_index);
+
+ if (NULL != ge0)
+ key0.gck_dst = ge0->ge_fwd.gef_epg_id;
+ else
+ /* If you cannot determine the destination EP then drop */
+ goto trace;
+
+ key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
+
+ if (EPG_INVALID != key0.gck_src)
+ {
+ if (PREDICT_FALSE (key0.gck_src == key0.gck_dst))
+ {
+ /*
+ * intra-epg allowed
+ */
+ next0 =
+ vnet_l2_feature_next (b0,
+ gpm->l2_output_feat_next
+ [is_port_based],
+ (is_port_based ?
+ L2OUTPUT_FEAT_GBP_POLICY_PORT :
+ L2OUTPUT_FEAT_GBP_POLICY_MAC));
+ vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+ }
+ else
+ {
+ gci0 = gbp_contract_find (&key0);
+
+ if (INDEX_INVALID != gci0)
+ {
+ u32 rule_match_p0, trace_bitmap0;
+ fa_5tuple_opaque_t pkt_5tuple0;
+ u32 acl_pos_p0, acl_match_p0;
+ u8 is_ip60, l2_len0, action0;
+ const gbp_rule_t *gu;
+ u16 ether_type0;
+ const u8 *h0;
+
+ action0 = 0;
+ gc0 = gbp_contract_get (gci0);
+ l2_len0 = vnet_buffer (b0)->l2.l2_len;
+ h0 = vlib_buffer_get_current (b0);
+
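+		  /*
+		   * the ethertype is in the last 2 bytes of the L2
+		   * header, so VLAN tagged frames are handled too
+		   */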
+ ether_type0 = *(u16 *) (h0 + l2_len0 - 2);
+
+ if (!gbp_policy_is_ethertype_allowed (gc0, ether_type0))
+ {
+ /*
+			       * the ethertype is not in the contract's
+			       * allowed list, so drop
+ */
+ goto trace;
+ }
+
+ if ((ether_type0 ==
+ clib_net_to_host_u16 (ETHERNET_TYPE_IP6))
+ || (ether_type0 ==
+ clib_net_to_host_u16 (ETHERNET_TYPE_IP4)))
+ {
+ is_ip60 =
+ (ether_type0 ==
+ clib_net_to_host_u16 (ETHERNET_TYPE_IP6)) ? 1 :
+ 0;
+ /*
+ * tests against the ACL
+ */
+ acl_plugin_fill_5tuple_inline (gm->
+ acl_plugin.p_acl_main,
+ gc0->gc_lc_index, b0,
+ is_ip60,
+ /* is_input */ 0,
+ /* is_l2_path */ 1,
+ &pkt_5tuple0);
+ acl_plugin_match_5tuple_inline (gm->
+ acl_plugin.p_acl_main,
+ gc0->gc_lc_index,
+ &pkt_5tuple0,
+ is_ip60, &action0,
+ &acl_pos_p0,
+ &acl_match_p0,
+ &rule_match_p0,
+ &trace_bitmap0);
+
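+		      /*
+		       * a non-zero action means the ACL permitted the
+		       * packet; the matched rule then decides to permit,
+		       * deny or redirect
+		       */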
+ if (action0 > 0)
+ {
+ vnet_buffer2 (b0)->gbp.flags |=
+ VXLAN_GBP_GPFLAGS_A;
+ gu =
+ gbp_rule_get (gc0->gc_rules[rule_match_p0]);
+
+ switch (gu->gu_action)
+ {
+ case GBP_RULE_PERMIT:
+ next0 = vnet_l2_feature_next
+ (b0,
+ gpm->l2_output_feat_next
+ [is_port_based],
+ (is_port_based ?
+ L2OUTPUT_FEAT_GBP_POLICY_PORT :
+ L2OUTPUT_FEAT_GBP_POLICY_MAC));
+ break;
+ case GBP_RULE_DENY:
+ next0 = 0;
+ break;
+ case GBP_RULE_REDIRECT:
+ next0 = gbp_rule_l2_redirect (gu, b0);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ /*
+ * the src EPG is not set when the packet arrives on an EPG
+ * uplink interface and we do not need to apply policy
+ */
+ next0 =
+ vnet_l2_feature_next (b0,
+ gpm->l2_output_feat_next[is_port_based],
+ (is_port_based ?
+ L2OUTPUT_FEAT_GBP_POLICY_PORT :
+ L2OUTPUT_FEAT_GBP_POLICY_MAC));
+ }
+
+ trace:
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_policy_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->src_epg = key0.gck_src;
+ t->dst_epg = key0.gck_dst;
+	      t->acl_index = (gc0 ? gc0->gc_acl_index : ~0);
+ t->allowed = (next0 != GBP_POLICY_NEXT_DENY);
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (gbp_policy_port_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_policy_inline (vm, node, frame, 1));
+}
+
+VLIB_NODE_FN (gbp_policy_mac_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_policy_inline (vm, node, frame, 0));
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_policy_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_policy_trace_t *t = va_arg (*args, gbp_policy_trace_t *);
+
+ s =
+ format (s, "src:%d, dst:%d, acl:%d allowed:%d",
+ t->src_epg, t->dst_epg, t->acl_index, t->allowed);
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_policy_port_node) = {
+ .name = "gbp-policy-port",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_policy_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(gbp_policy_error_strings),
+ .error_strings = gbp_policy_error_strings,
+
+ .n_next_nodes = GBP_POLICY_N_NEXT,
+
+ .next_nodes = {
+ [GBP_POLICY_NEXT_DENY] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_policy_mac_node) = {
+ .name = "gbp-policy-mac",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_policy_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "gbp-policy-port",
+};
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_sclass.c b/src/plugins/gbp/gbp_sclass.c
index 24581e28904..481ee886976 100644
--- a/src/plugins/gbp/gbp_sclass.c
+++ b/src/plugins/gbp/gbp_sclass.c
@@ -13,296 +13,12 @@
* limitations under the License.
*/
-#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_sclass.h>
#include <vnet/l2/l2_input.h>
#include <vnet/l2/l2_output.h>
-/**
- * Grouping of global data for the GBP source EPG classification feature
- */
-typedef struct gbp_sclass_main_t_
-{
- /**
- * Next nodes for L2 output features
- */
- u32 gel_l2_input_feat_next[32];
- u32 gel_l2_output_feat_next[32];
-} gbp_sclass_main_t;
-
-static gbp_sclass_main_t gbp_sclass_main;
-
-#define foreach_gbp_sclass \
- _(DROP, "drop")
-
-
-typedef enum
-{
-#define _(sym,str) GBP_SCLASS_NEXT_##sym,
- foreach_gbp_sclass
-#undef _
- GBP_SCLASS_N_NEXT,
-} gbp_sclass_next_t;
-
-typedef struct gbp_sclass_trace_t_
-{
- /* per-pkt trace data */
- u32 epg;
- u32 sclass;
-} gbp_sclass_trace_t;
-
-static_always_inline uword
-gbp_sclass_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, int is_id_2_sclass, int is_l2)
-{
- u32 n_left_from, *from, *to_next, next_index;
- gbp_sclass_main_t *glm;
-
- glm = &gbp_sclass_main;
- next_index = 0;
- n_left_from = frame->n_vectors;
- from = vlib_frame_vector_args (frame);
-
- while (n_left_from > 0)
- {
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- gbp_sclass_next_t next0;
- vlib_buffer_t *b0;
- epg_id_t epg0;
- u16 sclass0;
- u32 bi0;
-
- next0 = GBP_SCLASS_NEXT_DROP;
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- if (is_id_2_sclass)
- {
- // output direction - convert from the SRC-EPD to the sclass
- gbp_endpoint_group_t *gg;
+gbp_sclass_main_t gbp_sclass_main;
- epg0 = vnet_buffer2 (b0)->gbp.src_epg;
- gg = gbp_epg_get (epg0);
-
- if (NULL != gg)
- {
- sclass0 = vnet_buffer2 (b0)->gbp.sclass = gg->gg_sclass;
- if (is_l2)
- next0 =
- vnet_l2_feature_next (b0, glm->gel_l2_output_feat_next,
- L2OUTPUT_FEAT_GBP_ID_2_SCLASS);
- else
- vnet_feature_next (&next0, b0);
- }
- else
- sclass0 = 0;
- }
- else
- {
- /* input direction - convert from the sclass to the SRC-EGD */
- sclass0 = vnet_buffer2 (b0)->gbp.sclass;
- vnet_buffer2 (b0)->gbp.src_epg =
- gbp_epg_sclass_2_id (vnet_buffer2 (b0)->gbp.sclass);
- epg0 = vnet_buffer2 (b0)->gbp.src_epg;
-
- if (EPG_INVALID != epg0)
- {
- if (is_l2)
- next0 =
- vnet_l2_feature_next (b0, glm->gel_l2_input_feat_next,
- L2INPUT_FEAT_GBP_SCLASS_2_ID);
- else
- vnet_feature_next (&next0, b0);
- }
- }
-
- if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- gbp_sclass_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->epg = epg0;
- t->sclass = sclass0;
- }
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
-
- return frame->n_vectors;
-}
-
-VLIB_NODE_FN (l2_gbp_id_2_sclass_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_sclass_inline (vm, node, frame, 1, 1));
-}
-
-VLIB_NODE_FN (l2_gbp_sclass_2_id_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_sclass_inline (vm, node, frame, 0, 1));
-}
-
-VLIB_NODE_FN (ip4_gbp_id_2_sclass_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_sclass_inline (vm, node, frame, 1, 0));
-}
-
-VLIB_NODE_FN (ip4_gbp_sclass_2_id_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_sclass_inline (vm, node, frame, 0, 0));
-}
-
-VLIB_NODE_FN (ip6_gbp_id_2_sclass_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_sclass_inline (vm, node, frame, 1, 0));
-}
-
-VLIB_NODE_FN (ip6_gbp_sclass_2_id_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- return (gbp_sclass_inline (vm, node, frame, 0, 0));
-}
-
-/* packet trace format function */
-static u8 *
-format_gbp_sclass_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- gbp_sclass_trace_t *t = va_arg (*args, gbp_sclass_trace_t *);
-
- s = format (s, "epg:%d sclass:%d", t->epg, t->sclass);
-
- return s;
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (l2_gbp_id_2_sclass_node) = {
- .name = "l2-gbp-id-2-sclass",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_sclass_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_next_nodes = GBP_SCLASS_N_NEXT,
-
- .next_nodes = {
- [GBP_SCLASS_NEXT_DROP] = "error-drop",
- },
-};
-VLIB_REGISTER_NODE (l2_gbp_sclass_2_id_node) = {
- .name = "l2-gbp-sclass-2-id",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_sclass_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_next_nodes = GBP_SCLASS_N_NEXT,
-
- .next_nodes = {
- [GBP_SCLASS_NEXT_DROP] = "error-drop",
- },
-};
-
-VLIB_REGISTER_NODE (ip4_gbp_id_2_sclass_node) = {
- .name = "ip4-gbp-id-2-sclass",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_sclass_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_next_nodes = GBP_SCLASS_N_NEXT,
-
- .next_nodes = {
- [GBP_SCLASS_NEXT_DROP] = "error-drop",
- },
-};
-VLIB_REGISTER_NODE (ip4_gbp_sclass_2_id_node) = {
- .name = "ip4-gbp-sclass-2-id",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_sclass_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_next_nodes = GBP_SCLASS_N_NEXT,
-
- .next_nodes = {
- [GBP_SCLASS_NEXT_DROP] = "error-drop",
- },
-};
-
-VLIB_REGISTER_NODE (ip6_gbp_id_2_sclass_node) = {
- .name = "ip6-gbp-id-2-sclass",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_sclass_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_next_nodes = GBP_SCLASS_N_NEXT,
-
- .next_nodes = {
- [GBP_SCLASS_NEXT_DROP] = "error-drop",
- },
-};
-VLIB_REGISTER_NODE (ip6_gbp_sclass_2_id_node) = {
- .name = "ip6-gbp-sclass-2-id",
- .vector_size = sizeof (u32),
- .format_trace = format_gbp_sclass_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
-
- .n_next_nodes = GBP_SCLASS_N_NEXT,
-
- .next_nodes = {
- [GBP_SCLASS_NEXT_DROP] = "error-drop",
- },
-};
-
-
-VNET_FEATURE_INIT (ip4_gbp_sclass_2_id_feat, static) =
-{
- .arc_name = "ip4-unicast",
- .node_name = "ip4-gbp-sclass-2-id",
- .runs_before = VNET_FEATURES ("gbp-learn-ip4"),
-};
-VNET_FEATURE_INIT (ip6_gbp_sclass_2_id_feat, static) =
-{
- .arc_name = "ip6-unicast",
- .node_name = "ip6-gbp-sclass-2-id",
- .runs_before = VNET_FEATURES ("gbp-learn-ip6"),
-};
-VNET_FEATURE_INIT (ip4_gbp_id_2_sclass_feat, static) =
-{
- .arc_name = "ip4-output",
- .node_name = "ip4-gbp-id-2-sclass",
-};
-VNET_FEATURE_INIT (ip6_gbp_id_2_sclass_feat, static) =
-{
- .arc_name = "ip6-output",
- .node_name = "ip6-gbp-id-2-sclass",
-};
-/* *INDENT-ON* */
-
-#ifndef CLIB_MARCH_VARIANT
void
gbp_sclass_enable_l2 (u32 sw_if_index)
{
@@ -342,7 +58,6 @@ gbp_sclass_disable_ip (u32 sw_if_index)
vnet_feature_enable_disable ("ip6-output",
"ip6-gbp-id-2-sclass", sw_if_index, 0, 0, 0);
}
-#endif /* CLIB_MARCH_VARIANT */
static clib_error_t *
gbp_sclass_init (vlib_main_t * vm)
@@ -369,6 +84,7 @@ gbp_sclass_init (vlib_main_t * vm)
VLIB_INIT_FUNCTION (gbp_sclass_init);
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/plugins/gbp/gbp_sclass.h b/src/plugins/gbp/gbp_sclass.h
index 07c5fffcc96..75e35c36bdf 100644
--- a/src/plugins/gbp/gbp_sclass.h
+++ b/src/plugins/gbp/gbp_sclass.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Copyright (c) 2018 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -18,10 +18,24 @@
#include <plugins/gbp/gbp.h>
-extern void gbp_sclass_enable_ip (u32 sw_if_index);
+/**
+ * Grouping of global data for the GBP source EPG classification feature
+ */
+typedef struct gbp_sclass_main_t_
+{
+ /**
+ * Next nodes for L2 output features
+ */
+ u32 gel_l2_input_feat_next[32];
+ u32 gel_l2_output_feat_next[32];
+} gbp_sclass_main_t;
+
+extern gbp_sclass_main_t gbp_sclass_main;
+
extern void gbp_sclass_enable_l2 (u32 sw_if_index);
-extern void gbp_sclass_disable_ip (u32 sw_if_index);
extern void gbp_sclass_disable_l2 (u32 sw_if_index);
+extern void gbp_sclass_enable_ip (u32 sw_if_index);
+extern void gbp_sclass_disable_ip (u32 sw_if_index);
#endif
diff --git a/src/plugins/gbp/gbp_sclass_node.c b/src/plugins/gbp/gbp_sclass_node.c
new file mode 100644
index 00000000000..35b23844fd6
--- /dev/null
+++ b/src/plugins/gbp/gbp_sclass_node.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_sclass.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+
+#define foreach_gbp_sclass \
+ _(DROP, "drop")
+
+typedef enum
+{
+#define _(sym,str) GBP_SCLASS_NEXT_##sym,
+ foreach_gbp_sclass
+#undef _
+ GBP_SCLASS_N_NEXT,
+} gbp_sclass_next_t;
+
+typedef struct gbp_sclass_trace_t_
+{
+ /* per-pkt trace data */
+ u32 epg;
+ u32 sclass;
+} gbp_sclass_trace_t;
+
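+/*
+ * the sclass is the wire encoding of the source EPG carried in the
+ * VXLAN-GBP header; convert EPG ID to sclass on output and sclass
+ * back to EPG ID on input
+ */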
+static_always_inline uword
+gbp_sclass_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_id_2_sclass, int is_l2)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+ gbp_sclass_main_t *glm;
+
+ glm = &gbp_sclass_main;
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ gbp_sclass_next_t next0;
+ vlib_buffer_t *b0;
+ epg_id_t epg0;
+ u16 sclass0;
+ u32 bi0;
+
+ next0 = GBP_SCLASS_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (is_id_2_sclass)
+ {
+	      // output direction - convert from the SRC-EPG to the sclass
+ gbp_endpoint_group_t *gg;
+
+ epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+ gg = gbp_epg_get (epg0);
+
+ if (NULL != gg)
+ {
+ sclass0 = vnet_buffer2 (b0)->gbp.sclass = gg->gg_sclass;
+ if (is_l2)
+ next0 =
+ vnet_l2_feature_next (b0, glm->gel_l2_output_feat_next,
+ L2OUTPUT_FEAT_GBP_ID_2_SCLASS);
+ else
+ vnet_feature_next (&next0, b0);
+ }
+ else
+ sclass0 = 0;
+ }
+ else
+ {
+	      /* input direction - convert from the sclass to the SRC-EPG */
+ sclass0 = vnet_buffer2 (b0)->gbp.sclass;
+ vnet_buffer2 (b0)->gbp.src_epg =
+ gbp_epg_sclass_2_id (vnet_buffer2 (b0)->gbp.sclass);
+ epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+
+ if (EPG_INVALID != epg0)
+ {
+ if (is_l2)
+ next0 =
+ vnet_l2_feature_next (b0, glm->gel_l2_input_feat_next,
+ L2INPUT_FEAT_GBP_SCLASS_2_ID);
+ else
+ vnet_feature_next (&next0, b0);
+ }
+ }
+
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_sclass_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->epg = epg0;
+ t->sclass = sclass0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (l2_gbp_id_2_sclass_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 1, 1));
+}
+
+VLIB_NODE_FN (l2_gbp_sclass_2_id_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 0, 1));
+}
+
+VLIB_NODE_FN (ip4_gbp_id_2_sclass_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 1, 0));
+}
+
+VLIB_NODE_FN (ip4_gbp_sclass_2_id_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 0, 0));
+}
+
+VLIB_NODE_FN (ip6_gbp_id_2_sclass_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 1, 0));
+}
+
+VLIB_NODE_FN (ip6_gbp_sclass_2_id_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_sclass_inline (vm, node, frame, 0, 0));
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_sclass_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_sclass_trace_t *t = va_arg (*args, gbp_sclass_trace_t *);
+
+ s = format (s, "epg:%d sclass:%d", t->epg, t->sclass);
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_gbp_id_2_sclass_node) = {
+ .name = "l2-gbp-id-2-sclass",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+VLIB_REGISTER_NODE (l2_gbp_sclass_2_id_node) = {
+ .name = "l2-gbp-sclass-2-id",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (ip4_gbp_id_2_sclass_node) = {
+ .name = "ip4-gbp-id-2-sclass",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+VLIB_REGISTER_NODE (ip4_gbp_sclass_2_id_node) = {
+ .name = "ip4-gbp-sclass-2-id",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (ip6_gbp_id_2_sclass_node) = {
+ .name = "ip6-gbp-id-2-sclass",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+VLIB_REGISTER_NODE (ip6_gbp_sclass_2_id_node) = {
+ .name = "ip6-gbp-sclass-2-id",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_sclass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = GBP_SCLASS_N_NEXT,
+
+ .next_nodes = {
+ [GBP_SCLASS_NEXT_DROP] = "error-drop",
+ },
+};
+
+
+VNET_FEATURE_INIT (ip4_gbp_sclass_2_id_feat, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-gbp-sclass-2-id",
+ .runs_before = VNET_FEATURES ("gbp-learn-ip4"),
+};
+VNET_FEATURE_INIT (ip6_gbp_sclass_2_id_feat, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-gbp-sclass-2-id",
+ .runs_before = VNET_FEATURES ("gbp-learn-ip6"),
+};
+VNET_FEATURE_INIT (ip4_gbp_id_2_sclass_feat, static) =
+{
+ .arc_name = "ip4-output",
+ .node_name = "ip4-gbp-id-2-sclass",
+};
+VNET_FEATURE_INIT (ip6_gbp_id_2_sclass_feat, static) =
+{
+ .arc_name = "ip6-output",
+ .node_name = "ip6-gbp-id-2-sclass",
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */