Diffstat (limited to 'extras/deprecated')
-rw-r--r--  extras/deprecated/plugins/gbp/CMakeLists.txt             |   54
-rw-r--r--  extras/deprecated/plugins/gbp/gbp.api                    |  470
-rw-r--r--  extras/deprecated/plugins/gbp/gbp.h                      |   80
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_api.c                  | 1154
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_bridge_domain.c        |  503
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_bridge_domain.h        |  156
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_classify.c             |   71
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_classify.h             |   94
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_classify_node.c        |  628
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_contract.c             |  819
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_contract.h             |  362
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_endpoint.c             | 1597
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_endpoint.h             |  376
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_endpoint_group.c       |  402
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_endpoint_group.h       |  166
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_ext_itf.c              |  293
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_ext_itf.h              |   92
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_fwd.c                  |   56
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_fwd_dpo.c              |  306
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_fwd_dpo.h              |   62
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_fwd_node.c             |  163
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_itf.c                  |  574
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_itf.h                  |   97
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_learn.c                |   76
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_learn.h                |   63
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_learn_node.c           |  718
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_policy.c               |   79
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_policy.h               |   57
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_policy_dpo.c           |  420
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_policy_dpo.h           |  121
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_policy_node.c          |  341
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_recirc.c               |  292
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_recirc.h               |   88
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_route_domain.c         |  447
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_route_domain.h         |   84
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_scanner.c              |  136
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_scanner.h              |   30
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_subnet.c               |  598
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_subnet.h               |   53
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_types.h                |   36
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_vxlan.c                |  654
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_vxlan.h                |  135
-rw-r--r--  extras/deprecated/plugins/gbp/gbp_vxlan_node.c           |  218
-rw-r--r--  extras/deprecated/plugins/gbp/test_gbp.py                | 5926
-rw-r--r--  extras/deprecated/plugins/l2e/CMakeLists.txt             |   28
-rw-r--r--  extras/deprecated/plugins/l2e/l2e.api                    |   39
-rw-r--r--  extras/deprecated/plugins/l2e/l2e.c                      |  198
-rw-r--r--  extras/deprecated/plugins/l2e/l2e.h                      |   84
-rw-r--r--  extras/deprecated/plugins/l2e/l2e_api.c                  |   89
-rw-r--r--  extras/deprecated/plugins/l2e/l2e_node.c                 |  283
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/decap.c                 | 1050
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/dir.dox                 |   24
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/encap.c                 |  601
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/test_vxlan_gbp.py       |  293
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/vpp_vxlan_gbp_tunnel.py |   75
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.api           |  100
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.c             | 1193
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.h             |  250
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_api.c         |  217
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_error.def     |   17
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_packet.c      |   60
-rw-r--r--  extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_packet.h      |  173
62 files changed, 23921 insertions, 0 deletions
diff --git a/extras/deprecated/plugins/gbp/CMakeLists.txt b/extras/deprecated/plugins/gbp/CMakeLists.txt
new file mode 100644
index 00000000000..95f664ff08e
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/CMakeLists.txt
@@ -0,0 +1,54 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(gbp
+ SOURCES
+ gbp_api.c
+ gbp_bridge_domain.c
+ gbp_classify.c
+ gbp_classify_node.c
+ gbp_contract.c
+ gbp_endpoint.c
+ gbp_endpoint_group.c
+ gbp_ext_itf.c
+ gbp_fwd.c
+ gbp_fwd_dpo.c
+ gbp_fwd_node.c
+ gbp_itf.c
+ gbp_learn.c
+ gbp_learn_node.c
+ gbp_policy.c
+ gbp_policy_dpo.c
+ gbp_policy_node.c
+ gbp_recirc.c
+ gbp_route_domain.c
+ gbp_scanner.c
+ gbp_subnet.c
+ gbp_vxlan.c
+ gbp_vxlan_node.c
+
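+ # node sources listed below are additionally compiled as per-CPU
+ # micro-architecture variants by the VPP build system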
+ MULTIARCH_SOURCES
+ gbp_classify_node.c
+ gbp_fwd_dpo.c
+ gbp_fwd_node.c
+ gbp_learn_node.c
+ gbp_policy_dpo.c
+ gbp_policy_node.c
+ gbp_vxlan_node.c
+
+ API_FILES
+ gbp.api
+
+ INSTALL_HEADERS
+ gbp.h
+)
diff --git a/extras/deprecated/plugins/gbp/gbp.api b/extras/deprecated/plugins/gbp/gbp.api
new file mode 100644
index 00000000000..525e70536bd
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp.api
@@ -0,0 +1,470 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option version = "2.0.0";
+
+import "vnet/ip/ip_types.api";
+import "vnet/ethernet/ethernet_types.api";
+import "vnet/interface_types.api";
+
+enum gbp_bridge_domain_flags
+{
+ GBP_BD_API_FLAG_NONE = 0,
+ GBP_BD_API_FLAG_DO_NOT_LEARN = 1,
+ GBP_BD_API_FLAG_UU_FWD_DROP = 2,
+ GBP_BD_API_FLAG_MCAST_DROP = 4,
+ GBP_BD_API_FLAG_UCAST_ARP = 8,
+};
+
+typedef gbp_bridge_domain
+{
+ u32 bd_id;
+ u32 rd_id;
+ vl_api_gbp_bridge_domain_flags_t flags;
+ vl_api_interface_index_t bvi_sw_if_index;
+ vl_api_interface_index_t uu_fwd_sw_if_index;
+ vl_api_interface_index_t bm_flood_sw_if_index;
+};
+
+autoreply define gbp_bridge_domain_add
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ vl_api_gbp_bridge_domain_t bd;
+};
+autoreply define gbp_bridge_domain_del
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+};
+autoreply define gbp_bridge_domain_dump
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+};
+define gbp_bridge_domain_details
+{
+ option status="in_progress";
+ u32 context;
+ vl_api_gbp_bridge_domain_t bd;
+};
+
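+/* sclass/dclass values are interpreted within a contract scope; the
+ * scope is a property of the route-domain (see gbp_route_domain below) */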
+typedef u16 gbp_scope;
+
+typedef gbp_route_domain
+{
+ u32 rd_id;
+ u32 ip4_table_id;
+ u32 ip6_table_id;
+ vl_api_interface_index_t ip4_uu_sw_if_index;
+ vl_api_interface_index_t ip6_uu_sw_if_index;
+ vl_api_gbp_scope_t scope;
+};
+
+autoreply define gbp_route_domain_add
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ vl_api_gbp_route_domain_t rd;
+};
+autoreply define gbp_route_domain_del
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ u32 rd_id;
+};
+autoreply define gbp_route_domain_dump
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+};
+define gbp_route_domain_details
+{
+ option status="in_progress";
+ u32 context;
+ vl_api_gbp_route_domain_t rd;
+};
+
+/** \brief Endpoint
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+
+enum gbp_endpoint_flags
+{
+ GBP_API_ENDPOINT_FLAG_NONE = 0,
+ GBP_API_ENDPOINT_FLAG_BOUNCE = 0x1,
+ GBP_API_ENDPOINT_FLAG_REMOTE = 0x2,
+ GBP_API_ENDPOINT_FLAG_LEARNT = 0x4,
+ GBP_API_ENDPOINT_FLAG_EXTERNAL = 0x8,
+};
+
+typedef gbp_endpoint_tun
+{
+ vl_api_address_t src;
+ vl_api_address_t dst;
+};
+
+typedef gbp_endpoint
+{
+ vl_api_interface_index_t sw_if_index;
+ u16 sclass;
+ vl_api_gbp_endpoint_flags_t flags;
+ vl_api_mac_address_t mac;
+ vl_api_gbp_endpoint_tun_t tun;
+ u8 n_ips;
+ vl_api_address_t ips[n_ips];
+};
+
+define gbp_endpoint_add
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ vl_api_gbp_endpoint_t endpoint;
+};
+
+define gbp_endpoint_add_reply
+{
+ option status="in_progress";
+ u32 context;
+ i32 retval;
+ u32 handle;
+};
+
+autoreply define gbp_endpoint_del
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ u32 handle;
+};
+
+define gbp_endpoint_dump
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+};
+
+define gbp_endpoint_details
+{
+ option status="in_progress";
+ u32 context;
+ f64 age;
+ u32 handle;
+ vl_api_gbp_endpoint_t endpoint;
+};
+
+typedef gbp_endpoint_retention
+{
+ u32 remote_ep_timeout;
+};
+
+typedef gbp_endpoint_group
+{
+ u32 vnid;
+ u16 sclass;
+ u32 bd_id;
+ u32 rd_id;
+ vl_api_interface_index_t uplink_sw_if_index;
+ vl_api_gbp_endpoint_retention_t retention;
+};
+
+autoreply define gbp_endpoint_group_add
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ vl_api_gbp_endpoint_group_t epg;
+};
+autoreply define gbp_endpoint_group_del
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ u16 sclass;
+};
+
+define gbp_endpoint_group_dump
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+};
+
+define gbp_endpoint_group_details
+{
+ option status="in_progress";
+ u32 context;
+ vl_api_gbp_endpoint_group_t epg;
+};
+
+typedef gbp_recirc
+{
+ vl_api_interface_index_t sw_if_index;
+ u16 sclass;
+ bool is_ext;
+};
+
+autoreply define gbp_recirc_add_del
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ bool is_add;
+ vl_api_gbp_recirc_t recirc;
+};
+
+define gbp_recirc_dump
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+};
+
+define gbp_recirc_details
+{
+ option status="in_progress";
+ u32 context;
+ vl_api_gbp_recirc_t recirc;
+};
+
+enum gbp_subnet_type
+{
+ GBP_API_SUBNET_TRANSPORT,
+ GBP_API_SUBNET_STITCHED_INTERNAL,
+ GBP_API_SUBNET_STITCHED_EXTERNAL,
+ GBP_API_SUBNET_L3_OUT,
+ GBP_API_SUBNET_ANON_L3_OUT,
+};
+
+typedef gbp_subnet
+{
+ u32 rd_id;
+ vl_api_interface_index_t sw_if_index [default=0xffffffff];
+ u16 sclass [default=0xffffffff];
+ vl_api_gbp_subnet_type_t type;
+ vl_api_prefix_t prefix;
+};
+
+autoreply define gbp_subnet_add_del
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ bool is_add;
+ vl_api_gbp_subnet_t subnet;
+};
+
+define gbp_subnet_dump
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+};
+
+define gbp_subnet_details
+{
+ option status="in_progress";
+ u32 context;
+ vl_api_gbp_subnet_t subnet;
+};
+
+typedef gbp_next_hop
+{
+ vl_api_address_t ip;
+ vl_api_mac_address_t mac;
+ u32 bd_id;
+ u32 rd_id;
+};
+
+enum gbp_hash_mode
+{
+ GBP_API_HASH_MODE_SRC_IP,
+ GBP_API_HASH_MODE_DST_IP,
+ GBP_API_HASH_MODE_SYMMETRIC,
+};
+
+typedef gbp_next_hop_set
+{
+ vl_api_gbp_hash_mode_t hash_mode;
+ u8 n_nhs;
+ vl_api_gbp_next_hop_t nhs[8];
+};
+
+enum gbp_rule_action
+{
+ GBP_API_RULE_PERMIT,
+ GBP_API_RULE_DENY,
+ GBP_API_RULE_REDIRECT,
+};
+
+typedef gbp_rule
+{
+ vl_api_gbp_rule_action_t action;
+ vl_api_gbp_next_hop_set_t nh_set;
+};
+
+typedef gbp_contract
+{
+ vl_api_gbp_scope_t scope;
+ u16 sclass;
+ u16 dclass;
+ u32 acl_index;
+ u8 n_ether_types;
+ u16 allowed_ethertypes[16];
+ u8 n_rules;
+ vl_api_gbp_rule_t rules[n_rules];
+};
+
+define gbp_contract_add_del
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ bool is_add;
+ vl_api_gbp_contract_t contract;
+};
+define gbp_contract_add_del_reply
+{
+ option status="in_progress";
+ u32 context;
+ i32 retval;
+ u32 stats_index;
+};
+
+define gbp_contract_dump
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+};
+
+define gbp_contract_details
+{
+ option status="in_progress";
+ u32 context;
+ vl_api_gbp_contract_t contract;
+};
+
+/**
+ * @brief Configure a 'base' tunnel from which learned tunnels
+ * are permitted to derive
+ * A base tunnel consists only of the VNI; any src/dst IP
+ * pair is thus allowed.
+ */
+enum gbp_vxlan_tunnel_mode
+{
+ GBP_VXLAN_TUNNEL_MODE_L2,
+ GBP_VXLAN_TUNNEL_MODE_L3,
+};
+
+typedef gbp_vxlan_tunnel
+{
+ u32 vni;
+ vl_api_gbp_vxlan_tunnel_mode_t mode;
+ u32 bd_rd_id;
+ vl_api_ip4_address_t src;
+};
+
+define gbp_vxlan_tunnel_add
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ vl_api_gbp_vxlan_tunnel_t tunnel;
+};
+
+define gbp_vxlan_tunnel_add_reply
+{
+ option status="in_progress";
+ u32 context;
+ i32 retval;
+ vl_api_interface_index_t sw_if_index;
+};
+
+autoreply define gbp_vxlan_tunnel_del
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ u32 vni;
+};
+
+define gbp_vxlan_tunnel_dump
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+};
+
+define gbp_vxlan_tunnel_details
+{
+ option status="in_progress";
+ u32 context;
+ vl_api_gbp_vxlan_tunnel_t tunnel;
+};
+
+enum gbp_ext_itf_flags
+{
+ GBP_API_EXT_ITF_F_NONE = 0,
+ GBP_API_EXT_ITF_F_ANON = 1,
+};
+
+typedef gbp_ext_itf
+{
+ vl_api_interface_index_t sw_if_index;
+ u32 bd_id;
+ u32 rd_id;
+ vl_api_gbp_ext_itf_flags_t flags;
+};
+
+autoreply define gbp_ext_itf_add_del
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ bool is_add;
+ vl_api_gbp_ext_itf_t ext_itf;
+};
+
+define gbp_ext_itf_dump
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+};
+
+define gbp_ext_itf_details
+{
+ option status="in_progress";
+ u32 context;
+ vl_api_gbp_ext_itf_t ext_itf;
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp.h b/extras/deprecated/plugins/gbp/gbp.h
new file mode 100644
index 00000000000..50039b3bdcf
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Group Based Policy (GBP) defines:
+ * - endpoints: typically a VM or container that is connected to the
+ * virtual switch/router (i.e. to VPP)
+ * - endpoint-group: (EPG) a collection of endpoints
+ * - policy: rules determining which traffic can pass between EPGs,
+ * a.k.a. a 'contract'
+ *
+ * Here, policy is implemented via an ACL.
+ * EPG classification for transit packets is determined by:
+ * - source EPG: from the packet's input interface
+ * - destination EPG: from the packet's destination IP address.
+ *
+ */
+
+#ifndef __GBP_H__
+#define __GBP_H__
+
+#include <plugins/acl/exports.h>
+
+#include <plugins/gbp/gbp_types.h>
+#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_endpoint_group.h>
+#include <plugins/gbp/gbp_subnet.h>
+#include <plugins/gbp/gbp_recirc.h>
+
+typedef struct
+{
+ u32 gbp_acl_user_id;
+ acl_plugin_methods_t acl_plugin;
+} gbp_main_t;
+
+extern gbp_main_t gbp_main;
+
+typedef enum gbp_policy_type_t_
+{
+ GBP_POLICY_PORT,
+ GBP_POLICY_MAC,
+ GBP_POLICY_LPM,
+ GBP_N_POLICY
+#define GBP_N_POLICY GBP_N_POLICY
+} gbp_policy_type_t;
+
+/**
+ * Grouping of global data for the GBP source EPG classification feature
+ */
+typedef struct gbp_policy_main_t_
+{
+ /**
+ * Next nodes for L2 output features
+ */
+ u32 l2_output_feat_next[GBP_N_POLICY][32];
+} gbp_policy_main_t;
+
+extern gbp_policy_main_t gbp_policy_main;
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
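[Editorial note] The header comment above describes the GBP forwarding model:
the source class is derived from the packet's input interface, the destination
class from its destination IP, and a contract between the two classes decides
the packet's fate. A minimal, self-contained C sketch of that flow follows;
the lookup helpers and class values are toy stand-ins, not the plugin's API:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t sclass_t;

/* toy stand-in for source-class lookup by input interface */
static sclass_t
sclass_from_input_itf (uint32_t sw_if_index)
{
  return (1 == sw_if_index) ? 100 : 200;
}

/* toy stand-in for destination-class lookup by destination IP */
static sclass_t
dclass_from_dst_ip (uint32_t dst_ip_host_order)
{
  return (10 == (dst_ip_host_order >> 24)) ? 200 : 100;
}

/* a contract keyed on (src-class, dst-class); here a single permit rule */
static int
contract_permits (sclass_t src, sclass_t dst)
{
  return (100 == src && 200 == dst);
}

int
main (void)
{
  sclass_t src = sclass_from_input_itf (1);
  sclass_t dst = dclass_from_dst_ip (0x0a000001); /* 10.0.0.1 */

  printf ("verdict: %s\n", contract_permits (src, dst) ? "permit" : "deny");
  return (0);
}

In the real plugin the same decision is split across graph nodes (see
gbp_classify_node.c and gbp_policy_node.c in the diffstat) and the contract
lookup is keyed on (scope, sclass, dclass).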
diff --git a/extras/deprecated/plugins/gbp/gbp_api.c b/extras/deprecated/plugins/gbp/gbp_api.c
new file mode 100644
index 00000000000..ab89172b1af
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_api.c
@@ -0,0 +1,1154 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip_types_api.h>
+#include <vnet/ethernet/ethernet_types_api.h>
+#include <vpp/app/version.h>
+
+#include <gbp/gbp.h>
+#include <gbp/gbp_learn.h>
+#include <gbp/gbp_itf.h>
+#include <gbp/gbp_vxlan.h>
+#include <gbp/gbp_bridge_domain.h>
+#include <gbp/gbp_route_domain.h>
+#include <gbp/gbp_ext_itf.h>
+#include <gbp/gbp_contract.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+/* define message IDs */
+#include <gbp/gbp.api_enum.h>
+#include <gbp/gbp.api_types.h>
+#include <vnet/format_fns.h>
+#include <vlibapi/api_helper_macros.h>
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+
+gbp_main_t gbp_main;
+
+static u16 msg_id_base;
+
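+/* this plugin's message ids are offset by the base id allocated when the
+ * message table is registered in gbp_init() below */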
+#define GBP_MSG_BASE msg_id_base
+
+static gbp_endpoint_flags_t
+gbp_endpoint_flags_decode (vl_api_gbp_endpoint_flags_t v)
+{
+ gbp_endpoint_flags_t f = GBP_ENDPOINT_FLAG_NONE;
+
+ v = ntohl (v);
+
+ if (v & GBP_API_ENDPOINT_FLAG_BOUNCE)
+ f |= GBP_ENDPOINT_FLAG_BOUNCE;
+ if (v & GBP_API_ENDPOINT_FLAG_REMOTE)
+ f |= GBP_ENDPOINT_FLAG_REMOTE;
+ if (v & GBP_API_ENDPOINT_FLAG_LEARNT)
+ f |= GBP_ENDPOINT_FLAG_LEARNT;
+ if (v & GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ f |= GBP_ENDPOINT_FLAG_EXTERNAL;
+
+ return (f);
+}
+
+static vl_api_gbp_endpoint_flags_t
+gbp_endpoint_flags_encode (gbp_endpoint_flags_t f)
+{
+ vl_api_gbp_endpoint_flags_t v = 0;
+
+ if (f & GBP_ENDPOINT_FLAG_BOUNCE)
+ v |= GBP_API_ENDPOINT_FLAG_BOUNCE;
+ if (f & GBP_ENDPOINT_FLAG_REMOTE)
+ v |= GBP_API_ENDPOINT_FLAG_REMOTE;
+ if (f & GBP_ENDPOINT_FLAG_LEARNT)
+ v |= GBP_API_ENDPOINT_FLAG_LEARNT;
+ if (f & GBP_ENDPOINT_FLAG_EXTERNAL)
+ v |= GBP_API_ENDPOINT_FLAG_EXTERNAL;
+
+ v = htonl (v);
+
+ return (v);
+}
+
+static void
+vl_api_gbp_endpoint_add_t_handler (vl_api_gbp_endpoint_add_t * mp)
+{
+ vl_api_gbp_endpoint_add_reply_t *rmp;
+ gbp_endpoint_flags_t gef;
+ u32 sw_if_index, handle;
+ ip46_address_t *ips;
+ mac_address_t mac;
+ int rv = 0, ii;
+
+ handle = INDEX_INVALID;
+
+ VALIDATE_SW_IF_INDEX (&(mp->endpoint));
+
+ gef = gbp_endpoint_flags_decode (mp->endpoint.flags);
+ ips = NULL;
+ sw_if_index = ntohl (mp->endpoint.sw_if_index);
+
+ if (mp->endpoint.n_ips)
+ {
+ vec_validate (ips, mp->endpoint.n_ips - 1);
+
+ vec_foreach_index (ii, ips)
+ {
+ ip_address_decode (&mp->endpoint.ips[ii], &ips[ii]);
+ }
+ }
+ mac_address_decode (mp->endpoint.mac, &mac);
+
+ if (GBP_ENDPOINT_FLAG_REMOTE & gef)
+ {
+ ip46_address_t tun_src, tun_dst;
+
+ ip_address_decode (&mp->endpoint.tun.src, &tun_src);
+ ip_address_decode (&mp->endpoint.tun.dst, &tun_dst);
+
+ rv = gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_CP,
+ sw_if_index, ips, &mac,
+ INDEX_INVALID, INDEX_INVALID,
+ ntohs (mp->endpoint.sclass),
+ gef, &tun_src, &tun_dst, &handle);
+ }
+ else
+ {
+ rv = gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_CP,
+ sw_if_index, ips, &mac,
+ INDEX_INVALID, INDEX_INVALID,
+ ntohs (mp->endpoint.sclass),
+ gef, NULL, NULL, &handle);
+ }
+ vec_free (ips);
+ BAD_SW_IF_INDEX_LABEL;
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_GBP_ENDPOINT_ADD_REPLY + GBP_MSG_BASE,
+ ({
+ rmp->handle = htonl (handle);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_gbp_endpoint_del_t_handler (vl_api_gbp_endpoint_del_t * mp)
+{
+ vl_api_gbp_endpoint_del_reply_t *rmp;
+ int rv = 0;
+
+ gbp_endpoint_unlock (GBP_ENDPOINT_SRC_CP, ntohl (mp->handle));
+
+ REPLY_MACRO (VL_API_GBP_ENDPOINT_DEL_REPLY + GBP_MSG_BASE);
+}
+
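+/* carries the client registration and request context through the pool
+ * walks below, so each _details reply can be matched to its _dump request */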
+typedef struct gbp_walk_ctx_t_
+{
+ vl_api_registration_t *reg;
+ u32 context;
+} gbp_walk_ctx_t;
+
+static walk_rc_t
+gbp_endpoint_send_details (index_t gei, void *args)
+{
+ vl_api_gbp_endpoint_details_t *mp;
+ gbp_endpoint_loc_t *gel;
+ gbp_endpoint_fwd_t *gef;
+ gbp_endpoint_t *ge;
+ gbp_walk_ctx_t *ctx;
+ u8 n_ips, ii;
+
+ ctx = args;
+ ge = gbp_endpoint_get (gei);
+
+ n_ips = vec_len (ge->ge_key.gek_ips);
+ mp = vl_msg_api_alloc (sizeof (*mp) + (sizeof (*mp->endpoint.ips) * n_ips));
+ if (!mp)
+ return (WALK_STOP);
+
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_ENDPOINT_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ gel = &ge->ge_locs[0];
+ gef = &ge->ge_fwd;
+
+ if (gbp_endpoint_is_remote (ge))
+ {
+ mp->endpoint.sw_if_index = ntohl (gel->tun.gel_parent_sw_if_index);
+ ip_address_encode (&gel->tun.gel_src, IP46_TYPE_ANY,
+ &mp->endpoint.tun.src);
+ ip_address_encode (&gel->tun.gel_dst, IP46_TYPE_ANY,
+ &mp->endpoint.tun.dst);
+ }
+ else
+ {
+ mp->endpoint.sw_if_index =
+ ntohl (gbp_itf_get_sw_if_index (gef->gef_itf));
+ }
+ mp->endpoint.sclass = ntohs (ge->ge_fwd.gef_sclass);
+ mp->endpoint.n_ips = n_ips;
+ mp->endpoint.flags = gbp_endpoint_flags_encode (gef->gef_flags);
+ mp->handle = htonl (gei);
+ mp->age =
+ clib_host_to_net_f64 (vlib_time_now (vlib_get_main ()) -
+ ge->ge_last_time);
+ mac_address_encode (&ge->ge_key.gek_mac, mp->endpoint.mac);
+
+ vec_foreach_index (ii, ge->ge_key.gek_ips)
+ {
+ ip_address_encode (&ge->ge_key.gek_ips[ii].fp_addr,
+ IP46_TYPE_ANY, &mp->endpoint.ips[ii]);
+ }
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (WALK_CONTINUE);
+}
+
+static void
+vl_api_gbp_endpoint_dump_t_handler (vl_api_gbp_endpoint_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_endpoint_walk (gbp_endpoint_send_details, &ctx);
+}
+
+static void
+gbp_retention_decode (const vl_api_gbp_endpoint_retention_t * in,
+ gbp_endpoint_retention_t * out)
+{
+ out->remote_ep_timeout = ntohl (in->remote_ep_timeout);
+}
+
+static void
+ vl_api_gbp_endpoint_group_add_t_handler
+ (vl_api_gbp_endpoint_group_add_t * mp)
+{
+ vl_api_gbp_endpoint_group_add_reply_t *rmp;
+ gbp_endpoint_retention_t retention;
+ int rv = 0;
+
+ gbp_retention_decode (&mp->epg.retention, &retention);
+
+ rv = gbp_endpoint_group_add_and_lock (ntohl (mp->epg.vnid),
+ ntohs (mp->epg.sclass),
+ ntohl (mp->epg.bd_id),
+ ntohl (mp->epg.rd_id),
+ ntohl (mp->epg.uplink_sw_if_index),
+ &retention);
+
+ REPLY_MACRO (VL_API_GBP_ENDPOINT_GROUP_ADD_REPLY + GBP_MSG_BASE);
+}
+
+static void
+ vl_api_gbp_endpoint_group_del_t_handler
+ (vl_api_gbp_endpoint_group_del_t * mp)
+{
+ vl_api_gbp_endpoint_group_del_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_endpoint_group_delete (ntohs (mp->sclass));
+
+ REPLY_MACRO (VL_API_GBP_ENDPOINT_GROUP_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static gbp_bridge_domain_flags_t
+gbp_bridge_domain_flags_from_api (vl_api_gbp_bridge_domain_flags_t a)
+{
+ gbp_bridge_domain_flags_t g;
+
+ g = GBP_BD_FLAG_NONE;
+ a = clib_net_to_host_u32 (a);
+
+ if (a & GBP_BD_API_FLAG_DO_NOT_LEARN)
+ g |= GBP_BD_FLAG_DO_NOT_LEARN;
+ if (a & GBP_BD_API_FLAG_UU_FWD_DROP)
+ g |= GBP_BD_FLAG_UU_FWD_DROP;
+ if (a & GBP_BD_API_FLAG_MCAST_DROP)
+ g |= GBP_BD_FLAG_MCAST_DROP;
+ if (a & GBP_BD_API_FLAG_UCAST_ARP)
+ g |= GBP_BD_FLAG_UCAST_ARP;
+
+ return (g);
+}
+
+static void
+vl_api_gbp_bridge_domain_add_t_handler (vl_api_gbp_bridge_domain_add_t * mp)
+{
+ vl_api_gbp_bridge_domain_add_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_bridge_domain_add_and_lock (ntohl (mp->bd.bd_id),
+ ntohl (mp->bd.rd_id),
+ gbp_bridge_domain_flags_from_api
+ (mp->bd.flags),
+ ntohl (mp->bd.bvi_sw_if_index),
+ ntohl (mp->bd.uu_fwd_sw_if_index),
+ ntohl (mp->bd.bm_flood_sw_if_index));
+
+ REPLY_MACRO (VL_API_GBP_BRIDGE_DOMAIN_ADD_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_bridge_domain_del_t_handler (vl_api_gbp_bridge_domain_del_t * mp)
+{
+ vl_api_gbp_bridge_domain_del_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_bridge_domain_delete (ntohl (mp->bd_id));
+
+ REPLY_MACRO (VL_API_GBP_BRIDGE_DOMAIN_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_route_domain_add_t_handler (vl_api_gbp_route_domain_add_t * mp)
+{
+ vl_api_gbp_route_domain_add_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_route_domain_add_and_lock (ntohl (mp->rd.rd_id),
+ ntohs (mp->rd.scope),
+ ntohl (mp->rd.ip4_table_id),
+ ntohl (mp->rd.ip6_table_id),
+ ntohl (mp->rd.ip4_uu_sw_if_index),
+ ntohl (mp->rd.ip6_uu_sw_if_index));
+
+ REPLY_MACRO (VL_API_GBP_ROUTE_DOMAIN_ADD_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_route_domain_del_t_handler (vl_api_gbp_route_domain_del_t * mp)
+{
+ vl_api_gbp_route_domain_del_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_route_domain_delete (ntohl (mp->rd_id));
+
+ REPLY_MACRO (VL_API_GBP_ROUTE_DOMAIN_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static int
+gbp_subnet_type_from_api (vl_api_gbp_subnet_type_t a, gbp_subnet_type_t * t)
+{
+ a = clib_net_to_host_u32 (a);
+
+ switch (a)
+ {
+ case GBP_API_SUBNET_TRANSPORT:
+ *t = GBP_SUBNET_TRANSPORT;
+ return (0);
+ case GBP_API_SUBNET_L3_OUT:
+ *t = GBP_SUBNET_L3_OUT;
+ return (0);
+ case GBP_API_SUBNET_ANON_L3_OUT:
+ *t = GBP_SUBNET_ANON_L3_OUT;
+ return (0);
+ case GBP_API_SUBNET_STITCHED_INTERNAL:
+ *t = GBP_SUBNET_STITCHED_INTERNAL;
+ return (0);
+ case GBP_API_SUBNET_STITCHED_EXTERNAL:
+ *t = GBP_SUBNET_STITCHED_EXTERNAL;
+ return (0);
+ }
+
+ return (-1);
+}
+
+static void
+vl_api_gbp_subnet_add_del_t_handler (vl_api_gbp_subnet_add_del_t * mp)
+{
+ vl_api_gbp_subnet_add_del_reply_t *rmp;
+ gbp_subnet_type_t type;
+ fib_prefix_t pfx;
+ int rv = 0;
+
+ ip_prefix_decode (&mp->subnet.prefix, &pfx);
+
+ rv = gbp_subnet_type_from_api (mp->subnet.type, &type);
+
+ if (0 != rv)
+ goto out;
+
+ if (mp->is_add)
+ rv = gbp_subnet_add (ntohl (mp->subnet.rd_id),
+ &pfx, type,
+ ntohl (mp->subnet.sw_if_index),
+ ntohs (mp->subnet.sclass));
+ else
+ rv = gbp_subnet_del (ntohl (mp->subnet.rd_id), &pfx);
+
+out:
+ REPLY_MACRO (VL_API_GBP_SUBNET_ADD_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static vl_api_gbp_subnet_type_t
+gbp_subnet_type_to_api (gbp_subnet_type_t t)
+{
+ vl_api_gbp_subnet_type_t a = 0;
+
+ switch (t)
+ {
+ case GBP_SUBNET_TRANSPORT:
+ a = GBP_API_SUBNET_TRANSPORT;
+ break;
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ a = GBP_API_SUBNET_STITCHED_INTERNAL;
+ break;
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ a = GBP_API_SUBNET_STITCHED_EXTERNAL;
+ break;
+ case GBP_SUBNET_L3_OUT:
+ a = GBP_API_SUBNET_L3_OUT;
+ break;
+ case GBP_SUBNET_ANON_L3_OUT:
+ a = GBP_API_SUBNET_ANON_L3_OUT;
+ break;
+ }
+
+ a = clib_host_to_net_u32 (a);
+
+ return (a);
+}
+
+static walk_rc_t
+gbp_subnet_send_details (u32 rd_id,
+ const fib_prefix_t * pfx,
+ gbp_subnet_type_t type,
+ u32 sw_if_index, sclass_t sclass, void *args)
+{
+ vl_api_gbp_subnet_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return (WALK_STOP);
+
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_SUBNET_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->subnet.type = gbp_subnet_type_to_api (type);
+ mp->subnet.sw_if_index = ntohl (sw_if_index);
+ mp->subnet.sclass = ntohs (sclass);
+ mp->subnet.rd_id = ntohl (rd_id);
+ ip_prefix_encode (pfx, &mp->subnet.prefix);
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (WALK_CONTINUE);
+}
+
+static void
+vl_api_gbp_subnet_dump_t_handler (vl_api_gbp_subnet_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_subnet_walk (gbp_subnet_send_details, &ctx);
+}
+
+static int
+gbp_endpoint_group_send_details (gbp_endpoint_group_t * gg, void *args)
+{
+ vl_api_gbp_endpoint_group_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return 1;
+
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_ENDPOINT_GROUP_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->epg.uplink_sw_if_index = ntohl (gg->gg_uplink_sw_if_index);
+ mp->epg.vnid = ntohl (gg->gg_vnid);
+ mp->epg.sclass = ntohs (gg->gg_sclass);
+ mp->epg.bd_id = ntohl (gbp_endpoint_group_get_bd_id (gg));
+ mp->epg.rd_id = ntohl (gbp_route_domain_get_rd_id (gg->gg_rd));
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (1);
+}
+
+static void
+vl_api_gbp_endpoint_group_dump_t_handler (vl_api_gbp_endpoint_group_dump_t *
+ mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_endpoint_group_walk (gbp_endpoint_group_send_details, &ctx);
+}
+
+static int
+gbp_bridge_domain_send_details (gbp_bridge_domain_t * gb, void *args)
+{
+ vl_api_gbp_bridge_domain_details_t *mp;
+ gbp_route_domain_t *gr;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return 1;
+
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_BRIDGE_DOMAIN_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ gr = gbp_route_domain_get (gb->gb_rdi);
+
+ mp->bd.bd_id = ntohl (gb->gb_bd_id);
+ mp->bd.rd_id = ntohl (gr->grd_id);
+ mp->bd.bvi_sw_if_index = ntohl (gb->gb_bvi_sw_if_index);
+ mp->bd.uu_fwd_sw_if_index = ntohl (gb->gb_uu_fwd_sw_if_index);
+ mp->bd.bm_flood_sw_if_index =
+ ntohl (gbp_itf_get_sw_if_index (gb->gb_bm_flood_itf));
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (1);
+}
+
+static void
+vl_api_gbp_bridge_domain_dump_t_handler (vl_api_gbp_bridge_domain_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_bridge_domain_walk (gbp_bridge_domain_send_details, &ctx);
+}
+
+static int
+gbp_route_domain_send_details (gbp_route_domain_t * grd, void *args)
+{
+ vl_api_gbp_route_domain_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return 1;
+
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_ROUTE_DOMAIN_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->rd.rd_id = ntohl (grd->grd_id);
+ mp->rd.ip4_uu_sw_if_index =
+ ntohl (grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP4]);
+ mp->rd.ip6_uu_sw_if_index =
+ ntohl (grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP6]);
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (1);
+}
+
+static void
+vl_api_gbp_route_domain_dump_t_handler (vl_api_gbp_route_domain_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_route_domain_walk (gbp_route_domain_send_details, &ctx);
+}
+
+static void
+vl_api_gbp_recirc_add_del_t_handler (vl_api_gbp_recirc_add_del_t * mp)
+{
+ vl_api_gbp_recirc_add_del_reply_t *rmp;
+ u32 sw_if_index;
+ int rv = 0;
+
+ sw_if_index = ntohl (mp->recirc.sw_if_index);
+ if (!vnet_sw_if_index_is_api_valid (sw_if_index))
+ goto bad_sw_if_index;
+
+ if (mp->is_add)
+ rv = gbp_recirc_add (sw_if_index,
+ ntohs (mp->recirc.sclass), mp->recirc.is_ext);
+ else
+ rv = gbp_recirc_delete (sw_if_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_GBP_RECIRC_ADD_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static walk_rc_t
+gbp_recirc_send_details (gbp_recirc_t * gr, void *args)
+{
+ vl_api_gbp_recirc_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return (WALK_STOP);
+
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_RECIRC_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->recirc.sclass = ntohs (gr->gr_sclass);
+ mp->recirc.sw_if_index = ntohl (gr->gr_sw_if_index);
+ mp->recirc.is_ext = gr->gr_is_ext;
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (WALK_CONTINUE);
+}
+
+static void
+vl_api_gbp_recirc_dump_t_handler (vl_api_gbp_recirc_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_recirc_walk (gbp_recirc_send_details, &ctx);
+}
+
+static void
+vl_api_gbp_ext_itf_add_del_t_handler (vl_api_gbp_ext_itf_add_del_t * mp)
+{
+ vl_api_gbp_ext_itf_add_del_reply_t *rmp;
+ u32 sw_if_index = ~0;
+ vl_api_gbp_ext_itf_t *ext_itf;
+ int rv = 0;
+
+ ext_itf = &mp->ext_itf;
+ /* &mp->ext_itf can never be NULL; decode the interface index directly */
+ sw_if_index = ntohl (ext_itf->sw_if_index);
+
+ if (!vnet_sw_if_index_is_api_valid (sw_if_index))
+ goto bad_sw_if_index;
+
+ if (mp->is_add)
+ rv = gbp_ext_itf_add (sw_if_index,
+ ntohl (ext_itf->bd_id), ntohl (ext_itf->rd_id),
+ ntohl (ext_itf->flags));
+ else
+ rv = gbp_ext_itf_delete (sw_if_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_GBP_EXT_ITF_ADD_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static walk_rc_t
+gbp_ext_itf_send_details (gbp_ext_itf_t * gx, void *args)
+{
+ vl_api_gbp_ext_itf_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return (WALK_STOP);
+
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_EXT_ITF_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->ext_itf.flags = ntohl (gx->gx_flags);
+ mp->ext_itf.bd_id = ntohl (gbp_bridge_domain_get_bd_id (gx->gx_bd));
+ mp->ext_itf.rd_id = ntohl (gbp_route_domain_get_rd_id (gx->gx_rd));
+ mp->ext_itf.sw_if_index = ntohl (gbp_itf_get_sw_if_index (gx->gx_itf));
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (WALK_CONTINUE);
+}
+
+static void
+vl_api_gbp_ext_itf_dump_t_handler (vl_api_gbp_ext_itf_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_ext_itf_walk (gbp_ext_itf_send_details, &ctx);
+}
+
+static int
+gbp_contract_rule_action_decode (vl_api_gbp_rule_action_t in,
+ gbp_rule_action_t * out)
+{
+ in = clib_net_to_host_u32 (in);
+
+ switch (in)
+ {
+ case GBP_API_RULE_PERMIT:
+ *out = GBP_RULE_PERMIT;
+ return (0);
+ case GBP_API_RULE_DENY:
+ *out = GBP_RULE_DENY;
+ return (0);
+ case GBP_API_RULE_REDIRECT:
+ *out = GBP_RULE_REDIRECT;
+ return (0);
+ }
+
+ return (-1);
+}
+
+static int
+gbp_hash_mode_decode (vl_api_gbp_hash_mode_t in, gbp_hash_mode_t * out)
+{
+ in = clib_net_to_host_u32 (in);
+
+ switch (in)
+ {
+ case GBP_API_HASH_MODE_SRC_IP:
+ *out = GBP_HASH_MODE_SRC_IP;
+ return (0);
+ case GBP_API_HASH_MODE_DST_IP:
+ *out = GBP_HASH_MODE_DST_IP;
+ return (0);
+ case GBP_API_HASH_MODE_SYMMETRIC:
+ *out = GBP_HASH_MODE_SYMMETRIC;
+ return (0);
+ }
+
+ return (-2);
+}
+
+static int
+gbp_next_hop_decode (const vl_api_gbp_next_hop_t * in, index_t * gnhi)
+{
+ ip46_address_t ip;
+ mac_address_t mac;
+ index_t grd, gbd;
+
+ gbd = gbp_bridge_domain_find_and_lock (ntohl (in->bd_id));
+
+ if (INDEX_INVALID == gbd)
+ return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
+
+ grd = gbp_route_domain_find_and_lock (ntohl (in->rd_id));
+
+ if (INDEX_INVALID == grd)
+ {
+ /* no such route-domain: drop the bridge-domain lock taken above */
+ gbp_bridge_domain_unlock (gbd);
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+ }
+
+ ip_address_decode (&in->ip, &ip);
+ mac_address_decode (in->mac, &mac);
+
+ *gnhi = gbp_next_hop_alloc (&ip, grd, &mac, gbd);
+
+ return (0);
+}
+
+static int
+gbp_next_hop_set_decode (const vl_api_gbp_next_hop_set_t * in,
+ gbp_hash_mode_t * hash_mode, index_t ** out)
+{
+ index_t *gnhis = NULL;
+ int rv;
+ u8 ii;
+
+ rv = gbp_hash_mode_decode (in->hash_mode, hash_mode);
+
+ if (0 != rv)
+ return rv;
+
+ vec_validate (gnhis, in->n_nhs - 1);
+
+ for (ii = 0; ii < in->n_nhs; ii++)
+ {
+ rv = gbp_next_hop_decode (&in->nhs[ii], &gnhis[ii]);
+
+ if (0 != rv)
+ {
+ vec_free (gnhis);
+ break;
+ }
+ }
+
+ *out = gnhis;
+ return (rv);
+}
+
+static int
+gbp_contract_rule_decode (const vl_api_gbp_rule_t * in, index_t * gui)
+{
+ gbp_hash_mode_t hash_mode;
+ gbp_rule_action_t action;
+ index_t *nhs = NULL;
+ int rv;
+
+ rv = gbp_contract_rule_action_decode (in->action, &action);
+
+ if (0 != rv)
+ return rv;
+
+ if (GBP_RULE_REDIRECT == action)
+ {
+ rv = gbp_next_hop_set_decode (&in->nh_set, &hash_mode, &nhs);
+
+ if (0 != rv)
+ return (rv);
+ }
+ else
+ {
+ hash_mode = GBP_HASH_MODE_SRC_IP;
+ }
+
+ *gui = gbp_rule_alloc (action, hash_mode, nhs);
+
+ return (rv);
+}
+
+static int
+gbp_contract_rules_decode (u8 n_rules,
+ const vl_api_gbp_rule_t * rules, index_t ** out)
+{
+ index_t *guis = NULL;
+ int rv;
+ u8 ii;
+
+ if (0 == n_rules)
+ {
+ *out = NULL;
+ return (0);
+ }
+
+ vec_validate (guis, n_rules - 1);
+
+ for (ii = 0; ii < n_rules; ii++)
+ {
+ rv = gbp_contract_rule_decode (&rules[ii], &guis[ii]);
+
+ if (0 != rv)
+ {
+ index_t *gui;
+ vec_foreach (gui, guis) gbp_rule_free (*gui);
+ vec_free (guis);
+ return (rv);
+ }
+ }
+
+ *out = guis;
+ return (rv);
+}
+
+static void
+vl_api_gbp_contract_add_del_t_handler (vl_api_gbp_contract_add_del_t * mp)
+{
+ vl_api_gbp_contract_add_del_reply_t *rmp;
+ u16 *allowed_ethertypes;
+ u32 stats_index = ~0;
+ index_t *rules;
+ int ii, rv = 0;
+ u8 n_et;
+
+ if (mp->is_add)
+ {
+ rv = gbp_contract_rules_decode (mp->contract.n_rules,
+ mp->contract.rules, &rules);
+ if (0 != rv)
+ goto out;
+
+ allowed_ethertypes = NULL;
+
+ /*
+ * allowed ether types
+ */
+ n_et = mp->contract.n_ether_types;
+ vec_validate (allowed_ethertypes, n_et - 1);
+
+ for (ii = 0; ii < n_et; ii++)
+ {
+ /* leave the ether types in network order */
+ allowed_ethertypes[ii] = mp->contract.allowed_ethertypes[ii];
+ }
+
+ rv = gbp_contract_update (ntohs (mp->contract.scope),
+ ntohs (mp->contract.sclass),
+ ntohs (mp->contract.dclass),
+ ntohl (mp->contract.acl_index),
+ rules, allowed_ethertypes, &stats_index);
+ }
+ else
+ rv = gbp_contract_delete (ntohs (mp->contract.scope),
+ ntohs (mp->contract.sclass),
+ ntohs (mp->contract.dclass));
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_GBP_CONTRACT_ADD_DEL_REPLY + GBP_MSG_BASE,
+ ({
+ rmp->stats_index = htonl (stats_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static int
+gbp_contract_send_details (gbp_contract_t * gbpc, void *args)
+{
+ vl_api_gbp_contract_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return 1;
+
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_CONTRACT_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->contract.sclass = ntohs (gbpc->gc_key.gck_src);
+ mp->contract.dclass = ntohs (gbpc->gc_key.gck_dst);
+ mp->contract.acl_index = ntohl (gbpc->gc_acl_index);
+ mp->contract.scope = ntohs (gbpc->gc_key.gck_scope);
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (1);
+}
+
+static void
+vl_api_gbp_contract_dump_t_handler (vl_api_gbp_contract_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_contract_walk (gbp_contract_send_details, &ctx);
+}
+
+static int
+gbp_vxlan_tunnel_mode_2_layer (vl_api_gbp_vxlan_tunnel_mode_t mode,
+ gbp_vxlan_tunnel_layer_t * l)
+{
+ mode = clib_net_to_host_u32 (mode);
+
+ switch (mode)
+ {
+ case GBP_VXLAN_TUNNEL_MODE_L2:
+ *l = GBP_VXLAN_TUN_L2;
+ return (0);
+ case GBP_VXLAN_TUNNEL_MODE_L3:
+ *l = GBP_VXLAN_TUN_L3;
+ return (0);
+ }
+ return (-1);
+}
+
+static void
+vl_api_gbp_vxlan_tunnel_add_t_handler (vl_api_gbp_vxlan_tunnel_add_t * mp)
+{
+ vl_api_gbp_vxlan_tunnel_add_reply_t *rmp;
+ gbp_vxlan_tunnel_layer_t layer;
+ ip4_address_t src;
+ u32 sw_if_index = ~0;
+ int rv = 0;
+
+ ip4_address_decode (mp->tunnel.src, &src);
+ rv = gbp_vxlan_tunnel_mode_2_layer (mp->tunnel.mode, &layer);
+
+ if (0 != rv)
+ goto out;
+
+ rv = gbp_vxlan_tunnel_add (ntohl (mp->tunnel.vni),
+ layer,
+ ntohl (mp->tunnel.bd_rd_id), &src, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_GBP_VXLAN_TUNNEL_ADD_REPLY + GBP_MSG_BASE,
+ ({
+ rmp->sw_if_index = htonl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_gbp_vxlan_tunnel_del_t_handler (vl_api_gbp_vxlan_tunnel_del_t * mp)
+{
+ vl_api_gbp_vxlan_tunnel_del_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_vxlan_tunnel_del (ntohl (mp->vni));
+
+ REPLY_MACRO (VL_API_GBP_VXLAN_TUNNEL_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static vl_api_gbp_vxlan_tunnel_mode_t
+gbp_vxlan_tunnel_layer_2_mode (gbp_vxlan_tunnel_layer_t layer)
+{
+ vl_api_gbp_vxlan_tunnel_mode_t mode = GBP_VXLAN_TUNNEL_MODE_L2;
+
+ switch (layer)
+ {
+ case GBP_VXLAN_TUN_L2:
+ mode = GBP_VXLAN_TUNNEL_MODE_L2;
+ break;
+ case GBP_VXLAN_TUN_L3:
+ mode = GBP_VXLAN_TUNNEL_MODE_L3;
+ break;
+ }
+ mode = clib_host_to_net_u32 (mode);
+
+ return (mode);
+}
+
+static walk_rc_t
+gbp_vxlan_tunnel_send_details (gbp_vxlan_tunnel_t * gt, void *args)
+{
+ vl_api_gbp_vxlan_tunnel_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return (WALK_STOP);
+
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = htons (VL_API_GBP_VXLAN_TUNNEL_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->tunnel.vni = htonl (gt->gt_vni);
+ mp->tunnel.mode = gbp_vxlan_tunnel_layer_2_mode (gt->gt_layer);
+ mp->tunnel.bd_rd_id = htonl (gt->gt_bd_rd_id);
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (WALK_CONTINUE);
+}
+
+static void
+vl_api_gbp_vxlan_tunnel_dump_t_handler (vl_api_gbp_vxlan_tunnel_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_vxlan_walk (gbp_vxlan_tunnel_send_details, &ctx);
+}
+
+#include <gbp/gbp.api.c>
+static clib_error_t *
+gbp_init (vlib_main_t * vm)
+{
+ gbp_main_t *gbpm = &gbp_main;
+
+ gbpm->gbp_acl_user_id = ~0;
+
+ /* Ask for a correctly-sized block of API message decode slots */
+ msg_id_base = setup_message_id_table ();
+
+ return (NULL);
+}
+
+VLIB_API_INIT_FUNCTION (gbp_init);
+
+/* *INDENT-OFF* */
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "Group Based Policy (GBP)",
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
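[Editorial note] Every _dump handler in gbp_api.c above follows the same
shape: resolve the client registration, capture the request context in a
gbp_walk_ctx_t, then walk a pool and emit one _details message per element.
A stripped-down sketch of that callback pattern, using toy types and printf
in place of the VPP API machinery:

#include <stdio.h>

typedef enum { WALK_STOP, WALK_CONTINUE } walk_rc_t;

typedef struct
{
  unsigned context; /* echoed in each reply so the client can match them */
} walk_ctx_t;

typedef walk_rc_t (*walk_cb_t) (int item, void *args);

static void
pool_walk (const int *items, int n_items, walk_cb_t cb, void *args)
{
  int i;

  for (i = 0; i < n_items; i++)
    if (WALK_STOP == cb (items[i], args))
      break;
}

/* stands in for the *_send_details callbacks: allocate, fill, send */
static walk_rc_t
send_details (int item, void *args)
{
  walk_ctx_t *ctx = args;

  printf ("details: item %d (context %u)\n", item, ctx->context);
  return (WALK_CONTINUE);
}

int
main (void)
{
  int pool[] = { 1, 2, 3 };
  walk_ctx_t ctx = { .context = 42 };

  pool_walk (pool, 3, send_details, &ctx);
  return (0);
}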
diff --git a/extras/deprecated/plugins/gbp/gbp_bridge_domain.c b/extras/deprecated/plugins/gbp/gbp_bridge_domain.c
new file mode 100644
index 00000000000..279169abb1d
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_bridge_domain.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_itf.h>
+
+#include <vnet/dpo/dvr_dpo.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_bvi.h>
+#include <vnet/l2/l2_fib.h>
+
+/**
+ * Pool of GBP bridge_domains
+ */
+gbp_bridge_domain_t *gbp_bridge_domain_pool;
+
+/**
+ * DB of bridge_domains
+ */
+gbp_bridge_domain_db_t gbp_bridge_domain_db;
+
+/**
+ * Map of BD index to contract scope
+ */
+gbp_scope_t *gbp_scope_by_bd_index;
+
+/**
+ * logger
+ */
+vlib_log_class_t gb_logger;
+
+#define GBP_BD_DBG(...) \
+ vlib_log_debug (gb_logger, __VA_ARGS__);
+
+index_t
+gbp_bridge_domain_index (const gbp_bridge_domain_t * gbd)
+{
+ return (gbd - gbp_bridge_domain_pool);
+}
+
+static void
+gbp_bridge_domain_lock (index_t i)
+{
+ gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (i);
+ gb->gb_locks++;
+}
+
+u32
+gbp_bridge_domain_get_bd_id (index_t gbdi)
+{
+ gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (gbdi);
+
+ return (gb->gb_bd_id);
+}
+
+static index_t
+gbp_bridge_domain_find (u32 bd_id)
+{
+ uword *p;
+
+ p = hash_get (gbp_bridge_domain_db.gbd_by_bd_id, bd_id);
+
+ if (NULL != p)
+ return p[0];
+
+ return (INDEX_INVALID);
+}
+
+index_t
+gbp_bridge_domain_find_and_lock (u32 bd_id)
+{
+ uword *p;
+
+ p = hash_get (gbp_bridge_domain_db.gbd_by_bd_id, bd_id);
+
+ if (NULL != p)
+ {
+ gbp_bridge_domain_lock (p[0]);
+ return p[0];
+ }
+ return (INDEX_INVALID);
+}
+
+static void
+gbp_bridge_domain_db_add (gbp_bridge_domain_t * gb)
+{
+ index_t gbi = gb - gbp_bridge_domain_pool;
+
+ hash_set (gbp_bridge_domain_db.gbd_by_bd_id, gb->gb_bd_id, gbi);
+ vec_validate_init_empty (gbp_bridge_domain_db.gbd_by_bd_index,
+ gb->gb_bd_index, INDEX_INVALID);
+ gbp_bridge_domain_db.gbd_by_bd_index[gb->gb_bd_index] = gbi;
+}
+
+static void
+gbp_bridge_domain_db_remove (gbp_bridge_domain_t * gb)
+{
+ hash_unset (gbp_bridge_domain_db.gbd_by_bd_id, gb->gb_bd_id);
+ gbp_bridge_domain_db.gbd_by_bd_index[gb->gb_bd_index] = INDEX_INVALID;
+}
+
+u8 *
+format_gbp_bridge_domain_flags (u8 * s, va_list * args)
+{
+ gbp_bridge_domain_flags_t gf = va_arg (*args, gbp_bridge_domain_flags_t);
+
+ if (gf)
+ {
+ if (gf & GBP_BD_FLAG_DO_NOT_LEARN)
+ s = format (s, "do-not-learn ");
+ if (gf & GBP_BD_FLAG_UU_FWD_DROP)
+ s = format (s, "uu-fwd-drop ");
+ if (gf & GBP_BD_FLAG_MCAST_DROP)
+ s = format (s, "mcast-drop ");
+ if (gf & GBP_BD_FLAG_UCAST_ARP)
+ s = format (s, "ucast-arp ");
+ }
+ else
+ {
+ s = format (s, "none");
+ }
+ return (s);
+}
+
+static u8 *
+format_gbp_bridge_domain_ptr (u8 * s, va_list * args)
+{
+ gbp_bridge_domain_t *gb = va_arg (*args, gbp_bridge_domain_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+
+ if (NULL != gb)
+ s =
+ format (s,
+ "[%d] bd:[%d,%d], bvi:%U uu-flood:%U bm-flood:%U flags:%U locks:%d",
+ gb - gbp_bridge_domain_pool, gb->gb_bd_id, gb->gb_bd_index,
+ format_vnet_sw_if_index_name, vnm, gb->gb_bvi_sw_if_index,
+ format_vnet_sw_if_index_name, vnm, gb->gb_uu_fwd_sw_if_index,
+ format_gbp_itf_hdl, gb->gb_bm_flood_itf,
+ format_gbp_bridge_domain_flags, gb->gb_flags, gb->gb_locks);
+ else
+ s = format (s, "NULL");
+
+ return (s);
+}
+
+u8 *
+format_gbp_bridge_domain (u8 * s, va_list * args)
+{
+ index_t gbi = va_arg (*args, index_t);
+
+ s =
+ format (s, "%U", format_gbp_bridge_domain_ptr,
+ gbp_bridge_domain_get (gbi));
+
+ return (s);
+}
+
+int
+gbp_bridge_domain_add_and_lock (u32 bd_id,
+ u32 rd_id,
+ gbp_bridge_domain_flags_t flags,
+ u32 bvi_sw_if_index,
+ u32 uu_fwd_sw_if_index,
+ u32 bm_flood_sw_if_index)
+{
+ gbp_bridge_domain_t *gb;
+ index_t gbi;
+
+ gbi = gbp_bridge_domain_find (bd_id);
+
+ if (INDEX_INVALID == gbi)
+ {
+ gbp_route_domain_t *gr;
+ u32 bd_index;
+
+ bd_index = bd_find_index (&bd_main, bd_id);
+
+ if (~0 == bd_index)
+ return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
+
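+ /* collect the L2 flags that are cleared from the BD below when the
+ * corresponding GBP 'drop' behaviour is requested */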
+ bd_flags_t bd_flags = L2_NONE;
+ if (flags & GBP_BD_FLAG_UU_FWD_DROP)
+ bd_flags |= L2_UU_FLOOD;
+ if (flags & GBP_BD_FLAG_MCAST_DROP)
+ bd_flags |= L2_FLOOD;
+
+ pool_get (gbp_bridge_domain_pool, gb);
+ clib_memset (gb, 0, sizeof (*gb));
+
+ gbi = gb - gbp_bridge_domain_pool;
+ gb->gb_bd_id = bd_id;
+ gb->gb_bd_index = bd_index;
+ gb->gb_uu_fwd_sw_if_index = uu_fwd_sw_if_index;
+ gb->gb_bvi_sw_if_index = bvi_sw_if_index;
+ gbp_itf_hdl_reset (&gb->gb_bm_flood_itf);
+ gb->gb_locks = 1;
+ gb->gb_flags = flags;
+ gb->gb_rdi = gbp_route_domain_find_and_lock (rd_id);
+
+ /*
+ * set the scope from the BD's RD's scope
+ */
+ gr = gbp_route_domain_get (gb->gb_rdi);
+ vec_validate (gbp_scope_by_bd_index, gb->gb_bd_index);
+ gbp_scope_by_bd_index[gb->gb_bd_index] = gr->grd_scope;
+
+ /*
+ * Set the BVI and uu-flood interfaces into the BD
+ */
+ gbp_bridge_domain_itf_add (gbi, gb->gb_bvi_sw_if_index,
+ L2_BD_PORT_TYPE_BVI);
+
+ if ((!(flags & GBP_BD_FLAG_UU_FWD_DROP) ||
+ (flags & GBP_BD_FLAG_UCAST_ARP)) &&
+ ~0 != gb->gb_uu_fwd_sw_if_index)
+ gbp_bridge_domain_itf_add (gbi, gb->gb_uu_fwd_sw_if_index,
+ L2_BD_PORT_TYPE_UU_FWD);
+
+ if (!(flags & GBP_BD_FLAG_MCAST_DROP) && ~0 != bm_flood_sw_if_index)
+ {
+ gb->gb_bm_flood_itf =
+ gbp_itf_l2_add_and_lock (bm_flood_sw_if_index, gbi);
+ gbp_itf_l2_set_input_feature (gb->gb_bm_flood_itf,
+ L2INPUT_FEAT_GBP_LEARN);
+ }
+
+ /*
+ * clear from the BD any of the L2 flags collected above
+ */
+ bd_set_flags (vlib_get_main (), bd_index, bd_flags, 0);
+
+ if (flags & GBP_BD_FLAG_UCAST_ARP)
+ {
+ bd_flags = L2_ARP_UFWD;
+ bd_set_flags (vlib_get_main (), bd_index, bd_flags, 1);
+ }
+
+ /*
+ * Add the BVI's MAC to the L2FIB
+ */
+ l2fib_add_entry (vnet_sw_interface_get_hw_address
+ (vnet_get_main (), gb->gb_bvi_sw_if_index),
+ gb->gb_bd_index, gb->gb_bvi_sw_if_index,
+ (L2FIB_ENTRY_RESULT_FLAG_STATIC |
+ L2FIB_ENTRY_RESULT_FLAG_BVI));
+
+ gbp_bridge_domain_db_add (gb);
+ }
+ else
+ {
+ gb = gbp_bridge_domain_get (gbi);
+ gb->gb_locks++;
+ }
+
+ GBP_BD_DBG ("add: %U", format_gbp_bridge_domain_ptr, gb);
+
+ return (0);
+}
+
+void
+gbp_bridge_domain_itf_add (index_t gbdi,
+ u32 sw_if_index, l2_bd_port_type_t type)
+{
+ gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (gbdi);
+
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (), MODE_L2_BRIDGE,
+ sw_if_index, gb->gb_bd_index, type, 0, 0);
+ /*
+ * adding an interface to the bridge enables learning on the
+ * interface. Disable learning on the interface by default for gbp
+ * interfaces
+ */
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_LEARN, 0);
+}
+
+void
+gbp_bridge_domain_itf_del (index_t gbdi,
+ u32 sw_if_index, l2_bd_port_type_t type)
+{
+ gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (gbdi);
+
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (), MODE_L3, sw_if_index,
+ gb->gb_bd_index, type, 0, 0);
+}
+
+void
+gbp_bridge_domain_unlock (index_t gbdi)
+{
+ gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (gbdi);
+
+ gb->gb_locks--;
+
+ if (0 == gb->gb_locks)
+ {
+ GBP_BD_DBG ("destroy: %U", format_gbp_bridge_domain_ptr, gb);
+
+ l2fib_del_entry (vnet_sw_interface_get_hw_address
+ (vnet_get_main (), gb->gb_bvi_sw_if_index),
+ gb->gb_bd_index, gb->gb_bvi_sw_if_index);
+
+ gbp_bridge_domain_itf_del (gbdi, gb->gb_bvi_sw_if_index,
+ L2_BD_PORT_TYPE_BVI);
+ if (~0 != gb->gb_uu_fwd_sw_if_index)
+ gbp_bridge_domain_itf_del (gbdi, gb->gb_uu_fwd_sw_if_index,
+ L2_BD_PORT_TYPE_UU_FWD);
+ gbp_itf_unlock (&gb->gb_bm_flood_itf);
+
+ gbp_bridge_domain_db_remove (gb);
+ gbp_route_domain_unlock (gb->gb_rdi);
+
+ pool_put (gbp_bridge_domain_pool, gb);
+ }
+}
+
+int
+gbp_bridge_domain_delete (u32 bd_id)
+{
+ index_t gbi;
+
+ GBP_BD_DBG ("del: %d", bd_id);
+ gbi = gbp_bridge_domain_find (bd_id);
+
+ if (INDEX_INVALID != gbi)
+ {
+ GBP_BD_DBG ("del: %U", format_gbp_bridge_domain, gbi);
+ gbp_bridge_domain_unlock (gbi);
+
+ return (0);
+ }
+
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+void
+gbp_bridge_domain_walk (gbp_bridge_domain_cb_t cb, void *ctx)
+{
+ gbp_bridge_domain_t *gbpe;
+
+ /* *INDENT-OFF* */
+ pool_foreach (gbpe, gbp_bridge_domain_pool)
+ {
+ if (!cb(gbpe, ctx))
+ break;
+ }
+ /* *INDENT-ON* */
+}
+
+static clib_error_t *
+gbp_bridge_domain_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ gbp_bridge_domain_flags_t flags;
+ u32 bm_flood_sw_if_index = ~0;
+ u32 uu_fwd_sw_if_index = ~0;
+ u32 bd_id = ~0, rd_id = ~0;
+ u32 bvi_sw_if_index = ~0;
+ u8 add = 1;
+
+ flags = GBP_BD_FLAG_NONE;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "bvi %U", unformat_vnet_sw_interface,
+ vnm, &bvi_sw_if_index))
+ ;
+ else if (unformat (input, "uu-fwd %U", unformat_vnet_sw_interface,
+ vnm, &uu_fwd_sw_if_index))
+ ;
+ else if (unformat (input, "bm-flood %U", unformat_vnet_sw_interface,
+ vnm, &bm_flood_sw_if_index))
+ ;
+ else if (unformat (input, "add"))
+ add = 1;
+ else if (unformat (input, "del"))
+ add = 0;
+ else if (unformat (input, "flags %d", &flags))
+ ;
+ else if (unformat (input, "bd %d", &bd_id))
+ ;
+ else if (unformat (input, "rd %d", &rd_id))
+ ;
+ else
+ break;
+ }
+
+ if (~0 == bd_id)
+ return clib_error_return (0, "BD-ID must be specified");
+ if (~0 == rd_id)
+ return clib_error_return (0, "RD-ID must be specified");
+
+ if (add)
+ {
+ if (~0 == bvi_sw_if_index)
+ return clib_error_return (0, "interface must be specified");
+
+ gbp_bridge_domain_add_and_lock (bd_id, rd_id,
+ flags,
+ bvi_sw_if_index,
+ uu_fwd_sw_if_index,
+ bm_flood_sw_if_index);
+ }
+ else
+ gbp_bridge_domain_delete (bd_id);
+
+ return (NULL);
+}
+
+/*?
+ * Configure a GBP bridge-domain
+ *
+ * @cliexpar
+ * @cliexstart{gbp bridge-domain [del] bd <ID> bvi <interface> [uu-fwd <interface>] [bm-flood <interface>] [flags <flags>]}
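+ * Example (interface names are hypothetical):
+ * gbp bridge-domain add bd 1 rd 1 bvi bvi0 uu-fwd vxlan-gbp-tunnel0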
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_bridge_domain_cli_node, static) = {
+ .path = "gbp bridge-domain",
+ .short_help = "gbp bridge-domain [del] bd <ID> bvi <interface> [uu-fwd <interface>] [bm-flood <interface>] [flags <flags>]",
+ .function = gbp_bridge_domain_cli,
+};
+/* *INDENT-ON* */
+
+static int
+gbp_bridge_domain_show_one (gbp_bridge_domain_t *gb, void *ctx)
+{
+ vlib_main_t *vm;
+
+ vm = ctx;
+ vlib_cli_output (vm, " %U", format_gbp_bridge_domain_ptr, gb);
+
+ return (1);
+}
+
+static clib_error_t *
+gbp_bridge_domain_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "Bridge-Domains:");
+ gbp_bridge_domain_walk (gbp_bridge_domain_show_one, vm);
+
+ return (NULL);
+}
+
+
+/*?
+ * Show Group Based Policy bridge-domains and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp bridge-domain}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_bridge_domain_show_node, static) = {
+ .path = "show gbp bridge-domain",
+ .short_help = "show gbp bridge-domain\n",
+ .function = gbp_bridge_domain_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_bridge_domain_init (vlib_main_t * vm)
+{
+ gb_logger = vlib_log_register_class ("gbp", "bd");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_bridge_domain_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_bridge_domain.h b/extras/deprecated/plugins/gbp/gbp_bridge_domain.h
new file mode 100644
index 00000000000..0449240083c
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_bridge_domain.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_BRIDGE_DOMAIN_H__
+#define __GBP_BRIDGE_DOMAIN_H__
+
+#include <plugins/gbp/gbp_types.h>
+#include <plugins/gbp/gbp_itf.h>
+
+#include <vnet/fib/fib_types.h>
+#include <vnet/l2/l2_bd.h>
+
+/**
+ * Bridge Domain Flags
+ */
+typedef enum gbp_bridge_domain_flags_t_
+{
+ GBP_BD_FLAG_NONE = 0,
+ GBP_BD_FLAG_DO_NOT_LEARN = (1 << 0),
+ GBP_BD_FLAG_UU_FWD_DROP = (1 << 1),
+ GBP_BD_FLAG_MCAST_DROP = (1 << 2),
+ GBP_BD_FLAG_UCAST_ARP = (1 << 3),
+} gbp_bridge_domain_flags_t;
+
+/**
+ * A Bridge Domain representation.
+ * This is a standard bridge-domain plus all the attributes it must
+ * have to support the GBP model.
+ */
+typedef struct gbp_bridge_domain_t_
+{
+ /**
+ * Bridge-domain ID
+ */
+ u32 gb_bd_id;
+ u32 gb_bd_index;
+
+ /**
+ * Index of the Route-domain this BD is associated with. This is used as the
+ * 'scope' of the packets for contract matching.
+ */
+ u32 gb_rdi;
+
+ /**
+   * Flags controlling behaviour
+ */
+ gbp_bridge_domain_flags_t gb_flags;
+
+ /**
+ * The BD's BVI interface (obligatory)
+ */
+ u32 gb_bvi_sw_if_index;
+
+ /**
+   * The BD's unknown-unicast forwarding (e.g. MAC spine-proxy) interface (optional)
+ */
+ u32 gb_uu_fwd_sw_if_index;
+
+ /**
+   * The BD's interface on which to send broadcast and multicast packets
+ */
+ gbp_itf_hdl_t gb_bm_flood_itf;
+
+ /**
+   * The index of the BD's VNI interface on which packets from
+   * unknown endpoints arrive
+ */
+ u32 gb_vni;
+
+ /**
+ * locks/references to the BD so it does not get deleted (from the API)
+ * whilst it is still being used
+ */
+ u32 gb_locks;
+} gbp_bridge_domain_t;
+
+extern void gbp_bridge_domain_itf_add (index_t gbdi,
+ u32 sw_if_index,
+ l2_bd_port_type_t type);
+extern void gbp_bridge_domain_itf_del (index_t gbdi,
+ u32 sw_if_index,
+ l2_bd_port_type_t type);
+
+extern int gbp_bridge_domain_add_and_lock (u32 bd_id,
+ u32 rd_id,
+ gbp_bridge_domain_flags_t flags,
+ u32 bvi_sw_if_index,
+ u32 uu_fwd_sw_if_index,
+ u32 bm_flood_sw_if_index);
+
+extern void gbp_bridge_domain_unlock (index_t gbi);
+extern index_t gbp_bridge_domain_find_and_lock (u32 bd_id);
+extern int gbp_bridge_domain_delete (u32 bd_id);
+extern index_t gbp_bridge_domain_index (const gbp_bridge_domain_t *);
+extern u32 gbp_bridge_domain_get_bd_id (index_t gbdi);
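+
+/*
+ * A minimal usage sketch (assuming INDEX_INVALID is returned when the
+ * BD is not found): pair find_and_lock with unlock so the BD cannot be
+ * deleted via the API while it is still referenced:
+ *
+ *   index_t gbdi = gbp_bridge_domain_find_and_lock (bd_id);
+ *   if (INDEX_INVALID != gbdi)
+ *     {
+ *       u32 id = gbp_bridge_domain_get_bd_id (gbdi);
+ *       ...
+ *       gbp_bridge_domain_unlock (gbdi);
+ *     }
+ */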
+
+typedef int (*gbp_bridge_domain_cb_t) (gbp_bridge_domain_t * gb, void *ctx);
+extern void gbp_bridge_domain_walk (gbp_bridge_domain_cb_t bgpe, void *ctx);
+
+extern u8 *format_gbp_bridge_domain (u8 * s, va_list * args);
+extern u8 *format_gbp_bridge_domain_flags (u8 * s, va_list * args);
+
+/**
+ * DB of bridge_domains
+ */
+typedef struct gbp_bridge_domain_db_t
+{
+ uword *gbd_by_bd_id;
+ index_t *gbd_by_bd_index;
+} gbp_bridge_domain_db_t;
+
+extern gbp_bridge_domain_db_t gbp_bridge_domain_db;
+extern gbp_bridge_domain_t *gbp_bridge_domain_pool;
+
+always_inline gbp_bridge_domain_t *
+gbp_bridge_domain_get (index_t i)
+{
+ return (pool_elt_at_index (gbp_bridge_domain_pool, i));
+}
+
+always_inline gbp_bridge_domain_t *
+gbp_bridge_domain_get_by_bd_index (u32 bd_index)
+{
+ return (gbp_bridge_domain_get
+ (gbp_bridge_domain_db.gbd_by_bd_index[bd_index]));
+}
+
+extern gbp_scope_t *gbp_scope_by_bd_index;
+
+always_inline gbp_scope_t
+gbp_bridge_domain_get_scope (u32 bd_index)
+{
+ return (gbp_scope_by_bd_index[bd_index]);
+}
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_classify.c b/extras/deprecated/plugins/gbp/gbp_classify.c
new file mode 100644
index 00000000000..255db252871
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_classify.c
@@ -0,0 +1,71 @@
+/*
+ * gbp_classify.c : Group Based Policy
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_classify.h>
+#include <vnet/l2/l2_input.h>
+
+gbp_src_classify_main_t gbp_src_classify_main;
+
+static clib_error_t *
+gbp_src_classify_init (vlib_main_t * vm)
+{
+ gbp_src_classify_main_t *em = &gbp_src_classify_main;
+
+ vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-src-classify");
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ node->index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+			       em->l2_input_feat_next[GBP_SRC_CLASSIFY_PORT]);
+
+ node = vlib_get_node_by_name (vm, (u8 *) "gbp-null-classify");
+ feat_bitmap_init_next_nodes (vm,
+ node->index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+			       em->l2_input_feat_next[GBP_SRC_CLASSIFY_NULL]);
+
+ node = vlib_get_node_by_name (vm, (u8 *) "l2-gbp-lpm-classify");
+ feat_bitmap_init_next_nodes (vm,
+ node->index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ em->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM]);
+
+ node = vlib_get_node_by_name (vm, (u8 *) "l2-gbp-lpm-anon-classify");
+ feat_bitmap_init_next_nodes (vm,
+ node->index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ em->l2_input_feat_next
+ [GBP_SRC_CLASSIFY_LPM_ANON]);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (gbp_src_classify_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_classify.h b/extras/deprecated/plugins/gbp/gbp_classify.h
new file mode 100644
index 00000000000..ca7db94a2c0
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_classify.h
@@ -0,0 +1,94 @@
+/*
+ * gbp_classify.h : Group Based Policy
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_CLASSIFY_H__
+#define __GBP_CLASSIFY_H__
+
+#include <plugins/gbp/gbp.h>
+#include <vnet/ethernet/arp_packet.h>
+
+typedef enum gbp_src_classify_type_t_
+{
+ GBP_SRC_CLASSIFY_NULL,
+ GBP_SRC_CLASSIFY_PORT,
+ GBP_SRC_CLASSIFY_LPM,
+ GBP_SRC_CLASSIFY_LPM_ANON,
+ GBP_SRC_N_CLASSIFY
+#define GBP_SRC_N_CLASSIFY GBP_SRC_N_CLASSIFY
+} gbp_src_classify_type_t;
+
+/**
+ * Grouping of global data for the GBP source EPG classification feature
+ */
+typedef struct gbp_src_classify_main_t_
+{
+ /**
+   * Next nodes for L2 input features
+ */
+ u32 l2_input_feat_next[GBP_SRC_N_CLASSIFY][32];
+} gbp_src_classify_main_t;
+
+extern gbp_src_classify_main_t gbp_src_classify_main;
+
+enum gbp_classify_get_ip_way
+{
+ GBP_CLASSIFY_GET_IP_SRC = 0,
+ GBP_CLASSIFY_GET_IP_DST = 1
+};
+
+static_always_inline dpo_proto_t
+gbp_classify_get_ip_address (const ethernet_header_t * eh0,
+ const ip4_address_t ** ip4,
+ const ip6_address_t ** ip6,
+ const enum gbp_classify_get_ip_way way)
+{
+ u16 etype = clib_net_to_host_u16 (eh0->type);
+ const void *l3h0 = eh0 + 1;
+
+ if (ETHERNET_TYPE_VLAN == etype)
+ {
+ const ethernet_vlan_header_t *vh0 =
+ (ethernet_vlan_header_t *) (eh0 + 1);
+ etype = clib_net_to_host_u16 (vh0->type);
+ l3h0 = vh0 + 1;
+ }
+
+ switch (etype)
+ {
+ case ETHERNET_TYPE_IP4:
+ *ip4 = &(&((const ip4_header_t *) l3h0)->src_address)[way];
+ return DPO_PROTO_IP4;
+ case ETHERNET_TYPE_IP6:
+ *ip6 = &(&((const ip6_header_t *) l3h0)->src_address)[way];
+ return DPO_PROTO_IP6;
+ case ETHERNET_TYPE_ARP:
+ *ip4 = &((ethernet_arp_header_t *) l3h0)->ip4_over_ethernet[way].ip4;
+ return DPO_PROTO_IP4;
+ }
+
+ return DPO_PROTO_NONE;
+}
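+
+/*
+ * A usage sketch (mirroring the callers in gbp_classify_node.c): on
+ * return at most one of ip4/ip6 points into the packet, and
+ * DPO_PROTO_NONE is returned for non-IP/ARP ethertypes. Note that
+ * GBP_CLASSIFY_GET_IP_DST works by indexing past the source address,
+ * since the destination/target address immediately follows it in the
+ * IPv4, IPv6 and ARP headers:
+ *
+ *   const ip4_address_t *ip4 = NULL;
+ *   const ip6_address_t *ip6 = NULL;
+ *   dpo_proto_t dproto;
+ *
+ *   dproto = gbp_classify_get_ip_address (eh0, &ip4, &ip6,
+ *                                         GBP_CLASSIFY_GET_IP_SRC);
+ */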
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_classify_node.c b/extras/deprecated/plugins/gbp/gbp_classify_node.c
new file mode 100644
index 00000000000..a2058a21284
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_classify_node.c
@@ -0,0 +1,628 @@
+/*
+ * gbp_classify_node.c : Group Based Policy
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_classify.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_ext_itf.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+#include <vnet/ethernet/arp_packet.h>
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_classify_trace_t_
+{
+ /* per-pkt trace data */
+ sclass_t sclass;
+} gbp_classify_trace_t;
+
+/*
+ * determine the SRC EPG from the input port
+ */
+always_inline uword
+gbp_classify_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ gbp_src_classify_type_t type, dpo_proto_t dproto)
+{
+ gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
+ u32 n_left_from, *from, *to_next;
+ u32 next_index;
+
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 next0, bi0, sw_if_index0;
+ const gbp_endpoint_t *ge0;
+ vlib_buffer_t *b0;
+ sclass_t sclass0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
+
+ if (GBP_SRC_CLASSIFY_NULL == type)
+ {
+ sclass0 = SCLASS_INVALID;
+ next0 =
+ vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
+ L2INPUT_FEAT_GBP_NULL_CLASSIFY);
+ }
+ else
+ {
+ if (DPO_PROTO_ETHERNET == dproto)
+ {
+ const ethernet_header_t *h0;
+
+ h0 = vlib_buffer_get_current (b0);
+ next0 =
+ vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
+ L2INPUT_FEAT_GBP_SRC_CLASSIFY);
+ ge0 = gbp_endpoint_find_mac (h0->src_address,
+ vnet_buffer (b0)->l2.bd_index);
+ }
+ else if (DPO_PROTO_IP4 == dproto)
+ {
+ const ip4_header_t *h0;
+
+ h0 = vlib_buffer_get_current (b0);
+
+ ge0 = gbp_endpoint_find_ip4
+ (&h0->src_address,
+ fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+ sw_if_index0));
+
+
+ /*
+	       * Go straight to lookup, do not pass go, do not collect $200
+ */
+ next0 = 0;
+ }
+ else if (DPO_PROTO_IP6 == dproto)
+ {
+ const ip6_header_t *h0;
+
+ h0 = vlib_buffer_get_current (b0);
+
+ ge0 = gbp_endpoint_find_ip6
+ (&h0->src_address,
+ fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
+ sw_if_index0));
+
+
+ /*
+ * Go straight to lookup, do not pass go, do not collect $200
+ */
+ next0 = 0;
+ }
+ else
+ {
+ ge0 = NULL;
+ next0 = 0;
+ ASSERT (0);
+ }
+
+ if (PREDICT_TRUE (NULL != ge0))
+ sclass0 = ge0->ge_fwd.gef_sclass;
+ else
+ sclass0 = SCLASS_INVALID;
+ }
+
+ vnet_buffer2 (b0)->gbp.sclass = sclass0;
+
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sclass = sclass0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (gbp_src_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET));
+}
+
+VLIB_NODE_FN (gbp_null_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET));
+}
+
+VLIB_NODE_FN (gbp_ip4_src_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4));
+}
+
+VLIB_NODE_FN (gbp_ip6_src_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6));
+}
+
+
+/* packet trace format function */
+static u8 *
+format_gbp_classify_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
+
+ s = format (s, "sclass:%d", t->sclass);
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_null_classify_node) = {
+ .name = "gbp-null-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 0,
+};
+
+VLIB_REGISTER_NODE (gbp_src_classify_node) = {
+ .name = "gbp-src-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 0,
+};
+
+VLIB_REGISTER_NODE (gbp_ip4_src_classify_node) = {
+ .name = "ip4-gbp-src-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip4-lookup"
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_ip6_src_classify_node) = {
+ .name = "ip6-gbp-src-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip6-lookup"
+ },
+};
+
+VNET_FEATURE_INIT (gbp_ip4_src_classify_feat_node, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-gbp-src-classify",
+ .runs_before = VNET_FEATURES ("nat44-out2in"),
+};
+VNET_FEATURE_INIT (gbp_ip6_src_classify_feat_node, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-gbp-src-classify",
+ .runs_before = VNET_FEATURES ("nat66-out2in"),
+};
+
+/* *INDENT-ON* */
+
+typedef enum gbp_lpm_classify_next_t_
+{
+ GPB_LPM_CLASSIFY_DROP,
+} gbp_lpm_classify_next_t;
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_lpm_classify_trace_t_
+{
+ sclass_t sclass;
+ index_t lbi;
+ ip46_address_t src;
+} gbp_lpm_classify_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_gbp_lpm_classify_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_lpm_classify_trace_t *t = va_arg (*args, gbp_lpm_classify_trace_t *);
+
+ s = format (s, "sclass:%d lb:%d src:%U",
+ t->sclass, t->lbi, format_ip46_address, &t->src, IP46_TYPE_ANY);
+
+ return s;
+}
+
+enum gbp_lpm_type
+{
+ GBP_LPM_RECIRC,
+ GBP_LPM_EPG,
+ GBP_LPM_ANON
+};
+
+/*
+ * Determine the SRC EPG from a LPM
+ */
+always_inline uword
+gbp_lpm_classify_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ const dpo_proto_t dproto,
+ const enum gbp_lpm_type type)
+{
+ gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
+ u32 n_left_from, *from, *to_next;
+ u32 next_index;
+
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sw_if_index0, fib_index0, lbi0;
+ const gbp_endpoint_t *ge0, *ge_lpm0;
+ gbp_lpm_classify_next_t next0;
+ const ethernet_header_t *eh0;
+ const gbp_policy_dpo_t *gpd0;
+ const ip4_address_t *ip4_0;
+ const ip6_address_t *ip6_0;
+ const gbp_recirc_t *gr0;
+ vlib_buffer_t *b0;
+ sclass_t sclass0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ ip4_0 = NULL;
+ ip6_0 = NULL;
+ next0 = GPB_LPM_CLASSIFY_DROP;
+
+ lbi0 = ~0;
+ eh0 = NULL;
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
+
+ if (DPO_PROTO_IP4 == dproto)
+ ip4_0 =
+ &((ip4_header_t *) vlib_buffer_get_current (b0))->src_address;
+ else if (DPO_PROTO_IP6 == dproto)
+ ip6_0 =
+ &((ip6_header_t *) vlib_buffer_get_current (b0))->src_address;
+ else if (DPO_PROTO_ETHERNET == dproto)
+ {
+ eh0 = vlib_buffer_get_current (b0);
+ gbp_classify_get_ip_address (eh0, &ip4_0, &ip6_0,
+ GBP_CLASSIFY_GET_IP_SRC);
+ }
+
+ if (GBP_LPM_RECIRC == type)
+ {
+ gr0 = gbp_recirc_get (sw_if_index0);
+ fib_index0 = gr0->gr_fib_index[dproto];
+ ge0 = NULL;
+
+ vnet_feature_next (&next0, b0);
+ }
+ else
+ {
+ if (NULL == eh0)
+ {
+ /* packet should be l2 */
+ sclass0 = SCLASS_INVALID;
+ goto trace;
+ }
+
+ if (GBP_LPM_ANON == type)
+ {
+ /*
+	       * anonymous LPM classification: honour only the LPM since
+	       * no EPs were programmed
+ */
+ gbp_ext_itf_t *gei = gbp_ext_itf_get (sw_if_index0);
+ if (ip4_0)
+ fib_index0 = gei->gx_fib_index[DPO_PROTO_IP4];
+ else if (ip6_0)
+ fib_index0 = gei->gx_fib_index[DPO_PROTO_IP6];
+ else
+ {
+ /* not IP so no LPM classify possible */
+ sclass0 = SCLASS_INVALID;
+ next0 = GPB_LPM_CLASSIFY_DROP;
+ goto trace;
+ }
+ next0 = vnet_l2_feature_next
+ (b0, gscm->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM_ANON],
+ L2INPUT_FEAT_GBP_LPM_ANON_CLASSIFY);
+ }
+ else
+ {
+ /*
+ * not an anonymous LPM classification: check it comes from
+ * an EP, and use EP RD info
+ */
+ ge0 = gbp_endpoint_find_mac (eh0->src_address,
+ vnet_buffer (b0)->l2.bd_index);
+
+ if (NULL == ge0)
+ {
+ /* packet must have come from an EP's mac */
+ sclass0 = SCLASS_INVALID;
+ goto trace;
+ }
+
+ fib_index0 = ge0->ge_fwd.gef_fib_index;
+
+ if (~0 == fib_index0)
+ {
+ sclass0 = SCLASS_INVALID;
+ goto trace;
+ }
+
+ if (ip4_0)
+ {
+ ge_lpm0 = gbp_endpoint_find_ip4 (ip4_0, fib_index0);
+ }
+ else if (ip6_0)
+ {
+ ge_lpm0 = gbp_endpoint_find_ip6 (ip6_0, fib_index0);
+ }
+ else
+ {
+ ge_lpm0 = NULL;
+ }
+
+ next0 = vnet_l2_feature_next
+ (b0, gscm->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM],
+ L2INPUT_FEAT_GBP_LPM_CLASSIFY);
+
+ /*
+	       * if we found the EP by IP lookup, it must be from the EP,
+	       * not a network behind it
+ */
+ if (NULL != ge_lpm0)
+ {
+ if (PREDICT_FALSE (ge0 != ge_lpm0))
+ {
+ /* an EP spoofing another EP */
+ sclass0 = SCLASS_INVALID;
+ next0 = GPB_LPM_CLASSIFY_DROP;
+ }
+ else
+ {
+ sclass0 = ge0->ge_fwd.gef_sclass;
+ }
+ goto trace;
+ }
+ }
+ }
+
+ gpd0 = gbp_classify_get_gpd (ip4_0, ip6_0, fib_index0);
+ if (0 == gpd0)
+ {
+ /* could not classify => drop */
+ sclass0 = SCLASS_INVALID;
+ next0 = GPB_LPM_CLASSIFY_DROP;
+ goto trace;
+ }
+
+ sclass0 = gpd0->gpd_sclass;
+
+	  /* packets from an external network must not be learned by the
+	   * receiver, so set the Do-not-learn bit here */
+ vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_D;
+
+ trace:
+ vnet_buffer2 (b0)->gbp.sclass = sclass0;
+
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_lpm_classify_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sclass = sclass0;
+ t->lbi = lbi0;
+ if (ip4_0)
+ t->src.ip4 = *ip4_0;
+ if (ip6_0)
+ t->src.ip6 = *ip6_0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (gbp_ip4_lpm_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_lpm_classify_inline
+ (vm, node, frame, DPO_PROTO_IP4, GBP_LPM_RECIRC));
+}
+
+VLIB_NODE_FN (gbp_ip6_lpm_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_lpm_classify_inline
+ (vm, node, frame, DPO_PROTO_IP6, GBP_LPM_RECIRC));
+}
+
+VLIB_NODE_FN (gbp_l2_lpm_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_lpm_classify_inline
+ (vm, node, frame, DPO_PROTO_ETHERNET, GBP_LPM_EPG));
+}
+
+VLIB_NODE_FN (gbp_l2_lpm_anon_classify_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_lpm_classify_inline
+ (vm, node, frame, DPO_PROTO_ETHERNET, GBP_LPM_ANON));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
+ .name = "ip4-gbp-lpm-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_lpm_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
+ .name = "ip6-gbp-lpm-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_lpm_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_l2_lpm_classify_node) = {
+ .name = "l2-gbp-lpm-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_lpm_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [GPB_LPM_CLASSIFY_DROP] = "error-drop"
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_l2_lpm_anon_classify_node) = {
+ .name = "l2-gbp-lpm-anon-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_lpm_classify_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = 0,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [GPB_LPM_CLASSIFY_DROP] = "error-drop"
+ },
+};
+
+VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-gbp-lpm-classify",
+ .runs_before = VNET_FEATURES ("nat44-out2in"),
+};
+VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-gbp-lpm-classify",
+ .runs_before = VNET_FEATURES ("nat66-out2in"),
+};
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_contract.c b/extras/deprecated/plugins/gbp/gbp_contract.c
new file mode 100644
index 00000000000..dd433f28a84
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_contract.c
@@ -0,0 +1,819 @@
+/*
+ * gbp_contract.c : Group Based Policy
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_contract.h>
+
+#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/drop_dpo.h>
+
+char *gbp_contract_error_strings[] = {
+#define _(sym,string) string,
+ foreach_gbp_contract_error
+#undef _
+};
+
+/**
+ * Single contract DB instance
+ */
+gbp_contract_db_t gbp_contract_db;
+
+gbp_contract_t *gbp_contract_pool;
+
+vlib_log_class_t gc_logger;
+
+fib_node_type_t gbp_next_hop_fib_type;
+
+gbp_rule_t *gbp_rule_pool;
+gbp_next_hop_t *gbp_next_hop_pool;
+
+#define GBP_CONTRACT_DBG(...) \
+ vlib_log_notice (gc_logger, __VA_ARGS__);
+
+/* Contract packet/byte counters indexed by contract index. */
+vlib_combined_counter_main_t gbp_contract_permit_counters = {
+ .name = "gbp-contracts-permit",
+ .stat_segment_name = "/net/gbp/contract/permit",
+};
+
+vlib_combined_counter_main_t gbp_contract_drop_counters = {
+ .name = "gbp-contracts-drop",
+ .stat_segment_name = "/net/gbp/contract/drop",
+};
+
+index_t
+gbp_rule_alloc (gbp_rule_action_t action,
+ gbp_hash_mode_t hash_mode, index_t * nhs)
+{
+ gbp_rule_t *gu;
+
+ pool_get_zero (gbp_rule_pool, gu);
+
+ gu->gu_hash_mode = hash_mode;
+ gu->gu_nhs = nhs;
+ gu->gu_action = action;
+
+ return (gu - gbp_rule_pool);
+}
+
+void
+gbp_rule_free (index_t gui)
+{
+ pool_put_index (gbp_rule_pool, gui);
+}
+
+index_t
+gbp_next_hop_alloc (const ip46_address_t * ip,
+ index_t grd, const mac_address_t * mac, index_t gbd)
+{
+ fib_protocol_t fproto;
+ gbp_next_hop_t *gnh;
+
+ pool_get_zero (gbp_next_hop_pool, gnh);
+
+ fib_node_init (&gnh->gnh_node, gbp_next_hop_fib_type);
+
+ ip46_address_copy (&gnh->gnh_ip, ip);
+ mac_address_copy (&gnh->gnh_mac, mac);
+
+ gnh->gnh_rd = grd;
+ gnh->gnh_bd = gbd;
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto) gnh->gnh_ai[fproto] = INDEX_INVALID;
+
+ return (gnh - gbp_next_hop_pool);
+}
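+
+/*
+ * A minimal sketch (hypothetical values: nh_ip/nh_mac, grdi/gbdi and
+ * the scope/class variables are placeholders) of how a caller builds a
+ * redirect rule from these allocators before handing it to
+ * gbp_contract_update:
+ *
+ *   index_t *nhs = NULL, *rules = NULL;
+ *   u32 stats_index;
+ *
+ *   vec_add1 (nhs, gbp_next_hop_alloc (&nh_ip, grdi, &nh_mac, gbdi));
+ *   vec_add1 (rules, gbp_rule_alloc (GBP_RULE_REDIRECT,
+ *                                    GBP_HASH_MODE_SYMMETRIC, nhs));
+ *   gbp_contract_update (scope, sclass, dclass, acl_index,
+ *                        rules, allowed_ethertypes, &stats_index);
+ */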
+
+static inline gbp_next_hop_t *
+gbp_next_hop_get (index_t gui)
+{
+ return (pool_elt_at_index (gbp_next_hop_pool, gui));
+}
+
+static void
+gbp_contract_rules_free (index_t * rules)
+{
+ index_t *gui, *gnhi;
+
+ vec_foreach (gui, rules)
+ {
+ gbp_policy_node_t pnode;
+ fib_protocol_t fproto;
+ gbp_next_hop_t *gnh;
+ gbp_rule_t *gu;
+
+ gu = gbp_rule_get (*gui);
+
+ FOR_EACH_GBP_POLICY_NODE (pnode)
+ {
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+	    dpo_reset (&gu->gu_dpo[pnode][fproto]);
+ }
+ }
+
+ vec_foreach (gnhi, gu->gu_nhs)
+ {
+ fib_protocol_t fproto;
+
+ gnh = gbp_next_hop_get (*gnhi);
+ gbp_bridge_domain_unlock (gnh->gnh_bd);
+ gbp_route_domain_unlock (gnh->gnh_rd);
+ gbp_endpoint_child_remove (gnh->gnh_ge, gnh->gnh_sibling);
+ gbp_endpoint_unlock (GBP_ENDPOINT_SRC_RR, gnh->gnh_ge);
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ adj_unlock (gnh->gnh_ai[fproto]);
+ }
+ }
+
+ gbp_rule_free (*gui);
+ }
+ vec_free (rules);
+}
+
+static u8 *
+format_gbp_next_hop (u8 * s, va_list * args)
+{
+ index_t gnhi = va_arg (*args, index_t);
+ gbp_next_hop_t *gnh;
+
+ gnh = gbp_next_hop_get (gnhi);
+
+ s = format (s, "%U, %U, %U EP:%d",
+ format_mac_address_t, &gnh->gnh_mac,
+ format_gbp_bridge_domain, gnh->gnh_bd,
+ format_ip46_address, &gnh->gnh_ip, IP46_TYPE_ANY, gnh->gnh_ge);
+
+ return (s);
+}
+
+u8 *
+format_gbp_rule_action (u8 * s, va_list * args)
+{
+ gbp_rule_action_t action = va_arg (*args, gbp_rule_action_t);
+
+ switch (action)
+ {
+#define _(v,a) case GBP_RULE_##v: return (format (s, "%s", a));
+ foreach_gbp_rule_action
+#undef _
+ }
+
+ return (format (s, "unknown"));
+}
+
+static u8 *
+format_gbp_hash_mode (u8 * s, va_list * args)
+{
+ gbp_hash_mode_t hash_mode = va_arg (*args, gbp_hash_mode_t);
+
+ switch (hash_mode)
+ {
+#define _(v,a) case GBP_HASH_MODE_##v: return (format (s, "%s", a));
+ foreach_gbp_hash_mode
+#undef _
+ }
+
+ return (format (s, "unknown"));
+}
+
+static u8 *
+format_gbp_policy_node (u8 * s, va_list * args)
+{
+ gbp_policy_node_t action = va_arg (*args, gbp_policy_node_t);
+
+ switch (action)
+ {
+#define _(v,a) case GBP_POLICY_NODE_##v: return (format (s, "%s", a));
+ foreach_gbp_policy_node
+#undef _
+ }
+
+ return (format (s, "unknown"));
+}
+
+static u8 *
+format_gbp_rule (u8 * s, va_list * args)
+{
+ index_t gui = va_arg (*args, index_t);
+ gbp_policy_node_t pnode;
+ fib_protocol_t fproto;
+ gbp_rule_t *gu;
+ index_t *gnhi;
+
+ gu = gbp_rule_get (gui);
+ s = format (s, "%U", format_gbp_rule_action, gu->gu_action);
+
+ switch (gu->gu_action)
+ {
+ case GBP_RULE_PERMIT:
+ case GBP_RULE_DENY:
+ return (s);
+ case GBP_RULE_REDIRECT:
+ s = format (s, ", %U", format_gbp_hash_mode, gu->gu_hash_mode);
+ break;
+ }
+
+ vec_foreach (gnhi, gu->gu_nhs)
+ {
+ s = format (s, "\n [%U]", format_gbp_next_hop, *gnhi);
+ }
+
+ FOR_EACH_GBP_POLICY_NODE (pnode)
+ {
+ s = format (s, "\n policy-%U", format_gbp_policy_node, pnode);
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ if (dpo_id_is_valid (&gu->gu_dpo[pnode][fproto]))
+ {
+ s =
+ format (s, "\n %U", format_dpo_id,
+ &gu->gu_dpo[pnode][fproto], 8);
+ }
+ }
+ }
+
+ return (s);
+}
+
+static void
+gbp_contract_mk_adj (gbp_next_hop_t * gnh, fib_protocol_t fproto)
+{
+ ethernet_header_t *eth;
+ gbp_endpoint_t *ge;
+ index_t old_ai;
+ u8 *rewrite;
+
+ old_ai = gnh->gnh_ai[fproto];
+ rewrite = NULL;
+ vec_validate (rewrite, sizeof (*eth) - 1);
+ eth = (ethernet_header_t *) rewrite;
+
+ GBP_CONTRACT_DBG ("...mk-adj: %U", format_gbp_next_hop,
+ gnh - gbp_next_hop_pool);
+
+ ge = gbp_endpoint_get (gnh->gnh_ge);
+
+ eth->type = clib_host_to_net_u16 ((fproto == FIB_PROTOCOL_IP4 ?
+ ETHERNET_TYPE_IP4 : ETHERNET_TYPE_IP6));
+ mac_address_to_bytes (gbp_route_domain_get_local_mac (), eth->src_address);
+ mac_address_to_bytes (&gnh->gnh_mac, eth->dst_address);
+
+ gnh->gnh_ai[fproto] =
+ adj_nbr_add_or_lock_w_rewrite (fproto,
+ fib_proto_to_link (fproto),
+ &gnh->gnh_ip,
+ gbp_itf_get_sw_if_index (ge->
+ ge_fwd.gef_itf),
+ rewrite);
+
+ adj_unlock (old_ai);
+}
+
+static flow_hash_config_t
+gbp_contract_mk_lb_hp (gbp_hash_mode_t gu_hash_mode)
+{
+ switch (gu_hash_mode)
+ {
+ case GBP_HASH_MODE_SRC_IP:
+ return IP_FLOW_HASH_SRC_ADDR;
+ case GBP_HASH_MODE_DST_IP:
+ return IP_FLOW_HASH_DST_ADDR;
+ case GBP_HASH_MODE_SYMMETRIC:
+ return (IP_FLOW_HASH_SRC_ADDR | IP_FLOW_HASH_DST_ADDR |
+ IP_FLOW_HASH_PROTO | IP_FLOW_HASH_SYMMETRIC);
+ }
+
+ return 0;
+}
+
+static void
+gbp_contract_mk_lb (index_t gui, fib_protocol_t fproto)
+{
+ load_balance_path_t *paths = NULL;
+ gbp_policy_node_t pnode;
+ gbp_next_hop_t *gnh;
+ dpo_proto_t dproto;
+ gbp_rule_t *gu;
+ u32 ii;
+
+ u32 policy_nodes[] = {
+ [GBP_POLICY_NODE_L2] = gbp_policy_port_node.index,
+ [GBP_POLICY_NODE_IP4] = ip4_gbp_policy_dpo_node.index,
+ [GBP_POLICY_NODE_IP6] = ip6_gbp_policy_dpo_node.index,
+ };
+
+ GBP_CONTRACT_DBG ("..mk-lb: %U", format_gbp_rule, gui);
+
+ gu = gbp_rule_get (gui);
+ dproto = fib_proto_to_dpo (fproto);
+
+ if (GBP_RULE_REDIRECT != gu->gu_action)
+ return;
+
+ vec_foreach_index (ii, gu->gu_nhs)
+ {
+ gnh = gbp_next_hop_get (gu->gu_nhs[ii]);
+
+ gbp_contract_mk_adj (gnh, FIB_PROTOCOL_IP4);
+ gbp_contract_mk_adj (gnh, FIB_PROTOCOL_IP6);
+ }
+
+ FOR_EACH_GBP_POLICY_NODE (pnode)
+ {
+ vec_validate (paths, vec_len (gu->gu_nhs) - 1);
+
+ vec_foreach_index (ii, gu->gu_nhs)
+ {
+ gnh = gbp_next_hop_get (gu->gu_nhs[ii]);
+
+ paths[ii].path_index = FIB_NODE_INDEX_INVALID;
+ paths[ii].path_weight = 1;
+ dpo_set (&paths[ii].path_dpo, DPO_ADJACENCY,
+ dproto, gnh->gnh_ai[fproto]);
+ }
+
+ if (!dpo_id_is_valid (&gu->gu_dpo[pnode][fproto]))
+ {
+ dpo_id_t dpo = DPO_INVALID;
+
+ dpo_set (&dpo, DPO_LOAD_BALANCE, dproto,
+ load_balance_create (vec_len (paths),
+ dproto,
+ gbp_contract_mk_lb_hp
+ (gu->gu_hash_mode)));
+ dpo_stack_from_node (policy_nodes[pnode], &gu->gu_dpo[pnode][fproto],
+ &dpo);
+ dpo_reset (&dpo);
+ }
+
+ load_balance_multipath_update (&gu->gu_dpo[pnode][fproto],
+ paths, LOAD_BALANCE_FLAG_NONE);
+ vec_free (paths);
+ }
+}
+
+static void
+gbp_contract_mk_one_lb (index_t gui)
+{
+ gbp_contract_mk_lb (gui, FIB_PROTOCOL_IP4);
+ gbp_contract_mk_lb (gui, FIB_PROTOCOL_IP6);
+}
+
+static int
+gbp_contract_next_hop_resolve (index_t gui, index_t gnhi)
+{
+ gbp_bridge_domain_t *gbd;
+ gbp_next_hop_t *gnh;
+ ip46_address_t *ips;
+ int rv;
+
+ ips = NULL;
+ gnh = gbp_next_hop_get (gnhi);
+ gbd = gbp_bridge_domain_get (gnh->gnh_bd);
+
+ gnh->gnh_gu = gui;
+ vec_add1 (ips, gnh->gnh_ip);
+
+ /*
+ * source the endpoint this contract needs to forward via.
+   * give forwarding details via the spine proxy. If this EP is known
+   * to us then, since we source here with a low priority, the learned
+   * info will take precedence.
+ */
+ rv = gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_RR,
+ gbd->gb_uu_fwd_sw_if_index,
+ ips,
+ &gnh->gnh_mac,
+ gnh->gnh_bd, gnh->gnh_rd, SCLASS_INVALID,
+ GBP_ENDPOINT_FLAG_NONE, NULL, NULL,
+ &gnh->gnh_ge);
+
+ if (0 == rv)
+ {
+ gnh->gnh_sibling = gbp_endpoint_child_add (gnh->gnh_ge,
+ gbp_next_hop_fib_type, gnhi);
+ }
+
+ GBP_CONTRACT_DBG ("..resolve: %d: %d: %U", gui, gnhi, format_gbp_next_hop,
+ gnhi);
+
+ vec_free (ips);
+ return (rv);
+}
+
+static void
+gbp_contract_rule_resolve (index_t gui)
+{
+ gbp_rule_t *gu;
+ index_t *gnhi;
+
+ gu = gbp_rule_get (gui);
+
+ GBP_CONTRACT_DBG ("..resolve: %U", format_gbp_rule, gui);
+
+ vec_foreach (gnhi, gu->gu_nhs)
+ {
+ gbp_contract_next_hop_resolve (gui, *gnhi);
+ }
+}
+
+static void
+gbp_contract_resolve (index_t * guis)
+{
+ index_t *gui;
+
+ vec_foreach (gui, guis)
+ {
+ gbp_contract_rule_resolve (*gui);
+ }
+}
+
+static void
+gbp_contract_mk_lbs (index_t * guis)
+{
+ index_t *gui;
+
+ vec_foreach (gui, guis)
+ {
+ gbp_contract_mk_one_lb (*gui);
+ }
+}
+
+int
+gbp_contract_update (gbp_scope_t scope,
+ sclass_t sclass,
+ sclass_t dclass,
+ u32 acl_index,
+ index_t * rules,
+ u16 * allowed_ethertypes, u32 * stats_index)
+{
+ gbp_main_t *gm = &gbp_main;
+ u32 *acl_vec = NULL;
+ gbp_contract_t *gc;
+ index_t gci;
+ uword *p;
+
+ gbp_contract_key_t key = {
+ .gck_scope = scope,
+ .gck_src = sclass,
+ .gck_dst = dclass,
+ };
+
+ if (~0 == gm->gbp_acl_user_id)
+ {
+ acl_plugin_exports_init (&gm->acl_plugin);
+ gm->gbp_acl_user_id =
+ gm->acl_plugin.register_user_module ("GBP ACL", "src-epg", "dst-epg");
+ }
+
+ p = hash_get (gbp_contract_db.gc_hash, key.as_u64);
+ if (p != NULL)
+ {
+ gci = p[0];
+ gc = gbp_contract_get (gci);
+ gbp_contract_rules_free (gc->gc_rules);
+ gbp_main.acl_plugin.put_lookup_context_index (gc->gc_lc_index);
+ gc->gc_rules = NULL;
+ vec_free (gc->gc_allowed_ethertypes);
+ }
+ else
+ {
+ pool_get_zero (gbp_contract_pool, gc);
+ gc->gc_key = key;
+ gci = gc - gbp_contract_pool;
+ hash_set (gbp_contract_db.gc_hash, key.as_u64, gci);
+
+ vlib_validate_combined_counter (&gbp_contract_drop_counters, gci);
+ vlib_zero_combined_counter (&gbp_contract_drop_counters, gci);
+ vlib_validate_combined_counter (&gbp_contract_permit_counters, gci);
+ vlib_zero_combined_counter (&gbp_contract_permit_counters, gci);
+ }
+
+ GBP_CONTRACT_DBG ("update: %U", format_gbp_contract, gci);
+
+ gc->gc_rules = rules;
+ gc->gc_allowed_ethertypes = allowed_ethertypes;
+ gbp_contract_resolve (gc->gc_rules);
+ gbp_contract_mk_lbs (gc->gc_rules);
+
+ gc->gc_acl_index = acl_index;
+ gc->gc_lc_index =
+ gm->acl_plugin.get_lookup_context_index (gm->gbp_acl_user_id,
+ sclass, dclass);
+
+ vec_add1 (acl_vec, gc->gc_acl_index);
+ gm->acl_plugin.set_acl_vec_for_context (gc->gc_lc_index, acl_vec);
+ vec_free (acl_vec);
+
+ *stats_index = gci;
+
+ return (0);
+}
+
+int
+gbp_contract_delete (gbp_scope_t scope, sclass_t sclass, sclass_t dclass)
+{
+ gbp_contract_key_t key = {
+ .gck_scope = scope,
+ .gck_src = sclass,
+ .gck_dst = dclass,
+ };
+ gbp_contract_t *gc;
+ uword *p;
+
+ p = hash_get (gbp_contract_db.gc_hash, key.as_u64);
+ if (p != NULL)
+ {
+ gc = gbp_contract_get (p[0]);
+
+ gbp_contract_rules_free (gc->gc_rules);
+ gbp_main.acl_plugin.put_lookup_context_index (gc->gc_lc_index);
+ vec_free (gc->gc_allowed_ethertypes);
+
+ hash_unset (gbp_contract_db.gc_hash, key.as_u64);
+ pool_put (gbp_contract_pool, gc);
+
+ return (0);
+ }
+
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+void
+gbp_contract_walk (gbp_contract_cb_t cb, void *ctx)
+{
+ gbp_contract_t *gc;
+
+ /* *INDENT-OFF* */
+ pool_foreach (gc, gbp_contract_pool)
+ {
+ if (!cb(gc, ctx))
+ break;
+ }
+ /* *INDENT-ON* */
+}
+
+static clib_error_t *
+gbp_contract_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ sclass_t sclass = SCLASS_INVALID, dclass = SCLASS_INVALID;
+ u32 acl_index = ~0, stats_index, scope = ~0;
+ u8 add = 1;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "add"))
+ add = 1;
+ else if (unformat (input, "del"))
+ add = 0;
+ else if (unformat (input, "scope %d", &scope))
+ ;
+ else if (unformat (input, "sclass %d", &sclass))
+ ;
+ else if (unformat (input, "dclass %d", &dclass))
+ ;
+ else if (unformat (input, "acl-index %d", &acl_index))
+ ;
+ else
+ break;
+ }
+
+ if (SCLASS_INVALID == sclass)
+ return clib_error_return (0, "Source EPG-ID must be specified");
+ if (SCLASS_INVALID == dclass)
+ return clib_error_return (0, "Destination EPG-ID must be specified");
+
+ if (add)
+ {
+ gbp_contract_update (scope, sclass, dclass, acl_index,
+ NULL, NULL, &stats_index);
+ }
+ else
+ {
+ gbp_contract_delete (scope, sclass, dclass);
+ }
+
+ return (NULL);
+}
+
+/*?
+ * Configure a GBP Contract
+ *
+ * @cliexpar
+ * @cliexstart{gbp contract [add|del] scope <scope> sclass <ID> dclass <ID> acl-index <ACL>}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_contract_cli_node, static) =
+{
+ .path = "gbp contract",
+ .short_help =
+    "gbp contract [add|del] scope <scope> sclass <ID> dclass <ID> acl-index <ACL>",
+ .function = gbp_contract_cli,
+};
+/* *INDENT-ON* */
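+
+/*
+ * Example (hypothetical values), matching the unformat parser above:
+ *
+ *   gbp contract add scope 0 sclass 100 dclass 200 acl-index 5
+ *   gbp contract del scope 0 sclass 100 dclass 200
+ */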
+
+static u8 *
+format_gbp_contract_key (u8 * s, va_list * args)
+{
+ gbp_contract_key_t *gck = va_arg (*args, gbp_contract_key_t *);
+
+ s = format (s, "{%d,%d,%d}", gck->gck_scope, gck->gck_src, gck->gck_dst);
+
+ return (s);
+}
+
+u8 *
+format_gbp_contract (u8 * s, va_list * args)
+{
+ index_t gci = va_arg (*args, index_t);
+ vlib_counter_t counts;
+ gbp_contract_t *gc;
+ index_t *gui;
+ u16 *et;
+
+ gc = gbp_contract_get (gci);
+
+ s = format (s, "[%d] %U: acl-index:%d",
+ gci, format_gbp_contract_key, &gc->gc_key, gc->gc_acl_index);
+
+ s = format (s, "\n rules:");
+ vec_foreach (gui, gc->gc_rules)
+ {
+ s = format (s, "\n %d: %U", *gui, format_gbp_rule, *gui);
+ }
+
+ s = format (s, "\n allowed-ethertypes:");
+ s = format (s, "\n [");
+ vec_foreach (et, gc->gc_allowed_ethertypes)
+ {
+ int host_et = clib_net_to_host_u16 (*et);
+ if (0 != host_et)
+ s = format (s, "0x%x, ", host_et);
+ }
+ s = format (s, "]");
+
+ s = format (s, "\n stats:");
+ vlib_get_combined_counter (&gbp_contract_drop_counters, gci, &counts);
+ s = format (s, "\n drop:[%Ld:%Ld]", counts.packets, counts.bytes);
+ vlib_get_combined_counter (&gbp_contract_permit_counters, gci, &counts);
+ s = format (s, "\n permit:[%Ld:%Ld]", counts.packets, counts.bytes);
+
+
+ return (s);
+}
+
+static clib_error_t *
+gbp_contract_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ gbp_contract_t *gc;
+ u32 src, dst;
+ index_t gci;
+
+ src = dst = SCLASS_INVALID;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "src %d", &src))
+ ;
+ else if (unformat (input, "dst %d", &dst))
+ ;
+ else
+ break;
+ }
+
+ vlib_cli_output (vm, "Contracts:");
+
+ /* *INDENT-OFF* */
+ pool_foreach (gc, gbp_contract_pool)
+ {
+ gci = gc - gbp_contract_pool;
+
+ if (SCLASS_INVALID != src && SCLASS_INVALID != dst)
+ {
+ if (gc->gc_key.gck_src == src &&
+ gc->gc_key.gck_dst == dst)
+ vlib_cli_output (vm, " %U", format_gbp_contract, gci);
+ }
+ else if (SCLASS_INVALID != src)
+ {
+ if (gc->gc_key.gck_src == src)
+ vlib_cli_output (vm, " %U", format_gbp_contract, gci);
+ }
+ else if (SCLASS_INVALID != dst)
+ {
+ if (gc->gc_key.gck_dst == dst)
+ vlib_cli_output (vm, " %U", format_gbp_contract, gci);
+ }
+ else
+ vlib_cli_output (vm, " %U", format_gbp_contract, gci);
+ }
+ /* *INDENT-ON* */
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy Contracts
+ *
+ * @cliexpar
+ * @cliexstart{show gbp contract}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_contract_show_node, static) = {
+ .path = "show gbp contract",
+ .short_help = "show gbp contract [src <SRC>] [dst <DST>]\n",
+ .function = gbp_contract_show,
+};
+/* *INDENT-ON* */
+
+static fib_node_t *
+gbp_next_hop_get_node (fib_node_index_t index)
+{
+ gbp_next_hop_t *gnh;
+
+ gnh = gbp_next_hop_get (index);
+
+ return (&gnh->gnh_node);
+}
+
+static void
+gbp_next_hop_last_lock_gone (fib_node_t * node)
+{
+ ASSERT (0);
+}
+
+static gbp_next_hop_t *
+gbp_next_hop_from_fib_node (fib_node_t * node)
+{
+ ASSERT (gbp_next_hop_fib_type == node->fn_type);
+ return ((gbp_next_hop_t *) node);
+}
+
+static fib_node_back_walk_rc_t
+gbp_next_hop_back_walk_notify (fib_node_t * node,
+ fib_node_back_walk_ctx_t * ctx)
+{
+ gbp_next_hop_t *gnh;
+
+ gnh = gbp_next_hop_from_fib_node (node);
+
+ gbp_contract_mk_one_lb (gnh->gnh_gu);
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/*
+ * The FIB path's graph node virtual function table
+ */
+static const fib_node_vft_t gbp_next_hop_vft = {
+ .fnv_get = gbp_next_hop_get_node,
+ .fnv_last_lock = gbp_next_hop_last_lock_gone,
+ .fnv_back_walk = gbp_next_hop_back_walk_notify,
+ // .fnv_mem_show = fib_path_memory_show,
+};
+
+static clib_error_t *
+gbp_contract_init (vlib_main_t * vm)
+{
+ gc_logger = vlib_log_register_class ("gbp", "con");
+ gbp_next_hop_fib_type = fib_node_register_new_type (&gbp_next_hop_vft);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_contract_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_contract.h b/extras/deprecated/plugins/gbp/gbp_contract.h
new file mode 100644
index 00000000000..1e74db60116
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_contract.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_CONTRACT_H__
+#define __GBP_CONTRACT_H__
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_types.h>
+
+#define foreach_gbp_contract_error \
+ _(ALLOW_NO_SCLASS, "allow-no-sclass") \
+ _(ALLOW_INTRA, "allow-intra-sclass") \
+ _(ALLOW_A_BIT, "allow-a-bit-set") \
+ _(ALLOW_SCLASS_1, "allow-sclass-1") \
+ _(ALLOW_CONTRACT, "allow-contract") \
+ _(DROP_CONTRACT, "drop-contract") \
+ _(DROP_ETHER_TYPE, "drop-ether-type") \
+ _(DROP_NO_CONTRACT, "drop-no-contract") \
+ _(DROP_NO_DCLASS, "drop-no-dclass") \
+ _(DROP_NO_RULE, "drop-no-rule")
+
+typedef enum
+{
+#define _(sym,str) GBP_CONTRACT_ERROR_##sym,
+ foreach_gbp_contract_error
+#undef _
+ GBP_CONTRACT_N_ERROR,
+#define GBP_CONTRACT_N_ERROR GBP_CONTRACT_N_ERROR
+} gbp_contract_error_t;
+
+extern char *gbp_contract_error_strings[GBP_CONTRACT_N_ERROR];
+
+/**
+ * The key for a Contract
+ */
+typedef struct gbp_contract_key_t_
+{
+ union
+ {
+ struct
+ {
+ gbp_scope_t gck_scope;
+ /**
+ * source and destination EPGs for which the ACL applies
+ */
+ sclass_t gck_src;
+ sclass_t gck_dst;
+ };
+ u64 as_u64;
+ };
+} gbp_contract_key_t;
+
+typedef struct gbp_next_hop_t_
+{
+ fib_node_t gnh_node;
+ ip46_address_t gnh_ip;
+ mac_address_t gnh_mac;
+ index_t gnh_gu;
+ index_t gnh_bd;
+ index_t gnh_rd;
+ u32 gnh_ge;
+ u32 gnh_sibling;
+ index_t gnh_ai[FIB_PROTOCOL_IP_MAX];
+} gbp_next_hop_t;
+
+#define foreach_gbp_hash_mode \
+ _(SRC_IP, "src-ip") \
+ _(DST_IP, "dst-ip") \
+ _(SYMMETRIC, "symmetric")
+
+typedef enum gbp_hash_mode_t_
+{
+#define _(v,s) GBP_HASH_MODE_##v,
+ foreach_gbp_hash_mode
+#undef _
+} gbp_hash_mode_t;
+
+#define foreach_gbp_rule_action \
+ _(PERMIT, "permit") \
+ _(DENY, "deny") \
+ _(REDIRECT, "redirect")
+
+typedef enum gbp_rule_action_t_
+{
+#define _(v,s) GBP_RULE_##v,
+ foreach_gbp_rule_action
+#undef _
+} gbp_rule_action_t;
+
+#define foreach_gbp_policy_node \
+ _(L2, "L2") \
+ _(IP4, "ip4") \
+ _(IP6, "ip6")
+
+typedef enum gbp_policy_node_t_
+{
+#define _(v,s) GBP_POLICY_NODE_##v,
+ foreach_gbp_policy_node
+#undef _
+} gbp_policy_node_t;
+#define GBP_POLICY_N_NODES (GBP_POLICY_NODE_IP6+1)
+
+#define FOR_EACH_GBP_POLICY_NODE(pnode) \
+ for (pnode = GBP_POLICY_NODE_L2; pnode < GBP_POLICY_N_NODES; pnode++)
+
+typedef struct gbp_rule_t_
+{
+ gbp_rule_action_t gu_action;
+ gbp_hash_mode_t gu_hash_mode;
+ index_t *gu_nhs;
+
+ /**
+ * DPO of the load-balance object used to redirect
+ */
+ dpo_id_t gu_dpo[GBP_POLICY_N_NODES][FIB_PROTOCOL_IP_MAX];
+} gbp_rule_t;
+
+/**
+ * A Group Based Policy Contract.
+ * Determines the ACL that applies to traffic passing between two endpoint groups
+ */
+typedef struct gbp_contract_t_
+{
+ /**
+ * source and destination EPGs
+ */
+ gbp_contract_key_t gc_key;
+
+ u32 gc_acl_index;
+ u32 gc_lc_index;
+
+ /**
+   * The rules to apply to packets from the source to the destination EPG
+ */
+ index_t *gc_rules;
+
+ /**
+ * An ethertype whitelist
+ */
+ u16 *gc_allowed_ethertypes;
+} gbp_contract_t;
+
+/**
+ * EPG src,dst pair to ACL mapping table, aka contract DB
+ */
+typedef struct gbp_contract_db_t_
+{
+ /**
+ * We can form a u64 key from the pair, so use a simple hash table
+ */
+ uword *gc_hash;
+} gbp_contract_db_t;
+
+extern int gbp_contract_update (gbp_scope_t scope,
+ sclass_t sclass,
+ sclass_t dclass,
+ u32 acl_index,
+ index_t * rules,
+ u16 * allowed_ethertypes, u32 * stats_index);
+extern int gbp_contract_delete (gbp_scope_t scope, sclass_t sclass,
+ sclass_t dclass);
+
+extern index_t gbp_rule_alloc (gbp_rule_action_t action,
+ gbp_hash_mode_t hash_mode, index_t * nhs);
+extern void gbp_rule_free (index_t gui);
+extern index_t gbp_next_hop_alloc (const ip46_address_t * ip,
+ index_t grd,
+ const mac_address_t * mac, index_t gbd);
+
+typedef int (*gbp_contract_cb_t) (gbp_contract_t * gbpe, void *ctx);
+extern void gbp_contract_walk (gbp_contract_cb_t bgpe, void *ctx);
+
+extern u8 *format_gbp_rule_action (u8 * s, va_list * args);
+extern u8 *format_gbp_contract (u8 * s, va_list * args);
+
+/**
+ * DP functions and databases
+ */
+extern gbp_contract_db_t gbp_contract_db;
+
+always_inline index_t
+gbp_contract_find (gbp_contract_key_t * key)
+{
+ uword *p;
+
+ p = hash_get (gbp_contract_db.gc_hash, key->as_u64);
+
+ if (NULL != p)
+ return (p[0]);
+
+ return (INDEX_INVALID);
+}
+
+extern gbp_contract_t *gbp_contract_pool;
+
+always_inline gbp_contract_t *
+gbp_contract_get (index_t gci)
+{
+ return (pool_elt_at_index (gbp_contract_pool, gci));
+}
+
+extern gbp_rule_t *gbp_rule_pool;
+
+always_inline gbp_rule_t *
+gbp_rule_get (index_t gui)
+{
+ return (pool_elt_at_index (gbp_rule_pool, gui));
+}
+
+extern vlib_combined_counter_main_t gbp_contract_permit_counters;
+extern vlib_combined_counter_main_t gbp_contract_drop_counters;
+
+typedef enum
+{
+ GBP_CONTRACT_APPLY_L2,
+ GBP_CONTRACT_APPLY_IP4,
+ GBP_CONTRACT_APPLY_IP6,
+} gbp_contract_apply_type_t;
+
+static_always_inline gbp_rule_action_t
+gbp_contract_apply (vlib_main_t * vm, gbp_main_t * gm,
+ gbp_contract_key_t * key, vlib_buffer_t * b,
+ gbp_rule_t ** rule, u32 * intra, u32 * sclass1,
+ u32 * acl_match, u32 * rule_match,
+ gbp_contract_error_t * err,
+ gbp_contract_apply_type_t type)
+{
+ fa_5tuple_opaque_t fa_5tuple;
+ const gbp_contract_t *contract;
+ index_t contract_index;
+ u32 acl_pos, trace_bitmap;
+ u16 etype;
+ u8 ip6, action;
+
+ *rule = 0;
+ trace_bitmap = 0;
+
+ if (key->gck_src == key->gck_dst)
+ {
+ /* intra-epg allowed */
+ (*intra)++;
+ *err = GBP_CONTRACT_ERROR_ALLOW_INTRA;
+ return GBP_RULE_PERMIT;
+ }
+
+ if (1 == key->gck_src || 1 == key->gck_dst)
+ {
+ /* sclass 1 allowed */
+ (*sclass1)++;
+ *err = GBP_CONTRACT_ERROR_ALLOW_SCLASS_1;
+ return GBP_RULE_PERMIT;
+ }
+
+ /* look for contract */
+ contract_index = gbp_contract_find (key);
+ if (INDEX_INVALID == contract_index)
+ {
+ *err = GBP_CONTRACT_ERROR_DROP_NO_CONTRACT;
+ return GBP_RULE_DENY;
+ }
+
+ contract = gbp_contract_get (contract_index);
+
+ *err = GBP_CONTRACT_ERROR_DROP_CONTRACT;
+
+ switch (type)
+ {
+ case GBP_CONTRACT_APPLY_IP4:
+ ip6 = 0;
+ break;
+ case GBP_CONTRACT_APPLY_IP6:
+ ip6 = 1;
+ break;
+ case GBP_CONTRACT_APPLY_L2:
+ {
+ /* check ethertype */
+ etype =
+ ((u16 *) (vlib_buffer_get_current (b) +
+ vnet_buffer (b)->l2.l2_len))[-1];
+
+ if (~0 == vec_search (contract->gc_allowed_ethertypes, etype))
+ {
+ *err = GBP_CONTRACT_ERROR_DROP_ETHER_TYPE;
+ goto contract_deny;
+ }
+
+ switch (clib_net_to_host_u16 (etype))
+ {
+ case ETHERNET_TYPE_IP4:
+ ip6 = 0;
+ break;
+ case ETHERNET_TYPE_IP6:
+ ip6 = 1;
+ break;
+ default:
+ goto contract_deny;
+ }
+ }
+ break;
+ }
+
+ /* check ACL */
+ action = 0;
+ acl_plugin_fill_5tuple_inline (gm->acl_plugin.p_acl_main,
+ contract->gc_lc_index, b, ip6,
+ GBP_CONTRACT_APPLY_L2 != type /* input */ ,
+ GBP_CONTRACT_APPLY_L2 == type /* l2_path */ ,
+ &fa_5tuple);
+ acl_plugin_match_5tuple_inline (gm->acl_plugin.p_acl_main,
+ contract->gc_lc_index, &fa_5tuple, ip6,
+ &action, &acl_pos, acl_match, rule_match,
+ &trace_bitmap);
+ if (action <= 0)
+ goto contract_deny;
+
+ if (PREDICT_FALSE (*rule_match >= vec_len (contract->gc_rules)))
+ {
+ *err = GBP_CONTRACT_ERROR_DROP_NO_RULE;
+ goto contract_deny;
+ }
+
+ *rule = gbp_rule_get (contract->gc_rules[*rule_match]);
+ switch ((*rule)->gu_action)
+ {
+ case GBP_RULE_PERMIT:
+ case GBP_RULE_REDIRECT:
+ *err = GBP_CONTRACT_ERROR_ALLOW_CONTRACT;
+ vlib_increment_combined_counter (&gbp_contract_permit_counters,
+ vm->thread_index, contract_index, 1,
+ vlib_buffer_length_in_chain (vm, b));
+ return (*rule)->gu_action;
+ case GBP_RULE_DENY:
+ break;
+ }
+
+contract_deny:
+ vlib_increment_combined_counter (&gbp_contract_drop_counters,
+ vm->thread_index, contract_index, 1,
+ vlib_buffer_length_in_chain (vm, b));
+ return GBP_RULE_DENY;
+}
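+
+/*
+ * A usage sketch (simplified from the policy nodes; scope0, dclass0 and
+ * DROP_NEXT are placeholders the caller derives from the buffer and its
+ * own next-node layout): drop unless the contract between the packet's
+ * sclass and dclass permits or redirects:
+ *
+ *   gbp_contract_key_t key0 = {
+ *     .gck_scope = scope0,
+ *     .gck_src = vnet_buffer2 (b0)->gbp.sclass,
+ *     .gck_dst = dclass0,
+ *   };
+ *   gbp_rule_t *rule0;
+ *   u32 intra = 0, sclass1 = 0, acl_match = 0, rule_match = 0;
+ *   gbp_contract_error_t err0;
+ *
+ *   if (GBP_RULE_DENY == gbp_contract_apply (vm, &gbp_main, &key0, b0,
+ *                                            &rule0, &intra, &sclass1,
+ *                                            &acl_match, &rule_match,
+ *                                            &err0, GBP_CONTRACT_APPLY_L2))
+ *     next0 = DROP_NEXT;
+ */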
+
+#endif /* __GBP_CONTRACT_H__ */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_endpoint.c b/extras/deprecated/plugins/gbp/gbp_endpoint.c
new file mode 100644
index 00000000000..b0cf64ced2d
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_endpoint.c
@@ -0,0 +1,1597 @@
+/*
+ * gbp_endpoint.c : Group Based Policy
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_endpoint_group.h>
+#include <plugins/gbp/gbp_itf.h>
+#include <plugins/gbp/gbp_scanner.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_vxlan.h>
+
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_fib.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/ip-neighbor/ip_neighbor.h>
+#include <vnet/ip-neighbor/ip4_neighbor.h>
+#include <vnet/ip-neighbor/ip6_neighbor.h>
+#include <vnet/fib/fib_walk.h>
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+
+static const char *gbp_endpoint_attr_names[] = GBP_ENDPOINT_ATTR_NAMES;
+
+/**
+ * EP DBs
+ */
+gbp_ep_db_t gbp_ep_db;
+
+static fib_source_t gbp_fib_source_hi;
+static fib_source_t gbp_fib_source_low;
+static fib_node_type_t gbp_endpoint_fib_type;
+static vlib_log_class_t gbp_ep_logger;
+
+#define GBP_ENDPOINT_DBG(...) \
+ vlib_log_debug (gbp_ep_logger, __VA_ARGS__);
+
+#define GBP_ENDPOINT_INFO(...) \
+ vlib_log_notice (gbp_ep_logger, __VA_ARGS__);
+
+/**
+ * Pool of GBP endpoints
+ */
+gbp_endpoint_t *gbp_endpoint_pool;
+
+/**
+ * A count of the number of dynamic entries
+ */
+static u32 gbp_n_learnt_endpoints;
+
+#define FOR_EACH_GBP_ENDPOINT_ATTR(_item) \
+ for (_item = GBP_ENDPOINT_ATTR_FIRST; \
+ _item < GBP_ENDPOINT_ATTR_LAST; \
+ _item++)
+
+u8 *
+format_gbp_endpoint_flags (u8 * s, va_list * args)
+{
+ gbp_endpoint_attr_t attr;
+ gbp_endpoint_flags_t flags = va_arg (*args, gbp_endpoint_flags_t);
+
+ FOR_EACH_GBP_ENDPOINT_ATTR (attr)
+ {
+ if ((1 << attr) & flags)
+ {
+ s = format (s, "%s,", gbp_endpoint_attr_names[attr]);
+ }
+ }
+
+ return (s);
+}
+
+int
+gbp_endpoint_is_remote (const gbp_endpoint_t * ge)
+{
+ return (! !(ge->ge_fwd.gef_flags & GBP_ENDPOINT_FLAG_REMOTE));
+}
+
+int
+gbp_endpoint_is_local (const gbp_endpoint_t * ge)
+{
+ return (!(ge->ge_fwd.gef_flags & GBP_ENDPOINT_FLAG_REMOTE));
+}
+
+int
+gbp_endpoint_is_external (const gbp_endpoint_t * ge)
+{
+ return (! !(ge->ge_fwd.gef_flags & GBP_ENDPOINT_FLAG_EXTERNAL));
+}
+
+int
+gbp_endpoint_is_learnt (const gbp_endpoint_t * ge)
+{
+ if (0 == vec_len (ge->ge_locs))
+ return 0;
+
+ /* DP is the highest source so if present it will be first */
+ return (ge->ge_locs[0].gel_src == GBP_ENDPOINT_SRC_DP);
+}
+
+static void
+gbp_endpoint_extract_key_mac_itf (const clib_bihash_kv_16_8_t * key,
+ mac_address_t * mac, u32 * sw_if_index)
+{
+ mac_address_from_u64 (mac, key->key[0]);
+ *sw_if_index = key->key[1];
+}
+
+static void
+gbp_endpoint_extract_key_ip_itf (const clib_bihash_kv_24_8_t * key,
+ ip46_address_t * ip, u32 * sw_if_index)
+{
+ ip->as_u64[0] = key->key[0];
+ ip->as_u64[1] = key->key[1];
+ *sw_if_index = key->key[2];
+}
+
+gbp_endpoint_t *
+gbp_endpoint_find_ip (const ip46_address_t * ip, u32 fib_index)
+{
+ clib_bihash_kv_24_8_t key, value;
+ int rv;
+
+ gbp_endpoint_mk_key_ip (ip, fib_index, &key);
+
+ rv = clib_bihash_search_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, &value);
+
+ if (0 != rv)
+ return NULL;
+
+ return (gbp_endpoint_get (value.value));
+}
+
+static void
+gbp_endpoint_add_itf (u32 sw_if_index, index_t gei)
+{
+ vec_validate_init_empty (gbp_ep_db.ged_by_sw_if_index, sw_if_index, ~0);
+
+ gbp_ep_db.ged_by_sw_if_index[sw_if_index] = gei;
+}
+
+static bool
+gbp_endpoint_add_mac (const mac_address_t * mac, u32 bd_index, index_t gei)
+{
+ clib_bihash_kv_16_8_t key;
+ int rv;
+
+ gbp_endpoint_mk_key_mac (mac->bytes, bd_index, &key);
+ key.value = gei;
+
+ rv = clib_bihash_add_del_16_8 (&gbp_ep_db.ged_by_mac_bd, &key, 1);
+
+
+ return (0 == rv);
+}
+
+static bool
+gbp_endpoint_add_ip (const ip46_address_t * ip, u32 fib_index, index_t gei)
+{
+ clib_bihash_kv_24_8_t key;
+ int rv;
+
+ gbp_endpoint_mk_key_ip (ip, fib_index, &key);
+ key.value = gei;
+
+ rv = clib_bihash_add_del_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, 1);
+
+ return (0 == rv);
+}
+
+static void
+gbp_endpoint_del_mac (const mac_address_t * mac, u32 bd_index)
+{
+ clib_bihash_kv_16_8_t key;
+
+ gbp_endpoint_mk_key_mac (mac->bytes, bd_index, &key);
+
+ clib_bihash_add_del_16_8 (&gbp_ep_db.ged_by_mac_bd, &key, 0);
+}
+
+static void
+gbp_endpoint_del_ip (const ip46_address_t * ip, u32 fib_index)
+{
+ clib_bihash_kv_24_8_t key;
+
+ gbp_endpoint_mk_key_ip (ip, fib_index, &key);
+
+ clib_bihash_add_del_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, 0);
+}
+
+static index_t
+gbp_endpoint_index (const gbp_endpoint_t * ge)
+{
+ return (ge - gbp_endpoint_pool);
+}
+
+static int
+gbp_endpoint_ip_is_equal (const fib_prefix_t * fp, const ip46_address_t * ip)
+{
+ return (ip46_address_is_equal (ip, &fp->fp_addr));
+}
+
+static void
+gbp_endpoint_ips_update (gbp_endpoint_t * ge,
+ const ip46_address_t * ips,
+ const gbp_route_domain_t * grd)
+{
+ const ip46_address_t *ip;
+ index_t gei, grdi;
+
+ gei = gbp_endpoint_index (ge);
+ grdi = gbp_route_domain_index (grd);
+
+ ASSERT ((ge->ge_key.gek_grd == INDEX_INVALID) ||
+ (ge->ge_key.gek_grd == grdi));
+
+ vec_foreach (ip, ips)
+ {
+ if (~0 == vec_search_with_function (ge->ge_key.gek_ips, ip,
+ gbp_endpoint_ip_is_equal))
+ {
+ fib_prefix_t *pfx;
+
+ vec_add2 (ge->ge_key.gek_ips, pfx, 1);
+ fib_prefix_from_ip46_addr (ip, pfx);
+
+ gbp_endpoint_add_ip (&pfx->fp_addr,
+ grd->grd_fib_index[pfx->fp_proto], gei);
+ }
+ ge->ge_key.gek_grd = grdi;
+ }
+}
+
+static gbp_endpoint_t *
+gbp_endpoint_alloc (const ip46_address_t * ips,
+ const gbp_route_domain_t * grd,
+ const mac_address_t * mac,
+ const gbp_bridge_domain_t * gbd)
+{
+ gbp_endpoint_t *ge;
+ index_t gei;
+
+ pool_get_zero (gbp_endpoint_pool, ge);
+
+ fib_node_init (&ge->ge_node, gbp_endpoint_fib_type);
+ gei = gbp_endpoint_index (ge);
+ ge->ge_key.gek_gbd =
+ ge->ge_key.gek_grd = ge->ge_fwd.gef_fib_index = INDEX_INVALID;
+ gbp_itf_hdl_reset (&ge->ge_fwd.gef_itf);
+ ge->ge_last_time = vlib_time_now (vlib_get_main ());
+ ge->ge_key.gek_gbd = gbp_bridge_domain_index (gbd);
+
+ if (NULL != mac)
+ {
+ mac_address_copy (&ge->ge_key.gek_mac, mac);
+ gbp_endpoint_add_mac (mac, gbd->gb_bd_index, gei);
+ }
+ gbp_endpoint_ips_update (ge, ips, grd);
+
+ return (ge);
+}
+
+static int
+gbp_endpoint_loc_is_equal (gbp_endpoint_loc_t * a, gbp_endpoint_loc_t * b)
+{
+ return (a->gel_src == b->gel_src);
+}
+
+static int
+gbp_endpoint_loc_cmp_for_sort (gbp_endpoint_loc_t * a, gbp_endpoint_loc_t * b)
+{
+ return (a->gel_src - b->gel_src);
+}
+
+static gbp_endpoint_loc_t *
+gbp_endpoint_loc_find (gbp_endpoint_t * ge, gbp_endpoint_src_t src)
+{
+ gbp_endpoint_loc_t gel = {
+ .gel_src = src,
+ };
+ u32 pos;
+
+ pos = vec_search_with_function (ge->ge_locs, &gel,
+ gbp_endpoint_loc_is_equal);
+
+ if (~0 != pos)
+ return (&ge->ge_locs[pos]);
+
+ return NULL;
+}
+
+static int
+gbp_endpoint_loc_unlock (gbp_endpoint_t * ge, gbp_endpoint_loc_t * gel)
+{
+ u32 pos;
+
+ gel->gel_locks--;
+
+ if (0 == gel->gel_locks)
+ {
+ pos = gel - ge->ge_locs;
+
+ vec_del1 (ge->ge_locs, pos);
+ if (vec_len (ge->ge_locs) > 1)
+ vec_sort_with_function (ge->ge_locs, gbp_endpoint_loc_cmp_for_sort);
+
+ /* This could be the last lock, so don't access the EP from
+ * this point on */
+ fib_node_unlock (&ge->ge_node);
+
+ return (1);
+ }
+ return (0);
+}
+
+static void
+gbp_endpoint_loc_destroy (gbp_endpoint_loc_t * gel)
+{
+ gbp_endpoint_group_unlock (gel->gel_epg);
+ gbp_itf_unlock (&gel->gel_itf);
+}
+
+static gbp_endpoint_loc_t *
+gbp_endpoint_loc_find_or_add (gbp_endpoint_t * ge, gbp_endpoint_src_t src)
+{
+ gbp_endpoint_loc_t gel = {
+ .gel_src = src,
+ .gel_epg = INDEX_INVALID,
+ .gel_itf = GBP_ITF_HDL_INVALID,
+ .gel_locks = 0,
+ };
+ u32 pos;
+
+ pos = vec_search_with_function (ge->ge_locs, &gel,
+ gbp_endpoint_loc_is_equal);
+
+ if (~0 == pos)
+ {
+ vec_add1 (ge->ge_locs, gel);
+
+ if (vec_len (ge->ge_locs) > 1)
+ {
+ vec_sort_with_function (ge->ge_locs, gbp_endpoint_loc_cmp_for_sort);
+
+ pos = vec_search_with_function (ge->ge_locs, &gel,
+ gbp_endpoint_loc_is_equal);
+ }
+ else
+ pos = 0;
+
+ /*
+ * it's the sources and children that lock the endpoints
+ */
+ fib_node_lock (&ge->ge_node);
+ }
+
+ return (&ge->ge_locs[pos]);
+}
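+
+/*
+ * A sketch of the resulting ordering, assuming both a data-plane and a
+ * control-plane source were added for the same endpoint:
+ *
+ *   gbp_endpoint_loc_find_or_add (ge, GBP_ENDPOINT_SRC_CP);
+ *   gbp_endpoint_loc_find_or_add (ge, GBP_ENDPOINT_SRC_DP);
+ *
+ *   ge->ge_locs[0].gel_src is GBP_ENDPOINT_SRC_DP   (best, lowest value)
+ *   ge->ge_locs[1].gel_src is GBP_ENDPOINT_SRC_CP
+ */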
+
+/**
+ * Find an EP in the DBs and check that, if we find it in the L2 DB,
+ * it has the same IPs as this update
+ */
+static int
+gbp_endpoint_find_for_update (const ip46_address_t * ips,
+ const gbp_route_domain_t * grd,
+ const mac_address_t * mac,
+ const gbp_bridge_domain_t * gbd,
+ gbp_endpoint_t ** ge)
+{
+ gbp_endpoint_t *l2_ge, *l3_ge, *tmp;
+
+ l2_ge = l3_ge = NULL;
+
+ if (NULL != mac && !mac_address_is_zero (mac))
+ {
+ ASSERT (gbd);
+ l2_ge = gbp_endpoint_find_mac (mac->bytes, gbd->gb_bd_index);
+ }
+ if (NULL != ips && !ip46_address_is_zero (ips))
+ {
+ const ip46_address_t *ip;
+ fib_protocol_t fproto;
+
+ ASSERT (grd);
+ vec_foreach (ip, ips)
+ {
+ fproto = fib_proto_from_ip46 (ip46_address_get_type (ip));
+
+ tmp = gbp_endpoint_find_ip (ip, grd->grd_fib_index[fproto]);
+
+ if (NULL == tmp)
+ /* not found */
+ continue;
+ else if (NULL == l3_ge)
+ /* first match against an IP address */
+ l3_ge = tmp;
+ else if (tmp == l3_ge)
+ /* another match against IP address that is the same endpoint */
+ continue;
+ else
+ {
+ /*
+ * a match against a different endpoint.
+ * this means the KEY of the EP is changing, which is not allowed
+ */
+ return (-1);
+ }
+ }
+ }
+
+ if (NULL == l2_ge && NULL == l3_ge)
+ /* not found */
+ *ge = NULL;
+ else if (NULL == l2_ge)
+ /* found at L3 */
+ *ge = l3_ge;
+ else if (NULL == l3_ge)
+ /* found at L2 */
+ *ge = l2_ge;
+ else
+ {
+ /* found both L3 and L2 - they must be the same else the KEY
+ * is changing
+ */
+ if (l2_ge == l3_ge)
+ *ge = l2_ge;
+ else
+ return (-1);
+ }
+
+ return (0);
+}
+
+static gbp_endpoint_src_t
+gbp_endpoint_get_best_src (const gbp_endpoint_t * ge)
+{
+ if (0 == vec_len (ge->ge_locs))
+ return (GBP_ENDPOINT_SRC_MAX);
+
+ return (ge->ge_locs[0].gel_src);
+}
+
+static void
+gbp_endpoint_n_learned (int n)
+{
+ gbp_n_learnt_endpoints += n;
+
+ if (n > 0 && 1 == gbp_n_learnt_endpoints)
+ {
+ vlib_process_signal_event (vlib_get_main (),
+ gbp_scanner_node.index,
+ GBP_ENDPOINT_SCAN_START, 0);
+ }
+ if (n < 0 && 0 == gbp_n_learnt_endpoints)
+ {
+ vlib_process_signal_event (vlib_get_main (),
+ gbp_scanner_node.index,
+ GBP_ENDPOINT_SCAN_STOP, 0);
+ }
+}
+
+static void
+gbp_endpoint_loc_update (const gbp_endpoint_t * ge,
+ gbp_endpoint_loc_t * gel,
+ const gbp_bridge_domain_t * gb,
+ u32 sw_if_index,
+ index_t ggi,
+ gbp_endpoint_flags_t flags,
+ const ip46_address_t * tun_src,
+ const ip46_address_t * tun_dst)
+{
+ int was_learnt, is_learnt;
+
+ gel->gel_locks++;
+ was_learnt = ! !(gel->gel_flags & GBP_ENDPOINT_FLAG_REMOTE);
+ gel->gel_flags = flags;
+ is_learnt = ! !(gel->gel_flags & GBP_ENDPOINT_FLAG_REMOTE);
+
+ gbp_endpoint_n_learned (is_learnt - was_learnt);
+
+ /*
+ * update the EPG
+ */
+ gbp_endpoint_group_lock (ggi);
+ gbp_endpoint_group_unlock (gel->gel_epg);
+ gel->gel_epg = ggi;
+
+ if (gel->gel_flags & GBP_ENDPOINT_FLAG_REMOTE)
+ {
+ if (NULL != tun_src)
+ ip46_address_copy (&gel->tun.gel_src, tun_src);
+ if (NULL != tun_dst)
+ ip46_address_copy (&gel->tun.gel_dst, tun_dst);
+
+ if (ip46_address_is_multicast (&gel->tun.gel_src))
+ {
+ /*
+ * we learnt the EP from the multicast tunnel.
+ * Create a unicast TEP from the packet's source
+ * and the fixed address of the BD's parent tunnel
+ */
+ const gbp_vxlan_tunnel_t *gt;
+
+ gt = gbp_vxlan_tunnel_get (gb->gb_vni);
+
+ if (NULL != gt)
+ {
+ ip46_address_copy (&gel->tun.gel_src, &gt->gt_src);
+ sw_if_index = gt->gt_sw_if_index;
+ }
+ }
+
+ /*
+ * the input interface may be the parent GBP-vxlan interface,
+ * create a child vxlan-gbp tunnel and use that as the endpoint's
+ * interface.
+ */
+ gbp_itf_hdl_t old = gel->gel_itf;
+
+ switch (gbp_vxlan_tunnel_get_type (sw_if_index))
+ {
+ case GBP_VXLAN_TEMPLATE_TUNNEL:
+ gel->tun.gel_parent_sw_if_index = sw_if_index;
+ gel->gel_itf = gbp_vxlan_tunnel_clone_and_lock (sw_if_index,
+ &gel->tun.gel_src,
+ &gel->tun.gel_dst);
+ break;
+ case VXLAN_GBP_TUNNEL:
+ gel->tun.gel_parent_sw_if_index =
+ vxlan_gbp_tunnel_get_parent (sw_if_index);
+ gel->gel_itf = vxlan_gbp_tunnel_lock_itf (sw_if_index);
+ break;
+ }
+
+ gbp_itf_unlock (&old);
+ }
+ else
+ {
+ gel->gel_itf = gbp_itf_l2_add_and_lock (sw_if_index,
+ ge->ge_key.gek_gbd);
+ }
+}
+
+static void
+gbb_endpoint_fwd_reset (gbp_endpoint_t * ge)
+{
+ const gbp_route_domain_t *grd;
+ const gbp_bridge_domain_t *gbd;
+ gbp_endpoint_fwd_t *gef;
+ const fib_prefix_t *pfx;
+ index_t *ai;
+
+ gbd = gbp_bridge_domain_get (ge->ge_key.gek_gbd);
+ gef = &ge->ge_fwd;
+
+ vec_foreach (pfx, ge->ge_key.gek_ips)
+ {
+ u32 fib_index;
+
+ grd = gbp_route_domain_get (ge->ge_key.gek_grd);
+ fib_index = grd->grd_fib_index[pfx->fp_proto];
+
+ bd_add_del_ip_mac (gbd->gb_bd_index, fib_proto_to_ip46 (pfx->fp_proto),
+ &pfx->fp_addr, &ge->ge_key.gek_mac, 0);
+
+ /*
+ * remove a host route
+ */
+ if (gbp_endpoint_is_remote (ge))
+ {
+ fib_table_entry_special_remove (fib_index, pfx, gbp_fib_source_hi);
+ }
+
+ fib_table_entry_delete (fib_index, pfx, gbp_fib_source_low);
+ }
+ vec_foreach (ai, gef->gef_adjs)
+ {
+ adj_unlock (*ai);
+ }
+
+ if (gbp_itf_hdl_is_valid (gef->gef_itf))
+ {
+ l2fib_del_entry (ge->ge_key.gek_mac.bytes,
+ gbd->gb_bd_index,
+ gbp_itf_get_sw_if_index (gef->gef_itf));
+ }
+
+ gbp_itf_unlock (&gef->gef_itf);
+ vec_free (gef->gef_adjs);
+}
+
+static void
+gbb_endpoint_fwd_recalc (gbp_endpoint_t * ge)
+{
+ const gbp_bridge_domain_t *gbd;
+ const gbp_endpoint_group_t *gg;
+ const gbp_route_domain_t *grd;
+ gbp_endpoint_loc_t *gel;
+ gbp_endpoint_fwd_t *gef;
+ const fib_prefix_t *pfx;
+ index_t gei;
+
+ /*
+ * locations are sorted in source priority order
+ */
+ gei = gbp_endpoint_index (ge);
+ gel = &ge->ge_locs[0];
+ gef = &ge->ge_fwd;
+ gbd = gbp_bridge_domain_get (ge->ge_key.gek_gbd);
+
+ gef->gef_flags = gel->gel_flags;
+
+ if (INDEX_INVALID != gel->gel_epg)
+ {
+ gg = gbp_endpoint_group_get (gel->gel_epg);
+ gef->gef_sclass = gg->gg_sclass;
+ }
+ else
+ {
+ gg = NULL;
+ }
+
+ gef->gef_itf = gbp_itf_clone_and_lock (gel->gel_itf);
+
+ if (!mac_address_is_zero (&ge->ge_key.gek_mac))
+ {
+ gbp_itf_l2_set_input_feature (gef->gef_itf, L2INPUT_FEAT_GBP_FWD);
+
+ if (gbp_endpoint_is_remote (ge) || gbp_endpoint_is_external (ge))
+ {
+ /*
+ * bridged packets to external endpoints should be classified
+ * based on the EP's/BD's EPG
+ */
+ gbp_itf_l2_set_output_feature (gef->gef_itf,
+ L2OUTPUT_FEAT_GBP_POLICY_MAC);
+ }
+ else
+ {
+ gbp_endpoint_add_itf (gbp_itf_get_sw_if_index (gef->gef_itf), gei);
+ gbp_itf_l2_set_output_feature (gef->gef_itf,
+ L2OUTPUT_FEAT_GBP_POLICY_PORT);
+ }
+ l2fib_add_entry (ge->ge_key.gek_mac.bytes,
+ gbd->gb_bd_index,
+ gbp_itf_get_sw_if_index (gef->gef_itf),
+ L2FIB_ENTRY_RESULT_FLAG_STATIC);
+ }
+
+ vec_foreach (pfx, ge->ge_key.gek_ips)
+ {
+ ethernet_header_t *eth;
+ u32 ip_sw_if_index;
+ u32 fib_index;
+ u8 *rewrite;
+ index_t ai;
+
+ rewrite = NULL;
+ grd = gbp_route_domain_get (ge->ge_key.gek_grd);
+ fib_index = grd->grd_fib_index[pfx->fp_proto];
+ gef->gef_fib_index = fib_index;
+
+ bd_add_del_ip_mac (gbd->gb_bd_index, fib_proto_to_ip46 (pfx->fp_proto),
+ &pfx->fp_addr, &ge->ge_key.gek_mac, 1);
+
+ /*
+ * add a host route via the EPG's BVI. We need this because the
+ * adj-fib does not install (due to the cover refinement check)
+ * since the BVI's prefix is /32
+ */
+ vec_validate (rewrite, sizeof (*eth) - 1);
+ eth = (ethernet_header_t *) rewrite;
+
+ eth->type = clib_host_to_net_u16 ((pfx->fp_proto == FIB_PROTOCOL_IP4 ?
+ ETHERNET_TYPE_IP4 :
+ ETHERNET_TYPE_IP6));
+
+ if (gbp_endpoint_is_remote (ge))
+ {
+ /*
+ * for dynamic EPs we must add the IP adjacency via the learned
+ * tunnel, since the BD will not contain the EP's MAC (it was
+ * L3 learned). The dst MAC address used is the BD's MAC.
+ */
+ ip_sw_if_index = gbp_itf_get_sw_if_index (gef->gef_itf);
+
+ mac_address_to_bytes (gbp_route_domain_get_local_mac (),
+ eth->src_address);
+ mac_address_to_bytes (gbp_route_domain_get_remote_mac (),
+ eth->dst_address);
+ }
+ else
+ {
+ /*
+ * for the static EPs we add the IP adjacency via the BVI
+ * knowing that the BD has the MAC address to route to and
+ * that policy will be applied on egress to the EP's port
+ */
+ ip_sw_if_index = gbd->gb_bvi_sw_if_index;
+
+ clib_memcpy (eth->src_address,
+ vnet_sw_interface_get_hw_address (vnet_get_main (),
+ ip_sw_if_index),
+ sizeof (eth->src_address));
+ mac_address_to_bytes (&ge->ge_key.gek_mac, eth->dst_address);
+ }
+
+ fib_table_entry_path_add (fib_index, pfx,
+ gbp_fib_source_low,
+ FIB_ENTRY_FLAG_NONE,
+ fib_proto_to_dpo (pfx->fp_proto),
+ &pfx->fp_addr, ip_sw_if_index,
+ ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE);
+
+ ai = adj_nbr_add_or_lock_w_rewrite (pfx->fp_proto,
+ fib_proto_to_link (pfx->fp_proto),
+ &pfx->fp_addr,
+ ip_sw_if_index, rewrite);
+ vec_add1 (gef->gef_adjs, ai);
+
+ /*
+ * if the endpoint is external then routed packets to it must be
+ * classified to the BD's EPG, but this will happen anyway with
+ * the GBP_MAC classification.
+ */
+
+ if (NULL != gg)
+ {
+ if (gbp_endpoint_is_remote (ge))
+ {
+ dpo_id_t policy_dpo = DPO_INVALID;
+
+ /*
+ * interpose a policy DPO from the endpoint so that policy
+ * is applied
+ */
+ gbp_policy_dpo_add_or_lock (fib_proto_to_dpo (pfx->fp_proto),
+ grd->grd_scope,
+ gg->gg_sclass, ~0, &policy_dpo);
+
+ fib_table_entry_special_dpo_add (fib_index, pfx,
+ gbp_fib_source_hi,
+ FIB_ENTRY_FLAG_INTERPOSE,
+ &policy_dpo);
+ dpo_reset (&policy_dpo);
+ }
+
+ /*
+ * send a gratuitous ARP on the EPG's uplink. this is done so
+ * that if this EP has moved from some other place in the
+ * 'fabric', upstream devices are informed
+ */
+ if (gbp_endpoint_is_local (ge) && ~0 != gg->gg_uplink_sw_if_index)
+ {
+ gbp_endpoint_add_itf (gbp_itf_get_sw_if_index (gef->gef_itf),
+ gei);
+ if (FIB_PROTOCOL_IP4 == pfx->fp_proto)
+ ip4_neighbor_advertise (vlib_get_main (),
+ vnet_get_main (),
+ gg->gg_uplink_sw_if_index,
+ &pfx->fp_addr.ip4);
+ else
+ ip6_neighbor_advertise (vlib_get_main (),
+ vnet_get_main (),
+ gg->gg_uplink_sw_if_index,
+ &pfx->fp_addr.ip6);
+ }
+ }
+ }
+
+ if (gbp_endpoint_is_external (ge))
+ {
+ gbp_itf_l2_set_input_feature (gef->gef_itf,
+ L2INPUT_FEAT_GBP_LPM_CLASSIFY);
+ }
+ else if (gbp_endpoint_is_local (ge))
+ {
+ /*
+ * non-remote endpoints (i.e. those not arriving on iVXLAN
+ * tunnels) need to be classified based on the input interface.
+ * We enable the GBP-FWD feature only if the group has an uplink
+ * interface (on which the GBP-FWD feature would send UU traffic).
+ * External endpoints get classified based on an LPM match
+ */
+ l2input_feat_masks_t feats = L2INPUT_FEAT_GBP_SRC_CLASSIFY;
+
+ if (NULL != gg && ~0 != gg->gg_uplink_sw_if_index)
+ feats |= L2INPUT_FEAT_GBP_FWD;
+ gbp_itf_l2_set_input_feature (gef->gef_itf, feats);
+ }
+
+ /*
+ * update children with the new forwarding info
+ */
+ fib_node_back_walk_ctx_t bw_ctx = {
+ .fnbw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE,
+ .fnbw_flags = FIB_NODE_BW_FLAG_FORCE_SYNC,
+ };
+
+ fib_walk_sync (gbp_endpoint_fib_type, gei, &bw_ctx);
+}
+
+int
+gbp_endpoint_update_and_lock (gbp_endpoint_src_t src,
+ u32 sw_if_index,
+ const ip46_address_t * ips,
+ const mac_address_t * mac,
+ index_t gbdi, index_t grdi,
+ sclass_t sclass,
+ gbp_endpoint_flags_t flags,
+ const ip46_address_t * tun_src,
+ const ip46_address_t * tun_dst, u32 * handle)
+{
+ gbp_bridge_domain_t *gbd;
+ gbp_endpoint_group_t *gg;
+ gbp_endpoint_src_t best;
+ gbp_route_domain_t *grd;
+ gbp_endpoint_loc_t *gel;
+ gbp_endpoint_t *ge;
+ index_t ggi, gei;
+ int rv;
+
+ if (~0 == sw_if_index)
+ return (VNET_API_ERROR_INVALID_SW_IF_INDEX);
+
+ ge = NULL;
+ gg = NULL;
+
+ /*
+ * we need to determine the bridge-domain, either from the EPG or
+ * the BD passed
+ */
+ if (SCLASS_INVALID != sclass)
+ {
+ ggi = gbp_endpoint_group_find (sclass);
+
+ if (INDEX_INVALID == ggi)
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+
+ gg = gbp_endpoint_group_get (ggi);
+ gbdi = gg->gg_gbd;
+ grdi = gg->gg_rd;
+ }
+ else
+ {
+ if (INDEX_INVALID == gbdi)
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+ if (INDEX_INVALID == grdi)
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+ ggi = INDEX_INVALID;
+ }
+
+ gbd = gbp_bridge_domain_get (gbdi);
+ grd = gbp_route_domain_get (grdi);
+ rv = gbp_endpoint_find_for_update (ips, grd, mac, gbd, &ge);
+
+ if (0 != rv)
+ return (rv);
+
+ if (NULL == ge)
+ {
+ ge = gbp_endpoint_alloc (ips, grd, mac, gbd);
+ }
+ else
+ {
+ gbp_endpoint_ips_update (ge, ips, grd);
+ }
+
+ best = gbp_endpoint_get_best_src (ge);
+ gei = gbp_endpoint_index (ge);
+ gel = gbp_endpoint_loc_find_or_add (ge, src);
+
+ gbp_endpoint_loc_update (ge, gel, gbd, sw_if_index, ggi, flags,
+ tun_src, tun_dst);
+
+ if (src <= best)
+ {
+ /*
+ * either the best source has been updated or we have a new best source
+ */
+ gbb_endpoint_fwd_reset (ge);
+ gbb_endpoint_fwd_recalc (ge);
+ }
+ else
+ {
+ /*
+ * an update to a lower priority source, so there is nothing to do
+ */
+ }
+
+ if (handle)
+ *handle = gei;
+
+ GBP_ENDPOINT_INFO ("update: %U", format_gbp_endpoint, gei);
+
+ return (0);
+}
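+
+/*
+ * A minimal usage sketch, assuming a control-plane caller; sw_if_index,
+ * ep_ip, mac and the sclass value 100 are all hypothetical:
+ *
+ *   ip46_address_t *ips = NULL;
+ *   u32 handle;
+ *
+ *   vec_add1 (ips, ep_ip);
+ *   gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_CP,
+ *                                 sw_if_index, ips, &mac,
+ *                                 INDEX_INVALID, INDEX_INVALID,
+ *                                 100, GBP_ENDPOINT_FLAG_NONE,
+ *                                 NULL, NULL, &handle);
+ *   vec_free (ips);
+ *
+ * and the matching removal once the source no longer needs the EP:
+ *
+ *   gbp_endpoint_unlock (GBP_ENDPOINT_SRC_CP, handle);
+ */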
+
+void
+gbp_endpoint_unlock (gbp_endpoint_src_t src, index_t gei)
+{
+ gbp_endpoint_loc_t *gel, gel_copy;
+ gbp_endpoint_src_t best;
+ gbp_endpoint_t *ge;
+ int removed;
+
+ if (pool_is_free_index (gbp_endpoint_pool, gei))
+ return;
+
+ GBP_ENDPOINT_INFO ("delete: %U", format_gbp_endpoint, gei);
+
+ ge = gbp_endpoint_get (gei);
+
+ gel = gbp_endpoint_loc_find (ge, src);
+
+ if (NULL == gel)
+ return;
+
+ /*
+ * lock the EP so we can control when it is deleted
+ */
+ fib_node_lock (&ge->ge_node);
+ best = gbp_endpoint_get_best_src (ge);
+
+ /*
+ * copy the location info since we'll lose it when it's removed from
+ * the vector
+ */
+ clib_memcpy (&gel_copy, gel, sizeof (gel_copy));
+
+ /*
+ * remove the source we no longer need
+ */
+ removed = gbp_endpoint_loc_unlock (ge, gel);
+
+ if (src == best)
+ {
+ /*
+ * we have removed the old best source => recalculate fwding
+ */
+ if (0 == vec_len (ge->ge_locs))
+ {
+ /*
+ * if there are no more sources left, then we need only release
+ * the fwding resources held and then this EP is gone.
+ */
+ gbb_endpoint_fwd_reset (ge);
+ }
+ else
+ {
+ /*
+ * else there are more sources. release the old and get new
+ * fwding objects
+ */
+ gbb_endpoint_fwd_reset (ge);
+ gbb_endpoint_fwd_recalc (ge);
+ }
+ }
+ /*
+ * else
+ * we removed a lower priority source so we need to do nothing
+ */
+
+ /*
+ * clear up any resources held by the source
+ */
+ if (removed)
+ gbp_endpoint_loc_destroy (&gel_copy);
+
+ /*
+ * remove the lock taken above
+ */
+ fib_node_unlock (&ge->ge_node);
+ /*
+ * We may have removed the last source and so this EP is now TOAST
+ * DO NOTHING BELOW HERE
+ */
+}
+
+u32
+gbp_endpoint_child_add (index_t gei,
+ fib_node_type_t type, fib_node_index_t index)
+{
+ return (fib_node_child_add (gbp_endpoint_fib_type, gei, type, index));
+}
+
+void
+gbp_endpoint_child_remove (index_t gei, u32 sibling)
+{
+ return (fib_node_child_remove (gbp_endpoint_fib_type, gei, sibling));
+}
+
+typedef struct gbp_endpoint_flush_ctx_t_
+{
+ u32 sw_if_index;
+ gbp_endpoint_src_t src;
+ index_t *geis;
+} gbp_endpoint_flush_ctx_t;
+
+static walk_rc_t
+gbp_endpoint_flush_cb (index_t gei, void *args)
+{
+ gbp_endpoint_flush_ctx_t *ctx = args;
+ gbp_endpoint_loc_t *gel;
+ gbp_endpoint_t *ge;
+
+ ge = gbp_endpoint_get (gei);
+ gel = gbp_endpoint_loc_find (ge, ctx->src);
+
+ if ((NULL != gel) && ctx->sw_if_index == gel->tun.gel_parent_sw_if_index)
+ {
+ vec_add1 (ctx->geis, gei);
+ }
+
+ return (WALK_CONTINUE);
+}
+
+/**
+ * remove all learnt endpoints using the interface
+ */
+void
+gbp_endpoint_flush (gbp_endpoint_src_t src, u32 sw_if_index)
+{
+ gbp_endpoint_flush_ctx_t ctx = {
+ .sw_if_index = sw_if_index,
+ .src = src,
+ };
+ index_t *gei;
+
+ GBP_ENDPOINT_INFO ("flush: %U %U",
+ format_gbp_endpoint_src, src,
+ format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
+ gbp_endpoint_walk (gbp_endpoint_flush_cb, &ctx);
+
+ vec_foreach (gei, ctx.geis)
+ {
+ gbp_endpoint_unlock (src, *gei);
+ }
+
+ vec_free (ctx.geis);
+}
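+
+/*
+ * Illustrative usage: remove every endpoint this source learnt over a
+ * tunnel interface, e.g. when the tunnel goes down (sw_if_index is
+ * hypothetical):
+ *
+ *   gbp_endpoint_flush (GBP_ENDPOINT_SRC_DP, sw_if_index);
+ */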
+
+void
+gbp_endpoint_walk (gbp_endpoint_cb_t cb, void *ctx)
+{
+ u32 index;
+
+ /* *INDENT-OFF* */
+ pool_foreach_index (index, gbp_endpoint_pool)
+ {
+ if (!cb(index, ctx))
+ break;
+ }
+ /* *INDENT-ON* */
+}
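+
+/*
+ * Example walk callback (illustrative only): count the endpoints in
+ * the pool.
+ *
+ *   static walk_rc_t
+ *   count_eps (index_t gei, void *ctx)
+ *   {
+ *     u32 *n = ctx;
+ *     (*n)++;
+ *     return (WALK_CONTINUE);
+ *   }
+ *
+ *   u32 n_eps = 0;
+ *   gbp_endpoint_walk (count_eps, &n_eps);
+ */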
+
+static clib_error_t *
+gbp_endpoint_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ip46_address_t ip = ip46_address_initializer, *ips = NULL;
+ mac_address_t mac = ZERO_MAC_ADDRESS;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sclass = SCLASS_INVALID;
+ u32 handle = INDEX_INVALID;
+ u32 sw_if_index = ~0;
+ u32 flags = GBP_ENDPOINT_FLAG_NONE;
+ u8 add = 1;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ ip46_address_reset (&ip);
+
+ if (unformat (input, "%U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else if (unformat (input, "add"))
+ add = 1;
+ else if (unformat (input, "del"))
+ add = 0;
+ else if (unformat (input, "sclass %d", &sclass))
+ ;
+ else if (unformat (input, "handle %d", &handle))
+ ;
+ else if (unformat (input, "ip %U", unformat_ip4_address, &ip.ip4))
+ vec_add1 (ips, ip);
+ else if (unformat (input, "ip %U", unformat_ip6_address, &ip.ip6))
+ vec_add1 (ips, ip);
+ else if (unformat (input, "mac %U", unformat_mac_address, &mac))
+ ;
+ else if (unformat (input, "flags 0x%x", &flags))
+ ;
+ else
+ break;
+ }
+
+ if (add)
+ {
+ if (~0 == sw_if_index)
+ return clib_error_return (0, "interface must be specified");
+ if (SCLASS_INVALID == sclass)
+ return clib_error_return (0, "SCLASS must be specified");
+
+ rv =
+ gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_CP,
+ sw_if_index, ips, &mac,
+ INDEX_INVALID, INDEX_INVALID,
+ sclass, flags, NULL, NULL, &handle);
+
+ if (rv)
+ return clib_error_return (0, "GBP Endpoint update returned %d", rv);
+ else
+ vlib_cli_output (vm, "handle %d\n", handle);
+ }
+ else
+ {
+ if (INDEX_INVALID == handle)
+ return clib_error_return (0, "handle must be specified");
+
+ gbp_endpoint_unlock (GBP_ENDPOINT_SRC_CP, handle);
+ }
+
+ vec_free (ips);
+
+ return (NULL);
+}
+
+/*?
+ * Configure a GBP Endpoint
+ *
+ * @cliexpar
+ * @cliexstart{gbp endpoint del <handle> | [add] <interface> sclass <SCLASS> ip <IP> mac <MAC> [flags <flags>]}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_endpoint_cli_node, static) = {
+ .path = "gbp endpoint",
+ .short_help = "gbp endpoint del <handle> | [add] <interface> sclass <SCLASS> ip <IP> mac <MAC> [flags <flags>]",
+ .function = gbp_endpoint_cli,
+};
+/* *INDENT-ON* */
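+
+/*
+ * Example session (interface name, addresses and handle value are
+ * illustrative):
+ *
+ *   vpp# gbp endpoint GigabitEthernet0/8/0 sclass 100 ip 10.0.0.1 mac 00:11:22:33:44:55
+ *   handle 0
+ *   vpp# gbp endpoint del handle 0
+ */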
+
+u8 *
+format_gbp_endpoint_src (u8 * s, va_list * args)
+{
+ gbp_endpoint_src_t action = va_arg (*args, gbp_endpoint_src_t);
+
+ switch (action)
+ {
+#define _(v,a) case GBP_ENDPOINT_SRC_##v: return (format (s, "%s", a));
+ foreach_gbp_endpoint_src
+#undef _
+ }
+
+ return (format (s, "unknown"));
+}
+
+static u8 *
+format_gbp_endpoint_fwd (u8 * s, va_list * args)
+{
+ gbp_endpoint_fwd_t *gef = va_arg (*args, gbp_endpoint_fwd_t *);
+
+ s = format (s, "fwd:");
+ s = format (s, "\n itf:[%U]", format_gbp_itf_hdl, gef->gef_itf);
+ if (GBP_ENDPOINT_FLAG_NONE != gef->gef_flags)
+ {
+ s = format (s, " flags:%U", format_gbp_endpoint_flags, gef->gef_flags);
+ }
+
+ return (s);
+}
+
+static u8 *
+format_gbp_endpoint_key (u8 * s, va_list * args)
+{
+ gbp_endpoint_key_t *gek = va_arg (*args, gbp_endpoint_key_t *);
+ const fib_prefix_t *pfx;
+
+ s = format (s, "ips:[");
+
+ vec_foreach (pfx, gek->gek_ips)
+ {
+ s = format (s, "%U, ", format_fib_prefix, pfx);
+ }
+ s = format (s, "]");
+
+ s = format (s, " mac:%U", format_mac_address_t, &gek->gek_mac);
+
+ return (s);
+}
+
+static u8 *
+format_gbp_endpoint_loc (u8 * s, va_list * args)
+{
+ gbp_endpoint_loc_t *gel = va_arg (*args, gbp_endpoint_loc_t *);
+
+ s = format (s, "%U", format_gbp_endpoint_src, gel->gel_src);
+ s = format (s, "\n EPG:%d [%U]", gel->gel_epg,
+ format_gbp_itf_hdl, gel->gel_itf);
+
+ if (GBP_ENDPOINT_FLAG_NONE != gel->gel_flags)
+ {
+ s = format (s, " flags:%U", format_gbp_endpoint_flags, gel->gel_flags);
+ }
+ if (GBP_ENDPOINT_FLAG_REMOTE & gel->gel_flags)
+ {
+ s = format (s, " tun:[");
+ s = format (s, "parent:%U", format_vnet_sw_if_index_name,
+ vnet_get_main (), gel->tun.gel_parent_sw_if_index);
+ s = format (s, " {%U,%U}]",
+ format_ip46_address, &gel->tun.gel_src, IP46_TYPE_ANY,
+ format_ip46_address, &gel->tun.gel_dst, IP46_TYPE_ANY);
+ }
+
+ return (s);
+}
+
+u8 *
+format_gbp_endpoint (u8 * s, va_list * args)
+{
+ index_t gei = va_arg (*args, index_t);
+ gbp_endpoint_loc_t *gel;
+ gbp_endpoint_t *ge;
+
+ ge = gbp_endpoint_get (gei);
+
+ s = format (s, "[@%d] %U", gei, format_gbp_endpoint_key, &ge->ge_key);
+ s = format (s, " last-time:[%f]", ge->ge_last_time);
+
+ vec_foreach (gel, ge->ge_locs)
+ {
+ s = format (s, "\n %U", format_gbp_endpoint_loc, gel);
+ }
+ s = format (s, "\n %U", format_gbp_endpoint_fwd, &ge->ge_fwd);
+
+ return s;
+}
+
+static walk_rc_t
+gbp_endpoint_show_one (index_t gei, void *ctx)
+{
+ vlib_main_t *vm;
+
+ vm = ctx;
+ vlib_cli_output (vm, " %U", format_gbp_endpoint, gei);
+
+ return (WALK_CONTINUE);
+}
+
+static int
+gbp_endpoint_walk_ip_itf (clib_bihash_kv_24_8_t * kvp, void *arg)
+{
+ ip46_address_t ip;
+ vlib_main_t *vm;
+ u32 sw_if_index;
+
+ vm = arg;
+
+ gbp_endpoint_extract_key_ip_itf (kvp, &ip, &sw_if_index);
+
+ vlib_cli_output (vm, " {%U, %U} -> %d",
+ format_ip46_address, &ip, IP46_TYPE_ANY,
+ format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index, kvp->value);
+ return (BIHASH_WALK_CONTINUE);
+}
+
+static int
+gbp_endpoint_walk_mac_itf (clib_bihash_kv_16_8_t * kvp, void *arg)
+{
+ mac_address_t mac;
+ vlib_main_t *vm;
+ u32 sw_if_index;
+
+ vm = arg;
+
+ gbp_endpoint_extract_key_mac_itf (kvp, &mac, &sw_if_index);
+
+ vlib_cli_output (vm, " {%U, %U} -> %d",
+ format_mac_address_t, &mac,
+ format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index, kvp->value);
+ return (BIHASH_WALK_CONTINUE);
+}
+
+static clib_error_t *
+gbp_endpoint_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u32 show_dbs, handle;
+
+ handle = INDEX_INVALID;
+ show_dbs = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &handle))
+ ;
+ else if (unformat (input, "db"))
+ show_dbs = 1;
+ else
+ break;
+ }
+
+ if (INDEX_INVALID != handle)
+ {
+ vlib_cli_output (vm, "%U", format_gbp_endpoint, handle);
+ }
+ else if (show_dbs)
+ {
+ vlib_cli_output (vm, "\nDatabases:");
+ clib_bihash_foreach_key_value_pair_24_8 (&gbp_ep_db.ged_by_ip_rd,
+ gbp_endpoint_walk_ip_itf, vm);
+ clib_bihash_foreach_key_value_pair_16_8
+ (&gbp_ep_db.ged_by_mac_bd, gbp_endpoint_walk_mac_itf, vm);
+ }
+ else
+ {
+ vlib_cli_output (vm, "Endpoints:");
+ gbp_endpoint_walk (gbp_endpoint_show_one, vm);
+ }
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy Endpoints and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp endpoint}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_endpoint_show_node, static) = {
+ .path = "show gbp endpoint",
+ .short_help = "show gbp endpoint\n",
+ .function = gbp_endpoint_show,
+};
+/* *INDENT-ON* */
+
+static void
+gbp_endpoint_check (index_t gei, f64 start_time)
+{
+ gbp_endpoint_group_t *gg;
+ gbp_endpoint_loc_t *gel;
+ gbp_endpoint_t *ge;
+
+ ge = gbp_endpoint_get (gei);
+ gel = gbp_endpoint_loc_find (ge, GBP_ENDPOINT_SRC_DP);
+
+ if (NULL != gel)
+ {
+ gg = gbp_endpoint_group_get (gel->gel_epg);
+
+ if ((start_time - ge->ge_last_time) >
+ gg->gg_retention.remote_ep_timeout)
+ {
+ gbp_endpoint_unlock (GBP_ENDPOINT_SRC_DP, gei);
+ }
+ }
+}
+
+static void
+gbp_endpoint_scan_l2 (vlib_main_t * vm)
+{
+ clib_bihash_16_8_t *gte_table = &gbp_ep_db.ged_by_mac_bd;
+ f64 last_start, start_time, delta_t;
+ int i, j, k;
+
+ if (!gte_table->instantiated)
+ return;
+
+ delta_t = 0;
+ last_start = start_time = vlib_time_now (vm);
+
+ for (i = 0; i < gte_table->nbuckets; i++)
+ {
+ clib_bihash_bucket_16_8_t *b;
+ clib_bihash_value_16_8_t *v;
+
+ /* allow no more than 20us without a pause */
+ delta_t = vlib_time_now (vm) - last_start;
+ if (delta_t > 20e-6)
+ {
+ /* suspend for 100 us */
+ vlib_process_suspend (vm, 100e-6);
+ last_start = vlib_time_now (vm);
+ }
+
+ b = clib_bihash_get_bucket_16_8 (gte_table, i);
+ if (clib_bihash_bucket_is_empty_16_8 (b))
+ continue;
+ v = clib_bihash_get_value_16_8 (gte_table, b->offset);
+
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (clib_bihash_is_free_16_8 (&v->kvp[k]))
+ continue;
+
+ gbp_endpoint_check (v->kvp[k].value, start_time);
+
+ /*
+ * Note: we may have just freed the bucket's backing
+ * storage, so check right here...
+ */
+ if (clib_bihash_bucket_is_empty_16_8 (b))
+ goto doublebreak;
+ }
+ v++;
+ }
+ doublebreak:
+ ;
+ }
+}
+
+static void
+gbp_endpoint_scan_l3 (vlib_main_t * vm)
+{
+ clib_bihash_24_8_t *gte_table = &gbp_ep_db.ged_by_ip_rd;
+ f64 last_start, start_time, delta_t;
+ int i, j, k;
+
+ if (!gte_table->instantiated)
+ return;
+
+ delta_t = 0;
+ last_start = start_time = vlib_time_now (vm);
+
+ for (i = 0; i < gte_table->nbuckets; i++)
+ {
+ clib_bihash_bucket_24_8_t *b;
+ clib_bihash_value_24_8_t *v;
+
+ /* allow no more than 20us without a pause */
+ delta_t = vlib_time_now (vm) - last_start;
+ if (delta_t > 20e-6)
+ {
+ /* suspend for 100 us */
+ vlib_process_suspend (vm, 100e-6);
+ last_start = vlib_time_now (vm);
+ }
+
+ b = clib_bihash_get_bucket_24_8 (gte_table, i);
+ if (clib_bihash_bucket_is_empty_24_8 (b))
+ continue;
+ v = clib_bihash_get_value_24_8 (gte_table, b->offset);
+
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (clib_bihash_is_free_24_8 (&v->kvp[k]))
+ continue;
+
+ gbp_endpoint_check (v->kvp[k].value, start_time);
+
+ /*
+ * Note: we may have just freed the bucket's backing
+ * storage, so check right here...
+ */
+ if (clib_bihash_bucket_is_empty_24_8 (b))
+ goto doublebreak;
+ }
+ v++;
+ }
+ doublebreak:
+ ;
+ }
+}
+
+void
+gbp_endpoint_scan (vlib_main_t * vm)
+{
+ gbp_endpoint_scan_l2 (vm);
+ gbp_endpoint_scan_l3 (vm);
+}
+
+static fib_node_t *
+gbp_endpoint_get_node (fib_node_index_t index)
+{
+ gbp_endpoint_t *ge;
+
+ ge = gbp_endpoint_get (index);
+
+ return (&ge->ge_node);
+}
+
+static gbp_endpoint_t *
+gbp_endpoint_from_fib_node (fib_node_t * node)
+{
+ ASSERT (gbp_endpoint_fib_type == node->fn_type);
+ return ((gbp_endpoint_t *) node);
+}
+
+static void
+gbp_endpoint_last_lock_gone (fib_node_t * node)
+{
+ const gbp_bridge_domain_t *gbd;
+ const gbp_route_domain_t *grd;
+ const fib_prefix_t *pfx;
+ gbp_endpoint_t *ge;
+
+ ge = gbp_endpoint_from_fib_node (node);
+
+ ASSERT (0 == vec_len (ge->ge_locs));
+
+ gbd = gbp_bridge_domain_get (ge->ge_key.gek_gbd);
+
+ /*
+ * we have removed the last source. this EP is toast
+ */
+ if (INDEX_INVALID != ge->ge_key.gek_gbd)
+ {
+ gbp_endpoint_del_mac (&ge->ge_key.gek_mac, gbd->gb_bd_index);
+ }
+ vec_foreach (pfx, ge->ge_key.gek_ips)
+ {
+ grd = gbp_route_domain_get (ge->ge_key.gek_grd);
+ gbp_endpoint_del_ip (&pfx->fp_addr, grd->grd_fib_index[pfx->fp_proto]);
+ }
+ pool_put (gbp_endpoint_pool, ge);
+}
+
+static fib_node_back_walk_rc_t
+gbp_endpoint_back_walk_notify (fib_node_t * node,
+ fib_node_back_walk_ctx_t * ctx)
+{
+ ASSERT (0);
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/*
+ * The FIB path's graph node virtual function table
+ */
+static const fib_node_vft_t gbp_endpoint_vft = {
+ .fnv_get = gbp_endpoint_get_node,
+ .fnv_last_lock = gbp_endpoint_last_lock_gone,
+ .fnv_back_walk = gbp_endpoint_back_walk_notify,
+ // .fnv_mem_show = fib_path_memory_show,
+};
+
+static clib_error_t *
+gbp_endpoint_init (vlib_main_t * vm)
+{
+#define GBP_EP_HASH_NUM_BUCKETS (2 * 1024)
+#define GBP_EP_HASH_MEMORY_SIZE (1 << 20)
+
+ clib_bihash_init_24_8 (&gbp_ep_db.ged_by_ip_rd,
+ "GBP Endpoints - IP/RD",
+ GBP_EP_HASH_NUM_BUCKETS, GBP_EP_HASH_MEMORY_SIZE);
+
+ clib_bihash_init_16_8 (&gbp_ep_db.ged_by_mac_bd,
+ "GBP Endpoints - MAC/BD",
+ GBP_EP_HASH_NUM_BUCKETS, GBP_EP_HASH_MEMORY_SIZE);
+
+ gbp_ep_logger = vlib_log_register_class ("gbp", "ep");
+ gbp_endpoint_fib_type = fib_node_register_new_type (&gbp_endpoint_vft);
+ gbp_fib_source_hi = fib_source_allocate ("gbp-endpoint-hi",
+ FIB_SOURCE_PRIORITY_HI,
+ FIB_SOURCE_BH_SIMPLE);
+ gbp_fib_source_low = fib_source_allocate ("gbp-endpoint-low",
+ FIB_SOURCE_PRIORITY_LOW,
+ FIB_SOURCE_BH_SIMPLE);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_endpoint_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_endpoint.h b/extras/deprecated/plugins/gbp/gbp_endpoint.h
new file mode 100644
index 00000000000..3155e7be4e0
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_endpoint.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_ENDPOINT_H__
+#define __GBP_ENDPOINT_H__
+
+#include <plugins/gbp/gbp_types.h>
+#include <plugins/gbp/gbp_itf.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/mac_address.h>
+
+#include <vppinfra/bihash_16_8.h>
+#include <vppinfra/bihash_template.h>
+#include <vppinfra/bihash_24_8.h>
+#include <vppinfra/bihash_template.h>
+
+/**
+ * Flags for each endpoint
+ */
+typedef enum gbp_endpoint_attr_t_
+{
+ GBP_ENDPOINT_ATTR_FIRST = 0,
+ GBP_ENDPOINT_ATTR_BOUNCE = GBP_ENDPOINT_ATTR_FIRST,
+ GBP_ENDPOINT_ATTR_REMOTE,
+ GBP_ENDPOINT_ATTR_LEARNT,
+ GBP_ENDPOINT_ATTR_EXTERNAL,
+ GBP_ENDPOINT_ATTR_LAST,
+} gbp_endpoint_attr_t;
+
+typedef enum gbp_endpoint_flags_t_
+{
+ GBP_ENDPOINT_FLAG_NONE = 0,
+ GBP_ENDPOINT_FLAG_BOUNCE = (1 << GBP_ENDPOINT_ATTR_BOUNCE),
+ GBP_ENDPOINT_FLAG_REMOTE = (1 << GBP_ENDPOINT_ATTR_REMOTE),
+ GBP_ENDPOINT_FLAG_LEARNT = (1 << GBP_ENDPOINT_ATTR_LEARNT),
+ GBP_ENDPOINT_FLAG_EXTERNAL = (1 << GBP_ENDPOINT_ATTR_EXTERNAL),
+} gbp_endpoint_flags_t;
+
+#define GBP_ENDPOINT_ATTR_NAMES { \
+ [GBP_ENDPOINT_ATTR_BOUNCE] = "bounce", \
+ [GBP_ENDPOINT_ATTR_REMOTE] = "remote", \
+ [GBP_ENDPOINT_ATTR_LEARNT] = "learnt", \
+ [GBP_ENDPOINT_ATTR_EXTERNAL] = "external", \
+}
+
+extern u8 *format_gbp_endpoint_flags (u8 * s, va_list * args);
+
+/**
+ * Sources of Endpoints in priority order. The best (lowest value) source
+ * provides the forwarding information.
+ * Data-plane takes preference because the CP data is not always complete;
+ * it may not have the sclass.
+ */
+#define foreach_gbp_endpoint_src \
+ _(DP, "data-plane") \
+ _(CP, "control-plane") \
+ _(RR, "recursive-resolution")
+
+typedef enum gbp_endpoint_src_t_
+{
+#define _(v,s) GBP_ENDPOINT_SRC_##v,
+ foreach_gbp_endpoint_src
+#undef _
+} gbp_endpoint_src_t;
+
+#define GBP_ENDPOINT_SRC_MAX (GBP_ENDPOINT_SRC_RR+1)
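+
+/*
+ * The enum values double as priorities: lower is better, so
+ *
+ *   GBP_ENDPOINT_SRC_DP < GBP_ENDPOINT_SRC_CP < GBP_ENDPOINT_SRC_RR
+ *
+ * and callers compare 'src <= best' to decide whether an update came
+ * from the source currently providing the forwarding information.
+ */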
+
+extern u8 *format_gbp_endpoint_src (u8 * s, va_list * args);
+
+/**
+ * This is the identity of an endpoint; as such, it is information
+ * about an endpoint that is idempotent, i.e. it does not change.
+ * The ID is used to add the EP into the various data-bases for retrieval.
+ */
+typedef struct gbp_endpoint_key_t_
+{
+ /**
+ * A vector of ip addresses that belong to the endpoint.
+ * Together with the EPG's route-domain (RD) this forms the EP's L3 key
+ */
+ fib_prefix_t *gek_ips;
+
+ /**
+ * MAC address of the endpoint.
+ * Together with the EPG's bridge-domain (BD) this forms the EP's L2 key
+ */
+ mac_address_t gek_mac;
+
+ /**
+ * Index of the Bridge-Domain
+ */
+ index_t gek_gbd;
+
+ /**
+ * Index of the Route-Domain
+ */
+ index_t gek_grd;
+} gbp_endpoint_key_t;
+
+/**
+ * Information about the location of the endpoint provided by a source
+ * of endpoints
+ */
+typedef struct gbp_endpoint_loc_t_
+{
+ /**
+ * The source providing this location information
+ */
+ gbp_endpoint_src_t gel_src;
+
+ /**
+ * The interface on which the EP is connected
+ */
+ gbp_itf_hdl_t gel_itf;
+
+ /**
+ * Endpoint flags
+ */
+ gbp_endpoint_flags_t gel_flags;
+
+ /**
+ * Endpoint Group.
+ */
+ index_t gel_epg;
+
+ /**
+ * number of times this source has locked this location
+ */
+ u32 gel_locks;
+
+ /**
+ * Tunnel info for remote endpoints
+ */
+ struct
+ {
+ u32 gel_parent_sw_if_index;
+ ip46_address_t gel_src;
+ ip46_address_t gel_dst;
+ } tun;
+} gbp_endpoint_loc_t;
+
+/**
+ * An endpoint's current forwarding state
+ */
+typedef struct gbp_endpoint_fwd_t_
+{
+ /**
+ * The interface on which the EP is connected
+ */
+ gbp_itf_hdl_t gef_itf;
+
+ /**
+ * The L3 adjacencies, if created
+ */
+ index_t *gef_adjs;
+
+ /**
+ * Endpoint Group's sclass. cached for fast DP access.
+ */
+ sclass_t gef_sclass;
+
+ /**
+ * FIB index the EP is in
+ */
+ u32 gef_fib_index;
+
+ gbp_endpoint_flags_t gef_flags;
+} gbp_endpoint_fwd_t;
+
+/**
+ * A Group Based Policy Endpoint.
+ * This is typically a VM or container. If the endpoint is local (i.e. on
+ * the same compute node as VPP) then there is one interface per-endpoint.
+ * If the EP is remote, e.g. reachable over a [vxlan] tunnel, then there
+ * will be multiple EPs reachable over the tunnel and they can be distinguished
+ * via either their MAC or IP Address[es].
+ */
+typedef struct gbp_endpoint_t_
+{
+ /**
+ * A FIB node that allows the tracking of children.
+ */
+ fib_node_t ge_node;
+
+ /**
+ * The key/ID of this EP
+ */
+ gbp_endpoint_key_t ge_key;
+
+ /**
+ * Location information provided by the various sources.
+ * These are sorted based on source priority.
+ */
+ gbp_endpoint_loc_t *ge_locs;
+
+ gbp_endpoint_fwd_t ge_fwd;
+
+ /**
+ * The last time a packet was seen from this endpoint
+ */
+ f64 ge_last_time;
+} gbp_endpoint_t;
+
+extern u8 *format_gbp_endpoint (u8 * s, va_list * args);
+
+/**
+ * GBP Endpoint Databases
+ */
+typedef struct gbp_ep_by_ip_itf_db_t_
+{
+ index_t *ged_by_sw_if_index;
+ clib_bihash_24_8_t ged_by_ip_rd;
+ clib_bihash_16_8_t ged_by_mac_bd;
+} gbp_ep_db_t;
+
+extern int gbp_endpoint_update_and_lock (gbp_endpoint_src_t src,
+ u32 sw_if_index,
+ const ip46_address_t * ip,
+ const mac_address_t * mac,
+ index_t gbd, index_t grd,
+ sclass_t sclass,
+ gbp_endpoint_flags_t flags,
+ const ip46_address_t * tun_src,
+ const ip46_address_t * tun_dst,
+ u32 * handle);
+extern void gbp_endpoint_unlock (gbp_endpoint_src_t src, index_t gbpei);
+extern u32 gbp_endpoint_child_add (index_t gei,
+ fib_node_type_t type,
+ fib_node_index_t index);
+extern void gbp_endpoint_child_remove (index_t gei, u32 sibling);
+
+typedef walk_rc_t (*gbp_endpoint_cb_t) (index_t gbpei, void *ctx);
+extern void gbp_endpoint_walk (gbp_endpoint_cb_t cb, void *ctx);
+extern void gbp_endpoint_scan (vlib_main_t * vm);
+extern int gbp_endpoint_is_remote (const gbp_endpoint_t * ge);
+extern int gbp_endpoint_is_local (const gbp_endpoint_t * ge);
+extern int gbp_endpoint_is_external (const gbp_endpoint_t * ge);
+extern int gbp_endpoint_is_learnt (const gbp_endpoint_t * ge);
+
+
+extern void gbp_endpoint_flush (gbp_endpoint_src_t src, u32 sw_if_index);
+
+/**
+ * DP functions and databases
+ */
+extern gbp_ep_db_t gbp_ep_db;
+extern gbp_endpoint_t *gbp_endpoint_pool;
+
+/**
+ * Get the endpoint from its pool index
+ */
+always_inline gbp_endpoint_t *
+gbp_endpoint_get (index_t gbpei)
+{
+ return (pool_elt_at_index (gbp_endpoint_pool, gbpei));
+}
+
+static_always_inline void
+gbp_endpoint_mk_key_mac (const u8 * mac,
+ u32 bd_index, clib_bihash_kv_16_8_t * key)
+{
+ key->key[0] = ethernet_mac_address_u64 (mac);
+ key->key[1] = bd_index;
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_mac (const u8 * mac, u32 bd_index)
+{
+ clib_bihash_kv_16_8_t key, value;
+ int rv;
+
+ gbp_endpoint_mk_key_mac (mac, bd_index, &key);
+
+ rv = clib_bihash_search_16_8 (&gbp_ep_db.ged_by_mac_bd, &key, &value);
+
+ if (0 != rv)
+ return NULL;
+
+ return (gbp_endpoint_get (value.value));
+}
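+
+/*
+ * Illustrative data-plane usage; eth (a parsed ethernet_header_t) and
+ * bd_index are hypothetical:
+ *
+ *   gbp_endpoint_t *ge;
+ *
+ *   ge = gbp_endpoint_find_mac (eth->src_address, bd_index);
+ *   if (NULL != ge)
+ *     ;   (the EP is already known in this bridge-domain)
+ */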
+
+static_always_inline void
+gbp_endpoint_mk_key_ip (const ip46_address_t * ip,
+ u32 fib_index, clib_bihash_kv_24_8_t * key)
+{
+ key->key[0] = ip->as_u64[0];
+ key->key[1] = ip->as_u64[1];
+ key->key[2] = fib_index;
+}
+
+static_always_inline void
+gbp_endpoint_mk_key_ip4 (const ip4_address_t * ip,
+ u32 fib_index, clib_bihash_kv_24_8_t * key)
+{
+ const ip46_address_t a = {
+ .ip4 = *ip,
+ };
+ gbp_endpoint_mk_key_ip (&a, fib_index, key);
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_ip4 (const ip4_address_t * ip, u32 fib_index)
+{
+ clib_bihash_kv_24_8_t key, value;
+ int rv;
+
+ gbp_endpoint_mk_key_ip4 (ip, fib_index, &key);
+
+ rv = clib_bihash_search_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, &value);
+
+ if (0 != rv)
+ return NULL;
+
+ return (gbp_endpoint_get (value.value));
+}
+
+static_always_inline void
+gbp_endpoint_mk_key_ip6 (const ip6_address_t * ip,
+ u32 fib_index, clib_bihash_kv_24_8_t * key)
+{
+ key->key[0] = ip->as_u64[0];
+ key->key[1] = ip->as_u64[1];
+ key->key[2] = fib_index;
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_ip6 (const ip6_address_t * ip, u32 fib_index)
+{
+ clib_bihash_kv_24_8_t key, value;
+ int rv;
+
+ gbp_endpoint_mk_key_ip6 (ip, fib_index, &key);
+
+ rv = clib_bihash_search_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, &value);
+
+ if (0 != rv)
+ return NULL;
+
+ return (gbp_endpoint_get (value.value));
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_itf (u32 sw_if_index)
+{
+ index_t gei;
+
+ gei = gbp_ep_db.ged_by_sw_if_index[sw_if_index];
+
+ if (INDEX_INVALID != gei)
+ return (gbp_endpoint_get (gei));
+
+ return (NULL);
+}
+
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_endpoint_group.c b/extras/deprecated/plugins/gbp/gbp_endpoint_group.c
new file mode 100644
index 00000000000..b9044378e3b
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_endpoint_group.c
@@ -0,0 +1,402 @@
+/*
+ * gbp_endpoint_group.c : Group Based Policy Endpoint Groups
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_endpoint_group.h>
+#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_itf.h>
+
+#include <vnet/dpo/dvr_dpo.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/l2/l2_input.h>
+
+/**
+ * Pool of GBP endpoint_groups
+ */
+gbp_endpoint_group_t *gbp_endpoint_group_pool;
+
+/**
+ * DB of endpoint_groups
+ */
+gbp_endpoint_group_db_t gbp_endpoint_group_db;
+
+/**
+ * Map sclass to EPG
+ */
+uword *gbp_epg_sclass_db;
+
+vlib_log_class_t gg_logger;
+
+#define GBP_EPG_DBG(...) \
+ vlib_log_debug (gg_logger, __VA_ARGS__);
+
+gbp_endpoint_group_t *
+gbp_endpoint_group_get (index_t i)
+{
+ return (pool_elt_at_index (gbp_endpoint_group_pool, i));
+}
+
+void
+gbp_endpoint_group_lock (index_t ggi)
+{
+ gbp_endpoint_group_t *gg;
+
+ if (INDEX_INVALID == ggi)
+ return;
+
+ gg = gbp_endpoint_group_get (ggi);
+ gg->gg_locks++;
+}
+
+index_t
+gbp_endpoint_group_find (sclass_t sclass)
+{
+ uword *p;
+
+ p = hash_get (gbp_endpoint_group_db.gg_hash_sclass, sclass);
+
+ if (NULL != p)
+ return p[0];
+
+ return (INDEX_INVALID);
+}
+
+int
+gbp_endpoint_group_add_and_lock (vnid_t vnid,
+ u16 sclass,
+ u32 bd_id,
+ u32 rd_id,
+ u32 uplink_sw_if_index,
+ const gbp_endpoint_retention_t * retention)
+{
+ gbp_endpoint_group_t *gg;
+ index_t ggi;
+
+ ggi = gbp_endpoint_group_find (sclass);
+
+ if (INDEX_INVALID == ggi)
+ {
+ fib_protocol_t fproto;
+ index_t gbi, grdi;
+
+ gbi = gbp_bridge_domain_find_and_lock (bd_id);
+
+ if (~0 == gbi)
+ return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
+
+ grdi = gbp_route_domain_find_and_lock (rd_id);
+
+ if (~0 == grdi)
+ {
+ gbp_bridge_domain_unlock (gbi);
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+ }
+
+ pool_get_zero (gbp_endpoint_group_pool, gg);
+
+ gg->gg_vnid = vnid;
+ gg->gg_rd = grdi;
+ gg->gg_gbd = gbi;
+
+ gg->gg_uplink_sw_if_index = uplink_sw_if_index;
+ gbp_itf_hdl_reset (&gg->gg_uplink_itf);
+ gg->gg_locks = 1;
+ gg->gg_sclass = sclass;
+ gg->gg_retention = *retention;
+
+ if (SCLASS_INVALID != gg->gg_sclass)
+ hash_set (gbp_epg_sclass_db, gg->gg_sclass, gg->gg_vnid);
+
+ /*
+ * an egress DVR dpo for internal subnets to use when sending
+ * on the uplink interface
+ */
+ if (~0 != gg->gg_uplink_sw_if_index)
+ {
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ dvr_dpo_add_or_lock (uplink_sw_if_index,
+ fib_proto_to_dpo (fproto),
+ &gg->gg_dpo[fproto]);
+ }
+
+ /*
+ * Add the uplink to the BD.
+ * Packets arriving direct from the uplink have already had policy applied
+ */
+ gg->gg_uplink_itf =
+ gbp_itf_l2_add_and_lock (gg->gg_uplink_sw_if_index, gbi);
+
+ gbp_itf_l2_set_input_feature (gg->gg_uplink_itf,
+ L2INPUT_FEAT_GBP_NULL_CLASSIFY);
+ }
+
+ hash_set (gbp_endpoint_group_db.gg_hash_sclass,
+ gg->gg_sclass, gg - gbp_endpoint_group_pool);
+ }
+ else
+ {
+ gg = gbp_endpoint_group_get (ggi);
+ gg->gg_locks++;
+ }
+
+ GBP_EPG_DBG ("add: %U", format_gbp_endpoint_group, gg);
+
+ return (0);
+}
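+
+/*
+ * A minimal usage sketch (all IDs hypothetical, and assuming bd 1 and
+ * rd 1 were created beforehand): an EPG with vnid/sclass 100 and no
+ * uplink interface:
+ *
+ *   gbp_endpoint_retention_t rt = { .remote_ep_timeout = 30 };
+ *
+ *   gbp_endpoint_group_add_and_lock (100, 100, 1, 1, ~0, &rt);
+ *   ...
+ *   gbp_endpoint_group_delete (100);
+ */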
+
+void
+gbp_endpoint_group_unlock (index_t ggi)
+{
+ gbp_endpoint_group_t *gg;
+
+ if (INDEX_INVALID == ggi)
+ return;
+
+ gg = gbp_endpoint_group_get (ggi);
+
+ gg->gg_locks--;
+
+ if (0 == gg->gg_locks)
+ {
+ fib_protocol_t fproto;
+
+ gg = pool_elt_at_index (gbp_endpoint_group_pool, ggi);
+
+ gbp_itf_unlock (&gg->gg_uplink_itf);
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ dpo_reset (&gg->gg_dpo[fproto]);
+ }
+ gbp_bridge_domain_unlock (gg->gg_gbd);
+ gbp_route_domain_unlock (gg->gg_rd);
+
+ if (SCLASS_INVALID != gg->gg_sclass)
+ hash_unset (gbp_epg_sclass_db, gg->gg_sclass);
+ hash_unset (gbp_endpoint_group_db.gg_hash_sclass, gg->gg_sclass);
+
+ pool_put (gbp_endpoint_group_pool, gg);
+ }
+}
+
+int
+gbp_endpoint_group_delete (sclass_t sclass)
+{
+ index_t ggi;
+
+ ggi = gbp_endpoint_group_find (sclass);
+
+ if (INDEX_INVALID != ggi)
+ {
+ GBP_EPG_DBG ("del: %U", format_gbp_endpoint_group,
+ gbp_endpoint_group_get (ggi));
+ gbp_endpoint_group_unlock (ggi);
+
+ return (0);
+ }
+
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+u32
+gbp_endpoint_group_get_bd_id (const gbp_endpoint_group_t * gg)
+{
+ const gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (gg->gg_gbd);
+
+ return (gb->gb_bd_id);
+}
+
+index_t
+gbp_endpoint_group_get_fib_index (const gbp_endpoint_group_t * gg,
+ fib_protocol_t fproto)
+{
+ const gbp_route_domain_t *grd;
+
+ grd = gbp_route_domain_get (gg->gg_rd);
+
+ return (grd->grd_fib_index[fproto]);
+}
+
+void
+gbp_endpoint_group_walk (gbp_endpoint_group_cb_t cb, void *ctx)
+{
+ gbp_endpoint_group_t *gbpe;
+
+ /* *INDENT-OFF* */
+ pool_foreach (gbpe, gbp_endpoint_group_pool)
+ {
+ if (!cb(gbpe, ctx))
+ break;
+ }
+ /* *INDENT-ON* */
+}
+
+static clib_error_t *
+gbp_endpoint_group_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ gbp_endpoint_retention_t retention = { 0 };
+ vnid_t vnid = VNID_INVALID, sclass = SCLASS_INVALID;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 uplink_sw_if_index = ~0;
+ u32 bd_id = ~0;
+ u32 rd_id = ~0;
+ u8 add = 1;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U", unformat_vnet_sw_interface,
+ vnm, &uplink_sw_if_index))
+ ;
+ else if (unformat (input, "add"))
+ add = 1;
+ else if (unformat (input, "del"))
+ add = 0;
+ else if (unformat (input, "epg %d", &vnid))
+ ;
+ else if (unformat (input, "sclass %d", &sclass))
+ ;
+ else if (unformat (input, "bd %d", &bd_id))
+ ;
+ else if (unformat (input, "rd %d", &rd_id))
+ ;
+ else
+ break;
+ }
+
+ if (VNID_INVALID == vnid)
+ return clib_error_return (0, "EPG-ID must be specified");
+
+ if (add)
+ {
+ if (~0 == bd_id)
+ return clib_error_return (0, "Bridge-domain must be specified");
+ if (~0 == rd_id)
+ return clib_error_return (0, "route-domain must be specified");
+
+ gbp_endpoint_group_add_and_lock (vnid, sclass, bd_id, rd_id,
+ uplink_sw_if_index, &retention);
+ }
+ else
+ gbp_endpoint_group_delete (vnid);
+
+ return (NULL);
+}
+
+/*?
+ * Configure a GBP Endpoint Group
+ *
+ * @cliexpar
+ * @cliexstart{gbp endpoint-group [del] epg <ID> bd <ID> rd <ID> [sclass <ID>] [<interface>]}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_endpoint_group_cli_node, static) = {
+ .path = "gbp endpoint-group",
+ .short_help = "gbp endpoint-group [del] epg <ID> bd <ID> rd <ID> [sclass <ID>] [<interface>]",
+ .function = gbp_endpoint_group_cli,
+};
+/* *INDENT-ON* */
+
+static u8 *
+format_gbp_endpoint_retention (u8 * s, va_list * args)
+{
+ gbp_endpoint_retention_t *rt = va_arg (*args, gbp_endpoint_retention_t*);
+
+ s = format (s, "[remote-EP-timeout:%d]", rt->remote_ep_timeout);
+
+ return (s);
+}
+
+u8 *
+format_gbp_endpoint_group (u8 * s, va_list * args)
+{
+ gbp_endpoint_group_t *gg = va_arg (*args, gbp_endpoint_group_t*);
+
+ if (NULL != gg)
+ s = format (s, "[%d] %d, sclass:%d bd:%d rd:%d uplink:%U retention:%U locks:%d",
+ gg - gbp_endpoint_group_pool,
+ gg->gg_vnid,
+ gg->gg_sclass,
+ gg->gg_gbd,
+ gg->gg_rd,
+ format_gbp_itf_hdl, gg->gg_uplink_itf,
+ format_gbp_endpoint_retention, &gg->gg_retention,
+ gg->gg_locks);
+ else
+ s = format (s, "NULL");
+
+ return (s);
+}
+
+static int
+gbp_endpoint_group_show_one (gbp_endpoint_group_t *gg, void *ctx)
+{
+ vlib_main_t *vm;
+
+ vm = ctx;
+ vlib_cli_output (vm, " %U", format_gbp_endpoint_group, gg);
+
+ return (1);
+}
+
+static clib_error_t *
+gbp_endpoint_group_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "Endpoint-Groups:");
+ gbp_endpoint_group_walk (gbp_endpoint_group_show_one, vm);
+
+ return (NULL);
+}
+
+
+/*?
+ * Show Group Based Policy Endpoint_Groups and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp endpoint_group}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_endpoint_group_show_node, static) = {
+ .path = "show gbp endpoint-group",
+ .short_help = "show gbp endpoint-group\n",
+ .function = gbp_endpoint_group_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_endpoint_group_init (vlib_main_t * vm)
+{
+ gg_logger = vlib_log_register_class ("gbp", "epg");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_endpoint_group_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_endpoint_group.h b/extras/deprecated/plugins/gbp/gbp_endpoint_group.h
new file mode 100644
index 00000000000..c5fdff8463d
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_endpoint_group.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_ENDPOINT_GROUP_H__
+#define __GBP_ENDPOINT_GROUP_H__
+
+#include <plugins/gbp/gbp_types.h>
+#include <plugins/gbp/gbp_itf.h>
+
+#include <vnet/fib/fib_types.h>
+
+/**
+ * Endpoint Retention Policy
+ */
+typedef struct gbp_endpoint_retention_t_
+{
+ /** Aging timeout for remote endpoints */
+ u32 remote_ep_timeout;
+} gbp_endpoint_retention_t;
+
+/**
+ * An Endpoint Group representation
+ */
+typedef struct gbp_endpoint_group_t_
+{
+ /**
+ * ID
+ */
+ vnid_t gg_vnid;
+
+ /**
+ * Sclass. Could be unset => ~0
+ */
+ u16 gg_sclass;
+
+ /**
+ * Bridge-domain ID the EPG is in
+ */
+ index_t gg_gbd;
+
+ /**
+ * route-domain/IP-table ID the EPG is in
+ */
+ index_t gg_rd;
+
+ /**
+ * Is the EPG an external/NAT
+ */
+ u8 gg_is_ext;
+
+ /**
+ * the uplink interface dedicated to the EPG
+ */
+ u32 gg_uplink_sw_if_index;
+ gbp_itf_hdl_t gg_uplink_itf;
+
+ /**
+ * The DPO used in the L3 path for forwarding internal subnets
+ */
+ dpo_id_t gg_dpo[FIB_PROTOCOL_IP_MAX];
+
+ /**
+ * Locks/references to this EPG
+ */
+ u32 gg_locks;
+
+ /**
+ * EP retention policy
+ */
+ gbp_endpoint_retention_t gg_retention;
+} gbp_endpoint_group_t;
+
+/**
+ * EPG DB, keyed on EPG-ID
+ */
+typedef struct gbp_endpoint_group_db_t_
+{
+ uword *gg_hash_sclass;
+} gbp_endpoint_group_db_t;
+
+extern int gbp_endpoint_group_add_and_lock (vnid_t vnid,
+ u16 sclass,
+ u32 bd_id,
+ u32 rd_id,
+ u32 uplink_sw_if_index,
+ const gbp_endpoint_retention_t *
+ retention);
+extern index_t gbp_endpoint_group_find (sclass_t sclass);
+extern int gbp_endpoint_group_delete (sclass_t sclass);
+extern void gbp_endpoint_group_unlock (index_t index);
+extern void gbp_endpoint_group_lock (index_t index);
+extern u32 gbp_endpoint_group_get_bd_id (const gbp_endpoint_group_t *);
+
+extern gbp_endpoint_group_t *gbp_endpoint_group_get (index_t i);
+extern index_t gbp_endpoint_group_get_fib_index (const gbp_endpoint_group_t *
+ gg, fib_protocol_t fproto);
+
+typedef int (*gbp_endpoint_group_cb_t) (gbp_endpoint_group_t * gbpe,
+ void *ctx);
+extern void gbp_endpoint_group_walk (gbp_endpoint_group_cb_t cb, void *ctx);
+
+
+extern u8 *format_gbp_endpoint_group (u8 * s, va_list * args);
+
+/**
+ * DP functions and databases
+ */
+extern gbp_endpoint_group_db_t gbp_endpoint_group_db;
+extern gbp_endpoint_group_t *gbp_endpoint_group_pool;
+extern uword *gbp_epg_sclass_db;
+
+always_inline u32
+gbp_epg_itf_lookup_sclass (sclass_t sclass)
+{
+ uword *p;
+
+ p = hash_get (gbp_endpoint_group_db.gg_hash_sclass, sclass);
+
+ if (NULL != p)
+ {
+ gbp_endpoint_group_t *gg;
+
+ gg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
+ return (gg->gg_uplink_sw_if_index);
+ }
+ return (~0);
+}
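+
+/*
+ * Illustrative data-plane usage, assuming an sclass parsed from a
+ * received packet:
+ *
+ *   u32 uplink_sw_if_index = gbp_epg_itf_lookup_sclass (sclass);
+ *
+ *   if (~0 != uplink_sw_if_index)
+ *     ;   (the EPG has an uplink on which to send unknown traffic)
+ */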
+
+always_inline const dpo_id_t *
+gbp_epg_dpo_lookup (sclass_t sclass, fib_protocol_t fproto)
+{
+ uword *p;
+
+ p = hash_get (gbp_endpoint_group_db.gg_hash_sclass, sclass);
+
+ if (NULL != p)
+ {
+ gbp_endpoint_group_t *gg;
+
+ gg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
+ return (&gg->gg_dpo[fproto]);
+ }
+ return (NULL);
+}
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_ext_itf.c b/extras/deprecated/plugins/gbp/gbp_ext_itf.c
new file mode 100644
index 00000000000..c5506661c2d
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_ext_itf.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_ext_itf.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_itf.h>
+
+/**
+ * Pool of GBP ext_itfs
+ */
+gbp_ext_itf_t *gbp_ext_itf_pool;
+
+/**
+ * external interface configs keyed by sw_if_index
+ */
+index_t *gbp_ext_itf_db;
+
+#define GBP_EXT_ITF_ID 0x00000080
+
+/**
+ * logger
+ */
+vlib_log_class_t gx_logger;
+
+#define GBP_EXT_ITF_DBG(...) \
+ vlib_log_debug (gx_logger, __VA_ARGS__);
+
+u8 *
+format_gbp_ext_itf (u8 * s, va_list * args)
+{
+ gbp_ext_itf_t *gx = va_arg (*args, gbp_ext_itf_t *);
+
+ return (format (s, "%U%s in %U",
+ format_gbp_itf_hdl, gx->gx_itf,
+ (gx->gx_flags & GBP_EXT_ITF_F_ANON) ? " [anon]" : "",
+ format_gbp_bridge_domain, gx->gx_bd));
+}
+
+int
+gbp_ext_itf_add (u32 sw_if_index, u32 bd_id, u32 rd_id, u32 flags)
+{
+ gbp_ext_itf_t *gx;
+ index_t gxi;
+
+ vec_validate_init_empty (gbp_ext_itf_db, sw_if_index, INDEX_INVALID);
+
+ gxi = gbp_ext_itf_db[sw_if_index];
+
+ if (INDEX_INVALID == gxi)
+ {
+ gbp_route_domain_t *gr;
+ fib_protocol_t fproto;
+ index_t gbi, gri;
+
+ gbi = gbp_bridge_domain_find_and_lock (bd_id);
+
+ if (INDEX_INVALID == gbi)
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+
+ gri = gbp_route_domain_find_and_lock (rd_id);
+
+ if (INDEX_INVALID == gri)
+ {
+ gbp_bridge_domain_unlock (gbi);
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+ }
+
+ pool_get_zero (gbp_ext_itf_pool, gx);
+ gxi = gx - gbp_ext_itf_pool;
+
+ gr = gbp_route_domain_get (gri);
+
+ gx->gx_bd = gbi;
+ gx->gx_rd = gri;
+ gbp_itf_hdl_reset (&gx->gx_itf);
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ gx->gx_fib_index[fproto] =
+ gr->grd_fib_index[fib_proto_to_dpo (fproto)];
+ }
+
+ if (flags & GBP_EXT_ITF_F_ANON)
+ {
+ /* add interface to the BD */
+ gx->gx_itf = gbp_itf_l2_add_and_lock (sw_if_index, gbi);
+
+ /* setup GBP L2 features on this interface */
+ gbp_itf_l2_set_input_feature (gx->gx_itf,
+ L2INPUT_FEAT_GBP_LPM_ANON_CLASSIFY |
+ L2INPUT_FEAT_LEARN);
+ gbp_itf_l2_set_output_feature (gx->gx_itf,
+ L2OUTPUT_FEAT_GBP_POLICY_LPM);
+ }
+
+ gx->gx_flags = flags;
+
+ gbp_ext_itf_db[sw_if_index] = gxi;
+
+ GBP_EXT_ITF_DBG ("add: %U", format_gbp_ext_itf, gx);
+
+ return (0);
+ }
+
+ return (VNET_API_ERROR_ENTRY_ALREADY_EXISTS);
+}
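+
+/*
+ * Illustrative usage (indices hypothetical, and assuming bd 1 and rd 1
+ * exist): register sw_if_index 5 as an anonymous l3-out external
+ * interface:
+ *
+ *   gbp_ext_itf_add (5, 1, 1, GBP_EXT_ITF_F_ANON);
+ *   ...
+ *   gbp_ext_itf_delete (5);
+ */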
+
+int
+gbp_ext_itf_delete (u32 sw_if_index)
+{
+ gbp_ext_itf_t *gx;
+ index_t gxi;
+
+ if (vec_len (gbp_ext_itf_db) <= sw_if_index)
+ return (VNET_API_ERROR_INVALID_SW_IF_INDEX);
+
+ gxi = gbp_ext_itf_db[sw_if_index];
+
+ if (INDEX_INVALID != gxi)
+ {
+ gx = pool_elt_at_index (gbp_ext_itf_pool, gxi);
+
+ GBP_EXT_ITF_DBG ("del: %U", format_gbp_ext_itf, gx);
+
+ gbp_itf_unlock (&gx->gx_itf);
+ gbp_route_domain_unlock (gx->gx_rd);
+ gbp_bridge_domain_unlock (gx->gx_bd);
+
+ gbp_ext_itf_db[sw_if_index] = INDEX_INVALID;
+ pool_put (gbp_ext_itf_pool, gx);
+
+ return (0);
+ }
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+static clib_error_t *
+gbp_ext_itf_add_del_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 sw_if_index = ~0, bd_id = ~0, rd_id = ~0, flags = 0;
+ int add = 1;
+ int rv;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ add = 0;
+ else
+ if (unformat
+ (line_input, "%U", unformat_vnet_sw_interface, vnet_get_main (),
+ &sw_if_index))
+ ;
+ else if (unformat (line_input, "bd %d", &bd_id))
+ ;
+ else if (unformat (line_input, "rd %d", &rd_id))
+ ;
+ else if (unformat (line_input, "anon-l3-out"))
+ flags |= GBP_EXT_ITF_F_ANON;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ }
+ unformat_free (line_input);
+
+ if (~0 == sw_if_index)
+ return clib_error_return (0, "interface must be specified");
+
+ if (add)
+ {
+ if (~0 == bd_id)
+ return clib_error_return (0, "BD-ID must be specified");
+ if (~0 == rd_id)
+ return clib_error_return (0, "RD-ID must be specified");
+ rv = gbp_ext_itf_add (sw_if_index, bd_id, rd_id, flags);
+ }
+ else
+ rv = gbp_ext_itf_delete (sw_if_index);
+
+ switch (rv)
+ {
+ case 0:
+ return 0;
+ case VNET_API_ERROR_ENTRY_ALREADY_EXISTS:
+ return clib_error_return (0, "interface already exists");
+ case VNET_API_ERROR_NO_SUCH_ENTRY: /* fallthrough */
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX:
+ return clib_error_return (0, "unknown interface");
+ default:
+ return clib_error_return (0, "error %d", rv);
+ }
+
+ /* never reached */
+ return 0;
+}
+
+/*?
+ * Add a Group Based Policy external interface, optionally anonymous
+ *
+ * @cliexpar
+ * @cliexstart{gbp ext-itf [del] <interface> bd <ID> rd <ID> [anon-l3-out]}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_ext_itf_add_del_node, static) = {
+ .path = "gbp ext-itf",
+ .short_help = "gbp ext-itf [del] <interface> bd <ID> rd <ID> [anon-l3-out]\n",
+ .function = gbp_ext_itf_add_del_cli,
+};
+/* *INDENT-ON* */
+
+void
+gbp_ext_itf_walk (gbp_ext_itf_cb_t cb, void *ctx)
+{
+ gbp_ext_itf_t *ge;
+
+ /* *INDENT-OFF* */
+ pool_foreach (ge, gbp_ext_itf_pool)
+ {
+ if (!cb(ge, ctx))
+ break;
+ }
+ /* *INDENT-ON* */
+}
+
+static walk_rc_t
+gbp_ext_itf_show_one (gbp_ext_itf_t * gx, void *ctx)
+{
+ vlib_cli_output (ctx, " %U", format_gbp_ext_itf, gx);
+
+ return (WALK_CONTINUE);
+}
+
+static clib_error_t *
+gbp_ext_itf_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "External-Interfaces:");
+ gbp_ext_itf_walk (gbp_ext_itf_show_one, vm);
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy external interface and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp ext-itf}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_ext_itf_show_node, static) = {
+ .path = "show gbp ext-itf",
+ .short_help = "show gbp ext-itf\n",
+ .function = gbp_ext_itf_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_ext_itf_init (vlib_main_t * vm)
+{
+ gx_logger = vlib_log_register_class ("gbp", "ext-itf");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_ext_itf_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
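
The add path above is a common VPP idiom: objects live in a dense pool, while a sparse vector keyed by sw_if_index maps back into the pool. Below is a minimal, self-contained sketch of that idiom; all names and sizes are illustrative and are not part of the plugin's API.

    #include <stdio.h>
    #include <stdint.h>

    #define INVALID ((uint32_t) ~0)
    #define MAX_ITFS 8

    typedef struct { uint32_t bd_id, rd_id; } ext_itf_t;

    static ext_itf_t pool[MAX_ITFS];	/* dense object storage */
    static uint32_t n_pool;
    static uint32_t db[MAX_ITFS];	/* sparse: sw_if_index -> pool index */

    static int
    ext_itf_add (uint32_t sw_if_index, uint32_t bd_id, uint32_t rd_id)
    {
      if (db[sw_if_index] != INVALID)
        return -1;		/* like VNET_API_ERROR_ENTRY_ALREADY_EXISTS */
      pool[n_pool] = (ext_itf_t) { bd_id, rd_id };
      db[sw_if_index] = n_pool++;
      return 0;
    }

    int
    main (void)
    {
      for (int i = 0; i < MAX_ITFS; i++)
        db[i] = INVALID;
      printf ("first add: %d\n", ext_itf_add (1, 10, 20));	/* 0 */
      printf ("second add: %d\n", ext_itf_add (1, 10, 20));	/* -1 */
      return 0;
    }
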
diff --git a/extras/deprecated/plugins/gbp/gbp_ext_itf.h b/extras/deprecated/plugins/gbp/gbp_ext_itf.h
new file mode 100644
index 00000000000..03b1992ca45
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_ext_itf.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_EXT_ITF_H__
+#define __GBP_EXT_ITF_H__
+
+#include <gbp/gbp.h>
+
+enum
+{
+ GBP_EXT_ITF_F_NONE = 0,
+ GBP_EXT_ITF_F_ANON = 1 << 0,
+};
+
+/**
+ * An external interface maps directly to an OpFlex L3ExternalInterface.
+ * The special characteristic of an external interface is the way the source
+ * EPG is determined for input packets, which, like a recirc interface, is
+ * via an LPM lookup.
+ */
+typedef struct gbp_ext_itf_t_
+{
+ /**
+ * The interface
+ */
+ gbp_itf_hdl_t gx_itf;
+
+ /**
+ * The BD this external interface is a member of
+ */
+ index_t gx_bd;
+
+ /**
+ * The RD this external interface is a member of
+ */
+ index_t gx_rd;
+
+ /**
+ * cached FIB indices from the RD
+ */
+ u32 gx_fib_index[DPO_PROTO_NUM];
+
+ /**
+ * The associated flags
+ */
+ u32 gx_flags;
+
+} gbp_ext_itf_t;
+
+
+extern int gbp_ext_itf_add (u32 sw_if_index, u32 bd_id, u32 rd_id, u32 flags);
+extern int gbp_ext_itf_delete (u32 sw_if_index);
+
+extern u8 *format_gbp_ext_itf (u8 * s, va_list * args);
+
+typedef walk_rc_t (*gbp_ext_itf_cb_t) (gbp_ext_itf_t * gbpe, void *ctx);
+extern void gbp_ext_itf_walk (gbp_ext_itf_cb_t cb, void *ctx);
+
+
+/**
+ * Exposed types for the data-plane
+ */
+extern gbp_ext_itf_t *gbp_ext_itf_pool;
+extern index_t *gbp_ext_itf_db;
+
+always_inline gbp_ext_itf_t *
+gbp_ext_itf_get (u32 sw_if_index)
+{
+ return (pool_elt_at_index (gbp_ext_itf_pool, gbp_ext_itf_db[sw_if_index]));
+}
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
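
gbp_ext_itf_walk() and gbp_ext_itf_cb_t above form a walk-with-callback API in which the visitor's return value controls early termination. A self-contained sketch of the same shape, with illustrative names and data:

    #include <stdio.h>

    typedef enum { WALK_STOP, WALK_CONTINUE } walk_rc;
    typedef walk_rc (*visit_cb) (int item, void *ctx);

    static int items[] = { 3, 1, 4, 1, 5 };

    static void
    walk (visit_cb cb, void *ctx)
    {
      for (unsigned i = 0; i < sizeof (items) / sizeof (items[0]); i++)
        if (WALK_STOP == cb (items[i], ctx))
          break;		/* visitor asked to stop early */
    }

    static walk_rc
    find_first_ge4 (int item, void *ctx)
    {
      if (item >= 4)
        {
          *(int *) ctx = item;
          return WALK_STOP;
        }
      return WALK_CONTINUE;
    }

    int
    main (void)
    {
      int found = -1;
      walk (find_first_ge4, &found);
      printf ("found %d\n", found);	/* 4 */
      return 0;
    }
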
diff --git a/extras/deprecated/plugins/gbp/gbp_fwd.c b/extras/deprecated/plugins/gbp/gbp_fwd.c
new file mode 100644
index 00000000000..4ecc4779b92
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_fwd.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <plugins/gbp/gbp.h>
+#include <vnet/l2/l2_input.h>
+#include <plugins/gbp/gbp_learn.h>
+
+/**
+ * Grouping of global data for the GBP forwarding feature
+ */
+typedef struct gbp_fwd_main_t_
+{
+ /**
+   * Next nodes for L2 input features
+ */
+ u32 l2_input_feat_next[32];
+} gbp_fwd_main_t;
+
+gbp_fwd_main_t gbp_fwd_main;
+
+static clib_error_t *
+gbp_fwd_init (vlib_main_t * vm)
+{
+ gbp_fwd_main_t *gpm = &gbp_fwd_main;
+ vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-fwd");
+
+ /* Initialize the feature next-node indices */
+ feat_bitmap_init_next_nodes (vm,
+ node->index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ gpm->l2_input_feat_next);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (gbp_fwd_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_fwd_dpo.c b/extras/deprecated/plugins/gbp/gbp_fwd_dpo.c
new file mode 100644
index 00000000000..b1023f5e78f
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_fwd_dpo.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_fwd_dpo.h>
+
+#include <vnet/ethernet/ethernet.h>
+
+
+#ifndef CLIB_MARCH_VARIANT
+/**
+ * The 'DB' of GBP FWD DPOs.
+ * There is one per-proto
+ */
+static index_t gbp_fwd_dpo_db[DPO_PROTO_NUM] = { INDEX_INVALID };
+
+/**
+ * The DPO type registered for GBP FWD DPOs
+ */
+static dpo_type_t gbp_fwd_dpo_type;
+
+/**
+ * @brief pool of all GBP FWD DPOs
+ */
+gbp_fwd_dpo_t *gbp_fwd_dpo_pool;
+
+static gbp_fwd_dpo_t *
+gbp_fwd_dpo_alloc (void)
+{
+ gbp_fwd_dpo_t *gfd;
+
+ pool_get (gbp_fwd_dpo_pool, gfd);
+
+ return (gfd);
+}
+
+static inline gbp_fwd_dpo_t *
+gbp_fwd_dpo_get_from_dpo (const dpo_id_t * dpo)
+{
+ ASSERT (gbp_fwd_dpo_type == dpo->dpoi_type);
+
+ return (gbp_fwd_dpo_get (dpo->dpoi_index));
+}
+
+static inline index_t
+gbp_fwd_dpo_get_index (gbp_fwd_dpo_t * gfd)
+{
+ return (gfd - gbp_fwd_dpo_pool);
+}
+
+static void
+gbp_fwd_dpo_lock (dpo_id_t * dpo)
+{
+ gbp_fwd_dpo_t *gfd;
+
+ gfd = gbp_fwd_dpo_get_from_dpo (dpo);
+ gfd->gfd_locks++;
+}
+
+static void
+gbp_fwd_dpo_unlock (dpo_id_t * dpo)
+{
+ gbp_fwd_dpo_t *gfd;
+
+ gfd = gbp_fwd_dpo_get_from_dpo (dpo);
+ gfd->gfd_locks--;
+
+ if (0 == gfd->gfd_locks)
+ {
+ gbp_fwd_dpo_db[gfd->gfd_proto] = INDEX_INVALID;
+ pool_put (gbp_fwd_dpo_pool, gfd);
+ }
+}
+
+void
+gbp_fwd_dpo_add_or_lock (dpo_proto_t dproto, dpo_id_t * dpo)
+{
+ gbp_fwd_dpo_t *gfd;
+
+ if (INDEX_INVALID == gbp_fwd_dpo_db[dproto])
+ {
+ gfd = gbp_fwd_dpo_alloc ();
+
+ gfd->gfd_proto = dproto;
+
+ gbp_fwd_dpo_db[dproto] = gbp_fwd_dpo_get_index (gfd);
+ }
+ else
+ {
+ gfd = gbp_fwd_dpo_get (gbp_fwd_dpo_db[dproto]);
+ }
+
+ dpo_set (dpo, gbp_fwd_dpo_type, dproto, gbp_fwd_dpo_get_index (gfd));
+}
+
+u8 *
+format_gbp_fwd_dpo (u8 * s, va_list * ap)
+{
+ index_t index = va_arg (*ap, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*ap, u32);
+ gbp_fwd_dpo_t *gfd = gbp_fwd_dpo_get (index);
+
+ return (format (s, "gbp-fwd-dpo: %U", format_dpo_proto, gfd->gfd_proto));
+}
+
+const static dpo_vft_t gbp_fwd_dpo_vft = {
+ .dv_lock = gbp_fwd_dpo_lock,
+ .dv_unlock = gbp_fwd_dpo_unlock,
+ .dv_format = format_gbp_fwd_dpo,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a GBP FWD
+ * DPO object.
+ *
+ * This means that these graph nodes are ones for which a GBP FWD DPO is the
+ * parent object in the DPO-graph.
+ */
+const static char *const gbp_fwd_dpo_ip4_nodes[] = {
+ "ip4-gbp-fwd-dpo",
+ NULL,
+};
+
+const static char *const gbp_fwd_dpo_ip6_nodes[] = {
+ "ip6-gbp-fwd-dpo",
+ NULL,
+};
+
+const static char *const *const gbp_fwd_dpo_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP4] = gbp_fwd_dpo_ip4_nodes,
+ [DPO_PROTO_IP6] = gbp_fwd_dpo_ip6_nodes,
+};
+
+dpo_type_t
+gbp_fwd_dpo_get_type (void)
+{
+ return (gbp_fwd_dpo_type);
+}
+
+static clib_error_t *
+gbp_fwd_dpo_module_init (vlib_main_t * vm)
+{
+ dpo_proto_t dproto;
+
+ FOR_EACH_DPO_PROTO (dproto)
+ {
+ gbp_fwd_dpo_db[dproto] = INDEX_INVALID;
+ }
+
+ gbp_fwd_dpo_type = dpo_register_new_type (&gbp_fwd_dpo_vft,
+ gbp_fwd_dpo_nodes);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_fwd_dpo_module_init);
+#endif /* CLIB_MARCH_VARIANT */
+
+typedef struct gbp_fwd_dpo_trace_t_
+{
+ u32 sclass;
+ u32 dpo_index;
+} gbp_fwd_dpo_trace_t;
+
+typedef enum
+{
+ GBP_FWD_DROP,
+ GBP_FWD_FWD,
+ GBP_FWD_N_NEXT,
+} gbp_fwd_next_t;
+
+always_inline uword
+gbp_fwd_dpo_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, fib_protocol_t fproto)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ const dpo_id_t *next_dpo0;
+ vlib_buffer_t *b0;
+ sclass_t sclass0;
+ u32 bi0, next0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sclass0 = vnet_buffer2 (b0)->gbp.sclass;
+ next_dpo0 = gbp_epg_dpo_lookup (sclass0, fproto);
+
+ if (PREDICT_TRUE (NULL != next_dpo0))
+ {
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = next_dpo0->dpoi_index;
+ next0 = GBP_FWD_FWD;
+ }
+ else
+ {
+ next0 = GBP_FWD_DROP;
+ }
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gbp_fwd_dpo_trace_t *tr;
+
+ tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->sclass = sclass0;
+ tr->dpo_index = (NULL != next_dpo0 ?
+ next_dpo0->dpoi_index : ~0);
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_gbp_fwd_dpo_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_fwd_dpo_trace_t *t = va_arg (*args, gbp_fwd_dpo_trace_t *);
+
+ s = format (s, " sclass:%d dpo:%d", t->sclass, t->dpo_index);
+
+ return s;
+}
+
+VLIB_NODE_FN (ip4_gbp_fwd_dpo_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (gbp_fwd_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP4));
+}
+
+VLIB_NODE_FN (ip6_gbp_fwd_dpo_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (gbp_fwd_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP6));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_gbp_fwd_dpo_node) = {
+ .name = "ip4-gbp-fwd-dpo",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_fwd_dpo_trace,
+ .n_next_nodes = GBP_FWD_N_NEXT,
+ .next_nodes =
+ {
+ [GBP_FWD_DROP] = "ip4-drop",
+ [GBP_FWD_FWD] = "ip4-dvr-dpo",
+ }
+};
+VLIB_REGISTER_NODE (ip6_gbp_fwd_dpo_node) = {
+ .name = "ip6-gbp-fwd-dpo",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_fwd_dpo_trace,
+ .n_next_nodes = GBP_FWD_N_NEXT,
+ .next_nodes =
+ {
+ [GBP_FWD_DROP] = "ip6-drop",
+ [GBP_FWD_FWD] = "ip6-dvr-dpo",
+ }
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
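
gbp_fwd_dpo_add_or_lock() and gbp_fwd_dpo_unlock() above implement a per-protocol singleton with reference counting: the object is created on first use and forgotten when the last lock is released. A loose, self-contained sketch of that scheme (illustrative names; the real code allocates from a pool and is driven through the dpo_vft_t):

    #include <stdio.h>
    #include <stdint.h>

    #define N_PROTO 2
    #define INVALID ((uint32_t) ~0)

    typedef struct { uint32_t proto, locks; } fwd_dpo_t;

    static fwd_dpo_t objs[N_PROTO];
    static uint32_t db[N_PROTO] = { INVALID, INVALID };

    static void
    add_or_lock (uint32_t proto)
    {
      if (INVALID == db[proto])
        {
          objs[proto] = (fwd_dpo_t) { proto, 0 };	/* create on first use */
          db[proto] = proto;
        }
      objs[proto].locks++;
    }

    static void
    unlock (uint32_t proto)
    {
      if (0 == --objs[proto].locks)
        db[proto] = INVALID;	/* last user gone: drop the singleton */
    }

    int
    main (void)
    {
      add_or_lock (0);
      add_or_lock (0);
      unlock (0);
      printf ("cached: %s\n", INVALID != db[0] ? "yes" : "no");	/* yes */
      unlock (0);
      printf ("cached: %s\n", INVALID != db[0] ? "yes" : "no");	/* no */
      return 0;
    }
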
diff --git a/extras/deprecated/plugins/gbp/gbp_fwd_dpo.h b/extras/deprecated/plugins/gbp/gbp_fwd_dpo.h
new file mode 100644
index 00000000000..6092d6241b5
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_fwd_dpo.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_FWD_DPO_H__
+#define __GBP_FWD_DPO_H__
+
+#include <vnet/dpo/dpo.h>
+
+/**
+ * @brief
+ * The GBP FWD DPO. Used in the L3 path to select the correct EPG uplink
+ * based on the source EPG.
+ */
+typedef struct gbp_fwd_dpo_t_
+{
+ /**
+ * The protocol of packets using this DPO
+ */
+ dpo_proto_t gfd_proto;
+
+ /**
+ * number of locks.
+ */
+ u16 gfd_locks;
+} gbp_fwd_dpo_t;
+
+extern void gbp_fwd_dpo_add_or_lock (dpo_proto_t dproto, dpo_id_t * dpo);
+
+extern dpo_type_t gbp_fwd_dpo_get_type (void);
+
+/**
+ * @brief pool of all GBP FWD DPOs
+ */
+extern gbp_fwd_dpo_t *gbp_fwd_dpo_pool;
+
+static inline gbp_fwd_dpo_t *
+gbp_fwd_dpo_get (index_t index)
+{
+ return (pool_elt_at_index (gbp_fwd_dpo_pool, index));
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+
+#endif
diff --git a/extras/deprecated/plugins/gbp/gbp_fwd_node.c b/extras/deprecated/plugins/gbp/gbp_fwd_node.c
new file mode 100644
index 00000000000..6ea56fd8074
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_fwd_node.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <vnet/l2/l2_input.h>
+
+#define foreach_gbp_fwd \
+ _(DROP, "drop") \
+ _(OUTPUT, "output")
+
+typedef enum
+{
+#define _(sym,str) GBP_FWD_ERROR_##sym,
+ foreach_gbp_fwd
+#undef _
+ GBP_FWD_N_ERROR,
+} gbp_fwd_error_t;
+
+static char *gbp_fwd_error_strings[] = {
+#define _(sym,string) string,
+ foreach_gbp_fwd
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) GBP_FWD_NEXT_##sym,
+ foreach_gbp_fwd
+#undef _
+ GBP_FWD_N_NEXT,
+} gbp_fwd_next_t;
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_fwd_trace_t_
+{
+ /* per-pkt trace data */
+ sclass_t sclass;
+ u32 sw_if_index;
+} gbp_fwd_trace_t;
+
+VLIB_NODE_FN (gbp_fwd_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ u32 next_index;
+
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sw_if_index0;
+ gbp_fwd_next_t next0;
+ vlib_buffer_t *b0;
+ sclass_t sclass0;
+
+ next0 = GBP_FWD_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /*
+ * lookup the uplink based on src EPG
+ */
+ sclass0 = vnet_buffer2 (b0)->gbp.sclass;
+
+ sw_if_index0 = gbp_epg_itf_lookup_sclass (sclass0);
+
+ if (~0 != sw_if_index0)
+ {
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
+
+ next0 = GBP_FWD_NEXT_OUTPUT;
+ }
+ /*
+ * else
+ * don't know the uplink interface for this EPG => drop
+ */
+
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_fwd_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sclass = sclass0;
+ t->sw_if_index = sw_if_index0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_fwd_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_fwd_trace_t *t = va_arg (*args, gbp_fwd_trace_t *);
+
+ s = format (s, "sclass:%d", t->sclass);
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_fwd_node) = {
+ .name = "gbp-fwd",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_fwd_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(gbp_fwd_error_strings),
+ .error_strings = gbp_fwd_error_strings,
+
+ .n_next_nodes = GBP_FWD_N_NEXT,
+
+ .next_nodes = {
+ [GBP_FWD_NEXT_DROP] = "error-drop",
+ [GBP_FWD_NEXT_OUTPUT] = "l2-output",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
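
The decision in the gbp-fwd node above reduces to: map the packet's source class to an uplink interface, and drop when the class is unknown. A toy stand-alone version of that decision (the table contents are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define UNSET ((uint32_t) ~0)

    static uint32_t sclass_to_uplink[16];

    int
    main (void)
    {
      for (int i = 0; i < 16; i++)
        sclass_to_uplink[i] = UNSET;
      sclass_to_uplink[3] = 7;	/* sclass 3 -> uplink sw_if_index 7 */

      for (uint32_t sclass = 2; sclass <= 3; sclass++)
        {
          uint32_t itf = sclass_to_uplink[sclass];
          if (UNSET == itf)
            printf ("sclass %u: drop\n", sclass);	/* unknown uplink */
          else
            printf ("sclass %u: output on itf %u\n", sclass, itf);
        }
      return 0;
    }
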
diff --git a/extras/deprecated/plugins/gbp/gbp_itf.c b/extras/deprecated/plugins/gbp/gbp_itf.c
new file mode 100644
index 00000000000..738a7ac2e39
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_itf.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_itf.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+
+#include <vnet/ip/ip.h>
+
+#define foreach_gbp_itf_mode \
+ _(L2, "l2") \
+  _(L3, "l3")
+
+typedef enum gbp_itf_mode_t_
+{
+#define _(s,v) GBP_ITF_MODE_##s,
+ foreach_gbp_itf_mode
+#undef _
+} gbp_itf_mode_t;
+
+/**
+ * Attributes and configurations attached to interfaces by GBP
+ */
+typedef struct gbp_itf_t_
+{
+ /**
+ * Number of references to this interface
+ */
+ u32 gi_locks;
+
+ /**
+ * The interface this wrapper is managing
+ */
+ u32 gi_sw_if_index;
+
+ /**
+ * The mode of the interface
+ */
+ gbp_itf_mode_t gi_mode;
+
+ /**
+   * Users of this interface; each user's slot is encoded in its handle
+ */
+ u32 *gi_users;
+
+ /**
+ * L2/L3 Features configured by each user
+ */
+ u32 *gi_input_fbs;
+ u32 gi_input_fb;
+ u32 *gi_output_fbs;
+ u32 gi_output_fb;
+
+ /**
+ * function to call when the interface is deleted.
+ */
+ gbp_itf_free_fn_t gi_free_fn;
+
+ union
+ {
+ /**
+ * GBP BD or RD index
+ */
+ u32 gi_gbi;
+ index_t gi_gri;
+ };
+} gbp_itf_t;
+
+static gbp_itf_t *gbp_itf_pool;
+static uword *gbp_itf_db;
+
+static const char *gbp_itf_feat_bit_pos_to_arc[] = {
+#define _(s,v,a) [GBP_ITF_L3_FEAT_POS_##s] = a,
+ foreach_gdb_l3_feature
+#undef _
+};
+
+static const char *gbp_itf_feat_bit_pos_to_feat[] = {
+#define _(s,v,a) [GBP_ITF_L3_FEAT_POS_##s] = v,
+ foreach_gdb_l3_feature
+#undef _
+};
+
+u8 *
+format_gbp_itf_l3_feat (u8 * s, va_list * args)
+{
+ gbp_itf_l3_feat_t flags = va_arg (*args, gbp_itf_l3_feat_t);
+
+#define _(a, b, c) \
+ if (flags & GBP_ITF_L3_FEAT_##a) \
+ s = format (s, "%s ", b);
+ foreach_gdb_l3_feature
+#undef _
+ return (s);
+}
+
+void
+gbp_itf_hdl_reset (gbp_itf_hdl_t * gh)
+{
+ *gh = GBP_ITF_HDL_INVALID;
+}
+
+bool
+gbp_itf_hdl_is_valid (gbp_itf_hdl_t gh)
+{
+ return (gh.gh_which != GBP_ITF_HDL_INVALID.gh_which);
+}
+
+static gbp_itf_t *
+gbp_itf_get (index_t gii)
+{
+ if (pool_is_free_index (gbp_itf_pool, gii))
+ return (NULL);
+
+ return (pool_elt_at_index (gbp_itf_pool, gii));
+}
+
+static gbp_itf_t *
+gbp_itf_find (u32 sw_if_index)
+{
+ uword *p;
+
+ p = hash_get (gbp_itf_db, sw_if_index);
+
+ if (NULL != p)
+ return (gbp_itf_get (p[0]));
+
+ return (NULL);
+}
+
+static gbp_itf_t *
+gbp_itf_find_hdl (gbp_itf_hdl_t gh)
+{
+ return (gbp_itf_find (gh.gh_which));
+}
+
+u32
+gbp_itf_get_sw_if_index (gbp_itf_hdl_t hdl)
+{
+ return (hdl.gh_which);
+}
+
+static gbp_itf_hdl_t
+gbp_itf_mk_hdl (gbp_itf_t * gi)
+{
+ gbp_itf_hdl_t gh;
+ u32 *useri;
+
+ pool_get (gi->gi_users, useri);
+ *useri = 0;
+
+ gh.gh_who = useri - gi->gi_users;
+ gh.gh_which = gi->gi_sw_if_index;
+
+ return (gh);
+}
+
+static gbp_itf_hdl_t
+gbp_itf_l2_add_and_lock_i (u32 sw_if_index, index_t gbi, gbp_itf_free_fn_t ff)
+{
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_find (sw_if_index);
+
+ if (NULL == gi)
+ {
+ pool_get_zero (gbp_itf_pool, gi);
+
+ gi->gi_sw_if_index = sw_if_index;
+ gi->gi_gbi = gbi;
+ gi->gi_mode = GBP_ITF_MODE_L2;
+ gi->gi_free_fn = ff;
+
+ gbp_bridge_domain_itf_add (gi->gi_gbi, gi->gi_sw_if_index,
+ L2_BD_PORT_TYPE_NORMAL);
+
+ hash_set (gbp_itf_db, gi->gi_sw_if_index, gi - gbp_itf_pool);
+ }
+
+ gi->gi_locks++;
+
+ return (gbp_itf_mk_hdl (gi));
+}
+
+gbp_itf_hdl_t
+gbp_itf_l2_add_and_lock (u32 sw_if_index, index_t gbi)
+{
+ return (gbp_itf_l2_add_and_lock_i (sw_if_index, gbi, NULL));
+}
+
+gbp_itf_hdl_t
+gbp_itf_l2_add_and_lock_w_free (u32 sw_if_index,
+ index_t gbi, gbp_itf_free_fn_t ff)
+{
+ return (gbp_itf_l2_add_and_lock_i (sw_if_index, gbi, ff));
+}
+
+gbp_itf_hdl_t
+gbp_itf_l3_add_and_lock_i (u32 sw_if_index, index_t gri, gbp_itf_free_fn_t ff)
+{
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_find (sw_if_index);
+
+ if (NULL == gi)
+ {
+ const gbp_route_domain_t *grd;
+ fib_protocol_t fproto;
+
+ pool_get_zero (gbp_itf_pool, gi);
+
+ gi->gi_sw_if_index = sw_if_index;
+ gi->gi_mode = GBP_ITF_MODE_L3;
+ gi->gi_gri = gri;
+ gi->gi_free_fn = ff;
+
+ grd = gbp_route_domain_get (gi->gi_gri);
+
+ ip4_sw_interface_enable_disable (gi->gi_sw_if_index, 1);
+ ip6_sw_interface_enable_disable (gi->gi_sw_if_index, 1);
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ ip_table_bind (fproto, gi->gi_sw_if_index, grd->grd_table_id[fproto]);
+
+ hash_set (gbp_itf_db, gi->gi_sw_if_index, gi - gbp_itf_pool);
+ }
+
+ gi->gi_locks++;
+
+ return (gbp_itf_mk_hdl (gi));
+}
+
+gbp_itf_hdl_t
+gbp_itf_l3_add_and_lock (u32 sw_if_index, index_t gri)
+{
+ return (gbp_itf_l3_add_and_lock_i (sw_if_index, gri, NULL));
+}
+
+gbp_itf_hdl_t
+gbp_itf_l3_add_and_lock_w_free (u32 sw_if_index,
+ index_t gri, gbp_itf_free_fn_t ff)
+{
+ return (gbp_itf_l3_add_and_lock_i (sw_if_index, gri, ff));
+}
+
+void
+gbp_itf_lock (gbp_itf_hdl_t gh)
+{
+ gbp_itf_t *gi;
+
+ if (!gbp_itf_hdl_is_valid (gh))
+ return;
+
+ gi = gbp_itf_find_hdl (gh);
+
+ gi->gi_locks++;
+}
+
+gbp_itf_hdl_t
+gbp_itf_clone_and_lock (gbp_itf_hdl_t gh)
+{
+ gbp_itf_t *gi;
+
+ if (!gbp_itf_hdl_is_valid (gh))
+ return (GBP_ITF_HDL_INVALID);
+
+ gi = gbp_itf_find_hdl (gh);
+
+ gi->gi_locks++;
+
+ return (gbp_itf_mk_hdl (gi));
+}
+
+void
+gbp_itf_unlock (gbp_itf_hdl_t * gh)
+{
+ gbp_itf_t *gi;
+
+ if (!gbp_itf_hdl_is_valid (*gh))
+ return;
+
+ gi = gbp_itf_find_hdl (*gh);
+ ASSERT (gi->gi_locks > 0);
+ gi->gi_locks--;
+
+ if (0 == gi->gi_locks)
+ {
+ if (GBP_ITF_MODE_L2 == gi->gi_mode)
+ {
+ gbp_itf_l2_set_input_feature (*gh, L2INPUT_FEAT_NONE);
+ gbp_itf_l2_set_output_feature (*gh, L2OUTPUT_FEAT_NONE);
+ gbp_bridge_domain_itf_del (gi->gi_gbi,
+ gi->gi_sw_if_index,
+ L2_BD_PORT_TYPE_NORMAL);
+ }
+ else
+ {
+ fib_protocol_t fproto;
+
+ gbp_itf_l3_set_input_feature (*gh, GBP_ITF_L3_FEAT_NONE);
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ ip_table_bind (fproto, gi->gi_sw_if_index, 0);
+
+ ip4_sw_interface_enable_disable (gi->gi_sw_if_index, 0);
+ ip6_sw_interface_enable_disable (gi->gi_sw_if_index, 0);
+ }
+
+ hash_unset (gbp_itf_db, gi->gi_sw_if_index);
+
+ if (gi->gi_free_fn)
+ gi->gi_free_fn (gi->gi_sw_if_index);
+
+ pool_free (gi->gi_users);
+ vec_free (gi->gi_input_fbs);
+ vec_free (gi->gi_output_fbs);
+
+ memset (gi, 0, sizeof (*gi));
+ }
+
+ gbp_itf_hdl_reset (gh);
+}
+
+void
+gbp_itf_l3_set_input_feature (gbp_itf_hdl_t gh, gbp_itf_l3_feat_t feats)
+{
+ u32 diff_fb, new_fb, *fb, feat;
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_find_hdl (gh);
+
+ if (NULL == gi || GBP_ITF_MODE_L3 != gi->gi_mode)
+ return;
+
+ vec_validate (gi->gi_input_fbs, gh.gh_who);
+ gi->gi_input_fbs[gh.gh_who] = feats;
+
+ new_fb = 0;
+ vec_foreach (fb, gi->gi_input_fbs)
+ {
+ new_fb |= *fb;
+ }
+
+ /* add new features */
+ diff_fb = (gi->gi_input_fb ^ new_fb) & new_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ vnet_feature_enable_disable (gbp_itf_feat_bit_pos_to_arc[feat],
+ gbp_itf_feat_bit_pos_to_feat[feat],
+ gi->gi_sw_if_index, 1, 0, 0);
+ }));
+ /* *INDENT-ON* */
+
+ /* remove unneeded features */
+ diff_fb = (gi->gi_input_fb ^ new_fb) & gi->gi_input_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ vnet_feature_enable_disable (gbp_itf_feat_bit_pos_to_arc[feat],
+ gbp_itf_feat_bit_pos_to_feat[feat],
+ gi->gi_sw_if_index, 0, 0, 0);
+ }));
+ /* *INDENT-ON* */
+
+ gi->gi_input_fb = new_fb;
+}
+
+void
+gbp_itf_l2_set_input_feature (gbp_itf_hdl_t gh, l2input_feat_masks_t feats)
+{
+ u32 diff_fb, new_fb, *fb, feat;
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_find_hdl (gh);
+
+ if (NULL == gi || GBP_ITF_MODE_L2 != gi->gi_mode)
+ {
+ ASSERT (0);
+ return;
+ }
+
+ vec_validate (gi->gi_input_fbs, gh.gh_who);
+ gi->gi_input_fbs[gh.gh_who] = feats;
+
+ new_fb = 0;
+ vec_foreach (fb, gi->gi_input_fbs)
+ {
+ new_fb |= *fb;
+ }
+
+ /* add new features */
+ diff_fb = (gi->gi_input_fb ^ new_fb) & new_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ l2input_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 1);
+ }));
+ /* *INDENT-ON* */
+
+ /* remove unneeded features */
+ diff_fb = (gi->gi_input_fb ^ new_fb) & gi->gi_input_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ l2input_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 0);
+ }));
+ /* *INDENT-ON* */
+
+ gi->gi_input_fb = new_fb;
+}
+
+void
+gbp_itf_l2_set_output_feature (gbp_itf_hdl_t gh, l2output_feat_masks_t feats)
+{
+ u32 diff_fb, new_fb, *fb, feat;
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_find_hdl (gh);
+
+ if (NULL == gi || GBP_ITF_MODE_L2 != gi->gi_mode)
+ {
+ ASSERT (0);
+ return;
+ }
+
+ vec_validate (gi->gi_output_fbs, gh.gh_who);
+ gi->gi_output_fbs[gh.gh_who] = feats;
+
+ new_fb = 0;
+ vec_foreach (fb, gi->gi_output_fbs)
+ {
+ new_fb |= *fb;
+ }
+
+ /* add new features */
+ diff_fb = (gi->gi_output_fb ^ new_fb) & new_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ l2output_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 1);
+ }));
+ /* *INDENT-ON* */
+
+ /* remove unneeded features */
+ diff_fb = (gi->gi_output_fb ^ new_fb) & gi->gi_output_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ l2output_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 0);
+ }));
+ /* *INDENT-ON* */
+
+ gi->gi_output_fb = new_fb;
+}
+
+static u8 *
+format_gbp_itf_mode (u8 * s, va_list * args)
+{
+ gbp_itf_mode_t mode = va_arg (*args, gbp_itf_mode_t);
+
+ switch (mode)
+ {
+#define _(a,v) \
+ case GBP_ITF_MODE_##a: \
+ return format(s, "%s", v);
+ foreach_gbp_itf_mode
+#undef _
+ }
+ return (s);
+}
+
+static u8 *
+format_gbp_itf (u8 * s, va_list * args)
+{
+ index_t gii = va_arg (*args, index_t);
+ gbp_itf_t *gi;
+
+ if (INDEX_INVALID == gii)
+ return (format (s, "unset"));
+
+ gi = gbp_itf_get (gii);
+
+ s = format (s, "%U locks:%d mode:%U ",
+ format_vnet_sw_if_index_name, vnet_get_main (),
+ gi->gi_sw_if_index, gi->gi_locks,
+ format_gbp_itf_mode, gi->gi_mode);
+
+ if (GBP_ITF_MODE_L2 == gi->gi_mode)
+ s = format (s, "gbp-bd:%d input-feats:[%U] output-feats:[%U]",
+ gi->gi_gbi,
+ format_l2_input_features, gi->gi_input_fb, 0,
+ format_l2_output_features, gi->gi_output_fb, 0);
+ else
+ s = format (s, "gbp-rd:%d input-feats:[%U] output-feats:[%U]",
+ gi->gi_gbi,
+ format_gbp_itf_l3_feat, gi->gi_input_fb,
+ format_gbp_itf_l3_feat, gi->gi_output_fb);
+
+ return (s);
+}
+
+u8 *
+format_gbp_itf_hdl (u8 * s, va_list * args)
+{
+ gbp_itf_hdl_t gh = va_arg (*args, gbp_itf_hdl_t);
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_find_hdl (gh);
+
+ if (NULL == gi)
+ return format (s, "INVALID");
+
+ return (format (s, "%U", format_gbp_itf, gi - gbp_itf_pool));
+}
+
+static clib_error_t *
+gbp_itf_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u32 gii;
+
+ vlib_cli_output (vm, "Interfaces:");
+
+ /* *INDENT-OFF* */
+ pool_foreach_index (gii, gbp_itf_pool)
+ {
+ vlib_cli_output (vm, " [%d] %U", gii, format_gbp_itf, gii);
+ }
+ /* *INDENT-ON* */
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy interfaces
+ *
+ * @cliexpar
+ * @cliexstart{show gbp interface}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_itf_show_node, static) = {
+ .path = "show gbp interface",
+ .short_help = "show gbp interface\n",
+ .function = gbp_itf_show,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
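
The feature-update functions above all use the same bitmap-diff trick: XOR of the old and new masks yields the changed bits, then masking with the new (resp. old) mask selects the bits to enable (resp. disable). A stand-alone worked example:

    #include <stdio.h>
    #include <stdint.h>

    int
    main (void)
    {
      uint32_t old_fb = 0x6;	/* features 1 and 2 currently enabled */
      uint32_t new_fb = 0xc;	/* features 2 and 3 wanted */

      uint32_t to_add = (old_fb ^ new_fb) & new_fb;	/* 0x8: enable feature 3 */
      uint32_t to_del = (old_fb ^ new_fb) & old_fb;	/* 0x2: disable feature 1 */

      printf ("add=0x%x del=0x%x\n", to_add, to_del);
      return 0;
    }

Feature 2, present in both masks, appears in neither diff, so it is left untouched rather than being disabled and re-enabled.
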
diff --git a/extras/deprecated/plugins/gbp/gbp_itf.h b/extras/deprecated/plugins/gbp/gbp_itf.h
new file mode 100644
index 00000000000..23a09b2a9ff
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_itf.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_INTERFACE_H__
+#define __GBP_INTERFACE_H__
+
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/dpo/dpo.h>
+
+
+#define foreach_gdb_l3_feature \
+ _(LEARN_IP4, "gbp-learn-ip4", "ip4-unicast") \
+ _(LEARN_IP6, "gbp-learn-ip6", "ip6-unicast")
+
+typedef enum gbp_itf_l3_feat_pos_t_
+{
+#define _(s,v,a) GBP_ITF_L3_FEAT_POS_##s,
+ foreach_gdb_l3_feature
+#undef _
+} gbp_itf_l3_feat_pos_t;
+
+typedef enum gbp_itf_l3_feat_t_
+{
+ GBP_ITF_L3_FEAT_NONE,
+#define _(s,v,a) GBP_ITF_L3_FEAT_##s = (1 << GBP_ITF_L3_FEAT_POS_##s),
+ foreach_gdb_l3_feature
+#undef _
+} gbp_itf_l3_feat_t;
+
+#define GBP_ITF_L3_FEAT_LEARN (GBP_ITF_L3_FEAT_LEARN_IP4|GBP_ITF_L3_FEAT_LEARN_IP6)
+
+typedef struct gbp_itf_hdl_t_
+{
+ union
+ {
+ struct
+ {
+ u32 gh_who;
+ u32 gh_which;
+ };
+ };
+} gbp_itf_hdl_t;
+
+#define GBP_ITF_HDL_INIT {.gh_which = ~0}
+const static gbp_itf_hdl_t GBP_ITF_HDL_INVALID = GBP_ITF_HDL_INIT;
+
+extern void gbp_itf_hdl_reset (gbp_itf_hdl_t * gh);
+extern bool gbp_itf_hdl_is_valid (gbp_itf_hdl_t gh);
+
+typedef void (*gbp_itf_free_fn_t) (u32 sw_if_index);
+
+extern gbp_itf_hdl_t gbp_itf_l2_add_and_lock (u32 sw_if_index, u32 bd_index);
+extern gbp_itf_hdl_t gbp_itf_l3_add_and_lock (u32 sw_if_index, index_t gri);
+extern gbp_itf_hdl_t gbp_itf_l2_add_and_lock_w_free (u32 sw_if_index,
+ u32 bd_index,
+ gbp_itf_free_fn_t ff);
+extern gbp_itf_hdl_t gbp_itf_l3_add_and_lock_w_free (u32 sw_if_index,
+ index_t gri,
+ gbp_itf_free_fn_t ff);
+
+extern void gbp_itf_unlock (gbp_itf_hdl_t * hdl);
+extern void gbp_itf_lock (gbp_itf_hdl_t hdl);
+extern gbp_itf_hdl_t gbp_itf_clone_and_lock (gbp_itf_hdl_t hdl);
+extern u32 gbp_itf_get_sw_if_index (gbp_itf_hdl_t hdl);
+
+extern void gbp_itf_l2_set_input_feature (gbp_itf_hdl_t hdl,
+ l2input_feat_masks_t feats);
+extern void gbp_itf_l2_set_output_feature (gbp_itf_hdl_t hdl,
+ l2output_feat_masks_t feats);
+
+extern void gbp_itf_l3_set_input_feature (gbp_itf_hdl_t hdl,
+ gbp_itf_l3_feat_t feats);
+
+extern u8 *format_gbp_itf_hdl (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
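
The two-part handle above encodes which interface (gh_which) and which user slot on that interface (gh_who), so several plugin features can hold independent handles to one shared interface object; the effective feature set is the OR of every user's request. A self-contained sketch of that encoding, with illustrative names:

    #include <stdio.h>
    #include <stdint.h>

    typedef struct { uint32_t who, which; } itf_hdl_t;

    typedef struct
    {
      uint32_t sw_if_index;
      uint32_t locks;
      uint32_t feats[4];	/* per-user feature words, indexed by 'who' */
      uint32_t n_users;
    } itf_t;

    static itf_hdl_t
    itf_lock (itf_t * i)
    {
      itf_hdl_t h = { .who = i->n_users++, .which = i->sw_if_index };
      i->locks++;
      return h;
    }

    static uint32_t
    itf_effective_feats (const itf_t * i)
    {
      uint32_t fb = 0;
      for (uint32_t u = 0; u < i->n_users; u++)
        fb |= i->feats[u];	/* effective set: OR of all users */
      return fb;
    }

    int
    main (void)
    {
      itf_t i = { .sw_if_index = 1 };
      itf_hdl_t a = itf_lock (&i), b = itf_lock (&i);

      i.feats[a.who] = 0x1;
      i.feats[b.who] = 0x4;
      printf ("effective=0x%x locks=%u\n", itf_effective_feats (&i), i.locks);
      return 0;
    }
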
diff --git a/extras/deprecated/plugins/gbp/gbp_learn.c b/extras/deprecated/plugins/gbp/gbp_learn.c
new file mode 100644
index 00000000000..af3a6fb52ac
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_learn.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+
+#include <vnet/l2/l2_input.h>
+
+gbp_learn_main_t gbp_learn_main;
+
+void
+gbp_learn_enable (u32 sw_if_index)
+{
+ vnet_feature_enable_disable ("ip4-unicast",
+ "gbp-learn-ip4", sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "gbp-learn-ip6", sw_if_index, 1, 0, 0);
+}
+
+void
+gbp_learn_disable (u32 sw_if_index)
+{
+ vnet_feature_enable_disable ("ip4-unicast",
+ "gbp-learn-ip4", sw_if_index, 0, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "gbp-learn-ip6", sw_if_index, 0, 0, 0);
+}
+
+static clib_error_t *
+gbp_learn_init (vlib_main_t * vm)
+{
+ gbp_learn_main_t *glm = &gbp_learn_main;
+ vlib_thread_main_t *tm = &vlib_thread_main;
+
+ vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-learn-l2");
+
+ /* Initialize the feature next-node indices */
+ feat_bitmap_init_next_nodes (vm,
+ node->index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ glm->gl_l2_input_feat_next);
+
+ throttle_init (&glm->gl_l2_throttle,
+ tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);
+
+ throttle_init (&glm->gl_l3_throttle,
+ tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);
+
+ glm->gl_logger = vlib_log_register_class ("gbp", "learn");
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (gbp_learn_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
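
Worth noting about the learning design: the data-plane nodes (gbp_learn_node.c below) never write the endpoint DB from a worker thread; they marshal the learnt fields by value into a struct and hand it to the main thread via vl_api_rpc_call_main_thread(). A self-contained sketch of that hand-off shape, where the toy queue stands in for the RPC mechanism:

    #include <stdio.h>
    #include <stdint.h>

    typedef struct { uint8_t mac[6]; uint32_t sw_if_index; } learn_msg_t;

    static learn_msg_t queue[8];
    static int q_len;

    /* worker side: capture everything by value, no pointers into buffers */
    static void
    rpc_to_main (const learn_msg_t * m)
    {
      queue[q_len++] = *m;
    }

    /* "main thread" side: the only place the endpoint DB would be written */
    static void
    main_thread_drain (void)
    {
      for (int i = 0; i < q_len; i++)
        printf ("learn %02x..%02x on itf %u\n",
                queue[i].mac[0], queue[i].mac[5], queue[i].sw_if_index);
      q_len = 0;
    }

    int
    main (void)
    {
      learn_msg_t m = { .mac = { 2, 0, 0, 0, 0, 9 }, .sw_if_index = 4 };
      rpc_to_main (&m);
      main_thread_drain ();
      return 0;
    }
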
diff --git a/extras/deprecated/plugins/gbp/gbp_learn.h b/extras/deprecated/plugins/gbp/gbp_learn.h
new file mode 100644
index 00000000000..b4f3ae0a23d
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_learn.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_LEARN_H__
+#define __GBP_LEARN_H__
+
+#include <plugins/gbp/gbp.h>
+
+#include <vnet/util/throttle.h>
+
+/**
+ * The maximum learning rate per-hashed EP
+ */
+#define GBP_ENDPOINT_HASH_LEARN_RATE (1e-2)
+
+/**
+ * Grouping of global data for the GBP learning feature
+ */
+typedef struct gbp_learn_main_t_
+{
+ /**
+   * Next nodes for L2 input features
+ */
+ u32 gl_l2_input_feat_next[32];
+
+ /**
+ * logger - VLIB log class
+ */
+ vlib_log_class_t gl_logger;
+
+ /**
+   * throttles for the data-plane learning
+ */
+ throttle_t gl_l2_throttle;
+ throttle_t gl_l3_throttle;
+} gbp_learn_main_t;
+
+extern gbp_learn_main_t gbp_learn_main;
+
+extern void gbp_learn_enable (u32 sw_if_index);
+extern void gbp_learn_disable (u32 sw_if_index);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
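
The throttles declared above rate-limit how often the (expensive) learning path fires per endpoint hash. A loose, self-contained sketch of what such a throttle achieves; this is an illustration of the idea, not the vnet/util/throttle.h implementation:

    #include <stdio.h>
    #include <stdint.h>

    #define N_BUCKETS 64

    static uint64_t seen[N_BUCKETS];	/* last window each hash was seen in */

    static int
    throttled (uint32_t hash, uint64_t window)
    {
      uint32_t b = hash % N_BUCKETS;
      if (seen[b] == window)
        return 1;		/* already learnt in this window: suppress */
      seen[b] = window;
      return 0;			/* first sighting: take the learning path */
    }

    int
    main (void)
    {
      printf ("%d\n", throttled (0xabcd, 1));	/* 0: learn */
      printf ("%d\n", throttled (0xabcd, 1));	/* 1: suppressed */
      printf ("%d\n", throttled (0xabcd, 2));	/* 0: new window */
      return 0;
    }
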
diff --git a/extras/deprecated/plugins/gbp/gbp_learn_node.c b/extras/deprecated/plugins/gbp/gbp_learn_node.c
new file mode 100644
index 00000000000..a6c54971956
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_learn_node.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/util/throttle.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+#include <vnet/ethernet/arp_packet.h>
+
+#define GBP_LEARN_DBG(...) \
+ vlib_log_debug (gbp_learn_main.gl_logger, __VA_ARGS__);
+
+#define foreach_gbp_learn \
+ _(DROP, "drop")
+
+typedef enum
+{
+#define _(sym,str) GBP_LEARN_ERROR_##sym,
+ foreach_gbp_learn
+#undef _
+ GBP_LEARN_N_ERROR,
+} gbp_learn_error_t;
+
+static char *gbp_learn_error_strings[] = {
+#define _(sym,string) string,
+ foreach_gbp_learn
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) GBP_LEARN_NEXT_##sym,
+ foreach_gbp_learn
+#undef _
+ GBP_LEARN_N_NEXT,
+} gbp_learn_next_t;
+
+typedef struct gbp_learn_l2_t_
+{
+ ip46_address_t ip;
+ mac_address_t mac;
+ u32 sw_if_index;
+ u32 bd_index;
+ sclass_t sclass;
+ ip46_address_t outer_src;
+ ip46_address_t outer_dst;
+} gbp_learn_l2_t;
+
+
+static void
+gbp_learn_l2_cp (const gbp_learn_l2_t * gl2)
+{
+ ip46_address_t *ips = NULL;
+
+ GBP_LEARN_DBG ("L2 EP: %U %U, %d",
+ format_mac_address_t, &gl2->mac,
+ format_ip46_address, &gl2->ip, IP46_TYPE_ANY, gl2->sclass);
+
+ if (!ip46_address_is_zero (&gl2->ip))
+ vec_add1 (ips, gl2->ip);
+
+ /*
+   * flip the source and dst addresses: they describe how the packet was
+   * received, whereas this API expects how it is sent
+ */
+ gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
+ gl2->sw_if_index, ips,
+ &gl2->mac, INDEX_INVALID,
+ INDEX_INVALID, gl2->sclass,
+ (GBP_ENDPOINT_FLAG_LEARNT |
+ GBP_ENDPOINT_FLAG_REMOTE),
+ &gl2->outer_dst, &gl2->outer_src, NULL);
+ vec_free (ips);
+}
+
+static void
+gbp_learn_l2_ip4_dp (const u8 * mac, const ip4_address_t * ip,
+ u32 bd_index, u32 sw_if_index, sclass_t sclass,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ gbp_learn_l2_t gl2 = {
+ .sw_if_index = sw_if_index,
+ .bd_index = bd_index,
+ .sclass = sclass,
+ .ip.ip4 = *ip,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ mac_address_from_bytes (&gl2.mac, mac);
+
+ vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+static void
+gbp_learn_l2_ip6_dp (const u8 * mac, const ip6_address_t * ip,
+ u32 bd_index, u32 sw_if_index, sclass_t sclass,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ gbp_learn_l2_t gl2 = {
+ .sw_if_index = sw_if_index,
+ .bd_index = bd_index,
+ .sclass = sclass,
+ .ip.ip6 = *ip,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ mac_address_from_bytes (&gl2.mac, mac);
+
+ vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+static void
+gbp_learn_l2_dp (const u8 * mac, u32 bd_index, u32 sw_if_index,
+ sclass_t sclass,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ gbp_learn_l2_t gl2 = {
+ .sw_if_index = sw_if_index,
+ .bd_index = bd_index,
+ .sclass = sclass,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ mac_address_from_bytes (&gl2.mac, mac);
+
+ vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_learn_l2_trace_t_
+{
+ /* per-pkt trace data */
+ mac_address_t mac;
+ u32 sw_if_index;
+ u32 new;
+ u32 throttled;
+ u32 sclass;
+ u32 d_bit;
+ gbp_bridge_domain_flags_t gb_flags;
+} gbp_learn_l2_trace_t;
+
+always_inline void
+gbp_learn_get_outer (const ethernet_header_t * eh0,
+ ip4_address_t * outer_src, ip4_address_t * outer_dst)
+{
+ ip4_header_t *ip0;
+ u8 *buff;
+
+  /* rewind back past the iVXLAN (VXLAN-GBP) encap to the outer IP header */
+ buff = (u8 *) eh0;
+ buff -= (sizeof (vxlan_gbp_header_t) +
+ sizeof (udp_header_t) + sizeof (ip4_header_t));
+
+ ip0 = (ip4_header_t *) buff;
+
+ *outer_src = ip0->src_address;
+ *outer_dst = ip0->dst_address;
+}
+
+always_inline int
+gbp_endpoint_update_required (const gbp_endpoint_t * ge0,
+ u32 rx_sw_if_index, sclass_t sclass)
+{
+ /* Conditions for [re]learning this EP */
+
+ /* 1. it doesn't have a dataplane source */
+ if (!gbp_endpoint_is_learnt (ge0))
+ return (!0);
+
+ /* 2. has the input interface changed */
+ if (gbp_itf_get_sw_if_index (ge0->ge_fwd.gef_itf) != rx_sw_if_index)
+ return (!0);
+
+ /* 3. has the sclass changed */
+ if (sclass != ge0->ge_fwd.gef_sclass)
+ return (!0);
+
+ /* otherwise it's unchanged */
+ return (0);
+}
+
+VLIB_NODE_FN (gbp_learn_l2_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
+ gbp_learn_main_t *glm;
+ f64 time_now;
+
+ glm = &gbp_learn_main;
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+ time_now = vlib_time_now (vm);
+ thread_index = vm->thread_index;
+
+ seed = throttle_seed (&glm->gl_l2_throttle, thread_index, time_now);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip4_address_t outer_src, outer_dst;
+ const ethernet_header_t *eh0;
+ u32 bi0, sw_if_index0, t0;
+ gbp_bridge_domain_t *gb0;
+ gbp_learn_next_t next0;
+ gbp_endpoint_t *ge0;
+ vlib_buffer_t *b0;
+ sclass_t sclass0;
+
+ next0 = GBP_LEARN_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ eh0 = vlib_buffer_get_current (b0);
+ sclass0 = vnet_buffer2 (b0)->gbp.sclass;
+
+ next0 = vnet_l2_feature_next (b0, glm->gl_l2_input_feat_next,
+ L2INPUT_FEAT_GBP_LEARN);
+
+ ge0 = gbp_endpoint_find_mac (eh0->src_address,
+ vnet_buffer (b0)->l2.bd_index);
+ gb0 =
+ gbp_bridge_domain_get_by_bd_index (vnet_buffer (b0)->l2.bd_index);
+
+ if ((vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D) ||
+ (gb0->gb_flags & GBP_BD_FLAG_DO_NOT_LEARN))
+ {
+ t0 = 1;
+ goto trace;
+ }
+
+ /*
+ * check for new EP or a moved EP
+ */
+ if (NULL == ge0 ||
+ gbp_endpoint_update_required (ge0, sw_if_index0, sclass0))
+ {
+ /*
+ * use the last 4 bytes of the mac address as the hash for the EP
+ */
+ t0 = throttle_check (&glm->gl_l2_throttle, thread_index,
+ *((u32 *) (eh0->src_address + 2)), seed);
+ if (!t0)
+ {
+ gbp_learn_get_outer (eh0, &outer_src, &outer_dst);
+
+ if (outer_src.as_u32 == 0 || outer_dst.as_u32 == 0)
+ {
+ t0 = 2;
+ goto trace;
+ }
+
+ switch (clib_net_to_host_u16 (eh0->type))
+ {
+ case ETHERNET_TYPE_IP4:
+ {
+ const ip4_header_t *ip0;
+
+ ip0 = (ip4_header_t *) (eh0 + 1);
+
+ gbp_learn_l2_ip4_dp (eh0->src_address,
+ &ip0->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, sclass0,
+ &outer_src, &outer_dst);
+
+ break;
+ }
+ case ETHERNET_TYPE_IP6:
+ {
+ const ip6_header_t *ip0;
+
+ ip0 = (ip6_header_t *) (eh0 + 1);
+
+ gbp_learn_l2_ip6_dp (eh0->src_address,
+ &ip0->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, sclass0,
+ &outer_src, &outer_dst);
+
+ break;
+ }
+ case ETHERNET_TYPE_ARP:
+ {
+ const ethernet_arp_header_t *arp0;
+
+ arp0 = (ethernet_arp_header_t *) (eh0 + 1);
+
+ gbp_learn_l2_ip4_dp (eh0->src_address,
+ &arp0->ip4_over_ethernet[0].ip4,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, sclass0,
+ &outer_src, &outer_dst);
+ break;
+ }
+ default:
+ gbp_learn_l2_dp (eh0->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, sclass0,
+ &outer_src, &outer_dst);
+ break;
+ }
+ }
+ }
+ else
+ {
+ /*
+	       * this update could happen simultaneously from multiple workers,
+	       * but that's OK; we are not interested in being very accurate.
+ */
+ t0 = 0;
+ ge0->ge_last_time = time_now;
+ }
+ trace:
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_learn_l2_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ clib_memcpy_fast (t->mac.bytes, eh0->src_address, 6);
+ t->new = (NULL == ge0);
+ t->throttled = t0;
+ t->sw_if_index = sw_if_index0;
+ t->sclass = sclass0;
+ t->gb_flags = gb0->gb_flags;
+ t->d_bit = ! !(vnet_buffer2 (b0)->gbp.flags &
+ VXLAN_GBP_GPFLAGS_D);
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_learn_l2_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_learn_l2_trace_t *t = va_arg (*args, gbp_learn_l2_trace_t *);
+
+ s = format (s, "new:%d throttled:%d d-bit:%d mac:%U itf:%d sclass:%d"
+ " gb-flags:%U",
+ t->new, t->throttled, t->d_bit,
+ format_mac_address_t, &t->mac, t->sw_if_index, t->sclass,
+ format_gbp_bridge_domain_flags, t->gb_flags);
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_learn_l2_node) = {
+ .name = "gbp-learn-l2",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_learn_l2_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(gbp_learn_error_strings),
+ .error_strings = gbp_learn_error_strings,
+
+ .n_next_nodes = GBP_LEARN_N_NEXT,
+
+ .next_nodes = {
+ [GBP_LEARN_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+typedef struct gbp_learn_l3_t_
+{
+ ip46_address_t ip;
+ u32 fib_index;
+ u32 sw_if_index;
+ sclass_t sclass;
+ ip46_address_t outer_src;
+ ip46_address_t outer_dst;
+} gbp_learn_l3_t;
+
+static void
+gbp_learn_l3_cp (const gbp_learn_l3_t * gl3)
+{
+ ip46_address_t *ips = NULL;
+
+ GBP_LEARN_DBG ("L3 EP: %U, %d", format_ip46_address, &gl3->ip,
+ IP46_TYPE_ANY, gl3->sclass);
+
+ vec_add1 (ips, gl3->ip);
+
+ gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
+ gl3->sw_if_index, ips, NULL,
+ INDEX_INVALID, INDEX_INVALID, gl3->sclass,
+ (GBP_ENDPOINT_FLAG_REMOTE |
+ GBP_ENDPOINT_FLAG_LEARNT),
+ &gl3->outer_dst, &gl3->outer_src, NULL);
+ vec_free (ips);
+}
+
+static void
+gbp_learn_ip4_dp (const ip4_address_t * ip,
+ u32 fib_index, u32 sw_if_index, sclass_t sclass,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ /* *INDENT-OFF* */
+ gbp_learn_l3_t gl3 = {
+ .ip = {
+ .ip4 = *ip,
+ },
+ .sw_if_index = sw_if_index,
+ .fib_index = fib_index,
+ .sclass = sclass,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ /* *INDENT-ON* */
+
+ vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
+}
+
+static void
+gbp_learn_ip6_dp (const ip6_address_t * ip,
+ u32 fib_index, u32 sw_if_index, sclass_t sclass,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ /* *INDENT-OFF* */
+ gbp_learn_l3_t gl3 = {
+ .ip = {
+ .ip6 = *ip,
+ },
+ .sw_if_index = sw_if_index,
+ .fib_index = fib_index,
+ .sclass = sclass,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ /* *INDENT-ON* */
+
+ vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
+}
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_learn_l3_trace_t_
+{
+ /* per-pkt trace data */
+ ip46_address_t ip;
+ u32 sw_if_index;
+ u32 new;
+ u32 throttled;
+ u32 sclass;
+} gbp_learn_l3_trace_t;
+
+static uword
+gbp_learn_l3 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame,
+ fib_protocol_t fproto)
+{
+ u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
+ gbp_learn_main_t *glm;
+ f64 time_now;
+
+ glm = &gbp_learn_main;
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+ time_now = vlib_time_now (vm);
+ thread_index = vm->thread_index;
+
+ seed = throttle_seed (&glm->gl_l3_throttle, thread_index, time_now);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ CLIB_UNUSED (const ip4_header_t *) ip4_0;
+ CLIB_UNUSED (const ip6_header_t *) ip6_0;
+ u32 bi0, sw_if_index0, t0, fib_index0;
+ ip4_address_t outer_src, outer_dst;
+ ethernet_header_t *eth0;
+ gbp_learn_next_t next0;
+ gbp_endpoint_t *ge0;
+ vlib_buffer_t *b0;
+ sclass_t sclass0;
+
+ next0 = GBP_LEARN_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sclass0 = vnet_buffer2 (b0)->gbp.sclass;
+ ip6_0 = NULL;
+ ip4_0 = NULL;
+
+ vnet_feature_next (&next0, b0);
+
+ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D)
+ {
+ t0 = 1;
+ ge0 = NULL;
+ goto trace;
+ }
+
+ fib_index0 = fib_table_get_index_for_sw_if_index (fproto,
+ sw_if_index0);
+
+ if (FIB_PROTOCOL_IP6 == fproto)
+ {
+ ip6_0 = vlib_buffer_get_current (b0);
+ eth0 = (ethernet_header_t *) (((u8 *) ip6_0) - sizeof (*eth0));
+
+ gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
+
+ ge0 = gbp_endpoint_find_ip6 (&ip6_0->src_address, fib_index0);
+
+ if ((NULL == ge0) ||
+ gbp_endpoint_update_required (ge0, sw_if_index0, sclass0))
+ {
+ t0 = throttle_check (&glm->gl_l3_throttle,
+ thread_index,
+ ip6_address_hash_to_u32
+ (&ip6_0->src_address), seed);
+
+ if (!t0)
+ {
+ gbp_learn_ip6_dp (&ip6_0->src_address,
+ fib_index0, sw_if_index0, sclass0,
+ &outer_src, &outer_dst);
+ }
+ }
+ else
+ {
+ /*
+		   * this update could happen simultaneously from multiple
+		   * workers, but that's OK; we are not interested in being
+		   * very accurate.
+ */
+ t0 = 0;
+ ge0->ge_last_time = time_now;
+ }
+ }
+ else
+ {
+ ip4_0 = vlib_buffer_get_current (b0);
+ eth0 = (ethernet_header_t *) (((u8 *) ip4_0) - sizeof (*eth0));
+
+ gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
+ ge0 = gbp_endpoint_find_ip4 (&ip4_0->src_address, fib_index0);
+
+ if ((NULL == ge0) ||
+ gbp_endpoint_update_required (ge0, sw_if_index0, sclass0))
+ {
+ t0 = throttle_check (&glm->gl_l3_throttle, thread_index,
+ ip4_0->src_address.as_u32, seed);
+
+ if (!t0)
+ {
+ gbp_learn_ip4_dp (&ip4_0->src_address,
+ fib_index0, sw_if_index0, sclass0,
+ &outer_src, &outer_dst);
+ }
+ }
+ else
+ {
+ /*
+		   * this update could happen simultaneously from multiple
+		   * workers, but that's OK; we are not interested in being
+		   * very accurate.
+ */
+ t0 = 0;
+ ge0->ge_last_time = time_now;
+ }
+ }
+ trace:
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_learn_l3_trace_t *t;
+
+ t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ if (FIB_PROTOCOL_IP6 == fproto && ip6_0)
+ ip46_address_set_ip6 (&t->ip, &ip6_0->src_address);
+ if (FIB_PROTOCOL_IP4 == fproto && ip4_0)
+ ip46_address_set_ip4 (&t->ip, &ip4_0->src_address);
+ t->new = (NULL == ge0);
+ t->throttled = t0;
+ t->sw_if_index = sw_if_index0;
+ t->sclass = sclass0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_learn_l3_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_learn_l3_trace_t *t = va_arg (*args, gbp_learn_l3_trace_t *);
+
+ s = format (s, "new:%d throttled:%d ip:%U itf:%d sclass:%d",
+ t->new, t->throttled,
+ format_ip46_address, &t->ip, IP46_TYPE_ANY, t->sw_if_index,
+ t->sclass);
+
+ return s;
+}
+
+VLIB_NODE_FN (gbp_learn_ip4_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP4));
+}
+
+VLIB_NODE_FN (gbp_learn_ip6_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP6));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_learn_ip4_node) = {
+ .name = "gbp-learn-ip4",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_learn_l3_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+};
+
+VNET_FEATURE_INIT (gbp_learn_ip4, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "gbp-learn-ip4",
+};
+
+VLIB_REGISTER_NODE (gbp_learn_ip6_node) = {
+ .name = "gbp-learn-ip6",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_learn_l3_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+};
+
+VNET_FEATURE_INIT (gbp_learn_ip6, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "gbp-learn-ip6",
+};
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_policy.c b/extras/deprecated/plugins/gbp/gbp_policy.c
new file mode 100644
index 00000000000..127c6d3f059
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_policy.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_policy.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+gbp_policy_main_t gbp_policy_main;
+
+/* packet trace format function */
+u8 *
+format_gbp_policy_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_policy_trace_t *t = va_arg (*args, gbp_policy_trace_t *);
+
+ s =
+ format (s,
+	    "scope:%d, sclass:%d, dclass:%d, action:%U flags:%U acl:%d rule:%d",
+ t->scope, t->sclass, t->dclass, format_gbp_rule_action, t->action,
+ format_vxlan_gbp_header_gpflags, t->flags, t->acl_match,
+ t->rule_match);
+
+ return s;
+}
+
+static clib_error_t *
+gbp_policy_init (vlib_main_t * vm)
+{
+ gbp_policy_main_t *gpm = &gbp_policy_main;
+ clib_error_t *error = 0;
+
+ /* Initialize the feature next-node indexes */
+ vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-policy-port");
+ feat_bitmap_init_next_nodes (vm,
+ node->index,
+ L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ gpm->l2_output_feat_next[GBP_POLICY_PORT]);
+
+ node = vlib_get_node_by_name (vm, (u8 *) "gbp-policy-mac");
+ feat_bitmap_init_next_nodes (vm,
+ node->index,
+ L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ gpm->l2_output_feat_next[GBP_POLICY_MAC]);
+
+ node = vlib_get_node_by_name (vm, (u8 *) "gbp-policy-lpm");
+ feat_bitmap_init_next_nodes (vm,
+ node->index,
+ L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ gpm->l2_output_feat_next[GBP_POLICY_LPM]);
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (gbp_policy_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_policy.h b/extras/deprecated/plugins/gbp/gbp_policy.h
new file mode 100644
index 00000000000..6f87f2ec7c4
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_policy.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_POLICY_H__
+#define __GBP_POLICY_H__
+
+#include <plugins/gbp/gbp_contract.h>
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_policy_trace_t_
+{
+ /* per-pkt trace data */
+ gbp_scope_t scope;
+ sclass_t sclass;
+ sclass_t dclass;
+ gbp_rule_action_t action;
+ u32 flags;
+ u32 acl_match;
+ u32 rule_match;
+} gbp_policy_trace_t;
+
+/* packet trace format function */
+u8 * format_gbp_policy_trace (u8 * s, va_list * args);
+
+static_always_inline void
+gbp_policy_trace(vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_t *b, const gbp_contract_key_t *key, gbp_rule_action_t action, u32 acl_match, u32 rule_match)
+{
+ gbp_policy_trace_t *t;
+
+ if (PREDICT_TRUE (!(b->flags & VLIB_BUFFER_IS_TRACED)))
+ return;
+
+ t = vlib_add_trace (vm, node, b, sizeof (*t));
+ t->sclass = key->gck_src;
+ t->dclass = key->gck_dst;
+ t->scope = key->gck_scope;
+ t->action = action;
+ t->flags = vnet_buffer2 (b)->gbp.flags;
+ t->acl_match = acl_match;
+ t->rule_match = rule_match;
+}
+
+#endif /* __GBP_POLICY_H__ */
diff --git a/extras/deprecated/plugins/gbp/gbp_policy_dpo.c b/extras/deprecated/plugins/gbp/gbp_policy_dpo.c
new file mode 100644
index 00000000000..9f26b9c67ab
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_policy_dpo.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/dpo/dvr_dpo.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_policy.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_recirc.h>
+#include <plugins/gbp/gbp_contract.h>
+
+#ifndef CLIB_MARCH_VARIANT
+/**
+ * DPO pool
+ */
+gbp_policy_dpo_t *gbp_policy_dpo_pool;
+
+/**
+ * The DPO type registered for the GBP policy DPO
+ */
+dpo_type_t gbp_policy_dpo_type;
+
+static gbp_policy_dpo_t *
+gbp_policy_dpo_alloc (void)
+{
+ gbp_policy_dpo_t *gpd;
+
+ pool_get_aligned_zero (gbp_policy_dpo_pool, gpd, CLIB_CACHE_LINE_BYTES);
+
+ return (gpd);
+}
+
+static inline gbp_policy_dpo_t *
+gbp_policy_dpo_get_from_dpo (const dpo_id_t * dpo)
+{
+ ASSERT (gbp_policy_dpo_type == dpo->dpoi_type);
+
+ return (gbp_policy_dpo_get (dpo->dpoi_index));
+}
+
+static inline index_t
+gbp_policy_dpo_get_index (gbp_policy_dpo_t * gpd)
+{
+ return (gpd - gbp_policy_dpo_pool);
+}
+
+static void
+gbp_policy_dpo_lock (dpo_id_t * dpo)
+{
+ gbp_policy_dpo_t *gpd;
+
+ gpd = gbp_policy_dpo_get_from_dpo (dpo);
+ gpd->gpd_locks++;
+}
+
+static void
+gbp_policy_dpo_unlock (dpo_id_t * dpo)
+{
+ gbp_policy_dpo_t *gpd;
+
+ gpd = gbp_policy_dpo_get_from_dpo (dpo);
+ gpd->gpd_locks--;
+
+ if (0 == gpd->gpd_locks)
+ {
+ dpo_reset (&gpd->gpd_dpo);
+ pool_put (gbp_policy_dpo_pool, gpd);
+ }
+}
+
+static u32
+gbp_policy_dpo_get_urpf (const dpo_id_t * dpo)
+{
+ gbp_policy_dpo_t *gpd;
+
+ gpd = gbp_policy_dpo_get_from_dpo (dpo);
+
+ return (gpd->gpd_sw_if_index);
+}
+
+void
+gbp_policy_dpo_add_or_lock (dpo_proto_t dproto,
+ gbp_scope_t scope,
+ sclass_t sclass, u32 sw_if_index, dpo_id_t * dpo)
+{
+ gbp_policy_dpo_t *gpd;
+ dpo_id_t parent = DPO_INVALID;
+
+ gpd = gbp_policy_dpo_alloc ();
+
+ gpd->gpd_proto = dproto;
+ gpd->gpd_sw_if_index = sw_if_index;
+ gpd->gpd_sclass = sclass;
+ gpd->gpd_scope = scope;
+
+ if (~0 != sw_if_index)
+ {
+ /*
+ * stack on the DVR DPO for the output interface
+ */
+ dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
+ }
+ else
+ {
+ dpo_copy (&parent, drop_dpo_get (dproto));
+ }
+
+ dpo_stack (gbp_policy_dpo_type, dproto, &gpd->gpd_dpo, &parent);
+ dpo_set (dpo, gbp_policy_dpo_type, dproto, gbp_policy_dpo_get_index (gpd));
+}
+
+u8 *
+format_gbp_policy_dpo (u8 * s, va_list * ap)
+{
+ index_t index = va_arg (*ap, index_t);
+ u32 indent = va_arg (*ap, u32);
+ gbp_policy_dpo_t *gpd = gbp_policy_dpo_get (index);
+ vnet_main_t *vnm = vnet_get_main ();
+
+ s = format (s, "gbp-policy-dpo: %U, scope:%d sclass:%d out:%U",
+ format_dpo_proto, gpd->gpd_proto,
+ gpd->gpd_scope, (int) gpd->gpd_sclass,
+ format_vnet_sw_if_index_name, vnm, gpd->gpd_sw_if_index);
+ s = format (s, "\n%U", format_white_space, indent + 2);
+ s = format (s, "%U", format_dpo_id, &gpd->gpd_dpo, indent + 4);
+
+ return (s);
+}
+
+/**
+ * Interpose a policy DPO
+ */
+static void
+gbp_policy_dpo_interpose (const dpo_id_t * original,
+ const dpo_id_t * parent, dpo_id_t * clone)
+{
+ gbp_policy_dpo_t *gpd, *gpd_clone;
+
+ gpd_clone = gbp_policy_dpo_alloc ();
+ gpd = gbp_policy_dpo_get (original->dpoi_index);
+
+ gpd_clone->gpd_proto = gpd->gpd_proto;
+ gpd_clone->gpd_scope = gpd->gpd_scope;
+ gpd_clone->gpd_sclass = gpd->gpd_sclass;
+ gpd_clone->gpd_sw_if_index = gpd->gpd_sw_if_index;
+
+ /*
+ * if no interface is provided, grab one from the parent
+ * on which we stack
+ */
+ if (~0 == gpd_clone->gpd_sw_if_index)
+ gpd_clone->gpd_sw_if_index = dpo_get_urpf (parent);
+
+ dpo_stack (gbp_policy_dpo_type,
+ gpd_clone->gpd_proto, &gpd_clone->gpd_dpo, parent);
+
+ dpo_set (clone,
+ gbp_policy_dpo_type,
+ gpd_clone->gpd_proto, gbp_policy_dpo_get_index (gpd_clone));
+}
+
+const static dpo_vft_t gbp_policy_dpo_vft = {
+ .dv_lock = gbp_policy_dpo_lock,
+ .dv_unlock = gbp_policy_dpo_unlock,
+ .dv_format = format_gbp_policy_dpo,
+ .dv_get_urpf = gbp_policy_dpo_get_urpf,
+ .dv_mk_interpose = gbp_policy_dpo_interpose,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to a GBP
+ * policy DPO object.
+ *
+ * This means that these graph nodes are ones from which a GBP policy DPO
+ * is the parent object in the DPO-graph.
+ */
+const static char *const gbp_policy_dpo_ip4_nodes[] = {
+ "ip4-gbp-policy-dpo",
+ NULL,
+};
+
+const static char *const gbp_policy_dpo_ip6_nodes[] = {
+ "ip6-gbp-policy-dpo",
+ NULL,
+};
+
+const static char *const *const gbp_policy_dpo_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP4] = gbp_policy_dpo_ip4_nodes,
+ [DPO_PROTO_IP6] = gbp_policy_dpo_ip6_nodes,
+};
+
+dpo_type_t
+gbp_policy_dpo_get_type (void)
+{
+ return (gbp_policy_dpo_type);
+}
+
+static clib_error_t *
+gbp_policy_dpo_module_init (vlib_main_t * vm)
+{
+ gbp_policy_dpo_type = dpo_register_new_type (&gbp_policy_dpo_vft,
+ gbp_policy_dpo_nodes);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_policy_dpo_module_init);
+#endif /* CLIB_MARCH_VARIANT */
+
+typedef enum
+{
+ GBP_POLICY_DROP,
+ GBP_POLICY_N_NEXT,
+} gbp_policy_next_t;
+
+always_inline u32
+gbp_rule_l3_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0, int is_ip6)
+{
+ gbp_policy_node_t pnode;
+ const dpo_id_t *dpo;
+ dpo_proto_t dproto;
+
+ pnode = (is_ip6 ? GBP_POLICY_NODE_IP6 : GBP_POLICY_NODE_IP4);
+ dproto = (is_ip6 ? DPO_PROTO_IP6 : DPO_PROTO_IP4);
+ dpo = &gu->gu_dpo[pnode][dproto];
+
+	  /* The flow hash is still valid as this is an IP packet being switched */
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
+
+ return (dpo->dpoi_next_node);
+}
+
+always_inline uword
+gbp_policy_dpo_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, u8 is_ip6)
+{
+ gbp_main_t *gm = &gbp_main;
+ u32 n_left_from, next_index, *from, *to_next;
+ u32 n_allow_intra, n_allow_a_bit, n_allow_sclass_1;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ n_allow_intra = n_allow_a_bit = n_allow_sclass_1 = 0;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ gbp_rule_action_t action0 = GBP_RULE_DENY;
+ u32 acl_match = ~0, rule_match = ~0;
+ const gbp_policy_dpo_t *gpd0;
+ gbp_contract_error_t err0;
+ gbp_contract_key_t key0;
+ vlib_buffer_t *b0;
+ gbp_rule_t *rule0;
+ u32 bi0, next0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ next0 = GBP_POLICY_DROP;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
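+	  /*
+	   * get the policy DPO this packet resolved to, then restore the
+	   * buffer's TX adjacency to the stacked parent DPO so the packet
+	   * can continue down the forwarding chain once policy is applied
+	   */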
+ gpd0 = gbp_policy_dpo_get (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;
+
+ /*
+	   * Reflection check; in and out on an iVXLAN tunnel
+ */
+ if ((~0 != vxlan_gbp_tunnel_by_sw_if_index (gpd0->gpd_sw_if_index))
+ && (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_R))
+ {
+ goto trace;
+ }
+
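+	  /*
+	   * If the A-bit is set then policy has already been applied
+	   * and we skip enforcement here
+	   */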
+ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
+ {
+ next0 = gpd0->gpd_dpo.dpoi_next_node;
+ key0.as_u64 = ~0;
+ n_allow_a_bit++;
+ goto trace;
+ }
+
+ /* zero out the key to ensure the pad space is clear */
+ key0.as_u64 = 0;
+ key0.gck_src = vnet_buffer2 (b0)->gbp.sclass;
+
+ if (SCLASS_INVALID == key0.gck_src)
+ {
+ /*
+ * the src EPG is not set when the packet arrives on an EPG
+ * uplink interface and we do not need to apply policy
+ */
+ next0 = gpd0->gpd_dpo.dpoi_next_node;
+ goto trace;
+ }
+
+ key0.gck_scope = gpd0->gpd_scope;
+ key0.gck_dst = gpd0->gpd_sclass;
+
+ action0 =
+ gbp_contract_apply (vm, gm, &key0, b0, &rule0, &n_allow_intra,
+ &n_allow_sclass_1, &acl_match, &rule_match,
+ &err0,
+ is_ip6 ? GBP_CONTRACT_APPLY_IP6 :
+ GBP_CONTRACT_APPLY_IP4);
+ switch (action0)
+ {
+ case GBP_RULE_PERMIT:
+ next0 = gpd0->gpd_dpo.dpoi_next_node;
+ vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+ break;
+ case GBP_RULE_REDIRECT:
+ next0 = gbp_rule_l3_redirect (rule0, b0, is_ip6);
+ vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+ break;
+ case GBP_RULE_DENY:
+ next0 = GBP_POLICY_DROP;
+ b0->error = node->errors[err0];
+ break;
+ }
+
+ trace:
+ gbp_policy_trace (vm, node, b0, &key0, action0, acl_match,
+ rule_match);
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ GBP_CONTRACT_ERROR_ALLOW_INTRA, n_allow_intra);
+ vlib_node_increment_counter (vm, node->node_index,
+ GBP_CONTRACT_ERROR_ALLOW_A_BIT, n_allow_a_bit);
+ vlib_node_increment_counter (vm, node->node_index,
+ GBP_CONTRACT_ERROR_ALLOW_SCLASS_1,
+ n_allow_sclass_1);
+ return from_frame->n_vectors;
+}
+
+VLIB_NODE_FN (ip4_gbp_policy_dpo_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (gbp_policy_dpo_inline (vm, node, from_frame, 0));
+}
+
+VLIB_NODE_FN (ip6_gbp_policy_dpo_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (gbp_policy_dpo_inline (vm, node, from_frame, 1));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_gbp_policy_dpo_node) = {
+ .name = "ip4-gbp-policy-dpo",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_policy_trace,
+
+ .n_errors = ARRAY_LEN(gbp_contract_error_strings),
+ .error_strings = gbp_contract_error_strings,
+
+ .n_next_nodes = GBP_POLICY_N_NEXT,
+ .next_nodes =
+ {
+ [GBP_POLICY_DROP] = "ip4-drop",
+ }
+};
+VLIB_REGISTER_NODE (ip6_gbp_policy_dpo_node) = {
+ .name = "ip6-gbp-policy-dpo",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_policy_trace,
+
+ .n_errors = ARRAY_LEN(gbp_contract_error_strings),
+ .error_strings = gbp_contract_error_strings,
+
+ .n_next_nodes = GBP_POLICY_N_NEXT,
+ .next_nodes =
+ {
+ [GBP_POLICY_DROP] = "ip6-drop",
+ }
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_policy_dpo.h b/extras/deprecated/plugins/gbp/gbp_policy_dpo.h
new file mode 100644
index 00000000000..77ca5d93bd0
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_policy_dpo.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_POLICY_DPO_H__
+#define __GBP_POLICY_DPO_H__
+
+#include <vnet/dpo/dpo.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+
+/**
+ * @brief
+ * The GBP policy DPO. Used in the L3 path to apply policy to
+ * packets based on the source and destination EPGs.
+ */
+typedef struct gbp_policy_dpo_t_
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /**
+ * The protocol of packets using this DPO
+ */
+ dpo_proto_t gpd_proto;
+
+ /**
+ * SClass
+ */
+ sclass_t gpd_sclass;
+
+ /**
+ * sclass scope
+ */
+ gbp_scope_t gpd_scope;
+
+ /**
+ * output sw_if_index
+ */
+ u32 gpd_sw_if_index;
+
+ /**
+ * number of locks.
+ */
+ u16 gpd_locks;
+
+ /**
+ * Stacked DPO on DVR/ADJ of output interface
+ */
+ dpo_id_t gpd_dpo;
+} gbp_policy_dpo_t;
+
+extern void gbp_policy_dpo_add_or_lock (dpo_proto_t dproto,
+ gbp_scope_t scope,
+ sclass_t sclass,
+ u32 sw_if_index, dpo_id_t * dpo);
+
+extern dpo_type_t gbp_policy_dpo_get_type (void);
+
+extern vlib_node_registration_t ip4_gbp_policy_dpo_node;
+extern vlib_node_registration_t ip6_gbp_policy_dpo_node;
+extern vlib_node_registration_t gbp_policy_port_node;
+
+/**
+ * Types exposed for the Data-plane
+ */
+extern dpo_type_t gbp_policy_dpo_type;
+extern gbp_policy_dpo_t *gbp_policy_dpo_pool;
+
+always_inline gbp_policy_dpo_t *
+gbp_policy_dpo_get (index_t index)
+{
+ return (pool_elt_at_index (gbp_policy_dpo_pool, index));
+}
+
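+/**
+ * Look up the address in the given FIB and check whether bucket 0 of
+ * the resulting load-balance is a GBP policy DPO; if so return it
+ * (so the caller can derive the sclass), otherwise return NULL
+ */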
+static_always_inline const gbp_policy_dpo_t *
+gbp_classify_get_gpd (const ip4_address_t * ip4, const ip6_address_t * ip6,
+ const u32 fib_index)
+{
+ const gbp_policy_dpo_t *gpd;
+ const dpo_id_t *dpo;
+ const load_balance_t *lb;
+ u32 lbi;
+
+ if (ip4)
+ lbi = ip4_fib_forwarding_lookup (fib_index, ip4);
+ else if (ip6)
+ lbi = ip6_fib_table_fwding_lookup (fib_index, ip6);
+ else
+ return 0;
+
+ lb = load_balance_get (lbi);
+ dpo = load_balance_get_bucket_i (lb, 0);
+
+ if (dpo->dpoi_type != gbp_policy_dpo_type)
+ return 0;
+
+ gpd = gbp_policy_dpo_get (dpo->dpoi_index);
+ return gpd;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+
+#endif
diff --git a/extras/deprecated/plugins/gbp/gbp_policy_node.c b/extras/deprecated/plugins/gbp/gbp_policy_node.c
new file mode 100644
index 00000000000..8c6ef5c2b94
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_policy_node.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_classify.h>
+#include <plugins/gbp/gbp_policy.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_ext_itf.h>
+#include <plugins/gbp/gbp_contract.h>
+
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+
+typedef enum
+{
+ GBP_POLICY_NEXT_DROP,
+ GBP_POLICY_N_NEXT,
+} gbp_policy_next_t;
+
+always_inline dpo_proto_t
+ethertype_to_dpo_proto (u16 etype)
+{
+ etype = clib_net_to_host_u16 (etype);
+
+ switch (etype)
+ {
+ case ETHERNET_TYPE_IP4:
+ return (DPO_PROTO_IP4);
+ case ETHERNET_TYPE_IP6:
+ return (DPO_PROTO_IP6);
+ }
+
+ return (DPO_PROTO_NONE);
+}
+
+always_inline u32
+gbp_rule_l2_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0)
+{
+ const ethernet_header_t *eth0;
+ const dpo_id_t *dpo;
+ dpo_proto_t dproto;
+
+ eth0 = vlib_buffer_get_current (b0);
+ /* pop the ethernet header to prepare for L3 rewrite */
+ vlib_buffer_advance (b0, vnet_buffer (b0)->l2.l2_len);
+
+ dproto = ethertype_to_dpo_proto (eth0->type);
+ dpo = &gu->gu_dpo[GBP_POLICY_NODE_L2][dproto];
+
+ /* save the LB index for the next node and reset the IP flow hash
+ * so it's recalculated */
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
+ vnet_buffer (b0)->ip.flow_hash = 0;
+
+ return (dpo->dpoi_next_node);
+}
+
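+/**
+ * Move to the next enabled feature on the L2 output arc, using the
+ * feature bit that corresponds to this policy node type
+ */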
+static_always_inline gbp_policy_next_t
+gbp_policy_l2_feature_next (gbp_policy_main_t * gpm, vlib_buffer_t * b,
+ const gbp_policy_type_t type)
+{
+ u32 feat_bit;
+
+ switch (type)
+ {
+ case GBP_POLICY_PORT:
+ feat_bit = L2OUTPUT_FEAT_GBP_POLICY_PORT;
+ break;
+ case GBP_POLICY_MAC:
+ feat_bit = L2OUTPUT_FEAT_GBP_POLICY_MAC;
+ break;
+ case GBP_POLICY_LPM:
+ feat_bit = L2OUTPUT_FEAT_GBP_POLICY_LPM;
+ break;
+ default:
+ return GBP_POLICY_NEXT_DROP;
+ }
+
+ return vnet_l2_feature_next (b, gpm->l2_output_feat_next[type], feat_bit);
+}
+
+static uword
+gbp_policy_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, const gbp_policy_type_t type)
+{
+ gbp_main_t *gm = &gbp_main;
+ gbp_policy_main_t *gpm = &gbp_policy_main;
+ u32 n_left_from, *from, *to_next;
+ u32 next_index;
+ u32 n_allow_intra, n_allow_a_bit, n_allow_sclass_1;
+
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+ n_allow_intra = n_allow_a_bit = n_allow_sclass_1 = 0;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ gbp_rule_action_t action0 = GBP_RULE_DENY;
+ const ethernet_header_t *h0;
+ const gbp_endpoint_t *ge0;
+ gbp_contract_error_t err0;
+ u32 acl_match = ~0, rule_match = ~0;
+ gbp_policy_next_t next0;
+ gbp_contract_key_t key0;
+ u32 bi0, sw_if_index0;
+ vlib_buffer_t *b0;
+ gbp_rule_t *rule0;
+
+ next0 = GBP_POLICY_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+ /*
+	   * Reflection check; in and out on an iVXLAN tunnel
+ */
+ if ((~0 != vxlan_gbp_tunnel_by_sw_if_index (sw_if_index0)) &&
+ (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_R))
+ {
+ goto trace;
+ }
+
+ /*
+ * If the A-bit is set then policy has already been applied
+ * and we skip enforcement here.
+ */
+ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
+ {
+ next0 = gbp_policy_l2_feature_next (gpm, b0, type);
+ n_allow_a_bit++;
+ key0.as_u64 = ~0;
+ goto trace;
+ }
+
+ /*
+ * determine the src and dst EPG
+ */
+
+ /* zero out the key to ensure the pad space is clear */
+ key0.as_u64 = 0;
+ key0.gck_src = vnet_buffer2 (b0)->gbp.sclass;
+ key0.gck_dst = SCLASS_INVALID;
+
+ if (GBP_POLICY_LPM == type)
+ {
+ const ip4_address_t *ip4 = 0;
+ const ip6_address_t *ip6 = 0;
+ const dpo_proto_t proto =
+ gbp_classify_get_ip_address (h0, &ip4, &ip6,
+ GBP_CLASSIFY_GET_IP_DST);
+ if (PREDICT_TRUE (DPO_PROTO_NONE != proto))
+ {
+ const gbp_ext_itf_t *ext_itf =
+ gbp_ext_itf_get (sw_if_index0);
+ const gbp_policy_dpo_t *gpd =
+ gbp_classify_get_gpd (ip4, ip6,
+ ext_itf->gx_fib_index[proto]);
+ if (gpd)
+ key0.gck_dst = gpd->gpd_sclass;
+ }
+ }
+ else
+ {
+ if (GBP_POLICY_PORT == type)
+ ge0 = gbp_endpoint_find_itf (sw_if_index0);
+ else
+ ge0 = gbp_endpoint_find_mac (h0->dst_address,
+ vnet_buffer (b0)->l2.bd_index);
+ if (NULL != ge0)
+ key0.gck_dst = ge0->ge_fwd.gef_sclass;
+ }
+
+ if (SCLASS_INVALID == key0.gck_dst)
+ {
+ /* If you cannot determine the destination EP then drop */
+ b0->error = node->errors[GBP_CONTRACT_ERROR_DROP_NO_DCLASS];
+ goto trace;
+ }
+
+ if (SCLASS_INVALID == key0.gck_src)
+ {
+ /*
+ * the src EPG is not set when the packet arrives on an EPG
+ * uplink interface and we do not need to apply policy
+ */
+ next0 = gbp_policy_l2_feature_next (gpm, b0, type);
+ goto trace;
+ }
+
+ key0.gck_scope =
+ gbp_bridge_domain_get_scope (vnet_buffer (b0)->l2.bd_index);
+
+ action0 =
+ gbp_contract_apply (vm, gm, &key0, b0, &rule0, &n_allow_intra,
+ &n_allow_sclass_1, &acl_match, &rule_match,
+ &err0, GBP_CONTRACT_APPLY_L2);
+ switch (action0)
+ {
+ case GBP_RULE_PERMIT:
+ next0 = gbp_policy_l2_feature_next (gpm, b0, type);
+ vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+ break;
+ case GBP_RULE_REDIRECT:
+ next0 = gbp_rule_l2_redirect (rule0, b0);
+ vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+ break;
+ case GBP_RULE_DENY:
+ next0 = GBP_POLICY_NEXT_DROP;
+ b0->error = node->errors[err0];
+ break;
+ }
+
+ trace:
+ gbp_policy_trace (vm, node, b0, &key0, action0, acl_match,
+ rule_match);
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ GBP_CONTRACT_ERROR_ALLOW_INTRA, n_allow_intra);
+ vlib_node_increment_counter (vm, node->node_index,
+ GBP_CONTRACT_ERROR_ALLOW_A_BIT, n_allow_a_bit);
+ vlib_node_increment_counter (vm, node->node_index,
+ GBP_CONTRACT_ERROR_ALLOW_SCLASS_1,
+ n_allow_sclass_1);
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (gbp_policy_port_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_policy_inline (vm, node, frame, GBP_POLICY_PORT));
+}
+
+VLIB_NODE_FN (gbp_policy_mac_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_policy_inline (vm, node, frame, GBP_POLICY_MAC));
+}
+
+VLIB_NODE_FN (gbp_policy_lpm_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gbp_policy_inline (vm, node, frame, GBP_POLICY_LPM));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_policy_port_node) = {
+ .name = "gbp-policy-port",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_policy_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(gbp_contract_error_strings),
+ .error_strings = gbp_contract_error_strings,
+
+ .n_next_nodes = GBP_POLICY_N_NEXT,
+ .next_nodes = {
+ [GBP_POLICY_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_policy_mac_node) = {
+ .name = "gbp-policy-mac",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_policy_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(gbp_contract_error_strings),
+ .error_strings = gbp_contract_error_strings,
+
+ .n_next_nodes = GBP_POLICY_N_NEXT,
+ .next_nodes = {
+ [GBP_POLICY_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (gbp_policy_lpm_node) = {
+ .name = "gbp-policy-lpm",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_policy_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(gbp_contract_error_strings),
+ .error_strings = gbp_contract_error_strings,
+
+ .n_next_nodes = GBP_POLICY_N_NEXT,
+ .next_nodes = {
+ [GBP_POLICY_NEXT_DROP] = "error-drop",
+ },
+};
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_recirc.c b/extras/deprecated/plugins/gbp/gbp_recirc.c
new file mode 100644
index 00000000000..8d56f11b4e3
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_recirc.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_recirc.h>
+#include <plugins/gbp/gbp_endpoint_group.h>
+#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_itf.h>
+
+#include <vnet/dpo/dvr_dpo.h>
+#include <vnet/fib/fib_table.h>
+
+#include <vlib/unix/plugin.h>
+
+/**
+ * Pool of GBP recircs
+ */
+gbp_recirc_t *gbp_recirc_pool;
+
+/**
+ * Recirc configs keyed by sw_if_index
+ */
+index_t *gbp_recirc_db;
+
+/**
+ * logger
+ */
+vlib_log_class_t gr_logger;
+
+/**
+ * L2 Emulation enable/disable symbols
+ */
+static void (*l2e_enable) (u32 sw_if_index);
+static void (*l2e_disable) (u32 sw_if_index);
+
+#define GBP_RECIRC_DBG(...) \
+ vlib_log_debug (gr_logger, __VA_ARGS__);
+
+u8 *
+format_gbp_recirc (u8 * s, va_list * args)
+{
+ gbp_recirc_t *gr = va_arg (*args, gbp_recirc_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+
+ return format (s, " %U, sclass:%d, ext:%d",
+ format_vnet_sw_if_index_name, vnm,
+ gr->gr_sw_if_index, gr->gr_sclass, gr->gr_is_ext);
+}
+
+int
+gbp_recirc_add (u32 sw_if_index, sclass_t sclass, u8 is_ext)
+{
+ gbp_recirc_t *gr;
+ index_t gri;
+
+ vec_validate_init_empty (gbp_recirc_db, sw_if_index, INDEX_INVALID);
+
+ gri = gbp_recirc_db[sw_if_index];
+
+ if (INDEX_INVALID == gri)
+ {
+ gbp_endpoint_group_t *gg;
+ fib_protocol_t fproto;
+ index_t ggi;
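+/**
+ * Add a policy trace record for the buffer, if it is being traced
+ */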
+
+ ggi = gbp_endpoint_group_find (sclass);
+
+ if (INDEX_INVALID == ggi)
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+
+ gbp_endpoint_group_lock (ggi);
+ pool_get_zero (gbp_recirc_pool, gr);
+ gri = gr - gbp_recirc_pool;
+
+ gr->gr_sclass = sclass;
+ gr->gr_is_ext = is_ext;
+ gr->gr_sw_if_index = sw_if_index;
+
+ /*
+ * IP enable the recirc interface
+ */
+ ip4_sw_interface_enable_disable (gr->gr_sw_if_index, 1);
+ ip6_sw_interface_enable_disable (gr->gr_sw_if_index, 1);
+
+ /*
+       * cache the FIB indices of the EPG
+ */
+ gr->gr_epgi = ggi;
+
+ gg = gbp_endpoint_group_get (gr->gr_epgi);
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ gr->gr_fib_index[fib_proto_to_dpo (fproto)] =
+ gbp_endpoint_group_get_fib_index (gg, fproto);
+ }
+
+ /*
+ * bind to the bridge-domain of the EPG
+ */
+ gr->gr_itf = gbp_itf_l2_add_and_lock (gr->gr_sw_if_index, gg->gg_gbd);
+
+ /*
+ * set the interface into L2 emulation mode
+ */
+ l2e_enable (gr->gr_sw_if_index);
+
+ /*
+ * Packets on the recirculation interface are subject to src-EPG
+       * classification. Recirc interfaces are in L2-emulation mode;
+       * for internal EPGs this is via an LPM on all external subnets,
+       * for external EPGs this is via a port mapping.
+ */
+ if (gr->gr_is_ext)
+ {
+ mac_address_t mac;
+ /*
+ * recirc is for post-NAT translation packets going into
+ * the external EPG, these are classified to the NAT EPG
+ * based on its port
+ */
+ mac_address_from_bytes (&mac,
+ vnet_sw_interface_get_hw_address
+ (vnet_get_main (), gr->gr_sw_if_index));
+ gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_CP,
+ gr->gr_sw_if_index,
+ NULL, &mac, INDEX_INVALID,
+ INDEX_INVALID, gr->gr_sclass,
+ GBP_ENDPOINT_FLAG_NONE,
+ NULL, NULL, &gr->gr_ep);
+ vnet_feature_enable_disable ("ip4-unicast",
+ "ip4-gbp-src-classify",
+ gr->gr_sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "ip6-gbp-src-classify",
+ gr->gr_sw_if_index, 1, 0, 0);
+ }
+ else
+ {
+ /*
+ * recirc is for pre-NAT translation packets coming from
+ * the external EPG, these are classified based on a LPM
+ * in the EPG's route-domain
+ */
+ vnet_feature_enable_disable ("ip4-unicast",
+ "ip4-gbp-lpm-classify",
+ gr->gr_sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "ip6-gbp-lpm-classify",
+ gr->gr_sw_if_index, 1, 0, 0);
+ }
+
+ gbp_recirc_db[sw_if_index] = gri;
+ }
+ else
+ {
+ gr = gbp_recirc_get (gri);
+ }
+
+ GBP_RECIRC_DBG ("add: %U", format_gbp_recirc, gr);
+ return (0);
+}
+
+int
+gbp_recirc_delete (u32 sw_if_index)
+{
+ gbp_recirc_t *gr;
+ index_t gri;
+
+ if (vec_len (gbp_recirc_db) <= sw_if_index)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ gri = gbp_recirc_db[sw_if_index];
+
+ if (INDEX_INVALID != gri)
+ {
+ gr = pool_elt_at_index (gbp_recirc_pool, gri);
+
+ GBP_RECIRC_DBG ("del: %U", format_gbp_recirc, gr);
+
+ if (gr->gr_is_ext)
+ {
+ gbp_endpoint_unlock (GBP_ENDPOINT_SRC_CP, gr->gr_ep);
+ vnet_feature_enable_disable ("ip4-unicast",
+ "ip4-gbp-src-classify",
+ gr->gr_sw_if_index, 0, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "ip6-gbp-src-classify",
+ gr->gr_sw_if_index, 0, 0, 0);
+ }
+ else
+ {
+ vnet_feature_enable_disable ("ip4-unicast",
+ "ip4-gbp-lpm-classify",
+ gr->gr_sw_if_index, 0, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "ip6-gbp-lpm-classify",
+ gr->gr_sw_if_index, 0, 0, 0);
+ }
+
+ ip4_sw_interface_enable_disable (gr->gr_sw_if_index, 0);
+ ip6_sw_interface_enable_disable (gr->gr_sw_if_index, 0);
+ l2e_disable (gr->gr_sw_if_index);
+
+ gbp_itf_unlock (&gr->gr_itf);
+
+ gbp_endpoint_group_unlock (gr->gr_epgi);
+ gbp_recirc_db[sw_if_index] = INDEX_INVALID;
+ pool_put (gbp_recirc_pool, gr);
+ return (0);
+ }
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+}
+
+void
+gbp_recirc_walk (gbp_recirc_cb_t cb, void *ctx)
+{
+ gbp_recirc_t *ge;
+
+ /* *INDENT-OFF* */
+ pool_foreach (ge, gbp_recirc_pool)
+ {
+ if (!cb(ge, ctx))
+ break;
+ }
+ /* *INDENT-ON* */
+}
+
+static walk_rc_t
+gbp_recirc_show_one (gbp_recirc_t * gr, void *ctx)
+{
+ vlib_cli_output (ctx, " %U", format_gbp_recirc, gr);
+
+ return (WALK_CONTINUE);
+}
+
+static clib_error_t *
+gbp_recirc_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "Recirculation-Interfaces:");
+ gbp_recirc_walk (gbp_recirc_show_one, vm);
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy Recircs and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp recirc}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_recirc_show_node, static) = {
+ .path = "show gbp recirc",
+ .short_help = "show gbp recirc\n",
+ .function = gbp_recirc_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_recirc_init (vlib_main_t * vm)
+{
+ gr_logger = vlib_log_register_class ("gbp", "recirc");
+
+ l2e_enable =
+ vlib_get_plugin_symbol ("l2e_plugin.so", "l2_emulation_enable");
+ l2e_disable =
+ vlib_get_plugin_symbol ("l2e_plugin.so", "l2_emulation_disable");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_recirc_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_recirc.h b/extras/deprecated/plugins/gbp/gbp_recirc.h
new file mode 100644
index 00000000000..2f3354b794e
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_recirc.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_RECIRC_H__
+#define __GBP_RECIRC_H__
+
+#include <plugins/gbp/gbp_types.h>
+#include <plugins/gbp/gbp_itf.h>
+#include <vnet/fib/fib_types.h>
+
+/**
+ * A GBP recirculation interface representation
+ * These interfaces join bridge domains that are internal to those that
+ * are NAT external, so that packets can be NAT translated and then
+ * undergo the whole policy process again.
+ */
+typedef struct gbp_recirc_t_
+{
+ /**
+ * EPG ID that packets will classify to when they arrive on this recirc
+ */
+ sclass_t gr_sclass;
+
+ /**
+ * The index of the EPG
+ */
+ index_t gr_epgi;
+
+ /**
+ * FIB indices the EPG is mapped to
+ */
+ u32 gr_fib_index[DPO_PROTO_NUM];
+
+ /**
+ * Is the interface for packets post-NAT translation (i.e. ext)
+ * or pre-NAT translation (i.e. internal)
+ */
+ u8 gr_is_ext;
+
+  /**
+   * The recirculation interface and the GBP-itf handle bound to it
+   */
+ u32 gr_sw_if_index;
+ gbp_itf_hdl_t gr_itf;
+
+ /**
+   * The endpoint created to represent the recirc interface
+ */
+ index_t gr_ep;
+} gbp_recirc_t;
+
+extern int gbp_recirc_add (u32 sw_if_index, sclass_t sclass, u8 is_ext);
+extern int gbp_recirc_delete (u32 sw_if_index);
+
+typedef walk_rc_t (*gbp_recirc_cb_t) (gbp_recirc_t * gbpe, void *ctx);
+extern void gbp_recirc_walk (gbp_recirc_cb_t cb, void *ctx);
+
+/**
+ * Data plane functions
+ */
+extern gbp_recirc_t *gbp_recirc_pool;
+extern index_t *gbp_recirc_db;
+
+always_inline gbp_recirc_t *
+gbp_recirc_get (u32 sw_if_index)
+{
+ return (pool_elt_at_index (gbp_recirc_pool, gbp_recirc_db[sw_if_index]));
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_route_domain.c b/extras/deprecated/plugins/gbp/gbp_route_domain.c
new file mode 100644
index 00000000000..6cc595d0fa9
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_route_domain.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_endpoint.h>
+
+#include <vnet/dpo/dvr_dpo.h>
+#include <vnet/fib/fib_table.h>
+
+/**
+ * A fixed MAC address to use as the source MAC for packets L3 switched
+ * onto the routed uu-fwd interfaces.
+ * Magic values - origin lost to the mists of time...
+ */
+/* *INDENT-OFF* */
+const static mac_address_t GBP_ROUTED_SRC_MAC = {
+ .bytes = {
+ 0x0, 0x22, 0xBD, 0xF8, 0x19, 0xFF,
+ }
+};
+
+const static mac_address_t GBP_ROUTED_DST_MAC = {
+ .bytes = {
+    0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ }
+};
+/* *INDENT-ON* */
+
+/**
+ * Pool of GBP route_domains
+ */
+gbp_route_domain_t *gbp_route_domain_pool;
+
+/**
+ * DB of route_domains
+ */
+typedef struct gbp_route_domain_db_t
+{
+ uword *gbd_by_rd_id;
+} gbp_route_domain_db_t;
+
+static gbp_route_domain_db_t gbp_route_domain_db;
+static fib_source_t gbp_fib_source;
+
+/**
+ * logger
+ */
+vlib_log_class_t grd_logger;
+
+#define GBP_RD_DBG(...)                           \
+ vlib_log_debug (grd_logger, __VA_ARGS__);
+
+index_t
+gbp_route_domain_index (const gbp_route_domain_t * grd)
+{
+ return (grd - gbp_route_domain_pool);
+}
+
+gbp_route_domain_t *
+gbp_route_domain_get (index_t i)
+{
+ return (pool_elt_at_index (gbp_route_domain_pool, i));
+}
+
+static void
+gbp_route_domain_lock (index_t i)
+{
+ gbp_route_domain_t *grd;
+
+ grd = gbp_route_domain_get (i);
+ grd->grd_locks++;
+}
+
+index_t
+gbp_route_domain_find (u32 rd_id)
+{
+ uword *p;
+
+ p = hash_get (gbp_route_domain_db.gbd_by_rd_id, rd_id);
+
+ if (NULL != p)
+ return p[0];
+
+ return (INDEX_INVALID);
+}
+
+index_t
+gbp_route_domain_find_and_lock (u32 rd_id)
+{
+ index_t grdi;
+
+ grdi = gbp_route_domain_find (rd_id);
+
+ if (INDEX_INVALID != grdi)
+ {
+ gbp_route_domain_lock (grdi);
+ }
+ return (grdi);
+}
+
+static void
+gbp_route_domain_db_add (gbp_route_domain_t * grd)
+{
+ index_t grdi = grd - gbp_route_domain_pool;
+
+ hash_set (gbp_route_domain_db.gbd_by_rd_id, grd->grd_id, grdi);
+}
+
+static void
+gbp_route_domain_db_remove (gbp_route_domain_t * grd)
+{
+ hash_unset (gbp_route_domain_db.gbd_by_rd_id, grd->grd_id);
+}
+
+int
+gbp_route_domain_add_and_lock (u32 rd_id,
+ gbp_scope_t scope,
+ u32 ip4_table_id,
+ u32 ip6_table_id,
+ u32 ip4_uu_sw_if_index, u32 ip6_uu_sw_if_index)
+{
+ gbp_route_domain_t *grd;
+ index_t grdi;
+
+ grdi = gbp_route_domain_find (rd_id);
+
+ if (INDEX_INVALID == grdi)
+ {
+ fib_protocol_t fproto;
+
+ pool_get_zero (gbp_route_domain_pool, grd);
+
+ grd->grd_id = rd_id;
+ grd->grd_scope = scope;
+ grd->grd_table_id[FIB_PROTOCOL_IP4] = ip4_table_id;
+ grd->grd_table_id[FIB_PROTOCOL_IP6] = ip6_table_id;
+ grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP4] = ip4_uu_sw_if_index;
+ grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP6] = ip6_uu_sw_if_index;
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ grd->grd_fib_index[fproto] =
+ fib_table_find_or_create_and_lock (fproto,
+ grd->grd_table_id[fproto],
+ gbp_fib_source);
+
+ if (~0 != grd->grd_uu_sw_if_index[fproto])
+ {
+ ethernet_header_t *eth;
+ u8 *rewrite;
+
+ rewrite = NULL;
+ vec_validate (rewrite, sizeof (*eth) - 1);
+ eth = (ethernet_header_t *) rewrite;
+
+ eth->type = clib_host_to_net_u16 ((fproto == FIB_PROTOCOL_IP4 ?
+ ETHERNET_TYPE_IP4 :
+ ETHERNET_TYPE_IP6));
+
+ mac_address_to_bytes (gbp_route_domain_get_local_mac (),
+ eth->src_address);
+ mac_address_to_bytes (gbp_route_domain_get_remote_mac (),
+ eth->dst_address);
+
+ /*
+ * create an adjacency out of the uu-fwd interfaces that will
+ * be used when adding subnet routes.
+ */
+ grd->grd_adj[fproto] =
+ adj_nbr_add_or_lock_w_rewrite (fproto,
+ fib_proto_to_link (fproto),
+ &ADJ_BCAST_ADDR,
+ grd->grd_uu_sw_if_index[fproto],
+ rewrite);
+ }
+ else
+ {
+ grd->grd_adj[fproto] = INDEX_INVALID;
+ }
+ }
+
+ gbp_route_domain_db_add (grd);
+ }
+ else
+ {
+ grd = gbp_route_domain_get (grdi);
+ }
+
+ grd->grd_locks++;
+  GBP_RD_DBG ("add: %U", format_gbp_route_domain, grd);
+
+ return (0);
+}
+
+void
+gbp_route_domain_unlock (index_t index)
+{
+ gbp_route_domain_t *grd;
+
+ grd = gbp_route_domain_get (index);
+
+ grd->grd_locks--;
+
+ if (0 == grd->grd_locks)
+ {
+ fib_protocol_t fproto;
+
+      GBP_RD_DBG ("destroy: %U", format_gbp_route_domain, grd);
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ fib_table_unlock (grd->grd_fib_index[fproto], fproto, gbp_fib_source);
+ if (INDEX_INVALID != grd->grd_adj[fproto])
+ adj_unlock (grd->grd_adj[fproto]);
+ }
+
+ gbp_route_domain_db_remove (grd);
+
+ pool_put (gbp_route_domain_pool, grd);
+ }
+}
+
+u32
+gbp_route_domain_get_rd_id (index_t grdi)
+{
+ gbp_route_domain_t *grd;
+
+ grd = gbp_route_domain_get (grdi);
+
+ return (grd->grd_id);
+}
+
+gbp_scope_t
+gbp_route_domain_get_scope (index_t grdi)
+{
+ gbp_route_domain_t *grd;
+
+ grd = gbp_route_domain_get (grdi);
+
+ return (grd->grd_scope);
+}
+
+int
+gbp_route_domain_delete (u32 rd_id)
+{
+ index_t grdi;
+
+  GBP_RD_DBG ("del: %d", rd_id);
+ grdi = gbp_route_domain_find (rd_id);
+
+ if (INDEX_INVALID != grdi)
+ {
+      GBP_RD_DBG ("del: %U", format_gbp_route_domain,
+ gbp_route_domain_get (grdi));
+ gbp_route_domain_unlock (grdi);
+
+ return (0);
+ }
+
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+const mac_address_t *
+gbp_route_domain_get_local_mac (void)
+{
+ return (&GBP_ROUTED_SRC_MAC);
+}
+
+const mac_address_t *
+gbp_route_domain_get_remote_mac (void)
+{
+ return (&GBP_ROUTED_DST_MAC);
+}
+
+void
+gbp_route_domain_walk (gbp_route_domain_cb_t cb, void *ctx)
+{
+ gbp_route_domain_t *gbpe;
+
+ /* *INDENT-OFF* */
+ pool_foreach (gbpe, gbp_route_domain_pool)
+ {
+ if (!cb(gbpe, ctx))
+ break;
+ }
+ /* *INDENT-ON* */
+}
+
+static clib_error_t *
+gbp_route_domain_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 ip4_uu_sw_if_index = ~0;
+ u32 ip6_uu_sw_if_index = ~0;
+ u32 ip4_table_id = ~0;
+ u32 ip6_table_id = ~0;
+ u32 scope = ~0;
+ u32 rd_id = ~0;
+ u8 add = 1;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "ip4-uu %U", unformat_vnet_sw_interface,
+ vnm, &ip4_uu_sw_if_index))
+ ;
+ else if (unformat (input, "ip6-uu %U", unformat_vnet_sw_interface,
+ vnm, &ip6_uu_sw_if_index))
+ ;
+ else if (unformat (input, "ip4-table-id %d", &ip4_table_id))
+ ;
+ else if (unformat (input, "ip6-table-id %d", &ip6_table_id))
+ ;
+ else if (unformat (input, "add"))
+ add = 1;
+ else if (unformat (input, "del"))
+ add = 0;
+ else if (unformat (input, "rd %d", &rd_id))
+ ;
+ else if (unformat (input, "scope %d", &scope))
+ ;
+ else
+ break;
+ }
+
+ if (~0 == rd_id)
+ return clib_error_return (0, "RD-ID must be specified");
+
+ if (add)
+ {
+ if (~0 == ip4_table_id)
+ return clib_error_return (0, "IP4 table-ID must be specified");
+ if (~0 == ip6_table_id)
+ return clib_error_return (0, "IP6 table-ID must be specified");
+
+ gbp_route_domain_add_and_lock (rd_id, scope,
+ ip4_table_id,
+ ip6_table_id,
+ ip4_uu_sw_if_index, ip6_uu_sw_if_index);
+ }
+ else
+ gbp_route_domain_delete (rd_id);
+
+ return (NULL);
+}
+
+/*?
+ * Configure a GBP route-domain
+ *
+ * @cliexpar
+ * @cliexstart{gbp route-domain [del] rd <ID> ip4-table-id <ID> ip6-table-id <ID> [ip4-uu <interface>] [ip6-uu <interface>]}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_route_domain_cli_node, static) = {
+ .path = "gbp route-domain",
+ .short_help = "gbp route-domain [del] rd <ID> ip4-table-id <ID> ip6-table-id <ID> [ip4-uu <interface>] [ip6-uu <interface>]",
+ .function = gbp_route_domain_cli,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_gbp_route_domain (u8 * s, va_list * args)
+{
+  gbp_route_domain_t *grd = va_arg (*args, gbp_route_domain_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+
+ if (NULL != grd)
+ s = format (s, "[%d] rd:%d ip4-uu:%U ip6-uu:%U locks:%d",
+ grd - gbp_route_domain_pool,
+ grd->grd_id,
+ format_vnet_sw_if_index_name, vnm, grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP4],
+ format_vnet_sw_if_index_name, vnm, grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP6],
+ grd->grd_locks);
+ else
+ s = format (s, "NULL");
+
+ return (s);
+}
+
+static int
+gbp_route_domain_show_one (gbp_route_domain_t *gb, void *ctx)
+{
+ vlib_main_t *vm;
+
+ vm = ctx;
+  vlib_cli_output (vm, " %U", format_gbp_route_domain, gb);
+
+ return (1);
+}
+
+static clib_error_t *
+gbp_route_domain_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "Route-Domains:");
+ gbp_route_domain_walk (gbp_route_domain_show_one, vm);
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy Route_Domains and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp route_domain}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_route_domain_show_node, static) = {
+ .path = "show gbp route-domain",
+ .short_help = "show gbp route-domain\n",
+ .function = gbp_route_domain_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_route_domain_init (vlib_main_t * vm)
+{
+ grd_logger = vlib_log_register_class ("gbp", "rd");
+ gbp_fib_source = fib_source_allocate ("gbp-rd",
+ FIB_SOURCE_PRIORITY_HI,
+ FIB_SOURCE_BH_DROP);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_route_domain_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_route_domain.h b/extras/deprecated/plugins/gbp/gbp_route_domain.h
new file mode 100644
index 00000000000..897c1bdd7ac
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_route_domain.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_ROUTE_DOMAIN_H__
+#define __GBP_ROUTE_DOMAIN_H__
+
+#include <plugins/gbp/gbp_types.h>
+
+#include <vnet/fib/fib_types.h>
+#include <vnet/ethernet/mac_address.h>
+
+/**
+ * A route-domain representation.
+ * This is a standard route-domain plus all the attributes it must
+ * have to support the GBP model.
+ */
+typedef struct gbp_route_domain_t_
+{
+ /**
+ * Route-domain ID
+ */
+ u32 grd_id;
+ gbp_scope_t grd_scope;
+ u32 grd_fib_index[FIB_PROTOCOL_IP_MAX];
+ u32 grd_table_id[FIB_PROTOCOL_IP_MAX];
+
+ /**
+   * The interfaces on which to send packets to unknown EPs
+ */
+ u32 grd_uu_sw_if_index[FIB_PROTOCOL_IP_MAX];
+
+ /**
+ * adjacencies on the UU interfaces.
+ */
+ u32 grd_adj[FIB_PROTOCOL_IP_MAX];
+
+ u32 grd_locks;
+} gbp_route_domain_t;
+
+extern int gbp_route_domain_add_and_lock (u32 rd_id,
+ gbp_scope_t scope,
+ u32 ip4_table_id,
+ u32 ip6_table_id,
+ u32 ip4_uu_sw_if_index,
+ u32 ip6_uu_sw_if_index);
+extern void gbp_route_domain_unlock (index_t grdi);
+extern index_t gbp_route_domain_find_and_lock (u32 rd_id);
+extern index_t gbp_route_domain_find (u32 rd_id);
+extern index_t gbp_route_domain_index (const gbp_route_domain_t *);
+
+extern int gbp_route_domain_delete (u32 rd_id);
+extern gbp_route_domain_t *gbp_route_domain_get (index_t i);
+extern u32 gbp_route_domain_get_rd_id (index_t i);
+extern gbp_scope_t gbp_route_domain_get_scope (index_t i);
+
+typedef int (*gbp_route_domain_cb_t) (gbp_route_domain_t * gb, void *ctx);
+extern void gbp_route_domain_walk (gbp_route_domain_cb_t cb, void *ctx);
+
+extern const mac_address_t *gbp_route_domain_get_local_mac (void);
+extern const mac_address_t *gbp_route_domain_get_remote_mac (void);
+
+extern u8 *format_gbp_route_domain (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_scanner.c b/extras/deprecated/plugins/gbp/gbp_scanner.c
new file mode 100644
index 00000000000..9ae962b7449
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_scanner.c
@@ -0,0 +1,136 @@
+/*
+ * gbp_scanner.c : Group Based Policy endpoint scanner
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_scanner.h>
+#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_vxlan.h>
+
+/**
+ * Scanner logger
+ */
+vlib_log_class_t gs_logger;
+
+/**
+ * Scanner state
+ */
+static bool gs_enabled;
+
+#define GBP_SCANNER_DBG(...) \
+ vlib_log_debug (gs_logger, __VA_ARGS__);
+
+static uword
+gbp_scanner (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ uword event_type, *event_data = 0;
+ bool do_scan = 0;
+
+ while (1)
+ {
+ do_scan = 0;
+
+ if (gs_enabled)
+ {
+ /* scan every 'inactive threshold' seconds */
+ vlib_process_wait_for_event_or_clock (vm, 2);
+ }
+ else
+ vlib_process_wait_for_event (vm);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+ vec_reset_length (event_data);
+
+ switch (event_type)
+ {
+ case ~0:
+ /* timer expired */
+ do_scan = 1;
+ break;
+
+ case GBP_ENDPOINT_SCAN_START:
+ gs_enabled = 1;
+ break;
+
+ case GBP_ENDPOINT_SCAN_STOP:
+ gs_enabled = 0;
+ break;
+
+ case GBP_ENDPOINT_SCAN_SET_TIME:
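+	  /* the scan interval is fixed; nothing to update */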
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ if (do_scan)
+ {
+ GBP_SCANNER_DBG ("start");
+ gbp_endpoint_scan (vm);
+ GBP_SCANNER_DBG ("stop");
+ }
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_scanner_node) = {
+ .function = gbp_scanner,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "gbp-scanner",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_scanner_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "GBP-scanner: enabled:%d interval:2", gs_enabled);
+
+ return (NULL);
+}
+
+/*?
+ * Show GBP scanner
+ *
+ * @cliexpar
+ * @cliexstart{show gbp scanner}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_scanner_cli_node, static) = {
+ .path = "show gbp scanner",
+ .short_help = "show gbp scanner",
+ .function = gbp_scanner_cli,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_scanner_init (vlib_main_t * vm)
+{
+ gs_logger = vlib_log_register_class ("gbp", "scan");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_scanner_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_scanner.h b/extras/deprecated/plugins/gbp/gbp_scanner.h
new file mode 100644
index 00000000000..1133167d927
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_scanner.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_SCANNER_H__
+#define __GBP_SCANNER_H__
+
+#include <vlib/vlib.h>
+
+typedef enum gbp_scan_event_t_
+{
+ GBP_ENDPOINT_SCAN_START,
+ GBP_ENDPOINT_SCAN_STOP,
+ GBP_ENDPOINT_SCAN_SET_TIME,
+} gbp_scan_event_t;
+
+extern vlib_node_registration_t gbp_scanner_node;
+
+#endif
diff --git a/extras/deprecated/plugins/gbp/gbp_subnet.c b/extras/deprecated/plugins/gbp/gbp_subnet.c
new file mode 100644
index 00000000000..8d3b571657c
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_subnet.c
@@ -0,0 +1,598 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_fwd_dpo.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_route_domain.h>
+
+#include <vnet/fib/fib_table.h>
+#include <vnet/dpo/load_balance.h>
+
+/**
+ * a key for the DB
+ */
+typedef struct gbp_subnet_key_t_
+{
+ fib_prefix_t gsk_pfx;
+ u32 gsk_fib_index;
+} gbp_subnet_key_t;
+
+/**
+ * Subnet
+ */
+typedef struct gbp_subnet_t_
+{
+ gbp_subnet_key_t *gs_key;
+ gbp_subnet_type_t gs_type;
+ index_t gs_rd;
+
+ union
+ {
+ struct
+ {
+ sclass_t gs_sclass;
+ u32 gs_sw_if_index;
+ } gs_stitched_external;
+ struct
+ {
+ sclass_t gs_sclass;
+ } gs_l3_out;
+ };
+
+ fib_node_index_t gs_fei;
+} gbp_subnet_t;
+
+/**
+ * A DB of the subnets; key={pfx,fib-index}
+ */
+uword *gbp_subnet_db;
+
+/**
+ * pool of subnets
+ */
+gbp_subnet_t *gbp_subnet_pool;
+
+static fib_source_t gbp_fib_source;
+
+static index_t
+gbp_subnet_db_find (u32 fib_index, const fib_prefix_t * pfx)
+{
+ gbp_subnet_key_t key = {
+ .gsk_pfx = *pfx,
+ .gsk_fib_index = fib_index,
+ };
+ uword *p;
+
+ p = hash_get_mem (gbp_subnet_db, &key);
+
+ if (NULL != p)
+ return p[0];
+
+ return (INDEX_INVALID);
+}
+
+static void
+gbp_subnet_db_add (u32 fib_index, const fib_prefix_t * pfx, gbp_subnet_t * gs)
+{
+ gbp_subnet_key_t *key;
+
+ key = clib_mem_alloc (sizeof (*key));
+
+ clib_memcpy (&(key->gsk_pfx), pfx, sizeof (*pfx));
+ key->gsk_fib_index = fib_index;
+
+ hash_set_mem (gbp_subnet_db, key, (gs - gbp_subnet_pool));
+
+ gs->gs_key = key;
+}
+
+static void
+gbp_subnet_db_del (gbp_subnet_t * gs)
+{
+ hash_unset_mem (gbp_subnet_db, gs->gs_key);
+
+ clib_mem_free (gs->gs_key);
+ gs->gs_key = NULL;
+}
+
+
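+/**
+ * transport subnets are routed via the route-domain's uu-fwd interface,
+ * i.e. the interface used to reach endpoints unknown to this node
+ */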
+static int
+gbp_subnet_transport_add (gbp_subnet_t * gs)
+{
+ dpo_id_t gfd = DPO_INVALID;
+ gbp_route_domain_t *grd;
+ fib_protocol_t fproto;
+
+ fproto = gs->gs_key->gsk_pfx.fp_proto;
+ grd = gbp_route_domain_get (gs->gs_rd);
+
+ if (~0 == grd->grd_uu_sw_if_index[fproto])
+ return (VNET_API_ERROR_INVALID_SW_IF_INDEX);
+
+ gs->gs_fei = fib_table_entry_update_one_path (gs->gs_key->gsk_fib_index,
+ &gs->gs_key->gsk_pfx,
+ gbp_fib_source,
+ FIB_ENTRY_FLAG_NONE,
+ fib_proto_to_dpo (fproto),
+ &ADJ_BCAST_ADDR,
+ grd->grd_uu_sw_if_index
+ [fproto], ~0, 1, NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ dpo_reset (&gfd);
+
+ return (0);
+}
+
+static int
+gbp_subnet_internal_add (gbp_subnet_t * gs)
+{
+ dpo_id_t gfd = DPO_INVALID;
+
+ gbp_fwd_dpo_add_or_lock (fib_proto_to_dpo (gs->gs_key->gsk_pfx.fp_proto),
+ &gfd);
+
+ gs->gs_fei = fib_table_entry_special_dpo_update (gs->gs_key->gsk_fib_index,
+ &gs->gs_key->gsk_pfx,
+ gbp_fib_source,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &gfd);
+
+ dpo_reset (&gfd);
+
+ return (0);
+}
+
+static int
+gbp_subnet_external_add (gbp_subnet_t * gs, u32 sw_if_index, sclass_t sclass)
+{
+ dpo_id_t gpd = DPO_INVALID;
+
+ gs->gs_stitched_external.gs_sclass = sclass;
+ gs->gs_stitched_external.gs_sw_if_index = sw_if_index;
+
+ gbp_policy_dpo_add_or_lock (fib_proto_to_dpo (gs->gs_key->gsk_pfx.fp_proto),
+ gbp_route_domain_get_scope (gs->gs_rd),
+ gs->gs_stitched_external.gs_sclass,
+ gs->gs_stitched_external.gs_sw_if_index, &gpd);
+
+ gs->gs_fei = fib_table_entry_special_dpo_update (gs->gs_key->gsk_fib_index,
+ &gs->gs_key->gsk_pfx,
+ gbp_fib_source,
+ (FIB_ENTRY_FLAG_EXCLUSIVE |
+ FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT),
+ &gpd);
+
+ dpo_reset (&gpd);
+
+ return (0);
+}
+
+static int
+gbp_subnet_l3_out_add (gbp_subnet_t * gs, sclass_t sclass, int is_anon)
+{
+ fib_entry_flag_t flags;
+ dpo_id_t gpd = DPO_INVALID;
+
+ gs->gs_l3_out.gs_sclass = sclass;
+
+ gbp_policy_dpo_add_or_lock (fib_proto_to_dpo (gs->gs_key->gsk_pfx.fp_proto),
+ gbp_route_domain_get_scope (gs->gs_rd),
+ gs->gs_l3_out.gs_sclass, ~0, &gpd);
+
+ flags = FIB_ENTRY_FLAG_INTERPOSE;
+ if (is_anon)
+ flags |= FIB_ENTRY_FLAG_COVERED_INHERIT;
+
+ gs->gs_fei = fib_table_entry_special_dpo_add (gs->gs_key->gsk_fib_index,
+ &gs->gs_key->gsk_pfx,
+ FIB_SOURCE_SPECIAL,
+ flags, &gpd);
+
+ dpo_reset (&gpd);
+
+ return (0);
+}
+
+static void
+gbp_subnet_del_i (index_t gsi)
+{
+ gbp_subnet_t *gs;
+
+ gs = pool_elt_at_index (gbp_subnet_pool, gsi);
+
+ fib_table_entry_delete_index (gs->gs_fei,
+ (GBP_SUBNET_L3_OUT == gs->gs_type
+ || GBP_SUBNET_ANON_L3_OUT ==
+ gs->gs_type) ? FIB_SOURCE_SPECIAL :
+ gbp_fib_source);
+
+ gbp_subnet_db_del (gs);
+ gbp_route_domain_unlock (gs->gs_rd);
+
+ pool_put (gbp_subnet_pool, gs);
+}
+
+int
+gbp_subnet_del (u32 rd_id, const fib_prefix_t * pfx)
+{
+ gbp_route_domain_t *grd;
+ index_t gsi, grdi;
+ u32 fib_index;
+
+ grdi = gbp_route_domain_find (rd_id);
+
+ if (~0 == grdi)
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+
+ grd = gbp_route_domain_get (grdi);
+ fib_index = grd->grd_fib_index[pfx->fp_proto];
+
+ gsi = gbp_subnet_db_find (fib_index, pfx);
+
+ if (INDEX_INVALID == gsi)
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+
+ gbp_subnet_del_i (gsi);
+
+ return (0);
+}
+
+int
+gbp_subnet_add (u32 rd_id,
+ const fib_prefix_t * pfx,
+ gbp_subnet_type_t type, u32 sw_if_index, sclass_t sclass)
+{
+ gbp_route_domain_t *grd;
+ index_t grdi, gsi;
+ gbp_subnet_t *gs;
+ u32 fib_index;
+ int rv;
+
+ switch (type)
+ {
+ case GBP_SUBNET_TRANSPORT:
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ case GBP_SUBNET_L3_OUT:
+ case GBP_SUBNET_ANON_L3_OUT:
+ break;
+ default:
+ return (VNET_API_ERROR_INCORRECT_ADJACENCY_TYPE);
+ }
+
+ grdi = gbp_route_domain_find_and_lock (rd_id);
+
+ if (~0 == grdi)
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+
+ grd = gbp_route_domain_get (grdi);
+ fib_index = grd->grd_fib_index[pfx->fp_proto];
+
+ gsi = gbp_subnet_db_find (fib_index, pfx);
+
+ /*
+ * this is an update if the subnet already exists, so remove the old
+ */
+ if (INDEX_INVALID != gsi)
+ gbp_subnet_del_i (gsi);
+
+ rv = -2;
+
+ pool_get (gbp_subnet_pool, gs);
+
+ gs->gs_type = type;
+ gs->gs_rd = grdi;
+ gbp_subnet_db_add (fib_index, pfx, gs);
+
+ switch (type)
+ {
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ rv = gbp_subnet_internal_add (gs);
+ break;
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ rv = gbp_subnet_external_add (gs, sw_if_index, sclass);
+ break;
+ case GBP_SUBNET_TRANSPORT:
+ rv = gbp_subnet_transport_add (gs);
+ break;
+ case GBP_SUBNET_L3_OUT:
+ rv = gbp_subnet_l3_out_add (gs, sclass, 0 /* is_anon */ );
+ break;
+ case GBP_SUBNET_ANON_L3_OUT:
+ rv = gbp_subnet_l3_out_add (gs, sclass, 1 /* is_anon */ );
+ break;
+ }
+
+ return (rv);
+}
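+
+/*
+ * Example (illustrative only; the rd-id and prefix are hypothetical):
+ * add a transport subnet to route-domain 1 and remove it again. This
+ * assumes RD 1 already exists and has an unknown-unicast forwarding
+ * interface configured, otherwise gbp_subnet_add() returns an error.
+ */
+static void
+gbp_subnet_example (void)
+{
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_len = 24,
+ .fp_addr.ip4.as_u32 = clib_host_to_net_u32 (0x0a000000), /* 10.0.0.0 */
+ };
+
+ /* sw_if_index and sclass are not used for transport subnets */
+ if (0 != gbp_subnet_add (1, &pfx, GBP_SUBNET_TRANSPORT, ~0, SCLASS_INVALID))
+ return;
+
+ gbp_subnet_del (1, &pfx);
+}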
+
+static clib_error_t *
+gbp_subnet_add_del_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vnet_main_t *vnm = vnet_get_main ();
+ fib_prefix_t pfx = {.fp_addr = ip46_address_initializer };
+ int length = 0;
+ u32 rd_id = ~0;
+ u32 sw_if_index = ~0;
+ gbp_subnet_type_t type = ~0;
+ u32 sclass = ~0;
+ int is_add = 1;
+ int rv;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "rd %d", &rd_id))
+ ;
+ else
+ if (unformat
+ (line_input, "prefix %U/%d", unformat_ip4_address,
+ &pfx.fp_addr.ip4, &length))
+ pfx.fp_proto = FIB_PROTOCOL_IP4;
+ else
+ if (unformat
+ (line_input, "prefix %U/%d", unformat_ip6_address,
+ &pfx.fp_addr.ip6, &length))
+ pfx.fp_proto = FIB_PROTOCOL_IP6;
+ else if (unformat (line_input, "type transport"))
+ type = GBP_SUBNET_TRANSPORT;
+ else if (unformat (line_input, "type stitched-internal"))
+ type = GBP_SUBNET_STITCHED_INTERNAL;
+ else if (unformat (line_input, "type stitched-external"))
+ type = GBP_SUBNET_STITCHED_EXTERNAL;
+ else if (unformat (line_input, "type anon-l3-out"))
+ type = GBP_SUBNET_ANON_L3_OUT;
+ else if (unformat (line_input, "type l3-out"))
+ type = GBP_SUBNET_L3_OUT;
+ else
+ if (unformat_user
+ (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else if (unformat (line_input, "sclass %u", &sclass))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ }
+ unformat_free (line_input);
+
+ pfx.fp_len = length;
+
+ if (is_add)
+ rv = gbp_subnet_add (rd_id, &pfx, type, sw_if_index, sclass);
+ else
+ rv = gbp_subnet_del (rd_id, &pfx);
+
+ switch (rv)
+ {
+ case 0:
+ return 0;
+ case VNET_API_ERROR_NO_SUCH_FIB:
+ return clib_error_return (0, "no such FIB");
+ }
+
+ return clib_error_return (0, "unknown error %d", rv);
+}
+
+/*?
+ * Add Group Based Policy Subnets
+ *
+ * @cliexpar
+ * @cliexstart{gbp subnet [del] rd <ID> prefix <prefix> type <type> [<interface>] [sclass <sclass>]}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_subnet_add_del, static) = {
+ .path = "gbp subnet",
+ .short_help = "gbp subnet [del] rd <ID> prefix <prefix> type <type> [<interface>] [sclass <sclass>]\n",
+ .function = gbp_subnet_add_del_cli,
+};
+/* *INDENT-ON* */
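+
+/*
+ * Example CLI usage (values hypothetical):
+ *   gbp subnet rd 1 prefix 10.0.0.0/24 type transport
+ *   gbp subnet del rd 1 prefix 10.0.0.0/24
+ * stitched-external subnets additionally take an interface and an sclass.
+ */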
+
+void
+gbp_subnet_walk (gbp_subnet_cb_t cb, void *ctx)
+{
+ gbp_route_domain_t *grd;
+ gbp_subnet_t *gs;
+ u32 sw_if_index;
+ sclass_t sclass;
+
+ sclass = SCLASS_INVALID;
+ sw_if_index = ~0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (gs, gbp_subnet_pool)
+ {
+ grd = gbp_route_domain_get(gs->gs_rd);
+
+ switch (gs->gs_type)
+ {
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ case GBP_SUBNET_TRANSPORT:
+ /* use defaults above */
+ break;
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ sw_if_index = gs->gs_stitched_external.gs_sw_if_index;
+ sclass = gs->gs_stitched_external.gs_sclass;
+ break;
+ case GBP_SUBNET_L3_OUT:
+ case GBP_SUBNET_ANON_L3_OUT:
+ sclass = gs->gs_l3_out.gs_sclass;
+ break;
+ }
+
+ if (WALK_STOP == cb (grd->grd_id, &gs->gs_key->gsk_pfx,
+ gs->gs_type, sw_if_index, sclass, ctx))
+ break;
+ }
+ /* *INDENT-ON* */
+}
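+
+/*
+ * Example walk callback (illustrative sketch, not part of the plugin):
+ * count the subnets in a given route-domain. The caller sets ctx.rd_id,
+ * zeroes ctx.count and invokes gbp_subnet_walk (gbp_subnet_count_cb, &ctx).
+ */
+typedef struct gbp_subnet_count_ctx_t_
+{
+ u32 rd_id;
+ u32 count;
+} gbp_subnet_count_ctx_t;
+
+static walk_rc_t
+gbp_subnet_count_cb (u32 rd_id, const fib_prefix_t * pfx,
+ gbp_subnet_type_t type, u32 sw_if_index,
+ sclass_t sclass, void *args)
+{
+ gbp_subnet_count_ctx_t *ctx = args;
+
+ if (rd_id == ctx->rd_id)
+ ctx->count++;
+
+ return (WALK_CONTINUE);
+}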
+
+typedef enum gsb_subnet_show_flags_t_
+{
+ GBP_SUBNET_SHOW_BRIEF,
+ GBP_SUBNET_SHOW_DETAILS,
+} gsb_subnet_show_flags_t;
+
+static u8 *
+format_gbp_subnet_type (u8 * s, va_list * args)
+{
+ gbp_subnet_type_t type = va_arg (*args, gbp_subnet_type_t);
+
+ switch (type)
+ {
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ return (format (s, "stitched-internal"));
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ return (format (s, "stitched-external"));
+ case GBP_SUBNET_TRANSPORT:
+ return (format (s, "transport"));
+ case GBP_SUBNET_L3_OUT:
+ return (format (s, "l3-out"));
+ case GBP_SUBNET_ANON_L3_OUT:
+ return (format (s, "anon-l3-out"));
+ }
+
+ return (format (s, "unknown"));
+}
+
+u8 *
+format_gbp_subnet (u8 * s, va_list * args)
+{
+ index_t gsi = va_arg (*args, index_t);
+ gsb_subnet_show_flags_t flags = va_arg (*args, gsb_subnet_show_flags_t);
+ gbp_subnet_t *gs;
+ u32 table_id;
+
+ gs = pool_elt_at_index (gbp_subnet_pool, gsi);
+
+ table_id = fib_table_get_table_id (gs->gs_key->gsk_fib_index,
+ gs->gs_key->gsk_pfx.fp_proto);
+
+ s = format (s, "[%d] tbl:%d %U %U", gsi, table_id,
+ format_fib_prefix, &gs->gs_key->gsk_pfx,
+ format_gbp_subnet_type, gs->gs_type);
+
+ switch (gs->gs_type)
+ {
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ case GBP_SUBNET_TRANSPORT:
+ break;
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ s = format (s, " {sclass:%d %U}", gs->gs_stitched_external.gs_sclass,
+ format_vnet_sw_if_index_name,
+ vnet_get_main (), gs->gs_stitched_external.gs_sw_if_index);
+ break;
+ case GBP_SUBNET_L3_OUT:
+ case GBP_SUBNET_ANON_L3_OUT:
+ s = format (s, " {sclass:%d}", gs->gs_l3_out.gs_sclass);
+ break;
+ }
+
+ switch (flags)
+ {
+ case GBP_SUBNET_SHOW_DETAILS:
+ {
+ s = format (s, "\n %U", format_fib_entry, gs->gs_fei,
+ FIB_ENTRY_FORMAT_DETAIL);
+ }
+ case GBP_SUBNET_SHOW_BRIEF:
+ break;
+ }
+ return (s);
+}
+
+static clib_error_t *
+gbp_subnet_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u32 gsi;
+
+ gsi = INDEX_INVALID;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &gsi))
+ ;
+ else
+ break;
+ }
+
+ if (INDEX_INVALID != gsi)
+ {
+ vlib_cli_output (vm, "%U", format_gbp_subnet, gsi,
+ GBP_SUBNET_SHOW_DETAILS);
+ }
+ else
+ {
+ /* *INDENT-OFF* */
+ pool_foreach_index (gsi, gbp_subnet_pool)
+ {
+ vlib_cli_output (vm, "%U", format_gbp_subnet, gsi,
+ GBP_SUBNET_SHOW_BRIEF);
+ }
+ /* *INDENT-ON* */
+ }
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy Subnets
+ *
+ * @cliexpar
+ * @cliexstart{show gbp subnet}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_subnet_show_node, static) = {
+ .path = "show gbp subnet",
+ .short_help = "show gbp subnet\n",
+ .function = gbp_subnet_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_subnet_init (vlib_main_t * vm)
+{
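+ /*
+ * the DB key is the full {prefix, fib-index} struct, so a mem-keyed
+ * hash (rather than a plain uword-keyed one) is required
+ */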
+ gbp_subnet_db = hash_create_mem (0,
+ sizeof (gbp_subnet_key_t), sizeof (u32));
+ gbp_fib_source = fib_source_allocate ("gbp-subnet",
+ FIB_SOURCE_PRIORITY_HI,
+ FIB_SOURCE_BH_SIMPLE);
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_subnet_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_subnet.h b/extras/deprecated/plugins/gbp/gbp_subnet.h
new file mode 100644
index 00000000000..6fbef01ceba
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_subnet.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_SUBNET_H__
+#define __GBP_SUBNET_H__
+
+#include <plugins/gbp/gbp_types.h>
+
+typedef enum gbp_subnet_type_t_
+{
+ GBP_SUBNET_TRANSPORT,
+ GBP_SUBNET_STITCHED_INTERNAL,
+ GBP_SUBNET_STITCHED_EXTERNAL,
+ GBP_SUBNET_L3_OUT,
+ GBP_SUBNET_ANON_L3_OUT,
+} gbp_subnet_type_t;
+
+extern int gbp_subnet_add (u32 rd_id,
+ const fib_prefix_t * pfx,
+ gbp_subnet_type_t type,
+ u32 sw_if_index, sclass_t sclass);
+
+extern int gbp_subnet_del (u32 rd_id, const fib_prefix_t * pfx);
+
+typedef walk_rc_t (*gbp_subnet_cb_t) (u32 rd_id,
+ const fib_prefix_t * pfx,
+ gbp_subnet_type_t type,
+ u32 sw_if_index,
+ sclass_t sclass, void *ctx);
+
+extern void gbp_subnet_walk (gbp_subnet_cb_t cb, void *ctx);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_types.h b/extras/deprecated/plugins/gbp/gbp_types.h
new file mode 100644
index 00000000000..ac983b1cdd2
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_types.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_TYPES_H__
+#define __GBP_TYPES_H__
+
+#include <vnet/vnet.h>
+
+typedef u32 vnid_t;
+#define VNID_INVALID ((u16)~0)
+
+typedef u16 gbp_scope_t;
+typedef u16 sclass_t;
+#define SCLASS_INVALID ((u16)~0)
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_vxlan.c b/extras/deprecated/plugins/gbp/gbp_vxlan.c
new file mode 100644
index 00000000000..77e4d7ac11b
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_vxlan.c
@@ -0,0 +1,654 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_vxlan.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+#include <vlibmemory/api.h>
+#include <vnet/fib/fib_table.h>
+#include <vlib/punt.h>
+
+/**
+ * A reference to a VXLAN-GBP tunnel created as a child/dependent tunnel
+ * of the template GBP-VXLAN tunnel
+ */
+typedef struct vxlan_tunnel_ref_t_
+{
+ gbp_itf_hdl_t vxr_itf;
+ u32 vxr_sw_if_index;
+ index_t vxr_parent;
+ gbp_vxlan_tunnel_layer_t vxr_layer;
+} vxlan_tunnel_ref_t;
+
+/**
+ * DB of added tunnels
+ */
+uword *gv_db;
+
+/**
+ * Logger
+ */
+static vlib_log_class_t gt_logger;
+
+/**
+ * Pool of template tunnels
+ */
+static gbp_vxlan_tunnel_t *gbp_vxlan_tunnel_pool;
+
+/**
+ * Pool of child tunnels
+ */
+static vxlan_tunnel_ref_t *vxlan_tunnel_ref_pool;
+
+/**
+ * DB of template interfaces by SW interface index
+ */
+static index_t *gbp_vxlan_tunnel_db;
+
+/**
+ * DB of child interfaces by SW interface index
+ */
+static index_t *vxlan_tunnel_ref_db;
+
+/**
+ * handle registered with the punt infra
+ */
+static vlib_punt_hdl_t punt_hdl;
+
+static char *gbp_vxlan_tunnel_layer_strings[] = {
+#define _(n,s) [GBP_VXLAN_TUN_##n] = s,
+ foreach_gbp_vxlan_tunnel_layer
+#undef _
+};
+
+#define GBP_VXLAN_TUN_DBG(...) \
+ vlib_log_debug (gt_logger, __VA_ARGS__);
+
+
+gbp_vxlan_tunnel_t *
+gbp_vxlan_tunnel_get (index_t gti)
+{
+ return (pool_elt_at_index (gbp_vxlan_tunnel_pool, gti));
+}
+
+static vxlan_tunnel_ref_t *
+vxlan_tunnel_ref_get (index_t vxri)
+{
+ return (pool_elt_at_index (vxlan_tunnel_ref_pool, vxri));
+}
+
+static u8 *
+format_vxlan_tunnel_ref (u8 * s, va_list * args)
+{
+ index_t vxri = va_arg (*args, u32);
+ vxlan_tunnel_ref_t *vxr;
+
+ vxr = vxlan_tunnel_ref_get (vxri);
+
+ s = format (s, "[%U]", format_gbp_itf_hdl, vxr->vxr_itf);
+
+ return (s);
+}
+
+static void
+gdb_vxlan_dep_del (u32 sw_if_index)
+{
+ vxlan_tunnel_ref_t *vxr;
+ gbp_vxlan_tunnel_t *gt;
+ index_t vxri;
+ u32 pos;
+
+ vxr = vxlan_tunnel_ref_get (vxlan_tunnel_ref_db[sw_if_index]);
+ vxri = vxr - vxlan_tunnel_ref_pool;
+ gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);
+
+ GBP_VXLAN_TUN_DBG ("del-dep:%U", format_vxlan_tunnel_ref, vxri);
+
+ vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = INDEX_INVALID;
+ pos = vec_search (gt->gt_tuns, vxri);
+
+ ASSERT (~0 != pos);
+ vec_del1 (gt->gt_tuns, pos);
+
+ vnet_vxlan_gbp_tunnel_del (vxr->vxr_sw_if_index);
+
+ pool_put (vxlan_tunnel_ref_pool, vxr);
+}
+
+static gbp_itf_hdl_t
+gdb_vxlan_dep_add (gbp_vxlan_tunnel_t * gt,
+ const ip46_address_t * src, const ip46_address_t * dst)
+{
+ vnet_vxlan_gbp_tunnel_add_del_args_t args = {
+ .is_add = 1,
+ .is_ip6 = !ip46_address_is_ip4 (src),
+ .vni = gt->gt_vni,
+ .src = *src,
+ .dst = *dst,
+ .instance = ~0,
+ .mode = (GBP_VXLAN_TUN_L2 == gt->gt_layer ?
+ VXLAN_GBP_TUNNEL_MODE_L2 : VXLAN_GBP_TUNNEL_MODE_L3),
+ };
+ vxlan_tunnel_ref_t *vxr;
+ u32 sw_if_index;
+ index_t vxri;
+ int rv;
+
+ sw_if_index = ~0;
+ rv = vnet_vxlan_gbp_tunnel_add_del (&args, &sw_if_index);
+
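+ /*
+ * a tunnel with the same src/dst/VNI may already exist; that is not
+ * an error - take another lock on the existing reference rather than
+ * creating a duplicate
+ */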
+ if (VNET_API_ERROR_TUNNEL_EXIST == rv)
+ {
+ vxri = vxlan_tunnel_ref_db[sw_if_index];
+
+ vxr = vxlan_tunnel_ref_get (vxri);
+ gbp_itf_lock (vxr->vxr_itf);
+ }
+ else if (0 == rv)
+ {
+ ASSERT (~0 != sw_if_index);
+ GBP_VXLAN_TUN_DBG ("add-dep:%U %U %U %d", format_vnet_sw_if_index_name,
+ vnet_get_main (), sw_if_index,
+ format_ip46_address, src, IP46_TYPE_ANY,
+ format_ip46_address, dst, IP46_TYPE_ANY, gt->gt_vni);
+
+ pool_get_zero (vxlan_tunnel_ref_pool, vxr);
+
+ vxri = (vxr - vxlan_tunnel_ref_pool);
+ vxr->vxr_parent = gt - gbp_vxlan_tunnel_pool;
+ vxr->vxr_sw_if_index = sw_if_index;
+ vxr->vxr_layer = gt->gt_layer;
+
+ /*
+ * store the child both on the parent's list and the global DB
+ */
+ vec_add1 (gt->gt_tuns, vxri);
+
+ vec_validate_init_empty (vxlan_tunnel_ref_db,
+ vxr->vxr_sw_if_index, INDEX_INVALID);
+ vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = vxri;
+
+ if (GBP_VXLAN_TUN_L2 == vxr->vxr_layer)
+ {
+ l2output_feat_masks_t ofeat;
+ l2input_feat_masks_t ifeat;
+ gbp_bridge_domain_t *gbd;
+
+ gbd = gbp_bridge_domain_get (gt->gt_gbd);
+ vxr->vxr_itf = gbp_itf_l2_add_and_lock_w_free
+ (vxr->vxr_sw_if_index, gt->gt_gbd, gdb_vxlan_dep_del);
+
+ ofeat = L2OUTPUT_FEAT_GBP_POLICY_MAC;
+ ifeat = L2INPUT_FEAT_NONE;
+
+ if (!(gbd->gb_flags & GBP_BD_FLAG_DO_NOT_LEARN))
+ ifeat |= L2INPUT_FEAT_GBP_LEARN;
+
+ gbp_itf_l2_set_output_feature (vxr->vxr_itf, ofeat);
+ gbp_itf_l2_set_input_feature (vxr->vxr_itf, ifeat);
+ }
+ else
+ {
+ vxr->vxr_itf = gbp_itf_l3_add_and_lock_w_free
+ (vxr->vxr_sw_if_index, gt->gt_grd, gdb_vxlan_dep_del);
+
+ gbp_itf_l3_set_input_feature (vxr->vxr_itf, GBP_ITF_L3_FEAT_LEARN);
+ }
+ }
+ else
+ {
+ return (GBP_ITF_HDL_INVALID);
+ }
+
+ return (vxr->vxr_itf);
+}
+
+u32
+vxlan_gbp_tunnel_get_parent (u32 sw_if_index)
+{
+ ASSERT ((sw_if_index < vec_len (vxlan_tunnel_ref_db)) &&
+ (INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index]));
+
+ gbp_vxlan_tunnel_t *gt;
+ vxlan_tunnel_ref_t *vxr;
+
+ vxr = vxlan_tunnel_ref_get (vxlan_tunnel_ref_db[sw_if_index]);
+ gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);
+
+ return (gt->gt_sw_if_index);
+}
+
+gbp_itf_hdl_t
+vxlan_gbp_tunnel_lock_itf (u32 sw_if_index)
+{
+ ASSERT ((sw_if_index < vec_len (vxlan_tunnel_ref_db)) &&
+ (INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index]));
+
+ vxlan_tunnel_ref_t *vxr;
+
+ vxr = vxlan_tunnel_ref_get (vxlan_tunnel_ref_db[sw_if_index]);
+
+ gbp_itf_lock (vxr->vxr_itf);
+
+ return (vxr->vxr_itf);
+}
+
+
+gbp_vxlan_tunnel_type_t
+gbp_vxlan_tunnel_get_type (u32 sw_if_index)
+{
+ if (sw_if_index < vec_len (vxlan_tunnel_ref_db) &&
+ INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index])
+ {
+ return (VXLAN_GBP_TUNNEL);
+ }
+ else if (sw_if_index < vec_len (gbp_vxlan_tunnel_db) &&
+ INDEX_INVALID != gbp_vxlan_tunnel_db[sw_if_index])
+ {
+ return (GBP_VXLAN_TEMPLATE_TUNNEL);
+ }
+
+ ASSERT (0);
+ return (GBP_VXLAN_TEMPLATE_TUNNEL);
+}
+
+gbp_itf_hdl_t
+gbp_vxlan_tunnel_clone_and_lock (u32 sw_if_index,
+ const ip46_address_t * src,
+ const ip46_address_t * dst)
+{
+ gbp_vxlan_tunnel_t *gt;
+ index_t gti;
+
+ gti = gbp_vxlan_tunnel_db[sw_if_index];
+
+ if (INDEX_INVALID == gti)
+ return (GBP_ITF_HDL_INVALID);
+
+ gt = pool_elt_at_index (gbp_vxlan_tunnel_pool, gti);
+
+ return (gdb_vxlan_dep_add (gt, src, dst));
+}
+
+void
+vxlan_gbp_tunnel_unlock (u32 sw_if_index)
+{
+ /* vxlan_tunnel_ref_t *vxr; */
+ /* index_t vxri; */
+
+ /* vxri = vxlan_tunnel_ref_db[sw_if_index]; */
+
+ /* ASSERT (vxri != INDEX_INVALID); */
+
+ /* vxr = vxlan_tunnel_ref_get (vxri); */
+
+ /* gdb_vxlan_dep_del (vxri); */
+}
+
+void
+gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx)
+{
+ gbp_vxlan_tunnel_t *gt;
+
+ /* *INDENT-OFF* */
+ pool_foreach (gt, gbp_vxlan_tunnel_pool)
+ {
+ if (WALK_CONTINUE != cb(gt, ctx))
+ break;
+ }
+ /* *INDENT-ON* */
+}
+
+static walk_rc_t
+gbp_vxlan_tunnel_show_one (gbp_vxlan_tunnel_t * gt, void *ctx)
+{
+ vlib_cli_output (ctx, "%U", format_gbp_vxlan_tunnel,
+ gt - gbp_vxlan_tunnel_pool);
+
+ return (WALK_CONTINUE);
+}
+
+static u8 *
+format_gbp_vxlan_tunnel_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+
+ return format (s, "gbp-vxlan-%d", dev_instance);
+}
+
+u8 *
+format_gbp_vxlan_tunnel_layer (u8 * s, va_list * args)
+{
+ gbp_vxlan_tunnel_layer_t gl = va_arg (*args, gbp_vxlan_tunnel_layer_t);
+ s = format (s, "%s", gbp_vxlan_tunnel_layer_strings[gl]);
+
+ return (s);
+}
+
+u8 *
+format_gbp_vxlan_tunnel (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ CLIB_UNUSED (int verbose) = va_arg (*args, int);
+ gbp_vxlan_tunnel_t *gt = gbp_vxlan_tunnel_get (dev_instance);
+ index_t *vxri;
+
+ s = format (s, " [%d] gbp-vxlan-tunnel: hw:%d sw:%d vni:%d %U",
+ dev_instance, gt->gt_hw_if_index,
+ gt->gt_sw_if_index, gt->gt_vni,
+ format_gbp_vxlan_tunnel_layer, gt->gt_layer);
+ if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
+ s = format (s, " BD:%d gbd-index:%d", gt->gt_bd_rd_id, gt->gt_gbd);
+ else
+ s = format (s, " RD:%d grd-index:%d", gt->gt_bd_rd_id, gt->gt_grd);
+
+ s = format (s, " dependents:");
+ vec_foreach (vxri, gt->gt_tuns)
+ {
+ s = format (s, "\n %U, ", format_vxlan_tunnel_ref, *vxri);
+ }
+
+ return s;
+}
+
+typedef struct gbp_vxlan_tx_trace_t_
+{
+ u32 vni;
+} gbp_vxlan_tx_trace_t;
+
+u8 *
+format_gbp_vxlan_tx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_vxlan_tx_trace_t *t = va_arg (*args, gbp_vxlan_tx_trace_t *);
+
+ s = format (s, "GBP-VXLAN: vni:%d", t->vni);
+
+ return (s);
+}
+
+clib_error_t *
+gbp_vxlan_interface_admin_up_down (vnet_main_t * vnm,
+ u32 hw_if_index, u32 flags)
+{
+ vnet_hw_interface_t *hi;
+ u32 ti;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ if (NULL == gbp_vxlan_tunnel_db ||
+ hi->sw_if_index >= vec_len (gbp_vxlan_tunnel_db))
+ return (NULL);
+
+ ti = gbp_vxlan_tunnel_db[hi->sw_if_index];
+
+ if (~0 == ti)
+ /* not one of ours */
+ return (NULL);
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, hw_if_index, 0);
+
+ return (NULL);
+}
+
+static uword
+gbp_vxlan_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (gbp_vxlan_device_class) = {
+ .name = "GBP VXLAN tunnel-template",
+ .format_device_name = format_gbp_vxlan_tunnel_name,
+ .format_device = format_gbp_vxlan_tunnel,
+ .format_tx_trace = format_gbp_vxlan_tx_trace,
+ .admin_up_down_function = gbp_vxlan_interface_admin_up_down,
+ .tx_function = gbp_vxlan_interface_tx,
+};
+
+VNET_HW_INTERFACE_CLASS (gbp_vxlan_hw_interface_class) = {
+ .name = "GBP-VXLAN",
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+/* *INDENT-ON* */
+
+int
+gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
+ u32 bd_rd_id,
+ const ip4_address_t * src, u32 * sw_if_indexp)
+{
+ gbp_vxlan_tunnel_t *gt;
+ index_t gti;
+ uword *p;
+ int rv;
+
+ rv = 0;
+ p = hash_get (gv_db, vni);
+
+ GBP_VXLAN_TUN_DBG ("add: %d %d %d", vni, layer, bd_rd_id);
+
+ if (NULL == p)
+ {
+ vnet_sw_interface_t *si;
+ vnet_hw_interface_t *hi;
+ index_t gbi, grdi;
+ vnet_main_t *vnm;
+
+ gbi = grdi = INDEX_INVALID;
+
+ if (layer == GBP_VXLAN_TUN_L2)
+ {
+ gbi = gbp_bridge_domain_find_and_lock (bd_rd_id);
+
+ if (INDEX_INVALID == gbi)
+ {
+ return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
+ }
+ }
+ else
+ {
+ grdi = gbp_route_domain_find_and_lock (bd_rd_id);
+
+ if (INDEX_INVALID == grdi)
+ {
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+ }
+ }
+
+ vnm = vnet_get_main ();
+ pool_get (gbp_vxlan_tunnel_pool, gt);
+ gti = gt - gbp_vxlan_tunnel_pool;
+
+ gt->gt_vni = vni;
+ gt->gt_layer = layer;
+ gt->gt_bd_rd_id = bd_rd_id;
+ gt->gt_src.ip4.as_u32 = src->as_u32;
+ gt->gt_hw_if_index = vnet_register_interface (vnm,
+ gbp_vxlan_device_class.index,
+ gti,
+ gbp_vxlan_hw_interface_class.index,
+ gti);
+
+ hi = vnet_get_hw_interface (vnm, gt->gt_hw_if_index);
+
+ gt->gt_sw_if_index = hi->sw_if_index;
+
+ /* don't flood packets in a BD to these interfaces */
+ si = vnet_get_sw_interface (vnm, gt->gt_sw_if_index);
+ si->flood_class = VNET_FLOOD_CLASS_NO_FLOOD;
+
+ if (layer == GBP_VXLAN_TUN_L2)
+ {
+ gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (gbi);
+
+ gt->gt_gbd = gbi;
+ gb->gb_vni = gti;
+ /* set it up as a GBP interface */
+ gt->gt_itf = gbp_itf_l2_add_and_lock (gt->gt_sw_if_index,
+ gt->gt_gbd);
+ gbp_itf_l2_set_input_feature (gt->gt_itf, L2INPUT_FEAT_GBP_LEARN);
+ }
+ else
+ {
+ gt->gt_grd = grdi;
+ gt->gt_itf = gbp_itf_l3_add_and_lock (gt->gt_sw_if_index,
+ gt->gt_grd);
+ gbp_itf_l3_set_input_feature (gt->gt_itf, GBP_ITF_L3_FEAT_LEARN);
+ }
+
+ /*
+ * save the tunnel by VNI and by sw_if_index
+ */
+ hash_set (gv_db, vni, gti);
+
+ vec_validate_init_empty (gbp_vxlan_tunnel_db,
+ gt->gt_sw_if_index, INDEX_INVALID);
+ gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = gti;
+
+ if (sw_if_indexp)
+ *sw_if_indexp = gt->gt_sw_if_index;
+
+ vxlan_gbp_register_udp_ports ();
+ }
+ else
+ {
+ gti = p[0];
+ rv = VNET_API_ERROR_IF_ALREADY_EXISTS;
+ }
+
+ GBP_VXLAN_TUN_DBG ("add: %U", format_gbp_vxlan_tunnel, gti);
+
+ return (rv);
+}
+
+int
+gbp_vxlan_tunnel_del (u32 vni)
+{
+ gbp_vxlan_tunnel_t *gt;
+ uword *p;
+
+ p = hash_get (gv_db, vni);
+
+ if (NULL != p)
+ {
+ vnet_main_t *vnm;
+
+ vnm = vnet_get_main ();
+ gt = gbp_vxlan_tunnel_get (p[0]);
+
+ vxlan_gbp_unregister_udp_ports ();
+
+ GBP_VXLAN_TUN_DBG ("del: %U", format_gbp_vxlan_tunnel,
+ gt - gbp_vxlan_tunnel_pool);
+
+ gbp_endpoint_flush (GBP_ENDPOINT_SRC_DP, gt->gt_sw_if_index);
+ ASSERT (0 == vec_len (gt->gt_tuns));
+ vec_free (gt->gt_tuns);
+
+ gbp_itf_unlock (&gt->gt_itf);
+
+ if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
+ {
+ gbp_bridge_domain_unlock (gt->gt_gbd);
+ }
+ else
+ {
+ gbp_route_domain_unlock (gt->gt_grd);
+ }
+
+ vnet_sw_interface_set_flags (vnm, gt->gt_sw_if_index, 0);
+ vnet_delete_hw_interface (vnm, gt->gt_hw_if_index);
+
+ hash_unset (gv_db, vni);
+ gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = INDEX_INVALID;
+
+ pool_put (gbp_vxlan_tunnel_pool, gt);
+ }
+ else
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ return (0);
+}
+
+static clib_error_t *
+gbp_vxlan_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+
+ vlib_cli_output (vm, "GBP-VXLAN Interfaces:");
+
+ gbp_vxlan_walk (gbp_vxlan_tunnel_show_one, vm);
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy VXLAN tunnels
+ *
+ * @cliexpar
+ * @cliexstart{show gbp vxlan}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_vxlan_show_node, static) = {
+ .path = "show gbp vxlan",
+ .short_help = "show gbp vxlan\n",
+ .function = gbp_vxlan_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_vxlan_init (vlib_main_t * vm)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+
+ gt_logger = vlib_log_register_class ("gbp", "tun");
+
+ punt_hdl = vlib_punt_client_register ("gbp-vxlan");
+
+ vlib_punt_register (punt_hdl,
+ vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4],
+ "gbp-vxlan4");
+
+ return (0);
+}
+
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (gbp_vxlan_init) =
+{
+ .runs_after = VLIB_INITS("punt_init", "vxlan_gbp_init"),
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_vxlan.h b/extras/deprecated/plugins/gbp/gbp_vxlan.h
new file mode 100644
index 00000000000..706fe2a0e85
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_vxlan.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_VXLAN_H__
+#define __GBP_VXLAN_H__
+
+#include <vnet/fib/fib_types.h>
+#include <plugins/gbp/gbp_itf.h>
+
+#define foreach_gbp_vxlan_tunnel_layer \
+ _ (L2, "l2") \
+ _ (L3, "l3")
+
+typedef enum gbp_vxlan_tunnel_layer_t_
+{
+#define _(s,n) GBP_VXLAN_TUN_##s,
+ foreach_gbp_vxlan_tunnel_layer
+#undef _
+} gbp_vxlan_tunnel_layer_t;
+
+/**
+ * GBP VXLAN (template) tunnel.
+ * A template tunnel has only a VNI; it does not have src/dst addresses,
+ * so it cannot be used to send traffic. It is used on the RX path to
+ * receive vxlan-gbp packets that do not match an existing tunnel.
+ */
+typedef struct gbp_vxlan_tunnel_t_
+{
+ u32 gt_hw_if_index;
+ u32 gt_sw_if_index;
+ u32 gt_vni;
+
+ /**
+ * The BD or RD value (depending on the layer) that the tunnel is bound to
+ */
+ u32 gt_bd_rd_id;
+ gbp_vxlan_tunnel_layer_t gt_layer;
+
+ union
+ {
+ struct
+ {
+ /**
+       * Reference to the GBP-BD
+ */
+ index_t gt_gbd;
+ };
+ struct
+ {
+ /**
+       * Reference to the GBP-RD
+ */
+ index_t gt_grd;
+ };
+ };
+
+ /**
+ * gbp-itf config for this interface
+ */
+ gbp_itf_hdl_t gt_itf;
+
+ /**
+ * list of child vxlan-gbp tunnels built from this template
+ */
+ index_t *gt_tuns;
+
+ /**
+ * The source address to use for child tunnels
+ */
+ ip46_address_t gt_src;
+} gbp_vxlan_tunnel_t;
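+
+/*
+ * Example lifecycle (illustrative only; the VNI, BD id and address are
+ * hypothetical): create an L2 template tunnel on VNI 99 bound to
+ * bridge-domain 1, then delete it. Child src/dst tunnels are cloned
+ * from the template on demand via gbp_vxlan_tunnel_clone_and_lock().
+ *
+ *   ip4_address_t src = { .as_u32 = clib_host_to_net_u32 (0xc0a80001) };
+ *   u32 sw_if_index;
+ *
+ *   gbp_vxlan_tunnel_add (99, GBP_VXLAN_TUN_L2, 1, &src, &sw_if_index);
+ *   ...
+ *   gbp_vxlan_tunnel_del (99);
+ */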
+
+/**
+ * The different types of interfaces that endpoints are learned on
+ */
+typedef enum gbp_vxlan_tunnel_type_t_
+{
+ /**
+ * This is the object type defined above.
+   * A template representation of a vxlan-gbp tunnel. From this tunnel
+   * type, real vxlan-gbp tunnels are created (by cloning the VNI).
+ */
+ GBP_VXLAN_TEMPLATE_TUNNEL,
+
+ /**
+ * A real VXLAN-GBP tunnel (from vnet/vxlan-gbp/...)
+ */
+ VXLAN_GBP_TUNNEL,
+} gbp_vxlan_tunnel_type_t;
+
+extern int gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
+ u32 bd_rd_id,
+ const ip4_address_t * src,
+ u32 * sw_if_indexp);
+extern int gbp_vxlan_tunnel_del (u32 vni);
+
+extern gbp_vxlan_tunnel_type_t gbp_vxlan_tunnel_get_type (u32 sw_if_index);
+
+extern gbp_itf_hdl_t gbp_vxlan_tunnel_clone_and_lock (u32 parent_tunnel,
+ const ip46_address_t *
+ src,
+ const ip46_address_t *
+ dst);
+
+extern u32 vxlan_gbp_tunnel_get_parent (u32 sw_if_index);
+extern gbp_itf_hdl_t vxlan_gbp_tunnel_lock_itf (u32 sw_if_index);
+
+typedef walk_rc_t (*gbp_vxlan_cb_t) (gbp_vxlan_tunnel_t * gt, void *ctx);
+extern void gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx);
+
+extern u8 *format_gbp_vxlan_tunnel (u8 * s, va_list * args);
+extern u8 *format_gbp_vxlan_tunnel_layer (u8 * s, va_list * args);
+
+extern gbp_vxlan_tunnel_t *gbp_vxlan_tunnel_get (index_t gti);
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/gbp_vxlan_node.c b/extras/deprecated/plugins/gbp/gbp_vxlan_node.c
new file mode 100644
index 00000000000..413a9f47e1b
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/gbp_vxlan_node.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <plugins/gbp/gbp_vxlan.h>
+#include <plugins/gbp/gbp_itf.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+#include <vlibmemory/api.h>
+#include <vnet/fib/fib_table.h>
+
+extern uword *gv_db;
+
+typedef struct gbp_vxlan_trace_t_
+{
+ u8 dropped;
+ u32 vni;
+ u32 sw_if_index;
+ u16 sclass;
+ u8 flags;
+} gbp_vxlan_trace_t;
+
+#define foreach_gbp_vxlan_input_next \
+ _(DROP, "error-drop") \
+ _(L2_INPUT, "l2-input") \
+ _(IP4_INPUT, "ip4-input") \
+ _(IP6_INPUT, "ip6-input")
+
+typedef enum
+{
+#define _(s,n) GBP_VXLAN_INPUT_NEXT_##s,
+ foreach_gbp_vxlan_input_next
+#undef _
+ GBP_VXLAN_INPUT_N_NEXT,
+} gbp_vxlan_input_next_t;
+
+
+#define foreach_gbp_vxlan_error \
+ _(DECAPPED, "decapped") \
+ _(LEARNED, "learned")
+
+typedef enum
+{
+#define _(s,n) GBP_VXLAN_ERROR_##s,
+ foreach_gbp_vxlan_error
+#undef _
+ GBP_VXLAN_N_ERROR,
+} gbp_vxlan_input_error_t;
+
+static char *gbp_vxlan_error_strings[] = {
+#define _(n,s) s,
+ foreach_gbp_vxlan_error
+#undef _
+};
+
+static uword
+gbp_vxlan_decap (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, u8 is_ip4)
+{
+ u32 n_left_to_next, n_left_from, next_index, *to_next, *from;
+
+ next_index = 0;
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vxlan_gbp_header_t *vxlan_gbp0;
+ gbp_vxlan_input_next_t next0;
+ gbp_vxlan_tunnel_t *gt0;
+ vlib_buffer_t *b0;
+ u32 bi0, vni0;
+ uword *p;
+
+ bi0 = to_next[0] = from[0];
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ next0 = GBP_VXLAN_INPUT_NEXT_DROP;
+
+ b0 = vlib_get_buffer (vm, bi0);
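+	  /*
+	   * the vxlan-gbp encap has already been consumed upstream, so
+	   * the header sits just before the current data pointer
+	   */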
+ vxlan_gbp0 =
+ vlib_buffer_get_current (b0) - sizeof (vxlan_gbp_header_t);
+
+ vni0 = vxlan_gbp_get_vni (vxlan_gbp0);
+ p = hash_get (gv_db, vni0);
+
+ if (PREDICT_FALSE (NULL == p))
+ {
+ gt0 = NULL;
+ next0 = GBP_VXLAN_INPUT_NEXT_DROP;
+ }
+ else
+ {
+ gt0 = gbp_vxlan_tunnel_get (p[0]);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = gt0->gt_sw_if_index;
+
+ if (GBP_VXLAN_TUN_L2 == gt0->gt_layer)
+ /*
+ * An L2 layer tunnel goes into the BD
+ */
+ next0 = GBP_VXLAN_INPUT_NEXT_L2_INPUT;
+ else
+ {
+ /*
+		   * An L3 layer tunnel needs to strip the L2 header
+		   * and inject into the RD
+ */
+ ethernet_header_t *e0;
+ u16 type0;
+
+ e0 = vlib_buffer_get_current (b0);
+ type0 = clib_net_to_host_u16 (e0->type);
+ switch (type0)
+ {
+ case ETHERNET_TYPE_IP4:
+ next0 = GBP_VXLAN_INPUT_NEXT_IP4_INPUT;
+ break;
+ case ETHERNET_TYPE_IP6:
+ next0 = GBP_VXLAN_INPUT_NEXT_IP6_INPUT;
+ break;
+ default:
+ goto trace;
+ }
+ vlib_buffer_advance (b0, sizeof (*e0));
+ }
+ }
+
+ trace:
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gbp_vxlan_trace_t *tr;
+
+ tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->dropped = (next0 == GBP_VXLAN_INPUT_NEXT_DROP);
+ tr->vni = vni0;
+ tr->sw_if_index = (gt0 ? gt0->gt_sw_if_index : ~0);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+ tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+VLIB_NODE_FN (gbp_vxlan4_input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return gbp_vxlan_decap (vm, node, from_frame, 1);
+}
+
+static u8 *
+format_gbp_vxlan_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_vxlan_trace_t *t = va_arg (*args, gbp_vxlan_trace_t *);
+
+ s = format (s, "vni:%d dropped:%d rx:%d sclass:%d flags:%U",
+ t->vni, t->dropped, t->sw_if_index,
+ t->sclass, format_vxlan_gbp_header_gpflags, t->flags);
+
+ return (s);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_vxlan4_input_node) =
+{
+ .name = "gbp-vxlan4",
+ .vector_size = sizeof (u32),
+ .n_errors = GBP_VXLAN_N_ERROR,
+ .error_strings = gbp_vxlan_error_strings,
+ .n_next_nodes = GBP_VXLAN_INPUT_N_NEXT,
+ .format_trace = format_gbp_vxlan_rx_trace,
+ .next_nodes = {
+#define _(s,n) [GBP_VXLAN_INPUT_NEXT_##s] = n,
+ foreach_gbp_vxlan_input_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/gbp/test_gbp.py b/extras/deprecated/plugins/gbp/test_gbp.py
new file mode 100644
index 00000000000..c30a729519d
--- /dev/null
+++ b/extras/deprecated/plugins/gbp/test_gbp.py
@@ -0,0 +1,5926 @@
+#!/usr/bin/env python3
+import typing
+from socket import AF_INET6, inet_pton, inet_ntop
+import unittest
+from ipaddress import ip_address, IPv4Network, IPv6Network
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, ARP, Dot1Q
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.layers.inet6 import (
+ IPv6,
+ ICMPv6ND_NS,
+ ICMPv6NDOptSrcLLAddr,
+ ICMPv6ND_NA,
+ ICMPv6EchoRequest,
+)
+from scapy.utils6 import in6_getnsma, in6_getnsmac
+from scapy.layers.vxlan import VXLAN
+from scapy.data import ETH_P_IP, ETH_P_IPV6
+
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner
+from vpp_object import VppObject
+from vpp_interface import VppInterface
+from vpp_ip_route import (
+ VppIpRoute,
+ VppRoutePath,
+ VppIpTable,
+ VppIpInterfaceAddress,
+ VppIpInterfaceBind,
+ find_route,
+ FibPathProto,
+ FibPathType,
+)
+from vpp_l2 import (
+ VppBridgeDomain,
+ VppBridgeDomainPort,
+ VppBridgeDomainArpEntry,
+ VppL2FibEntry,
+ find_bridge_domain_port,
+ VppL2Vtr,
+)
+from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint
+from vpp_ip import DpoProto, get_dpo_proto
+from vpp_papi import VppEnum, MACAddress
+from vpp_vxlan_gbp_tunnel import find_vxlan_gbp_tunnel, INDEX_INVALID, \
+ VppVxlanGbpTunnel
+from vpp_neighbor import VppNeighbor
+from vpp_acl import AclRule, VppAcl
+
+NUM_PKTS = 67
+
+
+def find_gbp_endpoint(test, sw_if_index=None, ip=None, mac=None,
+ tep=None, sclass=None, flags=None):
+ if ip:
+ vip = ip
+ if mac:
+ vmac = MACAddress(mac)
+
+ eps = test.vapi.gbp_endpoint_dump()
+
+ for ep in eps:
+ if tep:
+ src = tep[0]
+ dst = tep[1]
+ if src != str(ep.endpoint.tun.src) or \
+ dst != str(ep.endpoint.tun.dst):
+ continue
+ if sw_if_index:
+ if ep.endpoint.sw_if_index != sw_if_index:
+ continue
+ if sclass:
+ if ep.endpoint.sclass != sclass:
+ continue
+ if flags:
+ if flags != (flags & ep.endpoint.flags):
+ continue
+ if ip:
+ for eip in ep.endpoint.ips:
+ if vip == str(eip):
+ return True
+ if mac:
+ if vmac == ep.endpoint.mac:
+ return True
+
+ return False
+
+
+def find_gbp_vxlan(test: VppTestCase, vni):
+ ts = test.vapi.gbp_vxlan_tunnel_dump()
+ for t in ts:
+ if t.tunnel.vni == vni:
+ return True
+ return False
+
+
+class VppGbpEndpoint(VppObject):
+ """
+ GBP Endpoint
+ """
+
+ @property
+ def mac(self):
+ return str(self.vmac)
+
+ @property
+ def ip4(self):
+ return self._ip4
+
+ @property
+ def fip4(self):
+ return self._fip4
+
+ @property
+ def ip6(self):
+ return self._ip6
+
+ @property
+ def fip6(self):
+ return self._fip6
+
+ @property
+ def ips(self):
+ return [self.ip4, self.ip6]
+
+ @property
+ def fips(self):
+ return [self.fip4, self.fip6]
+
+ def __init__(self, test, itf, epg, recirc, ip4, fip4, ip6, fip6,
+ flags=0,
+ tun_src="0.0.0.0",
+ tun_dst="0.0.0.0",
+ mac=True):
+ self._test = test
+ self.itf = itf
+ self.handle = None
+ self.epg = epg
+ self.recirc = recirc
+
+ self._ip4 = ip4
+ self._fip4 = fip4
+ self._ip6 = ip6
+ self._fip6 = fip6
+
+ if mac:
+ self.vmac = MACAddress(self.itf.remote_mac)
+ else:
+ self.vmac = MACAddress("00:00:00:00:00:00")
+
+ self.flags = flags
+ self.tun_src = tun_src
+ self.tun_dst = tun_dst
+
+ def encode(self):
+ ips = [self.ip4, self.ip6]
+ return {
+ "sw_if_index": self.itf.sw_if_index,
+ "ips": ips,
+ "n_ips": len(ips),
+ "mac": self.vmac.packed,
+ "sclass": self.epg.sclass,
+ "flags": self.flags,
+ "tun": {
+ "src": self.tun_src,
+ "dst": self.tun_dst,
+ },
+ }
+
+ def add_vpp_config(self):
+ res = self._test.vapi.gbp_endpoint_add(
+ endpoint=self.encode(),
+ )
+ self.handle = res.handle
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_endpoint_del(handle=self.handle)
+
+ def object_id(self):
+ return "gbp-endpoint:[%d==%d:%s:%d]" % (self.handle,
+ self.itf.sw_if_index,
+ self.ip4,
+ self.epg.sclass)
+
+ def query_vpp_config(self):
+ return find_gbp_endpoint(self._test,
+ self.itf.sw_if_index,
+ self.ip4)
+
+
+class VppGbpRecirc(VppObject):
+ """
+ GBP Recirculation Interface
+ """
+
+ def __init__(self, test, epg, recirc, is_ext=False):
+ self._test = test
+ self.recirc = recirc
+ self.epg = epg
+ self.is_ext = is_ext
+
+ def encode(self):
+ return {
+ "is_ext": self.is_ext,
+ "sw_if_index": self.recirc.sw_if_index,
+ "sclass": self.epg.sclass,
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_recirc_add_del(
+ 1,
+ recirc=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_recirc_add_del(
+ 0,
+ recirc=self.encode(),
+ )
+
+ def object_id(self):
+ return "gbp-recirc:[%d]" % (self.recirc.sw_if_index)
+
+ def query_vpp_config(self):
+ rs = self._test.vapi.gbp_recirc_dump()
+ for r in rs:
+ if r.recirc.sw_if_index == self.recirc.sw_if_index:
+ return True
+ return False
+
+
+class VppGbpExtItf(VppObject):
+ """
+    GBP External Interface
+ """
+
+ def __init__(self, test, itf, bd, rd, anon=False):
+ self._test = test
+ self.itf = itf
+ self.bd = bd
+ self.rd = rd
+ self.flags = 1 if anon else 0
+
+ def encode(self):
+ return {
+ "sw_if_index": self.itf.sw_if_index,
+ "bd_id": self.bd.bd_id,
+ "rd_id": self.rd.rd_id,
+ "flags": self.flags,
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_ext_itf_add_del(
+ 1,
+ ext_itf=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_ext_itf_add_del(
+ 0,
+ ext_itf=self.encode(),
+ )
+
+ def object_id(self):
+ return "gbp-ext-itf:[%d]%s" % (self.itf.sw_if_index,
+ " [anon]" if self.flags else "")
+
+ def query_vpp_config(self):
+ rs = self._test.vapi.gbp_ext_itf_dump()
+ for r in rs:
+ if r.ext_itf.sw_if_index == self.itf.sw_if_index:
+ return True
+ return False
+
+
+class VppGbpSubnet(VppObject):
+ """
+ GBP Subnet
+ """
+
+ def __init__(self, test, rd, address, address_len,
+ type, sw_if_index=0xffffffff, sclass=0xffff):
+ # TODO: replace hardcoded defaults when vpp_papi supports
+ # defaults in typedefs
+ self._test = test
+ self.rd_id = rd.rd_id
+ a = ip_address(address)
+ if 4 == a.version:
+ self.prefix = IPv4Network("%s/%d" % (address, address_len),
+ strict=False)
+ else:
+ self.prefix = IPv6Network("%s/%d" % (address, address_len),
+ strict=False)
+ self.type = type
+ self.sw_if_index = sw_if_index
+ self.sclass = sclass
+
+ def encode(self):
+ return {
+ "type": self.type,
+ "sw_if_index": self.sw_if_index,
+ "sclass": self.sclass,
+ "prefix": self.prefix,
+ "rd_id": self.rd_id,
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_subnet_add_del(
+ is_add=1,
+ subnet=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_subnet_add_del(
+ is_add=0,
+ subnet=self.encode()
+ )
+
+ def object_id(self):
+ return "gbp-subnet:[%d-%s]" % (self.rd_id, self.prefix)
+
+ def query_vpp_config(self):
+ ss = self._test.vapi.gbp_subnet_dump()
+ for s in ss:
+ if s.subnet.rd_id == self.rd_id and \
+ s.subnet.type == self.type and \
+ s.subnet.prefix == self.prefix:
+ return True
+ return False
+
+
+class VppGbpEndpointRetention(object):
+ def __init__(self, remote_ep_timeout=0xffffffff):
+ self.remote_ep_timeout = remote_ep_timeout
+
+ def encode(self):
+ return {'remote_ep_timeout': self.remote_ep_timeout}
+
+
+class VppGbpEndpointGroup(VppObject):
+ """
+ GBP Endpoint Group
+ """
+
+ def __init__(self, test, vnid, sclass, rd, bd, uplink,
+ bvi, bvi_ip4, bvi_ip6=None,
+ retention=VppGbpEndpointRetention()):
+ self._test = test
+ self.uplink = uplink
+ self.bvi = bvi
+ self.bvi_ip4 = bvi_ip4
+ self.bvi_ip6 = bvi_ip6
+ self.vnid = vnid
+ self.bd = bd # VppGbpBridgeDomain
+ self.rd = rd
+ self.sclass = sclass
+ if 0 == self.sclass:
+ self.sclass = 0xffff
+ self.retention = retention
+
+ def encode(self) -> dict:
+ return {
+ "uplink_sw_if_index": self.uplink.sw_if_index
+ if self.uplink else INDEX_INVALID,
+ "bd_id": self.bd.bd.bd_id,
+ "rd_id": self.rd.rd_id,
+ "vnid": self.vnid,
+ "sclass": self.sclass,
+ "retention": self.retention.encode(),
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_endpoint_group_add(epg=self.encode())
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_endpoint_group_del(sclass=self.sclass)
+
+ def object_id(self) -> str:
+ return "gbp-endpoint-group:[%d]" % (self.vnid)
+
+ def query_vpp_config(self) -> bool:
+ epgs = self._test.vapi.gbp_endpoint_group_dump()
+ for epg in epgs:
+ if epg.epg.vnid == self.vnid:
+ return True
+ return False
+
+
+class VppGbpBridgeDomain(VppObject):
+ """
+ GBP Bridge Domain
+ """
+
+ def __init__(self, test, bd, rd, bvi,
+ uu_fwd: typing.Optional[VppVxlanGbpTunnel] = None,
+ bm_flood=None, learn=True,
+ uu_drop=False, bm_drop=False,
+ ucast_arp=False):
+ self._test = test
+ self.bvi = bvi
+ self.uu_fwd = uu_fwd
+ self.bm_flood = bm_flood
+ self.bd = bd
+ self.rd = rd
+
+ e = VppEnum.vl_api_gbp_bridge_domain_flags_t
+
+ self.flags = e.GBP_BD_API_FLAG_NONE
+ if not learn:
+ self.flags |= e.GBP_BD_API_FLAG_DO_NOT_LEARN
+ if uu_drop:
+ self.flags |= e.GBP_BD_API_FLAG_UU_FWD_DROP
+ if bm_drop:
+ self.flags |= e.GBP_BD_API_FLAG_MCAST_DROP
+ if ucast_arp:
+ self.flags |= e.GBP_BD_API_FLAG_UCAST_ARP
+
+ def encode(self) -> dict:
+ return {
+ "flags": self.flags,
+ "bvi_sw_if_index": self.bvi.sw_if_index,
+ "uu_fwd_sw_if_index": self.uu_fwd.sw_if_index
+ if self.uu_fwd else INDEX_INVALID,
+ "bm_flood_sw_if_index": self.bm_flood.sw_if_index
+ if self.bm_flood else INDEX_INVALID,
+ "bd_id": self.bd.bd_id,
+ "rd_id": self.rd.rd_id,
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_bridge_domain_add(
+ bd=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_bridge_domain_del(bd_id=self.bd.bd_id)
+
+ def object_id(self) -> str:
+ return "gbp-bridge-domain:[%d]" % (self.bd.bd_id)
+
+ def query_vpp_config(self) -> bool:
+ bds = self._test.vapi.gbp_bridge_domain_dump()
+ for bd in bds:
+ if bd.bd.bd_id == self.bd.bd_id:
+ return True
+ return False
+
+
+class VppGbpRouteDomain(VppObject):
+ """
+ GBP Route Domain
+ """
+
+ def __init__(self, test, rd_id, scope, t4, t6, ip4_uu=None, ip6_uu=None):
+ self._test = test
+ self.rd_id = rd_id
+ self.scope = scope
+ self.t4 = t4
+ self.t6 = t6
+ self.ip4_uu = ip4_uu
+ self.ip6_uu = ip6_uu
+
+ def encode(self) -> dict:
+ return {
+ "rd_id": self.rd_id,
+ "scope": self.scope,
+ "ip4_table_id": self.t4.table_id,
+ "ip6_table_id": self.t6.table_id,
+ "ip4_uu_sw_if_index": self.ip4_uu.sw_if_index
+ if self.ip4_uu else INDEX_INVALID,
+ "ip6_uu_sw_if_index": self.ip6_uu.sw_if_index
+ if self.ip6_uu else INDEX_INVALID,
+
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_route_domain_add(
+ rd=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_route_domain_del(rd_id=self.rd_id)
+
+ def object_id(self):
+ return "gbp-route-domain:[%d]" % (self.rd_id)
+
+ def query_vpp_config(self):
+ rds = self._test.vapi.gbp_route_domain_dump()
+ for rd in rds:
+ if rd.rd.rd_id == self.rd_id:
+ return True
+ return False
+
+
+class VppGbpContractNextHop:
+ def __init__(self, mac, bd, ip, rd):
+ self.mac = mac
+ self.ip = ip
+ self.bd = bd
+ self.rd = rd
+
+ def encode(self) -> dict:
+ return {
+ "ip": self.ip,
+ "mac": self.mac.packed,
+ "bd_id": self.bd.bd.bd_id,
+ "rd_id": self.rd.rd_id,
+ }
+
+
+class VppGbpContractRule:
+ def __init__(self, action, hash_mode, nhs=None):
+ self.action = action
+ self.hash_mode = hash_mode
+ self.nhs = [] if nhs is None else nhs
+
+ def encode(self) -> dict:
+ nhs = []
+ for nh in self.nhs:
+ nhs.append(nh.encode())
+ while len(nhs) < 8:
+ nhs.append({})
+ return {'action': self.action,
+ 'nh_set': {
+ 'hash_mode': self.hash_mode,
+ 'n_nhs': len(self.nhs),
+ 'nhs': nhs}}
+
+ def __repr__(self):
+ return '<VppGbpContractRule action=%s, hash_mode=%s>' % (
+ self.action, self.hash_mode)
+
+
+class VppGbpContract(VppObject):
+ """
+ GBP Contract
+ """
+
+ def __init__(self, test, scope, sclass, dclass, acl_index,
+ rules: list, allowed_ethertypes: list):
+ self._test = test
+ self.scope = scope
+ self.acl_index = acl_index
+ self.sclass = sclass
+ self.dclass = dclass
+ self.rules = rules
+ self.allowed_ethertypes = allowed_ethertypes
+        while len(self.allowed_ethertypes) < 16:
+ self.allowed_ethertypes.append(0)
+
+ def encode(self) -> dict:
+ rules = []
+ for r in self.rules:
+ rules.append(r.encode())
+ return {
+ 'acl_index': self.acl_index,
+ 'scope': self.scope,
+ 'sclass': self.sclass,
+ 'dclass': self.dclass,
+ 'n_rules': len(rules),
+ 'rules': rules,
+ 'n_ether_types': len(self.allowed_ethertypes),
+ 'allowed_ethertypes': self.allowed_ethertypes,
+ }
+
+ def add_vpp_config(self):
+ r = self._test.vapi.gbp_contract_add_del(
+ is_add=1,
+ contract=self.encode()
+ )
+
+ self.stats_index = r.stats_index
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_contract_add_del(
+ is_add=0,
+ contract=self.encode(),
+ )
+
+ def object_id(self):
+ return "gbp-contract:[%d:%d:%d:%d]" % (self.scope,
+ self.sclass,
+ self.dclass,
+ self.acl_index)
+
+ def query_vpp_config(self):
+ cs = self._test.vapi.gbp_contract_dump()
+ for c in cs:
+ if c.contract.scope == self.scope \
+ and c.contract.sclass == self.sclass \
+ and c.contract.dclass == self.dclass:
+ return True
+ return False
+
+ def get_drop_stats(self):
+ c = self._test.statistics.get_counter("/net/gbp/contract/drop")
+ return c[0][self.stats_index]
+
+ def get_permit_stats(self):
+ c = self._test.statistics.get_counter("/net/gbp/contract/permit")
+ return c[0][self.stats_index]
+
+
+class VppGbpVxlanTunnel(VppInterface):
+ """
+ GBP VXLAN tunnel
+ """
+
+ def __init__(self, test, vni, bd_rd_id, mode, src):
+ super(VppGbpVxlanTunnel, self).__init__(test)
+ self._test = test
+ self.vni = vni
+ self.bd_rd_id = bd_rd_id
+ self.mode = mode
+ self.src = src
+
+ def encode(self) -> dict:
+ return {
+ "vni": self.vni,
+ "mode": self.mode,
+ "bd_rd_id": self.bd_rd_id,
+ "src": self.src,
+ }
+
+ def add_vpp_config(self):
+ r = self._test.vapi.gbp_vxlan_tunnel_add(
+ tunnel=self.encode(),
+ )
+ self.set_sw_if_index(r.sw_if_index)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_vxlan_tunnel_del(vni=self.vni)
+
+ def object_id(self):
+ return "gbp-vxlan:%d" % (self.sw_if_index)
+
+ def query_vpp_config(self):
+ return find_gbp_vxlan(self._test, self.vni)
+
+
+@tag_fixme_vpp_workers
+class TestGBP(VppTestCase):
+ """ GBP Test Case """
+
+ @property
+ def nat_config_flags(self):
+ return VppEnum.vl_api_nat_config_flags_t
+
+ @property
+ def nat44_config_flags(self):
+ return VppEnum.vl_api_nat44_config_flags_t
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestGBP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestGBP, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestGBP, self).setUp()
+
+ self.create_pg_interfaces(range(9))
+ self.create_loopback_interfaces(8)
+
+ self.router_mac = MACAddress("00:11:22:33:44:55")
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ for i in self.lo_interfaces:
+ i.admin_up()
+
+ self.vlan_100 = VppDot1QSubint(self, self.pg0, 100)
+ self.vlan_100.admin_up()
+ self.vlan_101 = VppDot1QSubint(self, self.pg0, 101)
+ self.vlan_101.admin_up()
+ self.vlan_102 = VppDot1QSubint(self, self.pg0, 102)
+ self.vlan_102.admin_up()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.admin_down()
+ super(TestGBP, self).tearDown()
+ for i in self.lo_interfaces:
+ i.remove_vpp_config()
+ self.lo_interfaces = []
+ self.vlan_102.remove_vpp_config()
+ self.vlan_101.remove_vpp_config()
+ self.vlan_100.remove_vpp_config()
+
+ def send_and_expect_bridged(self, src, tx, dst):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IP].src, tx[0][IP].src)
+ self.assertEqual(r[IP].dst, tx[0][IP].dst)
+ return rx
+
+ def send_and_expect_bridged6(self, src, tx, dst):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
+ self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
+ return rx
+
+ def send_and_expect_routed(self, src, tx, dst, src_mac):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, src_mac)
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IP].src, tx[0][IP].src)
+ self.assertEqual(r[IP].dst, tx[0][IP].dst)
+ return rx
+
+ def send_and_expect_routed6(self, src, tx, dst, src_mac):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, src_mac)
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
+ self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
+ return rx
+
+ def send_and_expect_natted(self, src, tx, dst, src_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IP].src, src_ip)
+ self.assertEqual(r[IP].dst, tx[0][IP].dst)
+ return rx
+
+ def send_and_expect_natted6(self, src, tx, dst, src_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IPv6].src, src_ip)
+ self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
+ return rx
+
+ def send_and_expect_unnatted(self, src, tx, dst, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IP].dst, dst_ip)
+ self.assertEqual(r[IP].src, tx[0][IP].src)
+ return rx
+
+ def send_and_expect_unnatted6(self, src, tx, dst, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IPv6].dst, dst_ip)
+ self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
+ return rx
+
+ def send_and_expect_double_natted(self, src, tx, dst, src_ip, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, str(self.router_mac))
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IP].dst, dst_ip)
+ self.assertEqual(r[IP].src, src_ip)
+ return rx
+
+ def send_and_expect_double_natted6(self, src, tx, dst, src_ip, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, str(self.router_mac))
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IPv6].dst, dst_ip)
+ self.assertEqual(r[IPv6].src, src_ip)
+ return rx
+
+ def send_and_expect_no_arp(self, src, tx, dst):
+ self.pg_send(src, tx)
+ dst.get_capture(0, timeout=1)
+ dst.assert_nothing_captured()
+
+ def send_and_expect_arp(self, src, tx, dst):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[ARP].psrc, tx[0][ARP].psrc)
+ self.assertEqual(r[ARP].pdst, tx[0][ARP].pdst)
+ self.assertEqual(r[ARP].hwsrc, tx[0][ARP].hwsrc)
+ self.assertEqual(r[ARP].hwdst, tx[0][ARP].hwdst)
+ return rx
+
+ def test_gbp(self):
+ """ Group Based Policy """
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+
+ #
+ # Route Domains
+ #
+ gt4 = VppIpTable(self, 0)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 0, is_ip6=True)
+ gt6.add_vpp_config()
+ nt4 = VppIpTable(self, 20)
+ nt4.add_vpp_config()
+ nt6 = VppIpTable(self, 20, is_ip6=True)
+ nt6.add_vpp_config()
+
+ rd0 = VppGbpRouteDomain(self, 0, 400, gt4, gt6, None, None)
+ rd20 = VppGbpRouteDomain(self, 20, 420, nt4, nt6, None, None)
+
+ rd0.add_vpp_config()
+ rd20.add_vpp_config()
+
+ #
+ # Bridge Domains
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd2 = VppBridgeDomain(self, 2)
+ bd20 = VppBridgeDomain(self, 20)
+
+ bd1.add_vpp_config()
+ bd2.add_vpp_config()
+ bd20.add_vpp_config()
+
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd0, self.loop0)
+ gbd2 = VppGbpBridgeDomain(self, bd2, rd0, self.loop1)
+ gbd20 = VppGbpBridgeDomain(self, bd20, rd20, self.loop2)
+
+ gbd1.add_vpp_config()
+ gbd2.add_vpp_config()
+ gbd20.add_vpp_config()
+
+ #
+ # 3 EPGs, 2 of which share a BD.
+ # 2 NAT EPGs, one for floating-IP subnets, the other for internet
+ #
+ epgs = [VppGbpEndpointGroup(self, 220, 1220, rd0, gbd1,
+ self.pg4, self.loop0,
+ "10.0.0.128", "2001:10::128"),
+ VppGbpEndpointGroup(self, 221, 1221, rd0, gbd1,
+ self.pg5, self.loop0,
+ "10.0.1.128", "2001:10:1::128"),
+ VppGbpEndpointGroup(self, 222, 1222, rd0, gbd2,
+ self.pg6, self.loop1,
+ "10.0.2.128", "2001:10:2::128"),
+ VppGbpEndpointGroup(self, 333, 1333, rd20, gbd20,
+ self.pg7, self.loop2,
+ "11.0.0.128", "3001::128"),
+ VppGbpEndpointGroup(self, 444, 1444, rd20, gbd20,
+ self.pg8, self.loop2,
+ "11.0.0.129", "3001::129")]
+ recircs = [VppGbpRecirc(self, epgs[0], self.loop3),
+ VppGbpRecirc(self, epgs[1], self.loop4),
+ VppGbpRecirc(self, epgs[2], self.loop5),
+ VppGbpRecirc(self, epgs[3], self.loop6, is_ext=True),
+ VppGbpRecirc(self, epgs[4], self.loop7, is_ext=True)]
+
+ epg_nat = epgs[3]
+ recirc_nat = recircs[3]
+
+ #
+ # 4 end-points, 2 in the same subnet, 3 in the same BD
+ #
+ eps = [VppGbpEndpoint(self, self.pg0,
+ epgs[0], recircs[0],
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001::1"),
+ VppGbpEndpoint(self, self.pg1,
+ epgs[0], recircs[0],
+ "10.0.0.2", "11.0.0.2",
+ "2001:10::2", "3001::2"),
+ VppGbpEndpoint(self, self.pg2,
+ epgs[1], recircs[1],
+ "10.0.1.1", "11.0.0.3",
+ "2001:10:1::1", "3001::3"),
+ VppGbpEndpoint(self, self.pg3,
+ epgs[2], recircs[2],
+ "10.0.2.1", "11.0.0.4",
+ "2001:10:2::1", "3001::4")]
+
+ self.vapi.nat44_ed_plugin_enable_disable(enable=1)
+ self.vapi.nat66_plugin_enable_disable(enable=1)
+
+ #
+ # Config related to each of the EPGs
+ #
+ for epg in epgs:
+ # IP config on the BVI interfaces
+ if epg != epgs[1] and epg != epgs[4]:
+ b4 = VppIpInterfaceBind(self, epg.bvi,
+ epg.rd.t4).add_vpp_config()
+ b6 = VppIpInterfaceBind(self, epg.bvi,
+ epg.rd.t6).add_vpp_config()
+ epg.bvi.set_mac(self.router_mac)
+
+ # The BVIs are NAT inside interfaces
+ flags = self.nat_config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=epg.bvi.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat66_add_del_interface(
+ sw_if_index=epg.bvi.sw_if_index,
+ flags=flags, is_add=1)
+
+ if_ip4 = VppIpInterfaceAddress(self, epg.bvi,
+ epg.bvi_ip4, 32,
+ bind=b4).add_vpp_config()
+ if_ip6 = VppIpInterfaceAddress(self, epg.bvi,
+ epg.bvi_ip6, 128,
+ bind=b6).add_vpp_config()
+
+ # EPG uplink interfaces in the RD
+ VppIpInterfaceBind(self, epg.uplink, epg.rd.t4).add_vpp_config()
+ VppIpInterfaceBind(self, epg.uplink, epg.rd.t6).add_vpp_config()
+
+ # add the BD ARP termination entry for BVI IP
+ epg.bd_arp_ip4 = VppBridgeDomainArpEntry(self, epg.bd.bd,
+ str(self.router_mac),
+ epg.bvi_ip4)
+ epg.bd_arp_ip6 = VppBridgeDomainArpEntry(self, epg.bd.bd,
+ str(self.router_mac),
+ epg.bvi_ip6)
+ epg.bd_arp_ip4.add_vpp_config()
+ epg.bd_arp_ip6.add_vpp_config()
+
+ # EPG in VPP
+ epg.add_vpp_config()
+
+ for recirc in recircs:
+ # EPG's ingress recirculation interface maps to its RD
+ VppIpInterfaceBind(self, recirc.recirc,
+ recirc.epg.rd.t4).add_vpp_config()
+ VppIpInterfaceBind(self, recirc.recirc,
+ recirc.epg.rd.t6).add_vpp_config()
+
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=recirc.recirc.sw_if_index, is_add=1)
+ self.vapi.nat66_add_del_interface(
+ sw_if_index=recirc.recirc.sw_if_index, is_add=1)
+
+ recirc.add_vpp_config()
+
+ for recirc in recircs:
+ self.assertTrue(find_bridge_domain_port(self,
+ recirc.epg.bd.bd.bd_id,
+ recirc.recirc.sw_if_index))
+
+ for ep in eps:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ #
+ # routes to the endpoints. We need these since there are no
+ # adj-fibs, due to the fact that the BVI address has a /32 and
+ # the subnet is not attached.
+ #
+ for (ip, fip) in zip(ep.ips, ep.fips):
+ # Add static mappings for each EP from the 10/8 to 11/8 network
+ if ip_address(ip).version == 4:
+ flags = self.nat_config_flags.NAT_IS_ADDR_ONLY
+ self.vapi.nat44_add_del_static_mapping(
+ is_add=1,
+ local_ip_address=ip,
+ external_ip_address=fip,
+ external_sw_if_index=0xFFFFFFFF,
+ vrf_id=0,
+ flags=flags)
+ else:
+ self.vapi.nat66_add_del_static_mapping(
+ local_ip_address=ip,
+ external_ip_address=fip,
+ vrf_id=0, is_add=1)
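+ # NAT_IS_ADDR_ONLY requests a 1:1, all-port address mapping, so
+ # each EP's real (10/8) address translates to its floating (11/8)
+ # IP in both directions. A worked instance of the v4 call above,
+ # with the first EP's addresses filled in for illustration:
+ #
+ #   self.vapi.nat44_add_del_static_mapping(
+ #       is_add=1, local_ip_address="10.0.0.1",
+ #       external_ip_address="11.0.0.1",
+ #       external_sw_if_index=0xFFFFFFFF, vrf_id=0,
+ #       flags=self.nat_config_flags.NAT_IS_ADDR_ONLY)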
+
+ # VPP EP create ...
+ ep.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh gbp endpoint"))
+
+ # ... results in a Gratuitous ARP/ND on the EPG's uplink
+ rx = ep.epg.uplink.get_capture(len(ep.ips) + 1, timeout=0.2)
+
+ for ii, ip in enumerate(ep.ips):
+ p = rx[ii]
+
+ if ip_address(ip).version == 6:
+ self.assertTrue(p.haslayer(ICMPv6ND_NA))
+ self.assertEqual(p[ICMPv6ND_NA].tgt, ip)
+ else:
+ self.assertTrue(p.haslayer(ARP))
+ self.assertEqual(p[ARP].psrc, ip)
+ self.assertEqual(p[ARP].pdst, ip)
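+ # A gratuitous ARP advertises the sender's own binding, hence the
+ # psrc == pdst check above; the IPv6 equivalent is an unsolicited
+ # Neighbour Advertisement targeting the EP's address.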
+
+ # add the BD ARP termination entry for floating IP
+ for fip in ep.fips:
+ ba = VppBridgeDomainArpEntry(self, epg_nat.bd.bd, ep.mac,
+ fip)
+ ba.add_vpp_config()
+
+ # floating IPs route via EPG recirc
+ r = VppIpRoute(
+ self, fip, ip_address(fip).max_prefixlen,
+ [VppRoutePath(fip,
+ ep.recirc.recirc.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_DVR,
+ proto=get_dpo_proto(fip))],
+ table_id=20)
+ r.add_vpp_config()
+
+ # L2 FIB entries in the NAT EPG BD to bridge the packets from
+ # the outside directly to the internal EPG
+ lf = VppL2FibEntry(self, epg_nat.bd.bd, ep.mac,
+ ep.recirc.recirc, bvi_mac=0)
+ lf.add_vpp_config()
+
+ #
+ # ARP packets for unknown IP are sent to the EPG uplink
+ #
+ pkt_arp = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ ARP(op="who-has",
+ hwdst="ff:ff:ff:ff:ff:ff",
+ hwsrc=self.pg0.remote_mac,
+ pdst="10.0.0.88",
+ psrc="10.0.0.99"))
+
+ self.vapi.cli("clear trace")
+ self.pg0.add_stream(pkt_arp)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rxd = epgs[0].uplink.get_capture(1)
+
+ #
+ # ARP/ND packets get a response
+ #
+ pkt_arp = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ ARP(op="who-has",
+ hwdst="ff:ff:ff:ff:ff:ff",
+ hwsrc=self.pg0.remote_mac,
+ pdst=epgs[0].bvi_ip4,
+ psrc=eps[0].ip4))
+
+ self.send_and_expect(self.pg0, [pkt_arp], self.pg0)
+
+ nsma = in6_getnsma(inet_pton(AF_INET6, eps[0].ip6))
+ d = inet_ntop(AF_INET6, nsma)
+ pkt_nd = (Ether(dst=in6_getnsmac(nsma),
+ src=self.pg0.remote_mac) /
+ IPv6(dst=d, src=eps[0].ip6) /
+ ICMPv6ND_NS(tgt=epgs[0].bvi_ip6) /
+ ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
+ self.send_and_expect(self.pg0, [pkt_nd], self.pg0)
+
+ #
+ # broadcast packets are flooded
+ #
+ pkt_bcast = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ IP(src=eps[0].ip4, dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.vapi.cli("clear trace")
+ self.pg0.add_stream(pkt_bcast)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rxd = eps[1].itf.get_capture(1)
+ self.assertEqual(rxd[0][Ether].dst, pkt_bcast[Ether].dst)
+ rxd = epgs[0].uplink.get_capture(1)
+ self.assertEqual(rxd[0][Ether].dst, pkt_bcast[Ether].dst)
+
+ #
+ # packets to non-local L3 destinations are dropped
+ #
+ pkt_intra_epg_220_ip4 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_inter_epg_222_ip4 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst="10.0.1.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0,
+ pkt_intra_epg_220_ip4 * NUM_PKTS)
+
+ pkt_inter_epg_222_ip6 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[0].ip6,
+ dst="2001:10::99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_assert_no_replies(self.pg0,
+ pkt_inter_epg_222_ip6 * NUM_PKTS)
+
+ #
+ # Add the subnet routes
+ #
+ s41 = VppGbpSubnet(
+ self, rd0, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s42 = VppGbpSubnet(
+ self, rd0, "10.0.1.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s43 = VppGbpSubnet(
+ self, rd0, "10.0.2.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s61 = VppGbpSubnet(
+ self, rd0, "2001:10::1", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s62 = VppGbpSubnet(
+ self, rd0, "2001:10:1::1", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s63 = VppGbpSubnet(
+ self, rd0, "2001:10:2::1", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s41.add_vpp_config()
+ s42.add_vpp_config()
+ s43.add_vpp_config()
+ s61.add_vpp_config()
+ s62.add_vpp_config()
+ s63.add_vpp_config()
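+ # With the stitched-internal subnets installed, the non-local L3
+ # destinations that were dropped above now resolve, so the same
+ # streams are bridged out of the EPG's uplink below.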
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_intra_epg_220_ip4 * NUM_PKTS,
+ eps[0].epg.uplink)
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_222_ip4 * NUM_PKTS,
+ eps[0].epg.uplink)
+ self.send_and_expect_bridged6(eps[0].itf,
+ pkt_inter_epg_222_ip6 * NUM_PKTS,
+ eps[0].epg.uplink)
+
+ self.logger.info(self.vapi.cli("sh ip fib 11.0.0.2"))
+ self.logger.info(self.vapi.cli("sh gbp endpoint-group"))
+ self.logger.info(self.vapi.cli("sh gbp endpoint"))
+ self.logger.info(self.vapi.cli("sh gbp recirc"))
+ self.logger.info(self.vapi.cli("sh int"))
+ self.logger.info(self.vapi.cli("sh int addr"))
+ self.logger.info(self.vapi.cli("sh int feat loop6"))
+ self.logger.info(self.vapi.cli("sh vlib graph ip4-gbp-src-classify"))
+ self.logger.info(self.vapi.cli("sh int feat loop3"))
+ self.logger.info(self.vapi.cli("sh int feat pg0"))
+
+ #
+ # Packets destined to an unknown unicast MAC are sent on the EPG uplink ...
+ #
+ pkt_intra_epg_220_to_uplink = (Ether(src=self.pg0.remote_mac,
+ dst="00:00:00:33:44:55") /
+ IP(src=eps[0].ip4,
+ dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_intra_epg_220_to_uplink * NUM_PKTS,
+ eps[0].epg.uplink)
+ # ... and nowhere else
+ self.pg1.get_capture(0, timeout=0.1)
+ self.pg1.assert_nothing_captured(remark="Flood onto other VMS")
+
+ pkt_intra_epg_221_to_uplink = (Ether(src=self.pg2.remote_mac,
+ dst="00:00:00:33:44:66") /
+ IP(src=eps[0].ip4,
+ dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(eps[2].itf,
+ pkt_intra_epg_221_to_uplink * NUM_PKTS,
+ eps[2].epg.uplink)
+
+ #
+ # Packets from the uplink are forwarded in the absence of a contract
+ #
+ pkt_intra_epg_220_from_uplink = (Ether(src="00:00:00:33:44:55",
+ dst=self.pg0.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(self.pg4,
+ pkt_intra_epg_220_from_uplink * NUM_PKTS,
+ self.pg0)
+
+ #
+ # in the absence of policy, endpoints in the same EPG
+ # can communicate
+ #
+ pkt_intra_epg = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg1.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst=eps[1].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(self.pg0,
+ pkt_intra_epg * NUM_PKTS,
+ self.pg1)
+
+ #
+ # in the absence of policy, endpoints in different EPGs
+ # cannot communicate
+ #
+ pkt_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg2.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst=eps[2].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg0.remote_mac) /
+ IP(src=eps[2].ip4,
+ dst=eps[0].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_inter_epg_220_to_222 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst=eps[3].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_221 * NUM_PKTS)
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_222 * NUM_PKTS)
+
+ #
+ # A uni-directional contract from EPG 220 -> 221
+ #
+ rule = AclRule(is_permit=1, proto=17)
+ rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule, rule2])
+ acl.add_vpp_config()
+
+ c1 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[1].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
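+ # Contracts are directional: c1 permits sclass 220 -> 221 only,
+ # and the allowed-ethertype list [ETH_P_IP, ETH_P_IPV6] is what
+ # later drops the non-IP (ARP) stream. A minimal sketch of the
+ # constructor shape (argument names are illustrative):
+ #
+ #   VppGbpContract(test, scope, src_sclass, dst_sclass, acl_index,
+ #                  rules, allowed_ethertypes)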
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_220_to_221 * NUM_PKTS,
+ eps[2].itf)
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_222 * NUM_PKTS)
+
+ #
+ # contract for the return direction
+ #
+ c2 = VppGbpContract(
+ self, 400, epgs[1].sclass, epgs[0].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_220_to_221 * NUM_PKTS,
+ eps[2].itf)
+ self.send_and_expect_bridged(eps[2].itf,
+ pkt_inter_epg_221_to_220 * NUM_PKTS,
+ eps[0].itf)
+
+ ds = c2.get_drop_stats()
+ self.assertEqual(ds['packets'], 0)
+ ps = c2.get_permit_stats()
+ self.assertEqual(ps['packets'], NUM_PKTS)
+
+ #
+ # the contract does not allow non-IP
+ #
+ pkt_non_ip_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg2.remote_mac) /
+ ARP())
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_non_ip_inter_epg_220_to_221 * 17)
+
+ #
+ # check that inter group is still disabled for the groups
+ # not in the contract.
+ #
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_222 * NUM_PKTS)
+
+ #
+ # A uni-directional contract from EPG 220 -> 222 'L3 routed'
+ #
+ c3 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[2].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh gbp contract"))
+
+ self.send_and_expect_routed(eps[0].itf,
+ pkt_inter_epg_220_to_222 * NUM_PKTS,
+ eps[3].itf,
+ str(self.router_mac))
+ #
+ # remove all three contracts; traffic stops in both directions
+ #
+ c2.remove_vpp_config()
+ c1.remove_vpp_config()
+ c3.remove_vpp_config()
+ acl.remove_vpp_config()
+
+ self.send_and_assert_no_replies(eps[2].itf,
+ pkt_inter_epg_221_to_220 * NUM_PKTS)
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_221 * NUM_PKTS)
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_intra_epg * NUM_PKTS,
+ eps[1].itf)
+
+ #
+ # EPs to the outside world
+ #
+
+ # in the EP's RD an external subnet via the NAT EPG's recirc
+ se1 = VppGbpSubnet(
+ self, rd0, "0.0.0.0", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ sclass=epg_nat.sclass)
+ se2 = VppGbpSubnet(
+ self, rd0, "11.0.0.0", 8,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ sclass=epg_nat.sclass)
+ se16 = VppGbpSubnet(
+ self, rd0, "::", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ sclass=epg_nat.sclass)
+ # in the NAT RD an external subnet via the NAT EPG's uplink
+ se3 = VppGbpSubnet(
+ self, rd20, "0.0.0.0", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ sclass=epg_nat.sclass)
+ se36 = VppGbpSubnet(
+ self, rd20, "::", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ sclass=epg_nat.sclass)
+ se4 = VppGbpSubnet(
+ self, rd20, "11.0.0.0", 8,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ sclass=epg_nat.sclass)
+ se1.add_vpp_config()
+ se2.add_vpp_config()
+ se16.add_vpp_config()
+ se3.add_vpp_config()
+ se36.add_vpp_config()
+ se4.add_vpp_config()
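+ # The STITCHED_EXTERNAL subnets carry a sw_if_index and sclass:
+ # in the EPs' RD the default and 11/8 routes resolve via the NAT
+ # EPG's recirc interface, while in the NAT RD they resolve via its
+ # uplink; together these stitch the IN2OUT and OUT2IN paths below.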
+
+ self.logger.info(self.vapi.cli("sh ip fib 0.0.0.0/0"))
+ self.logger.info(self.vapi.cli("sh ip fib 11.0.0.1"))
+ self.logger.info(self.vapi.cli("sh ip6 fib ::/0"))
+ self.logger.info(self.vapi.cli("sh ip6 fib %s" %
+ eps[0].fip6))
+
+ #
+ # From an EP to an outside address: IN2OUT
+ #
+ pkt_inter_epg_220_to_global = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst="1.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ # no policy yet
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_global * NUM_PKTS)
+ rule = AclRule(is_permit=1, proto=17, ports=1234)
+ rule2 = AclRule(is_permit=1, proto=17, ports=1234,
+ src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)))
+ acl2 = VppAcl(self, rules=[rule, rule2])
+ acl2.add_vpp_config()
+
+ c4 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[3].sclass, acl2.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c4.add_vpp_config()
+
+ self.send_and_expect_natted(eps[0].itf,
+ pkt_inter_epg_220_to_global * NUM_PKTS,
+ self.pg7,
+ eps[0].fip4)
+
+ pkt_inter_epg_220_to_global = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[0].ip6,
+ dst="6001::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_natted6(self.pg0,
+ pkt_inter_epg_220_to_global * NUM_PKTS,
+ self.pg7,
+ eps[0].fip6)
+ #
+ # From a global address to an EP: OUT2IN
+ #
+ pkt_inter_epg_220_from_global = (Ether(src=str(self.router_mac),
+ dst=self.pg0.remote_mac) /
+ IP(dst=eps[0].fip4,
+ src="1.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(
+ self.pg7, pkt_inter_epg_220_from_global * NUM_PKTS)
+
+ c5 = VppGbpContract(
+ self, 400, epgs[3].sclass, epgs[0].sclass, acl2.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c5.add_vpp_config()
+
+ self.send_and_expect_unnatted(self.pg7,
+ pkt_inter_epg_220_from_global * NUM_PKTS,
+ eps[0].itf,
+ eps[0].ip4)
+
+ pkt_inter_epg_220_from_global = (Ether(src=str(self.router_mac),
+ dst=self.pg0.remote_mac) /
+ IPv6(dst=eps[0].fip6,
+ src="6001::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_unnatted6(
+ self.pg7,
+ pkt_inter_epg_220_from_global * NUM_PKTS,
+ eps[0].itf,
+ eps[0].ip6)
+
+ #
+ # From a local VM to another local VM using their respective
+ # public addresses: IN2OUT2IN
+ #
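+ # The stream is source-NATted on the way out and destination-NATted
+ # on the way back in, so the receiver sees src=eps[0].fip4 and
+ # dst=eps[1].ip4, which is what send_and_expect_double_natted
+ # asserts.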
+ pkt_intra_epg_220_global = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst=eps[1].fip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_double_natted(eps[0].itf,
+ pkt_intra_epg_220_global * NUM_PKTS,
+ eps[1].itf,
+ eps[0].fip4,
+ eps[1].ip4)
+
+ pkt_intra_epg_220_global = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[0].ip6,
+ dst=eps[1].fip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_double_natted6(
+ eps[0].itf,
+ pkt_intra_epg_220_global * NUM_PKTS,
+ eps[1].itf,
+ eps[0].fip6,
+ eps[1].ip6)
+
+ #
+ # cleanup
+ #
+ self.vapi.nat44_ed_plugin_enable_disable(enable=0)
+ self.vapi.nat66_plugin_enable_disable(enable=0)
+
+ def wait_for_ep_timeout(self, sw_if_index=None, ip=None, mac=None,
+ tep=None, n_tries=100, s_time=1):
+ # only learnt EPs can time out
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ flags = ep_flags.GBP_API_ENDPOINT_FLAG_LEARNT
+ while n_tries:
+ if not find_gbp_endpoint(self, sw_if_index, ip, mac, tep=tep,
+ flags=flags):
+ return True
+ n_tries = n_tries - 1
+ self.sleep(s_time)
+ self.assertFalse(find_gbp_endpoint(self, sw_if_index, ip, mac, tep=tep,
+ flags=flags))
+ return False
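+ # Minimal usage sketch (tunnel variable illustrative): poll for up
+ # to n_tries * s_time seconds for a learnt EP to be removed once
+ # its retention timer expires:
+ #
+ #   self.wait_for_ep_timeout(vx_tun.sw_if_index,
+ #                            mac='00:00:11:11:11:01')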
+
+ def test_gbp_learn_l2(self):
+ """ GBP L2 Endpoint Learning """
+
+ drop_no_contract = self.statistics.get_err_counter(
+ '/err/gbp-policy-port/drop-no-contract')
+ allow_intra_class = self.statistics.get_err_counter(
+ '/err/gbp-policy-port/allow-intra-sclass')
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ learnt = [{'mac': '00:00:11:11:11:01',
+ 'ip': '10.0.0.1',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.0.2',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ gt4 = VppIpTable(self, 1)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 1, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
+ rd1.add_vpp_config()
+
+ #
+ # Pg2 hosts the vxlan tunnel; hosts on pg2 act as TEPs
+ # Pg3 hosts the IP4 UU-flood VXLAN tunnel
+ # Pg4 hosts the IP6 UU-flood VXLAN tunnel
+ #
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg2.generate_remote_hosts(4)
+ self.pg2.configure_ipv4_neighbors()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ #
+ # Add a mcast destination VXLAN-GBP tunnel for B&M traffic
+ #
+ tun_bm = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ "239.1.1.1", 88,
+ mcast_itf=self.pg4)
+ tun_bm.add_vpp_config()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0,
+ self.pg3, tun_bm)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+
+ # ... and has a /32 applied
+ ip_addr = VppIpInterfaceAddress(self, gbd1.bvi, "10.0.0.128", 32)
+ ip_addr.add_vpp_config()
+
+ #
+ # The Endpoint-group in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 112, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+ epg_330 = VppGbpEndpointGroup(self, 330, 113, rd1, gbd1,
+ None, self.loop1,
+ "10.0.1.128",
+ "2001:11::128",
+ VppGbpEndpointRetention(4))
+ epg_330.add_vpp_config()
+
+ #
+ # The VXLAN GBP tunnel is a bridge-port and has L2 endpoint
+ # learning enabled
+ #
+ vx_tun_l2_1 = VppGbpVxlanTunnel(
+ self, 99, bd1.bd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L2,
+ self.pg2.local_ip4)
+ vx_tun_l2_1.add_vpp_config()
+
+ #
+ # A static endpoint that the learnt endpoints are trying to
+ # talk to
+ #
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ self.assertTrue(find_route(self, ep.ip4, 32, table_id=1))
+
+ # a packet with an sclass from an unknown EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[0].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=88, flags=0x88) /
+ Ether(src=learnt[0]["mac"], dst=ep.mac) /
+ IP(src=learnt[0]["ip"], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
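+ # VXLAN-GBP header sketch (as modelled by scapy's VXLAN layer):
+ # flags=0x88 sets G (GBP extension present) and I (VNI valid);
+ # gpid carries the source EPG's sclass; the gpflags bits A
+ # (policy applied) and D (do-not-learn) are exercised later in
+ # this test.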
+
+ self.send_and_assert_no_replies(self.pg2, p)
+
+ self.logger.info(self.vapi.cli("sh error"))
+ self.assert_error_counter_equal(
+ '/err/gbp-policy-port/drop-no-contract',
+ drop_no_contract + 1)
+
+ #
+ # we should not have learnt a new tunnel endpoint, since
+ # the sclass (88) does not match any known EPG.
+ #
+ self.assertEqual(INDEX_INVALID,
+ find_vxlan_gbp_tunnel(self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[0].ip4,
+ 99))
+
+ # ep is not learnt, because the EPG is unknown
+ self.assertEqual(len(self.vapi.gbp_endpoint_dump()), 1)
+
+ #
+ # Learn new EPs from IP packets
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ 99)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ #
+ # the EP is learnt via the learnt TEP
+ # both from its MAC and its IP
+ #
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ ip=l['ip']))
+
+ self.assert_error_counter_equal(
+ '/err/gbp-policy-port/allow-intra-sclass',
+ allow_intra_class + 2)
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show gbp vxlan"))
+ self.logger.info(self.vapi.cli("show ip mfib"))
+
+ #
+ # If we sleep for the threshold time, the learnt endpoints should
+ # age out
+ #
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+
+ #
+ # Learn new EPs from GARP packets received on the BD's mcast tunnel
+ #
+ for ii, l in enumerate(learnt):
+ # add some junk in the reserved field of the vxlan-header
+ # next to the VNI; we should accept the packet since reserved
+ # bits are ignored on rx.
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst="239.1.1.1") /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=88, reserved2=0x80, gpid=112, flags=0x88) /
+ Ether(src=l['mac'], dst="ff:ff:ff:ff:ff:ff") /
+ ARP(op="who-has",
+ psrc=l['ip'], pdst=l['ip'],
+ hwsrc=l['mac'], hwdst="ff:ff:ff:ff:ff:ff"))
+
+ rx = self.send_and_expect(self.pg4, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ 99)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ #
+ # the EP is learnt via the learnt TEP
+ # both from its MAC and its IP
+ #
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ ip=l['ip']))
+
+ #
+ # wait for the learnt endpoints to age out
+ #
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+
+ #
+ # Learn new EPs from L2 packets
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ 99)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ #
+ # the EP is learnt via the learnt TEP
+ # both from its MAC and its IP
+ #
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show gbp vxlan"))
+ self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+
+ #
+ # wait for the learnt endpoints to age out
+ #
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+
+ #
+ # repeat; the Do-not-learn (D) bit is set so the EPs are not learnt
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0x88, gpflags="D") /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # repeat
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ # set a reserved bit in addition to the G and I bits;
+ # reserved bits should not be checked on rx.
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0xc8) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # Static EP replies to the dynamic (learnt) EPs
+ #
+ self.logger.info(self.vapi.cli("sh l2fib bd_id 1"))
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IP(dst=l['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 17, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 112)
+ self.assertEqual(rx[VXLAN].vni, 99)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+
+ #
+ # repeat in the other EPG
+ # there's no contract between 220 and 330, but the A-bit is set
+ # so the packet is cleared for delivery
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=113, flags=0x88, gpflags='A') /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # the static EP cannot reach the learnt EPs since there is no
+ # contract; only test 1 EP as the others could time out
+ #
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IP(dst=learnt[0]['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, [p])
+
+ #
+ # refresh the entries after the check for no replies above
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=113, flags=0x88, gpflags='A') /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # Add the contract so they can talk
+ #
+ rule = AclRule(is_permit=1, proto=17)
+ rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule, rule2])
+ acl.add_vpp_config()
+
+ c1 = VppGbpContract(
+ self, 401, epg_220.sclass, epg_330.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IP(dst=l['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect(self.pg0, [p], self.pg2)
+
+ #
+ # send UU packets from the local EP
+ #
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ self.logger.info(self.vapi.cli("sh bridge-domain 1 detail"))
+ p_uu = (Ether(src=ep.mac, dst="00:11:11:11:11:11") /
+ IP(dst="10.0.0.133", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(ep.itf, [p_uu], gbd1.uu_fwd)
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+
+ p_bm = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
+ IP(dst="10.0.0.133", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect_only(ep.itf, [p_bm], tun_bm.mcast_itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg4.local_ip4)
+ self.assertEqual(rx[IP].dst, "239.1.1.1")
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 112)
+ self.assertEqual(rx[VXLAN].vni, 88)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertFalse(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ rule = AclRule(is_permit=1, proto=17)
+ rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule, rule2])
+ acl.add_vpp_config()
+
+ c2 = VppGbpContract(
+ self, 401, epg_330.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+ #
+ # Check v6 Endpoints learning
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=113, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IPv6(src=l['ip6'], dst=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(
+ self,
+ vx_tun_l2_1.sw_if_index,
+ ip=l['ip6'],
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4]))
+
+ self.logger.info(self.vapi.cli("sh int"))
+ self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
+ self.logger.info(self.vapi.cli("sh gbp vxlan"))
+ self.logger.info(self.vapi.cli("sh gbp endpoint"))
+ self.logger.info(self.vapi.cli("sh gbp interface"))
+
+ #
+ # EP moves to a different TEP
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[2].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=113, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IPv6(src=l['ip6'], dst=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * 1, self.pg0)
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(
+ self,
+ vx_tun_l2_1.sw_if_index,
+ sclass=113,
+ mac=l['mac'],
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4]))
+
+ #
+ # v6 remote EP reachability
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IPv6(dst=l['ip6'], src=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 112)
+ self.assertEqual(rx[VXLAN].vni, 99)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+ self.assertEqual(rx[IPv6].dst, l['ip6'])
+
+ #
+ # EP changes sclass
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[2].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IPv6(src=l['ip6'], dst=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * 1, self.pg0)
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(
+ self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac'],
+ sclass=112,
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4]))
+
+ #
+ # check reachability and contract intra-epg
+ #
+ allow_intra_class = self.statistics.get_err_counter(
+ '/err/gbp-policy-mac/allow-intra-sclass')
+
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IPv6(dst=l['ip6'], src=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ self.assertEqual(rx[VXLAN].gpid, 112)
+ self.assertEqual(rx[VXLAN].vni, 99)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+ self.assertEqual(rx[IPv6].dst, l['ip6'])
+
+ allow_intra_class += NUM_PKTS
+
+ self.assert_error_counter_equal(
+ '/err/gbp-policy-mac/allow-intra-sclass',
+ allow_intra_class)
+
+ #
+ # clean up
+ #
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+ self.pg2.unconfig_ip4()
+ self.pg3.unconfig_ip4()
+ self.pg4.unconfig_ip4()
+
+ def test_gbp_contract(self):
+ """ GBP Contracts """
+
+ #
+ # Route Domains
+ #
+ gt4 = VppIpTable(self, 0)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 0, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd0 = VppGbpRouteDomain(self, 0, 400, gt4, gt6, None, None)
+
+ rd0.add_vpp_config()
+
+ #
+ # Bridge Domains
+ #
+ bd1 = VppBridgeDomain(self, 1, arp_term=0)
+ bd2 = VppBridgeDomain(self, 2, arp_term=0)
+
+ bd1.add_vpp_config()
+ bd2.add_vpp_config()
+
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd0, self.loop0)
+ gbd2 = VppGbpBridgeDomain(self, bd2, rd0, self.loop1)
+
+ gbd1.add_vpp_config()
+ gbd2.add_vpp_config()
+
+ #
+ # 3 EPGs, 2 of which share a BD.
+ #
+ epgs = [VppGbpEndpointGroup(self, 220, 1220, rd0, gbd1,
+ None, self.loop0,
+ "10.0.0.128", "2001:10::128"),
+ VppGbpEndpointGroup(self, 221, 1221, rd0, gbd1,
+ None, self.loop0,
+ "10.0.1.128", "2001:10:1::128"),
+ VppGbpEndpointGroup(self, 222, 1222, rd0, gbd2,
+ None, self.loop1,
+ "10.0.2.128", "2001:10:2::128")]
+ #
+ # 4 end-points, 2 in the same subnet, 3 in the same BD
+ #
+ eps = [VppGbpEndpoint(self, self.pg0,
+ epgs[0], None,
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001::1"),
+ VppGbpEndpoint(self, self.pg1,
+ epgs[0], None,
+ "10.0.0.2", "11.0.0.2",
+ "2001:10::2", "3001::2"),
+ VppGbpEndpoint(self, self.pg2,
+ epgs[1], None,
+ "10.0.1.1", "11.0.0.3",
+ "2001:10:1::1", "3001::3"),
+ VppGbpEndpoint(self, self.pg3,
+ epgs[2], None,
+ "10.0.2.1", "11.0.0.4",
+ "2001:10:2::1", "3001::4")]
+
+ #
+ # Config related to each of the EPGs
+ #
+ for epg in epgs:
+ # IP config on the BVI interfaces
+ if epg != epgs[1]:
+ b4 = VppIpInterfaceBind(self, epg.bvi,
+ epg.rd.t4).add_vpp_config()
+ b6 = VppIpInterfaceBind(self, epg.bvi,
+ epg.rd.t6).add_vpp_config()
+ epg.bvi.set_mac(self.router_mac)
+
+ if_ip4 = VppIpInterfaceAddress(self, epg.bvi,
+ epg.bvi_ip4, 32,
+ bind=b4).add_vpp_config()
+ if_ip6 = VppIpInterfaceAddress(self, epg.bvi,
+ epg.bvi_ip6, 128,
+ bind=b6).add_vpp_config()
+
+ # add the BD ARP termination entry for BVI IP
+ epg.bd_arp_ip4 = VppBridgeDomainArpEntry(self, epg.bd.bd,
+ str(self.router_mac),
+ epg.bvi_ip4)
+ epg.bd_arp_ip4.add_vpp_config()
+
+ # EPG in VPP
+ epg.add_vpp_config()
+
+ #
+ # config ep
+ #
+ for ep in eps:
+ ep.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show interface"))
+ self.logger.info(self.vapi.cli("show br"))
+
+ #
+ # Intra epg allowed without contract
+ #
+ pkt_intra_epg_220_to_220 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg1.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst=eps[1].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(self.pg0,
+ pkt_intra_epg_220_to_220 * 65,
+ self.pg1)
+
+ pkt_intra_epg_220_to_220 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg1.remote_mac) /
+ IPv6(src=eps[0].ip6,
+ dst=eps[1].ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged6(self.pg0,
+ pkt_intra_epg_220_to_220 * 65,
+ self.pg1)
+
+ #
+ # Inter epg denied without contract
+ #
+ pkt_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg2.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst=eps[2].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, pkt_inter_epg_220_to_221)
+
+ #
+ # A uni-directional contract from EPG 220 -> 221
+ #
+ rule = AclRule(is_permit=1, proto=17)
+ rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ rule3 = AclRule(is_permit=1, proto=1)
+ acl = VppAcl(self, rules=[rule, rule2, rule3])
+ acl.add_vpp_config()
+
+ c1 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[1].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_220_to_221 * 65,
+ eps[2].itf)
+
+ pkt_inter_epg_220_to_222 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst=eps[3].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_222 * 65)
+
+ #
+ # ping router IP in different BD
+ #
+ pkt_router_ping_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst=epgs[1].bvi_ip4) /
+ ICMP(type='echo-request'))
+
+ self.send_and_expect(self.pg0, [pkt_router_ping_220_to_221], self.pg0)
+
+ pkt_router_ping_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[0].ip6,
+ dst=epgs[1].bvi_ip6) /
+ ICMPv6EchoRequest())
+
+ self.send_and_expect(self.pg0, [pkt_router_ping_220_to_221], self.pg0)
+
+ #
+ # contract for the return direction
+ #
+ c2 = VppGbpContract(
+ self, 400, epgs[1].sclass, epgs[0].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_220_to_221 * 65,
+ eps[2].itf)
+ pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg0.remote_mac) /
+ IP(src=eps[2].ip4,
+ dst=eps[0].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect_bridged(eps[2].itf,
+ pkt_inter_epg_221_to_220 * 65,
+ eps[0].itf)
+ pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[2].ip4,
+ dst=eps[0].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect_routed(eps[2].itf,
+ pkt_inter_epg_221_to_220 * 65,
+ eps[0].itf,
+ str(self.router_mac))
+ pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[2].ip6,
+ dst=eps[0].ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect_routed6(eps[2].itf,
+ pkt_inter_epg_221_to_220 * 65,
+ eps[0].itf,
+ str(self.router_mac))
+
+ #
+ # uni-directional contract between 220 and 222
+ #
+ c3 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[2].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ self.send_and_expect(eps[0].itf,
+ pkt_inter_epg_220_to_222 * 65,
+ eps[3].itf)
+
+ c3.remove_vpp_config()
+ c1.remove_vpp_config()
+ c2.remove_vpp_config()
+ acl.remove_vpp_config()
+
+ def test_gbp_bd_drop_flags(self):
+ """ GBP BD drop flags """
+
+ #
+ # IP tables
+ #
+ gt4 = VppIpTable(self, 1)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 1, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
+ rd1.add_vpp_config()
+
+ #
+ # a GBP bridge domain with a BVI only
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0,
+ None, None,
+ uu_drop=True, bm_drop=True)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+
+ # ... and has a /32 applied
+ ip_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32).add_vpp_config()
+
+ #
+ # The Endpoint-group
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 112, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(3))
+ epg_220.add_vpp_config()
+
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ #
+ # send UU/BM packets from the local EP with UU-drop and BM-drop
+ # enabled in the BD
+ #
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ p_uu = (Ether(src=ep.mac, dst="00:11:11:11:11:11") /
+ IP(dst="10.0.0.133", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_assert_no_replies(ep.itf, [p_uu])
+
+ p_bm = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
+ IP(dst="10.0.0.133", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_assert_no_replies(ep.itf, [p_bm])
+
+ self.logger.info(self.vapi.cli("sh int"))
+
+ def test_gbp_bd_arp_flags(self):
+ """ GBP BD arp flags """
+
+ #
+ # IP tables
+ #
+ gt4 = VppIpTable(self, 1)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 1, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
+ rd1.add_vpp_config()
+
+ #
+ # Pg4 hosts the underlay for the BD's UU-flood VXLAN tunnel
+ #
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ #
+ # Add a mcast destination VXLAN-GBP tunnel to act as the BD's
+ # UU-flood interface
+ tun_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ "239.1.1.1", 88,
+ mcast_itf=self.pg4)
+ tun_uu.add_vpp_config()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0,
+ tun_uu, None,
+ ucast_arp=True)
+ gbd1.add_vpp_config()
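+ # With ucast_arp set the BD unicasts ARP packets to its UU-fwd
+ # interface instead of flooding them; the check below therefore
+ # expects the EP's ARP to emerge encapsulated on pg4.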
+
+ # ... and has a /32 applied
+ ip_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32).add_vpp_config()
+
+ #
+ # The Endpoint-group
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 112, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(2))
+ epg_220.add_vpp_config()
+
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ #
+ # send an ARP packet from the local EP; expect it on the UU interface
+ #
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ p_arp = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
+ ARP(op="who-has",
+ psrc=ep.ip4, pdst="10.0.0.99",
+ hwsrc=ep.mac,
+ hwdst="ff:ff:ff:ff:ff:ff"))
+ self.send_and_expect(ep.itf, [p_arp], self.pg4)
+
+ self.pg4.unconfig_ip4()
+
+ def test_gbp_learn_vlan_l2(self):
+ """ GBP L2 Endpoint w/ VLANs"""
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ learnt = [{'mac': '00:00:11:11:11:01',
+ 'ip': '10.0.0.1',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.0.2',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ gt4 = VppIpTable(self, 1)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 1, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
+ rd1.add_vpp_config()
+
+ #
+ # Pg2 hosts the vxlan tunnel, hosts on pg2 to act as TEPs
+ #
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg2.generate_remote_hosts(4)
+ self.pg2.configure_ipv4_neighbors()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+
+ #
+ # The EP will be on a vlan sub-interface
+ #
+ vlan_11 = VppDot1QSubint(self, self.pg0, 11)
+ vlan_11.admin_up()
+ self.vapi.l2_interface_vlan_tag_rewrite(
+ sw_if_index=vlan_11.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
+ push_dot1q=11)
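+ # L2_POP_1 pops the single dot1q tag on rx and, with push_dot1q=11,
+ # pushes it back on tx, so frames delivered to the EP carry VLAN 11
+ # (asserted on rx[Dot1Q].vlan below).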
+
+ bd_uu_fwd = VppVxlanGbpTunnel(self, self.pg3.local_ip4,
+ self.pg3.remote_ip4, 116)
+ bd_uu_fwd.add_vpp_config()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+ # The BD is marked as do not learn, so no endpoints are ever
+ # learnt in this BD.
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, bd_uu_fwd,
+ learn=False)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+
+ # ... and has a /32 applied
+ ip_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32).add_vpp_config()
+
+ #
+ # The Endpoint-group in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 441, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+
+ #
+ # The VXLAN GBP tunnel is a bridge-port and has L2 endpoint
+ # learning enabled
+ #
+ vx_tun_l2_1 = VppGbpVxlanTunnel(
+ self, 99, bd1.bd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L2,
+ self.pg2.local_ip4)
+ vx_tun_l2_1.add_vpp_config()
+
+ #
+ # A static endpoint that the learnt endpoints are trying to
+ # talk to
+ #
+ ep = VppGbpEndpoint(self, vlan_11,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ self.assertTrue(find_route(self, ep.ip4, 32, table_id=1))
+
+ #
+ # Send to the static EP
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ #
+ # packet to EP has the EP's vlan tag
+ #
+ for rx in rxs:
+ self.assertEqual(rx[Dot1Q].vlan, 11)
+
+ #
+ # the EP is not learnt since the BD setting prevents it,
+ # and no TEP is created either
+ #
+ self.assertFalse(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+ self.assertEqual(INDEX_INVALID,
+ find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ 99))
+
+ self.assertEqual(len(self.vapi.gbp_endpoint_dump()), 1)
+
+ #
+ # static to remotes:
+ # we didn't learn the remotes, so packets are sent to the UU-fwd
+ # interface
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ Dot1Q(vlan=11) /
+ IP(dst=l['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 17, self.pg3)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg3.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertFalse(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ self.pg2.unconfig_ip4()
+ self.pg3.unconfig_ip4()
+
+ def test_gbp_learn_l3(self):
+ """ GBP L3 Endpoint Learning """
+
+ self.vapi.cli("set logging class gbp level debug")
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ learnt = [{'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.1.2',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:03',
+ 'ip': '10.0.1.3',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ tun_ip4_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ self.pg4.remote_ip4, 114)
+ tun_ip6_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ self.pg4.remote_ip4, 116)
+ tun_ip4_uu.add_vpp_config()
+ tun_ip6_uu.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, 401, t4, t6, tun_ip4_uu, tun_ip6_uu)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac)
+
+ #
+ # Bind the BVI to the RD
+ #
+ b4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ b6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+
+ #
+ # Pg2 hosts the vxlan tunnel
+ # hosts on pg2 act as TEPs
+ # pg3 is BD uu-fwd
+ # pg4 is RD uu-fwd
+ #
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg2.generate_remote_hosts(4)
+ self.pg2.configure_ipv4_neighbors()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, self.pg3)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ self.logger.info(self.vapi.cli("sh gbp route"))
+
+ # ... and has a /32 and /128 applied
+ ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32,
+ bind=b4).add_vpp_config()
+ ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "2001:10::128", 128,
+ bind=b6).add_vpp_config()
+
+ #
+ # The Endpoint-group in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 441, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+
+ #
+ # The VXLAN GBP tunnel is in L3 mode with learning enabled
+ #
+ vx_tun_l3 = VppGbpVxlanTunnel(
+ self, 101, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3.add_vpp_config()
+
+ #
+ # A static endpoint that the learnt endpoints are trying to
+ # talk to
+ #
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ #
+ # learn some remote IPv4 EPs
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ vx_tun_l3.vni)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=l['ip']))
+
+ #
+ # Static IPv4 EP replies to learnt
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst=l['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 101)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IP].src, ep.ip4)
+ self.assertEqual(inner[IP].dst, l['ip'])
+
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ tep1_sw_if_index,
+ ip=l['ip']))
+
+ #
+ # learn some remote IPv6 EPs
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IPv6(src=l['ip6'], dst=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ vx_tun_l3.vni)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ self.logger.info(self.vapi.cli("show gbp bridge"))
+ self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+ self.logger.info(self.vapi.cli("show gbp vxlan"))
+ self.logger.info(self.vapi.cli("show int addr"))
+
+ # endpoint learnt via the TEP
+ self.assertTrue(find_gbp_endpoint(self, ip=l['ip6']))
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show ip fib index 1 %s" % l['ip']))
+
+ #
+ # Static EP replies to learnt
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IPv6(dst=l['ip6'], src=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 101)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IPv6].src, ep.ip6)
+ self.assertEqual(inner[IPv6].dst, l['ip6'])
+
+ self.logger.info(self.vapi.cli("sh gbp endpoint"))
+ for l in learnt:
+ self.wait_for_ep_timeout(ip=l['ip'])
+
+ #
+ # Static sends to unknown EP with no route
+ #
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst="10.0.0.99", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, [p])
+
+ #
+ # Add a route to the static EP's v4 subnet
+ #
+ se_10_24 = VppGbpSubnet(
+ self, rd1, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT)
+ se_10_24.add_vpp_config()
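+ # (a TRANSPORT subnet installs a covering route in the RD, so
+ # addresses under it that match no learnt EP are forwarded on the
+ # RD's uu-fwd interface rather than dropped, as exercised below)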
+
+ #
+ # static pings router
+ #
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst=epg_220.bvi_ip4, src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg0)
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IPv6(dst=epg_220.bvi_ip6, src=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg0)
+
+ #
+ # packets to an address in the subnet are sent on the uu-fwd
+ #
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst="10.0.0.99", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, [p], self.pg4)
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg4.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg4.remote_ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 114)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # policy is not applied to packets sent to the uu-fwd interfaces
+ self.assertFalse(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
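+
+ # (on the gpflags checked throughout: 'A' marks policy as already
+ # applied at the source; leaving it clear here means the receiver
+ # of the uu-fwd traffic must enforce policy itself)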
+
+ #
+ # learn some remote IPv4 EPs
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[2].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4,
+ vx_tun_l3.vni)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=l['ip']))
+
+ #
+ # Add a remote endpoint from the API
+ #
+ rep_88 = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ "10.0.0.88", "11.0.0.88",
+ "2001:10::88", "3001::88",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4,
+ mac=None)
+ rep_88.add_vpp_config()
+
+ #
+ # Add a remote endpoint from the API that matches an existing one;
+ # this is at a lower priority, hence the packet is sent to the
+ # DP-learnt TEP
+ #
+ rep_2 = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ learnt[0]['ip'], "11.0.0.101",
+ learnt[0]['ip6'], "3001::101",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ mac=None)
+ rep_2.add_vpp_config()
+
+ #
+ # Add a route to the learnt EP's v4 subnet;
+ # packets should be sent on the v4/v6 uu-fwd interfaces respectively
+ #
+ se_10_1_24 = VppGbpSubnet(
+ self, rd1, "10.0.1.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT)
+ se_10_1_24.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+
+ ips = ["10.0.0.88", learnt[0]['ip']]
+ for ip in ips:
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst=ip, src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 101)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IP].src, ep.ip4)
+ self.assertEqual(inner[IP].dst, ip)
+
+ #
+ # remove the API remote EPs; only the API-sourced one is gone, the
+ # DP-learnt one remains
+ #
+ rep_88.remove_vpp_config()
+ rep_2.remove_vpp_config()
+
+ self.assertTrue(find_gbp_endpoint(self, ip=rep_2.ip4))
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(src=ep.ip4, dst=rep_2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg0, [p], self.pg2)
+
+ self.assertFalse(find_gbp_endpoint(self, ip=rep_88.ip4))
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(src=ep.ip4, dst=rep_88.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg0, [p], self.pg4)
+
+ #
+ # the test-case cannot end while the registered EP is still
+ # present (because it is DP-learnt), so wait until it times out
+ # and is removed
+ #
+ self.wait_for_ep_timeout(ip=rep_88.ip4)
+ self.wait_for_ep_timeout(ip=rep_2.ip4)
+
+ #
+ # Same as above, learn a remote EP via CP and DP;
+ # this time remove the DP one first and expect the CP data to remain
+ #
+ rep_3 = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ "10.0.1.4", "11.0.0.103",
+ "2001::10:3", "3001::103",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ mac=None)
+ rep_3.add_vpp_config()
+
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[2].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src="10.0.1.4", dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=rep_3.ip4,
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4]))
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst="10.0.1.4", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ # host 2 is the DP learned TEP
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
+
+ self.wait_for_ep_timeout(ip=rep_3.ip4,
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4])
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ # host 1 is the CP learned TEP
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+
+ #
+ # shutdown with learnt endpoint present
+ #
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src=learnt[1]['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=l['ip']))
+
+ #
+ # TODO
+ # remote endpoint becomes local
+ #
+ self.pg2.unconfig_ip4()
+ self.pg3.unconfig_ip4()
+ self.pg4.unconfig_ip4()
+
+ def test_gbp_redirect(self):
+ """ GBP Endpoint Redirect """
+
+ self.vapi.cli("set logging class gbp level debug")
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ learnt = [{'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.1.2',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:03',
+ 'ip': '10.0.1.3',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, 402, t4, t6)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac)
+
+ #
+ # Bind the BVI to the RD
+ #
+ b_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ b_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+
+ #
+ # Pg7 hosts a BD's UU-fwd
+ #
+ self.pg7.config_ip4()
+ self.pg7.resolve_arp()
+
+ #
+ # GBP bridge domains for the EPs
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0)
+ gbd1.add_vpp_config()
+
+ bd2 = VppBridgeDomain(self, 2)
+ bd2.add_vpp_config()
+ gbd2 = VppGbpBridgeDomain(self, bd2, rd1, self.loop1)
+ gbd2.add_vpp_config()
+
+ # ... and has a /32 and /128 applied
+ ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32,
+ bind=b_ip4).add_vpp_config()
+ ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "2001:10::128", 128,
+ bind=b_ip6).add_vpp_config()
+ ip4_addr = VppIpInterfaceAddress(self, gbd2.bvi,
+ "10.0.1.128", 32).add_vpp_config()
+ ip6_addr = VppIpInterfaceAddress(self, gbd2.bvi,
+ "2001:11::128", 128).add_vpp_config()
+
+ #
+ # The Endpoint-groups in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 440, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(60))
+ epg_220.add_vpp_config()
+ epg_221 = VppGbpEndpointGroup(self, 221, 441, rd1, gbd2,
+ None, gbd2.bvi,
+ "10.0.1.128",
+ "2001:11::128",
+ VppGbpEndpointRetention(60))
+ epg_221.add_vpp_config()
+ epg_222 = VppGbpEndpointGroup(self, 222, 442, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.2.128",
+ "2001:12::128",
+ VppGbpEndpointRetention(60))
+ epg_222.add_vpp_config()
+
+ #
+ # GBP bridge domains for the SEPs
+ #
+ bd_uu1 = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 116)
+ bd_uu1.add_vpp_config()
+ bd_uu2 = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 117)
+ bd_uu2.add_vpp_config()
+
+ bd3 = VppBridgeDomain(self, 3)
+ bd3.add_vpp_config()
+ gbd3 = VppGbpBridgeDomain(self, bd3, rd1, self.loop2,
+ bd_uu1, learn=False)
+ gbd3.add_vpp_config()
+ bd4 = VppBridgeDomain(self, 4)
+ bd4.add_vpp_config()
+ gbd4 = VppGbpBridgeDomain(self, bd4, rd1, self.loop3,
+ bd_uu2, learn=False)
+ gbd4.add_vpp_config()
+
+ #
+ # EPGs in which the service endpoints exist
+ #
+ epg_320 = VppGbpEndpointGroup(self, 320, 550, rd1, gbd3,
+ None, gbd1.bvi,
+ "12.0.0.128",
+ "4001:10::128",
+ VppGbpEndpointRetention(60))
+ epg_320.add_vpp_config()
+ epg_321 = VppGbpEndpointGroup(self, 321, 551, rd1, gbd4,
+ None, gbd2.bvi,
+ "12.0.1.128",
+ "4001:11::128",
+ VppGbpEndpointRetention(60))
+ epg_321.add_vpp_config()
+
+ #
+ # three local endpoints
+ #
+ ep1 = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001:10::1")
+ ep1.add_vpp_config()
+ ep2 = VppGbpEndpoint(self, self.pg1,
+ epg_221, None,
+ "10.0.1.1", "11.0.1.1",
+ "2001:11::1", "3001:11::1")
+ ep2.add_vpp_config()
+ ep3 = VppGbpEndpoint(self, self.pg2,
+ epg_222, None,
+ "10.0.2.2", "11.0.2.2",
+ "2001:12::1", "3001:12::1")
+ ep3.add_vpp_config()
+
+ #
+ # service endpoints
+ #
+ sep1 = VppGbpEndpoint(self, self.pg3,
+ epg_320, None,
+ "12.0.0.1", "13.0.0.1",
+ "4001:10::1", "5001:10::1")
+ sep1.add_vpp_config()
+ sep2 = VppGbpEndpoint(self, self.pg4,
+ epg_320, None,
+ "12.0.0.2", "13.0.0.2",
+ "4001:10::2", "5001:10::2")
+ sep2.add_vpp_config()
+ sep3 = VppGbpEndpoint(self, self.pg5,
+ epg_321, None,
+ "12.0.1.1", "13.0.1.1",
+ "4001:11::1", "5001:11::1")
+ sep3.add_vpp_config()
+ # this EP is not installed immediately
+ sep4 = VppGbpEndpoint(self, self.pg6,
+ epg_321, None,
+ "12.0.1.2", "13.0.1.2",
+ "4001:11::2", "5001:11::2")
+
+ #
+ # L2 switched packets between local EPs in different EPGs;
+ # different dest ports on each so they are LB hashed differently
+ #
+ p4 = [(Ether(src=ep1.mac, dst=ep3.mac) /
+ IP(src=ep1.ip4, dst=ep3.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep3.mac, dst=ep1.mac) /
+ IP(src=ep3.ip4, dst=ep1.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ p6 = [(Ether(src=ep1.mac, dst=ep3.mac) /
+ IPv6(src=ep1.ip6, dst=ep3.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep3.mac, dst=ep1.mac) /
+ IPv6(src=ep3.ip6, dst=ep1.ip6) /
+ UDP(sport=1234, dport=1230) /
+ Raw(b'\xa5' * 100))]
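+ # (the differing dports matter because the redirect next-hop is
+ # chosen by a flow hash, nominally over the 5-tuple, so changing
+ # any one field, e.g. dport 1230 vs 1234, may pick a different SEP)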
+
+ # should be dropped since no contract yet
+ self.send_and_assert_no_replies(self.pg0, [p4[0]])
+ self.send_and_assert_no_replies(self.pg0, [p6[0]])
+
+ #
+ # Add a contract with a rule to load-balance redirect via SEP1 and SEP2
+ # one of the next-hops is via an EP that is not known
+ #
+ rule4 = AclRule(is_permit=1, proto=17)
+ rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule4, rule6])
+ acl.add_vpp_config()
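+ # (proto=17 is UDP, so the ACL permits exactly the UDP flows used
+ # in this test)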
+
+ #
+ # test the src-ip hash mode
+ #
+ c1 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_222.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ c2 = VppGbpContract(
+ self, 402, epg_222.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ #
+ # send again with the contract present; now packets arrive
+ # at SEP1 or SEP2 depending on the hashing
+ #
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep3.ip4)
+
+ rxs = self.send_and_expect(self.pg2, p4[1] * 17, sep2.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep2.mac)
+ self.assertEqual(rx[IP].src, ep3.ip4)
+ self.assertEqual(rx[IP].dst, ep1.ip4)
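+
+ # (in src-ip mode the hash covers only the sender's address, which
+ # is why the two directions above can land on different SEPs)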
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 117)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep4.mac)
+ self.assertEqual(inner[IPv6].src, ep1.ip6)
+ self.assertEqual(inner[IPv6].dst, ep3.ip6)
+
+ rxs = self.send_and_expect(self.pg2, p6[1] * 17, sep3.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep3.mac)
+ self.assertEqual(rx[IPv6].src, ep3.ip6)
+ self.assertEqual(rx[IPv6].dst, ep1.ip6)
+
+ #
+ # programme the unknown EP
+ #
+ sep4.add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, sep4.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep4.mac)
+ self.assertEqual(rx[IPv6].src, ep1.ip6)
+ self.assertEqual(rx[IPv6].dst, ep3.ip6)
+
+ #
+ # and revert back to unprogrammed
+ #
+ sep4.remove_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 117)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep4.mac)
+ self.assertEqual(inner[IPv6].src, ep1.ip6)
+ self.assertEqual(inner[IPv6].dst, ep3.ip6)
+
+ c1.remove_vpp_config()
+ c2.remove_vpp_config()
+
+ #
+ # test the symmetric hash mode
+ #
+ c1 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_222.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ c2 = VppGbpContract(
+ self, 402, epg_222.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ #
+ # send again with the contract present; now packets arrive
+ # at SEP1 for both directions
+ #
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep3.ip4)
+
+ rxs = self.send_and_expect(self.pg2, p4[1] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep3.ip4)
+ self.assertEqual(rx[IP].dst, ep1.ip4)
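+
+ # a toy model of the symmetric selection (illustration only, not
+ # VPP's actual hash): sorting the address pair before hashing
+ # makes the chosen bucket independent of direction
+ def _sym_bucket(a, b, n_buckets):
+ lo, hi = sorted((str(a), str(b)))
+ return hash((lo, hi)) % n_buckets
+ assert (_sym_bucket(ep1.ip4, ep3.ip4, 2) ==
+ _sym_bucket(ep3.ip4, ep1.ip4, 2))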
+
+ #
+ # programme the unknown EP for the L3 tests
+ #
+ sep4.add_vpp_config()
+
+ #
+ # L3 switched packets between local EPs in different EPGs;
+ # different dest ports on each so they are LB hashed differently
+ #
+ p4 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep2.mac, dst=str(self.router_mac)) /
+ IP(src=ep2.ip4, dst=ep1.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ p6 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep1.ip6, dst=ep2.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep2.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep2.ip6, dst=ep1.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ c3 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_221.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep2.ip4)
+
+ #
+ # learn a remote EP in EPG 221
+ # packets coming from unknown remote EPs will be learnt & redirected
+ #
+ vx_tun_l3 = VppGbpVxlanTunnel(
+ self, 444, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3.add_vpp_config()
+
+ c4 = VppGbpContract(
+ self, 402, epg_221.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c4.add_vpp_config()
+
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=441, flags=0x88) /
+ Ether(src="00:22:22:22:22:33", dst=str(self.router_mac)) /
+ IP(src="10.0.0.88", dst=ep1.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ # unknown remote EP to local EP redirected
+ rxs = self.send_and_expect(self.pg7, [p], sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, "10.0.0.88")
+ self.assertEqual(rx[IP].dst, ep1.ip4)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip="10.0.0.88"))
+
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=441, flags=0x88) /
+ Ether(src="00:22:22:22:22:33", dst=str(self.router_mac)) /
+ IPv6(src="2001:10::88", dst=ep1.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ # unknown remote EP to local EP redirected (ipv6)
+ rxs = self.send_and_expect(self.pg7, [p], sep3.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep3.mac)
+ self.assertEqual(rx[IPv6].src, "2001:10::88")
+ self.assertEqual(rx[IPv6].dst, ep1.ip6)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip="2001:10::88"))
+
+ #
+ # L3 switch from local to remote EP
+ #
+ p4 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IP(src=ep1.ip4, dst="10.0.0.88") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ p6 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep1.ip6, dst="2001:10::88") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, "10.0.0.88")
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, sep4.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep4.mac)
+ self.assertEqual(rx[IPv6].src, ep1.ip6)
+ self.assertEqual(rx[IPv6].dst, "2001:10::88")
+
+ #
+ # test the dst-ip hash mode
+ #
+ c5 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_221.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c5.add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, "10.0.0.88")
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, sep3.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep3.mac)
+ self.assertEqual(rx[IPv6].src, ep1.ip6)
+ self.assertEqual(rx[IPv6].dst, "2001:10::88")
+
+ #
+ # a programmed remote SEP in EPG 320
+ #
+
+ # gbp vxlan tunnel for the remote SEP
+ vx_tun_l3_sep = VppGbpVxlanTunnel(
+ self, 555, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3_sep.add_vpp_config()
+
+ # remote SEP
+ sep5 = VppGbpEndpoint(self, vx_tun_l3_sep,
+ epg_320, None,
+ "12.0.0.10", "13.0.0.10",
+ "4001:10::10", "5001:10::10",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ mac=None)
+ sep5.add_vpp_config()
+
+ #
+ # local l3out redirect tests
+ #
+
+ # add local l3out
+ # the external bd
+ self.loop4.set_mac(self.router_mac)
+ b_lo4_ip4 = VppIpInterfaceBind(self, self.loop4, t4).add_vpp_config()
+ b_lo4_ip6 = VppIpInterfaceBind(self, self.loop4, t6).add_vpp_config()
+ ebd = VppBridgeDomain(self, 100)
+ ebd.add_vpp_config()
+ gebd = VppGbpBridgeDomain(self, ebd, rd1, self.loop4, None, None)
+ gebd.add_vpp_config()
+ # the external epg
+ eepg = VppGbpEndpointGroup(self, 888, 765, rd1, gebd,
+ None, gebd.bvi,
+ "10.1.0.128",
+ "2001:10:1::128",
+ VppGbpEndpointRetention(60))
+ eepg.add_vpp_config()
+ # add subnets to BVI
+ VppIpInterfaceAddress(
+ self,
+ gebd.bvi,
+ "10.1.0.128",
+ 24, bind=b_lo4_ip4).add_vpp_config()
+ VppIpInterfaceAddress(
+ self,
+ gebd.bvi,
+ "2001:10:1::128",
+ 64, bind=b_lo4_ip6).add_vpp_config()
+ # ... which are L3-out subnets
+ VppGbpSubnet(self, rd1, "10.1.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=765).add_vpp_config()
+ VppGbpSubnet(self, rd1, "2001:10:1::128", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=765).add_vpp_config()
+ # external endpoints
+ VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ eep1 = VppGbpEndpoint(self, self.vlan_100, eepg, None, "10.1.0.1",
+ "11.1.0.1", "2001:10:1::1", "3001:10:1::1",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep1.add_vpp_config()
+ VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ eep2 = VppGbpEndpoint(self, self.vlan_101, eepg, None, "10.1.0.2",
+ "11.1.0.2", "2001:10:1::2", "3001:10:1::2",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep2.add_vpp_config()
+
+ # external subnets reachable through eep1 and eep2 respectively
+ VppIpRoute(self, "10.220.0.0", 24,
+ [VppRoutePath(eep1.ip4, eep1.epg.bvi.sw_if_index)],
+ table_id=t4.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10.220.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220).add_vpp_config()
+ VppIpRoute(self, "10:220::", 64,
+ [VppRoutePath(eep1.ip6, eep1.epg.bvi.sw_if_index)],
+ table_id=t6.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10:220::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220).add_vpp_config()
+ VppIpRoute(self, "10.221.0.0", 24,
+ [VppRoutePath(eep2.ip4, eep2.epg.bvi.sw_if_index)],
+ table_id=t4.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10.221.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221).add_vpp_config()
+ VppIpRoute(self, "10:221::", 64,
+ [VppRoutePath(eep2.ip6, eep2.epg.bvi.sw_if_index)],
+ table_id=t6.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10:221::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221).add_vpp_config()
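+ # (the sclass values 4220/4221 attached to these subnets are what
+ # the redirect contracts below match on, standing in for the
+ # source/destination groups of the external prefixes)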
+
+ #
+ # l3out redirect to remote (known, then unknown) SEP
+ #
+
+ # packets from one external subnet to the other
+ p = [(Ether(src=eep1.mac, dst=self.router_mac) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.17", dst="10.221.0.65") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=eep1.mac, dst=self.router_mac) /
+ Dot1Q(vlan=100) /
+ IPv6(src="10:220::17", dst="10:221::65") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ # packets should be dropped in the absence of a contract
+ self.send_and_assert_no_replies(self.pg0, p)
+
+ # contract redirecting to sep5
+ VppGbpContract(
+ self, 402, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip4, sep5.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip6, sep5.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the programmed remote leaf TEP
+ self.assertEqual(rx[VXLAN].vni, 555)
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[Dot1Q].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
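+
+ # (note the D (don't-learn) gpflag is set in this l3out case,
+ # unlike the EP-to-EP redirects above, so the receiver will not
+ # create an endpoint for the external source)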
+
+ # remote SEP: it is now an unknown remote SEP and should go
+ # to spine proxy
+ sep5.remove_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the spine proxy TEP
+ self.assertEqual(rx[VXLAN].vni, epg_320.bd.uu_fwd.vni)
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[Dot1Q].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ #
+ # l3out redirect to local SEP
+ #
+
+ # change the contract between the l3outs to redirect to a local
+ # SEP instead of the remote SEP
+ VppGbpContract(
+ self, 402, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip6, sep1.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p, sep1.itf)
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ rxip = rx[Ether].payload
+ txip = tx[Ether].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ #
+ # redirect remote EP to remote (known then unknown) SEP
+ #
+
+ # remote SEP known again
+ sep5.add_vpp_config()
+
+ # contract to redirect to learnt SEP
+ VppGbpContract(
+ self, 402, epg_221.sclass, epg_222.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip4, sep5.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip6, sep5.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
+
+ # packets from an unknown EP in EPG 221 to a known EP in EPG 222
+ # should be redirected to known remote SEP
+ base = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=441, flags=0x88) /
+ Ether(src="00:22:22:22:22:44", dst=str(self.router_mac)))
+ p = [(base /
+ IP(src="10.0.1.100", dst=ep3.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (base /
+ IPv6(src="2001:10::100", dst=ep3.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ # unknown remote EP to local EP redirected to known remote SEP
+ rxs = self.send_and_expect(self.pg7, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the programmed remote leaf TEP
+ self.assertEqual(rx[VXLAN].vni, 555)
+ self.assertEqual(rx[VXLAN].gpid, epg_221.sclass)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[VXLAN][Ether].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip="10.0.1.100"))
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip="2001:10::100"))
+
+ # remote SEP: it is now an unknown remote SEP and should go
+ # to spine proxy
+ sep5.remove_vpp_config()
+
+ # remote EP (coming from the spine proxy) to local EP: the SEP is
+ # now unknown, so the redirect goes via the spine proxy
+ rxs = self.send_and_expect(self.pg7, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the spine proxy TEP
+ self.assertEqual(rx[VXLAN].vni, epg_320.bd.uu_fwd.vni)
+ self.assertEqual(rx[VXLAN].gpid, epg_221.sclass)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[VXLAN][Ether].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ #
+ # cleanup
+ #
+ self.pg7.unconfig_ip4()
+
+ def test_gbp_redirect_extended(self):
+ """ GBP Endpoint Redirect Extended """
+
+ self.vapi.cli("set logging class gbp level debug")
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ learnt = [{'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.1.2',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:03',
+ 'ip': '10.0.1.3',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ # create IPv4 and IPv6 RD UU VxLAN-GBP TEPs and bind them to the
+ # right VRF
+ rd_uu4 = VppVxlanGbpTunnel(
+ self,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ 114,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ rd_uu4.add_vpp_config()
+ VppIpInterfaceBind(self, rd_uu4, t4).add_vpp_config()
+
+ rd_uu6 = VppVxlanGbpTunnel(
+ self,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ 115,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ rd_uu6.add_vpp_config()
+ VppIpInterfaceBind(self, rd_uu6, t4).add_vpp_config()
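+ # (mode L3 makes these tunnels routed interfaces, hence the VRF
+ # binds above; the BD UU tunnels elsewhere in this file use the
+ # default L2 bridge-port mode)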
+
+ rd1 = VppGbpRouteDomain(self, 2, 402, t4, t6, rd_uu4, rd_uu6)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac)
+ self.loop1.set_mac(self.router_mac)
+ self.loop2.set_mac(self.router_mac)
+
+ #
+ # Bind the BVIs to the RD
+ #
+ b_lo0_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ b_lo0_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+ b_lo1_ip4 = VppIpInterfaceBind(self, self.loop1, t4).add_vpp_config()
+ b_lo1_ip6 = VppIpInterfaceBind(self, self.loop1, t6).add_vpp_config()
+ b_lo2_ip4 = VppIpInterfaceBind(self, self.loop2, t4).add_vpp_config()
+ b_lo2_ip6 = VppIpInterfaceBind(self, self.loop2, t6).add_vpp_config()
+
+ #
+ # Pg7 hosts a BD's UU-fwd
+ #
+ self.pg7.config_ip4()
+ self.pg7.resolve_arp()
+
+ #
+ # GBP bridge domains for the EPs
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0)
+ gbd1.add_vpp_config()
+
+ bd2 = VppBridgeDomain(self, 2)
+ bd2.add_vpp_config()
+ gbd2 = VppGbpBridgeDomain(self, bd2, rd1, self.loop1)
+ gbd2.add_vpp_config()
+
+ # ... and has a /32 and /128 applied
+ ip4_addr1 = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32,
+ bind=b_lo0_ip4).add_vpp_config()
+ ip6_addr1 = VppIpInterfaceAddress(self, gbd1.bvi,
+ "2001:10::128", 128,
+ bind=b_lo0_ip6).add_vpp_config()
+ ip4_addr2 = VppIpInterfaceAddress(self, gbd2.bvi,
+ "10.0.1.128", 32,
+ bind=b_lo1_ip4).add_vpp_config()
+ ip6_addr2 = VppIpInterfaceAddress(self, gbd2.bvi,
+ "2001:11::128", 128,
+ bind=b_lo1_ip6).add_vpp_config()
+
+ #
+ # The Endpoint-groups
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 440, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(60))
+ epg_220.add_vpp_config()
+ epg_221 = VppGbpEndpointGroup(self, 221, 441, rd1, gbd2,
+ None, gbd2.bvi,
+ "10.0.1.128",
+ "2001:11::128",
+ VppGbpEndpointRetention(60))
+ epg_221.add_vpp_config()
+
+ #
+ # a GBP bridge domain for the SEPs
+ #
+ bd_uu3 = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 116)
+ bd_uu3.add_vpp_config()
+
+ bd3 = VppBridgeDomain(self, 3)
+ bd3.add_vpp_config()
+ gbd3 = VppGbpBridgeDomain(self, bd3, rd1, self.loop2,
+ bd_uu3, learn=False)
+ gbd3.add_vpp_config()
+
+ ip4_addr3 = VppIpInterfaceAddress(self, gbd3.bvi,
+ "12.0.0.128", 32,
+ bind=b_lo2_ip4).add_vpp_config()
+ ip6_addr3 = VppIpInterfaceAddress(self, gbd3.bvi,
+ "4001:10::128", 128,
+ bind=b_lo2_ip6).add_vpp_config()
+
+ #
+ # EPGs in which the service endpoints exist
+ #
+ epg_320 = VppGbpEndpointGroup(self, 320, 550, rd1, gbd3,
+ None, gbd3.bvi,
+ "12.0.0.128",
+ "4001:10::128",
+ VppGbpEndpointRetention(60))
+ epg_320.add_vpp_config()
+
+ #
+ # endpoints
+ #
+ ep1 = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001:10::1")
+ ep1.add_vpp_config()
+ ep2 = VppGbpEndpoint(self, self.pg1,
+ epg_221, None,
+ "10.0.1.1", "11.0.1.1",
+ "2001:11::1", "3001:11::1")
+ ep2.add_vpp_config()
+
+ #
+ # service endpoints
+ #
+ sep1 = VppGbpEndpoint(self, self.pg3,
+ epg_320, None,
+ "12.0.0.1", "13.0.0.1",
+ "4001:10::1", "5001:10::1")
+ sep2 = VppGbpEndpoint(self, self.pg4,
+ epg_320, None,
+ "12.0.0.2", "13.0.0.2",
+ "4001:10::2", "5001:10::2")
+
+ # sep1 and sep2 are not added to config yet
+ # they are unknown for now
+
+ #
+ # add routes to EPG subnets
+ #
+ VppGbpSubnet(self, rd1, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT
+ ).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10.0.1.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT
+ ).add_vpp_config()
+
+ #
+ # Local host to known local host in different BD
+ # with SFC contract (source and destination are in
+ # one node and service endpoint in another node)
+ #
+ p4 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep2.mac, dst=str(self.router_mac)) /
+ IP(src=ep2.ip4, dst=ep1.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ p6 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep1.ip6, dst=ep2.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep2.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep2.ip6, dst=ep1.ip6) /
+ UDP(sport=1234, dport=1230) /
+ Raw(b'\xa5' * 100))]
+
+ # should be dropped since no contract yet
+ self.send_and_assert_no_replies(self.pg0, [p4[0]])
+ self.send_and_assert_no_replies(self.pg0, [p6[0]])
+
+ #
+ # Add a contract with a rule to load-balance redirect via SEP1 and SEP2
+ # one of the next-hops is via an EP that is not known
+ #
+ rule4 = AclRule(is_permit=1, proto=17)
+ rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule4, rule6])
+ acl.add_vpp_config()
+
+ #
+ # test the src-ip hash mode
+ #
+ c1 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_221.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip6, sep1.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ c2 = VppGbpContract(
+ self, 402, epg_221.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip6, sep1.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ # ep1 <--> ep2 redirected through sep1
+ # sep1 is unknown
+ # packets are redirected to the SEP BD and then go out via the
+ # SEP BD's UU-fwd
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep1.mac)
+ self.assertEqual(inner[IP].src, ep1.ip4)
+ self.assertEqual(inner[IP].dst, ep2.ip4)
+
+ rxs = self.send_and_expect(self.pg1, p4[1] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep1.mac)
+ self.assertEqual(inner[IP].src, ep2.ip4)
+ self.assertEqual(inner[IP].dst, ep1.ip4)
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep1.mac)
+ self.assertEqual(inner[IPv6].src, ep1.ip6)
+ self.assertEqual(inner[IPv6].dst, ep2.ip6)
+
+ rxs = self.send_and_expect(self.pg1, p6[1] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep1.mac)
+ self.assertEqual(inner[IPv6].src, ep2.ip6)
+ self.assertEqual(inner[IPv6].dst, ep1.ip6)
+
+ # configure sep1: it is now local
+ # packets between ep1 and ep2 are redirected locally
+ sep1.add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep2.ip4)
+
+ rxs = self.send_and_expect(self.pg1, p6[1] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IPv6].src, ep2.ip6)
+ self.assertEqual(rx[IPv6].dst, ep1.ip6)
+
+ # packet coming from the l2 spine-proxy to sep1
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=116, gpid=440, gpflags=0x08, flags=0x88) /
+ Ether(src=str(self.router_mac), dst=sep1.mac) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, [p] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep2.ip4)
+
+ # contract for SEP to communicate with dst EP
+ c3 = VppGbpContract(
+ self, 402, epg_320.sclass, epg_221.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC)],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ # temporarily remove ep2, so that ep2 is remote & unknown
+ ep2.remove_vpp_config()
+
+ # packet going back from sep1 to its original dest (ep2)
+ # as ep2 is now unknown (see above), it must go through
+ # the rd UU (packet is routed)
+
+ p1 = (Ether(src=sep1.mac, dst=self.router_mac) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg3, [p1] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 114)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IP].src, ep1.ip4)
+ self.assertEqual(inner[IP].dst, ep2.ip4)
+
+ self.logger.info(self.vapi.cli("show bridge 3 detail"))
+ sep1.remove_vpp_config()
+
+ self.logger.info(self.vapi.cli("show bridge 1 detail"))
+ self.logger.info(self.vapi.cli("show bridge 2 detail"))
+
+ # re-add ep2: it is local again :)
+ ep2.add_vpp_config()
+
+ # packet coming back from the remote sep through rd UU
+ p2 = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=114, gpid=441, gpflags=0x09, flags=0x88) /
+ Ether(src=str(self.router_mac), dst=self.router_mac) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, [p2], self.pg1)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep2.ip4)
+
+ #
+ # cleanup
+ #
+ c1.remove_vpp_config()
+ c2.remove_vpp_config()
+ c3.remove_vpp_config()
+ self.pg7.unconfig_ip4()
+
+ def test_gbp_l3_out(self):
+ """ GBP L3 Out """
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ self.vapi.cli("set logging class gbp level debug")
+
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, 55, t4, t6)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac)
+
+ #
+ # Bind the BVI to the RD
+ #
+ b_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ b_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+
+ #
+ # Pg7 hosts the BD's BUM traffic
+ # Pg1 is some other L3 interface
+ #
+ self.pg7.config_ip4()
+ self.pg7.resolve_arp()
+
+ #
+ # a multicast vxlan-gbp tunnel for broadcast in the BD
+ #
+ tun_bm = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
+ "239.1.1.1", 88,
+ mcast_itf=self.pg7)
+ tun_bm.add_vpp_config()
+
+ #
+ # a GBP external bridge domain for the EPs
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, None, tun_bm)
+ gbd1.add_vpp_config()
+
+ #
+ # The Endpoint-groups in which the external endpoints exist
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 113, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+
+ # the BVI has the subnets applied ...
+ ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi, "10.0.0.128",
+ 24, bind=b_ip4).add_vpp_config()
+ ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi, "2001:10::128",
+ 64, bind=b_ip6).add_vpp_config()
+
+ # ... which are L3-out subnets
+ l3o_1 = VppGbpSubnet(
+ self, rd1, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=113)
+ l3o_1.add_vpp_config()
+
+ #
+ # an external interface attached to the outside world and the
+ # external BD
+ #
+ VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ vlan_144 = VppDot1QSubint(self, self.pg0, 144)
+ vlan_144.admin_up()
+ # vlan_102 is not popped
+
+ #
+ # a unicast vxlan-gbp tunnel for inter-RD traffic
+ #
+ vx_tun_l3 = VppGbpVxlanTunnel(
+ self, 444, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3.add_vpp_config()
+
+ #
+ # External Endpoints
+ #
+ eep1 = VppGbpEndpoint(self, self.vlan_100,
+ epg_220, None,
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001::1",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep1.add_vpp_config()
+ eep2 = VppGbpEndpoint(self, self.vlan_101,
+ epg_220, None,
+ "10.0.0.2", "11.0.0.2",
+ "2001:10::2", "3001::2",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep2.add_vpp_config()
+ eep3 = VppGbpEndpoint(self, self.vlan_102,
+ epg_220, None,
+ "10.0.0.3", "11.0.0.3",
+ "2001:10::3", "3001::3",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep3.add_vpp_config()
+
+ #
+ # A remote external endpoint
+ #
+ rep = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ "10.0.0.101", "11.0.0.101",
+ "2001:10::101", "3001::101",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ mac=None)
+ rep.add_vpp_config()
+
+ #
+ # EP1 impersonating EP3 is dropped
+ #
+ p = (Ether(src=eep1.mac, dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=100) /
+ ARP(op="who-has",
+ psrc="10.0.0.3", pdst="10.0.0.128",
+ hwsrc=eep1.mac, hwdst="ff:ff:ff:ff:ff:ff"))
+ self.send_and_assert_no_replies(self.pg0, p)
+
+ #
+ # ARP packets from external EPs are accepted and replied to
+ #
+ p_arp = (Ether(src=eep1.mac, dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=100) /
+ ARP(op="who-has",
+ psrc=eep1.ip4, pdst="10.0.0.128",
+ hwsrc=eep1.mac, hwdst="ff:ff:ff:ff:ff:ff"))
+ rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0)
+
+ #
+ # ARP packets from a host in a remote subnet are accepted and replied to
+ #
+ p_arp = (Ether(src=eep3.mac, dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=102) /
+ ARP(op="who-has",
+ psrc=eep3.ip4, pdst="10.0.0.128",
+ hwsrc=eep3.mac, hwdst="ff:ff:ff:ff:ff:ff"))
+ rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0)
+
+ #
+ # packets destined to unknown addresses in the BVI's subnet
+ # are ARP'd for
+ #
+ p4 = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.0.0.1", dst="10.0.0.88") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ p6 = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IPv6(src="2001:10::1", dst="2001:10::88") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p4 * 1, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ # self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, "239.1.1.1")
+ self.assertEqual(rx[VXLAN].vni, 88)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # policy was applied to the original IP packet
+ self.assertEqual(rx[VXLAN].gpid, 113)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertTrue(inner.haslayer(ARP))
+
+ #
+ # remote to external
+ #
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=113, flags=0x88) /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.0.0.101", dst="10.0.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
+
+ #
+ # local EP pings router
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src=eep1.ip4, dst="10.0.0.128") /
+ ICMP(type='echo-request'))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, eep1.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 100)
+
+ #
+ # local EP pings other local EP
+ #
+ p = (Ether(src=eep1.mac, dst=eep2.mac) /
+ Dot1Q(vlan=100) /
+ IP(src=eep1.ip4, dst=eep2.ip4) /
+ ICMP(type='echo-request'))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, eep1.mac)
+ self.assertEqual(rx[Ether].dst, eep2.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 101)
+
+ #
+ # local EP pings router w/o the vlan tag popped
+ #
+ p = (Ether(src=eep3.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=102) /
+ IP(src=eep3.ip4, dst="10.0.0.128") /
+ ICMP(type='echo-request'))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, self.vlan_102.remote_mac)
+
+ #
+ # An ip4 subnet reachable through the external EP1
+ #
+ ip_220 = VppIpRoute(self, "10.220.0.0", 24,
+ [VppRoutePath(eep1.ip4,
+ eep1.epg.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_220.add_vpp_config()
+
+ l3o_220 = VppGbpSubnet(
+ self, rd1, "10.220.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o_220.add_vpp_config()
+
+ #
+ # An ip6 subnet reachable through the external EP1
+ #
+ ip6_220 = VppIpRoute(self, "10:220::", 64,
+ [VppRoutePath(eep1.ip6,
+ eep1.epg.bvi.sw_if_index)],
+ table_id=t6.table_id)
+ ip6_220.add_vpp_config()
+
+ l3o6_220 = VppGbpSubnet(
+ self, rd1, "10:220::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o6_220.add_vpp_config()
+
+ #
+ # A subnet reachable through the external EP2
+ #
+ ip_221 = VppIpRoute(self, "10.221.0.0", 24,
+ [VppRoutePath(eep2.ip4,
+ eep2.epg.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_221.add_vpp_config()
+
+ l3o_221 = VppGbpSubnet(
+ self, rd1, "10.221.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221)
+ l3o_221.add_vpp_config()
+
+ #
+ # ping between hosts in remote subnets
+ # dropped without a contract
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.221.0.1") /
+ ICMP(type='echo-request'))
+
+ self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+ # contract for the external nets to communicate
+ #
+ rule4 = AclRule(is_permit=1, proto=17)
+ rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule4, rule6])
+ acl.add_vpp_config()
+
+ #
+ # A contract with the wrong scope is not matched
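+ # (rd1 was created with scope 55; a contract in scope 44 therefore
+ # does not apply to traffic routed in this RD)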
+ #
+ c_44 = VppGbpContract(
+ self, 44, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c_44.add_vpp_config()
+ self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ c1 = VppGbpContract(
+ self, 55, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ #
+ # Contracts allowing ext-net 200 to talk with external EPs
+ #
+ c2 = VppGbpContract(
+ self, 55, 4220, 113, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+ c3 = VppGbpContract(
+ self, 55, 113, 4220, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ #
+ # ping between hosts in remote subnets
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.221.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, eep2.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 101)
+
+ # we did not learn these external hosts
+ self.assertFalse(find_gbp_endpoint(self, ip="10.220.0.1"))
+ self.assertFalse(find_gbp_endpoint(self, ip="10.221.0.1"))
+
+ #
+ # from remote external EP to local external EP
+ #
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=113, flags=0x88) /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.0.0.101", dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
+
+ #
+ # ping from an external host to the remote external EP
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst=rep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ # self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 444)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # the sclass of the ext-net the packet came from
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ # policy was applied to the original IP packet
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ # since it's an external host the receiver should not learn it
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[IP].src, "10.220.0.1")
+ self.assertEqual(inner[IP].dst, rep.ip4)
+
+ #
+ # An external subnet reachable via the remote external EP
+ #
+
+ #
+ # first the VXLAN-GBP tunnel over which it is reached
+ #
+ vx_tun_r1 = VppVxlanGbpTunnel(
+ self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 445,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ vx_tun_r1.add_vpp_config()
+ VppIpInterfaceBind(self, vx_tun_r1, t4).add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
+
+ #
+ # then the special adj to resolve through on that tunnel
+ #
+ n1 = VppNeighbor(self,
+ vx_tun_r1.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip4)
+ n1.add_vpp_config()
+
+ #
+ # the route via the adj above
+ #
+ ip_222 = VppIpRoute(self, "10.222.0.0", 24,
+ [VppRoutePath(self.pg7.remote_ip4,
+ vx_tun_r1.sw_if_index)],
+ table_id=t4.table_id)
+ ip_222.add_vpp_config()
+
+ l3o_222 = VppGbpSubnet(
+ self, rd1, "10.222.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4222)
+ l3o_222.add_vpp_config()
+
+ #
+ # ping between hosts in local and remote external subnets
+ # dropped without a contract
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+ # Add contracts ext-nets for 220 -> 222
+ #
+ c4 = VppGbpContract(
+ self, 55, 4220, 4222, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c4.add_vpp_config()
+
+ #
+ # ping from host in local to remote external subnets
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 3, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 445)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # the sclass of the ext-net the packet came from
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ # policy was applied to the original IP packet
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ # since it's an external host the receiver should not learn it
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[Ether].dst, "00:0c:0c:0c:0c:0c")
+ self.assertEqual(inner[IP].src, "10.220.0.1")
+ self.assertEqual(inner[IP].dst, "10.222.0.1")
+
+ #
+ # make the external subnet ECMP
+ #
+ vx_tun_r2 = VppVxlanGbpTunnel(
+ self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 446,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ vx_tun_r2.add_vpp_config()
+ VppIpInterfaceBind(self, vx_tun_r2, t4).add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
+
+ n2 = VppNeighbor(self,
+ vx_tun_r2.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip4)
+ n2.add_vpp_config()
+
+ ip_222.modify([VppRoutePath(self.pg7.remote_ip4,
+ vx_tun_r1.sw_if_index),
+ VppRoutePath(self.pg7.remote_ip4,
+ vx_tun_r2.sw_if_index)])
+
+ #
+ # now expect load-balance
+ #
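+ # the two flows below differ only in their L4 ports; assuming the
+ # default 5-tuple flow hash, they should be steered onto different
+ # tunnels (and hence arrive with different VNIs)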
+ p = [(Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1222, dport=1235) /
+ Raw(b'\xa5' * 100))]
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ self.assertEqual(rxs[0][VXLAN].vni, 445)
+ self.assertEqual(rxs[1][VXLAN].vni, 446)
+
+ #
+ # Same LB test for v6
+ #
+ n3 = VppNeighbor(self,
+ vx_tun_r1.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip6)
+ n3.add_vpp_config()
+ n4 = VppNeighbor(self,
+ vx_tun_r2.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip6)
+ n4.add_vpp_config()
+
+ ip_222_6 = VppIpRoute(self, "10:222::", 64,
+ [VppRoutePath(self.pg7.remote_ip6,
+ vx_tun_r1.sw_if_index),
+ VppRoutePath(self.pg7.remote_ip6,
+ vx_tun_r2.sw_if_index)],
+ table_id=t6.table_id)
+ ip_222_6.add_vpp_config()
+
+ l3o_222_6 = VppGbpSubnet(
+ self, rd1, "10:222::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4222)
+ l3o_222_6.add_vpp_config()
+
+ p = [(Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IPv6(src="10:220::1", dst="10:222::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IPv6(src="10:220::1", dst="10:222::1") /
+ UDP(sport=7777, dport=8881) /
+ Raw(b'\xa5' * 100))]
+
+ self.logger.info(self.vapi.cli("sh ip6 fib 10:222::1"))
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ self.assertEqual(rxs[0][VXLAN].vni, 445)
+ self.assertEqual(rxs[1][VXLAN].vni, 446)
+
+ #
+ # ping from host in remote to local external subnets
+ # there's no contract for this, but the A bit is set.
+ #
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.222.0.1", dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 3, self.pg0)
+ self.assertFalse(find_gbp_endpoint(self, ip="10.222.0.1"))
+
+ #
+ # ping from host in remote to remote external subnets
+ # this is dropped by reflection check.
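+ # (i.e. traffic that arrived from the fabric must not be forwarded
+ # back into the fabric)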
+ #
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.222.0.1", dst="10.222.0.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
+
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IPv6(src="10:222::1", dst="10:222::2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
+
+ #
+ # local EP
+ #
+ lep1 = VppGbpEndpoint(self, vlan_144,
+ epg_220, None,
+ "10.0.0.44", "11.0.0.44",
+ "2001:10::44", "3001::44")
+ lep1.add_vpp_config()
+
+ #
+ # local EP to local ip4 external subnet
+ #
+ p = (Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IP(src=lep1.ip4, dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, eep1.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 100)
+
+ #
+ # local EP to local ip6 external subnet
+ #
+ p = (Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IPv6(src=lep1.ip6, dst="10:220::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, eep1.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 100)
+
+ #
+ # ip4 and ip6 subnets that load-balance
+ #
+ ip_20 = VppIpRoute(self, "10.20.0.0", 24,
+ [VppRoutePath(eep1.ip4,
+ eep1.epg.bvi.sw_if_index),
+ VppRoutePath(eep2.ip4,
+ eep2.epg.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_20.add_vpp_config()
+
+ l3o_20 = VppGbpSubnet(
+ self, rd1, "10.20.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o_20.add_vpp_config()
+
+ ip6_20 = VppIpRoute(self, "10:20::", 64,
+ [VppRoutePath(eep1.ip6,
+ eep1.epg.bvi.sw_if_index),
+ VppRoutePath(eep2.ip6,
+ eep2.epg.bvi.sw_if_index)],
+ table_id=t6.table_id)
+ ip6_20.add_vpp_config()
+
+ l3o6_20 = VppGbpSubnet(
+ self, rd1, "10:20::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o6_20.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh ip fib 10.20.0.1"))
+ self.logger.info(self.vapi.cli("sh ip6 fib 10:20::1"))
+
+ # two ip6 packets whose ports are chosen so they load-balance
+ p = [(Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IPv6(src=lep1.ip6, dst="10:20::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IPv6(src=lep1.ip6, dst="10:20::1") /
+ UDP(sport=124, dport=1230) /
+ Raw(b'\xa5' * 100))]
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg0, 2)
+
+ self.assertEqual(rxs[0][Dot1Q].vlan, 101)
+ self.assertEqual(rxs[1][Dot1Q].vlan, 100)
+
+ # two ip4 packets whose ports are chosen so they load-balance
+ p = [(Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IP(src=lep1.ip4, dst="10.20.0.1") /
+ UDP(sport=1235, dport=1235) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IP(src=lep1.ip4, dst="10.20.0.1") /
+ UDP(sport=124, dport=1230) /
+ Raw(b'\xa5' * 100))]
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg0, 2)
+
+ self.assertEqual(rxs[0][Dot1Q].vlan, 101)
+ self.assertEqual(rxs[1][Dot1Q].vlan, 100)
+
+ #
+ # cleanup
+ #
+ ip_222.remove_vpp_config()
+ self.pg7.unconfig_ip4()
+ self.vlan_101.set_vtr(L2_VTR_OP.L2_DISABLED)
+ self.vlan_100.set_vtr(L2_VTR_OP.L2_DISABLED)
+
+ def test_gbp_anon_l3_out(self):
+ """ GBP Anonymous L3 Out """
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ self.vapi.cli("set logging class gbp level debug")
+
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, 55, t4, t6)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac)
+
+ #
+ # Bind the BVI to the RD
+ #
+ bind_l0_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ bind_l0_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+
+ #
+ # Pg7 hosts the BD's BUM traffic
+ # Pg1 is some other L3 interface
+ #
+ self.pg7.config_ip4()
+ self.pg7.resolve_arp()
+
+ #
+ # a GBP external bridge domain for the EPs
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, None, None)
+ gbd1.add_vpp_config()
+
+ #
+ # The Endpoint-groups in which the external endpoints exist
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 113, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+
+ # the BVI has the subnet applied ...
+ ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 24,
+ bind=bind_l0_ip4).add_vpp_config()
+
+ # ... which is an Anonymous L3-out subnet
+ l3o_1 = VppGbpSubnet(
+ self, rd1, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_ANON_L3_OUT,
+ sclass=113)
+ l3o_1.add_vpp_config()
+
+ #
+ # an external interface attached to the outside world and the
+ # external BD
+ #
+ VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
+
+ #
+ # vlan_100 and vlan_101 are anonymous l3-out interfaces
+ #
+ ext_itf = VppGbpExtItf(self, self.vlan_100, bd1, rd1, anon=True)
+ ext_itf.add_vpp_config()
+ ext_itf = VppGbpExtItf(self, self.vlan_101, bd1, rd1, anon=True)
+ ext_itf.add_vpp_config()
+
+ #
+ # a unicast vxlan-gbp tunnel for inter-RD traffic
+ #
+ vx_tun_l3 = VppGbpVxlanTunnel(
+ self, 444, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3.add_vpp_config()
+
+ #
+ # A remote external endpoint
+ #
+ rep = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ "10.0.0.201", "11.0.0.201",
+ "2001:10::201", "3001::101",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ mac=None)
+ rep.add_vpp_config()
+
+ #
+ # ARP packets from hosts in the external subnet are accepted, flooded
+ # and replied to. We expect 2 packets:
+ # - ARP request flooded over the other vlan subif
+ # - ARP reply from the BVI
+ #
+ p_arp = (Ether(src=self.vlan_100.remote_mac,
+ dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=100) /
+ ARP(op="who-has",
+ psrc="10.0.0.100",
+ pdst="10.0.0.128",
+ hwsrc=self.vlan_100.remote_mac,
+ hwdst="ff:ff:ff:ff:ff:ff"))
+ rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0, n_rx=2)
+
+ p_arp = (Ether(src=self.vlan_101.remote_mac,
+ dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=101) /
+ ARP(op="who-has",
+ psrc='10.0.0.101',
+ pdst="10.0.0.128",
+ hwsrc=self.vlan_101.remote_mac,
+ hwdst="ff:ff:ff:ff:ff:ff"))
+ rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0, n_rx=2)
+
+ #
+ # remote to external
+ #
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=vx_tun_l3.vni, gpid=epg_220.sclass, flags=0x88) /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src=str(rep.ip4), dst="10.0.0.100") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
+
+ #
+ # local EP pings router
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.0.0.100", dst="10.0.0.128") /
+ ICMP(type='echo-request'))
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, self.vlan_100.remote_mac)
+ self.assertEqual(rx[Dot1Q].vlan, 100)
+
+ #
+ # local EP pings other local EP
+ #
+ p = (Ether(src=self.vlan_100.remote_mac,
+ dst=self.vlan_101.remote_mac) /
+ Dot1Q(vlan=100) /
+ IP(src="10.0.0.100", dst="10.0.0.101") /
+ ICMP(type='echo-request'))
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.vlan_100.remote_mac)
+ self.assertEqual(rx[Ether].dst, self.vlan_101.remote_mac)
+ self.assertEqual(rx[Dot1Q].vlan, 101)
+
+ #
+ # A subnet reachable through an external router on vlan 100
+ #
+ ip_220 = VppIpRoute(self, "10.220.0.0", 24,
+ [VppRoutePath("10.0.0.100",
+ epg_220.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_220.add_vpp_config()
+
+ l3o_220 = VppGbpSubnet(
+ self, rd1, "10.220.0.0", 24,
+ # note: this a "regular" L3 out subnet (not connected)
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o_220.add_vpp_config()
+
+ #
+ # A subnet reachable through an external router on vlan 101
+ #
+ ip_221 = VppIpRoute(self, "10.221.0.0", 24,
+ [VppRoutePath("10.0.0.101",
+ epg_220.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_221.add_vpp_config()
+
+ l3o_221 = VppGbpSubnet(
+ self, rd1, "10.221.0.0", 24,
+ # note: this a "regular" L3 out subnet (not connected)
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221)
+ l3o_221.add_vpp_config()
+
+ #
+ # ping between hosts in remote subnets
+ # dropped without a contract
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.221.0.1") /
+ ICMP(type='echo-request'))
+
+ rxs = self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+ # contract for the external nets to communicate
+ #
+ rule4 = AclRule(is_permit=1, proto=17)
+ rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule4, rule6])
+ acl.add_vpp_config()
+
+ c1 = VppGbpContract(
+ self, 55, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ #
+ # Contracts allowing ext-net 200 to talk with external EPs
+ #
+ c2 = VppGbpContract(
+ self, 55, 4220, 113, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+ c3 = VppGbpContract(
+ self, 55, 113, 4220, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ #
+ # ping between hosts in remote subnets
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.221.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, self.vlan_101.remote_mac)
+ self.assertEqual(rx[Dot1Q].vlan, 101)
+
+ # we did not learn these external hosts
+ self.assertFalse(find_gbp_endpoint(self, ip="10.220.0.1"))
+ self.assertFalse(find_gbp_endpoint(self, ip="10.221.0.1"))
+
+ #
+ # from remote external EP to local external EP
+ #
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=113, flags=0x88) /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src=rep.ip4, dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
+
+ #
+ # ping from an external host to the remote external EP
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst=rep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ # self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 444)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # the sclass of the ext-net the packet came from
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ # policy was applied to the original IP packet
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ # since it's an external host the receiver should not learn it
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[IP].src, "10.220.0.1")
+ self.assertEqual(inner[IP].dst, rep.ip4)
+
+ #
+ # An external subnet reachable via the remote external EP
+ #
+
+ #
+ # first the VXLAN-GBP tunnel over which it is reached
+ #
+ vx_tun_r = VppVxlanGbpTunnel(
+ self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 445,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ vx_tun_r.add_vpp_config()
+ VppIpInterfaceBind(self, vx_tun_r, t4).add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
+
+ #
+ # then the special adj to resolve through on that tunnel
+ #
+ n1 = VppNeighbor(self,
+ vx_tun_r.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip4)
+ n1.add_vpp_config()
+
+ #
+ # the route via the adj above
+ #
+ ip_222 = VppIpRoute(self, "10.222.0.0", 24,
+ [VppRoutePath(self.pg7.remote_ip4,
+ vx_tun_r.sw_if_index)],
+ table_id=t4.table_id)
+ ip_222.add_vpp_config()
+
+ l3o_222 = VppGbpSubnet(
+ self, rd1, "10.222.0.0", 24,
+ # note: this a "regular" l3out subnet (not connected)
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4222)
+ l3o_222.add_vpp_config()
+
+ #
+ # ping between hosts in local and remote external subnets
+ # dropped without a contract
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+ # Add contracts ext-nets for 220 -> 222
+ #
+ c4 = VppGbpContract(
+ self, 55, 4220, 4222, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c4.add_vpp_config()
+
+ #
+ # ping from host in local to remote external subnets
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 3, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 445)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # the sclass of the ext-net the packet came from
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ # policy was applied to the original IP packet
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ # since it's an external host the receiver should not learn it
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[Ether].dst, "00:0c:0c:0c:0c:0c")
+ self.assertEqual(inner[IP].src, "10.220.0.1")
+ self.assertEqual(inner[IP].dst, "10.222.0.1")
+
+ #
+ # ping from host in remote to local external subnets
+ # there's no contract for this, but the A bit is set.
+ #
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.222.0.1", dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 3, self.pg0)
+ self.assertFalse(find_gbp_endpoint(self, ip="10.222.0.1"))
+
+ #
+ # ping from host in remote to remote external subnets
+ # this is dropped by reflection check.
+ #
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.222.0.1", dst="10.222.0.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
+
+ #
+ # cleanup
+ #
+ self.vlan_101.set_vtr(L2_VTR_OP.L2_DISABLED)
+ self.vlan_100.set_vtr(L2_VTR_OP.L2_DISABLED)
+ self.pg7.unconfig_ip4()
+ # make sure the programmed EP is no longer learnt from DP
+ self.wait_for_ep_timeout(sw_if_index=rep.itf.sw_if_index, ip=rep.ip4)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/extras/deprecated/plugins/l2e/CMakeLists.txt b/extras/deprecated/plugins/l2e/CMakeLists.txt
new file mode 100644
index 00000000000..2bfb05a43e6
--- /dev/null
+++ b/extras/deprecated/plugins/l2e/CMakeLists.txt
@@ -0,0 +1,28 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(l2e
+ SOURCES
+ l2e_node.c
+ l2e_api.c
+ l2e.c
+
+ MULTIARCH_SOURCES
+ l2e_node.c
+
+ API_FILES
+ l2e.api
+
+ INSTALL_HEADERS
+ l2e.h
+)
diff --git a/extras/deprecated/plugins/l2e/l2e.api b/extras/deprecated/plugins/l2e/l2e.api
new file mode 100644
index 00000000000..586e2bae5ca
--- /dev/null
+++ b/extras/deprecated/plugins/l2e/l2e.api
@@ -0,0 +1,39 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option version = "1.0.0";
+import "vnet/interface_types.api";
+
+/** \brief L2 emulation at L3
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - interface the operation is applied to
+ @param enable - Turn the service on or off
+*/
+autoreply define l2_emulation
+{
+ option status="in_progress";
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+ bool enable;
+};
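+
+/*
+ * A hypothetical usage sketch via the generated Python (papi) binding;
+ * the sw_if_index value is an assumption:
+ *
+ *   rv = vpp.api.l2_emulation(sw_if_index=1, enable=True)
+ */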
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/l2e/l2e.c b/extras/deprecated/plugins/l2e/l2e.c
new file mode 100644
index 00000000000..4c6eac50446
--- /dev/null
+++ b/extras/deprecated/plugins/l2e/l2e.c
@@ -0,0 +1,198 @@
+/*
+ * l2e.c : Extract L3 packets from the L2 input and feed
+ * them into the L3 path.
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/l2e/l2e.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/ip/ip.h>
+
+l2_emulation_main_t l2_emulation_main;
+
+/**
+ * A zero'd out struct we can use in the vec_validate
+ */
+static const l2_emulation_t ezero = { };
+
+__clib_export void
+l2_emulation_enable (u32 sw_if_index)
+{
+ l2_emulation_main_t *em = &l2_emulation_main;
+ vec_validate_init_empty (em->l2_emulations, sw_if_index, ezero);
+
+ l2_emulation_t *l23e = &em->l2_emulations[sw_if_index];
+
+ l23e->enabled = 1;
+
+ /*
+ * L3 enable the interface - using IP unnumbered from the control
+ * plane may not be possible since there may be no BVI interface
+ * to which to unnumber
+ */
+ ip4_sw_interface_enable_disable (sw_if_index, 1);
+ ip6_sw_interface_enable_disable (sw_if_index, 1);
+
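+ /* add this interface to the L2 input feature bitmap so that the
+ * l2-emulation node is part of its L2 input feature arc */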
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_L2_EMULATION, 1);
+}
+
+
+__clib_export void
+l2_emulation_disable (u32 sw_if_index)
+{
+ l2_emulation_main_t *em = &l2_emulation_main;
+ if (vec_len (em->l2_emulations) > sw_if_index)
+ {
+ l2_emulation_t *l23e = &em->l2_emulations[sw_if_index];
+ clib_memset (l23e, 0, sizeof (*l23e));
+
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_L2_EMULATION, 0);
+ ip4_sw_interface_enable_disable (sw_if_index, 0);
+ ip6_sw_interface_enable_disable (sw_if_index, 0);
+ }
+}
+
+static clib_error_t *
+l2_emulation_interface_add_del (vnet_main_t * vnm,
+ u32 sw_if_index, u32 is_add)
+{
+ l2_emulation_main_t *em = &l2_emulation_main;
+ if (is_add)
+ {
+ vec_validate_init_empty (em->l2_emulations, sw_if_index, ezero);
+ }
+
+ return (NULL);
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION (l2_emulation_interface_add_del);
+
+static clib_error_t *
+l2_emulation_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+ u8 enable = 1;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else if (unformat (input, "enable"))
+ enable = 1;
+ else if (unformat (input, "disable"))
+ enable = 0;
+ else
+ break;
+ }
+
+ if (~0 == sw_if_index)
+ return clib_error_return (0, "interface must be specified");
+
+ if (enable)
+ l2_emulation_enable (sw_if_index);
+ else
+ l2_emulation_disable (sw_if_index);
+
+ return (NULL);
+}
+
+/*?
+ * Configure l2 emulation.
+ * When the interface is in L2 mode, configure the extraction of L3
+ * packets out of the L2 path and into the L3 path.
+ *
+ * @cliexpar
+ * @cliexstart{set interface l2 l2-emulation <interface-name> [disable|enable]}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2_emulation_cli_node, static) = {
+ .path = "set interface l2 l2-emulation",
+ .short_help =
+ "set interface l2 l2-emulation <interface-name> [disable|enable]\n",
+ .function = l2_emulation_cli,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+l2_emulation_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ l2_emulation_main_t *em = &l2_emulation_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ l2_emulation_t *l23e;
+ u32 sw_if_index;
+
+ vec_foreach_index (sw_if_index, em->l2_emulations)
+ {
+ l23e = &em->l2_emulations[sw_if_index];
+ if (l23e->enabled)
+ {
+ vlib_cli_output (vm, "%U\n",
+ format_vnet_sw_if_index_name, vnm, sw_if_index);
+ }
+ }
+ return (NULL);
+}
+
+/*?
+ * Show l2 emulation.
+ * List the interfaces on which L2 emulation is enabled, i.e. those
+ * from which L3 packets are extracted out of the L2 path and into
+ * the L3 path.
+ *
+ * @cliexpar
+ * @cliexstart{show interface l2 l2-emulation}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (l2_emulation_show_node, static) = {
+ .path = "show interface l2 l2-emulation",
+ .short_help = "show interface l2 l2-emulation\n",
+ .function = l2_emulation_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+l2_emulation_init (vlib_main_t * vm)
+{
+ l2_emulation_main_t *em = &l2_emulation_main;
+ vlib_node_t *node;
+
+ node = vlib_get_node_by_name (vm, (u8 *) "l2-emulation");
+ em->l2_emulation_node_index = node->index;
+
+ /* Initialize the feature next-node indexes */
+ feat_bitmap_init_next_nodes (vm,
+ em->l2_emulation_node_index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ em->l2_input_feat_next);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (l2_emulation_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/l2e/l2e.h b/extras/deprecated/plugins/l2e/l2e.h
new file mode 100644
index 00000000000..e548d333f9d
--- /dev/null
+++ b/extras/deprecated/plugins/l2e/l2e.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_l2_emulation_h
+#define included_vnet_l2_emulation_h
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/**
+ * Per-interface L2 configuration
+ */
+typedef struct l2_emulation_t_
+{
+ /**
+ * Enabled or Disabled.
+ * this is required since one L3 protocol can be enabled, but others not
+ */
+ u8 enabled;
+} l2_emulation_t;
+
+/**
+ * per-packet trace data
+ */
+typedef struct l2_emulation_trace_t_
+{
+ /* per-pkt trace data */
+ u8 extracted;
+} l2_emulation_trace_t;
+
+/**
+ * Grouping of global data for the L2 emulation feature
+ */
+typedef struct l2_emulation_main_t_
+{
+ u16 msg_id_base;
+
+ u32 l2_emulation_node_index;
+
+ /**
+ * Per-interface vector of emulation configs
+ */
+ l2_emulation_t *l2_emulations;
+
+ /**
+ * Next nodes for L2 input features
+ */
+ u32 l2_input_feat_next[32];
+} l2_emulation_main_t;
+
+/**
+ * L2 Emulation is a feature that is applied to L2 ports to 'extract'
+ * IP packets from the L2 path and inject them into the L3 path (i.e.
+ * into the appropriate ip[4|6]-input node).
+ * L3 routes in the table_id for that interface should then be configured
+ * as DVR routes, so the forwarded packet has the L2 header
+ * preserved and together the L3-routed system behaves like an L2 bridge.
+ */
+extern void l2_emulation_enable (u32 sw_if_index);
+extern void l2_emulation_disable (u32 sw_if_index);
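+
+/*
+ * A minimal usage sketch (sw_if_index is a placeholder; the interface
+ * is assumed to already be in L2 mode on a bridge domain):
+ *
+ *   l2_emulation_enable (sw_if_index);   - IP now diverted to the L3 path
+ *   ...
+ *   l2_emulation_disable (sw_if_index);  - back to pure L2 forwarding
+ */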
+
+extern l2_emulation_main_t l2_emulation_main;
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/l2e/l2e_api.c b/extras/deprecated/plugins/l2e/l2e_api.c
new file mode 100644
index 00000000000..fe2fb7ee06e
--- /dev/null
+++ b/extras/deprecated/plugins/l2e/l2e_api.c
@@ -0,0 +1,89 @@
+/*
+ *------------------------------------------------------------------
+ * l2e_api.c - layer 2 emulation api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vpp/app/version.h>
+
+#include <l2e/l2e.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+/* define message IDs */
+#include <l2e/l2e.api_enum.h>
+#include <l2e/l2e.api_types.h>
+
+#include <vlibapi/api_helper_macros.h>
+
+#define L2E_MSG_BASE l2em->msg_id_base
+
+static void
+vl_api_l2_emulation_t_handler (vl_api_l2_emulation_t * mp)
+{
+ l2_emulation_main_t *l2em = &l2_emulation_main;
+ vl_api_l2_emulation_reply_t *rmp;
+ int rv = 0;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ if (mp->enable)
+ l2_emulation_enable (sw_if_index);
+ else
+ l2_emulation_disable (sw_if_index);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_L2_EMULATION_REPLY + L2E_MSG_BASE);
+}
+
+#include <l2e/l2e.api.c>
+static clib_error_t *
+l2e_init (vlib_main_t * vm)
+{
+ l2_emulation_main_t *l2em = &l2_emulation_main;
+
+ /* Ask for a correctly-sized block of API message decode slots */
+ l2em->msg_id_base = setup_message_id_table ();
+
+ return (NULL);
+}
+
+VLIB_API_INIT_FUNCTION (l2e_init);
+
+/* *INDENT-OFF* */
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "Layer 2 (L2) Emulation",
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/plugins/l2e/l2e_node.c b/extras/deprecated/plugins/l2e/l2e_node.c
new file mode 100644
index 00000000000..71c9b4bc6af
--- /dev/null
+++ b/extras/deprecated/plugins/l2e/l2e_node.c
@@ -0,0 +1,283 @@
+/*
+ * l2e_node.c : l2 emulation node
+ *
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <plugins/l2e/l2e.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+
+#define foreach_l2_emulation \
+ _(IP4, "Extract IPv4") \
+ _(IP6, "Extract IPv6")
+
+typedef enum
+{
+#define _(sym,str) L2_EMULATION_ERROR_##sym,
+ foreach_l2_emulation
+#undef _
+ L2_EMULATION_N_ERROR,
+} l2_emulation_error_t;
+
+static char *l2_emulation_error_strings[] = {
+#define _(sym,string) string,
+ foreach_l2_emulation
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) L2_EMULATION_NEXT_##sym,
+ foreach_l2_emulation
+#undef _
+ L2_EMULATION_N_NEXT,
+} l2_emulation_next_t;
+
+/* packet trace format function */
+static u8 *
+format_l2_emulation_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ l2_emulation_trace_t *t = va_arg (*args, l2_emulation_trace_t *);
+
+ s = format (s, "l2-emulation: %s", (t->extracted ? "yes" : "no"));
+
+ return s;
+}
+
+VLIB_NODE_FN (l2_emulation_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ l2_emulation_main_t *em = &l2_emulation_main;
+ u32 n_left_from, *from, *to_next;
+ l2_emulation_next_t next_index;
+ u32 ip4_hits = 0;
+ u32 ip6_hits = 0;
+
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ u32 sw_if_index0, sw_if_index1;
+ u16 ether_type0, ether_type1;
+ u32 next0 = ~0, next1 = ~0;
+ u8 l2_len0, l2_len1;
+ u32 bi0, bi1;
+ u8 *h0, *h1;
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ l2_len0 = vnet_buffer (b0)->l2.l2_len;
+ l2_len1 = vnet_buffer (b1)->l2.l2_len;
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ ether_type0 = clib_net_to_host_u16 (*(u16 *) (h0 + l2_len0 - 2));
+ ether_type1 = clib_net_to_host_u16 (*(u16 *) (h1 + l2_len1 - 2));
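+ /* the ethertype is in the last 2 bytes of the (possibly VLAN-tagged)
+ * L2 header, hence the l2_len - 2 offset */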
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ /*
+ * only extract unicast
+ */
+ if (PREDICT_TRUE (!(h0[0] & 0x1)))
+ {
+ switch (ether_type0)
+ {
+ case ETHERNET_TYPE_IP4:
+ ASSERT (em->l2_emulations[sw_if_index0].enabled);
+ ++ip4_hits;
+ next0 = L2_EMULATION_NEXT_IP4;
+ vlib_buffer_advance (b0, l2_len0);
+ break;
+ case ETHERNET_TYPE_IP6:
+ ASSERT (em->l2_emulations[sw_if_index0].enabled);
+ ++ip6_hits;
+ next0 = L2_EMULATION_NEXT_IP6;
+ vlib_buffer_advance (b0, l2_len0);
+ break;
+ default:
+ break;
+ }
+ }
+ if (PREDICT_TRUE (!(h1[0] & 0x1)))
+ {
+ switch (ether_type1)
+ {
+ case ETHERNET_TYPE_IP4:
+ ASSERT (em->l2_emulations[sw_if_index1].enabled);
+ ++ip4_hits;
+ next1 = L2_EMULATION_NEXT_IP4;
+ vlib_buffer_advance (b1, l2_len1);
+ break;
+ case ETHERNET_TYPE_IP6:
+ ASSERT (em->l2_emulations[sw_if_index1].enabled);
+ ++ip6_hits;
+ next1 = L2_EMULATION_NEXT_IP6;
+ vlib_buffer_advance (b1, l2_len1);
+ break;
+ default:
+ break;
+ }
+ }
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_emulation_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->extracted = (next0 != ~0);
+ }
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b1->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_emulation_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->extracted = (next1 != ~0);
+ }
+
+ /* Determine the next node and remove ourself from bitmap */
+ if (PREDICT_TRUE (next0 == ~0))
+ next0 = vnet_l2_feature_next (b0, em->l2_input_feat_next,
+ L2INPUT_FEAT_L2_EMULATION);
+
+ /* Determine the next node and remove ourself from bitmap */
+ if (PREDICT_TRUE (next1 == ~0))
+ next1 = vnet_l2_feature_next (b1, em->l2_input_feat_next,
+ L2INPUT_FEAT_L2_EMULATION);
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 sw_if_index0;
+ u16 ether_type0;
+ u32 next0 = ~0;
+ u8 l2_len0;
+ u32 bi0;
+ u8 *h0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ l2_len0 = vnet_buffer (b0)->l2.l2_len;
+
+ h0 = vlib_buffer_get_current (b0);
+ ether_type0 = clib_net_to_host_u16 (*(u16 *) (h0 + l2_len0 - 2));
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ /*
+ * only extract unicast
+ */
+ if (PREDICT_TRUE (!(h0[0] & 0x1)))
+ {
+ switch (ether_type0)
+ {
+ case ETHERNET_TYPE_IP4:
+ ASSERT (em->l2_emulations[sw_if_index0].enabled);
+ ++ip4_hits;
+ next0 = L2_EMULATION_NEXT_IP4;
+ vlib_buffer_advance (b0, l2_len0);
+ break;
+ case ETHERNET_TYPE_IP6:
+ ASSERT (em->l2_emulations[sw_if_index0].enabled);
+ ++ip6_hits;
+ next0 = L2_EMULATION_NEXT_IP6;
+ vlib_buffer_advance (b0, l2_len0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ l2_emulation_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->extracted = (next0 != ~0);
+ }
+
+ /* Determine the next node and remove ourself from bitmap */
+ if (PREDICT_TRUE (next0 == ~0))
+ next0 = vnet_l2_feature_next (b0, em->l2_input_feat_next,
+ L2INPUT_FEAT_L2_EMULATION);
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_EMULATION_ERROR_IP4, ip4_hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ L2_EMULATION_ERROR_IP6, ip6_hits);
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (l2_emulation_node) = {
+ .name = "l2-emulation",
+ .vector_size = sizeof (u32),
+ .format_trace = format_l2_emulation_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(l2_emulation_error_strings),
+ .error_strings = l2_emulation_error_strings,
+
+ .n_next_nodes = L2_EMULATION_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [L2_EMULATION_NEXT_IP4] = "ip4-input",
+ [L2_EMULATION_NEXT_IP6] = "ip6-input",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/vnet/vxlan-gbp/decap.c b/extras/deprecated/vnet/vxlan-gbp/decap.c
new file mode 100644
index 00000000000..927c778b211
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/decap.c
@@ -0,0 +1,1050 @@
+/*
+ * decap.c: vxlan gbp tunnel decap packet processing
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+
+typedef struct
+{
+ u32 next_index;
+ u32 tunnel_index;
+ u32 error;
+ u32 vni;
+ u16 sclass;
+ u8 flags;
+} vxlan_gbp_rx_trace_t;
+
+static u8 *
+format_vxlan_gbp_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vxlan_gbp_rx_trace_t *t = va_arg (*args, vxlan_gbp_rx_trace_t *);
+
+ if (t->tunnel_index == ~0)
+ return format (s,
+ "VXLAN_GBP decap error - tunnel for vni %d does not exist",
+ t->vni);
+ return format (s,
+ "VXLAN_GBP decap from vxlan_gbp_tunnel%d vni %d sclass %d"
+ " flags %U next %d error %d",
+ t->tunnel_index, t->vni, t->sclass,
+ format_vxlan_gbp_header_gpflags, t->flags,
+ t->next_index, t->error);
+}
+
+always_inline u32
+buf_fib_index (vlib_buffer_t * b, u32 is_ip4)
+{
+ u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
+ if (sw_if_index != (u32) ~ 0)
+ return sw_if_index;
+
+ u32 *fib_index_by_sw_if_index = is_ip4 ?
+ ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index;
+ sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
+
+ return vec_elt (fib_index_by_sw_if_index, sw_if_index);
+}
+
+typedef vxlan4_gbp_tunnel_key_t last_tunnel_cache4;
+
+always_inline vxlan_gbp_tunnel_t *
+vxlan4_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache4 * cache,
+ u32 fib_index, ip4_header_t * ip4_0,
+ vxlan_gbp_header_t * vxlan_gbp0)
+{
+ /*
+ * Check unicast first since that's where most of the traffic comes from
+ * Make sure the VXLAN_GBP tunnel exists according to the packet SIP, DIP and VNI
+ */
+ vxlan4_gbp_tunnel_key_t key4;
+ int rv;
+
+ key4.key[1] = (((u64) fib_index << 32) |
+ (vxlan_gbp0->vni_reserved &
+ clib_host_to_net_u32 (0xffffff00)));
+ key4.key[0] =
+ (((u64) ip4_0->dst_address.as_u32 << 32) | ip4_0->src_address.as_u32);
+
+ if (PREDICT_FALSE (key4.key[0] != cache->key[0] ||
+ key4.key[1] != cache->key[1]))
+ {
+ rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
+ &key4);
+ if (PREDICT_FALSE (rv == 0))
+ {
+ *cache = key4;
+ return (pool_elt_at_index (vxm->tunnels, cache->value));
+ }
+ }
+ else
+ {
+ return (pool_elt_at_index (vxm->tunnels, cache->value));
+ }
+
+ /* No unicast match - try multicast */
+ if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
+ return (NULL);
+
+ key4.key[0] = ip4_0->dst_address.as_u32;
+ /* Make sure the mcast VXLAN_GBP tunnel exists by packet DIP and VNI */
+ rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, &key4);
+
+ if (PREDICT_FALSE (rv != 0))
+ return (NULL);
+
+ return (pool_elt_at_index (vxm->tunnels, key4.value));
+}
+
+typedef vxlan6_gbp_tunnel_key_t last_tunnel_cache6;
+
+always_inline vxlan_gbp_tunnel_t *
+vxlan6_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache6 * cache,
+ u32 fib_index, ip6_header_t * ip6_0,
+ vxlan_gbp_header_t * vxlan_gbp0)
+{
+ /* Make sure the VXLAN_GBP tunnel exists according to the packet SIP and VNI */
+ vxlan6_gbp_tunnel_key_t key6 = {
+ .key = {
+ [0] = ip6_0->src_address.as_u64[0],
+ [1] = ip6_0->src_address.as_u64[1],
+ [2] = ((((u64) fib_index) << 32) |
+ (vxlan_gbp0->vni_reserved &
+ clib_host_to_net_u32 (0xffffff00))),
+ }
+ };
+ int rv;
+
+ if (PREDICT_FALSE
+ (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
+ {
+ rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
+ &key6);
+ if (PREDICT_FALSE (rv != 0))
+ return NULL;
+
+ *cache = key6;
+ }
+ vxlan_gbp_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);
+
+ /* Validate VXLAN_GBP tunnel SIP against packet DIP */
+ if (PREDICT_FALSE
+ (!ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
+ {
+ /* try multicast */
+ if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address)))
+ return 0;
+
+ /* Make sure the mcast VXLAN_GBP tunnel exists by packet DIP and VNI */
+ key6.key[0] = ip6_0->dst_address.as_u64[0];
+ key6.key[1] = ip6_0->dst_address.as_u64[1];
+ rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
+ &key6);
+ if (PREDICT_FALSE (rv != 0))
+ return 0;
+
+ }
+
+ return t0;
+}
+
+always_inline vxlan_gbp_input_next_t
+vxlan_gbp_tunnel_get_next (const vxlan_gbp_tunnel_t * t, vlib_buffer_t * b0)
+{
+ if (VXLAN_GBP_TUNNEL_MODE_L2 == t->mode)
+ return (VXLAN_GBP_INPUT_NEXT_L2_INPUT);
+ else
+ {
+ ethernet_header_t *e0;
+ u16 type0;
+
+ e0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, sizeof (*e0));
+ type0 = clib_net_to_host_u16 (e0->type);
+ switch (type0)
+ {
+ case ETHERNET_TYPE_IP4:
+ return (VXLAN_GBP_INPUT_NEXT_IP4_INPUT);
+ case ETHERNET_TYPE_IP6:
+ return (VXLAN_GBP_INPUT_NEXT_IP6_INPUT);
+ }
+ }
+ return (VXLAN_GBP_INPUT_NEXT_DROP);
+}
+
+always_inline uword
+vxlan_gbp_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, u8 is_ip4)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vnet_main_t *vnm = vxm->vnet_main;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vlib_combined_counter_main_t *rx_counter =
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
+ vlib_combined_counter_main_t *drop_counter =
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
+ last_tunnel_cache4 last4;
+ last_tunnel_cache6 last6;
+ u32 pkts_decapsulated = 0;
+ u32 thread_index = vlib_get_thread_index ();
+
+ if (is_ip4)
+ clib_memset (&last4, 0xff, sizeof last4);
+ else
+ clib_memset (&last6, 0xff, sizeof last6);
+
+ u32 next_index = node->cached_next_index;
+
+ u32 *from = vlib_frame_vector_args (from_frame);
+ u32 n_left_from = from_frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 *to_next, n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ u32 bi0 = to_next[0] = from[0];
+ u32 bi1 = to_next[1] = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ vlib_buffer_t *b0, *b1;
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* udp leaves current_data pointing at the vxlan_gbp header */
+ void *cur0 = vlib_buffer_get_current (b0);
+ void *cur1 = vlib_buffer_get_current (b1);
+ vxlan_gbp_header_t *vxlan_gbp0 = cur0;
+ vxlan_gbp_header_t *vxlan_gbp1 = cur1;
+
+ ip4_header_t *ip4_0, *ip4_1;
+ ip6_header_t *ip6_0, *ip6_1;
+ if (is_ip4)
+ {
+ ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
+ ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
+ }
+ else
+ {
+ ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
+ ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
+ }
+
+ u32 fi0 = buf_fib_index (b0, is_ip4);
+ u32 fi1 = buf_fib_index (b1, is_ip4);
+
+ vxlan_gbp_tunnel_t *t0, *t1;
+ if (is_ip4)
+ {
+ t0 =
+ vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0);
+ t1 =
+ vxlan4_gbp_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan_gbp1);
+ }
+ else
+ {
+ t0 =
+ vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0);
+ t1 =
+ vxlan6_gbp_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan_gbp1);
+ }
+
+ u32 len0 = vlib_buffer_length_in_chain (vm, b0);
+ u32 len1 = vlib_buffer_length_in_chain (vm, b1);
+
+ vxlan_gbp_input_next_t next0, next1;
+ u8 error0 = 0, error1 = 0;
+ u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
+ u8 flags1 = vxlan_gbp_get_flags (vxlan_gbp1);
+ /* pop the vxlan_gbp header */
+ vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
+ vlib_buffer_advance (b1, sizeof *vxlan_gbp1);
+
+ u8 i_and_g0 = ((flags0 & VXLAN_GBP_FLAGS_GI) == VXLAN_GBP_FLAGS_GI);
+ u8 i_and_g1 = ((flags1 & VXLAN_GBP_FLAGS_GI) == VXLAN_GBP_FLAGS_GI);
+
+ /* Drop or punt unless the tunnel exists and both G and I flags are set */
+ if (PREDICT_FALSE (t0 == NULL || !i_and_g0))
+ {
+ if (t0 != NULL && !i_and_g0)
+ {
+ error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
+ vlib_increment_combined_counter
+ (drop_counter, thread_index, t0->sw_if_index, 1, len0);
+ next0 = VXLAN_GBP_INPUT_NEXT_DROP;
+ }
+ else
+ {
+ error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_GBP_INPUT_NEXT_PUNT;
+ if (is_ip4)
+ b0->punt_reason =
+ vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4];
+ else
+ b0->punt_reason =
+ vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6];
+ }
+ b0->error = node->errors[error0];
+ }
+ else
+ {
+ next0 = vxlan_gbp_tunnel_get_next (t0, b0);
+
+ /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
+ vlib_increment_combined_counter
+ (rx_counter, thread_index, t0->sw_if_index, 1, len0);
+ pkts_decapsulated++;
+ }
+
+ vnet_buffer2 (b0)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp0) |
+ VXLAN_GBP_GPFLAGS_R);
+ vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+
+ if (PREDICT_FALSE (t1 == NULL || !i_and_g1))
+ {
+ if (t1 != NULL && !i_and_g1)
+ {
+ error1 = VXLAN_GBP_ERROR_BAD_FLAGS;
+ vlib_increment_combined_counter
+ (drop_counter, thread_index, t1->sw_if_index, 1, len1);
+ next1 = VXLAN_GBP_INPUT_NEXT_DROP;
+ }
+ else
+ {
+ error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_GBP_INPUT_NEXT_PUNT;
+ if (is_ip4)
+ b1->punt_reason =
+ vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4];
+ else
+ b1->punt_reason =
+ vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6];
+ }
+ b1->error = node->errors[error1];
+ }
+ else
+ {
+ next1 = vxlan_gbp_tunnel_get_next (t1, b1);
+
+ /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
+ pkts_decapsulated++;
+
+ vlib_increment_combined_counter
+ (rx_counter, thread_index, t1->sw_if_index, 1, len1);
+ }
+
+ vnet_buffer2 (b1)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp1) |
+ VXLAN_GBP_GPFLAGS_R);
+
+ vnet_buffer2 (b1)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+ vnet_update_l2_len (b1);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gbp_rx_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
+ tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
+ tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+ }
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gbp_rx_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->next_index = next1;
+ tr->error = error1;
+ tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels;
+ tr->vni = vxlan_gbp_get_vni (vxlan_gbp1);
+ tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0 = to_next[0] = from[0];
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
+
+ /* udp leaves current_data pointing at the vxlan_gbp header */
+ void *cur0 = vlib_buffer_get_current (b0);
+ vxlan_gbp_header_t *vxlan_gbp0 = cur0;
+ ip4_header_t *ip4_0;
+ ip6_header_t *ip6_0;
+ if (is_ip4)
+ ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
+ else
+ ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
+
+ u32 fi0 = buf_fib_index (b0, is_ip4);
+
+ vxlan_gbp_tunnel_t *t0;
+ if (is_ip4)
+ t0 = vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0);
+ else
+ t0 = vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0);
+
+ uword len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ vxlan_gbp_input_next_t next0;
+ u8 error0 = 0;
+ u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
+
+ /* pop (ip, udp, vxlan_gbp) */
+ vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));
+
+ u8 i_and_g0 = ((flags0 & VXLAN_GBP_FLAGS_GI) == VXLAN_GBP_FLAGS_GI);
+
+ /* Drop or punt unless the tunnel exists and both G and I flags are set */
+ if (PREDICT_FALSE (t0 == NULL || !i_and_g0))
+ {
+ if (t0 != NULL && !i_and_g0)
+ {
+ error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
+ vlib_increment_combined_counter
+ (drop_counter, thread_index, t0->sw_if_index, 1, len0);
+ next0 = VXLAN_GBP_INPUT_NEXT_DROP;
+ }
+ else
+ {
+ error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_GBP_INPUT_NEXT_PUNT;
+ if (is_ip4)
+ b0->punt_reason =
+ vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4];
+ else
+ b0->punt_reason =
+ vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6];
+ }
+ b0->error = node->errors[error0];
+ }
+ else
+ {
+ next0 = vxlan_gbp_tunnel_get_next (t0, b0);
+ /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
+ pkts_decapsulated++;
+
+ vlib_increment_combined_counter
+ (rx_counter, thread_index, t0->sw_if_index, 1, len0);
+ }
+ vnet_buffer2 (b0)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp0) |
+ VXLAN_GBP_GPFLAGS_R);
+
+ vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gbp_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
+ tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
+ tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ /* Do we still need this now that tunnel rx stats are kept? */
+ u32 node_idx =
+ is_ip4 ? vxlan4_gbp_input_node.index : vxlan6_gbp_input_node.index;
+ vlib_node_increment_counter (vm, node_idx, VXLAN_GBP_ERROR_DECAPSULATED,
+ pkts_decapsulated);
+
+ return from_frame->n_vectors;
+}
+
+VLIB_NODE_FN (vxlan4_gbp_input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 1);
+}
+
+VLIB_NODE_FN (vxlan6_gbp_input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 0);
+}
+
+static char *vxlan_gbp_error_strings[] = {
+#define vxlan_gbp_error(n,s) s,
+#include <vnet/vxlan-gbp/vxlan_gbp_error.def>
+#undef vxlan_gbp_error
+};
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vxlan4_gbp_input_node) =
+{
+ .name = "vxlan4-gbp-input",
+ .vector_size = sizeof (u32),
+ .n_errors = VXLAN_GBP_N_ERROR,
+ .error_strings = vxlan_gbp_error_strings,
+ .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
+ .format_trace = format_vxlan_gbp_rx_trace,
+ .next_nodes = {
+#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
+ foreach_vxlan_gbp_input_next
+#undef _
+ },
+};
+
+VLIB_REGISTER_NODE (vxlan6_gbp_input_node) =
+{
+ .name = "vxlan6-gbp-input",
+ .vector_size = sizeof (u32),
+ .n_errors = VXLAN_GBP_N_ERROR,
+ .error_strings = vxlan_gbp_error_strings,
+ .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
+ foreach_vxlan_gbp_input_next
+#undef _
+ },
+ .format_trace = format_vxlan_gbp_rx_trace,
+};
+/* *INDENT-ON* */
+
+typedef enum
+{
+ IP_VXLAN_GBP_BYPASS_NEXT_DROP,
+ IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP,
+ IP_VXLAN_GBP_BYPASS_N_NEXT,
+} ip_vxlan_gbp_bypass_next_t;
+
+always_inline uword
+ip_vxlan_gbp_bypass_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, u32 is_ip4)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_input_node.index);
+ ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
+ ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ if (is_ip4)
+ addr4.data_u32 = ~0;
+ else
+ ip6_address_set_zero (&addr6);
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *b0, *b1;
+ ip4_header_t *ip40, *ip41;
+ ip6_header_t *ip60, *ip61;
+ udp_header_t *udp0, *udp1;
+ u32 bi0, ip_len0, udp_len0, flags0, next0;
+ u32 bi1, ip_len1, udp_len1, flags1, next1;
+ i32 len_diff0, len_diff1;
+ u8 error0, good_udp0, proto0;
+ u8 error1, good_udp1, proto1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+ from += 2;
+ n_left_from -= 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ if (is_ip4)
+ {
+ ip40 = vlib_buffer_get_current (b0);
+ ip41 = vlib_buffer_get_current (b1);
+ }
+ else
+ {
+ ip60 = vlib_buffer_get_current (b0);
+ ip61 = vlib_buffer_get_current (b1);
+ }
+
+ /* Setup packet for next IP feature */
+ vnet_feature_next (&next0, b0);
+ vnet_feature_next (&next1, b1);
+
+ if (is_ip4)
+ {
+ /* Treat IP frag packets as "experimental" protocol for now
+ until support of IP frag reassembly is implemented */
+ proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
+ proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
+ }
+ else
+ {
+ proto0 = ip60->protocol;
+ proto1 = ip61->protocol;
+ }
+
+ /* Process packet 0 */
+ if (proto0 != IP_PROTOCOL_UDP)
+ goto exit0; /* not UDP packet */
+
+ if (is_ip4)
+ udp0 = ip4_next_header (ip40);
+ else
+ udp0 = ip6_next_header (ip60);
+
+ if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
+ goto exit0; /* not VXLAN_GBP packet */
+
+ /* Validate DIP against VTEPs */
+ if (is_ip4)
+ {
+ if (addr4.as_u32 != ip40->dst_address.as_u32)
+ {
+ if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
+ goto exit0; /* no local VTEP for VXLAN_GBP packet */
+ addr4 = ip40->dst_address;
+ }
+ }
+ else
+ {
+ if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
+ {
+ if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
+ goto exit0; /* no local VTEP for VXLAN_GBP packet */
+ addr6 = ip60->dst_address;
+ }
+ }
+
+ flags0 = b0->flags;
+ good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
+
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_udp0 |= udp0->checksum == 0;
+
+ /* Verify UDP length */
+ if (is_ip4)
+ ip_len0 = clib_net_to_host_u16 (ip40->length);
+ else
+ ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
+ udp_len0 = clib_net_to_host_u16 (udp0->length);
+ len_diff0 = ip_len0 - udp_len0;
+
+ /* Verify UDP checksum */
+ if (PREDICT_FALSE (!good_udp0))
+ {
+ if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
+ {
+ if (is_ip4)
+ flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
+ else
+ flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
+ good_udp0 =
+ (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
+ }
+ }
+
+ if (is_ip4)
+ {
+ error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
+ }
+ else
+ {
+ error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
+ }
+
+ next0 = error0 ?
+ IP_VXLAN_GBP_BYPASS_NEXT_DROP :
+ IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
+ b0->error = error0 ? error_node->errors[error0] : 0;
+
+ /* the vxlan-gbp-input node expects current_data at the VXLAN_GBP header */
+ if (is_ip4)
+ vlib_buffer_advance (b0,
+ sizeof (ip4_header_t) +
+ sizeof (udp_header_t));
+ else
+ vlib_buffer_advance (b0,
+ sizeof (ip6_header_t) +
+ sizeof (udp_header_t));
+
+ exit0:
+ /* Process packet 1 */
+ if (proto1 != IP_PROTOCOL_UDP)
+ goto exit1; /* not UDP packet */
+
+ if (is_ip4)
+ udp1 = ip4_next_header (ip41);
+ else
+ udp1 = ip6_next_header (ip61);
+
+ if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
+ goto exit1; /* not VXLAN_GBP packet */
+
+ /* Validate DIP against VTEPs */
+ if (is_ip4)
+ {
+ if (addr4.as_u32 != ip41->dst_address.as_u32)
+ {
+ if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
+ goto exit1; /* no local VTEP for VXLAN_GBP packet */
+ addr4 = ip41->dst_address;
+ }
+ }
+ else
+ {
+ if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
+ {
+ if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
+ goto exit1; /* no local VTEP for VXLAN_GBP packet */
+ addr6 = ip61->dst_address;
+ }
+ }
+
+ flags1 = b1->flags;
+ good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
+
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_udp1 |= udp1->checksum == 0;
+
+ /* Verify UDP length */
+ if (is_ip4)
+ ip_len1 = clib_net_to_host_u16 (ip41->length);
+ else
+ ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
+ udp_len1 = clib_net_to_host_u16 (udp1->length);
+ len_diff1 = ip_len1 - udp_len1;
+
+ /* Verify UDP checksum */
+ if (PREDICT_FALSE (!good_udp1))
+ {
+ if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
+ {
+ if (is_ip4)
+ flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
+ else
+ flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
+ good_udp1 =
+ (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
+ }
+ }
+
+ if (is_ip4)
+ {
+ error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
+ error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
+ }
+ else
+ {
+ error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
+ error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
+ }
+
+ next1 = error1 ?
+ IP_VXLAN_GBP_BYPASS_NEXT_DROP :
+ IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
+ b1->error = error1 ? error_node->errors[error1] : 0;
+
+ /* the vxlan-gbp-input node expects current_data at the VXLAN_GBP header */
+ if (is_ip4)
+ vlib_buffer_advance (b1,
+ sizeof (ip4_header_t) +
+ sizeof (udp_header_t));
+ else
+ vlib_buffer_advance (b1,
+ sizeof (ip6_header_t) +
+ sizeof (udp_header_t));
+
+ exit1:
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ ip4_header_t *ip40;
+ ip6_header_t *ip60;
+ udp_header_t *udp0;
+ u32 bi0, ip_len0, udp_len0, flags0, next0;
+ i32 len_diff0;
+ u8 error0, good_udp0, proto0;
+
+ bi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ if (is_ip4)
+ ip40 = vlib_buffer_get_current (b0);
+ else
+ ip60 = vlib_buffer_get_current (b0);
+
+ /* Setup packet for next IP feature */
+ vnet_feature_next (&next0, b0);
+
+ if (is_ip4)
+ /* Treat IP4 frag packets as "experimental" protocol for now
+ until support of IP frag reassembly is implemented */
+ proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
+ else
+ proto0 = ip60->protocol;
+
+ if (proto0 != IP_PROTOCOL_UDP)
+ goto exit; /* not UDP packet */
+
+ if (is_ip4)
+ udp0 = ip4_next_header (ip40);
+ else
+ udp0 = ip6_next_header (ip60);
+
+ if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
+ goto exit; /* not VXLAN_GBP packet */
+
+ /* Validate DIP against VTEPs */
+ if (is_ip4)
+ {
+ if (addr4.as_u32 != ip40->dst_address.as_u32)
+ {
+ if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
+ goto exit; /* no local VTEP for VXLAN_GBP packet */
+ addr4 = ip40->dst_address;
+ }
+ }
+ else
+ {
+ if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
+ {
+ if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
+ goto exit; /* no local VTEP for VXLAN_GBP packet */
+ addr6 = ip60->dst_address;
+ }
+ }
+
+ flags0 = b0->flags;
+ good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
+
+ /* Don't verify UDP checksum for packets with explicit zero checksum. */
+ good_udp0 |= udp0->checksum == 0;
+
+ /* Verify UDP length */
+ if (is_ip4)
+ ip_len0 = clib_net_to_host_u16 (ip40->length);
+ else
+ ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
+ udp_len0 = clib_net_to_host_u16 (udp0->length);
+ len_diff0 = ip_len0 - udp_len0;
+
+ /* Verify UDP checksum */
+ if (PREDICT_FALSE (!good_udp0))
+ {
+ if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
+ {
+ if (is_ip4)
+ flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
+ else
+ flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
+ good_udp0 =
+ (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
+ }
+ }
+
+ if (is_ip4)
+ {
+ error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
+ }
+ else
+ {
+ error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
+ error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
+ }
+
+ next0 = error0 ?
+ IP_VXLAN_GBP_BYPASS_NEXT_DROP :
+ IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
+ b0->error = error0 ? error_node->errors[error0] : 0;
+
+ /* the vxlan-gbp-input node expects current_data at the VXLAN_GBP header */
+ if (is_ip4)
+ vlib_buffer_advance (b0,
+ sizeof (ip4_header_t) +
+ sizeof (udp_header_t));
+ else
+ vlib_buffer_advance (b0,
+ sizeof (ip6_header_t) +
+ sizeof (udp_header_t));
+
+ exit:
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (ip4_vxlan_gbp_bypass_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_vxlan_gbp_bypass_node) =
+{
+ .name = "ip4-vxlan-gbp-bypass",
+ .vector_size = sizeof (u32),
+ .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
+ .next_nodes = {
+ [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
+ [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan4-gbp-input",
+ },
+ .format_buffer = format_ip4_header,
+ .format_trace = format_ip4_forward_next_trace,
+};
+/* *INDENT-ON* */
+
+#ifndef CLIB_MARCH_VARIANT
+/* Dummy init function to get us linked in. */
+clib_error_t *
+ip4_vxlan_gbp_bypass_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip4_vxlan_gbp_bypass_init);
+#endif /* CLIB_MARCH_VARIANT */
+
+VLIB_NODE_FN (ip6_vxlan_gbp_bypass_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip6_vxlan_gbp_bypass_node) =
+{
+ .name = "ip6-vxlan-gbp-bypass",
+ .vector_size = sizeof (u32),
+ .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
+ .next_nodes = {
+ [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
+ [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan6-gbp-input",
+ },
+ .format_buffer = format_ip6_header,
+ .format_trace = format_ip6_forward_next_trace,
+};
+/* *INDENT-ON* */
+
+#ifndef CLIB_MARCH_VARIANT
+/* Dummy init function to get us linked in. */
+clib_error_t *
+ip6_vxlan_gbp_bypass_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ip6_vxlan_gbp_bypass_init);
+#endif /* CLIB_MARCH_VARIANT */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
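
Two details of the decap path above are worth spelling out. The IPv4 lookup packs a 16-byte bihash key with (dst << 32 | src) in one word and (fib_index << 32 | vni) in the other, keeping the VNI in network byte order and masking with 0xffffff00 because the 24-bit VNI occupies the top three bytes of the vni_reserved field. And a packet is only decapsulated when both the G (group policy present) and I (VNI valid) header bits are set; anything else is dropped or punted. A plain-C sketch of both operations, with stdint types standing in for the VPP ones:

    #include <stdint.h>
    #include <arpa/inet.h>          /* htonl */

    #define GBP_FLAG_G  0x80        /* group policy header present */
    #define GBP_FLAG_I  0x08        /* VNI field is valid */
    #define GBP_FLAGS_GI (GBP_FLAG_G | GBP_FLAG_I)

    typedef struct { uint64_t key[2]; } tunnel_key4_t;

    /* Pack the lookup key the way vxlan4_gbp_find_tunnel does;
     * sip/dip/vni_reserved stay in network byte order throughout. */
    static tunnel_key4_t
    make_key4 (uint32_t sip, uint32_t dip,
               uint32_t fib_index, uint32_t vni_reserved)
    {
      tunnel_key4_t k;
      k.key[0] = ((uint64_t) dip << 32) | sip;
      k.key[1] = ((uint64_t) fib_index << 32)
                 | (vni_reserved & htonl (0xffffff00));
      return k;
    }

    /* Mirror of the i_and_g test: both flags must be present. */
    static int
    gbp_flags_ok (uint8_t flags)
    {
      return (flags & GBP_FLAGS_GI) == GBP_FLAGS_GI;
    }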
diff --git a/extras/deprecated/vnet/vxlan-gbp/dir.dox b/extras/deprecated/vnet/vxlan-gbp/dir.dox
new file mode 100644
index 00000000000..6e63c90b17b
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/dir.dox
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+@dir
+@brief VXLAN-GBP Code.
+
+This directory contains source code to support VXLAN-GBP.
+
+*/
+/*? %%clicmd:group_label VXLAN-GBP CLI %% ?*/
diff --git a/extras/deprecated/vnet/vxlan-gbp/encap.c b/extras/deprecated/vnet/vxlan-gbp/encap.c
new file mode 100644
index 00000000000..2a4e8a8e312
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/encap.c
@@ -0,0 +1,601 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/interface_output.h>
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+#include <vnet/qos/qos_types.h>
+#include <vnet/adj/rewrite.h>
+
+/* Statistics (not all errors) */
+#define foreach_vxlan_gbp_encap_error \
+_(ENCAPSULATED, "good packets encapsulated")
+
+static char *vxlan_gbp_encap_error_strings[] = {
+#define _(sym,string) string,
+ foreach_vxlan_gbp_encap_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,
+ foreach_vxlan_gbp_encap_error
+#undef _
+ VXLAN_GBP_ENCAP_N_ERROR,
+} vxlan_gbp_encap_error_t;
+
+typedef enum
+{
+ VXLAN_GBP_ENCAP_NEXT_DROP,
+ VXLAN_GBP_ENCAP_N_NEXT,
+} vxlan_gbp_encap_next_t;
+
+typedef struct
+{
+ u32 tunnel_index;
+ u32 vni;
+ u16 sclass;
+ u8 flags;
+} vxlan_gbp_encap_trace_t;
+
+#ifndef CLIB_MARCH_VARIANT
+u8 *
+format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vxlan_gbp_encap_trace_t *t = va_arg (*args, vxlan_gbp_encap_trace_t *);
+
+ s =
+ format (s,
+ "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %U",
+ t->tunnel_index, t->vni, t->sclass,
+ format_vxlan_gbp_header_gpflags, t->flags);
+ return s;
+}
+#endif /* CLIB_MARCH_VARIANT */
+
+always_inline uword
+vxlan_gbp_encap_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vnet_main_t *vnm = vxm->vnet_main;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vlib_combined_counter_main_t *tx_counter =
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
+ u32 pkts_encapsulated = 0;
+ u32 thread_index = vlib_get_thread_index ();
+ u32 sw_if_index0 = 0, sw_if_index1 = 0;
+ u32 next0 = 0, next1 = 0;
+ vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
+ index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+
+ next_index = node->cached_next_index;
+
+ STATIC_ASSERT_SIZEOF (ip6_vxlan_gbp_header_t, 56);
+ STATIC_ASSERT_SIZEOF (ip4_vxlan_gbp_header_t, 36);
+
+ u8 const underlay_hdr_len = is_ip4 ?
+ sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
+ u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
+ u32 const csum_flags =
+ is_ip4 ? VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L4_HDR_OFFSET_VALID :
+ VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
+ u32 const outer_packet_csum_offload_flags =
+ is_ip4 ? VNET_BUFFER_OFFLOAD_F_IP_CKSUM | VNET_BUFFER_OFFLOAD_F_UDP_CKSUM :
+ VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
+ u32 const inner_packet_removed_flags =
+ VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_IS_IP6 |
+ VNET_BUFFER_F_L2_HDR_OFFSET_VALID | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
+ 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
+ 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ u32 bi0 = to_next[0] = from[0];
+ u32 bi1 = to_next[1] = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ u32 or_flags = b[0]->flags | b[1]->flags;
+ if (csum_offload && (or_flags & VNET_BUFFER_F_OFFLOAD))
+ {
+ /* Only calculate the non-GSO packet csum offload */
+ if ((b[0]->flags & VNET_BUFFER_F_GSO) == 0)
+ {
+ vnet_calc_checksums_inline (vm, b[0],
+ b[0]->flags &
+ VNET_BUFFER_F_IS_IP4,
+ b[0]->flags &
+ VNET_BUFFER_F_IS_IP6);
+ b[0]->flags &= ~inner_packet_removed_flags;
+ }
+ if ((b[1]->flags & VNET_BUFFER_F_GSO) == 0)
+ {
+ vnet_calc_checksums_inline (vm, b[1],
+ b[1]->flags &
+ VNET_BUFFER_F_IS_IP4,
+ b[1]->flags &
+ VNET_BUFFER_F_IS_IP6);
+ b[1]->flags &= ~inner_packet_removed_flags;
+ }
+ }
+
+ u32 flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
+ u32 flow_hash1 = vnet_l2_compute_flow_hash (b[1]);
+
+ /* Get next node index and adj index from tunnel next_dpo */
+ if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
+ {
+ sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ vnet_hw_interface_t *hi0 =
+ vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ t0 = &vxm->tunnels[hi0->dev_instance];
+ /* Note: change to always set next0 if it may be set to drop */
+ next0 = t0->next_dpo.dpoi_next_node;
+ dpoi_idx0 = t0->next_dpo.dpoi_index;
+ }
+
+ /* Get next node index and adj index from tunnel next_dpo */
+ if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
+ {
+ if (sw_if_index0 == vnet_buffer (b[1])->sw_if_index[VLIB_TX])
+ {
+ sw_if_index1 = sw_if_index0;
+ t1 = t0;
+ next1 = next0;
+ dpoi_idx1 = dpoi_idx0;
+ }
+ else
+ {
+ sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
+ vnet_hw_interface_t *hi1 =
+ vnet_get_sup_hw_interface (vnm, sw_if_index1);
+ t1 = &vxm->tunnels[hi1->dev_instance];
+ /* Note: change to always set next1 if it may be set to drop */
+ next1 = t1->next_dpo.dpoi_next_node;
+ dpoi_idx1 = t1->next_dpo.dpoi_index;
+ }
+ }
+
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpoi_idx0;
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpoi_idx1;
+
+ ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
+ ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
+ vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b[0]),
+ vlib_buffer_get_current (b[1]),
+ underlay_hdr_len);
+
+ vlib_buffer_advance (b[0], -underlay_hdr_len);
+ vlib_buffer_advance (b[1], -underlay_hdr_len);
+
+ u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
+ u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);
+ u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
+ u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);
+
+ void *underlay0 = vlib_buffer_get_current (b[0]);
+ void *underlay1 = vlib_buffer_get_current (b[1]);
+
+ ip4_header_t *ip4_0, *ip4_1;
+ qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
+ ip6_header_t *ip6_0, *ip6_1;
+ udp_header_t *udp0, *udp1;
+ vxlan_gbp_header_t *vxlan_gbp0, *vxlan_gbp1;
+ u8 *l3_0, *l3_1;
+ if (is_ip4)
+ {
+ ip4_vxlan_gbp_header_t *hdr0 = underlay0;
+ ip4_vxlan_gbp_header_t *hdr1 = underlay1;
+
+ /* Fix the IP4 checksum and length */
+ ip4_0 = &hdr0->ip4;
+ ip4_1 = &hdr1->ip4;
+ ip4_0->length = clib_host_to_net_u16 (len0);
+ ip4_1->length = clib_host_to_net_u16 (len1);
+
+ if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
+ {
+ ip4_0_tos = vnet_buffer2 (b[0])->qos.bits;
+ ip4_0->tos = ip4_0_tos;
+ }
+ if (PREDICT_FALSE (b[1]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
+ {
+ ip4_1_tos = vnet_buffer2 (b[1])->qos.bits;
+ ip4_1->tos = ip4_1_tos;
+ }
+
+ l3_0 = (u8 *) ip4_0;
+ l3_1 = (u8 *) ip4_1;
+ udp0 = &hdr0->udp;
+ udp1 = &hdr1->udp;
+ vxlan_gbp0 = &hdr0->vxlan_gbp;
+ vxlan_gbp1 = &hdr1->vxlan_gbp;
+ }
+ else /* ipv6 */
+ {
+ ip6_vxlan_gbp_header_t *hdr0 = underlay0;
+ ip6_vxlan_gbp_header_t *hdr1 = underlay1;
+
+ /* Fix IP6 payload length */
+ ip6_0 = &hdr0->ip6;
+ ip6_1 = &hdr1->ip6;
+ ip6_0->payload_length = payload_l0;
+ ip6_1->payload_length = payload_l1;
+
+ l3_0 = (u8 *) ip6_0;
+ l3_1 = (u8 *) ip6_1;
+ udp0 = &hdr0->udp;
+ udp1 = &hdr1->udp;
+ vxlan_gbp0 = &hdr0->vxlan_gbp;
+ vxlan_gbp1 = &hdr1->vxlan_gbp;
+ }
+
+ /* Fix UDP length and set source port */
+ udp0->length = payload_l0;
+ udp0->src_port = flow_hash0;
+ udp1->length = payload_l1;
+ udp1->src_port = flow_hash1;
+
+ /* set source class and gpflags */
+ vxlan_gbp0->gpflags = vnet_buffer2 (b[0])->gbp.flags;
+ vxlan_gbp1->gpflags = vnet_buffer2 (b[1])->gbp.flags;
+ vxlan_gbp0->sclass =
+ clib_host_to_net_u16 (vnet_buffer2 (b[0])->gbp.sclass);
+ vxlan_gbp1->sclass =
+ clib_host_to_net_u16 (vnet_buffer2 (b[1])->gbp.sclass);
+
+ if (csum_offload)
+ {
+ b[0]->flags |= csum_flags;
+ vnet_buffer (b[0])->l3_hdr_offset = l3_0 - b[0]->data;
+ vnet_buffer (b[0])->l4_hdr_offset = (u8 *) udp0 - b[0]->data;
+ vnet_buffer_offload_flags_set (b[0],
+ outer_packet_csum_offload_flags);
+ b[1]->flags |= csum_flags;
+ vnet_buffer (b[1])->l3_hdr_offset = l3_1 - b[1]->data;
+ vnet_buffer (b[1])->l4_hdr_offset = (u8 *) udp1 - b[1]->data;
+ vnet_buffer_offload_flags_set (b[1],
+ outer_packet_csum_offload_flags);
+ }
+ /* Without checksum offload, update the IPv4 header checksum incrementally */
+ else if (is_ip4)
+ {
+ ip_csum_t sum0 = ip4_0->checksum;
+ sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
+ length /* changed member */ );
+ if (PREDICT_FALSE (ip4_0_tos))
+ {
+ sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
+ tos /* changed member */ );
+ }
+ ip4_0->checksum = ip_csum_fold (sum0);
+ ip_csum_t sum1 = ip4_1->checksum;
+ sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
+ length /* changed member */ );
+ if (PREDICT_FALSE (ip4_1_tos))
+ {
+ sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
+ tos /* changed member */ );
+ }
+ ip4_1->checksum = ip_csum_fold (sum1);
+ }
+ /* IPv6 UDP checksum is mandatory */
+ else
+ {
+ int bogus = 0;
+
+ udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
+ (vm, b[0], ip6_0, &bogus);
+ ASSERT (bogus == 0);
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
+ (vm, b[1], ip6_1, &bogus);
+ ASSERT (bogus == 0);
+ if (udp1->checksum == 0)
+ udp1->checksum = 0xffff;
+ }
+
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+ vnet_buffer (b[1])->ip.flow_hash = flow_hash1;
+
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index0, 1, len0);
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index1, 1, len1);
+ pkts_encapsulated += 2;
+
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gbp_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ tr->tunnel_index = t0 - vxm->tunnels;
+ tr->vni = t0->vni;
+ tr->sclass = vnet_buffer2 (b[0])->gbp.sclass;
+ tr->flags = vnet_buffer2 (b[0])->gbp.flags;
+ }
+
+ if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gbp_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b[1], sizeof (*tr));
+ tr->tunnel_index = t1 - vxm->tunnels;
+ tr->vni = t1->vni;
+ tr->sclass = vnet_buffer2 (b[1])->gbp.sclass;
+ tr->flags = vnet_buffer2 (b[1])->gbp.flags;
+ }
+ b += 2;
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0 = to_next[0] = from[0];
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ if (csum_offload && (b[0]->flags & VNET_BUFFER_F_OFFLOAD))
+ {
+ /* Only calculate the non-GSO packet csum offload */
+ if ((b[0]->flags & VNET_BUFFER_F_GSO) == 0)
+ {
+ vnet_calc_checksums_inline (vm, b[0],
+ b[0]->flags &
+ VNET_BUFFER_F_IS_IP4,
+ b[0]->flags &
+ VNET_BUFFER_F_IS_IP6);
+ b[0]->flags &= ~inner_packet_removed_flags;
+ }
+ }
+
+ u32 flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
+
+ /* Get next node index and adj index from tunnel next_dpo */
+ if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
+ {
+ sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ vnet_hw_interface_t *hi0 =
+ vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ t0 = &vxm->tunnels[hi0->dev_instance];
+ /* Note: change to always set next0 if it may be set to drop */
+ next0 = t0->next_dpo.dpoi_next_node;
+ dpoi_idx0 = t0->next_dpo.dpoi_index;
+ }
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpoi_idx0;
+
+ ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
+ vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b[0]),
+ underlay_hdr_len);
+
+ vlib_buffer_advance (b[0], -underlay_hdr_len);
+ void *underlay0 = vlib_buffer_get_current (b[0]);
+
+ u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
+ u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
+
+ vxlan_gbp_header_t *vxlan_gbp0;
+ udp_header_t *udp0;
+ ip4_header_t *ip4_0;
+ qos_bits_t ip4_0_tos = 0;
+ ip6_header_t *ip6_0;
+ u8 *l3_0;
+ if (is_ip4)
+ {
+ ip4_vxlan_gbp_header_t *hdr = underlay0;
+
+ /* Fix the IP4 checksum and length */
+ ip4_0 = &hdr->ip4;
+ ip4_0->length = clib_host_to_net_u16 (len0);
+
+ if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
+ {
+ ip4_0_tos = vnet_buffer2 (b[0])->qos.bits;
+ ip4_0->tos = ip4_0_tos;
+ }
+
+ l3_0 = (u8 *) ip4_0;
+ udp0 = &hdr->udp;
+ vxlan_gbp0 = &hdr->vxlan_gbp;
+ }
+ else /* ip6 path */
+ {
+ ip6_vxlan_gbp_header_t *hdr = underlay0;
+
+ /* Fix IP6 payload length */
+ ip6_0 = &hdr->ip6;
+ ip6_0->payload_length = payload_l0;
+
+ l3_0 = (u8 *) ip6_0;
+ udp0 = &hdr->udp;
+ vxlan_gbp0 = &hdr->vxlan_gbp;
+ }
+
+ /* Fix UDP length and set source port */
+ udp0->length = payload_l0;
+ udp0->src_port = flow_hash0;
+
+ /* set source class and gpflags */
+ vxlan_gbp0->gpflags = vnet_buffer2 (b[0])->gbp.flags;
+ vxlan_gbp0->sclass =
+ clib_host_to_net_u16 (vnet_buffer2 (b[0])->gbp.sclass);
+
+ if (csum_offload)
+ {
+ b[0]->flags |= csum_flags;
+ vnet_buffer (b[0])->l3_hdr_offset = l3_0 - b[0]->data;
+ vnet_buffer (b[0])->l4_hdr_offset = (u8 *) udp0 - b[0]->data;
+ vnet_buffer_offload_flags_set (b[0],
+ outer_packet_csum_offload_flags);
+ }
+ /* Without checksum offload, update the IPv4 header checksum incrementally */
+ else if (is_ip4)
+ {
+ ip_csum_t sum0 = ip4_0->checksum;
+ sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
+ length /* changed member */ );
+ if (PREDICT_FALSE (ip4_0_tos))
+ {
+ sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
+ tos /* changed member */ );
+ }
+ ip4_0->checksum = ip_csum_fold (sum0);
+ }
+ /* IPv6 UDP checksum is mandatory */
+ else
+ {
+ int bogus = 0;
+
+ udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
+ (vm, b[0], ip6_0, &bogus);
+ ASSERT (bogus == 0);
+ if (udp0->checksum == 0)
+ udp0->checksum = 0xffff;
+ }
+
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index0, 1, len0);
+ pkts_encapsulated++;
+
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gbp_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ tr->tunnel_index = t0 - vxm->tunnels;
+ tr->vni = t0->vni;
+ tr->sclass = vnet_buffer2 (b[0])->gbp.sclass;
+ tr->flags = vnet_buffer2 (b[0])->gbp.flags;
+ }
+ b += 1;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Do we still need this now that tunnel tx stats is kept? */
+ vlib_node_increment_counter (vm, node->node_index,
+ VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
+ pkts_encapsulated);
+
+ return from_frame->n_vectors;
+}
+
+VLIB_NODE_FN (vxlan4_gbp_encap_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ /* Disable checksum offload, as the setup overhead in the tx node is not
+ worthwhile for the ip4 header checksum alone, unless the udp checksum
+ is also required */
+ return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
+ /* csum_offload */ 0);
+}
+
+VLIB_NODE_FN (vxlan6_gbp_encap_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ /* Enable checksum offload for ip6, as the udp checksum is mandatory */
+ return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
+ /* csum_offload */ 1);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
+{
+ .name = "vxlan4-gbp-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_vxlan_gbp_encap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
+ .error_strings = vxlan_gbp_encap_error_strings,
+ .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
+ .next_nodes = {
+ [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
+{
+ .name = "vxlan6-gbp-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_vxlan_gbp_encap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
+ .error_strings = vxlan_gbp_encap_error_strings,
+ .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
+ .next_nodes = {
+ [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
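
When checksum offload is not enabled, the encap path above never recomputes the full IPv4 header checksum. The tunnel rewrite template was built with zero length and tos fields, so only the delta for the fields that actually changed is folded into the precomputed checksum, in the style of RFC 1624. A standalone sketch of that incremental update, assuming 16-bit header fields in network byte order (the helper name is illustrative, not the VPP API):

    #include <stdint.h>

    /* RFC 1624 incremental update: HC' = ~(~HC + ~m + m'). All values
     * are 16-bit fields as they appear in the header. */
    static uint16_t
    csum_update16 (uint16_t csum, uint16_t old_val, uint16_t new_val)
    {
      uint32_t sum = (uint16_t) ~csum;

      sum += (uint16_t) ~old_val;
      sum += new_val;

      while (sum >> 16)             /* fold carries back into 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);

      return (uint16_t) ~sum;
    }

    /* Usage matching the node above, where the rewrite template carries
     * a zero length:
     *   ip4->checksum = csum_update16 (ip4->checksum, 0, ip4->length);
     */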
diff --git a/extras/deprecated/vnet/vxlan-gbp/test_vxlan_gbp.py b/extras/deprecated/vnet/vxlan-gbp/test_vxlan_gbp.py
new file mode 100644
index 00000000000..f332aced7d8
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/test_vxlan_gbp.py
@@ -0,0 +1,293 @@
+#!/usr/bin/env python3
+
+import socket
+from util import ip4_range, reassemble4_ether
+import unittest
+from framework import VppTestCase, VppTestRunner
+from template_bd import BridgeDomain
+
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from scapy.layers.inet import IP, UDP
+from scapy.layers.vxlan import VXLAN
+
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
+
+
+class TestVxlanGbp(VppTestCase):
+ """ VXLAN GBP Test Case """
+
+ @property
+ def frame_request(self):
+ """ Ethernet frame modeling a generic request """
+ return (Ether(src='00:00:00:00:00:01', dst='00:00:00:00:00:02') /
+ IP(src='1.2.3.4', dst='4.3.2.1') /
+ UDP(sport=10000, dport=20000) /
+ Raw(b'\xa5' * 100))
+
+ @property
+ def frame_reply(self):
+ """ Ethernet frame modeling a generic reply """
+ return (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
+ IP(src='4.3.2.1', dst='1.2.3.4') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 100))
+
+ def encapsulate(self, pkt, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN GBP header with
+ its UDP, IP and Ethernet fields
+ """
+ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags, gpflags=self.gpflags,
+ gpid=self.sclass) / pkt)
+
+ def ip_range(self, start, end):
+ """ range of remote ip's """
+ return ip4_range(self.pg0.remote_ip4, start, end)
+
+ def decapsulate(self, pkt):
+ """
+ Decapsulate the original payload frame by removing the VXLAN header
+ """
+ # check that the G and I flags are set
+ self.assertEqual(pkt[VXLAN].flags, int('0x88', 16))
+ return pkt[VXLAN].payload
+
+ # Method for checking VXLAN GBP encapsulation.
+ #
+ def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
+ # TODO: add error messages
+ # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
+ # by VPP using ARP.
+ self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
+ else:
+ self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
+ # Verify VXLAN GBP tunnel source IP is VPP_IP and destination IP is
+ # MY_IP.
+ self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
+ else:
+ self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
+ # Verify UDP destination port is VXLAN GBP 48879, source UDP port could
+ # be arbitrary.
+ self.assertEqual(pkt[UDP].dport, type(self).dport)
+ # Verify UDP checksum
+ self.assert_udp_checksum_valid(pkt)
+ # Verify VNI
+ # pkt.show()
+ self.assertEqual(pkt[VXLAN].vni, vni)
+ # Verify Source Class
+ self.assertEqual(pkt[VXLAN].gpid, 0)
+
+ @classmethod
+ def create_vxlan_gbp_flood_test_bd(cls, vni, n_ucast_tunnels):
+ # Create 2 ucast vxlan tunnels under bd
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_ucast_tunnels
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(cls.pg0.remote_ip4,
+ ip_range_start,
+ ip_range_end):
+ # add host route so dest_ip4 will not be resolved
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+ r = cls.vapi.vxlan_gbp_tunnel_add_del(
+ tunnel={
+ 'src': cls.pg0.local_ip4,
+ 'dst': dest_ip4,
+ 'vni': vni,
+ 'instance': INVALID_INDEX,
+ 'mcast_sw_if_index': INVALID_INDEX,
+ 'mode': 1,
+ },
+ is_add=1
+ )
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=vni)
+
+ # Class method to start the VXLAN GBP test case.
+ # Overrides setUpClass method in VppTestCase class.
+ # A Python try..except statement is used to ensure that the tear down
+ # of the class will be executed even if an exception is raised.
+ # @param cls The class pointer.
+ @classmethod
+ def setUpClass(cls):
+ super(TestVxlanGbp, cls).setUpClass()
+
+ try:
+ cls.dport = 48879
+ cls.flags = 0x88
+ cls.gpflags = 0x0
+ cls.sclass = 0
+
+ # Create 4 pg interfaces.
+ cls.create_pg_interfaces(range(4))
+ for pg in cls.pg_interfaces:
+ pg.admin_up()
+
+ # Configure IPv4 addresses on VPP pg0.
+ cls.pg0.config_ip4()
+
+ # Resolve MAC address for VPP's IP address on pg0.
+ cls.pg0.resolve_arp()
+
+ # Create VXLAN GBP VTEP on VPP pg0, and put vxlan_gbp_tunnel0 and
+ # pg1 into BD.
+ cls.single_tunnel_bd = 1
+ cls.single_tunnel_vni = 0xabcde
+ r = cls.vapi.vxlan_gbp_tunnel_add_del(
+ tunnel={
+ 'src': cls.pg0.local_ip4,
+ 'dst': cls.pg0.remote_ip4,
+ 'vni': cls.single_tunnel_vni,
+ 'instance': INVALID_INDEX,
+ 'mcast_sw_if_index': INVALID_INDEX,
+ 'mode': 1,
+ },
+ is_add=1
+ )
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=cls.single_tunnel_bd)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg1.sw_if_index,
+ bd_id=cls.single_tunnel_bd)
+
+ # Two unicast tunnels are used for the flooding tests
+ cls.n_ucast_tunnels = 2
+ # Setup vni 3 to test unicast flooding
+ cls.ucast_flood_bd = 3
+ cls.create_vxlan_gbp_flood_test_bd(cls.ucast_flood_bd,
+ cls.n_ucast_tunnels)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg3.sw_if_index,
+ bd_id=cls.ucast_flood_bd)
+ except Exception:
+ super(TestVxlanGbp, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestVxlanGbp, cls).tearDownClass()
+
+ def assert_eq_pkts(self, pkt1, pkt2):
+ """ Verify the Ether, IP, UDP, payload are equal in both
+ packets
+ """
+ self.assertEqual(pkt1[Ether].src, pkt2[Ether].src)
+ self.assertEqual(pkt1[Ether].dst, pkt2[Ether].dst)
+ self.assertEqual(pkt1[IP].src, pkt2[IP].src)
+ self.assertEqual(pkt1[IP].dst, pkt2[IP].dst)
+ self.assertEqual(pkt1[UDP].sport, pkt2[UDP].sport)
+ self.assertEqual(pkt1[UDP].dport, pkt2[UDP].dport)
+ self.assertEqual(pkt1[Raw], pkt2[Raw])
+
+ def test_decap(self):
+ """ Decapsulation test
+ Send encapsulated frames from pg0
+ Verify receipt of decapsulated frames on pg1
+ """
+ encapsulated_pkt = self.encapsulate(self.frame_request,
+ self.single_tunnel_vni)
+
+ self.pg0.add_stream([encapsulated_pkt, ])
+
+ self.pg1.enable_capture()
+
+ self.pg_start()
+
+ # Pick first received frame and check if it's the non-encapsulated
+ # frame
+ out = self.pg1.get_capture(1)
+ pkt = out[0]
+ self.assert_eq_pkts(pkt, self.frame_request)
+
+ def test_encap(self):
+ """ Encapsulation test
+ Send frames from pg1
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg1.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+ # Pick first received frame and check if it's correctly encapsulated.
+ out = self.pg0.get_capture(1)
+ pkt = out[0]
+ self.check_encapsulation(pkt, self.single_tunnel_vni)
+
+ payload = self.decapsulate(pkt)
+ self.assert_eq_pkts(payload, self.frame_reply)
+
+ def test_ucast_flood(self):
+ """ Unicast flood test
+ Send frames from pg3
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg3.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+ # Get packet from each tunnel and assert it's correctly encapsulated.
+ out = self.pg0.get_capture(self.n_ucast_tunnels)
+ for pkt in out:
+ self.check_encapsulation(pkt, self.ucast_flood_bd, True)
+ payload = self.decapsulate(pkt)
+ self.assert_eq_pkts(payload, self.frame_reply)
+
+ def test_encap_big_packet(self):
+ """ Encapsulation test send big frame from pg1
+ Verify receipt of encapsulated frames on pg0
+ """
+
+ self.vapi.sw_interface_set_mtu(self.pg0.sw_if_index, [1500, 0, 0, 0])
+
+ frame = (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
+ IP(src='4.3.2.1', dst='1.2.3.4') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 1450))
+
+ self.pg1.add_stream([frame])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+ # Pick first received frame and check if it's correctly encapsulated.
+ out = self.pg0.get_capture(2)
+ pkt = reassemble4_ether(out)
+ self.check_encapsulation(pkt, self.single_tunnel_vni)
+
+ payload = self.decapsulate(pkt)
+ self.assert_eq_pkts(payload, frame)
+
+ # Method to define VPP actions before tear down of the test case.
+ # Overrides tearDown method in VppTestCase class.
+ def tearDown(self):
+ super(TestVxlanGbp, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
+ self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+ self.logger.info(self.vapi.cli("show error"))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
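
The magic constants in the test map directly onto the wire format exercised above: UDP destination port 48879 (0xBEEF) is the VXLAN-GBP port the test asserts on, and the expected flag byte 0x88 is exactly G|I, the combination the decap node requires before accepting a packet. A tiny self-checking sketch, kept in C for consistency with the data-plane code above (the macro names are assumptions, not the plugin's own):

    #include <assert.h>
    #include <stdint.h>

    #define VXLAN_GBP_UDP_PORT 48879   /* 0xBEEF, asserted by the test */
    #define VXLAN_FLAG_G 0x80          /* group based policy extension */
    #define VXLAN_FLAG_I 0x08          /* VNI present */

    int
    main (void)
    {
      uint8_t flags = 0x88;            /* value the test expects on the wire */

      /* 0x88 decomposes into exactly G|I - the same condition the
       * decap node enforces before decapsulating. */
      assert (flags == (VXLAN_FLAG_G | VXLAN_FLAG_I));
      assert (VXLAN_GBP_UDP_PORT == 0xBEEF);
      return 0;
    }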
diff --git a/extras/deprecated/vnet/vxlan-gbp/vpp_vxlan_gbp_tunnel.py b/extras/deprecated/vnet/vxlan-gbp/vpp_vxlan_gbp_tunnel.py
new file mode 100644
index 00000000000..0898bd9f810
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/vpp_vxlan_gbp_tunnel.py
@@ -0,0 +1,75 @@
+
+from vpp_interface import VppInterface
+from vpp_papi import VppEnum
+
+
+INDEX_INVALID = 0xffffffff
+
+
+def find_vxlan_gbp_tunnel(test, src, dst, vni):
+ ts = test.vapi.vxlan_gbp_tunnel_dump(INDEX_INVALID)
+ for t in ts:
+ if src == str(t.tunnel.src) and \
+ dst == str(t.tunnel.dst) and \
+ t.tunnel.vni == vni:
+ return t.tunnel.sw_if_index
+ return INDEX_INVALID
+
+
+class VppVxlanGbpTunnel(VppInterface):
+ """
+ VPP VXLAN GBP interface
+ """
+
+ def __init__(self, test, src, dst, vni, mcast_itf=None, mode=None,
+ is_ipv6=None, encap_table_id=None, instance=0xffffffff):
+ """ Create VXLAN-GBP Tunnel interface """
+ super(VppVxlanGbpTunnel, self).__init__(test)
+ self.src = src
+ self.dst = dst
+ self.vni = vni
+ self.mcast_itf = mcast_itf
+ self.ipv6 = is_ipv6
+ self.encap_table_id = encap_table_id
+ self.instance = instance
+ if not mode:
+ self.mode = (VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L2)
+ else:
+ self.mode = mode
+
+ def encode(self):
+ return {
+ 'src': self.src,
+ 'dst': self.dst,
+ 'mode': self.mode,
+ 'vni': self.vni,
+ 'mcast_sw_if_index': self.mcast_itf.sw_if_index
+ if self.mcast_itf else INDEX_INVALID,
+ 'encap_table_id': self.encap_table_id,
+ 'instance': self.instance,
+ }
+
+ def add_vpp_config(self):
+ reply = self.test.vapi.vxlan_gbp_tunnel_add_del(
+ is_add=1,
+ tunnel=self.encode(),
+ )
+ self.set_sw_if_index(reply.sw_if_index)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self.test.vapi.vxlan_gbp_tunnel_add_del(
+ is_add=0,
+ tunnel=self.encode(),
+ )
+
+ def query_vpp_config(self):
+ return (INDEX_INVALID != find_vxlan_gbp_tunnel(self._test,
+ self.src,
+ self.dst,
+ self.vni))
+
+ def object_id(self):
+ return "vxlan-gbp-%d-%d-%s-%s" % (self.sw_if_index, self.vni,
+ self.src, self.dst)
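+
+# A minimal usage sketch (illustrative only, assuming a VppTestCase-derived
+# `test` object with a configured pg0 interface; the addresses and vni are
+# placeholders, not part of this module):
+#
+#     tun = VppVxlanGbpTunnel(test, src=test.pg0.local_ip4,
+#                             dst=test.pg0.remote_ip4, vni=101)
+#     tun.add_vpp_config()     # create the tunnel, register it for cleanup
+#     assert tun.query_vpp_config()
+#     tun.remove_vpp_config()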
diff --git a/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.api b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.api
new file mode 100644
index 00000000000..68566697000
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.api
@@ -0,0 +1,100 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option version = "1.1.1";
+import "vnet/ip/ip_types.api";
+import "vnet/interface_types.api";
+
+enum vxlan_gbp_api_tunnel_mode
+{
+ VXLAN_GBP_API_TUNNEL_MODE_L2,
+ VXLAN_GBP_API_TUNNEL_MODE_L3,
+};
+
+/** \brief Definition of a VXLAN GBP tunnel
+ @param instance - optional unique custom device instance, else ~0.
+ @param src - Source IP address
+ @param dst - Destination IP address, can be multicast
+ @param mcast_sw_if_index - Interface for multicast destination
+ @param encap_table_id - Encap route table
+ @param vni - The VXLAN Network Identifier, uint24
+    @param sw_if_index - Ignored in add message, set in details
+*/
+typedef vxlan_gbp_tunnel
+{
+ u32 instance;
+ vl_api_address_t src;
+ vl_api_address_t dst;
+ vl_api_interface_index_t mcast_sw_if_index;
+ u32 encap_table_id;
+ u32 vni;
+ vl_api_interface_index_t sw_if_index;
+ vl_api_vxlan_gbp_api_tunnel_mode_t mode;
+};
+
+/** \brief Create or delete a VXLAN-GBP tunnel
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_add - Use 1 to create the tunnel, 0 to remove it
+*/
+define vxlan_gbp_tunnel_add_del
+{
+ u32 client_index;
+ u32 context;
+ bool is_add [default=true];
+ vl_api_vxlan_gbp_tunnel_t tunnel;
+ option in_progress;
+};
+
+define vxlan_gbp_tunnel_add_del_reply
+{
+ u32 context;
+ i32 retval;
+ vl_api_interface_index_t sw_if_index;
+ option in_progress;
+};
+
+define vxlan_gbp_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index [default=0xffffffff];
+ option in_progress;
+};
+
+define vxlan_gbp_tunnel_details
+{
+ u32 context;
+ vl_api_vxlan_gbp_tunnel_t tunnel;
+ option in_progress;
+};
+
+/** \brief Interface set vxlan-gbp-bypass request
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param sw_if_index - interface used to reach neighbor
+    @param is_ipv6 - if non-zero, enable ipv6-vxlan-gbp-bypass, else ipv4-vxlan-gbp-bypass
+ @param enable - if non-zero enable, else disable
+*/
+autoreply define sw_interface_set_vxlan_gbp_bypass
+{
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+ bool is_ipv6;
+ bool enable [default=true];
+ option in_progress;
+};
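+
+/* A minimal client-side sketch (illustrative only, assuming a connected
+ * vpp_papi client `vapi`; the field names mirror the vxlan_gbp_tunnel
+ * type above, the addresses and vni are placeholders):
+ *
+ *   mode = VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.VXLAN_GBP_API_TUNNEL_MODE_L2
+ *   rv = vapi.vxlan_gbp_tunnel_add_del(
+ *       is_add=1,
+ *       tunnel={'src': '10.0.3.1', 'dst': '10.0.3.3', 'vni': 13,
+ *               'mcast_sw_if_index': 0xffffffff, 'encap_table_id': 0,
+ *               'instance': 0xffffffff, 'mode': mode})
+ *   sw_if_index = rv.sw_if_index
+ */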
diff --git a/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.c b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.c
new file mode 100644
index 00000000000..eb685b8a40c
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.c
@@ -0,0 +1,1193 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/punt.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry_track.h>
+#include <vnet/mfib/mfib_table.h>
+#include <vnet/adj/adj_mcast.h>
+#include <vnet/adj/rewrite.h>
+#include <vnet/interface.h>
+#include <vlib/vlib.h>
+
+/**
+ * @file
+ * @brief VXLAN GBP.
+ *
+ * VXLAN-GBP provides the features of VXLAN and additionally carries a group policy ID.
+ */
+static vlib_punt_hdl_t punt_hdl;
+
+vxlan_gbp_main_t vxlan_gbp_main;
+
+u8 *
+format_vxlan_gbp_tunnel_mode (u8 * s, va_list * args)
+{
+ vxlan_gbp_tunnel_mode_t mode = va_arg (*args, vxlan_gbp_tunnel_mode_t);
+
+ switch (mode)
+ {
+ case VXLAN_GBP_TUNNEL_MODE_L2:
+ s = format (s, "L2");
+ break;
+ case VXLAN_GBP_TUNNEL_MODE_L3:
+ s = format (s, "L3");
+ break;
+ }
+ return (s);
+}
+
+u8 *
+format_vxlan_gbp_tunnel (u8 * s, va_list * args)
+{
+ vxlan_gbp_tunnel_t *t = va_arg (*args, vxlan_gbp_tunnel_t *);
+
+ s = format (s,
+ "[%d] instance %d src %U dst %U vni %d fib-idx %d"
+ " sw-if-idx %d mode %U ",
+ t->dev_instance, t->user_instance,
+ format_ip46_address, &t->src, IP46_TYPE_ANY,
+ format_ip46_address, &t->dst, IP46_TYPE_ANY,
+ t->vni, t->encap_fib_index, t->sw_if_index,
+ format_vxlan_gbp_tunnel_mode, t->mode);
+
+ s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index);
+
+ if (PREDICT_FALSE (ip46_address_is_multicast (&t->dst)))
+ s = format (s, "mcast-sw-if-idx %d ", t->mcast_sw_if_index);
+
+ return s;
+}
+
+static u8 *
+format_vxlan_gbp_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vxlan_gbp_tunnel_t *t;
+
+ if (dev_instance == ~0)
+ return format (s, "<cached-unused>");
+
+ if (dev_instance >= vec_len (vxm->tunnels))
+ return format (s, "<improperly-referenced>");
+
+ t = pool_elt_at_index (vxm->tunnels, dev_instance);
+
+ return format (s, "vxlan_gbp_tunnel%d", t->user_instance);
+}
+
+static clib_error_t *
+vxlan_gbp_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
+{
+ u32 hw_flags = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
+ VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
+
+ return /* no error */ 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (vxlan_gbp_device_class, static) = {
+ .name = "VXLAN-GBP",
+ .format_device_name = format_vxlan_gbp_name,
+ .format_tx_trace = format_vxlan_gbp_encap_trace,
+ .admin_up_down_function = vxlan_gbp_interface_admin_up_down,
+};
+/* *INDENT-ON* */
+
+static u8 *
+format_vxlan_gbp_header_with_length (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ s = format (s, "unimplemented dev %u", dev_instance);
+ return s;
+}
+
+/* *INDENT-OFF* */
+VNET_HW_INTERFACE_CLASS (vxlan_gbp_hw_class) = {
+ .name = "VXLAN-GBP",
+ .format_header = format_vxlan_gbp_header_with_length,
+ .build_rewrite = default_build_rewrite,
+};
+/* *INDENT-ON* */
+
+static void
+vxlan_gbp_tunnel_restack_dpo (vxlan_gbp_tunnel_t * t)
+{
+ u8 is_ip4 = ip46_address_is_ip4 (&t->dst);
+ dpo_id_t dpo = DPO_INVALID;
+ fib_forward_chain_type_t forw_type = is_ip4 ?
+ FIB_FORW_CHAIN_TYPE_UNICAST_IP4 : FIB_FORW_CHAIN_TYPE_UNICAST_IP6;
+
+ fib_entry_contribute_forwarding (t->fib_entry_index, forw_type, &dpo);
+
+  /* vxlan_gbp uses the payload hash as the udp source port, so the
+   * packet's hash is not known at this point; skip over single-bucket
+   * load-balance DPOs rather than resolving through them */
+ while (DPO_LOAD_BALANCE == dpo.dpoi_type)
+ {
+ load_balance_t *lb = load_balance_get (dpo.dpoi_index);
+ if (lb->lb_n_buckets > 1)
+ break;
+
+ dpo_copy (&dpo, load_balance_get_bucket_i (lb, 0));
+ }
+
+ u32 encap_index = is_ip4 ?
+ vxlan4_gbp_encap_node.index : vxlan6_gbp_encap_node.index;
+ dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
+ dpo_reset (&dpo);
+}
+
+static vxlan_gbp_tunnel_t *
+vxlan_gbp_tunnel_from_fib_node (fib_node_t * node)
+{
+ ASSERT (FIB_NODE_TYPE_VXLAN_GBP_TUNNEL == node->fn_type);
+ return ((vxlan_gbp_tunnel_t *) (((char *) node) -
+ STRUCT_OFFSET_OF (vxlan_gbp_tunnel_t,
+ node)));
+}
+
+/**
+ * Function definition to backwalk a FIB node -
+ * Here we will restack the new dpo of VXLAN DIP to encap node.
+ */
+static fib_node_back_walk_rc_t
+vxlan_gbp_tunnel_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
+{
+ vxlan_gbp_tunnel_restack_dpo (vxlan_gbp_tunnel_from_fib_node (node));
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index
+ */
+static fib_node_t *
+vxlan_gbp_tunnel_fib_node_get (fib_node_index_t index)
+{
+ vxlan_gbp_tunnel_t *t;
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+
+ t = pool_elt_at_index (vxm->tunnels, index);
+
+ return (&t->node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+static void
+vxlan_gbp_tunnel_last_lock_gone (fib_node_t * node)
+{
+ /*
+ * The VXLAN GBP tunnel is a root of the graph. As such
+ * it never has children and thus is never locked.
+ */
+ ASSERT (0);
+}
+
+/*
+ * Virtual function table registered by VXLAN GBP tunnels
+ * for participation in the FIB object graph.
+ */
+const static fib_node_vft_t vxlan_gbp_vft = {
+ .fnv_get = vxlan_gbp_tunnel_fib_node_get,
+ .fnv_last_lock = vxlan_gbp_tunnel_last_lock_gone,
+ .fnv_back_walk = vxlan_gbp_tunnel_back_walk,
+};
+
+
+#define foreach_copy_field \
+_(vni) \
+_(mode) \
+_(mcast_sw_if_index) \
+_(encap_fib_index) \
+_(src) \
+_(dst)
+
+static void
+vxlan_gbp_rewrite (vxlan_gbp_tunnel_t * t, bool is_ip6)
+{
+ union
+ {
+ ip4_vxlan_gbp_header_t h4;
+ ip6_vxlan_gbp_header_t h6;
+ } h;
+ int len = is_ip6 ? sizeof h.h6 : sizeof h.h4;
+
+ udp_header_t *udp;
+ vxlan_gbp_header_t *vxlan_gbp;
+ /* Fixed portion of the (outer) ip header */
+
+ clib_memset (&h, 0, sizeof (h));
+ if (!is_ip6)
+ {
+ ip4_header_t *ip = &h.h4.ip4;
+ udp = &h.h4.udp, vxlan_gbp = &h.h4.vxlan_gbp;
+ ip->ip_version_and_header_length = 0x45;
+ ip->ttl = 254;
+ ip->protocol = IP_PROTOCOL_UDP;
+
+ ip->src_address = t->src.ip4;
+ ip->dst_address = t->dst.ip4;
+
+ /* we fix up the ip4 header length and checksum after-the-fact */
+ ip->checksum = ip4_header_checksum (ip);
+ }
+ else
+ {
+ ip6_header_t *ip = &h.h6.ip6;
+ udp = &h.h6.udp, vxlan_gbp = &h.h6.vxlan_gbp;
+ ip->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (6 << 28);
+ ip->hop_limit = 255;
+ ip->protocol = IP_PROTOCOL_UDP;
+
+ ip->src_address = t->src.ip6;
+ ip->dst_address = t->dst.ip6;
+ }
+
+  /* UDP header; this source port is only a template value, the actual
+   * source port is derived per-packet from the payload hash (see the
+   * dpo restack comment above) */
+ udp->src_port = clib_host_to_net_u16 (47789);
+ udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp);
+
+ /* VXLAN header */
+ vxlan_gbp_set_header (vxlan_gbp, t->vni);
+ vnet_rewrite_set_data (*t, &h, len);
+}
+
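+/* Reference counting of local VTEP addresses: the vtep4/vtep6 hashes let
+ * the vxlan-gbp-bypass nodes check whether a received packet's destination
+ * IP matches a local tunnel endpoint; the stored count tracks how many
+ * tunnels share each address. */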
+static uword
+vtep_addr_ref (ip46_address_t * ip)
+{
+ uword *vtep = ip46_address_is_ip4 (ip) ?
+ hash_get (vxlan_gbp_main.vtep4, ip->ip4.as_u32) :
+ hash_get_mem (vxlan_gbp_main.vtep6, &ip->ip6);
+ if (vtep)
+ return ++(*vtep);
+ ip46_address_is_ip4 (ip) ?
+ hash_set (vxlan_gbp_main.vtep4, ip->ip4.as_u32, 1) :
+ hash_set_mem_alloc (&vxlan_gbp_main.vtep6, &ip->ip6, 1);
+ return 1;
+}
+
+static uword
+vtep_addr_unref (ip46_address_t * ip)
+{
+ uword *vtep = ip46_address_is_ip4 (ip) ?
+ hash_get (vxlan_gbp_main.vtep4, ip->ip4.as_u32) :
+ hash_get_mem (vxlan_gbp_main.vtep6, &ip->ip6);
+ ALWAYS_ASSERT (vtep);
+ if (--(*vtep) != 0)
+ return *vtep;
+ ip46_address_is_ip4 (ip) ?
+ hash_unset (vxlan_gbp_main.vtep4, ip->ip4.as_u32) :
+ hash_unset_mem_free (&vxlan_gbp_main.vtep6, &ip->ip6);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(union
+{
+ struct
+ {
+ fib_node_index_t mfib_entry_index;
+ adj_index_t mcast_adj_index;
+ };
+ u64 as_u64;
+}) mcast_shared_t;
+/* *INDENT-ON* */
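+/* The union above packs the (mfib entry, mcast adjacency) index pair into
+ * a single u64 so that it can be stored directly as the value of the
+ * mcast_shared hash, keyed on the mcast destination address. */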
+
+static inline mcast_shared_t
+mcast_shared_get (ip46_address_t * ip)
+{
+ ASSERT (ip46_address_is_multicast (ip));
+ uword *p = hash_get_mem (vxlan_gbp_main.mcast_shared, ip);
+ ALWAYS_ASSERT (p);
+ mcast_shared_t ret = {.as_u64 = *p };
+ return ret;
+}
+
+static inline void
+mcast_shared_add (ip46_address_t * dst, fib_node_index_t mfei, adj_index_t ai)
+{
+ mcast_shared_t new_ep = {
+ .mcast_adj_index = ai,
+ .mfib_entry_index = mfei,
+ };
+
+ hash_set_mem_alloc (&vxlan_gbp_main.mcast_shared, dst, new_ep.as_u64);
+}
+
+static inline void
+mcast_shared_remove (ip46_address_t * dst)
+{
+ mcast_shared_t ep = mcast_shared_get (dst);
+
+ adj_unlock (ep.mcast_adj_index);
+ mfib_table_entry_delete_index (ep.mfib_entry_index, MFIB_SOURCE_VXLAN_GBP);
+
+ hash_unset_mem_free (&vxlan_gbp_main.mcast_shared, dst);
+}
+
+inline void
+vxlan_gbp_register_udp_ports (void)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+
+ if (vxm->udp_ports_registered == 0)
+ {
+ udp_register_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan_gbp,
+ vxlan4_gbp_input_node.index, /* is_ip4 */ 1);
+ udp_register_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan6_gbp,
+ vxlan6_gbp_input_node.index, /* is_ip4 */ 0);
+ }
+  /*
+   * The counter tracks the number of vxlan_gbp tunnels using the ports
+   */
+ vxm->udp_ports_registered += 1;
+}
+
+inline void
+vxlan_gbp_unregister_udp_ports (void)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+
+ ASSERT (vxm->udp_ports_registered != 0);
+
+ if (vxm->udp_ports_registered == 1)
+ {
+ udp_unregister_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan_gbp,
+ /* is_ip4 */ 1);
+ udp_unregister_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan6_gbp,
+ /* is_ip4 */ 0);
+ }
+
+ vxm->udp_ports_registered -= 1;
+}
+
+int vnet_vxlan_gbp_tunnel_add_del
+ (vnet_vxlan_gbp_tunnel_add_del_args_t * a, u32 * sw_if_indexp)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vxlan_gbp_tunnel_t *t = 0;
+ vnet_main_t *vnm = vxm->vnet_main;
+ u64 *p;
+ u32 sw_if_index = ~0;
+ vxlan4_gbp_tunnel_key_t key4;
+ vxlan6_gbp_tunnel_key_t key6;
+ u32 is_ip6 = a->is_ip6;
+
+ int not_found;
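+  /* Build the bihash lookup key: the IPv4 key packs dst (plus src for
+   * unicast tunnels), the encap fib index and the vni; the IPv6 key
+   * carries the full 128-bit dst address plus fib index and vni. */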
+ if (!is_ip6)
+ {
+ key4.key[0] = ip46_address_is_multicast (&a->dst) ?
+ a->dst.ip4.as_u32 :
+ a->dst.ip4.as_u32 | (((u64) a->src.ip4.as_u32) << 32);
+ key4.key[1] = (((u64) a->encap_fib_index) << 32)
+ | clib_host_to_net_u32 (a->vni << 8);
+ not_found =
+ clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
+ &key4);
+ p = &key4.value;
+ }
+ else
+ {
+ key6.key[0] = a->dst.ip6.as_u64[0];
+ key6.key[1] = a->dst.ip6.as_u64[1];
+ key6.key[2] = (((u64) a->encap_fib_index) << 32)
+ | clib_host_to_net_u32 (a->vni << 8);
+ not_found =
+ clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
+ &key6);
+ p = &key6.value;
+ }
+
+ if (not_found)
+ p = 0;
+
+ if (a->is_add)
+ {
+ l2input_main_t *l2im = &l2input_main;
+ u32 dev_instance; /* real dev instance tunnel index */
+      u32 user_instance;	/* requested and actual instance number */
+
+ /* adding a tunnel: tunnel must not already exist */
+ if (p)
+ {
+ t = pool_elt_at_index (vxm->tunnels, *p);
+ *sw_if_indexp = t->sw_if_index;
+ return VNET_API_ERROR_TUNNEL_EXIST;
+ }
+ pool_get_aligned (vxm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+ clib_memset (t, 0, sizeof (*t));
+ dev_instance = t - vxm->tunnels;
+
+ /* copy from arg structure */
+#define _(x) t->x = a->x;
+ foreach_copy_field;
+#undef _
+
+ vxlan_gbp_rewrite (t, is_ip6);
+ /*
+ * Reconcile the real dev_instance and a possible requested instance.
+ */
+ user_instance = a->instance;
+ if (user_instance == ~0)
+ user_instance = dev_instance;
+ if (hash_get (vxm->instance_used, user_instance))
+ {
+ pool_put (vxm->tunnels, t);
+ return VNET_API_ERROR_INSTANCE_IN_USE;
+ }
+ hash_set (vxm->instance_used, user_instance, 1);
+
+ t->dev_instance = dev_instance; /* actual */
+ t->user_instance = user_instance; /* name */
+
+ /* copy the key */
+ int add_failed;
+ if (is_ip6)
+ {
+ key6.value = (u64) dev_instance;
+ add_failed =
+ clib_bihash_add_del_24_8 (&vxm->vxlan6_gbp_tunnel_by_key, &key6,
+ 1 /*add */ );
+ }
+ else
+ {
+ key4.value = (u64) dev_instance;
+ add_failed =
+ clib_bihash_add_del_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, &key4,
+ 1 /*add */ );
+ }
+
+ if (add_failed)
+ {
+ pool_put (vxm->tunnels, t);
+ return VNET_API_ERROR_INVALID_REGISTRATION;
+ }
+
+ vxlan_gbp_register_udp_ports ();
+
+ t->hw_if_index = vnet_register_interface
+ (vnm, vxlan_gbp_device_class.index, dev_instance,
+ vxlan_gbp_hw_class.index, dev_instance);
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, t->hw_if_index);
+
+ /* Set vxlan_gbp tunnel output node */
+ u32 encap_index = !is_ip6 ?
+ vxlan4_gbp_encap_node.index : vxlan6_gbp_encap_node.index;
+ vnet_set_interface_output_node (vnm, t->hw_if_index, encap_index);
+
+ t->sw_if_index = sw_if_index = hi->sw_if_index;
+
+ if (VXLAN_GBP_TUNNEL_MODE_L3 == t->mode)
+ {
+ ip4_sw_interface_enable_disable (t->sw_if_index, 1);
+ ip6_sw_interface_enable_disable (t->sw_if_index, 1);
+ }
+
+ vec_validate_init_empty (vxm->tunnel_index_by_sw_if_index, sw_if_index,
+ ~0);
+ vxm->tunnel_index_by_sw_if_index[sw_if_index] = dev_instance;
+
+ /* setup l2 input config with l2 feature and bd 0 to drop packet */
+ vec_validate (l2im->configs, sw_if_index);
+ l2im->configs[sw_if_index].feature_bitmap = L2INPUT_FEAT_DROP;
+ l2im->configs[sw_if_index].bd_index = 0;
+
+ vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
+ si->flags &= ~VNET_SW_INTERFACE_FLAG_HIDDEN;
+ vnet_sw_interface_set_flags (vnm, sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+
+ fib_node_init (&t->node, FIB_NODE_TYPE_VXLAN_GBP_TUNNEL);
+ fib_prefix_t tun_dst_pfx;
+ vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL;
+
+ fib_prefix_from_ip46_addr (&t->dst, &tun_dst_pfx);
+ if (!ip46_address_is_multicast (&t->dst))
+ {
+ /* Unicast tunnel -
+ * source the FIB entry for the tunnel's destination
+ * and become a child thereof. The tunnel will then get poked
+ * when the forwarding for the entry updates, and the tunnel can
+ * re-stack accordingly
+ */
+ vtep_addr_ref (&t->src);
+ t->fib_entry_index = fib_entry_track (t->encap_fib_index,
+ &tun_dst_pfx,
+ FIB_NODE_TYPE_VXLAN_GBP_TUNNEL,
+ dev_instance,
+ &t->sibling_index);
+ vxlan_gbp_tunnel_restack_dpo (t);
+ }
+ else
+ {
+ /* Multicast tunnel -
+ * as the same mcast group can be used for multiple mcast tunnels
+ * with different VNIs, create the output fib adjacency only if
+ * it does not already exist
+ */
+ fib_protocol_t fp = fib_ip_proto (is_ip6);
+
+ if (vtep_addr_ref (&t->dst) == 1)
+ {
+ fib_node_index_t mfei;
+ adj_index_t ai;
+ fib_route_path_t path = {
+ .frp_proto = fib_proto_to_dpo (fp),
+ .frp_addr = zero_addr,
+ .frp_sw_if_index = 0xffffffff,
+ .frp_fib_index = ~0,
+ .frp_weight = 0,
+ .frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
+ };
+ const mfib_prefix_t mpfx = {
+ .fp_proto = fp,
+ .fp_len = (is_ip6 ? 128 : 32),
+ .fp_grp_addr = tun_dst_pfx.fp_addr,
+ };
+
+ /*
+ * Setup the (*,G) to receive traffic on the mcast group
+ * - the forwarding interface is for-us
+ * - the accepting interface is that from the API
+ */
+ mfib_table_entry_path_update (t->encap_fib_index, &mpfx,
+ MFIB_SOURCE_VXLAN_GBP,
+ MFIB_ENTRY_FLAG_NONE, &path);
+
+ path.frp_sw_if_index = a->mcast_sw_if_index;
+ path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
+ mfei = mfib_table_entry_path_update (
+ t->encap_fib_index, &mpfx, MFIB_SOURCE_VXLAN_GBP,
+ MFIB_ENTRY_FLAG_NONE, &path);
+
+ /*
+ * Create the mcast adjacency to send traffic to the group
+ */
+ ai = adj_mcast_add_or_lock (fp,
+ fib_proto_to_link (fp),
+ a->mcast_sw_if_index);
+
+ /*
+ * create a new end-point
+ */
+ mcast_shared_add (&t->dst, mfei, ai);
+ }
+
+ dpo_id_t dpo = DPO_INVALID;
+ mcast_shared_t ep = mcast_shared_get (&t->dst);
+
+ /* Stack shared mcast dst mac addr rewrite on encap */
+ dpo_set (&dpo, DPO_ADJACENCY_MCAST,
+ fib_proto_to_dpo (fp), ep.mcast_adj_index);
+
+ dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
+ dpo_reset (&dpo);
+ flood_class = VNET_FLOOD_CLASS_TUNNEL_MASTER;
+ }
+
+ vnet_get_sw_interface (vnet_get_main (), sw_if_index)->flood_class =
+ flood_class;
+ }
+ else
+ {
+ /* deleting a tunnel: tunnel must exist */
+ if (!p)
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ u32 instance = p[0];
+ t = pool_elt_at_index (vxm->tunnels, instance);
+
+ sw_if_index = t->sw_if_index;
+ vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */ );
+
+ if (VXLAN_GBP_TUNNEL_MODE_L3 == t->mode)
+ {
+ ip4_sw_interface_enable_disable (t->sw_if_index, 0);
+ ip6_sw_interface_enable_disable (t->sw_if_index, 0);
+ }
+
+ vxm->tunnel_index_by_sw_if_index[sw_if_index] = ~0;
+
+ if (!is_ip6)
+ clib_bihash_add_del_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, &key4,
+ 0 /*del */ );
+ else
+ clib_bihash_add_del_24_8 (&vxm->vxlan6_gbp_tunnel_by_key, &key6,
+ 0 /*del */ );
+
+ if (!ip46_address_is_multicast (&t->dst))
+ {
+ vtep_addr_unref (&t->src);
+ fib_entry_untrack (t->fib_entry_index, t->sibling_index);
+ }
+ else if (vtep_addr_unref (&t->dst) == 0)
+ {
+ mcast_shared_remove (&t->dst);
+ }
+
+ vxlan_gbp_unregister_udp_ports ();
+ vnet_delete_hw_interface (vnm, t->hw_if_index);
+ hash_unset (vxm->instance_used, t->user_instance);
+
+ fib_node_deinit (&t->node);
+ pool_put (vxm->tunnels, t);
+ }
+
+ if (sw_if_indexp)
+ *sw_if_indexp = sw_if_index;
+
+ return 0;
+}
+
+int
+vnet_vxlan_gbp_tunnel_del (u32 sw_if_index)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vxlan_gbp_tunnel_t *t = 0;
+ u32 ti;
+
+ if (sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index))
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ ti = vxm->tunnel_index_by_sw_if_index[sw_if_index];
+ if (~0 != ti)
+ {
+ t = pool_elt_at_index (vxm->tunnels, ti);
+
+ vnet_vxlan_gbp_tunnel_add_del_args_t args = {
+ .is_add = 0,
+ .is_ip6 = !ip46_address_is_ip4 (&t->src),
+ .vni = t->vni,
+ .src = t->src,
+ .dst = t->dst,
+ .instance = ~0,
+ };
+
+ return (vnet_vxlan_gbp_tunnel_add_del (&args, NULL));
+ }
+
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+}
+
+static uword
+get_decap_next_for_node (u32 node_index, u32 ipv4_set)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vlib_main_t *vm = vxm->vlib_main;
+ uword input_node = (ipv4_set) ? vxlan4_gbp_input_node.index :
+ vxlan6_gbp_input_node.index;
+
+ return vlib_node_add_next (vm, input_node, node_index);
+}
+
+static uword
+unformat_decap_next (unformat_input_t * input, va_list * args)
+{
+ u32 *result = va_arg (*args, u32 *);
+ u32 ipv4_set = va_arg (*args, int);
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vlib_main_t *vm = vxm->vlib_main;
+ u32 node_index;
+ u32 tmp;
+
+ if (unformat (input, "l2"))
+ *result = VXLAN_GBP_INPUT_NEXT_L2_INPUT;
+ else if (unformat (input, "node %U", unformat_vlib_node, vm, &node_index))
+ *result = get_decap_next_for_node (node_index, ipv4_set);
+ else if (unformat (input, "%d", &tmp))
+ *result = tmp;
+ else
+ return 0;
+ return 1;
+}
+
+static clib_error_t *
+vxlan_gbp_tunnel_add_del_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ ip46_address_t src = ip46_address_initializer, dst =
+ ip46_address_initializer;
+ vxlan_gbp_tunnel_mode_t mode = VXLAN_GBP_TUNNEL_MODE_L2;
+ u8 is_add = 1;
+ u8 src_set = 0;
+ u8 dst_set = 0;
+ u8 grp_set = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
+ u32 instance = ~0;
+ u32 encap_fib_index = 0;
+ u32 mcast_sw_if_index = ~0;
+ u32 decap_next_index = VXLAN_GBP_INPUT_NEXT_L2_INPUT;
+ u32 vni = 0;
+ u32 table_id;
+ clib_error_t *parse_error = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ {
+ is_add = 0;
+ }
+ else if (unformat (line_input, "instance %d", &instance))
+ ;
+ else if (unformat (line_input, "src %U",
+ unformat_ip46_address, &src, IP46_TYPE_ANY))
+ {
+ src_set = 1;
+ ip46_address_is_ip4 (&src) ? (ipv4_set = 1) : (ipv6_set = 1);
+ }
+ else if (unformat (line_input, "dst %U",
+ unformat_ip46_address, &dst, IP46_TYPE_ANY))
+ {
+ dst_set = 1;
+ ip46_address_is_ip4 (&dst) ? (ipv4_set = 1) : (ipv6_set = 1);
+ }
+ else if (unformat (line_input, "group %U %U",
+ unformat_ip46_address, &dst, IP46_TYPE_ANY,
+ unformat_vnet_sw_interface,
+ vnet_get_main (), &mcast_sw_if_index))
+ {
+ grp_set = dst_set = 1;
+ ip46_address_is_ip4 (&dst) ? (ipv4_set = 1) : (ipv6_set = 1);
+ }
+ else if (unformat (line_input, "encap-vrf-id %d", &table_id))
+ {
+ encap_fib_index =
+ fib_table_find (fib_ip_proto (ipv6_set), table_id);
+ }
+ else if (unformat (line_input, "decap-next %U", unformat_decap_next,
+ &decap_next_index, ipv4_set))
+ ;
+ else if (unformat (line_input, "vni %d", &vni))
+ ;
+ else
+ {
+ parse_error = clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, line_input);
+ break;
+ }
+ }
+
+ unformat_free (line_input);
+
+ if (parse_error)
+ return parse_error;
+
+ if (encap_fib_index == ~0)
+ return clib_error_return (0, "nonexistent encap-vrf-id %d", table_id);
+
+ if (src_set == 0)
+ return clib_error_return (0, "tunnel src address not specified");
+
+ if (dst_set == 0)
+ return clib_error_return (0, "tunnel dst address not specified");
+
+ if (grp_set && !ip46_address_is_multicast (&dst))
+ return clib_error_return (0, "tunnel group address not multicast");
+
+ if (grp_set == 0 && ip46_address_is_multicast (&dst))
+ return clib_error_return (0, "dst address must be unicast");
+
+ if (grp_set && mcast_sw_if_index == ~0)
+ return clib_error_return (0, "tunnel nonexistent multicast device");
+
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+
+ if (ip46_address_cmp (&src, &dst) == 0)
+ return clib_error_return (0, "src and dst addresses are identical");
+
+ if (decap_next_index == ~0)
+ return clib_error_return (0, "next node not found");
+
+ if (vni == 0)
+ return clib_error_return (0, "vni not specified");
+
+ if (vni >> 24)
+ return clib_error_return (0, "vni %d out of range", vni);
+
+ vnet_vxlan_gbp_tunnel_add_del_args_t a = {
+ .is_add = is_add,
+ .is_ip6 = ipv6_set,
+ .instance = instance,
+#define _(x) .x = x,
+ foreach_copy_field
+#undef _
+ };
+
+ u32 tunnel_sw_if_index;
+ int rv = vnet_vxlan_gbp_tunnel_add_del (&a, &tunnel_sw_if_index);
+
+ switch (rv)
+ {
+ case 0:
+ if (is_add)
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name,
+ vnet_get_main (), tunnel_sw_if_index);
+ break;
+
+ case VNET_API_ERROR_TUNNEL_EXIST:
+ return clib_error_return (0, "tunnel already exists...");
+
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return clib_error_return (0, "tunnel does not exist...");
+
+ case VNET_API_ERROR_INSTANCE_IN_USE:
+ return clib_error_return (0, "Instance is in use");
+
+ default:
+ return clib_error_return
+ (0, "vnet_vxlan_gbp_tunnel_add_del returned %d", rv);
+ }
+
+ return 0;
+}
+
+/*?
+ * Add or delete a VXLAN-GBP tunnel.
+ *
+ * VXLAN provides the features needed to allow L2 bridge domains (BDs)
+ * to span multiple servers. This is done by building an L2 overlay on
+ * top of an L3 network underlay using VXLAN tunnels.
+ *
+ * This makes it possible for servers to be co-located in the same data
+ * center or be separated geographically as long as they are reachable
+ * through the underlay L3 network.
+ *
+ * You can refer to this kind of L2 overlay bridge domain as a VXLAN
+ * (Virtual eXtensible LAN) segment.
+ *
+ * @cliexpar
+ * Example of how to create a VXLAN-GBP tunnel:
+ * @cliexcmd{create vxlan-gbp tunnel src 10.0.3.1 dst 10.0.3.3 vni 13 encap-vrf-id 7}
+ * Example of how to create a VXLAN-GBP tunnel with a known name, vxlan_gbp_tunnel42:
+ * @cliexcmd{create vxlan-gbp tunnel src 10.0.3.1 dst 10.0.3.3 vni 42 instance 42}
+ * Example of how to create a multicast VXLAN-GBP tunnel with a known name, vxlan_gbp_tunnel23:
+ * @cliexcmd{create vxlan-gbp tunnel src 10.0.3.1 group 239.1.1.1 GigabitEthernet0/8/0 vni 23 instance 23}
+ * Example of how to delete a VXLAN-GBP tunnel:
+ * @cliexcmd{create vxlan-gbp tunnel src 10.0.3.1 dst 10.0.3.3 vni 13 del}
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (create_vxlan_gbp_tunnel_command, static) = {
+ .path = "create vxlan-gbp tunnel",
+ .short_help =
+ "create vxlan-gbp tunnel src <local-vtep-addr>"
+ " {dst <remote-vtep-addr>|group <mcast-vtep-addr> <intf-name>} vni <nn>"
+ " [instance <id>]"
+ " [encap-vrf-id <nn>] [decap-next [l2|node <name>]] [del]",
+ .function = vxlan_gbp_tunnel_add_del_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+show_vxlan_gbp_tunnel_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vxlan_gbp_tunnel_t *t;
+ int raw = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "raw"))
+ raw = 1;
+ else
+ return clib_error_return (0, "parse error: '%U'",
+ format_unformat_error, input);
+ }
+
+ if (pool_elts (vxm->tunnels) == 0)
+ vlib_cli_output (vm, "No vxlan-gbp tunnels configured...");
+
+/* *INDENT-OFF* */
+ pool_foreach (t, vxm->tunnels)
+ {
+ vlib_cli_output (vm, "%U", format_vxlan_gbp_tunnel, t);
+ }
+/* *INDENT-ON* */
+
+ if (raw)
+ {
+ vlib_cli_output (vm, "Raw IPv4 Hash Table:\n%U\n",
+ format_bihash_16_8, &vxm->vxlan4_gbp_tunnel_by_key,
+ 1 /* verbose */ );
+ vlib_cli_output (vm, "Raw IPv6 Hash Table:\n%U\n",
+ format_bihash_24_8, &vxm->vxlan6_gbp_tunnel_by_key,
+ 1 /* verbose */ );
+ }
+
+ return 0;
+}
+
+/*?
+ * Display all the VXLAN-GBP tunnel entries.
+ *
+ * @cliexpar
+ * Example of how to display the VXLAN-GBP tunnel entries:
+ * @cliexstart{show vxlan-gbp tunnel}
+ * [0] src 10.0.3.1 dst 10.0.3.3 vni 13 encap_fib_index 0 sw_if_index 5 decap_next l2
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_vxlan_gbp_tunnel_command, static) = {
+ .path = "show vxlan-gbp tunnel",
+ .short_help = "show vxlan-gbp tunnel [raw]",
+ .function = show_vxlan_gbp_tunnel_command_fn,
+};
+/* *INDENT-ON* */
+
+
+void
+vnet_int_vxlan_gbp_bypass_mode (u32 sw_if_index, u8 is_ip6, u8 is_enable)
+{
+ if (is_ip6)
+ vnet_feature_enable_disable ("ip6-unicast", "ip6-vxlan-gbp-bypass",
+ sw_if_index, is_enable, 0, 0);
+ else
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-gbp-bypass",
+ sw_if_index, is_enable, 0, 0);
+}
+
+
+static clib_error_t *
+set_ip_vxlan_gbp_bypass (u32 is_ip6,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = 0;
+ u32 sw_if_index, is_enable;
+
+ sw_if_index = ~0;
+ is_enable = 1;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat_user
+ (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else if (unformat (line_input, "del"))
+ is_enable = 0;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (~0 == sw_if_index)
+ {
+ error = clib_error_return (0, "unknown interface `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+
+ vnet_int_vxlan_gbp_bypass_mode (sw_if_index, is_ip6, is_enable);
+
+done:
+ unformat_free (line_input);
+
+ return error;
+}
+
+static clib_error_t *
+set_ip4_vxlan_gbp_bypass (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ return set_ip_vxlan_gbp_bypass (0, input, cmd);
+}
+
+/*?
+ * This command adds the 'ip4-vxlan-gbp-bypass' graph node for a given
+ * interface. With the IPv4 vxlan-gbp-bypass graph node enabled on an
+ * interface, the node checks for and validates incoming vxlan_gbp packets
+ * and bypasses the ip4-lookup, ip4-local and ip4-udp-lookup nodes, speeding
+ * up vxlan_gbp packet forwarding. The node adds a small amount of overhead
+ * for non-vxlan_gbp packets, which is kept to a minimum.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before ip4-vxlan_gbp-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan_gbp-bypass}
+ * Name Next Previous
+ * ip4-vxlan-gbp-bypass error-drop [0]
+ * vxlan4-gbp-input [1]
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to enable ip4-vxlan-gbp-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-gbp-bypass GigabitEthernet2/0/0}
+ *
+ * Example of graph node after ip4-vxlan-gbp-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan-gbp-bypass}
+ * Name Next Previous
+ * ip4-vxlan-gbp-bypass error-drop [0] ip4-input
+ * vxlan4-gbp-input [1] ip4-input-no-checksum
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ * ...
+ * ipv4 unicast:
+ * ip4-vxlan-gbp-bypass
+ * ip4-lookup
+ * ...
+ * @cliexend
+ *
+ * Example of how to disable ip4-vxlan-gbp-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-gbp-bypass GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip_vxlan_gbp_bypass_command, static) = {
+ .path = "set interface ip vxlan-gbp-bypass",
+ .function = set_ip4_vxlan_gbp_bypass,
+ .short_help = "set interface ip vxlan-gbp-bypass <interface> [del]",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_ip6_vxlan_gbp_bypass (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ return set_ip_vxlan_gbp_bypass (1, input, cmd);
+}
+
+/*?
+ * This command adds the 'ip6-vxlan-gbp-bypass' graph node for a given
+ * interface. With the IPv6 vxlan-gbp-bypass graph node enabled on an
+ * interface, the node checks for and validates incoming vxlan_gbp packets
+ * and bypasses the ip6-lookup, ip6-local and ip6-udp-lookup nodes, speeding
+ * up vxlan_gbp packet forwarding. The node adds a small amount of overhead
+ * for non-vxlan_gbp packets, which is kept to a minimum.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before ip6-vxlan-gbp-bypass is enabled:
+ * @cliexstart{show vlib graph ip6-vxlan-gbp-bypass}
+ * Name Next Previous
+ * ip6-vxlan-gbp-bypass error-drop [0]
+ * vxlan6-gbp-input [1]
+ * ip6-lookup [2]
+ * @cliexend
+ *
+ * Example of how to enable ip6-vxlan-gbp-bypass on an interface:
+ * @cliexcmd{set interface ip6 vxlan-gbp-bypass GigabitEthernet2/0/0}
+ *
+ * Example of graph node after ip6-vxlan-gbp-bypass is enabled:
+ * @cliexstart{show vlib graph ip6-vxlan-gbp-bypass}
+ * Name Next Previous
+ * ip6-vxlan-gbp-bypass error-drop [0] ip6-input
+ * vxlan6-gbp-input [1] ip4-input-no-checksum
+ * ip6-lookup [2]
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ * ...
+ * ipv6 unicast:
+ * ip6-vxlan-gbp-bypass
+ * ip6-lookup
+ * ...
+ * @cliexend
+ *
+ * Example of how to disable ip6-vxlan-gbp-bypass on an interface:
+ * @cliexcmd{set interface ip6 vxlan-gbp-bypass GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_interface_ip6_vxlan_gbp_bypass_command, static) = {
+ .path = "set interface ip6 vxlan-gbp-bypass",
+ .function = set_ip6_vxlan_gbp_bypass,
+ .short_help = "set interface ip6 vxlan-gbp-bypass <interface> [del]",
+};
+/* *INDENT-ON* */
+
+#define VXLAN_GBP_HASH_NUM_BUCKETS (2 * 1024)
+#define VXLAN_GBP_HASH_MEMORY_SIZE (1 << 20)
+
+clib_error_t *
+vxlan_gbp_init (vlib_main_t * vm)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+
+ vxm->vnet_main = vnet_get_main ();
+ vxm->vlib_main = vm;
+
+  /* initialize the tunnel lookup and VTEP hash tables */
+ clib_bihash_init_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, "vxlan4-gbp",
+ VXLAN_GBP_HASH_NUM_BUCKETS,
+ VXLAN_GBP_HASH_MEMORY_SIZE);
+ clib_bihash_init_24_8 (&vxm->vxlan6_gbp_tunnel_by_key, "vxlan6-gbp",
+ VXLAN_GBP_HASH_NUM_BUCKETS,
+ VXLAN_GBP_HASH_MEMORY_SIZE);
+ vxm->vtep6 = hash_create_mem (0, sizeof (ip6_address_t), sizeof (uword));
+ vxm->mcast_shared = hash_create_mem (0,
+ sizeof (ip46_address_t),
+ sizeof (mcast_shared_t));
+
+ fib_node_register_type (FIB_NODE_TYPE_VXLAN_GBP_TUNNEL, &vxlan_gbp_vft);
+
+ punt_hdl = vlib_punt_client_register ("vxlan-gbp");
+
+ vlib_punt_reason_alloc (punt_hdl, "VXLAN-GBP-no-such-v4-tunnel", NULL, NULL,
+ &vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4],
+ VNET_PUNT_REASON_F_IP4_PACKET,
+ format_vnet_punt_reason_flags);
+ vlib_punt_reason_alloc (punt_hdl, "VXLAN-GBP-no-such-v6-tunnel", NULL, NULL,
+ &vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6],
+ VNET_PUNT_REASON_F_IP6_PACKET,
+ format_vnet_punt_reason_flags);
+
+ return (0);
+}
+
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (vxlan_gbp_init) =
+{
+ .runs_after = VLIB_INITS("punt_init"),
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.h b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.h
new file mode 100644
index 00000000000..fe93587cb00
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vnet_vxlan_gbp_h
+#define included_vnet_vxlan_gbp_h
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/bihash_16_8.h>
+#include <vppinfra/bihash_24_8.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/l2/l2_bd.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/udp/udp_local.h>
+#include <vnet/udp/udp_packet.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/adj/adj_types.h>
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip4_header_t ip4; /* 20 bytes */
+ udp_header_t udp; /* 8 bytes */
+ vxlan_gbp_header_t vxlan_gbp; /* 8 bytes */
+}) ip4_vxlan_gbp_header_t;
+
+typedef CLIB_PACKED (struct {
+ ip6_header_t ip6; /* 40 bytes */
+ udp_header_t udp; /* 8 bytes */
+ vxlan_gbp_header_t vxlan_gbp; /* 8 bytes */
+}) ip6_vxlan_gbp_header_t;
+/* *INDENT-ON* */
+
+/*
+* Key fields: remote ip, vni and fib index on incoming VXLAN packet
+* ip, vni fields in NET byte order
+* fib index field in host byte order
+*/
+typedef clib_bihash_kv_16_8_t vxlan4_gbp_tunnel_key_t;
+
+/*
+* Key fields: remote ip, vni and fib index on incoming VXLAN packet
+* ip, vni fields in NET byte order
+* fib index field in host byte order
+*/
+typedef clib_bihash_kv_24_8_t vxlan6_gbp_tunnel_key_t;
+
+typedef enum vxlan_gbp_tunnel_mode_t_
+{
+ VXLAN_GBP_TUNNEL_MODE_L2,
+ VXLAN_GBP_TUNNEL_MODE_L3,
+} vxlan_gbp_tunnel_mode_t;
+
+extern u8 *format_vxlan_gbp_tunnel_mode (u8 * s, va_list * args);
+
+typedef struct
+{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /* FIB DPO for IP forwarding of VXLAN encap packet */
+ dpo_id_t next_dpo;
+
+ /* flags */
+ u16 flags;
+
+ /* vxlan VNI in HOST byte order */
+ u32 vni;
+
+ /* tunnel src and dst addresses */
+ ip46_address_t src;
+ ip46_address_t dst;
+
+ /* mcast packet output intfc index (used only if dst is mcast) */
+ u32 mcast_sw_if_index;
+
+ /* The FIB index for src/dst addresses */
+ u32 encap_fib_index;
+
+ /* vnet intfc index */
+ u32 sw_if_index;
+ u32 hw_if_index;
+
+ /** Next node after VxLAN-GBP encap */
+ uword encap_next_node;
+
+ /**
+ * Tunnel mode.
+ * L2 tunnels decap to L2 path, L3 tunnels to the L3 path
+ */
+ vxlan_gbp_tunnel_mode_t mode;
+
+ /**
+ * Linkage into the FIB object graph
+ */
+ fib_node_t node;
+
+ /*
+   * The FIB entry used either for sending unicast VXLAN-GBP encap packets
+   * (unicast tunnel) or for receiving mcast VXLAN-GBP packets (mcast tunnel)
+ */
+ fib_node_index_t fib_entry_index;
+ adj_index_t mcast_adj_index;
+
+ /**
+ * The tunnel is a child of the FIB entry for its destination. This is
+ * so it receives updates when the forwarding information for that entry
+ * changes.
+   * The tunnel's sibling index on the FIB entry's dependency list.
+ */
+ u32 sibling_index;
+
+ u32 dev_instance; /* Real device instance in tunnel vector */
+ u32 user_instance; /* Instance name being shown to user */
+
+
+ VNET_DECLARE_REWRITE;
+} vxlan_gbp_tunnel_t;
+
+#define foreach_vxlan_gbp_input_next \
+ _(DROP, "error-drop") \
+ _(PUNT, "punt-dispatch") \
+ _(L2_INPUT, "l2-input") \
+ _(IP4_INPUT, "ip4-input") \
+ _(IP6_INPUT, "ip6-input")
+
+typedef enum
+{
+#define _(s,n) VXLAN_GBP_INPUT_NEXT_##s,
+ foreach_vxlan_gbp_input_next
+#undef _
+ VXLAN_GBP_INPUT_N_NEXT,
+} vxlan_gbp_input_next_t;
+
+typedef enum
+{
+#define vxlan_gbp_error(n,s) VXLAN_GBP_ERROR_##n,
+#include <vnet/vxlan-gbp/vxlan_gbp_error.def>
+#undef vxlan_gbp_error
+ VXLAN_GBP_N_ERROR,
+} vxlan_gbp_input_error_t;
+
+/**
+ * Callback function for packets that do not match a configured tunnel
+ */
+typedef vxlan_gbp_input_next_t (*vxlan_bgp_no_tunnel_t) (vlib_buffer_t * b,
+ u32 thread_index,
+ u8 is_ip6);
+
+typedef struct
+{
+ /* vector of encap tunnel instances */
+ vxlan_gbp_tunnel_t *tunnels;
+
+ /* lookup tunnel by key */
+ clib_bihash_16_8_t vxlan4_gbp_tunnel_by_key; /* keyed on ipv4.dst + fib + vni */
+ clib_bihash_24_8_t vxlan6_gbp_tunnel_by_key; /* keyed on ipv6.dst + fib + vni */
+
+ /* local VTEP IPs ref count used by vxlan-bypass node to check if
+ received VXLAN packet DIP matches any local VTEP address */
+ uword *vtep4; /* local ip4 VTEPs keyed on their ip4 addr */
+ uword *vtep6; /* local ip6 VTEPs keyed on their ip6 addr */
+
+ /* mcast shared info */
+ uword *mcast_shared; /* keyed on mcast ip46 addr */
+
+ /* Mapping from sw_if_index to tunnel index */
+ u32 *tunnel_index_by_sw_if_index;
+
+ /* On demand udp port registration */
+ u32 udp_ports_registered;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+
+ /* Record used instances */
+ uword *instance_used;
+
+ /**
+ * Punt reasons for no such tunnel
+ */
+ vlib_punt_reason_t punt_no_such_tunnel[FIB_PROTOCOL_IP_MAX];
+} vxlan_gbp_main_t;
+
+extern vxlan_gbp_main_t vxlan_gbp_main;
+
+extern vlib_node_registration_t vxlan4_gbp_input_node;
+extern vlib_node_registration_t vxlan6_gbp_input_node;
+extern vlib_node_registration_t vxlan4_gbp_encap_node;
+extern vlib_node_registration_t vxlan6_gbp_encap_node;
+extern void vxlan_gbp_register_udp_ports (void);
+extern void vxlan_gbp_unregister_udp_ports (void);
+
+u8 *format_vxlan_gbp_encap_trace (u8 * s, va_list * args);
+
+typedef struct
+{
+ u8 is_add;
+ u8 is_ip6;
+ u32 instance;
+ vxlan_gbp_tunnel_mode_t mode;
+ ip46_address_t src, dst;
+ u32 mcast_sw_if_index;
+ u32 encap_fib_index;
+ u32 vni;
+} vnet_vxlan_gbp_tunnel_add_del_args_t;
+
+int vnet_vxlan_gbp_tunnel_add_del
+ (vnet_vxlan_gbp_tunnel_add_del_args_t * a, u32 * sw_if_indexp);
+int vnet_vxlan_gbp_tunnel_del (u32 sw_if_index);
+
+void vnet_int_vxlan_gbp_bypass_mode (u32 sw_if_index, u8 is_ip6,
+ u8 is_enable);
+
+always_inline u32
+vxlan_gbp_tunnel_by_sw_if_index (u32 sw_if_index)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+
+ if (sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index))
+ return ~0;
+
+ return (vxm->tunnel_index_by_sw_if_index[sw_if_index]);
+}
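+
+/* A minimal lookup sketch (illustrative only): map an sw_if_index back to
+ * its tunnel, much as the tunnel delete and dump paths do:
+ *
+ *   u32 ti = vxlan_gbp_tunnel_by_sw_if_index (sw_if_index);
+ *   if (~0 != ti)
+ *     {
+ *       vxlan_gbp_tunnel_t *t =
+ *         pool_elt_at_index (vxlan_gbp_main.tunnels, ti);
+ *       ...
+ *     }
+ */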
+
+#endif /* included_vnet_vxlan_gbp_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_api.c b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_api.c
new file mode 100644
index 00000000000..a3f2246f463
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_api.c
@@ -0,0 +1,217 @@
+/*
+ *------------------------------------------------------------------
+ * vxlan_gbp_api.c - vxlan gbp api
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/feature/feature.h>
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/ip/ip_types_api.h>
+#include <vnet/format_fns.h>
+
+#include <vxlan-gbp/vxlan_gbp.api_enum.h>
+#include <vxlan-gbp/vxlan_gbp.api_types.h>
+
+#define REPLY_MSG_ID_BASE msg_id_base
+#include <vlibapi/api_helper_macros.h>
+
+static u16 msg_id_base;
+
+static void
+ vl_api_sw_interface_set_vxlan_gbp_bypass_t_handler
+ (vl_api_sw_interface_set_vxlan_gbp_bypass_t * mp)
+{
+ vl_api_sw_interface_set_vxlan_gbp_bypass_reply_t *rmp;
+ int rv = 0;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ vnet_int_vxlan_gbp_bypass_mode (sw_if_index, mp->is_ipv6, mp->enable);
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_VXLAN_GBP_BYPASS_REPLY);
+}
+
+static int
+vxlan_gbp_tunnel_mode_decode (vl_api_vxlan_gbp_api_tunnel_mode_t in,
+ vxlan_gbp_tunnel_mode_t * out)
+{
+ in = clib_net_to_host_u32 (in);
+
+ switch (in)
+ {
+ case VXLAN_GBP_API_TUNNEL_MODE_L2:
+ *out = VXLAN_GBP_TUNNEL_MODE_L2;
+ return (0);
+ case VXLAN_GBP_API_TUNNEL_MODE_L3:
+ *out = VXLAN_GBP_TUNNEL_MODE_L3;
+ return (0);
+ }
+ return (VNET_API_ERROR_INVALID_VALUE);
+}
+
+static void vl_api_vxlan_gbp_tunnel_add_del_t_handler
+ (vl_api_vxlan_gbp_tunnel_add_del_t * mp)
+{
+ vl_api_vxlan_gbp_tunnel_add_del_reply_t *rmp;
+ vxlan_gbp_tunnel_mode_t mode;
+ ip46_address_t src, dst;
+ ip46_type_t itype;
+ int rv = 0;
+ u32 sw_if_index = ~0;
+ u32 fib_index;
+
+ itype = ip_address_decode (&mp->tunnel.src, &src);
+ itype = ip_address_decode (&mp->tunnel.dst, &dst);
+
+ fib_index = fib_table_find (fib_proto_from_ip46 (itype),
+ ntohl (mp->tunnel.encap_table_id));
+ if (fib_index == ~0)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+
+ rv = vxlan_gbp_tunnel_mode_decode (mp->tunnel.mode, &mode);
+
+ if (rv)
+ goto out;
+
+ vnet_vxlan_gbp_tunnel_add_del_args_t a = {
+ .is_add = mp->is_add,
+ .is_ip6 = (itype == IP46_TYPE_IP6),
+ .instance = ntohl (mp->tunnel.instance),
+ .mcast_sw_if_index = ntohl (mp->tunnel.mcast_sw_if_index),
+ .encap_fib_index = fib_index,
+ .vni = ntohl (mp->tunnel.vni),
+ .dst = dst,
+ .src = src,
+ .mode = mode,
+ };
+
+ /* Check src & dst are different */
+ if (ip46_address_cmp (&a.dst, &a.src) == 0)
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+ if (ip46_address_is_multicast (&a.dst) &&
+ !vnet_sw_if_index_is_api_valid (a.mcast_sw_if_index))
+ {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto out;
+ }
+
+ rv = vnet_vxlan_gbp_tunnel_add_del (&a, &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_VXLAN_GBP_TUNNEL_ADD_DEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void send_vxlan_gbp_tunnel_details
+ (vxlan_gbp_tunnel_t * t, vl_api_registration_t * reg, u32 context)
+{
+ vl_api_vxlan_gbp_tunnel_details_t *rmp;
+ ip46_type_t itype = (ip46_address_is_ip4 (&t->dst) ?
+ IP46_TYPE_IP4 : IP46_TYPE_IP6);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ clib_memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id =
+ ntohs (VL_API_VXLAN_GBP_TUNNEL_DETAILS + REPLY_MSG_ID_BASE);
+
+ ip_address_encode (&t->src, itype, &rmp->tunnel.src);
+ ip_address_encode (&t->dst, itype, &rmp->tunnel.dst);
+ rmp->tunnel.encap_table_id =
+ fib_table_get_table_id (t->encap_fib_index, fib_proto_from_ip46 (itype));
+
+ rmp->tunnel.instance = htonl (t->user_instance);
+ rmp->tunnel.mcast_sw_if_index = htonl (t->mcast_sw_if_index);
+ rmp->tunnel.vni = htonl (t->vni);
+ rmp->tunnel.sw_if_index = htonl (t->sw_if_index);
+ rmp->context = context;
+
+ vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+static void vl_api_vxlan_gbp_tunnel_dump_t_handler
+ (vl_api_vxlan_gbp_tunnel_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vxlan_gbp_tunnel_t *t;
+ u32 sw_if_index;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (t, vxm->tunnels)
+ {
+ send_vxlan_gbp_tunnel_details(t, reg, mp->context);
+ }
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if ((sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index)) ||
+ (~0 == vxm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &vxm->tunnels[vxm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_vxlan_gbp_tunnel_details (t, reg, mp->context);
+ }
+}
+
+#include <vxlan-gbp/vxlan_gbp.api.c>
+static clib_error_t *
+vxlan_gbp_api_hookup (vlib_main_t * vm)
+{
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ msg_id_base = setup_message_id_table ();
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (vxlan_gbp_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_error.def b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_error.def
new file mode 100644
index 00000000000..43ad4dac064
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_error.def
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+vxlan_gbp_error (DECAPSULATED, "good packets decapsulated")
+vxlan_gbp_error (NO_SUCH_TUNNEL, "no such tunnel packets")
+vxlan_gbp_error (BAD_FLAGS, "packets with bad flags field in vxlan gbp header")
diff --git a/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_packet.c b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_packet.c
new file mode 100644
index 00000000000..01c7a19bfb9
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_packet.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+u8 *
+format_vxlan_gbp_header_flags (u8 * s, va_list * args)
+{
+ vxlan_gbp_flags_t flags = va_arg (*args, int);
+
+ if (VXLAN_GBP_FLAGS_NONE == flags)
+ {
+ s = format (s, "None");
+ }
+#define _(n,f) { \
+ if (VXLAN_GBP_FLAGS_##f & flags) \
+ s = format (s, #f); \
+ }
+ foreach_vxlan_gbp_flags
+#undef _
+ return (s);
+}
+
+u8 *
+format_vxlan_gbp_header_gpflags (u8 * s, va_list * args)
+{
+ vxlan_gbp_gpflags_t flags = va_arg (*args, int);
+
+ if (VXLAN_GBP_GPFLAGS_NONE == flags)
+ {
+ s = format (s, "None");
+ }
+#define _(n,f) { \
+ if (VXLAN_GBP_GPFLAGS_##f & flags) \
+ s = format (s, #f); \
+ }
+ foreach_vxlan_gbp_gpflags
+#undef _
+ return (s);
+}
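+
+/* Example (illustrative): format (0, "%U", format_vxlan_gbp_header_flags,
+ * VXLAN_GBP_FLAGS_GI) yields "GI", while a zero flags argument yields
+ * "None"; the gpflags formatter behaves analogously. */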
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_packet.h b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_packet.h
new file mode 100644
index 00000000000..e655b333b89
--- /dev/null
+++ b/extras/deprecated/vnet/vxlan-gbp/vxlan_gbp_packet.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vxlan_gbp_packet_h__
+#define __included_vxlan_gbp_packet_h__ 1
+
+#include <vlib/vlib.h>
+
+/*
+ * From draft-smith-vxlan-group-policy-04.txt
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |G|R|R|R|I|R|R|R|R|D|E|S|A|R|R|R| Group Policy ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * G bit: Bit 0 of the initial word is defined as the G (Group Based
+ * Policy Extension) bit.
+ *
+ * I bit: the I flag MUST be set to 1 for a valid
+ * VXLAN Network ID (VNI).
+ *
+ * D bit: Bit 9 of the initial word is defined as the Don't Learn bit.
+ * When set, this bit indicates that the egress VTEP MUST NOT learn the
+ * source address of the encapsulated frame.
+ *
+ * E bit: Bit 10 of the initial word is defined as the bounce bit.
+ * When set, this bit indicates that the packet is bounced and must be
+ * dropped.
+ *
+ * S bit: Bit 11 of the initial word is defined as the source policy
+ * applied bit.
+ *
+ * A bit: Bit 12 of the initial word is defined as the A (Policy
+ * Applied) bit. This bit is only defined as the A bit when the G bit
+ * is set to 1.
+ *
+ * A = 1 indicates that the group policy has already been applied to
+ * this packet. Policies MUST NOT be applied by devices when the A
+ * bit is set.
+ *
+ * A = 0 indicates that the group policy has not been applied to this
+ * packet. Group policies MUST be applied by devices when the A bit
+ * is set to 0 and the destination Group has been determined.
+ * Devices that apply the Group policy MUST set the A bit to 1 after
+ * the policy has been applied.
+ *
+ * Group Policy ID: 16 bit identifier that indicates the source TSI
+ * Group membership being encapsulated by VXLAN. Its value is the source
+ * class id.
+ *
+ * FOR INTERNAL USE ONLY
+ * R bit: Bit 13 of the initial word is defined as the reflection bit.
+ * It is set on packet rx and checked on tx; if set, the packet is
+ * dropped. This prevents packets received on an iVXLAN tunnel from
+ * being reflected back out of another.
+ */
+
+typedef struct
+{
+ union
+ {
+ struct
+ {
+ union
+ {
+ struct
+ {
+ u8 flag_g_i;
+ u8 gpflags;
+ };
+ u16 flags;
+ };
+ u16 sclass;
+ };
+ u32 flags_sclass_as_u32;
+ };
+ u32 vni_reserved;
+} vxlan_gbp_header_t;
+
+#define foreach_vxlan_gbp_flags \
+ _ (0x80, G) \
+ _ (0x08, I)
+
+typedef enum
+{
+ VXLAN_GBP_FLAGS_NONE = 0,
+#define _(n,f) VXLAN_GBP_FLAGS_##f = n,
+ foreach_vxlan_gbp_flags
+#undef _
+} __attribute__ ((packed)) vxlan_gbp_flags_t;
+
+#define VXLAN_GBP_FLAGS_GI (VXLAN_GBP_FLAGS_G|VXLAN_GBP_FLAGS_I)
+
+#define foreach_vxlan_gbp_gpflags \
+_ (0x40, D) \
+_ (0x20, E) \
+_ (0x10, S) \
+_ (0x08, A) \
+_ (0x04, R)
+
+typedef enum
+{
+ VXLAN_GBP_GPFLAGS_NONE = 0,
+#define _(n,f) VXLAN_GBP_GPFLAGS_##f = n,
+ foreach_vxlan_gbp_gpflags
+#undef _
+} __attribute__ ((packed)) vxlan_gbp_gpflags_t;
+
+static inline u32
+vxlan_gbp_get_vni (vxlan_gbp_header_t * h)
+{
+ u32 vni_reserved_host_byte_order;
+
+ vni_reserved_host_byte_order = clib_net_to_host_u32 (h->vni_reserved);
+ return vni_reserved_host_byte_order >> 8;
+}
+
+static inline u16
+vxlan_gbp_get_sclass (vxlan_gbp_header_t * h)
+{
+ u16 sclass_host_byte_order;
+
+ sclass_host_byte_order = clib_net_to_host_u16 (h->sclass);
+ return sclass_host_byte_order;
+}
+
+static inline vxlan_gbp_gpflags_t
+vxlan_gbp_get_gpflags (vxlan_gbp_header_t * h)
+{
+ return h->gpflags;
+}
+
+static inline vxlan_gbp_flags_t
+vxlan_gbp_get_flags (vxlan_gbp_header_t * h)
+{
+ return h->flag_g_i;
+}
+
+static inline void
+vxlan_gbp_set_header (vxlan_gbp_header_t * h, u32 vni)
+{
+ h->vni_reserved = clib_host_to_net_u32 (vni << 8);
+ h->flags_sclass_as_u32 = 0;
+ h->flag_g_i = VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G;
+}
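+
+/* Worked example: for vni = 13, vxlan_gbp_set_header() produces
+ * flag_g_i = 0x88 (G and I set), gpflags = 0, sclass = 0 and
+ * vni_reserved equal to 13 << 8 in network byte order, i.e. the wire
+ * bytes 00 00 0d 00; vxlan_gbp_get_vni() recovers 13 by shifting the
+ * reserved octet back out. */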
+
+extern u8 *format_vxlan_gbp_header_flags (u8 * s, va_list * args);
+extern u8 *format_vxlan_gbp_header_gpflags (u8 * s, va_list * args);
+
+#endif /* __included_vxlan_gbp_packet_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */