-rw-r--r--  extras/vom/vom/CMakeLists.txt | 18
-rw-r--r--  extras/vom/vom/gbp_bridge_domain.cpp | 232
-rw-r--r--  extras/vom/vom/gbp_bridge_domain.hpp | 183
-rw-r--r--  extras/vom/vom/gbp_bridge_domain_cmds.cpp | 133
-rw-r--r--  extras/vom/vom/gbp_bridge_domain_cmds.hpp | 131
-rw-r--r--  extras/vom/vom/gbp_contract_cmds.hpp | 2
-rw-r--r--  extras/vom/vom/gbp_endpoint_group.cpp | 23
-rw-r--r--  extras/vom/vom/gbp_endpoint_group.hpp | 15
-rw-r--r--  extras/vom/vom/gbp_endpoint_group_cmds.cpp | 7
-rw-r--r--  extras/vom/vom/gbp_endpoint_group_cmds.hpp | 6
-rw-r--r--  extras/vom/vom/gbp_route_domain.cpp | 232
-rw-r--r--  extras/vom/vom/gbp_route_domain.hpp | 183
-rw-r--r--  extras/vom/vom/gbp_route_domain_cmds.cpp | 135
-rw-r--r--  extras/vom/vom/gbp_route_domain_cmds.hpp | 131
-rw-r--r--  extras/vom/vom/gbp_subnet.cpp | 85
-rw-r--r--  extras/vom/vom/gbp_subnet.hpp | 59
-rw-r--r--  extras/vom/vom/gbp_subnet_cmds.cpp | 37
-rw-r--r--  extras/vom/vom/gbp_subnet_cmds.hpp | 4
-rw-r--r--  extras/vom/vom/gbp_vxlan.cpp | 231
-rw-r--r--  extras/vom/vom/gbp_vxlan.hpp | 186
-rw-r--r--  extras/vom/vom/gbp_vxlan_cmds.cpp | 137
-rw-r--r--  extras/vom/vom/gbp_vxlan_cmds.hpp | 135
-rw-r--r--  extras/vom/vom/gbp_vxlan_tunnel.cpp | 186
-rw-r--r--  extras/vom/vom/interface_factory.cpp | 4
-rw-r--r--  src/plugins/gbp/CMakeLists.txt | 18
-rw-r--r--  src/plugins/gbp/gbp.api | 169
-rw-r--r--  src/plugins/gbp/gbp_api.c | 496
-rw-r--r--  src/plugins/gbp/gbp_bridge_domain.c | 368
-rw-r--r--  src/plugins/gbp/gbp_bridge_domain.h | 80
-rw-r--r--  src/plugins/gbp/gbp_classify.c | 72
-rw-r--r--  src/plugins/gbp/gbp_endpoint.c | 899
-rw-r--r--  src/plugins/gbp/gbp_endpoint.h | 187
-rw-r--r--  src/plugins/gbp/gbp_endpoint_group.c | 260
-rw-r--r--  src/plugins/gbp/gbp_endpoint_group.h | 63
-rw-r--r--  src/plugins/gbp/gbp_itf.c | 212
-rw-r--r--  src/plugins/gbp/gbp_itf.h | 42
-rw-r--r--  src/plugins/gbp/gbp_learn.c | 756
-rw-r--r--  src/plugins/gbp/gbp_learn.h | 40
-rw-r--r--  src/plugins/gbp/gbp_policy.c | 139
-rw-r--r--  src/plugins/gbp/gbp_policy_dpo.c | 68
-rw-r--r--  src/plugins/gbp/gbp_recirc.c | 85
-rw-r--r--  src/plugins/gbp/gbp_recirc.h | 8
-rw-r--r--  src/plugins/gbp/gbp_route_domain.c | 413
-rw-r--r--  src/plugins/gbp/gbp_route_domain.h | 85
-rw-r--r--  src/plugins/gbp/gbp_scanner.c | 104
-rw-r--r--  src/plugins/gbp/gbp_scanner.h | 31
-rw-r--r--  src/plugins/gbp/gbp_subnet.c | 419
-rw-r--r--  src/plugins/gbp/gbp_subnet.h | 26
-rw-r--r--  src/plugins/gbp/gbp_vxlan.c | 880
-rw-r--r--  src/plugins/gbp/gbp_vxlan.h | 133
-rw-r--r--  src/vnet/CMakeLists.txt | 1
-rw-r--r--  src/vnet/ethernet/mac_address.h | 13
-rw-r--r--  src/vnet/interface_funcs.h | 7
-rw-r--r--  src/vnet/ip/ip_types_api.c | 1
-rw-r--r--  src/vnet/l2/l2.api | 1
-rw-r--r--  src/vnet/l2/l2_input.h | 3
-rw-r--r--  src/vnet/l2/l2_output.h | 3
-rw-r--r--  src/vnet/vxlan-gbp/decap.c | 127
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp.api | 2
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp.c | 99
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp.h | 40
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp_api.c | 3
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp_packet.c | 60
-rw-r--r--  src/vnet/vxlan-gbp/vxlan_gbp_packet.h | 25
-rw-r--r--  test/framework.py | 30
-rw-r--r--  test/test_gbp.py | 1405
-rw-r--r--  test/test_l2_flood.py | 2
-rw-r--r--  test/vpp_interface.py | 3
-rw-r--r--  test/vpp_ip.py | 12
-rw-r--r--  test/vpp_ip_route.py | 100
-rw-r--r--  test/vpp_l2.py | 221
-rw-r--r--  test/vpp_mac.py | 15
-rw-r--r--  test/vpp_papi_provider.py | 166
-rw-r--r--  test/vpp_vxlan_gbp_tunnel.py | 69
74 files changed, 9896 insertions, 1060 deletions
diff --git a/extras/vom/vom/CMakeLists.txt b/extras/vom/vom/CMakeLists.txt
index 5a5e5a7df0e..a2c35addb05 100644
--- a/extras/vom/vom/CMakeLists.txt
+++ b/extras/vom/vom/CMakeLists.txt
@@ -68,16 +68,22 @@ endif()
if(GBP_FILE)
list(APPEND VOM_SOURCES
- gbp_recirc_cmds.cpp
- gbp_recirc.cpp
- gbp_subnet_cmds.cpp
- gbp_subnet.cpp
+ gbp_contract_cmds.cpp
+ gbp_contract.cpp
+ gbp_bridge_domain_cmds.cpp
+ gbp_bridge_domain.cpp
gbp_endpoint_cmds.cpp
gbp_endpoint.cpp
gbp_endpoint_group_cmds.cpp
gbp_endpoint_group.cpp
- gbp_contract_cmds.cpp
- gbp_contract.cpp
+ gbp_recirc_cmds.cpp
+ gbp_recirc.cpp
+ gbp_route_domain_cmds.cpp
+ gbp_route_domain.cpp
+ gbp_subnet_cmds.cpp
+ gbp_subnet.cpp
+ gbp_vxlan.cpp
+ gbp_vxlan_cmds.cpp
)
endif()
diff --git a/extras/vom/vom/gbp_bridge_domain.cpp b/extras/vom/vom/gbp_bridge_domain.cpp
new file mode 100644
index 00000000000..6cad1954e5d
--- /dev/null
+++ b/extras/vom/vom/gbp_bridge_domain.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vom/gbp_bridge_domain.hpp"
+#include "vom/gbp_bridge_domain_cmds.hpp"
+#include "vom/interface.hpp"
+#include "vom/l2_binding.hpp"
+#include "vom/singular_db_funcs.hpp"
+
+namespace VOM {
+
+/**
+ * A DB of all the GBP bridge-domains, keyed on the bridge-domain ID
+ */
+singular_db<uint32_t, gbp_bridge_domain> gbp_bridge_domain::m_db;
+
+gbp_bridge_domain::event_handler gbp_bridge_domain::m_evh;
+
+/**
+ * Construct a new object matching the desired state
+ */
+gbp_bridge_domain::gbp_bridge_domain(const bridge_domain& bd)
+ : m_id(bd.id())
+ , m_bd(bd.singular())
+{
+}
+
+gbp_bridge_domain::gbp_bridge_domain(const bridge_domain& bd,
+ const interface& bvi,
+ const interface& uu_fwd)
+ : m_id(bd.id())
+ , m_bd(bd.singular())
+ , m_bvi(bvi.singular())
+ , m_uu_fwd(uu_fwd.singular())
+{
+}
+
+gbp_bridge_domain::gbp_bridge_domain(const gbp_bridge_domain& bd)
+ : m_id(bd.id())
+ , m_bd(bd.m_bd)
+{
+}
+
+const gbp_bridge_domain::key_t
+gbp_bridge_domain::key() const
+{
+ return (m_bd->key());
+}
+
+uint32_t
+gbp_bridge_domain::id() const
+{
+ return (m_bd->id());
+}
+
+bool
+gbp_bridge_domain::operator==(const gbp_bridge_domain& b) const
+{
+ bool equal = true;
+
+ if (m_bvi && b.m_bvi)
+ equal &= (m_bvi->key() == b.m_bvi->key());
+ else if (!m_bvi && !b.m_bvi)
+ ;
+ else
+ equal = false;
+
+ if (m_uu_fwd && b.m_uu_fwd)
+ equal &= (m_uu_fwd->key() == b.m_uu_fwd->key());
+ else if (!m_uu_fwd && !b.m_uu_fwd)
+ ;
+ else
+ equal = false;
+
+ return ((m_bd->key() == b.m_bd->key()) && equal);
+}
+
+void
+gbp_bridge_domain::sweep()
+{
+ if (rc_t::OK == m_id.rc()) {
+ HW::enqueue(new gbp_bridge_domain_cmds::delete_cmd(m_id));
+ }
+ HW::write();
+}
+
+void
+gbp_bridge_domain::replay()
+{
+ if (rc_t::OK == m_id.rc()) {
+ if (m_bvi && m_uu_fwd)
+ HW::enqueue(new gbp_bridge_domain_cmds::create_cmd(m_id, m_bvi->handle(),
+ m_uu_fwd->handle()));
+ else
+ HW::enqueue(new gbp_bridge_domain_cmds::create_cmd(
+ m_id, handle_t::INVALID, handle_t::INVALID));
+ }
+}
+
+gbp_bridge_domain::~gbp_bridge_domain()
+{
+ sweep();
+
+ // not in the DB anymore.
+ m_db.release(m_id.data(), this);
+}
+
+std::string
+gbp_bridge_domain::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-bridge-domain:[" << m_bd->to_string() << "]";
+
+ return (s.str());
+}
+
+std::shared_ptr<gbp_bridge_domain>
+gbp_bridge_domain::find(const key_t& key)
+{
+ return (m_db.find(key));
+}
+
+void
+gbp_bridge_domain::update(const gbp_bridge_domain& desired)
+{
+ /*
+ * the desired state is always that the interface should be created
+ */
+ if (rc_t::OK != m_id.rc()) {
+ if (m_bvi && m_uu_fwd)
+ HW::enqueue(new gbp_bridge_domain_cmds::create_cmd(m_id, m_bvi->handle(),
+ m_uu_fwd->handle()));
+ else
+ HW::enqueue(new gbp_bridge_domain_cmds::create_cmd(
+ m_id, handle_t::INVALID, handle_t::INVALID));
+ }
+}
+
+std::shared_ptr<gbp_bridge_domain>
+gbp_bridge_domain::find_or_add(const gbp_bridge_domain& temp)
+{
+ return (m_db.find_or_add(temp.m_id.data(), temp));
+}
+
+std::shared_ptr<gbp_bridge_domain>
+gbp_bridge_domain::singular() const
+{
+ return find_or_add(*this);
+}
+
+void
+gbp_bridge_domain::dump(std::ostream& os)
+{
+ db_dump(m_db, os);
+}
+
+void
+gbp_bridge_domain::event_handler::handle_populate(const client_db::key_t& key)
+{
+ /*
+ * dump VPP Bridge domains
+ */
+ std::shared_ptr<gbp_bridge_domain_cmds::dump_cmd> cmd =
+ std::make_shared<gbp_bridge_domain_cmds::dump_cmd>();
+
+ HW::enqueue(cmd);
+ HW::write();
+
+ for (auto& record : *cmd) {
+ auto& payload = record.get_payload();
+
+ std::shared_ptr<interface> uu_fwd =
+ interface::find(payload.bd.uu_fwd_sw_if_index);
+ std::shared_ptr<interface> bvi =
+ interface::find(payload.bd.bvi_sw_if_index);
+
+ if (uu_fwd && bvi) {
+ gbp_bridge_domain bd(payload.bd.bd_id, *bvi, *uu_fwd);
+ OM::commit(key, bd);
+ VOM_LOG(log_level_t::DEBUG) << "dump: " << bd.to_string();
+ } else {
+ gbp_bridge_domain bd(payload.bd.bd_id);
+ OM::commit(key, bd);
+ VOM_LOG(log_level_t::DEBUG) << "dump: " << bd.to_string();
+ }
+ }
+}
+
+gbp_bridge_domain::event_handler::event_handler()
+{
+ OM::register_listener(this);
+ inspect::register_handler({ "gbd", "gbridge" }, "GBP Bridge Domains", this);
+}
+
+void
+gbp_bridge_domain::event_handler::handle_replay()
+{
+ m_db.replay();
+}
+
+dependency_t
+gbp_bridge_domain::event_handler::order() const
+{
+ return (dependency_t::TABLE);
+}
+
+void
+gbp_bridge_domain::event_handler::show(std::ostream& os)
+{
+ db_dump(m_db, os);
+}
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
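A minimal usage sketch for the new gbp_bridge_domain class, assuming the usual VOM OM::write() client flow; the client key "my-app" and the IDs are illustrative assumptions, not part of this change:

    // create the underlying L2 bridge-domain, then its GBP counterpart
    VOM::bridge_domain bd(100);
    VOM::OM::write("my-app", bd);

    VOM::gbp_bridge_domain gbd(bd);   // keyed on the bridge-domain's key
    VOM::OM::write("my-app", gbd);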
diff --git a/extras/vom/vom/gbp_bridge_domain.hpp b/extras/vom/vom/gbp_bridge_domain.hpp
new file mode 100644
index 00000000000..0d7d58ecc05
--- /dev/null
+++ b/extras/vom/vom/gbp_bridge_domain.hpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VOM_GBP_BRIDGE_DOMAIN_H__
+#define __VOM_GBP_BRIDGE_DOMAIN_H__
+
+#include "vom/bridge_domain.hpp"
+#include "vom/interface.hpp"
+#include "vom/singular_db.hpp"
+#include "vom/types.hpp"
+
+namespace VOM {
+
+/**
+ * A GBP Bridge Domain: a VPP bridge-domain paired with its GBP BVI and
+ * unknown-unicast forwarding interfaces
+ */
+class gbp_bridge_domain : public object_base
+{
+public:
+ /**
+ * The key for a GBP bridge-domain is the key of the underlying bridge-domain
+ */
+ typedef bridge_domain::key_t key_t;
+
+ /**
+ * Construct a GBP bridge_domain
+ */
+ gbp_bridge_domain(const bridge_domain& bd);
+
+ gbp_bridge_domain(const bridge_domain& bd,
+ const interface& bvi,
+ const interface& uu_fwd);
+
+ /**
+ * Copy Construct
+ */
+ gbp_bridge_domain(const gbp_bridge_domain& r);
+
+ /**
+ * Destructor
+ */
+ ~gbp_bridge_domain();
+
+ /**
+ * Return the object's key
+ */
+ const key_t key() const;
+
+ /**
+ * Return the bridge domain's VPP ID
+ */
+ uint32_t id() const;
+
+ /**
+ * comparison operator
+ */
+ bool operator==(const gbp_bridge_domain& bdae) const;
+
+ /**
+ * Return the matching 'singular instance'
+ */
+ std::shared_ptr<gbp_bridge_domain> singular() const;
+
+ /**
+ * Find the instance of the GBP bridge-domain in the OM
+ */
+ static std::shared_ptr<gbp_bridge_domain> find(const key_t& k);
+
+ /**
+ * Dump all GBP bridge-domains into the stream provided
+ */
+ static void dump(std::ostream& os);
+
+ /**
+ * replay the object to create it in hardware
+ */
+ void replay(void);
+
+ /**
+ * Convert to string for debugging
+ */
+ std::string to_string() const;
+
+private:
+ /**
+ * Class definition for listeners to OM events
+ */
+ class event_handler : public OM::listener, public inspect::command_handler
+ {
+ public:
+ event_handler();
+ virtual ~event_handler() = default;
+
+ /**
+ * Handle a populate event
+ */
+ void handle_populate(const client_db::key_t& key);
+
+ /**
+ * Handle a replay event
+ */
+ void handle_replay();
+
+ /**
+ * Show the object in the Singular DB
+ */
+ void show(std::ostream& os);
+
+ /**
+ * Get the sortable Id of the listener
+ */
+ dependency_t order() const;
+ };
+
+ /**
+ * event_handler to register with OM
+ */
+ static event_handler m_evh;
+
+ /**
+ * Commit the accumulated changes into VPP, i.e. perform a 'HW' write.
+ */
+ void update(const gbp_bridge_domain& obj);
+
+ /**
+ * Find or add the instance of the GBP bridge-domain in the OM
+ */
+ static std::shared_ptr<gbp_bridge_domain> find_or_add(
+ const gbp_bridge_domain& temp);
+
+ /*
+ * It's the VPPHW class that updates the objects in HW
+ */
+ friend class OM;
+
+ /**
+ * It's the singular_db class that calls replay()
+ */
+ friend class singular_db<key_t, gbp_bridge_domain>;
+
+ /**
+ * Sweep/reap the object if still stale
+ */
+ void sweep(void);
+
+ /**
+ * HW configuration for the result of creating the bridge-domain
+ */
+ HW::item<uint32_t> m_id;
+
+ std::shared_ptr<bridge_domain> m_bd;
+ std::shared_ptr<interface> m_bvi;
+ std::shared_ptr<interface> m_uu_fwd;
+
+ /**
+ * A map of all bridge_domains
+ */
+ static singular_db<key_t, gbp_bridge_domain> m_db;
+};
+
+}; // namespace
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
+
+#endif
diff --git a/extras/vom/vom/gbp_bridge_domain_cmds.cpp b/extras/vom/vom/gbp_bridge_domain_cmds.cpp
new file mode 100644
index 00000000000..60f7cddd9f3
--- /dev/null
+++ b/extras/vom/vom/gbp_bridge_domain_cmds.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vom/gbp_bridge_domain_cmds.hpp"
+
+namespace VOM {
+namespace gbp_bridge_domain_cmds {
+
+create_cmd::create_cmd(HW::item<uint32_t>& item,
+ const handle_t bvi,
+ const handle_t uu_fwd)
+ : rpc_cmd(item)
+ , m_bvi(bvi)
+ , m_uu_fwd(uu_fwd)
+{
+}
+
+bool
+create_cmd::operator==(const create_cmd& other) const
+{
+ return ((m_hw_item.data() == other.m_hw_item.data()) &&
+ (m_bvi == other.m_bvi) && (m_uu_fwd == other.m_uu_fwd));
+}
+
+rc_t
+create_cmd::issue(connection& con)
+{
+ msg_t req(con.ctx(), std::ref(*this));
+
+ auto& payload = req.get_request().get_payload();
+
+ payload.bd.bd_id = m_hw_item.data();
+ payload.bd.bvi_sw_if_index = m_bvi.value();
+ payload.bd.uu_fwd_sw_if_index = m_uu_fwd.value();
+
+ VAPI_CALL(req.execute());
+
+ return (wait());
+}
+
+std::string
+create_cmd::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-bridge-domain: " << m_hw_item.to_string()
+ << " bvi:" << m_bvi.to_string() << " uu-fwd:" << m_uu_fwd.to_string();
+
+ return (s.str());
+}
+
+delete_cmd::delete_cmd(HW::item<uint32_t>& item)
+ : rpc_cmd(item)
+{
+}
+
+bool
+delete_cmd::operator==(const delete_cmd& other) const
+{
+ return (m_hw_item.data() == other.m_hw_item.data());
+}
+
+rc_t
+delete_cmd::issue(connection& con)
+{
+ msg_t req(con.ctx(), std::ref(*this));
+
+ auto& payload = req.get_request().get_payload();
+
+ payload.bd_id = m_hw_item.data();
+
+ VAPI_CALL(req.execute());
+
+ wait();
+ m_hw_item.set(rc_t::NOOP);
+
+ return rc_t::OK;
+}
+
+std::string
+delete_cmd::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-bridge-domain: " << m_hw_item.to_string();
+
+ return (s.str());
+}
+
+bool
+dump_cmd::operator==(const dump_cmd& other) const
+{
+ return (true);
+}
+
+rc_t
+dump_cmd::issue(connection& con)
+{
+ m_dump.reset(new msg_t(con.ctx(), std::ref(*this)));
+
+ VAPI_CALL(m_dump->execute());
+
+ wait();
+
+ return rc_t::OK;
+}
+
+std::string
+dump_cmd::to_string() const
+{
+ return ("gbp-bridge-domain-dump");
+}
+
+}; // namespace gbp_bridge_domain_cmds
+}; // namespace VOM
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
diff --git a/extras/vom/vom/gbp_bridge_domain_cmds.hpp b/extras/vom/vom/gbp_bridge_domain_cmds.hpp
new file mode 100644
index 00000000000..e7c501fc598
--- /dev/null
+++ b/extras/vom/vom/gbp_bridge_domain_cmds.hpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VOM_GBP_BRIDGE_DOMAIN_CMDS_H__
+#define __VOM_GBP_BRIDGE_DOMAIN_CMDS_H__
+
+#include "vom/dump_cmd.hpp"
+#include "vom/gbp_bridge_domain.hpp"
+#include "vom/rpc_cmd.hpp"
+
+#include <vapi/gbp.api.vapi.hpp>
+
+namespace VOM {
+namespace gbp_bridge_domain_cmds {
+/**
+ * A command class that creates a GBP Bridge-Domain
+ */
+class create_cmd
+ : public rpc_cmd<HW::item<uint32_t>, vapi::Gbp_bridge_domain_add>
+{
+public:
+ /**
+ * Constructor
+ */
+ create_cmd(HW::item<uint32_t>& item,
+ const handle_t bvi,
+ const handle_t uu_fwd);
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const create_cmd& i) const;
+
+private:
+ const handle_t m_bvi;
+ const handle_t m_uu_fwd;
+};
+
+/**
+ * A cmd class that deletes a GBP Bridge-Domain
+ */
+class delete_cmd
+ : public rpc_cmd<HW::item<uint32_t>, vapi::Gbp_bridge_domain_del>
+{
+public:
+ /**
+ * Constructor
+ */
+ delete_cmd(HW::item<uint32_t>& item);
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const delete_cmd& i) const;
+};
+
+/**
+ * A cmd class that Dumps all the bridge domains
+ */
+class dump_cmd : public VOM::dump_cmd<vapi::Gbp_bridge_domain_dump>
+{
+public:
+ /**
+ * Constructor
+ */
+ dump_cmd() = default;
+ dump_cmd(const dump_cmd& d) = default;
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const dump_cmd& i) const;
+
+private:
+ /**
+ * HW return code
+ */
+ HW::item<bool> item;
+};
+
+}; // gbp_bridge_domain_cmds
+}; // VOM
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
+
+#endif
diff --git a/extras/vom/vom/gbp_contract_cmds.hpp b/extras/vom/vom/gbp_contract_cmds.hpp
index 31b9a00ed14..7e4447663fd 100644
--- a/extras/vom/vom/gbp_contract_cmds.hpp
+++ b/extras/vom/vom/gbp_contract_cmds.hpp
@@ -91,7 +91,7 @@ private:
};
/**
- * A cmd class that Dumps all the GBP endpoints
+ * A cmd class that Dumps all the GBP contracts
*/
class dump_cmd : public VOM::dump_cmd<vapi::Gbp_contract_dump>
{
diff --git a/extras/vom/vom/gbp_endpoint_group.cpp b/extras/vom/vom/gbp_endpoint_group.cpp
index d9f0d38d594..bd68d77a064 100644
--- a/extras/vom/vom/gbp_endpoint_group.cpp
+++ b/extras/vom/vom/gbp_endpoint_group.cpp
@@ -26,8 +26,8 @@ gbp_endpoint_group::event_handler gbp_endpoint_group::m_evh;
gbp_endpoint_group::gbp_endpoint_group(epg_id_t epg_id,
const interface& itf,
- const route_domain& rd,
- const bridge_domain& bd)
+ const gbp_route_domain& rd,
+ const gbp_bridge_domain& bd)
: m_hw(false)
, m_epg_id(epg_id)
, m_itf(itf.singular())
@@ -64,10 +64,10 @@ gbp_endpoint_group::id() const
}
bool
-gbp_endpoint_group::operator==(const gbp_endpoint_group& gbpe) const
+gbp_endpoint_group::operator==(const gbp_endpoint_group& gg) const
{
- return (key() == gbpe.key() && (m_itf == gbpe.m_itf) && (m_rd == gbpe.m_rd) &&
- (m_bd == gbpe.m_bd));
+ return (key() == gg.key() && (m_itf == gg.m_itf) && (m_rd == gg.m_rd) &&
+ (m_bd == gg.m_bd));
}
void
@@ -84,7 +84,7 @@ gbp_endpoint_group::replay()
{
if (m_hw) {
HW::enqueue(new gbp_endpoint_group_cmds::create_cmd(
- m_hw, m_epg_id, m_bd->id(), m_rd->table_id(), m_itf->handle()));
+ m_hw, m_epg_id, m_bd->id(), m_rd->id(), m_itf->handle()));
}
}
@@ -104,7 +104,7 @@ gbp_endpoint_group::update(const gbp_endpoint_group& r)
{
if (rc_t::OK != m_hw.rc()) {
HW::enqueue(new gbp_endpoint_group_cmds::create_cmd(
- m_hw, m_epg_id, m_bd->id(), m_rd->table_id(), m_itf->handle()));
+ m_hw, m_epg_id, m_bd->id(), m_rd->id(), m_itf->handle()));
}
}
@@ -159,12 +159,13 @@ gbp_endpoint_group::event_handler::handle_populate(const client_db::key_t& key)
std::shared_ptr<interface> itf =
interface::find(payload.epg.uplink_sw_if_index);
- std::shared_ptr<route_domain> rd =
- route_domain::find(payload.epg.ip4_table_id);
- std::shared_ptr<bridge_domain> bd = bridge_domain::find(payload.epg.bd_id);
+ std::shared_ptr<gbp_route_domain> rd =
+ gbp_route_domain::find(payload.epg.rd_id);
+ std::shared_ptr<gbp_bridge_domain> bd =
+ gbp_bridge_domain::find(payload.epg.bd_id);
VOM_LOG(log_level_t::DEBUG) << "data: [" << payload.epg.uplink_sw_if_index
- << ", " << payload.epg.ip4_table_id << ", "
+ << ", " << payload.epg.rd_id << ", "
<< payload.epg.bd_id << "]";
if (itf && bd && rd) {
diff --git a/extras/vom/vom/gbp_endpoint_group.hpp b/extras/vom/vom/gbp_endpoint_group.hpp
index f7c900f20be..d609b89fe2f 100644
--- a/extras/vom/vom/gbp_endpoint_group.hpp
+++ b/extras/vom/vom/gbp_endpoint_group.hpp
@@ -20,8 +20,8 @@
#include "vom/singular_db.hpp"
#include "vom/types.hpp"
-#include "vom/bridge_domain.hpp"
-#include "vom/route_domain.hpp"
+#include "vom/gbp_bridge_domain.hpp"
+#include "vom/gbp_route_domain.hpp"
namespace VOM {
@@ -46,8 +46,11 @@ public:
*/
gbp_endpoint_group(epg_id_t epg_id,
const interface& itf,
- const route_domain& rd,
- const bridge_domain& bd);
+ const gbp_route_domain& rd,
+ const gbp_bridge_domain& bd);
+ gbp_endpoint_group(epg_id_t epg_id,
+ const gbp_route_domain& rd,
+ const gbp_bridge_domain& bd);
/**
* Copy Construct
@@ -179,12 +182,12 @@ private:
/**
* The route-domain the EPG uses
*/
- std::shared_ptr<route_domain> m_rd;
+ std::shared_ptr<gbp_route_domain> m_rd;
/**
* The bridge-domain the EPG uses
*/
- std::shared_ptr<bridge_domain> m_bd;
+ std::shared_ptr<gbp_bridge_domain> m_bd;
/**
* A map of all bridge_domains
diff --git a/extras/vom/vom/gbp_endpoint_group_cmds.cpp b/extras/vom/vom/gbp_endpoint_group_cmds.cpp
index a7b46f8a80b..45523a6326e 100644
--- a/extras/vom/vom/gbp_endpoint_group_cmds.cpp
+++ b/extras/vom/vom/gbp_endpoint_group_cmds.cpp
@@ -44,12 +44,10 @@ create_cmd::issue(connection& con)
msg_t req(con.ctx(), std::ref(*this));
auto& payload = req.get_request().get_payload();
- payload.is_add = 1;
payload.epg.uplink_sw_if_index = m_itf.value();
payload.epg.epg_id = m_epg_id;
payload.epg.bd_id = m_bd_id;
- payload.epg.ip4_table_id = m_rd_id;
- payload.epg.ip6_table_id = m_rd_id;
+ payload.epg.rd_id = m_rd_id;
VAPI_CALL(req.execute());
@@ -85,8 +83,7 @@ delete_cmd::issue(connection& con)
msg_t req(con.ctx(), std::ref(*this));
auto& payload = req.get_request().get_payload();
- payload.is_add = 0;
- payload.epg.epg_id = m_epg_id;
+ payload.epg_id = m_epg_id;
VAPI_CALL(req.execute());
diff --git a/extras/vom/vom/gbp_endpoint_group_cmds.hpp b/extras/vom/vom/gbp_endpoint_group_cmds.hpp
index 4cf88cff116..39f69e081ef 100644
--- a/extras/vom/vom/gbp_endpoint_group_cmds.hpp
+++ b/extras/vom/vom/gbp_endpoint_group_cmds.hpp
@@ -27,8 +27,7 @@ namespace gbp_endpoint_group_cmds {
/**
* A command class that creates or updates the GBP endpoint_group
*/
-class create_cmd
- : public rpc_cmd<HW::item<bool>, vapi::Gbp_endpoint_group_add_del>
+class create_cmd : public rpc_cmd<HW::item<bool>, vapi::Gbp_endpoint_group_add>
{
public:
/**
@@ -65,8 +64,7 @@ private:
/**
* A cmd class that deletes a GBP endpoint_group
*/
-class delete_cmd
- : public rpc_cmd<HW::item<bool>, vapi::Gbp_endpoint_group_add_del>
+class delete_cmd : public rpc_cmd<HW::item<bool>, vapi::Gbp_endpoint_group_del>
{
public:
/**
diff --git a/extras/vom/vom/gbp_route_domain.cpp b/extras/vom/vom/gbp_route_domain.cpp
new file mode 100644
index 00000000000..2786297b0c8
--- /dev/null
+++ b/extras/vom/vom/gbp_route_domain.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vom/gbp_route_domain.hpp"
+#include "vom/gbp_route_domain_cmds.hpp"
+#include "vom/interface.hpp"
+#include "vom/l2_binding.hpp"
+#include "vom/singular_db_funcs.hpp"
+
+namespace VOM {
+
+/**
+ * A DB of all the GBP route-domains, keyed on the route-domain ID
+ */
+singular_db<uint32_t, gbp_route_domain> gbp_route_domain::m_db;
+
+gbp_route_domain::event_handler gbp_route_domain::m_evh;
+
+/**
+ * Construct a new object matching the desired state
+ */
+gbp_route_domain::gbp_route_domain(const gbp_route_domain& rd)
+ : m_id(rd.id())
+ , m_rd(rd.m_rd)
+{
+}
+
+gbp_route_domain::gbp_route_domain(const route_domain& rd,
+ const interface& ip4_uu_fwd,
+ const interface& ip6_uu_fwd)
+ : m_id(rd.table_id())
+ , m_rd(rd.singular())
+ , m_ip4_uu_fwd(ip4_uu_fwd.singular())
+ , m_ip6_uu_fwd(ip6_uu_fwd.singular())
+{
+}
+
+gbp_route_domain::gbp_route_domain(const route_domain& rd)
+ : m_id(rd.table_id())
+ , m_rd(rd.singular())
+{
+}
+
+const gbp_route_domain::key_t
+gbp_route_domain::key() const
+{
+ return (m_rd->key());
+}
+
+uint32_t
+gbp_route_domain::id() const
+{
+ return (m_rd->table_id());
+}
+
+bool
+gbp_route_domain::operator==(const gbp_route_domain& b) const
+{
+ bool equal = true;
+
+ if (m_ip4_uu_fwd && b.m_ip4_uu_fwd)
+ equal &= (m_ip4_uu_fwd->key() == b.m_ip4_uu_fwd->key());
+ else if (!m_ip4_uu_fwd && !b.m_ip4_uu_fwd)
+ ;
+ else
+ equal = false;
+
+ if (m_ip6_uu_fwd && b.m_ip6_uu_fwd)
+ equal &= (m_ip6_uu_fwd->key() == b.m_ip6_uu_fwd->key());
+ else if (!m_ip6_uu_fwd && !b.m_ip6_uu_fwd)
+ ;
+ else
+ equal = false;
+
+ return ((m_rd->key() == b.m_rd->key()) && equal);
+}
+
+void
+gbp_route_domain::sweep()
+{
+ if (rc_t::OK == m_id.rc()) {
+ HW::enqueue(new gbp_route_domain_cmds::delete_cmd(m_id));
+ }
+ HW::write();
+}
+
+void
+gbp_route_domain::replay()
+{
+ if (rc_t::OK == m_id.rc()) {
+ if (m_ip4_uu_fwd && m_ip6_uu_fwd)
+ HW::enqueue(new gbp_route_domain_cmds::create_cmd(
+ m_id, m_ip4_uu_fwd->handle(), m_ip6_uu_fwd->handle()));
+ else
+ HW::enqueue(new gbp_route_domain_cmds::create_cmd(m_id, handle_t::INVALID,
+ handle_t::INVALID));
+ }
+}
+
+gbp_route_domain::~gbp_route_domain()
+{
+ sweep();
+
+ // not in the DB anymore.
+ m_db.release(m_id.data(), this);
+}
+
+std::string
+gbp_route_domain::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-route-domain:[" << m_rd->to_string() << "]";
+
+ return (s.str());
+}
+
+std::shared_ptr<gbp_route_domain>
+gbp_route_domain::find(const key_t& key)
+{
+ return (m_db.find(key));
+}
+
+void
+gbp_route_domain::update(const gbp_route_domain& desired)
+{
+ /*
+ * the desired state is always that the interface should be created
+ */
+ if (rc_t::OK != m_id.rc()) {
+ if (m_ip4_uu_fwd && m_ip6_uu_fwd)
+ HW::enqueue(new gbp_route_domain_cmds::create_cmd(
+ m_id, m_ip4_uu_fwd->handle(), m_ip6_uu_fwd->handle()));
+ else
+ HW::enqueue(new gbp_route_domain_cmds::create_cmd(m_id, handle_t::INVALID,
+ handle_t::INVALID));
+ }
+}
+
+std::shared_ptr<gbp_route_domain>
+gbp_route_domain::find_or_add(const gbp_route_domain& temp)
+{
+ return (m_db.find_or_add(temp.m_id.data(), temp));
+}
+
+std::shared_ptr<gbp_route_domain>
+gbp_route_domain::singular() const
+{
+ return find_or_add(*this);
+}
+
+void
+gbp_route_domain::dump(std::ostream& os)
+{
+ db_dump(m_db, os);
+}
+
+void
+gbp_route_domain::event_handler::handle_populate(const client_db::key_t& key)
+{
+ /*
+ * dump VPP Route domains
+ */
+ std::shared_ptr<gbp_route_domain_cmds::dump_cmd> cmd =
+ std::make_shared<gbp_route_domain_cmds::dump_cmd>();
+
+ HW::enqueue(cmd);
+ HW::write();
+
+ for (auto& record : *cmd) {
+ auto& payload = record.get_payload();
+
+ std::shared_ptr<interface> ip6_uu_fwd =
+ interface::find(payload.rd.ip6_uu_sw_if_index);
+ std::shared_ptr<interface> ip4_uu_fwd =
+ interface::find(payload.rd.ip4_uu_sw_if_index);
+
+ if (ip6_uu_fwd && ip4_uu_fwd) {
+ gbp_route_domain rd(payload.rd.rd_id, *ip4_uu_fwd, *ip6_uu_fwd);
+ OM::commit(key, rd);
+ VOM_LOG(log_level_t::DEBUG) << "dump: " << rd.to_string();
+ } else {
+ gbp_route_domain rd(payload.rd.rd_id);
+ OM::commit(key, rd);
+ VOM_LOG(log_level_t::DEBUG) << "dump: " << rd.to_string();
+ }
+ }
+}
+
+gbp_route_domain::event_handler::event_handler()
+{
+ OM::register_listener(this);
+ inspect::register_handler({ "grd", "groute" }, "GBP Route Domains", this);
+}
+
+void
+gbp_route_domain::event_handler::handle_replay()
+{
+ m_db.replay();
+}
+
+dependency_t
+gbp_route_domain::event_handler::order() const
+{
+ return (dependency_t::TABLE);
+}
+
+void
+gbp_route_domain::event_handler::show(std::ostream& os)
+{
+ db_dump(m_db, os);
+}
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
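A similar sketch for gbp_route_domain, assuming the same hypothetical client key; the GBP object is keyed on the VRF's table-id:

    // create the underlying route-domain (VRF), then its GBP counterpart
    VOM::route_domain rd(1);
    VOM::OM::write("my-app", rd);

    VOM::gbp_route_domain grd(rd);    // id() returns the VRF table-id
    VOM::OM::write("my-app", grd);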
diff --git a/extras/vom/vom/gbp_route_domain.hpp b/extras/vom/vom/gbp_route_domain.hpp
new file mode 100644
index 00000000000..6dc37d1981e
--- /dev/null
+++ b/extras/vom/vom/gbp_route_domain.hpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VOM_GBP_ROUTE_DOMAIN_H__
+#define __VOM_GBP_ROUTE_DOMAIN_H__
+
+#include "vom/interface.hpp"
+#include "vom/route_domain.hpp"
+#include "vom/singular_db.hpp"
+#include "vom/types.hpp"
+
+namespace VOM {
+
+/**
+ * A GBP Route Domain: a VPP route-domain (VRF) paired with its GBP IPv4 and
+ * IPv6 unknown-unicast forwarding interfaces
+ */
+class gbp_route_domain : public object_base
+{
+public:
+ /**
+ * The key for a GBP route-domain is the key of the underlying route-domain
+ */
+ typedef route_domain::key_t key_t;
+
+ /**
+ * Construct a GBP route_domain
+ */
+ gbp_route_domain(const route_domain& rd);
+
+ gbp_route_domain(const route_domain& rd,
+ const interface& ip4_uu_fwd,
+ const interface& ip6_uu_fwd);
+
+ /**
+ * Copy Construct
+ */
+ gbp_route_domain(const gbp_route_domain& r);
+
+ /**
+ * Destructor
+ */
+ ~gbp_route_domain();
+
+ /**
+ * Return the object's key
+ */
+ const key_t key() const;
+
+ /**
+ * Return the route domain's VPP ID
+ */
+ uint32_t id() const;
+
+ /**
+ * comparison operator
+ */
+ bool operator==(const gbp_route_domain& rdae) const;
+
+ /**
+ * Return the matching 'singular instance'
+ */
+ std::shared_ptr<gbp_route_domain> singular() const;
+
+ /**
+ * Find the instance of the GBP route-domain in the OM
+ */
+ static std::shared_ptr<gbp_route_domain> find(const key_t& k);
+
+ /**
+ * Dump all GBP route-domains into the stream provided
+ */
+ static void dump(std::ostream& os);
+
+ /**
+ * replay the object to create it in hardware
+ */
+ void replay(void);
+
+ /**
+ * Convert to string for debugging
+ */
+ std::string to_string() const;
+
+private:
+ /**
+ * Class definition for listeners to OM events
+ */
+ class event_handler : public OM::listener, public inspect::command_handler
+ {
+ public:
+ event_handler();
+ virtual ~event_handler() = default;
+
+ /**
+ * Handle a populate event
+ */
+ void handle_populate(const client_db::key_t& key);
+
+ /**
+ * Handle a replay event
+ */
+ void handle_replay();
+
+ /**
+ * Show the object in the Singular DB
+ */
+ void show(std::ostream& os);
+
+ /**
+ * Get the sortable Id of the listener
+ */
+ dependency_t order() const;
+ };
+
+ /**
+ * event_handler to register with OM
+ */
+ static event_handler m_evh;
+
+ /**
+ * Commit the accumulated changes into VPP, i.e. perform a 'HW' write.
+ */
+ void update(const gbp_route_domain& obj);
+
+ /**
+ * Find or add the instance of the GBP route-domain in the OM
+ */
+ static std::shared_ptr<gbp_route_domain> find_or_add(
+ const gbp_route_domain& temp);
+
+ /*
+ * It's the VPPHW class that updates the objects in HW
+ */
+ friend class OM;
+
+ /**
+ * It's the singular_db class that calls replay()
+ */
+ friend class singular_db<key_t, gbp_route_domain>;
+
+ /**
+ * Sweep/reap the object if still stale
+ */
+ void sweep(void);
+
+ /**
+ * HW configuration for the result of creating the route-domain
+ */
+ HW::item<uint32_t> m_id;
+
+ std::shared_ptr<route_domain> m_rd;
+ std::shared_ptr<interface> m_ip4_uu_fwd;
+ std::shared_ptr<interface> m_ip6_uu_fwd;
+
+ /**
+ * A map of all route_domains
+ */
+ static singular_db<key_t, gbp_route_domain> m_db;
+};
+
+}; // namespace
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
+
+#endif
diff --git a/extras/vom/vom/gbp_route_domain_cmds.cpp b/extras/vom/vom/gbp_route_domain_cmds.cpp
new file mode 100644
index 00000000000..0862d8a36f1
--- /dev/null
+++ b/extras/vom/vom/gbp_route_domain_cmds.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vom/gbp_route_domain_cmds.hpp"
+
+namespace VOM {
+namespace gbp_route_domain_cmds {
+
+create_cmd::create_cmd(HW::item<uint32_t>& item,
+ const handle_t ip4_uu_fwd,
+ const handle_t ip6_uu_fwd)
+ : rpc_cmd(item)
+ , m_ip4_uu_fwd(ip4_uu_fwd)
+ , m_ip6_uu_fwd(ip6_uu_fwd)
+{
+}
+
+bool
+create_cmd::operator==(const create_cmd& other) const
+{
+ return ((m_hw_item.data() == other.m_hw_item.data()) &&
+ (m_ip4_uu_fwd == other.m_ip4_uu_fwd) &&
+ (m_ip6_uu_fwd == other.m_ip6_uu_fwd));
+}
+
+rc_t
+create_cmd::issue(connection& con)
+{
+ msg_t req(con.ctx(), std::ref(*this));
+
+ auto& payload = req.get_request().get_payload();
+
+ payload.rd.rd_id = m_hw_item.data();
+ payload.rd.ip4_uu_sw_if_index = m_ip4_uu_fwd.value();
+ payload.rd.ip6_uu_sw_if_index = m_ip6_uu_fwd.value();
+
+ VAPI_CALL(req.execute());
+
+ return (wait());
+}
+
+std::string
+create_cmd::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-route-domain: " << m_hw_item.to_string()
+ << " ip4-uu-fwd:" << m_ip4_uu_fwd.to_string()
+ << " ip6-uu-fwd:" << m_ip6_uu_fwd.to_string();
+
+ return (s.str());
+}
+
+delete_cmd::delete_cmd(HW::item<uint32_t>& item)
+ : rpc_cmd(item)
+{
+}
+
+bool
+delete_cmd::operator==(const delete_cmd& other) const
+{
+ return (m_hw_item.data() == other.m_hw_item.data());
+}
+
+rc_t
+delete_cmd::issue(connection& con)
+{
+ msg_t req(con.ctx(), std::ref(*this));
+
+ auto& payload = req.get_request().get_payload();
+
+ payload.rd_id = m_hw_item.data();
+
+ VAPI_CALL(req.execute());
+
+ wait();
+ m_hw_item.set(rc_t::NOOP);
+
+ return rc_t::OK;
+}
+
+std::string
+delete_cmd::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-route-domain: " << m_hw_item.to_string();
+
+ return (s.str());
+}
+
+bool
+dump_cmd::operator==(const dump_cmd& other) const
+{
+ return (true);
+}
+
+rc_t
+dump_cmd::issue(connection& con)
+{
+ m_dump.reset(new msg_t(con.ctx(), std::ref(*this)));
+
+ VAPI_CALL(m_dump->execute());
+
+ wait();
+
+ return rc_t::OK;
+}
+
+std::string
+dump_cmd::to_string() const
+{
+ return ("gbp-route-domain-dump");
+}
+
+}; // namespace gbp_route_domain_cmds
+}; // namespace VOM
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
diff --git a/extras/vom/vom/gbp_route_domain_cmds.hpp b/extras/vom/vom/gbp_route_domain_cmds.hpp
new file mode 100644
index 00000000000..249ba901329
--- /dev/null
+++ b/extras/vom/vom/gbp_route_domain_cmds.hpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VOM_GBP_ROUTE_DOMAIN_CMDS_H__
+#define __VOM_GBP_ROUTE_DOMAIN_CMDS_H__
+
+#include "vom/dump_cmd.hpp"
+#include "vom/gbp_route_domain.hpp"
+#include "vom/rpc_cmd.hpp"
+
+#include <vapi/gbp.api.vapi.hpp>
+
+namespace VOM {
+namespace gbp_route_domain_cmds {
+/**
+ * A command class that creates a GBP Route-Domain
+ */
+class create_cmd
+ : public rpc_cmd<HW::item<uint32_t>, vapi::Gbp_route_domain_add>
+{
+public:
+ /**
+ * Constructor
+ */
+ create_cmd(HW::item<uint32_t>& item,
+ const handle_t ip4_uu_fwd,
+ const handle_t ip6_uu_fwd);
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const create_cmd& i) const;
+
+private:
+ const handle_t m_ip4_uu_fwd;
+ const handle_t m_ip6_uu_fwd;
+};
+
+/**
+ * A cmd class that deletes a GBP Route-Domain
+ */
+class delete_cmd
+ : public rpc_cmd<HW::item<uint32_t>, vapi::Gbp_route_domain_del>
+{
+public:
+ /**
+ * Constructor
+ */
+ delete_cmd(HW::item<uint32_t>& item);
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const delete_cmd& i) const;
+};
+
+/**
+ * A cmd class that Dumps all the route domains
+ */
+class dump_cmd : public VOM::dump_cmd<vapi::Gbp_route_domain_dump>
+{
+public:
+ /**
+ * Constructor
+ */
+ dump_cmd() = default;
+ dump_cmd(const dump_cmd& d) = default;
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const dump_cmd& i) const;
+
+private:
+ /**
+ * HW return code
+ */
+ HW::item<bool> item;
+};
+
+}; // gbp_route_domain_cmds
+}; // VOM
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
+
+#endif
diff --git a/extras/vom/vom/gbp_subnet.cpp b/extras/vom/vom/gbp_subnet.cpp
index 1bc024da854..2221c616dbb 100644
--- a/extras/vom/vom/gbp_subnet.cpp
+++ b/extras/vom/vom/gbp_subnet.cpp
@@ -25,31 +25,38 @@ gbp_subnet::type_t::type_t(int v, const std::string s)
{
}
-const gbp_subnet::type_t gbp_subnet::type_t::INTERNAL(0, "internal");
-const gbp_subnet::type_t gbp_subnet::type_t::EXTERNAL(1, "external");
+const gbp_subnet::type_t gbp_subnet::type_t::STITCHED_INTERNAL(
+ 0,
+ "stitched-internal");
+const gbp_subnet::type_t gbp_subnet::type_t::STITCHED_EXTERNAL(
+ 1,
+ "stitched-external");
+const gbp_subnet::type_t gbp_subnet::type_t::TRANSPORT(1, "transport");
singular_db<gbp_subnet::key_t, gbp_subnet> gbp_subnet::m_db;
gbp_subnet::event_handler gbp_subnet::m_evh;
-gbp_subnet::gbp_subnet(const route_domain& rd, const route::prefix_t& prefix)
+gbp_subnet::gbp_subnet(const gbp_route_domain& rd,
+ const route::prefix_t& prefix,
+ const type_t& type)
: m_hw(false)
, m_rd(rd.singular())
, m_prefix(prefix)
- , m_type(type_t::INTERNAL)
+ , m_type(type)
, m_recirc(nullptr)
, m_epg(nullptr)
{
}
-gbp_subnet::gbp_subnet(const route_domain& rd,
+gbp_subnet::gbp_subnet(const gbp_route_domain& rd,
const route::prefix_t& prefix,
const gbp_recirc& recirc,
const gbp_endpoint_group& epg)
: m_hw(false)
, m_rd(rd.singular())
, m_prefix(prefix)
- , m_type(type_t::EXTERNAL)
+ , m_type(type_t::STITCHED_EXTERNAL)
, m_recirc(recirc.singular())
, m_epg(epg.singular())
{
@@ -88,8 +95,7 @@ void
gbp_subnet::sweep()
{
if (m_hw) {
- HW::enqueue(
- new gbp_subnet_cmds::delete_cmd(m_hw, m_rd->table_id(), m_prefix));
+ HW::enqueue(new gbp_subnet_cmds::delete_cmd(m_hw, m_rd->id(), m_prefix));
}
HW::write();
}
@@ -99,7 +105,7 @@ gbp_subnet::replay()
{
if (m_hw) {
HW::enqueue(new gbp_subnet_cmds::create_cmd(
- m_hw, m_rd->table_id(), m_prefix, (m_type == type_t::INTERNAL),
+ m_hw, m_rd->id(), m_prefix, m_type,
(m_recirc ? m_recirc->handle() : handle_t::INVALID),
(m_epg ? m_epg->id() : ~0)));
}
@@ -126,7 +132,7 @@ gbp_subnet::update(const gbp_subnet& r)
{
if (rc_t::OK != m_hw.rc()) {
HW::enqueue(new gbp_subnet_cmds::create_cmd(
- m_hw, m_rd->table_id(), m_prefix, (m_type == type_t::INTERNAL),
+ m_hw, m_rd->id(), m_prefix, m_type,
(m_recirc ? m_recirc->handle() : handle_t::INVALID),
(m_epg ? m_epg->id() : ~0)));
} else {
@@ -136,7 +142,7 @@ gbp_subnet::update(const gbp_subnet& r)
m_type = r.m_type;
HW::enqueue(new gbp_subnet_cmds::create_cmd(
- m_hw, m_rd->table_id(), m_prefix, (m_type == type_t::INTERNAL),
+ m_hw, m_rd->id(), m_prefix, m_type,
(m_recirc ? m_recirc->handle() : handle_t::INVALID),
(m_epg ? m_epg->id() : ~0)));
}
@@ -192,27 +198,37 @@ gbp_subnet::event_handler::handle_populate(const client_db::key_t& key)
auto& payload = record.get_payload();
route::prefix_t pfx = from_api(payload.subnet.prefix);
- std::shared_ptr<route_domain> rd =
- route_domain::find(payload.subnet.table_id);
+ std::shared_ptr<gbp_route_domain> rd =
+ gbp_route_domain::find(payload.subnet.rd_id);
if (rd) {
- if (payload.subnet.is_internal) {
- gbp_subnet gs(*rd, pfx);
- OM::commit(key, gs);
- VOM_LOG(log_level_t::DEBUG) << "read: " << gs.to_string();
- } else {
- std::shared_ptr<interface> itf =
- interface::find(payload.subnet.sw_if_index);
- std::shared_ptr<gbp_endpoint_group> epg =
- gbp_endpoint_group::find(payload.subnet.epg_id);
-
- if (itf && epg) {
- std::shared_ptr<gbp_recirc> recirc = gbp_recirc::find(itf->key());
-
- if (recirc) {
- gbp_subnet gs(*rd, pfx, *recirc, *epg);
- OM::commit(key, gs);
- VOM_LOG(log_level_t::DEBUG) << "read: " << gs.to_string();
+ switch (payload.subnet.type) {
+ case GBP_API_SUBNET_TRANSPORT: {
+ gbp_subnet gs(*rd, pfx, type_t::TRANSPORT);
+ OM::commit(key, gs);
+ VOM_LOG(log_level_t::DEBUG) << "read: " << gs.to_string();
+ break;
+ }
+ case GBP_API_SUBNET_STITCHED_INTERNAL: {
+ gbp_subnet gs(*rd, pfx, type_t::STITCHED_INTERNAL);
+ OM::commit(key, gs);
+ VOM_LOG(log_level_t::DEBUG) << "read: " << gs.to_string();
+ break;
+ }
+ case GBP_API_SUBNET_STITCHED_EXTERNAL: {
+ std::shared_ptr<interface> itf =
+ interface::find(payload.subnet.sw_if_index);
+ std::shared_ptr<gbp_endpoint_group> epg =
+ gbp_endpoint_group::find(payload.subnet.epg_id);
+
+ if (itf && epg) {
+ std::shared_ptr<gbp_recirc> recirc = gbp_recirc::find(itf->key());
+
+ if (recirc) {
+ gbp_subnet gs(*rd, pfx, *recirc, *epg);
+ OM::commit(key, gs);
+ VOM_LOG(log_level_t::DEBUG) << "read: " << gs.to_string();
+ }
}
}
}
@@ -231,6 +247,15 @@ gbp_subnet::event_handler::show(std::ostream& os)
{
db_dump(m_db, os);
}
+
+std::ostream&
+operator<<(std::ostream& os, const gbp_subnet::key_t& key)
+{
+ os << "[" << key.first << ", " << key.second << "]";
+
+ return os;
+}
+
} // namespace VOM
/*
diff --git a/extras/vom/vom/gbp_subnet.hpp b/extras/vom/vom/gbp_subnet.hpp
index b4adb40ae45..e08f1a25e11 100644
--- a/extras/vom/vom/gbp_subnet.hpp
+++ b/extras/vom/vom/gbp_subnet.hpp
@@ -16,9 +16,11 @@
#ifndef __VOM_GBP_SUBNET_H__
#define __VOM_GBP_SUBNET_H__
+#include <ostream>
+
#include "vom/gbp_endpoint_group.hpp"
#include "vom/gbp_recirc.hpp"
-#include "vom/route.hpp"
+#include "vom/gbp_route_domain.hpp"
#include "vom/singular_db.hpp"
namespace VOM {
@@ -31,17 +33,41 @@ public:
/**
* The key for a GBP subnet; table and prefix
*/
- typedef std::pair<route_domain::key_t, route::prefix_t> key_t;
+ typedef std::pair<gbp_route_domain::key_t, route::prefix_t> key_t;
+
+ struct type_t : public enum_base<type_t>
+ {
+ /**
+ * Internal subnet is reachable through the source EPG's
+ * uplink interface.
+ */
+ const static type_t STITCHED_INTERNAL;
+
+ /**
+ * External subnet requires NAT translation before egress.
+ */
+ const static type_t STITCHED_EXTERNAL;
+
+ /**
+ * A transport subnet, sent via the RD's UU-fwd interface
+ */
+ const static type_t TRANSPORT;
+
+ private:
+ type_t(int v, const std::string s);
+ };
/**
- * Construct an internal GBP subnet
- */
- gbp_subnet(const route_domain& rd, const route::prefix_t& prefix);
+ * Construct an internal GBP subnet
+ */
+ gbp_subnet(const gbp_route_domain& rd,
+ const route::prefix_t& prefix,
+ const type_t& type);
/**
* Construct an external GBP subnet
*/
- gbp_subnet(const route_domain& rd,
+ gbp_subnet(const gbp_route_domain& rd,
const route::prefix_t& prefix,
const gbp_recirc& recirc,
const gbp_endpoint_group& epg);
@@ -92,23 +118,6 @@ public:
std::string to_string() const;
private:
- struct type_t : public enum_base<type_t>
- {
- /**
- * Internal subnet is reachable through the source EPG's
- * uplink interface.
- */
- const static type_t INTERNAL;
-
- /**
- * External subnet requires NAT translation before egress.
- */
- const static type_t EXTERNAL;
-
- private:
- type_t(int v, const std::string s);
- };
-
/**
* Class definition for listeners to OM events
*/
@@ -177,7 +186,7 @@ private:
/**
* the route domain the prefix is in
*/
- const std::shared_ptr<route_domain> m_rd;
+ const std::shared_ptr<gbp_route_domain> m_rd;
/**
* prefix to match
@@ -205,6 +214,8 @@ private:
static singular_db<key_t, gbp_subnet> m_db;
};
+std::ostream& operator<<(std::ostream& os, const gbp_subnet::key_t& key);
+
}; // namespace
/*
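With the old internal/external flag replaced by the three-valued type_t, a transport subnet can be expressed directly. A sketch assuming the grd object from the route-domain example above, and assuming route::prefix_t accepts an address-string/length pair:

    // a transport subnet is forwarded via the route-domain's UU-fwd interface
    VOM::route::prefix_t pfx("10.0.0.0", 24);
    VOM::gbp_subnet gs(grd, pfx, VOM::gbp_subnet::type_t::TRANSPORT);
    VOM::OM::write("my-app", gs);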
diff --git a/extras/vom/vom/gbp_subnet_cmds.cpp b/extras/vom/vom/gbp_subnet_cmds.cpp
index 79fdf175ee1..3dcd652dd19 100644
--- a/extras/vom/vom/gbp_subnet_cmds.cpp
+++ b/extras/vom/vom/gbp_subnet_cmds.cpp
@@ -22,13 +22,13 @@ namespace gbp_subnet_cmds {
create_cmd::create_cmd(HW::item<bool>& item,
route::table_id_t rd,
const route::prefix_t& prefix,
- bool internal,
+ const gbp_subnet::type_t& type,
const handle_t& itf,
epg_id_t epg_id)
: rpc_cmd(item)
, m_rd(rd)
, m_prefix(prefix)
- , m_internal(internal)
+ , m_type(type)
, m_itf(itf)
, m_epg_id(epg_id)
{
@@ -38,8 +38,21 @@ bool
create_cmd::operator==(const create_cmd& other) const
{
return ((m_itf == other.m_itf) && (m_rd == other.m_rd) &&
- (m_prefix == other.m_prefix) && (m_itf == other.m_itf) &&
- (m_epg_id == other.m_epg_id));
+ (m_prefix == other.m_prefix) && (m_type == other.m_type) &&
+ (m_itf == other.m_itf) && (m_epg_id == other.m_epg_id));
+}
+
+static vapi_enum_gbp_subnet_type
+gbp_subnet_type_to_api(const gbp_subnet::type_t& type)
+{
+ if (gbp_subnet::type_t::STITCHED_INTERNAL == type)
+ return (GBP_API_SUBNET_STITCHED_INTERNAL);
+ if (gbp_subnet::type_t::STITCHED_EXTERNAL == type)
+ return (GBP_API_SUBNET_STITCHED_EXTERNAL);
+ if (gbp_subnet::type_t::TRANSPORT == type)
+ return (GBP_API_SUBNET_TRANSPORT);
+
+ return (GBP_API_SUBNET_STITCHED_INTERNAL);
}
rc_t
@@ -49,8 +62,8 @@ create_cmd::issue(connection& con)
auto& payload = req.get_request().get_payload();
payload.is_add = 1;
- payload.subnet.is_internal = m_internal;
- payload.subnet.table_id = m_rd;
+ payload.subnet.type = gbp_subnet_type_to_api(m_type);
+ payload.subnet.rd_id = m_rd;
payload.subnet.sw_if_index = m_itf.value();
payload.subnet.epg_id = m_epg_id;
payload.subnet.prefix = to_api(m_prefix);
@@ -64,9 +77,9 @@ std::string
create_cmd::to_string() const
{
std::ostringstream s;
- s << "gbp-subnet-create: " << m_hw_item.to_string()
- << "internal:" << m_internal << ", " << m_rd << ":" << m_prefix.to_string()
- << " itf:" << m_itf << " epg-id:" << m_epg_id;
+ s << "gbp-subnet-create: " << m_hw_item.to_string() << "type:" << m_type
+ << ", " << m_rd << ":" << m_prefix.to_string() << " itf:" << m_itf
+ << " epg-id:" << m_epg_id;
return (s.str());
}
@@ -93,13 +106,9 @@ delete_cmd::issue(connection& con)
auto& payload = req.get_request().get_payload();
payload.is_add = 0;
- payload.subnet.table_id = m_rd;
+ payload.subnet.rd_id = m_rd;
payload.subnet.prefix = to_api(m_prefix);
- payload.subnet.is_internal = 0;
- payload.subnet.sw_if_index = ~0;
- payload.subnet.epg_id = ~0;
-
VAPI_CALL(req.execute());
return (wait());
diff --git a/extras/vom/vom/gbp_subnet_cmds.hpp b/extras/vom/vom/gbp_subnet_cmds.hpp
index 118303b7178..da2a4c509d1 100644
--- a/extras/vom/vom/gbp_subnet_cmds.hpp
+++ b/extras/vom/vom/gbp_subnet_cmds.hpp
@@ -36,7 +36,7 @@ public:
create_cmd(HW::item<bool>& item,
route::table_id_t rd,
const route::prefix_t& prefix,
- bool internal,
+ const gbp_subnet::type_t& type,
const handle_t& itf,
epg_id_t epg_id);
@@ -58,7 +58,7 @@ public:
private:
const route::table_id_t m_rd;
const route::prefix_t m_prefix;
- const bool m_internal;
+ const gbp_subnet::type_t& m_type;
const handle_t m_itf;
const epg_id_t m_epg_id;
};
diff --git a/extras/vom/vom/gbp_vxlan.cpp b/extras/vom/vom/gbp_vxlan.cpp
new file mode 100644
index 00000000000..af4467abfbb
--- /dev/null
+++ b/extras/vom/vom/gbp_vxlan.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vom/gbp_vxlan.hpp"
+#include "vom/gbp_vxlan_cmds.hpp"
+#include "vom/interface.hpp"
+#include "vom/singular_db_funcs.hpp"
+
+namespace VOM {
+
+const std::string GBP_VXLAN_NAME = "gbp-vxlan";
+
+/**
+ * A DB of all the GBP VXLAN tunnels, keyed on the VNI
+ */
+singular_db<gbp_vxlan::key_t, gbp_vxlan> gbp_vxlan::m_db;
+
+gbp_vxlan::event_handler gbp_vxlan::m_evh;
+
+gbp_vxlan::gbp_vxlan(uint32_t vni, const gbp_route_domain& grd)
+ : interface(mk_name(vni),
+ interface::type_t::UNKNOWN,
+ interface::admin_state_t::UP)
+ , m_vni(vni)
+ , m_gbd()
+ , m_grd(grd.singular())
+{
+}
+gbp_vxlan::gbp_vxlan(uint32_t vni, const gbp_bridge_domain& gbd)
+ : interface(mk_name(vni),
+ interface::type_t::UNKNOWN,
+ interface::admin_state_t::UP)
+ , m_vni(vni)
+ , m_gbd(gbd.singular())
+ , m_grd()
+{
+}
+
+gbp_vxlan::gbp_vxlan(const gbp_vxlan& vt)
+ : interface(vt)
+ , m_vni(vt.m_vni)
+ , m_gbd(vt.m_gbd)
+ , m_grd(vt.m_grd)
+{
+}
+
+std::string
+gbp_vxlan::mk_name(uint32_t vni)
+{
+ std::ostringstream s;
+
+ s << GBP_VXLAN_NAME << "-" << vni;
+
+ return (s.str());
+}
+
+const gbp_vxlan::key_t
+gbp_vxlan::key() const
+{
+ return (m_vni);
+}
+
+bool
+gbp_vxlan::operator==(const gbp_vxlan& vt) const
+{
+ return (m_vni == vt.m_vni);
+}
+
+void
+gbp_vxlan::sweep()
+{
+ if (rc_t::OK == m_hdl) {
+ HW::enqueue(new gbp_vxlan_cmds::delete_cmd(m_hdl, m_vni));
+ }
+ HW::write();
+}
+
+void
+gbp_vxlan::replay()
+{
+ if (rc_t::OK == m_hdl) {
+ if (m_grd)
+ HW::enqueue(new gbp_vxlan_cmds::create_cmd(m_hdl, name(), m_vni, false,
+ m_grd->id()));
+ else if (m_gbd)
+ HW::enqueue(new gbp_vxlan_cmds::create_cmd(m_hdl, name(), m_vni, true,
+ m_gbd->id()));
+ }
+}
+
+gbp_vxlan::~gbp_vxlan()
+{
+ sweep();
+ m_db.release(key(), this);
+}
+
+std::string
+gbp_vxlan::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-vxlan:[" << m_vni << "]";
+
+ return (s.str());
+}
+
+std::shared_ptr<gbp_vxlan>
+gbp_vxlan::find(const key_t key)
+{
+ return (m_db.find(key));
+}
+
+void
+gbp_vxlan::update(const gbp_vxlan& desired)
+{
+ /*
+ * the desired state is always that the interface should be created
+ */
+ if (rc_t::OK != m_hdl) {
+ if (m_grd)
+ HW::enqueue(new gbp_vxlan_cmds::create_cmd(m_hdl, name(), m_vni, false,
+ m_grd->id()));
+ else if (m_gbd)
+ HW::enqueue(new gbp_vxlan_cmds::create_cmd(m_hdl, name(), m_vni, true,
+ m_gbd->id()));
+ }
+}
+
+std::shared_ptr<gbp_vxlan>
+gbp_vxlan::find_or_add(const gbp_vxlan& temp)
+{
+ return (m_db.find_or_add(temp.key(), temp));
+}
+
+std::shared_ptr<gbp_vxlan>
+gbp_vxlan::singular() const
+{
+ return find_or_add(*this);
+}
+
+std::shared_ptr<interface>
+gbp_vxlan::singular_i() const
+{
+ return find_or_add(*this);
+}
+
+void
+gbp_vxlan::dump(std::ostream& os)
+{
+ db_dump(m_db, os);
+}
+
+void
+gbp_vxlan::event_handler::handle_populate(const client_db::key_t& key)
+{
+ /*
+ * dump VPP Bridge domains
+ */
+ std::shared_ptr<gbp_vxlan_cmds::dump_cmd> cmd =
+ std::make_shared<gbp_vxlan_cmds::dump_cmd>();
+
+ HW::enqueue(cmd);
+ HW::write();
+
+ for (auto& record : *cmd) {
+ auto& payload = record.get_payload();
+
+ if (GBP_VXLAN_TUNNEL_MODE_L3 == payload.tunnel.mode) {
+ auto rd = gbp_route_domain::find(payload.tunnel.bd_rd_id);
+
+ if (rd) {
+ gbp_vxlan vt(payload.tunnel.vni, *rd);
+ OM::commit(key, vt);
+ VOM_LOG(log_level_t::DEBUG) << "dump: " << vt.to_string();
+ }
+ } else {
+ auto bd = gbp_bridge_domain::find(payload.tunnel.bd_rd_id);
+
+ if (bd) {
+ gbp_vxlan vt(payload.tunnel.vni, *bd);
+ OM::commit(key, vt);
+ VOM_LOG(log_level_t::DEBUG) << "dump: " << vt.to_string();
+ }
+ }
+ }
+}
+
+gbp_vxlan::event_handler::event_handler()
+{
+ OM::register_listener(this);
+ inspect::register_handler({ "gvt", "gbp-vxlan-tunnel" }, "GBP VXLAN Tunnels",
+ this);
+}
+
+void
+gbp_vxlan::event_handler::handle_replay()
+{
+ m_db.replay();
+}
+
+dependency_t
+gbp_vxlan::event_handler::order() const
+{
+ return (dependency_t::BINDING);
+}
+
+void
+gbp_vxlan::event_handler::show(std::ostream& os)
+{
+ db_dump(m_db, os);
+}
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
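A sketch of the two gbp_vxlan constructors, assuming the gbd and grd objects from the earlier examples; a tunnel bound to a bridge-domain carries L2 traffic, one bound to a route-domain carries L3:

    // VNI 101 terminates in the GBP bridge-domain (L2 mode)
    VOM::gbp_vxlan vt_l2(101, gbd);
    VOM::OM::write("my-app", vt_l2);

    // VNI 102 terminates in the GBP route-domain (L3 mode)
    VOM::gbp_vxlan vt_l3(102, grd);
    VOM::OM::write("my-app", vt_l3);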
diff --git a/extras/vom/vom/gbp_vxlan.hpp b/extras/vom/vom/gbp_vxlan.hpp
new file mode 100644
index 00000000000..cae67d8ad17
--- /dev/null
+++ b/extras/vom/vom/gbp_vxlan.hpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VOM_GBP_VXLAN_H__
+#define __VOM_GBP_VXLAN_H__
+
+#include "vom/gbp_bridge_domain.hpp"
+#include "vom/gbp_route_domain.hpp"
+#include "vom/hw.hpp"
+#include "vom/inspect.hpp"
+#include "vom/interface.hpp"
+#include "vom/singular_db.hpp"
+
+namespace VOM {
+/**
+ * A representation of a GBP_VXLAN Tunnel in VPP
+ */
+class gbp_vxlan : public interface
+{
+public:
+ /**
+ * The VNI is the key
+ */
+ typedef uint32_t key_t;
+
+ /**
+   * Construct a new object matching the desired state
+ */
+ gbp_vxlan(uint32_t vni, const gbp_bridge_domain& gbd);
+ gbp_vxlan(uint32_t vni, const gbp_route_domain& grd);
+
+ /*
+ * Destructor
+ */
+ ~gbp_vxlan();
+
+ /**
+ * Copy constructor
+ */
+ gbp_vxlan(const gbp_vxlan& o);
+
+ bool operator==(const gbp_vxlan& vt) const;
+
+ /**
+ * Return the matching 'singular instance'
+ */
+ std::shared_ptr<gbp_vxlan> singular() const;
+
+ /**
+ * Return the object's key
+ */
+ const key_t key() const;
+
+ /**
+   * Debug print function
+ */
+ virtual std::string to_string() const;
+
+ /**
+ * Return VPP's handle to this object
+ */
+ const handle_t& handle() const;
+
+ /**
+   * Dump all GBP VXLAN tunnels into the stream provided
+ */
+ static void dump(std::ostream& os);
+
+ /**
+ * Find the GBP_VXLAN tunnel in the OM
+ */
+ static std::shared_ptr<gbp_vxlan> find(const key_t k);
+
+private:
+ /**
+ * Class definition for listeners to OM events
+ */
+ class event_handler : public OM::listener, public inspect::command_handler
+ {
+ public:
+ event_handler();
+ virtual ~event_handler() = default;
+
+ /**
+ * Handle a populate event
+ */
+ void handle_populate(const client_db::key_t& key);
+
+ /**
+ * Handle a replay event
+ */
+ void handle_replay();
+
+ /**
+ * Show the object in the Singular DB
+ */
+ void show(std::ostream& os);
+
+ /**
+ * Get the sortable Id of the listener
+ */
+ dependency_t order() const;
+ };
+
+ /**
+ * Event handle to register with OM
+ */
+ static event_handler m_evh;
+
+ /**
+   * Commit the accumulated changes into VPP, i.e. a 'HW' write.
+ */
+ void update(const gbp_vxlan& obj);
+
+ /**
+ * Return the matching 'instance' of the sub-interface
+ * over-ride from the base class
+ */
+ std::shared_ptr<interface> singular_i() const;
+
+ /**
+ * Find the GBP_VXLAN tunnel in the OM
+ */
+ static std::shared_ptr<gbp_vxlan> find_or_add(const gbp_vxlan& temp);
+
+ /*
+ * It's the VPPHW class that updates the objects in HW
+ */
+ friend class OM;
+
+ /**
+ * It's the singular_db class that calls replay()
+ */
+ friend class singular_db<key_t, gbp_vxlan>;
+
+ /**
+ * Sweep/reap the object if still stale
+ */
+ void sweep(void);
+
+ /**
+ * replay the object to create it in hardware
+ */
+ void replay(void);
+
+ /**
+ * Tunnel VNI/key
+ */
+ uint32_t m_vni;
+ std::shared_ptr<gbp_bridge_domain> m_gbd;
+ std::shared_ptr<gbp_route_domain> m_grd;
+
+ /**
+   * A map of all GBP VXLAN tunnels against their key
+ */
+ static singular_db<key_t, gbp_vxlan> m_db;
+
+ /**
+ * Construct a unique name for the tunnel
+ */
+ static std::string mk_name(uint32_t vni);
+};
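+/*
+ * Illustrative usage sketch (the key string and VNI below are examples only):
+ *
+ *   gbp_vxlan gvt(99, gbd);           // gbd: an existing gbp_bridge_domain
+ *   OM::write("my-client-key", gvt);  // programs the L2 template tunnel in VPP
+ */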
+
+}; // namespace VOM
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
+
+#endif
diff --git a/extras/vom/vom/gbp_vxlan_cmds.cpp b/extras/vom/vom/gbp_vxlan_cmds.cpp
new file mode 100644
index 00000000000..90a77fbb896
--- /dev/null
+++ b/extras/vom/vom/gbp_vxlan_cmds.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vom/gbp_vxlan_cmds.hpp"
+
+#include <vapi/tap.api.vapi.hpp>
+
+namespace VOM {
+namespace gbp_vxlan_cmds {
+create_cmd::create_cmd(HW::item<handle_t>& item,
+ const std::string& name,
+ uint32_t vni,
+ bool is_l2,
+ uint32_t bd_rd)
+ : interface::create_cmd<vapi::Gbp_vxlan_tunnel_add>(item, name)
+ , m_vni(vni)
+ , m_is_l2(is_l2)
+ , m_bd_rd(bd_rd)
+{
+}
+
+rc_t
+create_cmd::issue(connection& con)
+{
+ msg_t req(con.ctx(), std::ref(*this));
+
+ auto& payload = req.get_request().get_payload();
+
+ payload.tunnel.vni = m_vni;
+ payload.tunnel.bd_rd_id = m_bd_rd;
+ if (m_is_l2)
+ payload.tunnel.mode = GBP_VXLAN_TUNNEL_MODE_L2;
+ else
+ payload.tunnel.mode = GBP_VXLAN_TUNNEL_MODE_L3;
+
+ VAPI_CALL(req.execute());
+
+ wait();
+ if (m_hw_item.rc() == rc_t::OK) {
+ insert_interface();
+ }
+
+ return (m_hw_item.rc());
+}
+
+std::string
+create_cmd::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-vxlan-create: " << m_hw_item.to_string() << " vni:" << m_vni
+ << " bd/rd:" << m_bd_rd;
+
+ return (s.str());
+}
+
+delete_cmd::delete_cmd(HW::item<handle_t>& item, uint32_t vni)
+ : interface::delete_cmd<vapi::Gbp_vxlan_tunnel_del>(item)
+ , m_vni(vni)
+{
+}
+
+rc_t
+delete_cmd::issue(connection& con)
+{
+ msg_t req(con.ctx(), std::ref(*this));
+
+ auto& payload = req.get_request().get_payload();
+ payload.vni = m_vni;
+
+ VAPI_CALL(req.execute());
+
+ wait();
+ m_hw_item.set(rc_t::NOOP);
+
+ remove_interface();
+ return rc_t::OK;
+}
+
+std::string
+delete_cmd::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-vxlan-delete: " << m_hw_item.to_string() << " vni:" << m_vni;
+
+ return (s.str());
+}
+
+dump_cmd::dump_cmd()
+{
+}
+
+bool
+dump_cmd::operator==(const dump_cmd& other) const
+{
+ return (true);
+}
+
+rc_t
+dump_cmd::issue(connection& con)
+{
+ m_dump.reset(new msg_t(con.ctx(), std::ref(*this)));
+
+ VAPI_CALL(m_dump->execute());
+
+ wait();
+
+ return rc_t::OK;
+}
+
+std::string
+dump_cmd::to_string() const
+{
+ return ("gbp-vxlan-dump");
+}
+
+} // namespace gbp_vxlan_cmds
+} // namespace VOM
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
diff --git a/extras/vom/vom/gbp_vxlan_cmds.hpp b/extras/vom/vom/gbp_vxlan_cmds.hpp
new file mode 100644
index 00000000000..a42a6531f20
--- /dev/null
+++ b/extras/vom/vom/gbp_vxlan_cmds.hpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VOM_GBP_VXLAN_CMDS_H__
+#define __VOM_GBP_VXLAN_CMDS_H__
+
+#include "vom/dump_cmd.hpp"
+#include "vom/gbp_vxlan.hpp"
+#include "vom/interface.hpp"
+
+#include <vapi/gbp.api.vapi.hpp>
+
+namespace VOM {
+namespace gbp_vxlan_cmds {
+/**
+ * A command class that creates a GBP VXLAN tunnel
+ */
+class create_cmd : public interface::create_cmd<vapi::Gbp_vxlan_tunnel_add>
+{
+public:
+ /**
+ * Constructor
+ */
+ create_cmd(HW::item<handle_t>& item,
+ const std::string& name,
+ uint32_t vni,
+ bool is_l2,
+ uint32_t bd_rd);
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const create_cmd& i) const;
+
+private:
+ uint32_t m_vni;
+ bool m_is_l2;
+ uint32_t m_bd_rd;
+};
+
+/**
+ * A cmd class that deletes a GBP VXLAN tunnel
+ */
+class delete_cmd : public interface::delete_cmd<vapi::Gbp_vxlan_tunnel_del>
+{
+public:
+ /**
+ * Constructor
+ */
+ delete_cmd(HW::item<handle_t>& item, uint32_t vni);
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const delete_cmd& i) const;
+
+private:
+ uint32_t m_vni;
+};
+
+/**
+ * A cmd class that dumps all the GBP VXLAN tunnels
+ */
+class dump_cmd : public VOM::dump_cmd<vapi::Gbp_vxlan_tunnel_dump>
+{
+public:
+ /**
+ * Constructor
+ */
+ dump_cmd();
+ dump_cmd(const dump_cmd& d);
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const dump_cmd& i) const;
+
+private:
+ /**
+   * HW return code
+ */
+ HW::item<bool> item;
+};
+
+}; // gbp_vxlan_cmds
+}; // VOM
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
+
+#endif
diff --git a/extras/vom/vom/gbp_vxlan_tunnel.cpp b/extras/vom/vom/gbp_vxlan_tunnel.cpp
new file mode 100644
index 00000000000..2219c04b1c2
--- /dev/null
+++ b/extras/vom/vom/gbp_vxlan_tunnel.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vom/gbp_vxlan_tunnel.hpp"
+#include "vom/gbp_vxlan_tunnel_cmds.hpp"
+#include "vom/interface.hpp"
+#include "vom/singular_db_funcs.hpp"
+
+namespace VOM {
+
+/**
+ * A DB of all the GBP VXLAN tunnels, keyed on the VNI
+ */
+singular_db<uint32_t, gbp_vxlan_tunnel> gbp_vxlan_tunnel::m_db;
+
+gbp_vxlan_tunnel::event_handler gbp_vxlan_tunnel::m_evh;
+
+/**
+ * Construct a new object matching the desired state
+ */
+gbp_vxlan_tunnel::gbp_vxlan_tunnel(const vxlan_tunnel& vt)
+ : interface(vt)
+ , m_vni(vt.m_vni)
+{
+}
+
+gbp_vxlan_tunnel::gbp_vxlan_tunnel(uint32_t vni)
+ : interface(mk_name(vni),
+ interface::type_t::UNKNOWN,
+ interface::admin_state_t::UP)
+  , m_vni(vni)
+{
+}
+
+const gbp_vxlan_tunnel::key_t
+gbp_vxlan_tunnel::key() const
+{
+ return (m_vni);
+}
+
+bool
+gbp_vxlan_tunnel::operator==(const gbp_vxlan_tunnel& vt) const
+{
+ return (m_vni == vt.m_vni);
+}
+
+void
+gbp_vxlan_tunnel::sweep()
+{
+ if (rc_t::OK == m_id.rc()) {
+ HW::enqueue(new gbp_vxlan_tunnel_cmds::delete_cmd(m_vni));
+ }
+ HW::write();
+}
+
+void
+gbp_vxlan_tunnel::replay()
+{
+ if (rc_t::OK == m_hdl) {
+ HW::enqueue(new gbp_vxlan_tunnel_cmds::create_cmd(m_vni));
+ }
+}
+
+gbp_vxlan_tunnel::~gbp_vxlan_tunnel()
+{
+ sweep();
+ m_db.release(m_id.data(), this);
+}
+
+std::string
+gbp_vxlan_tunnel::to_string() const
+{
+ std::ostringstream s;
+ s << "gbp-vxlan:[" << m_vni << "]";
+
+ return (s.str());
+}
+
+std::shared_ptr<gbp_vxlan_tunnel>
+gbp_vxlan_tunnel::find(const key_t& key)
+{
+ return (m_db.find(key));
+}
+
+void
+gbp_vxlan_tunnel::update(const gbp_vxlan_tunnel& desired)
+{
+ /*
+ * the desired state is always that the interface should be created
+ */
+ if (rc_t::OK != m_hdl) {
+ HW::enqueue(new gbp_vxlan_tunnel_cmds::create_cmd(m_vni));
+ }
+}
+
+std::shared_ptr<gbp_vxlan_tunnel>
+gbp_vxlan_tunnel::find_or_add(const gbp_vxlan_tunnel& temp)
+{
+ return (m_db.find_or_add(temp.m_id.data(), temp));
+}
+
+std::shared_ptr<gbp_vxlan_tunnel>
+gbp_vxlan_tunnel::singular() const
+{
+ return find_or_add(*this);
+}
+
+void
+gbp_vxlan_tunnel::dump(std::ostream& os)
+{
+ db_dump(m_db, os);
+}
+
+void
+gbp_vxlan_tunnel::event_handler::handle_populate(const client_db::key_t& key)
+{
+ /*
+   * dump VPP GBP VXLAN tunnels
+ */
+ std::shared_ptr<gbp_vxlan_tunnel_cmds::dump_cmd> cmd =
+ std::make_shared<gbp_vxlan_tunnel_cmds::dump_cmd>();
+
+ HW::enqueue(cmd);
+ HW::write();
+
+  for (auto& record : *cmd) {
+    auto& payload = record.get_payload();
+
+    gbp_vxlan_tunnel vt(payload.tunnel.vni);
+    OM::commit(key, vt);
+    VOM_LOG(log_level_t::DEBUG) << "dump: " << vt.to_string();
+  }
+}
+
+gbp_vxlan_tunnel::event_handler::event_handler()
+{
+ OM::register_listener(this);
+ inspect::register_handler({ "gvt", "gbp-vxlan-tunnel" }, "GBP VXLAN Tunnels",
+ this);
+}
+
+void
+gbp_vxlan_tunnel::event_handler::handle_replay()
+{
+ m_db.replay();
+}
+
+dependency_t
+gbp_vxlan_tunnel::event_handler::order() const
+{
+ return (dependency_t::INTERFACE);
+}
+
+void
+gbp_vxlan_tunnel::event_handler::show(std::ostream& os)
+{
+ db_dump(m_db, os);
+}
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
diff --git a/extras/vom/vom/interface_factory.cpp b/extras/vom/vom/interface_factory.cpp
index f4425130a4e..fd135f5820e 100644
--- a/extras/vom/vom/interface_factory.cpp
+++ b/extras/vom/vom/interface_factory.cpp
@@ -38,6 +38,10 @@ interface_factory::new_interface(const vapi_payload_sw_interface_details& vd)
l2_address_t l2_address(vd.l2_address, vd.l2_address_length);
std::string tag = "";
+ if (interface::type_t::UNKNOWN == type) {
+ return sp;
+ }
+
sp = interface::find(hdl);
if (sp) {
sp->set(state);
diff --git a/src/plugins/gbp/CMakeLists.txt b/src/plugins/gbp/CMakeLists.txt
index c099060a1de..377197a7c08 100644
--- a/src/plugins/gbp/CMakeLists.txt
+++ b/src/plugins/gbp/CMakeLists.txt
@@ -13,17 +13,23 @@
add_vpp_plugin(gbp
SOURCES
- gbp_subnet.c
+ gbp_api.c
+ gbp_bridge_domain.c
+ gbp_classify.c
gbp_contract.c
gbp_endpoint.c
gbp_endpoint_group.c
- gbp_classify.c
- gbp_recirc.c
- gbp_policy.c
- gbp_policy_dpo.c
gbp_fwd.c
gbp_fwd_dpo.c
- gbp_api.c
+ gbp_itf.c
+ gbp_learn.c
+ gbp_policy.c
+ gbp_policy_dpo.c
+ gbp_recirc.c
+ gbp_route_domain.c
+ gbp_scanner.c
+ gbp_subnet.c
+ gbp_vxlan.c
API_FILES
gbp.api
diff --git a/src/plugins/gbp/gbp.api b/src/plugins/gbp/gbp.api
index d7c6d83c3b7..bf42243e0d0 100644
--- a/src/plugins/gbp/gbp.api
+++ b/src/plugins/gbp/gbp.api
@@ -19,16 +19,96 @@ option version = "2.0.0";
import "vnet/ip/ip_types.api";
import "vnet/ethernet/ethernet_types.api";
+typedef gbp_bridge_domain
+{
+ u32 bd_id;
+ u32 bvi_sw_if_index;
+ u32 uu_fwd_sw_if_index;
+};
+
+autoreply define gbp_bridge_domain_add
+{
+ u32 client_index;
+ u32 context;
+ vl_api_gbp_bridge_domain_t bd;
+};
+autoreply define gbp_bridge_domain_del
+{
+ u32 client_index;
+ u32 context;
+ u32 bd_id;
+};
+autoreply define gbp_bridge_domain_dump
+{
+ u32 client_index;
+ u32 context;
+};
+define gbp_bridge_domain_details
+{
+ u32 context;
+ vl_api_gbp_bridge_domain_t bd;
+};
+
+typedef gbp_route_domain
+{
+ u32 rd_id;
+ u32 ip4_table_id;
+ u32 ip6_table_id;
+ u32 ip4_uu_sw_if_index;
+ u32 ip6_uu_sw_if_index;
+};
+
+autoreply define gbp_route_domain_add
+{
+ u32 client_index;
+ u32 context;
+ vl_api_gbp_route_domain_t rd;
+};
+autoreply define gbp_route_domain_del
+{
+ u32 client_index;
+ u32 context;
+ u32 rd_id;
+};
+autoreply define gbp_route_domain_dump
+{
+ u32 client_index;
+ u32 context;
+};
+define gbp_route_domain_details
+{
+ u32 context;
+ vl_api_gbp_route_domain_t rd;
+};
+
/** \brief Endpoint
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
*/
+enum gbp_endpoint_flags
+{
+ NONE = 0,
+ BOUNCE = 0x1,
+ REMOTE = 0x2,
+ LEARNT = 0x4,
+  /* convenience value: REMOTE | LEARNT */
+ REMOTE_LEARNT = 0x6,
+};
+
+typedef gbp_endpoint_tun
+{
+ vl_api_address_t src;
+ vl_api_address_t dst;
+};
+
typedef gbp_endpoint
{
u32 sw_if_index;
u16 epg_id;
+ vl_api_gbp_endpoint_flags_t flags;
vl_api_mac_address_t mac;
+ vl_api_gbp_endpoint_tun_t tun;
u8 n_ips;
vl_api_address_t ips[n_ips];
};
@@ -63,6 +143,8 @@ define gbp_endpoint_dump
define gbp_endpoint_details
{
u32 context;
+ f64 age;
+ u32 handle;
vl_api_gbp_endpoint_t endpoint;
};
@@ -70,18 +152,22 @@ typeonly define gbp_endpoint_group
{
u16 epg_id;
u32 bd_id;
- u32 ip4_table_id;
- u32 ip6_table_id;
+ u32 rd_id;
u32 uplink_sw_if_index;
};
-autoreply define gbp_endpoint_group_add_del
+autoreply define gbp_endpoint_group_add
{
u32 client_index;
u32 context;
- u8 is_add;
vl_api_gbp_endpoint_group_t epg;
};
+autoreply define gbp_endpoint_group_del
+{
+ u32 client_index;
+ u32 context;
+ u16 epg_id;
+};
define gbp_endpoint_group_dump
{
@@ -122,12 +208,19 @@ define gbp_recirc_details
vl_api_gbp_recirc_t recirc;
};
+enum gbp_subnet_type
+{
+ GBP_API_SUBNET_TRANSPORT,
+ GBP_API_SUBNET_STITCHED_INTERNAL,
+ GBP_API_SUBNET_STITCHED_EXTERNAL,
+};
+
typeonly define gbp_subnet
{
- u32 table_id;
+ u32 rd_id;
u32 sw_if_index;
u16 epg_id;
- u8 is_internal;
+ vl_api_gbp_subnet_type_t type;
vl_api_prefix_t prefix;
};
@@ -178,6 +271,70 @@ define gbp_contract_details
vl_api_gbp_contract_t contract;
};
+/**
+ * @brief Set the time threshold after which an endpoint is
+ *        considered inactive and is aged/reaped by the scanner
+ * @param threshold In seconds
+ */
+autoreply define gbp_endpoint_learn_set_inactive_threshold
+{
+ u32 client_index;
+ u32 context;
+ u32 threshold;
+};
+
+/**
+ * @brief Configure a 'base' tunnel from which learned tunnels
+ *        are permitted to derive.
+ *        A base tunnel consists only of the VNI; any src,dst IP
+ *        pair is thus allowed.
+ */
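+/*
+ * Illustrative example (values are examples only): an L2 base tunnel with
+ * VNI 99 bound to GBP bridge-domain 1 is added with
+ *   gbp_vxlan_tunnel_add tunnel={vni:99, mode:L2, bd_rd_id:1}
+ * endpoints later learned on VNI 99 derive per-(src,dst) child vxlan-gbp
+ * tunnels from this base.
+ */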
+enum gbp_vxlan_tunnel_mode
+{
+ GBP_VXLAN_TUNNEL_MODE_L2,
+ GBP_VXLAN_TUNNEL_MODE_L3,
+};
+
+typedef gbp_vxlan_tunnel
+{
+ u32 vni;
+ vl_api_gbp_vxlan_tunnel_mode_t mode;
+ u32 bd_rd_id;
+};
+
+define gbp_vxlan_tunnel_add
+{
+ u32 client_index;
+ u32 context;
+ vl_api_gbp_vxlan_tunnel_t tunnel;
+};
+
+define gbp_vxlan_tunnel_add_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+autoreply define gbp_vxlan_tunnel_del
+{
+ u32 client_index;
+ u32 context;
+ u32 vni;
+};
+
+define gbp_vxlan_tunnel_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+define gbp_vxlan_tunnel_details
+{
+ u32 context;
+ vl_api_gbp_vxlan_tunnel_t tunnel;
+};
+
/*
* Local Variables:
* eval: (c-set-style "gnu")
diff --git a/src/plugins/gbp/gbp_api.c b/src/plugins/gbp/gbp_api.c
index 6bd1abc727e..faf036e9d81 100644
--- a/src/plugins/gbp/gbp_api.c
+++ b/src/plugins/gbp/gbp_api.c
@@ -25,6 +25,11 @@
#include <vpp/app/version.h>
#include <gbp/gbp.h>
+#include <gbp/gbp_learn.h>
+#include <gbp/gbp_itf.h>
+#include <gbp/gbp_vxlan.h>
+#include <gbp/gbp_bridge_domain.h>
+#include <gbp/gbp_route_domain.h>
#include <vlibapi/api.h>
#include <vlibmemory/api.h>
@@ -59,12 +64,23 @@
_(GBP_ENDPOINT_DUMP, gbp_endpoint_dump) \
_(GBP_SUBNET_ADD_DEL, gbp_subnet_add_del) \
_(GBP_SUBNET_DUMP, gbp_subnet_dump) \
- _(GBP_ENDPOINT_GROUP_ADD_DEL, gbp_endpoint_group_add_del) \
+ _(GBP_ENDPOINT_GROUP_ADD, gbp_endpoint_group_add) \
+ _(GBP_ENDPOINT_GROUP_DEL, gbp_endpoint_group_del) \
_(GBP_ENDPOINT_GROUP_DUMP, gbp_endpoint_group_dump) \
+ _(GBP_BRIDGE_DOMAIN_ADD, gbp_bridge_domain_add) \
+ _(GBP_BRIDGE_DOMAIN_DEL, gbp_bridge_domain_del) \
+ _(GBP_BRIDGE_DOMAIN_DUMP, gbp_bridge_domain_dump) \
+ _(GBP_ROUTE_DOMAIN_ADD, gbp_route_domain_add) \
+ _(GBP_ROUTE_DOMAIN_DEL, gbp_route_domain_del) \
+ _(GBP_ROUTE_DOMAIN_DUMP, gbp_route_domain_dump) \
_(GBP_RECIRC_ADD_DEL, gbp_recirc_add_del) \
_(GBP_RECIRC_DUMP, gbp_recirc_dump) \
_(GBP_CONTRACT_ADD_DEL, gbp_contract_add_del) \
- _(GBP_CONTRACT_DUMP, gbp_contract_dump)
+ _(GBP_CONTRACT_DUMP, gbp_contract_dump) \
+ _(GBP_ENDPOINT_LEARN_SET_INACTIVE_THRESHOLD, gbp_endpoint_learn_set_inactive_threshold) \
+ _(GBP_VXLAN_TUNNEL_ADD, gbp_vxlan_tunnel_add) \
+ _(GBP_VXLAN_TUNNEL_DEL, gbp_vxlan_tunnel_del) \
+ _(GBP_VXLAN_TUNNEL_DUMP, gbp_vxlan_tunnel_dump)
gbp_main_t gbp_main;
@@ -72,10 +88,46 @@ static u16 msg_id_base;
#define GBP_MSG_BASE msg_id_base
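+/**
+ * Decode the API endpoint flags (network order) into the internal flags
+ */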
+static gbp_endpoint_flags_t
+gbp_endpoint_flags_decode (vl_api_gbp_endpoint_flags_t v)
+{
+ gbp_endpoint_flags_t f = GBP_ENDPOINT_FLAG_NONE;
+
+ v = ntohl (v);
+
+ if (v & BOUNCE)
+ f |= GBP_ENDPOINT_FLAG_BOUNCE;
+ if (v & REMOTE)
+ f |= GBP_ENDPOINT_FLAG_REMOTE;
+ if (v & LEARNT)
+ f |= GBP_ENDPOINT_FLAG_LEARNT;
+
+ return (f);
+}
+
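+/**
+ * Encode the internal endpoint flags into the API (network order) format
+ */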
+static vl_api_gbp_endpoint_flags_t
+gbp_endpoint_flags_encode (gbp_endpoint_flags_t f)
+{
+ vl_api_gbp_endpoint_flags_t v = 0;
+
+
+ if (f & GBP_ENDPOINT_FLAG_BOUNCE)
+ v |= BOUNCE;
+ if (f & GBP_ENDPOINT_FLAG_REMOTE)
+ v |= REMOTE;
+ if (f & GBP_ENDPOINT_FLAG_LEARNT)
+ v |= LEARNT;
+
+ v = htonl (v);
+
+ return (v);
+}
+
static void
vl_api_gbp_endpoint_add_t_handler (vl_api_gbp_endpoint_add_t * mp)
{
vl_api_gbp_endpoint_add_reply_t *rmp;
+ gbp_endpoint_flags_t gef;
u32 sw_if_index, handle;
ip46_address_t *ips;
mac_address_t mac;
@@ -83,10 +135,9 @@ vl_api_gbp_endpoint_add_t_handler (vl_api_gbp_endpoint_add_t * mp)
VALIDATE_SW_IF_INDEX (&(mp->endpoint));
+  gef = gbp_endpoint_flags_decode (mp->endpoint.flags);
+  ips = NULL;
sw_if_index = ntohl (mp->endpoint.sw_if_index);
- ips = NULL;
-
if (mp->endpoint.n_ips)
{
vec_validate (ips, mp->endpoint.n_ips - 1);
@@ -98,11 +149,23 @@ vl_api_gbp_endpoint_add_t_handler (vl_api_gbp_endpoint_add_t * mp)
}
mac_address_decode (&mp->endpoint.mac, &mac);
- rv = gbp_endpoint_update (sw_if_index, ips, &mac,
- ntohs (mp->endpoint.epg_id), &handle);
+ if (GBP_ENDPOINT_FLAG_REMOTE & gef)
+ {
+ ip46_address_t tun_src, tun_dst;
- vec_free (ips);
+ ip_address_decode (&mp->endpoint.tun.src, &tun_src);
+ ip_address_decode (&mp->endpoint.tun.dst, &tun_dst);
+ rv = gbp_endpoint_update (sw_if_index, ips, &mac,
+ ntohs (mp->endpoint.epg_id),
+ gef, &tun_src, &tun_dst, &handle);
+ }
+ else
+ {
+ rv = gbp_endpoint_update (sw_if_index, ips, &mac,
+ ntohs (mp->endpoint.epg_id),
+ gef, NULL, NULL, &handle);
+ }
BAD_SW_IF_INDEX_LABEL;
/* *INDENT-OFF* */
@@ -124,6 +187,19 @@ vl_api_gbp_endpoint_del_t_handler (vl_api_gbp_endpoint_del_t * mp)
REPLY_MACRO (VL_API_GBP_ENDPOINT_DEL_REPLY + GBP_MSG_BASE);
}
+static void
+ vl_api_gbp_endpoint_learn_set_inactive_threshold_t_handler
+ (vl_api_gbp_endpoint_learn_set_inactive_threshold_t * mp)
+{
+ vl_api_gbp_endpoint_learn_set_inactive_threshold_reply_t *rmp;
+ int rv = 0;
+
+ gbp_learn_set_inactive_threshold (ntohl (mp->threshold));
+
+ REPLY_MACRO (VL_API_GBP_ENDPOINT_LEARN_SET_INACTIVE_THRESHOLD_REPLY +
+ GBP_MSG_BASE);
+}
+
typedef struct gbp_walk_ctx_t_
{
vl_api_registration_t *reg;
@@ -131,14 +207,17 @@ typedef struct gbp_walk_ctx_t_
} gbp_walk_ctx_t;
static walk_rc_t
-gbp_endpoint_send_details (gbp_endpoint_t * gbpe, void *args)
+gbp_endpoint_send_details (index_t gei, void *args)
{
vl_api_gbp_endpoint_details_t *mp;
+ gbp_endpoint_t *ge;
gbp_walk_ctx_t *ctx;
u8 n_ips, ii;
ctx = args;
- n_ips = vec_len (gbpe->ge_ips);
+ ge = gbp_endpoint_get (gei);
+
+ n_ips = vec_len (ge->ge_ips);
mp = vl_msg_api_alloc (sizeof (*mp) + (sizeof (*mp->endpoint.ips) * n_ips));
if (!mp)
return 1;
@@ -147,15 +226,28 @@ gbp_endpoint_send_details (gbp_endpoint_t * gbpe, void *args)
mp->_vl_msg_id = ntohs (VL_API_GBP_ENDPOINT_DETAILS + GBP_MSG_BASE);
mp->context = ctx->context;
- mp->endpoint.sw_if_index = ntohl (gbpe->ge_sw_if_index);
- mp->endpoint.epg_id = ntohs (gbpe->ge_epg_id);
+ if (gbp_endpoint_is_remote (ge))
+ {
+ mp->endpoint.sw_if_index = ntohl (ge->tun.ge_parent_sw_if_index);
+ ip_address_encode (&ge->tun.ge_src, IP46_TYPE_ANY,
+ &mp->endpoint.tun.src);
+ ip_address_encode (&ge->tun.ge_dst, IP46_TYPE_ANY,
+ &mp->endpoint.tun.dst);
+ }
+ else
+ {
+ mp->endpoint.sw_if_index = ntohl (ge->ge_sw_if_index);
+ }
+ mp->endpoint.epg_id = ntohs (ge->ge_epg_id);
mp->endpoint.n_ips = n_ips;
- mac_address_encode (&gbpe->ge_mac, &mp->endpoint.mac);
+ mp->endpoint.flags = gbp_endpoint_flags_encode (ge->ge_flags);
+ mp->handle = htonl (gei);
+ mp->age = vlib_time_now (vlib_get_main ()) - ge->ge_last_time;
+ mac_address_encode (&ge->ge_mac, &mp->endpoint.mac);
- vec_foreach_index (ii, gbpe->ge_ips)
+ vec_foreach_index (ii, ge->ge_ips)
{
- ip_address_encode (&gbpe->ge_ips[ii], IP46_TYPE_ANY,
- &mp->endpoint.ips[ii]);
+ ip_address_encode (&ge->ge_ips[ii], IP46_TYPE_ANY, &mp->endpoint.ips[ii]);
}
vl_api_send_msg (ctx->reg, (u8 *) mp);
@@ -181,58 +273,158 @@ vl_api_gbp_endpoint_dump_t_handler (vl_api_gbp_endpoint_dump_t * mp)
}
static void
- vl_api_gbp_endpoint_group_add_del_t_handler
- (vl_api_gbp_endpoint_group_add_del_t * mp)
+ vl_api_gbp_endpoint_group_add_t_handler
+ (vl_api_gbp_endpoint_group_add_t * mp)
{
- vl_api_gbp_endpoint_group_add_del_reply_t *rmp;
- u32 uplink_sw_if_index;
+ vl_api_gbp_endpoint_group_add_reply_t *rmp;
int rv = 0;
- uplink_sw_if_index = ntohl (mp->epg.uplink_sw_if_index);
- if (!vnet_sw_if_index_is_api_valid (uplink_sw_if_index))
- goto bad_sw_if_index;
+ rv = gbp_endpoint_group_add_and_lock (ntohs (mp->epg.epg_id),
+ ntohl (mp->epg.bd_id),
+ ntohl (mp->epg.rd_id),
+ ntohl (mp->epg.uplink_sw_if_index));
- if (mp->is_add)
- {
- rv = gbp_endpoint_group_add (ntohs (mp->epg.epg_id),
- ntohl (mp->epg.bd_id),
- ntohl (mp->epg.ip4_table_id),
- ntohl (mp->epg.ip6_table_id),
- uplink_sw_if_index);
- }
- else
+ REPLY_MACRO (VL_API_GBP_ENDPOINT_GROUP_ADD_REPLY + GBP_MSG_BASE);
+}
+
+static void
+ vl_api_gbp_endpoint_group_del_t_handler
+ (vl_api_gbp_endpoint_group_del_t * mp)
+{
+ vl_api_gbp_endpoint_group_del_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_endpoint_group_delete (ntohs (mp->epg_id));
+
+ REPLY_MACRO (VL_API_GBP_ENDPOINT_GROUP_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_bridge_domain_add_t_handler (vl_api_gbp_bridge_domain_add_t * mp)
+{
+ vl_api_gbp_bridge_domain_add_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_bridge_domain_add_and_lock (ntohl (mp->bd.bd_id),
+ ntohl (mp->bd.bvi_sw_if_index),
+ ntohl (mp->bd.uu_fwd_sw_if_index));
+
+ REPLY_MACRO (VL_API_GBP_BRIDGE_DOMAIN_ADD_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_bridge_domain_del_t_handler (vl_api_gbp_bridge_domain_del_t * mp)
+{
+ vl_api_gbp_bridge_domain_del_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_bridge_domain_delete (ntohl (mp->bd_id));
+
+ REPLY_MACRO (VL_API_GBP_BRIDGE_DOMAIN_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_route_domain_add_t_handler (vl_api_gbp_route_domain_add_t * mp)
+{
+ vl_api_gbp_route_domain_add_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_route_domain_add_and_lock (ntohl (mp->rd.rd_id),
+ ntohl (mp->rd.ip4_table_id),
+ ntohl (mp->rd.ip6_table_id),
+ ntohl (mp->rd.ip4_uu_sw_if_index),
+ ntohl (mp->rd.ip6_uu_sw_if_index));
+
+ REPLY_MACRO (VL_API_GBP_ROUTE_DOMAIN_ADD_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_route_domain_del_t_handler (vl_api_gbp_route_domain_del_t * mp)
+{
+ vl_api_gbp_route_domain_del_reply_t *rmp;
+ int rv = 0;
+
+ rv = gbp_route_domain_delete (ntohl (mp->rd_id));
+
+ REPLY_MACRO (VL_API_GBP_ROUTE_DOMAIN_DEL_REPLY + GBP_MSG_BASE);
+}
+
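+/**
+ * Map the API subnet type to the internal type; returns -1 for an
+ * unrecognised value
+ */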
+static int
+gub_subnet_type_from_api (vl_api_gbp_subnet_type_t a, gbp_subnet_type_t * t)
+{
+ a = clib_net_to_host_u32 (a);
+
+ switch (a)
{
- gbp_endpoint_group_delete (ntohs (mp->epg.epg_id));
+ case GBP_API_SUBNET_TRANSPORT:
+ *t = GBP_SUBNET_TRANSPORT;
+ return (0);
+ case GBP_API_SUBNET_STITCHED_INTERNAL:
+ *t = GBP_SUBNET_STITCHED_INTERNAL;
+ return (0);
+ case GBP_API_SUBNET_STITCHED_EXTERNAL:
+ *t = GBP_SUBNET_STITCHED_EXTERNAL;
+ return (0);
}
- BAD_SW_IF_INDEX_LABEL;
-
- REPLY_MACRO (VL_API_GBP_ENDPOINT_GROUP_ADD_DEL_REPLY + GBP_MSG_BASE);
+ return (-1);
}
static void
vl_api_gbp_subnet_add_del_t_handler (vl_api_gbp_subnet_add_del_t * mp)
{
vl_api_gbp_subnet_add_del_reply_t *rmp;
+ gbp_subnet_type_t type;
fib_prefix_t pfx;
int rv = 0;
ip_prefix_decode (&mp->subnet.prefix, &pfx);
- rv = gbp_subnet_add_del (ntohl (mp->subnet.table_id),
- &pfx,
- ntohl (mp->subnet.sw_if_index),
- ntohs (mp->subnet.epg_id),
- mp->is_add, mp->subnet.is_internal);
+ rv = gub_subnet_type_from_api (mp->subnet.type, &type);
+
+ if (0 != rv)
+ goto out;
+ if (mp->is_add)
+ rv = gbp_subnet_add (ntohl (mp->subnet.rd_id),
+ &pfx, type,
+ ntohl (mp->subnet.sw_if_index),
+ ntohs (mp->subnet.epg_id));
+ else
+ rv = gbp_subnet_del (ntohl (mp->subnet.rd_id), &pfx);
+
+out:
REPLY_MACRO (VL_API_GBP_SUBNET_ADD_DEL_REPLY + GBP_MSG_BASE);
}
-static int
-gbp_subnet_send_details (u32 table_id,
+static vl_api_gbp_subnet_type_t
+gub_subnet_type_to_api (gbp_subnet_type_t t)
+{
+ vl_api_gbp_subnet_type_t a = 0;
+
+ switch (t)
+ {
+ case GBP_SUBNET_TRANSPORT:
+ a = GBP_API_SUBNET_TRANSPORT;
+ break;
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ a = GBP_API_SUBNET_STITCHED_INTERNAL;
+ break;
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ a = GBP_API_SUBNET_STITCHED_EXTERNAL;
+ break;
+ }
+
+ a = clib_host_to_net_u32 (a);
+
+ return (a);
+}
+
+static walk_rc_t
+gbp_subnet_send_details (u32 rd_id,
const fib_prefix_t * pfx,
- u32 sw_if_index,
- epg_id_t epg, u8 is_internal, void *args)
+ gbp_subnet_type_t type,
+ u32 sw_if_index, epg_id_t epg, void *args)
{
vl_api_gbp_subnet_details_t *mp;
gbp_walk_ctx_t *ctx;
@@ -246,15 +438,15 @@ gbp_subnet_send_details (u32 table_id,
mp->_vl_msg_id = ntohs (VL_API_GBP_SUBNET_DETAILS + GBP_MSG_BASE);
mp->context = ctx->context;
- mp->subnet.is_internal = is_internal;
+ mp->subnet.type = gub_subnet_type_to_api (type);
mp->subnet.sw_if_index = ntohl (sw_if_index);
mp->subnet.epg_id = ntohs (epg);
- mp->subnet.table_id = ntohl (table_id);
+ mp->subnet.rd_id = ntohl (rd_id);
ip_prefix_encode (pfx, &mp->subnet.prefix);
vl_api_send_msg (ctx->reg, (u8 *) mp);
- return (1);
+ return (WALK_CONTINUE);
}
static void
@@ -275,7 +467,7 @@ vl_api_gbp_subnet_dump_t_handler (vl_api_gbp_subnet_dump_t * mp)
}
static int
-gbp_endpoint_group_send_details (gbp_endpoint_group_t * gepg, void *args)
+gbp_endpoint_group_send_details (gbp_endpoint_group_t * gg, void *args)
{
vl_api_gbp_endpoint_group_details_t *mp;
gbp_walk_ctx_t *ctx;
@@ -289,11 +481,10 @@ gbp_endpoint_group_send_details (gbp_endpoint_group_t * gepg, void *args)
mp->_vl_msg_id = ntohs (VL_API_GBP_ENDPOINT_GROUP_DETAILS + GBP_MSG_BASE);
mp->context = ctx->context;
- mp->epg.uplink_sw_if_index = ntohl (gepg->gepg_uplink_sw_if_index);
- mp->epg.epg_id = ntohs (gepg->gepg_id);
- mp->epg.bd_id = ntohl (gepg->gepg_bd);
- mp->epg.ip4_table_id = ntohl (gepg->gepg_rd[FIB_PROTOCOL_IP4]);
- mp->epg.ip6_table_id = ntohl (gepg->gepg_rd[FIB_PROTOCOL_IP6]);
+ mp->epg.uplink_sw_if_index = ntohl (gg->gg_uplink_sw_if_index);
+ mp->epg.epg_id = ntohs (gg->gg_id);
+ mp->epg.bd_id = ntohl (gbp_endpoint_group_get_bd_id (gg));
+ mp->epg.rd_id = ntohl (gg->gg_rd);
vl_api_send_msg (ctx->reg, (u8 *) mp);
@@ -318,6 +509,90 @@ vl_api_gbp_endpoint_group_dump_t_handler (vl_api_gbp_endpoint_group_dump_t *
gbp_endpoint_group_walk (gbp_endpoint_group_send_details, &ctx);
}
+static int
+gbp_bridge_domain_send_details (gbp_bridge_domain_t * gb, void *args)
+{
+ vl_api_gbp_bridge_domain_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return 1;
+
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_BRIDGE_DOMAIN_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->bd.bd_id = ntohl (gb->gb_bd_id);
+ mp->bd.bvi_sw_if_index = ntohl (gb->gb_bvi_sw_if_index);
+ mp->bd.uu_fwd_sw_if_index = ntohl (gb->gb_uu_fwd_sw_if_index);
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (1);
+}
+
+static void
+vl_api_gbp_bridge_domain_dump_t_handler (vl_api_gbp_bridge_domain_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_bridge_domain_walk (gbp_bridge_domain_send_details, &ctx);
+}
+
+static int
+gbp_route_domain_send_details (gbp_route_domain_t * grd, void *args)
+{
+ vl_api_gbp_route_domain_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return 1;
+
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_GBP_ROUTE_DOMAIN_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->rd.rd_id = ntohl (grd->grd_id);
+ mp->rd.ip4_uu_sw_if_index =
+ ntohl (grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP4]);
+ mp->rd.ip6_uu_sw_if_index =
+ ntohl (grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP6]);
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (1);
+}
+
+static void
+vl_api_gbp_route_domain_dump_t_handler (vl_api_gbp_route_domain_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_route_domain_walk (gbp_route_domain_send_details, &ctx);
+}
+
static void
vl_api_gbp_recirc_add_del_t_handler (vl_api_gbp_recirc_add_del_t * mp)
{
@@ -439,6 +714,121 @@ vl_api_gbp_contract_dump_t_handler (vl_api_gbp_contract_dump_t * mp)
gbp_contract_walk (gbp_contract_send_details, &ctx);
}
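+/**
+ * Map the API GBP-VXLAN tunnel mode to the internal tunnel layer;
+ * returns -1 for an unrecognised mode
+ */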
+static int
+gbp_vxlan_tunnel_mode_2_layer (vl_api_gbp_vxlan_tunnel_mode_t mode,
+ gbp_vxlan_tunnel_layer_t * l)
+{
+ mode = clib_net_to_host_u32 (mode);
+
+ switch (mode)
+ {
+ case GBP_VXLAN_TUNNEL_MODE_L2:
+ *l = GBP_VXLAN_TUN_L2;
+ return (0);
+ case GBP_VXLAN_TUNNEL_MODE_L3:
+ *l = GBP_VXLAN_TUN_L3;
+ return (0);
+ }
+ return (-1);
+}
+
+static void
+vl_api_gbp_vxlan_tunnel_add_t_handler (vl_api_gbp_vxlan_tunnel_add_t * mp)
+{
+ vl_api_gbp_vxlan_tunnel_add_reply_t *rmp;
+ gbp_vxlan_tunnel_layer_t layer;
+  u32 sw_if_index = ~0;
+ int rv = 0;
+
+ rv = gbp_vxlan_tunnel_mode_2_layer (mp->tunnel.mode, &layer);
+
+ if (0 != rv)
+ goto out;
+
+ rv = gbp_vxlan_tunnel_add (ntohl (mp->tunnel.vni),
+ layer,
+ ntohl (mp->tunnel.bd_rd_id), &sw_if_index);
+
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_GBP_VXLAN_TUNNEL_ADD_REPLY + GBP_MSG_BASE,
+ ({
+ rmp->sw_if_index = htonl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_gbp_vxlan_tunnel_del_t_handler (vl_api_gbp_vxlan_tunnel_del_t * mp)
+{
+ vl_api_gbp_vxlan_tunnel_del_reply_t *rmp;
+ int rv = 0;
+
+  rv = gbp_vxlan_tunnel_del (ntohl (mp->vni));
+
+ REPLY_MACRO (VL_API_GBP_VXLAN_TUNNEL_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static vl_api_gbp_vxlan_tunnel_mode_t
+gbp_vxlan_tunnel_layer_2_mode (gbp_vxlan_tunnel_layer_t layer)
+{
+ vl_api_gbp_vxlan_tunnel_mode_t mode = GBP_VXLAN_TUNNEL_MODE_L2;
+
+ switch (layer)
+ {
+ case GBP_VXLAN_TUN_L2:
+ mode = GBP_VXLAN_TUNNEL_MODE_L2;
+ break;
+ case GBP_VXLAN_TUN_L3:
+ mode = GBP_VXLAN_TUNNEL_MODE_L3;
+ break;
+ }
+ mode = clib_host_to_net_u32 (mode);
+
+ return (mode);
+}
+
+static walk_rc_t
+gbp_vxlan_tunnel_send_details (gbp_vxlan_tunnel_t * gt, void *args)
+{
+ vl_api_gbp_vxlan_tunnel_details_t *mp;
+ gbp_walk_ctx_t *ctx;
+
+ ctx = args;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return 1;
+
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = htons (VL_API_GBP_VXLAN_TUNNEL_DETAILS + GBP_MSG_BASE);
+ mp->context = ctx->context;
+
+ mp->tunnel.vni = htonl (gt->gt_vni);
+ mp->tunnel.mode = gbp_vxlan_tunnel_layer_2_mode (gt->gt_layer);
+ mp->tunnel.bd_rd_id = htonl (gt->gt_bd_rd_id);
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (1);
+}
+
+static void
+vl_api_gbp_vxlan_tunnel_dump_t_handler (vl_api_gbp_vxlan_tunnel_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ gbp_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ gbp_vxlan_walk (gbp_vxlan_tunnel_send_details, &ctx);
+}
+
/*
* gbp_api_hookup
* Add vpe's API message handlers to the table.
diff --git a/src/plugins/gbp/gbp_bridge_domain.c b/src/plugins/gbp/gbp_bridge_domain.c
new file mode 100644
index 00000000000..b7812eb1645
--- /dev/null
+++ b/src/plugins/gbp/gbp_bridge_domain.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_endpoint.h>
+
+#include <vnet/dpo/dvr_dpo.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_bvi.h>
+#include <vnet/l2/l2_fib.h>
+
+/**
+ * Pool of GBP bridge_domains
+ */
+gbp_bridge_domain_t *gbp_bridge_domain_pool;
+
+/**
+ * DB of bridge_domains
+ */
+typedef struct gbp_bridge_domain_db_t
+{
+ uword *gbd_by_bd_id;
+} gbp_bridge_domain_db_t;
+
+static gbp_bridge_domain_db_t gbp_bridge_domain_db;
+
+/**
+ * logger
+ */
+vlib_log_class_t gb_logger;
+
+#define GBP_BD_DBG(...) \
+ vlib_log_debug (gb_logger, __VA_ARGS__);
+
+gbp_bridge_domain_t *
+gbp_bridge_domain_get (index_t i)
+{
+ return (pool_elt_at_index (gbp_bridge_domain_pool, i));
+}
+
+static void
+gbp_bridge_domain_lock (index_t i)
+{
+ gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (i);
+ gb->gb_locks++;
+}
+
+static index_t
+gbp_bridge_domain_find (u32 bd_id)
+{
+ uword *p;
+
+ p = hash_get (gbp_bridge_domain_db.gbd_by_bd_id, bd_id);
+
+ if (NULL != p)
+ return p[0];
+
+ return (INDEX_INVALID);
+}
+
+index_t
+gbp_bridge_domain_find_and_lock (u32 bd_id)
+{
+ uword *p;
+
+ p = hash_get (gbp_bridge_domain_db.gbd_by_bd_id, bd_id);
+
+ if (NULL != p)
+ {
+ gbp_bridge_domain_lock (p[0]);
+ return p[0];
+ }
+ return (INDEX_INVALID);
+}
+
+static void
+gbp_bridge_domain_db_add (gbp_bridge_domain_t * gb)
+{
+ index_t gbi = gb - gbp_bridge_domain_pool;
+
+ hash_set (gbp_bridge_domain_db.gbd_by_bd_id, gb->gb_bd_id, gbi);
+}
+
+static void
+gbp_bridge_domain_db_remove (gbp_bridge_domain_t * gb)
+{
+ hash_unset (gbp_bridge_domain_db.gbd_by_bd_id, gb->gb_bd_id);
+}
+
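+/**
+ * Add a GBP bridge-domain on top of an existing VPP bridge-domain, or take
+ * another lock on it if it already exists. MAC learning is disabled in the
+ * underlying BD and the BVI/uu-fwd interfaces are placed into it.
+ */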
+int
+gbp_bridge_domain_add_and_lock (u32 bd_id,
+ u32 bvi_sw_if_index, u32 uu_fwd_sw_if_index)
+{
+ gbp_bridge_domain_t *gb;
+ index_t gbi;
+
+ gbi = gbp_bridge_domain_find (bd_id);
+
+ if (INDEX_INVALID == gbi)
+ {
+ u32 bd_index;
+
+ bd_index = bd_find_index (&bd_main, bd_id);
+
+ if (~0 == bd_index)
+ return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
+
+ /*
+ * unset learning in the bridge
+ */
+ bd_set_flags (vlib_get_main (), bd_index, L2_LEARN, 0);
+
+ pool_get (gbp_bridge_domain_pool, gb);
+ memset (gb, 0, sizeof (*gb));
+
+ gb->gb_bd_id = bd_id;
+ gb->gb_bd_index = bd_index;
+ gb->gb_uu_fwd_sw_if_index = uu_fwd_sw_if_index;
+ gb->gb_bvi_sw_if_index = bvi_sw_if_index;
+ gb->gb_locks = 1;
+
+ /*
+ * Set the BVI and uu-flood interfaces into the BD
+ */
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L2_BRIDGE, gb->gb_bvi_sw_if_index,
+ bd_index, L2_BD_PORT_TYPE_BVI, 0, 0);
+ if (~0 != gb->gb_uu_fwd_sw_if_index)
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L2_BRIDGE, gb->gb_uu_fwd_sw_if_index,
+ bd_index, L2_BD_PORT_TYPE_UU_FWD, 0, 0);
+
+ /*
+ * Add the BVI's MAC to the L2FIB
+ */
+ l2fib_add_entry (vnet_sw_interface_get_hw_address
+ (vnet_get_main (), gb->gb_bvi_sw_if_index),
+ gb->gb_bd_index, gb->gb_bvi_sw_if_index,
+ (L2FIB_ENTRY_RESULT_FLAG_STATIC |
+ L2FIB_ENTRY_RESULT_FLAG_BVI));
+
+ gbp_bridge_domain_db_add (gb);
+ }
+ else
+ {
+ gb = gbp_bridge_domain_get (gbi);
+ gb->gb_locks++;
+ }
+
+ GBP_BD_DBG ("add: %U", format_gbp_bridge_domain, gb);
+
+ return (0);
+}
+
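+/**
+ * Release a lock on the GBP bridge-domain; on the last unlock the BVI's
+ * L2FIB entry is removed, the member interfaces are returned to L3 mode
+ * and the state is freed.
+ */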
+void
+gbp_bridge_domain_unlock (index_t index)
+{
+ gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (index);
+
+ gb->gb_locks--;
+
+ if (0 == gb->gb_locks)
+ {
+ GBP_BD_DBG ("destroy: %U", format_gbp_bridge_domain, gb);
+
+ l2fib_del_entry (vnet_sw_interface_get_hw_address
+ (vnet_get_main (), gb->gb_bvi_sw_if_index),
+ gb->gb_bd_index, gb->gb_bvi_sw_if_index);
+
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L3, gb->gb_bvi_sw_if_index,
+ gb->gb_bd_index, L2_BD_PORT_TYPE_BVI, 0, 0);
+ if (~0 != gb->gb_uu_fwd_sw_if_index)
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L3, gb->gb_uu_fwd_sw_if_index,
+ gb->gb_bd_index, L2_BD_PORT_TYPE_UU_FWD, 0, 0);
+
+ gbp_bridge_domain_db_remove (gb);
+
+ pool_put (gbp_bridge_domain_pool, gb);
+ }
+}
+
+int
+gbp_bridge_domain_delete (u32 bd_id)
+{
+ index_t gbi;
+
+ GBP_BD_DBG ("del: %d", bd_id);
+ gbi = gbp_bridge_domain_find (bd_id);
+
+ if (INDEX_INVALID != gbi)
+ {
+ GBP_BD_DBG ("del: %U", format_gbp_bridge_domain,
+ gbp_bridge_domain_get (gbi));
+ gbp_bridge_domain_unlock (gbi);
+
+ return (0);
+ }
+
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+void
+gbp_bridge_domain_walk (gbp_bridge_domain_cb_t cb, void *ctx)
+{
+ gbp_bridge_domain_t *gbpe;
+
+ /* *INDENT-OFF* */
+ pool_foreach(gbpe, gbp_bridge_domain_pool,
+ {
+ if (!cb(gbpe, ctx))
+ break;
+ });
+ /* *INDENT-ON* */
+}
+
+static clib_error_t *
+gbp_bridge_domain_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 uu_fwd_sw_if_index = ~0;
+ u32 bvi_sw_if_index = ~0;
+ u32 bd_id = ~0;
+ u8 add = 1;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "bvi %U", unformat_vnet_sw_interface,
+ vnm, &bvi_sw_if_index))
+ ;
+ else if (unformat (input, "uu-flood %U", unformat_vnet_sw_interface,
+ vnm, &uu_fwd_sw_if_index))
+ ;
+ else if (unformat (input, "add"))
+ add = 1;
+ else if (unformat (input, "del"))
+ add = 0;
+ else if (unformat (input, "bd %d", &bd_id))
+ ;
+ else
+ break;
+ }
+
+ if (~0 == bd_id)
+    return clib_error_return (0, "bridge-domain ID must be specified");
+
+ if (add)
+ {
+ if (~0 == bvi_sw_if_index)
+ return clib_error_return (0, "interface must be specified");
+
+ gbp_bridge_domain_add_and_lock (bd_id,
+ bvi_sw_if_index, uu_fwd_sw_if_index);
+ }
+ else
+ gbp_bridge_domain_delete (bd_id);
+
+ return (NULL);
+}
+
+/*?
+ * Configure a GBP bridge-domain
+ *
+ * @cliexpar
+ * @cliexstart{gbp bridge-domain [del] bd <ID> bvi <interface> uu-flood <interface>}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_bridge_domain_cli_node, static) = {
+ .path = "gbp bridge-domain",
+  .short_help = "gbp bridge-domain [del] bd <ID> bvi <interface> uu-flood <interface>",
+ .function = gbp_bridge_domain_cli,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_gbp_bridge_domain (u8 * s, va_list * args)
+{
+  gbp_bridge_domain_t *gb = va_arg (*args, gbp_bridge_domain_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+
+ if (NULL != gb)
+ s = format (s, "[%d] bd:[%d,%d], bvi:%U uu-flood:%U locks:%d",
+ gb - gbp_bridge_domain_pool,
+ gb->gb_bd_id,
+ gb->gb_bd_index,
+ format_vnet_sw_if_index_name, vnm, gb->gb_bvi_sw_if_index,
+ format_vnet_sw_if_index_name, vnm, gb->gb_uu_fwd_sw_if_index,
+ gb->gb_locks);
+ else
+ s = format (s, "NULL");
+
+ return (s);
+}
+
+static int
+gbp_bridge_domain_show_one (gbp_bridge_domain_t * gb, void *ctx)
+{
+ vlib_main_t *vm;
+
+ vm = ctx;
+  vlib_cli_output (vm, " %U", format_gbp_bridge_domain, gb);
+
+ return (1);
+}
+
+static clib_error_t *
+gbp_bridge_domain_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "Bridge-Domains:");
+ gbp_bridge_domain_walk (gbp_bridge_domain_show_one, vm);
+
+ return (NULL);
+}
+
+
+/*?
+ * Show Group Based Policy Bridge-Domains and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp bridge-domain}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_bridge_domain_show_node, static) = {
+ .path = "show gbp bridge-domain",
+ .short_help = "show gbp bridge-domain\n",
+ .function = gbp_bridge_domain_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_bridge_domain_init (vlib_main_t * vm)
+{
+ gb_logger = vlib_log_register_class ("gbp", "bd");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_bridge_domain_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_bridge_domain.h b/src/plugins/gbp/gbp_bridge_domain.h
new file mode 100644
index 00000000000..992900b4aa1
--- /dev/null
+++ b/src/plugins/gbp/gbp_bridge_domain.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_BRIDGE_DOMAIN_H__
+#define __GBP_BRIDGE_DOMAIN_H__
+
+#include <plugins/gbp/gbp_types.h>
+
+#include <vnet/fib/fib_types.h>
+
+/**
+ * A bridge Domain Representation.
+ * This is a standard bridge-domain plus all the attributes it must
+ * have to support the GBP model.
+ */
+typedef struct gpb_bridge_domain_t_
+{
+ /**
+ * Bridge-domain ID
+ */
+ u32 gb_bd_id;
+ u32 gb_bd_index;
+
+ /**
+ * The BD's BVI interface (obligatory)
+ */
+ u32 gb_bvi_sw_if_index;
+
+ /**
+ * The BD's MAC spine-proxy interface (optional)
+ */
+ u32 gb_uu_fwd_sw_if_index;
+
+ /**
+   * The BD's VNI interface on which packets from unknown endpoints
+ * arrive
+ */
+ u32 gb_vni_sw_if_index;
+
+ /**
+ * locks/references to the BD so it does not get deleted (from the API)
+ * whilst it is still being used
+ */
+ u32 gb_locks;
+} gbp_bridge_domain_t;
+
+extern int gbp_bridge_domain_add_and_lock (u32 bd_id,
+ u32 bvi_sw_if_index,
+ u32 uu_fwd_sw_if_index);
+extern void gbp_bridge_domain_unlock (index_t gbi);
+extern index_t gbp_bridge_domain_find_and_lock (u32 bd_id);
+extern int gbp_bridge_domain_delete (u32 bd_id);
+extern gbp_bridge_domain_t *gbp_bridge_domain_get (index_t i);
+
+typedef int (*gbp_bridge_domain_cb_t) (gbp_bridge_domain_t * gb, void *ctx);
+extern void gbp_bridge_domain_walk (gbp_bridge_domain_cb_t cb, void *ctx);
+
+extern u8 *format_gbp_bridge_domain (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_classify.c b/src/plugins/gbp/gbp_classify.c
index 3dc66993d11..fb57426e4a7 100644
--- a/src/plugins/gbp/gbp_classify.c
+++ b/src/plugins/gbp/gbp_classify.c
@@ -18,6 +18,8 @@
#include <plugins/gbp/gbp.h>
#include <vnet/l2/l2_input.h>
#include <vnet/l2/feat_bitmap.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
typedef enum gbp_src_classify_type_t_
{
@@ -56,7 +58,7 @@ always_inline uword
gbp_classify_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame,
- gbp_src_classify_type_t type, u8 is_l3)
+ gbp_src_classify_type_t type, dpo_proto_t dproto)
{
gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
u32 n_left_from, *from, *to_next;
@@ -75,7 +77,7 @@ gbp_classify_inline (vlib_main_t * vm,
while (n_left_from > 0 && n_left_to_next > 0)
{
u32 next0, bi0, src_epg, sw_if_index0;
- const gbp_endpoint_t *gep0;
+ const gbp_endpoint_t *ge0;
vlib_buffer_t *b0;
bi0 = from[0];
@@ -88,6 +90,7 @@ gbp_classify_inline (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
if (GBP_SRC_CLASSIFY_NULL == type)
{
@@ -98,10 +101,46 @@ gbp_classify_inline (vlib_main_t * vm,
}
else
{
- gep0 = gbp_endpoint_get_itf (sw_if_index0);
- src_epg = gep0->ge_epg_id;
- if (is_l3)
+ if (DPO_PROTO_ETHERNET == dproto)
{
+ const ethernet_header_t *h0;
+
+ h0 = vlib_buffer_get_current (b0);
+ next0 =
+ vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
+ L2INPUT_FEAT_GBP_SRC_CLASSIFY);
+ ge0 = gbp_endpoint_find_mac (h0->src_address,
+ vnet_buffer (b0)->l2.bd_index);
+ }
+ else if (DPO_PROTO_IP4 == dproto)
+ {
+ const ip4_header_t *h0;
+
+ h0 = vlib_buffer_get_current (b0);
+
+ ge0 = gbp_endpoint_find_ip4
+ (&h0->src_address,
+ fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+ sw_if_index0));
+
+
+ /*
+	       * Go straight to lookup, do not pass go, do not collect $200
+ */
+ next0 = 0;
+ }
+ else if (DPO_PROTO_IP6 == dproto)
+ {
+ const ip6_header_t *h0;
+
+ h0 = vlib_buffer_get_current (b0);
+
+ ge0 = gbp_endpoint_find_ip6
+ (&h0->src_address,
+ fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
+ sw_if_index0));
+
+
/*
* Go straight to lookup, do not pass go, do not collect $200
*/
@@ -109,10 +148,15 @@ gbp_classify_inline (vlib_main_t * vm,
}
else
{
- next0 =
- vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
- L2INPUT_FEAT_GBP_SRC_CLASSIFY);
+ ge0 = NULL;
+ next0 = 0;
+ ASSERT (0);
}
+
+ if (PREDICT_TRUE (NULL != ge0))
+ src_epg = ge0->ge_epg_id;
+ else
+ src_epg = EPG_INVALID;
}
vnet_buffer2 (b0)->gbp.src_epg = src_epg;
@@ -139,28 +183,32 @@ static uword
gbp_src_classify (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- return (gbp_classify_inline (vm, node, frame, GBP_SRC_CLASSIFY_PORT, 0));
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET));
}
static uword
gbp_null_classify (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- return (gbp_classify_inline (vm, node, frame, GBP_SRC_CLASSIFY_NULL, 0));
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET));
}
static uword
gbp_ip4_src_classify (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- return (gbp_classify_inline (vm, node, frame, 0, 1));
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4));
}
static uword
gbp_ip6_src_classify (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- return (gbp_classify_inline (vm, node, frame, 0, 1));
+ return (gbp_classify_inline (vm, node, frame,
+ GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6));
}
diff --git a/src/plugins/gbp/gbp_endpoint.c b/src/plugins/gbp/gbp_endpoint.c
index a261527a177..79c140ff916 100644
--- a/src/plugins/gbp/gbp_endpoint.c
+++ b/src/plugins/gbp/gbp_endpoint.c
@@ -17,98 +17,87 @@
#include <plugins/gbp/gbp_endpoint.h>
#include <plugins/gbp/gbp_endpoint_group.h>
-
-#include <vnet/ethernet/arp_packet.h>
+#include <plugins/gbp/gbp_itf.h>
+#include <plugins/gbp/gbp_scanner.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_vxlan.h>
+
+#include <vnet/ethernet/arp.h>
#include <vnet/l2/l2_input.h>
#include <vnet/l2/l2_output.h>
#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_fib.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/ip/ip_neighbor.h>
+
+static const char *gbp_endpoint_attr_names[] = GBP_ENDPOINT_ATTR_NAMES;
+
+/**
+ * EP DBs
+ */
+gbp_ep_db_t gbp_ep_db;
+
+vlib_log_class_t gbp_ep_logger;
-gbp_ep_by_itf_db_t gbp_ep_by_itf_db;
-gbp_ep_by_mac_itf_db_t gbp_ep_by_mac_itf_db;
-gbp_ep_by_ip_itf_db_t gbp_ep_by_ip_itf_db;
+#define GBP_ENDPOINT_DBG(...) \
+ vlib_log_debug (gbp_ep_logger, __VA_ARGS__);
+
+#define GBP_ENDPOINT_INFO(...) \
+ vlib_log_notice (gbp_ep_logger, __VA_ARGS__);
+
+/**
+ * GBP Endpoint inactive timeout (in seconds)
+ * If a dynamically learned Endpoint has not been heard from in this
+ * amount of time it is considered inactive and discarded
+ */
+static u32 GBP_ENDPOINT_INACTIVE_TIME = 30;
/**
* Pool of GBP endpoints
*/
gbp_endpoint_t *gbp_endpoint_pool;
-/* void */
-/* gbp_itf_epg_update (u32 sw_if_index, epg_id_t src_epg, u8 do_policy) */
-/* { */
-/* vec_validate_init_empty (gbp_itf_to_epg_db.gte_vec, */
-/* sw_if_index, ITF_INVALID); */
-
-/* if (0 == gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_ref_count) */
-/* { */
-/* l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SRC_CLASSIFY, */
-/* 1); */
-/* l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_FWD, 1); */
-/* if (do_policy) */
-/* l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_POLICY, */
-/* 1); */
-/* } */
-/* gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_epg = src_epg; */
-/* gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_ref_count++; */
-/* } */
-
-/* void */
-/* gbp_itf_epg_delete (u32 sw_if_index) */
-/* { */
-/* if (vec_len (gbp_itf_to_epg_db.gte_vec) <= sw_if_index) */
-/* return; */
-
-/* if (1 == gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_ref_count) */
-/* { */
-/* gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_epg = EPG_INVALID; */
-
-/* l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SRC_CLASSIFY, */
-/* 0); */
-/* l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_FWD, 0); */
-/* l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_POLICY, 0); */
-/* } */
-/* gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_ref_count--; */
-/* } */
-
-static void
-gbp_endpoint_mk_key_mac_itf (const mac_address_t * mac,
- u32 sw_if_index, clib_bihash_kv_16_8_t * key)
-{
- key->key[0] = mac_address_as_u64 (mac);
- key->key[1] = sw_if_index;
-}
+/**
+ * A count of the number of dynamic entries
+ */
+static u32 gbp_n_learnt_endpoints;
-static void
-gbp_endpoint_extract_key_mac_itf (const clib_bihash_kv_16_8_t * key,
- mac_address_t * mac, u32 * sw_if_index)
-{
- mac_address_from_u64 (key->key[0], mac);
- *sw_if_index = key->key[1];
-}
+#define FOR_EACH_GBP_ENDPOINT_ATTR(_item) \
+ for (_item = GBP_ENDPOINT_ATTR_FIRST; \
+ _item < GBP_ENDPOINT_ATTR_LAST; \
+ _item++)
-gbp_endpoint_t *
-gbp_endpoint_find_mac_itf (const mac_address_t * mac, u32 sw_if_index)
+u8 *
+format_gbp_endpoint_flags (u8 * s, va_list * args)
{
- clib_bihash_kv_16_8_t key, value;
- int rv;
-
- gbp_endpoint_mk_key_mac_itf (mac, sw_if_index, &key);
+ gbp_endpoint_attr_t attr;
+ gbp_endpoint_flags_t flags = va_arg (*args, gbp_endpoint_flags_t);
- rv =
- clib_bihash_search_16_8 (&gbp_ep_by_mac_itf_db.gte_table, &key, &value);
+ FOR_EACH_GBP_ENDPOINT_ATTR (attr)
+ {
+ if ((1 << attr) & flags)
+ {
+ s = format (s, "%s,", gbp_endpoint_attr_names[attr]);
+ }
+ }
- if (0 != rv)
- return NULL;
+ return (s);
+}
- return (gbp_endpoint_get (value.value));
+int
+gbp_endpoint_is_remote (const gbp_endpoint_t * ge)
+{
+ return (ge->ge_flags & GBP_ENDPOINT_FLAG_REMOTE);
}
static void
-gbp_endpoint_mk_key_ip_itf (const ip46_address_t * ip,
- u32 sw_if_index, clib_bihash_kv_24_8_t * key)
+gbp_endpoint_extract_key_mac_itf (const clib_bihash_kv_16_8_t * key,
+ mac_address_t * mac, u32 * sw_if_index)
{
- key->key[0] = ip->as_u64[0];
- key->key[1] = ip->as_u64[1];
- key->key[2] = sw_if_index;
+ mac_address_from_u64 (key->key[0], mac);
+ *sw_if_index = key->key[1];
}
static void
@@ -121,14 +110,14 @@ gbp_endpoint_extract_key_ip_itf (const clib_bihash_kv_24_8_t * key,
}
gbp_endpoint_t *
-gbp_endpoint_find_ip_itf (const ip46_address_t * ip, u32 sw_if_index)
+gbp_endpoint_find_ip (const ip46_address_t * ip, u32 fib_index)
{
clib_bihash_kv_24_8_t key, value;
int rv;
- gbp_endpoint_mk_key_ip_itf (ip, sw_if_index, &key);
+ gbp_endpoint_mk_key_ip (ip, fib_index, &key);
- rv = clib_bihash_search_24_8 (&gbp_ep_by_ip_itf_db.gte_table, &key, &value);
+ rv = clib_bihash_search_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, &value);
if (0 != rv)
return NULL;
@@ -136,238 +125,552 @@ gbp_endpoint_find_ip_itf (const ip46_address_t * ip, u32 sw_if_index)
return (gbp_endpoint_get (value.value));
}
-gbp_endpoint_t *
-gbp_endpoint_find_itf (u32 sw_if_index)
+static void
+gbp_endpoint_add_itf (u32 sw_if_index, index_t gei)
{
- /* if (vec_len(gbp_ep_by_itf_db.gte_vec) >= sw_if_index) */
- /* return NULL; */
+ vec_validate_init_empty (gbp_ep_db.ged_by_sw_if_index, sw_if_index, ~0);
- /* vec_search(gbp_ep_by_itf_db.gte_vec[sw_if_index], */
- /* return (gbp_endpoint_get(gbp_ep_by_itf_db.gte_vec[sw_if_index][0])); */
- return (NULL);
+ gbp_ep_db.ged_by_sw_if_index[sw_if_index] = gei;
}
static bool
-gbp_endpoint_add_mac_itf (const mac_address_t * mac,
- u32 sw_if_index, index_t gbpei)
+gbp_endpoint_add_mac (const mac_address_t * mac, u32 bd_index, index_t gei)
{
clib_bihash_kv_16_8_t key;
int rv;
- gbp_endpoint_mk_key_mac_itf (mac, sw_if_index, &key);
- key.value = gbpei;
+ gbp_endpoint_mk_key_mac (mac->bytes, bd_index, &key);
+ key.value = gei;
+
+ rv = clib_bihash_add_del_16_8 (&gbp_ep_db.ged_by_mac_bd, &key, 1);
- rv = clib_bihash_add_del_16_8 (&gbp_ep_by_mac_itf_db.gte_table, &key, 1);
return (0 == rv);
}
static bool
-gbp_endpoint_add_ip_itf (const ip46_address_t * ip,
- u32 sw_if_index, index_t gbpei)
+gbp_endpoint_add_ip (const ip46_address_t * ip, u32 fib_index, index_t gei)
{
clib_bihash_kv_24_8_t key;
int rv;
- gbp_endpoint_mk_key_ip_itf (ip, sw_if_index, &key);
- key.value = gbpei;
+ gbp_endpoint_mk_key_ip (ip, fib_index, &key);
+ key.value = gei;
- rv = clib_bihash_add_del_24_8 (&gbp_ep_by_ip_itf_db.gte_table, &key, 1);
+ rv = clib_bihash_add_del_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, 1);
return (0 == rv);
}
static void
-gbp_endpoint_add_itf (u32 sw_if_index, index_t gbpei)
-{
- vec_validate_init_empty (gbp_ep_by_itf_db.gte_vec, sw_if_index,
- INDEX_INVALID);
-
- if (INDEX_INVALID == gbp_ep_by_itf_db.gte_vec[sw_if_index])
- {
- l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SRC_CLASSIFY,
- 1);
- l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_FWD, 1);
- l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_POLICY, 1);
- }
- gbp_ep_by_itf_db.gte_vec[sw_if_index] = gbpei;
-}
-
-static void
-gbp_endpoint_del_mac_itf (const mac_address_t * mac, u32 sw_if_index)
+gbp_endpoint_del_mac (const mac_address_t * mac, u32 bd_index)
{
clib_bihash_kv_16_8_t key;
- gbp_endpoint_mk_key_mac_itf (mac, sw_if_index, &key);
+ gbp_endpoint_mk_key_mac (mac->bytes, bd_index, &key);
- clib_bihash_add_del_16_8 (&gbp_ep_by_mac_itf_db.gte_table, &key, 0);
+ clib_bihash_add_del_16_8 (&gbp_ep_db.ged_by_mac_bd, &key, 0);
}
static void
-gbp_endpoint_del_ip_itf (const ip46_address_t * ip, u32 sw_if_index)
+gbp_endpoint_del_ip (const ip46_address_t * ip, u32 fib_index)
{
clib_bihash_kv_24_8_t key;
- gbp_endpoint_mk_key_ip_itf (ip, sw_if_index, &key);
+ gbp_endpoint_mk_key_ip (ip, fib_index, &key);
- clib_bihash_add_del_24_8 (&gbp_ep_by_ip_itf_db.gte_table, &key, 0);
+ clib_bihash_add_del_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, 0);
}
-static void
-gbp_endpoint_del_itf (u32 sw_if_index)
+static index_t
+gbp_endpoint_index (const gbp_endpoint_t * ge)
{
- if (vec_len (gbp_ep_by_itf_db.gte_vec) <= sw_if_index)
- return;
+ return (ge - gbp_endpoint_pool);
+}
- l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SRC_CLASSIFY, 0);
- l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_FWD, 0);
- l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_POLICY, 0);
+static ip46_type_t
+ip46_address_get_type (const ip46_address_t * a)
+{
+ return (ip46_address_is_ip4 (a) ? IP46_TYPE_IP4 : IP46_TYPE_IP6);
+}
- gbp_ep_by_itf_db.gte_vec[sw_if_index] = INDEX_INVALID;
+static u8
+ip46_address_get_len (const ip46_address_t * a)
+{
+ return (ip46_address_is_ip4 (a) ? 32 : 128);
}
-static index_t
-gbp_endpoint_index (const gbp_endpoint_t * gbpe)
+static gbp_endpoint_t *
+gbp_endpoint_alloc (epg_id_t epg_id,
+ index_t ggi, u32 sw_if_index, gbp_endpoint_flags_t flags,
+ const ip46_address_t * tun_src,
+ const ip46_address_t * tun_dst)
{
- return (gbpe - gbp_endpoint_pool);
+ gbp_endpoint_t *ge;
+
+ pool_get_zero (gbp_endpoint_pool, ge);
+
+ ge->ge_epg = ggi;
+ ge->ge_epg_id = epg_id;
+ ge->ge_flags = flags;
+ ge->ge_sw_if_index = sw_if_index;
+ ge->ge_last_time = vlib_time_now (vlib_get_main ());
+
+ gbp_endpoint_group_find_and_lock (epg_id);
+
+ if (gbp_endpoint_is_remote (ge))
+ {
+ if (NULL != tun_src)
+ ip46_address_copy (&ge->tun.ge_src, tun_src);
+ if (NULL != tun_dst)
+ ip46_address_copy (&ge->tun.ge_dst, tun_dst);
+
+ /*
+ * the input interface may be the parent GBP-vxlan interface,
+       * create a child vxlan-gbp tunnel and use that as the endpoint's
+ * interface.
+ */
+ switch (gbp_vxlan_tunnel_get_type (sw_if_index))
+ {
+ case GBP_VXLAN_TEMPLATE_TUNNEL:
+ ge->tun.ge_parent_sw_if_index = sw_if_index;
+ ge->ge_sw_if_index =
+ gbp_vxlan_tunnel_clone_and_lock (sw_if_index, tun_src, tun_dst);
+ break;
+ case VXLAN_GBP_TUNNEL:
+ ge->tun.ge_parent_sw_if_index =
+ vxlan_gbp_tunnel_get_parent (sw_if_index);
+ ge->ge_sw_if_index = sw_if_index;
+ vxlan_gbp_tunnel_lock (ge->ge_sw_if_index);
+ break;
+ }
+ }
+
+ return (ge);
}
int
gbp_endpoint_update (u32 sw_if_index,
const ip46_address_t * ips,
- const mac_address_t * mac, epg_id_t epg_id, u32 * handle)
+ const mac_address_t * mac,
+ epg_id_t epg_id,
+ gbp_endpoint_flags_t flags,
+ const ip46_address_t * tun_src,
+ const ip46_address_t * tun_dst, u32 * handle)
{
- gbp_endpoint_group_t *gepg;
- const ip46_address_t *ip;
- gbp_endpoint_t *gbpe;
+ gbp_endpoint_group_t *gg;
+ gbp_endpoint_t *ge;
+ index_t ggi, gei;
+
+ if (~0 == sw_if_index)
+ return (VNET_API_ERROR_INVALID_SW_IF_INDEX);
- gbpe = NULL;
- gepg = gbp_endpoint_group_find (epg_id);
+ ge = NULL;
+ ggi = gbp_endpoint_group_find_and_lock (epg_id);
- if (NULL == gepg)
+ if (INDEX_INVALID == ggi)
return (VNET_API_ERROR_NO_SUCH_ENTRY);
+ gg = gbp_endpoint_group_get (ggi);
+
/*
- * find an existing endpoint matching one of the key types
+ * L2 EP
*/
- if (NULL != mac)
- {
- gbpe = gbp_endpoint_find_mac_itf (mac, sw_if_index);
- }
- if (NULL == gbpe && NULL != ips)
- {
- vec_foreach (ip, ips)
- {
- gbpe = gbp_endpoint_find_ip_itf (ip, sw_if_index);
-
- if (NULL != gbpe)
- break;
- }
- }
- if (NULL == gbpe)
+ if (NULL != mac && !mac_address_is_zero (mac))
{
- gbpe = gbp_endpoint_find_itf (sw_if_index);
+ /*
+ * find an existing endpoint matching one of the key types
+ */
+ ge = gbp_endpoint_find_mac (mac->bytes, gg->gg_bd_index);
+ if (NULL == ge)
+ {
+ /*
+ * new entry
+ */
+ ge = gbp_endpoint_alloc (epg_id, ggi, sw_if_index, flags,
+ tun_src, tun_dst);
+ gei = gbp_endpoint_index (ge);
+ mac_address_copy (&ge->ge_mac, mac);
+
+ ge->ge_itf = gbp_itf_add_and_lock (ge->ge_sw_if_index,
+ gg->gg_bd_index);
+
+ gbp_itf_set_l2_input_feature (ge->ge_itf, gei,
+ L2INPUT_FEAT_GBP_FWD);
+
+ if (gbp_endpoint_is_remote (ge))
+ {
+ gbp_itf_set_l2_output_feature (ge->ge_itf, gei,
+ L2OUTPUT_FEAT_GBP_POLICY_MAC);
+ }
+ else
+ {
+ gbp_endpoint_add_itf (ge->ge_sw_if_index, gei);
+ gbp_itf_set_l2_output_feature (ge->ge_itf, gei,
+ L2OUTPUT_FEAT_GBP_POLICY_PORT);
+ }
+
+ gbp_endpoint_add_mac (mac, gg->gg_bd_index, gei);
+
+ l2fib_add_entry (mac->bytes, gg->gg_bd_index, ge->ge_sw_if_index,
+ L2FIB_ENTRY_RESULT_FLAG_STATIC);
+ }
+ else
+ {
+ /*
+ * update existing entry..
+ */
+ ge->ge_flags = flags;
+ gei = gbp_endpoint_index (ge);
+ goto out;
+ }
}
- if (NULL == gbpe)
+ /*
+ * L3 EP
+ */
+ if (NULL != ips && !ip46_address_is_zero (ips))
{
- index_t gbpei;
+ const ip46_address_t *ip;
+ fib_protocol_t fproto;
+ gbp_endpoint_t *l3_ge;
u32 ii;
+
/*
- * new entry
+       * look for a matching EP by any of the addresses.
+       * An EP's IP addresses cannot change, so we can search based on
+       * the first.
*/
- pool_get (gbp_endpoint_pool, gbpe);
- gbpei = gbp_endpoint_index (gbpe);
+ fproto = fib_proto_from_ip46 (ip46_address_get_type (&ips[0]));
- gbpe->ge_epg_id = epg_id;
- gbpe->ge_sw_if_index = sw_if_index;
- gbp_endpoint_add_itf (gbpe->ge_sw_if_index, gbpei);
-
- if (NULL != mac)
+ l3_ge = gbp_endpoint_find_ip (&ips[0],
+ gbp_endpoint_group_get_fib_index (gg,
+ fproto));
+ if (NULL == l3_ge)
{
- gbpe->ge_mac = *mac;
-
- // FIXME ERROR
- gbp_endpoint_add_mac_itf (mac, sw_if_index, gbpei);
+ if (NULL == ge)
+ {
+ ge = gbp_endpoint_alloc (epg_id, ggi, sw_if_index, flags,
+ tun_src, tun_dst);
+ ge->ge_itf = gbp_itf_add_and_lock (sw_if_index, ~0);
+ }
+ else
+ /* L2 EP with IPs */
+ gei = gbp_endpoint_index (ge);
}
-
- if (NULL != ips)
+ else
{
- vec_validate (gbpe->ge_ips, vec_len (ips) - 1);
- vec_foreach_index (ii, ips)
+ /* modify */
+ ge = l3_ge;
+ ge->ge_flags = flags;
+ gei = gbp_endpoint_index (ge);
+ goto out;
+ }
+
+ gei = gbp_endpoint_index (ge);
+ ge->ge_ips = ips;
+ vec_validate (ge->ge_adjs, vec_len (ips) - 1);
+
+ vec_foreach_index (ii, ge->ge_ips)
+ {
+ ethernet_header_t *eth;
+ ip46_type_t ip_type;
+ u32 ip_sw_if_index;
+ u8 *rewrite;
+
+ rewrite = NULL;
+ ip = &ge->ge_ips[ii];
+ ip_type = ip46_address_get_type (ip);
+ fproto = fib_proto_from_ip46 (ip_type);
+
+ bd_add_del_ip_mac (gg->gg_bd_index, ip_type, ip, &ge->ge_mac, 1);
+
+ // FIXME - check error
+ gbp_endpoint_add_ip (ip,
+ gbp_endpoint_group_get_fib_index (gg, fproto),
+ gei);
+
+ /*
+	     * add a host route via the EPG's BVI. We need this because the
+	     * adj-fib does not install, due to the cover refinement check,
+	     * since the BVI's prefix is /32
+ */
+ fib_prefix_t pfx = {
+ .fp_proto = fproto,
+ .fp_len = ip46_address_get_len (ip),
+ .fp_addr = *ip,
+ };
+ vec_validate (rewrite, sizeof (*eth) - 1);
+ eth = (ethernet_header_t *) rewrite;
+
+ eth->type = clib_host_to_net_u16 ((fproto == FIB_PROTOCOL_IP4 ?
+ ETHERNET_TYPE_IP4 :
+ ETHERNET_TYPE_IP6));
+
+ if (gbp_endpoint_is_remote (ge))
{
- ip46_address_copy (&gbpe->ge_ips[ii], &ips[ii]);
+ /*
+	       * for dynamic EPs we must add the IP adjacency via the learned
+	       * tunnel, since the BD will not contain the EP's MAC because it
+	       * was L3 learned. The dst MAC address used is the 'BD's MAC'.
+ */
+ ip_sw_if_index = ge->ge_sw_if_index;
+
+ mac_address_to_bytes (gbp_route_domain_get_local_mac (),
+ eth->src_address);
+ mac_address_to_bytes (gbp_route_domain_get_remote_mac (),
+ eth->dst_address);
+ }
+ else
+ {
+ /*
+ * for the static EPs we add the IP adjacency via the BVI
+ * knowing that the BD has the MAC address to route to and
+ * that policy will be applied on egress to the EP's port
+ */
+ ip_sw_if_index = gbp_endpoint_group_get_bvi (gg);
+
+ clib_memcpy (eth->src_address,
+ vnet_sw_interface_get_hw_address (vnet_get_main (),
+ ip_sw_if_index),
+ sizeof (eth->src_address));
+ mac_address_to_bytes (&ge->ge_mac, eth->dst_address);
+ }
+
+ fib_table_entry_path_add
+ (gbp_endpoint_group_get_fib_index (gg, fproto),
+ &pfx, FIB_SOURCE_PLUGIN_LOW,
+ FIB_ENTRY_FLAG_NONE,
+ fib_proto_to_dpo (fproto), ip, ip_sw_if_index,
+ ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE);
- // FIXME ERROR
- gbp_endpoint_add_ip_itf (&ips[ii], sw_if_index, gbpei);
+ ge->ge_adjs[ii] = adj_nbr_add_or_lock_w_rewrite (fproto,
+ fib_proto_to_link
+ (fproto), ip,
+ ip_sw_if_index,
+ rewrite);
+
+ if (gbp_endpoint_is_remote (ge))
+ {
+ dpo_id_t policy_dpo = DPO_INVALID;
/*
- * send a gratuitous ARP on the EPG's uplink. this is done so
- * that if this EP has moved from some other place in the
- * 'fabric', upstream devices are informed
+ * interpose a policy DPO from the endpoint so that policy
+ * is applied
*/
- if (ip46_address_is_ip4 (&ips[ii]))
+ gbp_policy_dpo_add_or_lock (fib_proto_to_dpo (fproto),
+ gg->gg_id, ~0, &policy_dpo);
+
+ fib_table_entry_special_dpo_add
+ (gbp_endpoint_group_get_fib_index (gg, fproto),
+ &pfx,
+ FIB_SOURCE_PLUGIN_HI, FIB_ENTRY_FLAG_INTERPOSE, &policy_dpo);
+ }
+
+ /*
+ * send a gratuitous ARP on the EPG's uplink. this is done so
+ * that if this EP has moved from some other place in the
+ * 'fabric', upstream devices are informed
+ */
+ if (!(gbp_endpoint_is_remote (ge)) && ~0 != gg->gg_uplink_sw_if_index)
+ {
+ gbp_endpoint_add_itf (sw_if_index, gei);
+ if (ip46_address_is_ip4 (ip))
send_ip4_garp_w_addr (vlib_get_main (),
- &ips[ii].ip4,
- gepg->gepg_uplink_sw_if_index);
+ &ip->ip4, gg->gg_uplink_sw_if_index);
else
send_ip6_na_w_addr (vlib_get_main (),
- &ips[ii].ip6,
- gepg->gepg_uplink_sw_if_index);
+ &ip->ip6, gg->gg_uplink_sw_if_index);
}
+ }
+ }
+
+ if (NULL == ge)
+ return (0);
+
+ /*
+ * count the number of dynamic entries and kick off the scanner
+   * process if this is our first.
+ */
+ if (gbp_endpoint_is_remote (ge))
+ {
+ gbp_n_learnt_endpoints++;
+
+ if (1 == gbp_n_learnt_endpoints)
+ {
+ vlib_process_signal_event (vlib_get_main (),
+ gbp_scanner_node.index,
+ GBP_ENDPOINT_SCAN_START, 0);
}
}
else
{
/*
- * update existing entry..
+ * non-remote endpoints (i.e. those not arriving on iVXLAN
+       * tunnels) need to be classified based on the input interface.
+       * We enable the GBP-FWD feature only if the group has an uplink
+ * interface (on which the GBP-FWD feature would send UU traffic).
*/
- ASSERT (0);
+ l2input_feat_masks_t feats = L2INPUT_FEAT_GBP_SRC_CLASSIFY;
+
+ if (~0 != gg->gg_uplink_sw_if_index)
+ feats |= L2INPUT_FEAT_GBP_FWD;
+ gbp_itf_set_l2_input_feature (ge->ge_itf, gbp_endpoint_index (ge),
+ feats);
}
+out:
+
+ if (handle)
+ *handle = (ge - gbp_endpoint_pool);
- *handle = (gbpe - gbp_endpoint_pool);
+ gbp_endpoint_group_unlock (ggi);
+ GBP_ENDPOINT_INFO ("update: %U", format_gbp_endpoint, gei);
return (0);
}
void
-gbp_endpoint_delete (u32 handle)
+gbp_endpoint_delete (index_t gei)
{
- gbp_endpoint_t *gbpe;
+ gbp_endpoint_group_t *gg;
+ gbp_endpoint_t *ge;
- if (pool_is_free_index (gbp_endpoint_pool, handle))
+ if (pool_is_free_index (gbp_endpoint_pool, gei))
return;
- gbpe = pool_elt_at_index (gbp_endpoint_pool, handle);
+ GBP_ENDPOINT_INFO ("delete: %U", format_gbp_endpoint, gei);
- gbp_endpoint_del_itf (gbpe->ge_sw_if_index);
+ ge = gbp_endpoint_get (gei);
+ gg = gbp_endpoint_group_get (ge->ge_epg);
- if (!mac_address_is_zero (&gbpe->ge_mac))
- {
- gbp_endpoint_del_mac_itf (&gbpe->ge_mac, gbpe->ge_sw_if_index);
- }
+ gbp_endpoint_del_mac (&ge->ge_mac, gg->gg_bd_index);
+ l2fib_del_entry (ge->ge_mac.bytes, gg->gg_bd_index, ge->ge_sw_if_index);
+ gbp_itf_set_l2_input_feature (ge->ge_itf, gei, (L2INPUT_FEAT_NONE));
+ gbp_itf_set_l2_output_feature (ge->ge_itf, gei, L2OUTPUT_FEAT_NONE);
- if (NULL != gbpe->ge_ips)
+ if (NULL != ge->ge_ips)
{
const ip46_address_t *ip;
+ index_t *ai;
- vec_foreach (ip, gbpe->ge_ips)
+ vec_foreach (ai, ge->ge_adjs)
{
- gbp_endpoint_del_ip_itf (ip, gbpe->ge_sw_if_index);
+ adj_unlock (*ai);
+ }
+ vec_foreach (ip, ge->ge_ips)
+ {
+ fib_protocol_t fproto;
+ ip46_type_t ip_type;
+
+ ip_type = ip46_address_get_type (ip);
+ fproto = fib_proto_from_ip46 (ip_type);
+
+ gbp_endpoint_del_ip (ip,
+ gbp_endpoint_group_get_fib_index (gg, fproto));
+
+ bd_add_del_ip_mac (gg->gg_bd_index, ip_type, ip, &ge->ge_mac, 0);
+
+ /*
+ * remove a host route via the EPG's BVI
+ */
+ fib_prefix_t pfx = {
+ .fp_proto = fproto,
+ .fp_len = ip46_address_get_len (ip),
+ .fp_addr = *ip,
+ };
+
+ if (gbp_endpoint_is_remote (ge))
+ {
+ fib_table_entry_special_remove
+ (gbp_endpoint_group_get_fib_index (gg, fproto),
+ &pfx, FIB_SOURCE_PLUGIN_HI);
+ }
+
+ fib_table_entry_path_remove
+ (gbp_endpoint_group_get_fib_index (gg, fproto),
+ &pfx, FIB_SOURCE_PLUGIN_LOW,
+ fib_proto_to_dpo (fproto), ip,
+ (gbp_endpoint_is_remote (ge) ?
+ ge->ge_sw_if_index :
+ gbp_endpoint_group_get_bvi (gg)),
+ ~0, 1, FIB_ROUTE_PATH_FLAG_NONE);
}
}
- pool_put (gbp_endpoint_pool, gbpe);
+
+ if (ge->ge_flags & GBP_ENDPOINT_FLAG_LEARNT)
+ {
+ gbp_n_learnt_endpoints--;
+
+ if (0 == gbp_n_learnt_endpoints)
+ {
+ vlib_process_signal_event (vlib_get_main (),
+ gbp_scanner_node.index,
+ GBP_ENDPOINT_SCAN_STOP, 0);
+ }
+ }
+
+ gbp_itf_unlock (ge->ge_itf);
+ if (gbp_endpoint_is_remote (ge))
+ {
+ vxlan_gbp_tunnel_unlock (ge->ge_sw_if_index);
+ }
+ gbp_endpoint_group_unlock (ge->ge_epg);
+ pool_put (gbp_endpoint_pool, ge);
+}
+
+typedef struct gbp_endpoint_flush_ctx_t_
+{
+ u32 sw_if_index;
+ index_t *geis;
+} gbp_endpoint_flush_ctx_t;
+
+static walk_rc_t
+gbp_endpoint_flush_cb (index_t gei, void *args)
+{
+ gbp_endpoint_flush_ctx_t *ctx = args;
+ gbp_endpoint_t *ge;
+
+ ge = gbp_endpoint_get (gei);
+
+ if (gbp_endpoint_is_remote (ge) &&
+ ctx->sw_if_index == ge->tun.ge_parent_sw_if_index)
+ {
+ vec_add1 (ctx->geis, gei);
+ }
+
+ return (WALK_CONTINUE);
+}
+
+/**
+ * remove all learnt (remote) endpoints that use this interface as their tunnel parent
+ */
+void
+gbp_endpoint_flush (u32 sw_if_index)
+{
+ gbp_endpoint_flush_ctx_t ctx = {
+ .sw_if_index = sw_if_index,
+ };
+ index_t *gei;
+
+ gbp_endpoint_walk (gbp_endpoint_flush_cb, &ctx);
+
+ vec_foreach (gei, ctx.geis)
+ {
+ gbp_endpoint_delete (*gei);
+ }
+
+ vec_free (ctx.geis);
}
void
gbp_endpoint_walk (gbp_endpoint_cb_t cb, void *ctx)
{
- gbp_endpoint_t *gbpe;
+ u32 index;
/* *INDENT-OFF* */
- pool_foreach(gbpe, gbp_endpoint_pool,
+ pool_foreach_index(index, gbp_endpoint_pool,
{
- if (!cb(gbpe, ctx))
+ if (!cb(index, ctx))
break;
});
/* *INDENT-ON* */
@@ -380,7 +683,7 @@ gbp_endpoint_cli (vlib_main_t * vm,
ip46_address_t ip = ip46_address_initializer, *ips = NULL;
mac_address_t mac = ZERO_MAC_ADDRESS;
vnet_main_t *vnm = vnet_get_main ();
- epg_id_t epg_id = EPG_INVALID;
+ u32 epg_id = EPG_INVALID;
u32 handle = INDEX_INVALID;
u32 sw_if_index = ~0;
u8 add = 1;
@@ -418,7 +721,9 @@ gbp_endpoint_cli (vlib_main_t * vm,
if (EPG_INVALID == epg_id)
return clib_error_return (0, "EPG-ID must be specified");
- rv = gbp_endpoint_update (sw_if_index, ips, &mac, epg_id, &handle);
+ rv =
+ gbp_endpoint_update (sw_if_index, ips, &mac, epg_id,
+ GBP_ENDPOINT_FLAG_NONE, NULL, NULL, &handle);
if (rv)
return clib_error_return (0, "GBP Endpoint update returned %d", rv);
@@ -457,37 +762,41 @@ VLIB_CLI_COMMAND (gbp_endpoint_cli_node, static) = {
u8 *
format_gbp_endpoint (u8 * s, va_list * args)
{
- index_t gbpei = va_arg (*args, index_t);
- vnet_main_t *vnm = vnet_get_main ();
+ index_t gei = va_arg (*args, index_t);
const ip46_address_t *ip;
- gbp_endpoint_t *gbpe;
+ gbp_endpoint_t *ge;
- gbpe = gbp_endpoint_get (gbpei);
+ ge = gbp_endpoint_get (gei);
- s = format (s, "[@%d] ", gbpei);
- s =
- format (s, "%U", format_vnet_sw_if_index_name, vnm, gbpe->ge_sw_if_index);
- s = format (s, ", IPs:[");
+ s = format (s, "[@%d] ", gei);
+ s = format (s, "IPs:[");
- vec_foreach (ip, gbpe->ge_ips)
+ vec_foreach (ip, ge->ge_ips)
{
s = format (s, "%U, ", format_ip46_address, ip, IP46_TYPE_ANY);
}
s = format (s, "]");
- s = format (s, " MAC:%U", format_mac_address_t, &gbpe->ge_mac);
- s = format (s, " EPG-ID:%d", gbpe->ge_epg_id);
+ s = format (s, " MAC:%U", format_mac_address_t, &ge->ge_mac);
+ s = format (s, " EPG-ID:%d", ge->ge_epg_id);
+ if (GBP_ENDPOINT_FLAG_NONE != ge->ge_flags)
+ {
+ s = format (s, " flags:%U", format_gbp_endpoint_flags, ge->ge_flags);
+ }
+
+ s = format (s, " itf:[%U]", format_gbp_itf, ge->ge_itf);
+ s = format (s, " last-time:[%f]", ge->ge_last_time);
return s;
}
static walk_rc_t
-gbp_endpoint_show_one (gbp_endpoint_t * gbpe, void *ctx)
+gbp_endpoint_show_one (index_t gei, void *ctx)
{
vlib_main_t *vm;
vm = ctx;
- vlib_cli_output (vm, " %U", format_gbp_endpoint, gbp_endpoint_index (gbpe));
+ vlib_cli_output (vm, " %U", format_gbp_endpoint, gei);
return (WALK_CONTINUE);
}
@@ -530,7 +839,7 @@ static clib_error_t *
gbp_endpoint_show (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
- u32 sw_if_index, show_dbs, handle;
+ u32 show_dbs, handle;
handle = INDEX_INVALID;
show_dbs = 0;
@@ -539,7 +848,7 @@ gbp_endpoint_show (vlib_main_t * vm,
{
if (unformat (input, "%d", &handle))
;
- else if (unformat (input, "db", &handle))
+ else if (unformat (input, "db"))
show_dbs = 1;
else
break;
@@ -552,19 +861,10 @@ gbp_endpoint_show (vlib_main_t * vm,
else if (show_dbs)
{
vlib_cli_output (vm, "\nDatabases:");
- clib_bihash_foreach_key_value_pair_24_8 (&gbp_ep_by_ip_itf_db.gte_table,
+ clib_bihash_foreach_key_value_pair_24_8 (&gbp_ep_db.ged_by_ip_rd,
gbp_endpoint_walk_ip_itf, vm);
clib_bihash_foreach_key_value_pair_16_8
- (&gbp_ep_by_mac_itf_db.gte_table, gbp_endpoint_walk_mac_itf, vm);
-
- vec_foreach_index (sw_if_index, gbp_ep_by_itf_db.gte_vec)
- {
- if (INDEX_INVALID != gbp_ep_by_itf_db.gte_vec[sw_if_index])
- vlib_cli_output (vm, " {%U} -> %d",
- format_vnet_sw_if_index_name, vnet_get_main (),
- sw_if_index,
- gbp_ep_by_itf_db.gte_vec[sw_if_index]);
- }
+ (&gbp_ep_db.ged_by_mac_bd, gbp_endpoint_walk_mac_itf, vm);
}
else
{
@@ -590,20 +890,161 @@ VLIB_CLI_COMMAND (gbp_endpoint_show_node, static) = {
};
/* *INDENT-ON* */
+static void
+gbp_endpoint_check (index_t gei, f64 start_time)
+{
+ gbp_endpoint_t *ge;
+
+ ge = gbp_endpoint_get (gei);
+
+ GBP_ENDPOINT_DBG ("scan at:%f -> %U", start_time, format_gbp_endpoint, gei);
+
+ if ((ge->ge_flags & GBP_ENDPOINT_FLAG_LEARNT) &&
+ ((start_time - ge->ge_last_time) > GBP_ENDPOINT_INACTIVE_TIME))
+ {
+ gbp_endpoint_delete (gei);
+ }
+}
+
+static void
+gbp_endpoint_scan_l2 (vlib_main_t * vm)
+{
+ clib_bihash_16_8_t *gte_table = &gbp_ep_db.ged_by_mac_bd;
+ f64 last_start, start_time, delta_t;
+ int i, j, k;
+
+ delta_t = 0;
+ last_start = start_time = vlib_time_now (vm);
+
+ for (i = 0; i < gte_table->nbuckets; i++)
+ {
+ clib_bihash_bucket_16_8_t *b;
+ clib_bihash_value_16_8_t *v;
+
+ /* allow no more than 20us without a pause */
+ delta_t = vlib_time_now (vm) - last_start;
+ if (delta_t > 20e-6)
+ {
+ /* suspend for 100 us */
+ vlib_process_suspend (vm, 100e-6);
+ last_start = vlib_time_now (vm);
+ }
+
+ b = &gte_table->buckets[i];
+ if (b->offset == 0)
+ continue;
+ v = clib_bihash_get_value_16_8 (gte_table, b->offset);
+
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (clib_bihash_is_free_16_8 (&v->kvp[k]))
+ continue;
+
+ gbp_endpoint_check (v->kvp[k].value, start_time);
+
+ /*
+ * Note: we may have just freed the bucket's backing
+ * storage, so check right here...
+ */
+ if (b->offset == 0)
+ goto doublebreak;
+ }
+ v++;
+ }
+ doublebreak:
+ ;
+ }
+}
+
+static void
+gbp_endpoint_scan_l3 (vlib_main_t * vm)
+{
+ clib_bihash_24_8_t *gte_table = &gbp_ep_db.ged_by_ip_rd;
+ f64 last_start, start_time, delta_t;
+ int i, j, k;
+
+ delta_t = 0;
+ last_start = start_time = vlib_time_now (vm);
+
+ for (i = 0; i < gte_table->nbuckets; i++)
+ {
+ clib_bihash_bucket_24_8_t *b;
+ clib_bihash_value_24_8_t *v;
+
+ /* allow no more than 20us without a pause */
+ delta_t = vlib_time_now (vm) - last_start;
+ if (delta_t > 20e-6)
+ {
+ /* suspend for 100 us */
+ vlib_process_suspend (vm, 100e-6);
+ last_start = vlib_time_now (vm);
+ }
+
+ b = &gte_table->buckets[i];
+ if (b->offset == 0)
+ continue;
+ v = clib_bihash_get_value_24_8 (gte_table, b->offset);
+
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (clib_bihash_is_free_24_8 (&v->kvp[k]))
+ continue;
+
+ gbp_endpoint_check (v->kvp[k].value, start_time);
+
+ /*
+ * Note: we may have just freed the bucket's backing
+ * storage, so check right here...
+ */
+ if (b->offset == 0)
+ goto doublebreak;
+ }
+ v++;
+ }
+ doublebreak:
+ ;
+ }
+}
+
+void
+gbp_endpoint_scan (vlib_main_t * vm)
+{
+ gbp_endpoint_scan_l2 (vm);
+ gbp_endpoint_scan_l3 (vm);
+}
+
+void
+gbp_learn_set_inactive_threshold (u32 threshold)
+{
+ GBP_ENDPOINT_INACTIVE_TIME = threshold;
+}
+
+f64
+gbp_endpoint_scan_threshold (void)
+{
+ return (GBP_ENDPOINT_INACTIVE_TIME);
+}
+
#define GBP_EP_HASH_NUM_BUCKETS (2 * 1024)
#define GBP_EP_HASH_MEMORY_SIZE (1 << 20)
static clib_error_t *
gbp_endpoint_init (vlib_main_t * vm)
{
- clib_bihash_init_24_8 (&gbp_ep_by_ip_itf_db.gte_table,
- "GBP Endpoints - IP/Interface",
+ clib_bihash_init_24_8 (&gbp_ep_db.ged_by_ip_rd,
+ "GBP Endpoints - IP/RD",
GBP_EP_HASH_NUM_BUCKETS, GBP_EP_HASH_MEMORY_SIZE);
- clib_bihash_init_16_8 (&gbp_ep_by_mac_itf_db.gte_table,
- "GBP Endpoints - MAC/Interface",
+ clib_bihash_init_16_8 (&gbp_ep_db.ged_by_mac_bd,
+ "GBP Endpoints - MAC/BD",
GBP_EP_HASH_NUM_BUCKETS, GBP_EP_HASH_MEMORY_SIZE);
+ gbp_ep_logger = vlib_log_register_class ("gbp", "ep");
+
return (NULL);
}
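
As a rough usage sketch (not part of this change set): a hypothetical caller adds a local, non-learnt endpoint through the extended gbp_endpoint_update() signature the same way the CLI handler above does, passing GBP_ENDPOINT_FLAG_NONE and NULL tunnel addresses.

/*
 * Illustrative sketch only; the wrapper below is hypothetical.
 */
#include <plugins/gbp/gbp_endpoint.h>

static int
example_add_local_ep (u32 sw_if_index, const ip46_address_t * ips,
                      const mac_address_t * mac, epg_id_t epg_id)
{
  u32 handle = INDEX_INVALID;

  /* a local EP: no endpoint flags, no VXLAN-GBP tunnel src/dst */
  return (gbp_endpoint_update (sw_if_index, ips, mac, epg_id,
                               GBP_ENDPOINT_FLAG_NONE, NULL, NULL,
                               &handle));
}
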
diff --git a/src/plugins/gbp/gbp_endpoint.h b/src/plugins/gbp/gbp_endpoint.h
index c92b2172594..bd157c99f1b 100644
--- a/src/plugins/gbp/gbp_endpoint.h
+++ b/src/plugins/gbp/gbp_endpoint.h
@@ -28,13 +28,31 @@
/**
* Flags for each endpoint
*/
+typedef enum gbp_endpoint_attr_t_
+{
+ GBP_ENDPOINT_ATTR_FIRST = 0,
+ GBP_ENDPOINT_ATTR_BOUNCE = GBP_ENDPOINT_ATTR_FIRST,
+ GBP_ENDPOINT_ATTR_REMOTE = 1,
+ GBP_ENDPOINT_ATTR_LEARNT = 2,
+ GBP_ENDPOINT_ATTR_LAST,
+} gbp_endpoint_attr_t;
+
typedef enum gbp_endpoint_flags_t_
{
GBP_ENDPOINT_FLAG_NONE = 0,
- GBP_ENDPOINT_FLAG_BOUNCE = (1 << 0),
- GBP_ENDPOINT_FLAG_DYNAMIC = (1 << 1),
+ GBP_ENDPOINT_FLAG_BOUNCE = (1 << GBP_ENDPOINT_ATTR_BOUNCE),
+ GBP_ENDPOINT_FLAG_REMOTE = (1 << GBP_ENDPOINT_ATTR_REMOTE),
+ GBP_ENDPOINT_FLAG_LEARNT = (1 << GBP_ENDPOINT_ATTR_LEARNT),
} gbp_endpoint_flags_t;
+#define GBP_ENDPOINT_ATTR_NAMES { \
+ [GBP_ENDPOINT_ATTR_BOUNCE] = "bounce", \
+ [GBP_ENDPOINT_ATTR_REMOTE] = "remote", \
+ [GBP_ENDPOINT_ATTR_LEARNT] = "learnt", \
+}
+
+extern u8 *format_gbp_endpoint_flags (u8 * s, va_list * args);
+
/**
* A Group Based Policy Endpoint.
* This is typically a VM or container. If the endpoint is local (i.e. on
@@ -48,12 +66,13 @@ typedef struct gbp_endpoint_t_
/**
* The interface on which the EP is connected
*/
+ index_t ge_itf;
u32 ge_sw_if_index;
/**
   * A vector of ip addresses that belong to the endpoint
*/
- ip46_address_t *ge_ips;
+ const ip46_address_t *ge_ips;
/**
* MAC address of the endpoint
@@ -61,52 +80,74 @@ typedef struct gbp_endpoint_t_
mac_address_t ge_mac;
/**
- * The endpoint's designated EPG
+ * Index of the Endpoint's Group
+ */
+ index_t ge_epg;
+
+ /**
+ * Endpoint Group's ID
*/
- epg_id_t ge_epg_id;
+ index_t ge_epg_id;
/**
* Endpoint flags
*/
gbp_endpoint_flags_t ge_flags;
+
+ /**
+ * The L3 adj, if created
+ */
+ index_t *ge_adjs;
+
+ /**
+   * The last time a packet was seen from this endpoint
+ */
+ f64 ge_last_time;
+
+ /**
+ * Tunnel info for remote endpoints
+ */
+ struct
+ {
+ u32 ge_parent_sw_if_index;
+ ip46_address_t ge_src;
+ ip46_address_t ge_dst;
+ } tun;
} gbp_endpoint_t;
extern u8 *format_gbp_endpoint (u8 * s, va_list * args);
/**
- * Interface to source EPG DB - a per-interface vector
+ * GBP Endpoint Databases
*/
-typedef struct gbp_ep_by_itf_db_t_
-{
- index_t *gte_vec;
-} gbp_ep_by_itf_db_t;
-
typedef struct gbp_ep_by_ip_itf_db_t_
{
- clib_bihash_24_8_t gte_table;
-} gbp_ep_by_ip_itf_db_t;
-
-typedef struct gbp_ep_by_mac_itf_db_t_
-{
- clib_bihash_16_8_t gte_table;
-} gbp_ep_by_mac_itf_db_t;
+ index_t *ged_by_sw_if_index;
+ clib_bihash_24_8_t ged_by_ip_rd;
+ clib_bihash_16_8_t ged_by_mac_bd;
+} gbp_ep_db_t;
extern int gbp_endpoint_update (u32 sw_if_index,
const ip46_address_t * ip,
const mac_address_t * mac,
- epg_id_t epg_id, u32 * handle);
-extern void gbp_endpoint_delete (u32 handle);
+ epg_id_t epg_id,
+ gbp_endpoint_flags_t flags,
+ const ip46_address_t * tun_src,
+ const ip46_address_t * tun_dst, u32 * handle);
+extern void gbp_endpoint_delete (index_t gbpei);
-typedef walk_rc_t (*gbp_endpoint_cb_t) (gbp_endpoint_t * gbpe, void *ctx);
+typedef walk_rc_t (*gbp_endpoint_cb_t) (index_t gbpei, void *ctx);
extern void gbp_endpoint_walk (gbp_endpoint_cb_t cb, void *ctx);
+extern void gbp_endpoint_scan (vlib_main_t * vm);
+extern f64 gbp_endpoint_scan_threshold (void);
+extern int gbp_endpoint_is_remote (const gbp_endpoint_t * ge);
+extern void gbp_endpoint_flush (u32 sw_if_index);
/**
* DP functions and databases
*/
-extern gbp_ep_by_itf_db_t gbp_ep_by_itf_db;
-extern gbp_ep_by_mac_itf_db_t gbp_ep_by_mac_itf_db;
-extern gbp_ep_by_ip_itf_db_t gbp_ep_by_ip_itf_db;
+extern gbp_ep_db_t gbp_ep_db;
extern gbp_endpoint_t *gbp_endpoint_pool;
/**
@@ -118,12 +159,104 @@ gbp_endpoint_get (index_t gbpei)
return (pool_elt_at_index (gbp_endpoint_pool, gbpei));
}
-always_inline gbp_endpoint_t *
-gbp_endpoint_get_itf (u32 sw_if_index)
+static_always_inline void
+gbp_endpoint_mk_key_mac (const u8 * mac,
+ u32 bd_index, clib_bihash_kv_16_8_t * key)
{
- return (gbp_endpoint_get (gbp_ep_by_itf_db.gte_vec[sw_if_index]));
+ key->key[0] = ethernet_mac_address_u64 (mac);
+ key->key[1] = bd_index;
}
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_mac (const u8 * mac, u32 bd_index)
+{
+ clib_bihash_kv_16_8_t key, value;
+ int rv;
+
+ gbp_endpoint_mk_key_mac (mac, bd_index, &key);
+
+ rv = clib_bihash_search_16_8 (&gbp_ep_db.ged_by_mac_bd, &key, &value);
+
+ if (0 != rv)
+ return NULL;
+
+ return (gbp_endpoint_get (value.value));
+}
+
+static_always_inline void
+gbp_endpoint_mk_key_ip (const ip46_address_t * ip,
+ u32 fib_index, clib_bihash_kv_24_8_t * key)
+{
+ key->key[0] = ip->as_u64[0];
+ key->key[1] = ip->as_u64[1];
+ key->key[2] = fib_index;
+}
+
+static_always_inline void
+gbp_endpoint_mk_key_ip4 (const ip4_address_t * ip,
+ u32 fib_index, clib_bihash_kv_24_8_t * key)
+{
+ const ip46_address_t a = {
+ .ip4 = *ip,
+ };
+ gbp_endpoint_mk_key_ip (&a, fib_index, key);
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_ip4 (const ip4_address_t * ip, u32 fib_index)
+{
+ clib_bihash_kv_24_8_t key, value;
+ int rv;
+
+ gbp_endpoint_mk_key_ip4 (ip, fib_index, &key);
+
+ rv = clib_bihash_search_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, &value);
+
+ if (0 != rv)
+ return NULL;
+
+ return (gbp_endpoint_get (value.value));
+}
+
+static_always_inline void
+gbp_endpoint_mk_key_ip6 (const ip6_address_t * ip,
+ u32 fib_index, clib_bihash_kv_24_8_t * key)
+{
+ key->key[0] = ip->as_u64[0];
+ key->key[1] = ip->as_u64[1];
+ key->key[2] = fib_index;
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_ip6 (const ip6_address_t * ip, u32 fib_index)
+{
+ clib_bihash_kv_24_8_t key, value;
+ int rv;
+
+ gbp_endpoint_mk_key_ip6 (ip, fib_index, &key);
+
+ rv = clib_bihash_search_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, &value);
+
+ if (0 != rv)
+ return NULL;
+
+ return (gbp_endpoint_get (value.value));
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_itf (u32 sw_if_index)
+{
+ index_t gei;
+
+ gei = gbp_ep_db.ged_by_sw_if_index[sw_if_index];
+
+ if (INDEX_INVALID != gei)
+ return (gbp_endpoint_get (gei));
+
+ return (NULL);
+}
+
+
#endif
/*
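
A minimal lookup sketch against the databases declared above, assuming a known MAC/bridge-domain index and IPv4 address/FIB index; the wrapper function is hypothetical.

#include <plugins/gbp/gbp_endpoint.h>

static int
example_ep_lookup (const u8 * mac, u32 bd_index,
                   const ip4_address_t * ip4, u32 fib_index)
{
  gbp_endpoint_t *ge_l2, *ge_l3;

  /* (MAC, BD) lookup in ged_by_mac_bd */
  ge_l2 = gbp_endpoint_find_mac (mac, bd_index);
  /* (IP, RD/FIB) lookup in ged_by_ip_rd */
  ge_l3 = gbp_endpoint_find_ip4 (ip4, fib_index);

  return (NULL != ge_l2 || NULL != ge_l3);
}
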
diff --git a/src/plugins/gbp/gbp_endpoint_group.c b/src/plugins/gbp/gbp_endpoint_group.c
index 095c8fe607b..ee4af2c0060 100644
--- a/src/plugins/gbp/gbp_endpoint_group.c
+++ b/src/plugins/gbp/gbp_endpoint_group.c
@@ -17,11 +17,12 @@
#include <plugins/gbp/gbp_endpoint_group.h>
#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
#include <vnet/dpo/dvr_dpo.h>
#include <vnet/fib/fib_table.h>
#include <vnet/l2/l2_input.h>
-#include <vnet/l2/feat_bitmap.h>
/**
* Pool of GBP endpoint_groups
@@ -32,106 +33,218 @@ gbp_endpoint_group_t *gbp_endpoint_group_pool;
* DB of endpoint_groups
*/
gbp_endpoint_group_db_t gbp_endpoint_group_db;
+vlib_log_class_t gg_logger;
+
+#define GBP_EPG_DBG(...) \
+ vlib_log_debug (gg_logger, __VA_ARGS__);
gbp_endpoint_group_t *
+gbp_endpoint_group_get (index_t i)
+{
+ return (pool_elt_at_index (gbp_endpoint_group_pool, i));
+}
+
+static void
+gbp_endpoint_group_lock (index_t i)
+{
+ gbp_endpoint_group_t *gg;
+
+ gg = gbp_endpoint_group_get (i);
+ gg->gg_locks++;
+}
+
+index_t
gbp_endpoint_group_find (epg_id_t epg_id)
{
uword *p;
- p = hash_get (gbp_endpoint_group_db.gepg_hash, epg_id);
+ p = hash_get (gbp_endpoint_group_db.gg_hash, epg_id);
if (NULL != p)
- return (pool_elt_at_index (gbp_endpoint_group_pool, p[0]));
+ return p[0];
- return (NULL);
+ return (INDEX_INVALID);
+}
+
+index_t
+gbp_endpoint_group_find_and_lock (epg_id_t epg_id)
+{
+ uword *p;
+
+ p = hash_get (gbp_endpoint_group_db.gg_hash, epg_id);
+
+ if (NULL != p)
+ {
+ gbp_endpoint_group_lock (p[0]);
+ return p[0];
+ }
+ return (INDEX_INVALID);
}
int
-gbp_endpoint_group_add (epg_id_t epg_id,
- u32 bd_id,
- u32 ip4_table_id,
- u32 ip6_table_id, u32 uplink_sw_if_index)
+gbp_endpoint_group_add_and_lock (epg_id_t epg_id,
+ u32 bd_id, u32 rd_id, u32 uplink_sw_if_index)
{
- gbp_endpoint_group_t *gepg;
+ gbp_endpoint_group_t *gg;
+ index_t ggi;
- gepg = gbp_endpoint_group_find (epg_id);
+ ggi = gbp_endpoint_group_find (epg_id);
- if (NULL == gepg)
+ if (INDEX_INVALID == ggi)
{
+ gbp_bridge_domain_t *gb;
fib_protocol_t fproto;
+ index_t gbi, grdi;
+
+ gbi = gbp_bridge_domain_find_and_lock (bd_id);
+
+ if (~0 == gbi)
+ return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
- pool_get (gbp_endpoint_group_pool, gepg);
- clib_memset (gepg, 0, sizeof (*gepg));
+ grdi = gbp_route_domain_find_and_lock (rd_id);
- gepg->gepg_id = epg_id;
- gepg->gepg_bd = bd_id;
- gepg->gepg_rd[FIB_PROTOCOL_IP4] = ip4_table_id;
- gepg->gepg_rd[FIB_PROTOCOL_IP6] = ip6_table_id;
- gepg->gepg_uplink_sw_if_index = uplink_sw_if_index;
+ if (~0 == grdi)
+ {
+ gbp_bridge_domain_unlock (gbi);
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+ }
+
+ gb = gbp_bridge_domain_get (gbi);
+
+ pool_get_zero (gbp_endpoint_group_pool, gg);
+
+ gg->gg_id = epg_id;
+ gg->gg_rd = grdi;
+ gg->gg_gbd = gbi;
+ gg->gg_bd_index = gb->gb_bd_index;
+
+ gg->gg_uplink_sw_if_index = uplink_sw_if_index;
+ gg->gg_locks = 1;
/*
* an egress DVR dpo for internal subnets to use when sending
* on the uplink interface
*/
- FOR_EACH_FIB_IP_PROTOCOL (fproto)
- {
- gepg->gepg_fib_index[fproto] =
- fib_table_find_or_create_and_lock (fproto,
- gepg->gepg_rd[fproto],
- FIB_SOURCE_PLUGIN_HI);
-
- if (~0 == gepg->gepg_fib_index[fproto])
+ if (~0 != gg->gg_uplink_sw_if_index)
+ {
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
{
- return (VNET_API_ERROR_NO_SUCH_FIB);
+ dvr_dpo_add_or_lock (uplink_sw_if_index,
+ fib_proto_to_dpo (fproto),
+ &gg->gg_dpo[fproto]);
}
- dvr_dpo_add_or_lock (uplink_sw_if_index,
- fib_proto_to_dpo (fproto),
- &gepg->gepg_dpo[fproto]);
- }
-
- /*
- * packets direct from the uplink have had policy applied
- */
- l2input_intf_bitmap_enable (gepg->gepg_uplink_sw_if_index,
- L2INPUT_FEAT_GBP_NULL_CLASSIFY, 1);
+ /*
+ * Add the uplink to the BD
+ * packets direct from the uplink have had policy applied
+ */
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L2_BRIDGE, gg->gg_uplink_sw_if_index,
+ gg->gg_bd_index, L2_BD_PORT_TYPE_NORMAL, 0, 0);
+ l2input_intf_bitmap_enable (gg->gg_uplink_sw_if_index,
+ L2INPUT_FEAT_GBP_NULL_CLASSIFY, 1);
+ }
- hash_set (gbp_endpoint_group_db.gepg_hash,
- gepg->gepg_id, gepg - gbp_endpoint_group_pool);
+ hash_set (gbp_endpoint_group_db.gg_hash,
+ gg->gg_id, gg - gbp_endpoint_group_pool);
}
+ else
+ {
+ gg = gbp_endpoint_group_get (ggi);
+ gg->gg_locks++;
+ }
+
+ GBP_EPG_DBG ("add: %U", format_gbp_endpoint_group, gg);
return (0);
}
void
-gbp_endpoint_group_delete (epg_id_t epg_id)
+gbp_endpoint_group_unlock (index_t ggi)
{
- gbp_endpoint_group_t *gepg;
- uword *p;
+ gbp_endpoint_group_t *gg;
- p = hash_get (gbp_endpoint_group_db.gepg_hash, epg_id);
+ gg = gbp_endpoint_group_get (ggi);
- if (NULL != p)
+ gg->gg_locks--;
+
+ if (0 == gg->gg_locks)
{
fib_protocol_t fproto;
- gepg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
+ gg = pool_elt_at_index (gbp_endpoint_group_pool, ggi);
- l2input_intf_bitmap_enable (gepg->gepg_uplink_sw_if_index,
- L2INPUT_FEAT_GBP_NULL_CLASSIFY, 0);
+ if (~0 != gg->gg_uplink_sw_if_index)
+ {
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L3, gg->gg_uplink_sw_if_index,
+ gg->gg_bd_index, L2_BD_PORT_TYPE_NORMAL, 0, 0);
+ l2input_intf_bitmap_enable (gg->gg_uplink_sw_if_index,
+ L2INPUT_FEAT_GBP_NULL_CLASSIFY, 0);
+ }
FOR_EACH_FIB_IP_PROTOCOL (fproto)
{
- dpo_reset (&gepg->gepg_dpo[fproto]);
- fib_table_unlock (gepg->gepg_fib_index[fproto],
- fproto, FIB_SOURCE_PLUGIN_HI);
+ dpo_reset (&gg->gg_dpo[fproto]);
}
+ gbp_bridge_domain_unlock (gg->gg_gbd);
+ gbp_route_domain_unlock (gg->gg_rd);
+
+ hash_unset (gbp_endpoint_group_db.gg_hash, gg->gg_id);
- hash_unset (gbp_endpoint_group_db.gepg_hash, epg_id);
+ pool_put (gbp_endpoint_group_pool, gg);
+ }
+}
+
+int
+gbp_endpoint_group_delete (epg_id_t epg_id)
+{
+ index_t ggi;
+
+ ggi = gbp_endpoint_group_find (epg_id);
- pool_put (gbp_endpoint_group_pool, gepg);
+ if (INDEX_INVALID != ggi)
+ {
+ GBP_EPG_DBG ("del: %U", format_gbp_endpoint_group,
+ gbp_endpoint_group_get (ggi));
+ gbp_endpoint_group_unlock (ggi);
+
+ return (0);
}
+
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+u32
+gbp_endpoint_group_get_bd_id (const gbp_endpoint_group_t * gg)
+{
+ const gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (gg->gg_gbd);
+
+ return (gb->gb_bd_id);
+}
+
+index_t
+gbp_endpoint_group_get_fib_index (gbp_endpoint_group_t * gg,
+ fib_protocol_t fproto)
+{
+ const gbp_route_domain_t *grd;
+
+ grd = gbp_route_domain_get (gg->gg_rd);
+
+ return (grd->grd_fib_index[fproto]);
+}
+
+u32
+gbp_endpoint_group_get_bvi (gbp_endpoint_group_t * gg)
+{
+ const gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (gg->gg_gbd);
+
+ return (gb->gb_bvi_sw_if_index);
}
void
@@ -190,8 +303,8 @@ gbp_endpoint_group_cli (vlib_main_t * vm,
if (~0 == rd_id)
return clib_error_return (0, "route-domain must be specified");
- gbp_endpoint_group_add (epg_id, bd_id, rd_id, rd_id,
- uplink_sw_if_index);
+ gbp_endpoint_group_add_and_lock (epg_id, bd_id, rd_id,
+ uplink_sw_if_index);
}
else
gbp_endpoint_group_delete (epg_id);
@@ -213,19 +326,32 @@ VLIB_CLI_COMMAND (gbp_endpoint_group_cli_node, static) = {
.function = gbp_endpoint_group_cli,
};
-static int
-gbp_endpoint_group_show_one (gbp_endpoint_group_t *gepg, void *ctx)
+u8 *
+format_gbp_endpoint_group (u8 * s, va_list * args)
{
+ gbp_endpoint_group_t *gg = va_arg (*args, gbp_endpoint_group_t*);
vnet_main_t *vnm = vnet_get_main ();
+
+ if (NULL != gg)
+ s = format (s, "%d, bd:[%d,%d], rd:[%d] uplink:%U locks:%d",
+ gg->gg_id,
+ gbp_endpoint_group_get_bd_id(gg), gg->gg_bd_index,
+ gg->gg_rd,
+ format_vnet_sw_if_index_name, vnm, gg->gg_uplink_sw_if_index,
+ gg->gg_locks);
+ else
+ s = format (s, "NULL");
+
+ return (s);
+}
+
+static int
+gbp_endpoint_group_show_one (gbp_endpoint_group_t *gg, void *ctx)
+{
vlib_main_t *vm;
vm = ctx;
- vlib_cli_output (vm, " %d, bd:%d, ip4:%d ip6:%d uplink:%U",
- gepg->gepg_id,
- gepg->gepg_bd,
- gepg->gepg_rd[FIB_PROTOCOL_IP4],
- gepg->gepg_rd[FIB_PROTOCOL_IP6],
- format_vnet_sw_if_index_name, vnm, gepg->gepg_uplink_sw_if_index);
+ vlib_cli_output (vm, " %U",format_gbp_endpoint_group, gg);
return (1);
}
@@ -256,6 +382,16 @@ VLIB_CLI_COMMAND (gbp_endpoint_group_show_node, static) = {
};
/* *INDENT-ON* */
+static clib_error_t *
+gbp_endpoint_group_init (vlib_main_t * vm)
+{
+ gg_logger = vlib_log_register_class ("gbp", "epg");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_endpoint_group_init);
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/plugins/gbp/gbp_endpoint_group.h b/src/plugins/gbp/gbp_endpoint_group.h
index f71e5f5d70b..7116a058aee 100644
--- a/src/plugins/gbp/gbp_endpoint_group.h
+++ b/src/plugins/gbp/gbp_endpoint_group.h
@@ -28,37 +28,38 @@ typedef struct gpb_endpoint_group_t_
/**
* ID
*/
- epg_id_t gepg_id;
+ epg_id_t gg_id;
/**
* Bridge-domain ID the EPG is in
*/
- u32 gepg_bd;
+ index_t gg_gbd;
+ index_t gg_bd_index;
/**
* route-domain/IP-table ID the EPG is in
*/
- u32 gepg_rd[FIB_PROTOCOL_IP_MAX];
-
- /**
- * resulting FIB indices
- */
- u32 gepg_fib_index[FIB_PROTOCOL_IP_MAX];
+ index_t gg_rd;
/**
* Is the EPG an external/NAT
*/
- u8 gepg_is_ext;
+ u8 gg_is_ext;
/**
* the uplink interface dedicated to the EPG
*/
- u32 gepg_uplink_sw_if_index;
+ u32 gg_uplink_sw_if_index;
/**
* The DPO used in the L3 path for forwarding internal subnets
*/
- dpo_id_t gepg_dpo[FIB_PROTOCOL_IP_MAX];
+ dpo_id_t gg_dpo[FIB_PROTOCOL_IP_MAX];
+
+ /**
+ * Locks/references to this EPG
+ */
+ u32 gg_locks;
} gbp_endpoint_group_t;
/**
@@ -66,20 +67,30 @@ typedef struct gpb_endpoint_group_t_
*/
typedef struct gbp_endpoint_group_db_t_
{
- uword *gepg_hash;
+ uword *gg_hash;
} gbp_endpoint_group_db_t;
-extern int gbp_endpoint_group_add (epg_id_t epg_id,
- u32 bd_id,
- u32 ip4_table_id,
- u32 ip6_table_id, u32 uplink_sw_if_index);
-extern void gbp_endpoint_group_delete (epg_id_t epg_id);
+extern int gbp_endpoint_group_add_and_lock (epg_id_t epg_id,
+ u32 bd_id,
+ u32 rd_id,
+ u32 uplink_sw_if_index);
+extern index_t gbp_endpoint_group_find_and_lock (epg_id_t epg_id);
+extern index_t gbp_endpoint_group_find (epg_id_t epg_id);
+extern int gbp_endpoint_group_delete (epg_id_t epg_id);
+extern void gbp_endpoint_group_unlock (index_t index);
+extern u32 gbp_endpoint_group_get_bd_id (const gbp_endpoint_group_t *);
+
+extern gbp_endpoint_group_t *gbp_endpoint_group_get (index_t i);
+extern index_t gbp_endpoint_group_get_fib_index (gbp_endpoint_group_t * gg,
+ fib_protocol_t fproto);
+extern u32 gbp_endpoint_group_get_bvi (gbp_endpoint_group_t * gg);
typedef int (*gbp_endpoint_group_cb_t) (gbp_endpoint_group_t * gbpe,
void *ctx);
extern void gbp_endpoint_group_walk (gbp_endpoint_group_cb_t bgpe, void *ctx);
-extern gbp_endpoint_group_t *gbp_endpoint_group_find (epg_id_t epg_id);
+
+extern u8 *format_gbp_endpoint_group (u8 * s, va_list * args);
/**
* DP functions and databases
@@ -92,14 +103,14 @@ gbp_epg_itf_lookup (epg_id_t epg)
{
uword *p;
- p = hash_get (gbp_endpoint_group_db.gepg_hash, epg);
+ p = hash_get (gbp_endpoint_group_db.gg_hash, epg);
if (NULL != p)
{
- gbp_endpoint_group_t *gepg;
+ gbp_endpoint_group_t *gg;
- gepg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
- return (gepg->gepg_uplink_sw_if_index);
+ gg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
+ return (gg->gg_uplink_sw_if_index);
}
return (~0);
}
@@ -109,14 +120,14 @@ gbp_epg_dpo_lookup (epg_id_t epg, fib_protocol_t fproto)
{
uword *p;
- p = hash_get (gbp_endpoint_group_db.gepg_hash, epg);
+ p = hash_get (gbp_endpoint_group_db.gg_hash, epg);
if (NULL != p)
{
- gbp_endpoint_group_t *gepg;
+ gbp_endpoint_group_t *gg;
- gepg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
- return (&gepg->gepg_dpo[fproto]);
+ gg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
+ return (&gg->gg_dpo[fproto]);
}
return (NULL);
}
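
The group add/delete path above is reference-counted; a short sketch (hypothetical caller) of the find-and-lock/unlock pairing that callers such as gbp_endpoint_update follow:

#include <plugins/gbp/gbp_endpoint_group.h>

static int
example_use_epg (epg_id_t epg_id)
{
  index_t ggi;

  /* takes a reference; INDEX_INVALID if the EPG does not exist */
  ggi = gbp_endpoint_group_find_and_lock (epg_id);
  if (INDEX_INVALID == ggi)
    return (VNET_API_ERROR_NO_SUCH_ENTRY);

  /* ... use gbp_endpoint_group_get (ggi) ... */

  /* drop the reference; the EPG is freed when the last lock goes */
  gbp_endpoint_group_unlock (ggi);
  return (0);
}
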
diff --git a/src/plugins/gbp/gbp_itf.c b/src/plugins/gbp/gbp_itf.c
new file mode 100644
index 00000000000..39a1124bba2
--- /dev/null
+++ b/src/plugins/gbp/gbp_itf.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_itf.h>
+
+/**
+ * Attributes and configurations attached to interfaces by GBP
+ */
+typedef struct gbp_itf_t_
+{
+ /**
+ * Number of references to this interface
+ */
+ u32 gi_locks;
+
+ u32 gi_sw_if_index;
+ u32 gi_bd_index;
+
+ /**
+ * L2/L3 Features configured by each user
+ */
+ u32 *gi_l2_input_fbs;
+ u32 gi_l2_input_fb;
+ u32 *gi_l2_output_fbs;
+ u32 gi_l2_output_fb;
+} gbp_itf_t;
+
+static gbp_itf_t *gbp_itfs;
+
+static gbp_itf_t *
+gbp_itf_get (index_t gii)
+{
+ vec_validate (gbp_itfs, gii);
+
+ return (&gbp_itfs[gii]);
+}
+
+static index_t
+gbp_itf_get_itf (u32 sw_if_index)
+{
+ return (sw_if_index);
+}
+
+index_t
+gbp_itf_add_and_lock (u32 sw_if_index, u32 bd_index)
+{
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_get (gbp_itf_get_itf (sw_if_index));
+
+ if (0 == gi->gi_locks)
+ {
+ gi->gi_sw_if_index = sw_if_index;
+ gi->gi_bd_index = bd_index;
+
+ if (~0 != gi->gi_bd_index)
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+ MODE_L2_BRIDGE, sw_if_index, bd_index,
+ L2_BD_PORT_TYPE_NORMAL, 0, 0);
+
+ }
+
+ gi->gi_locks++;
+
+ return (sw_if_index);
+}
+
+void
+gbp_itf_unlock (index_t gii)
+{
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_get (gii);
+ ASSERT (gi->gi_locks > 0);
+ gi->gi_locks--;
+
+ if (0 == gi->gi_locks)
+ {
+ if (~0 != gi->gi_bd_index)
+ set_int_l2_mode (vlib_get_main (), vnet_get_main (), MODE_L3,
+ gi->gi_sw_if_index, 0, L2_BD_PORT_TYPE_NORMAL, 0, 0);
+ vec_free (gi->gi_l2_input_fbs);
+ vec_free (gi->gi_l2_output_fbs);
+
+ memset (gi, 0, sizeof (*gi));
+ }
+}
+
+void
+gbp_itf_set_l2_input_feature (index_t gii,
+ index_t useri, l2input_feat_masks_t feats)
+{
+ u32 diff_fb, new_fb, *fb, feat;
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_get (gii);
+
+ if (gi->gi_bd_index == ~0)
+ return;
+
+ vec_validate (gi->gi_l2_input_fbs, useri);
+ gi->gi_l2_input_fbs[useri] = feats;
+
+ new_fb = 0;
+ vec_foreach (fb, gi->gi_l2_input_fbs)
+ {
+ new_fb |= *fb;
+ }
+
+ /* add new features */
+ diff_fb = (gi->gi_l2_input_fb ^ new_fb) & new_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ l2input_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 1);
+ }));
+ /* *INDENT-ON* */
+
+ /* remove unneeded features */
+ diff_fb = (gi->gi_l2_input_fb ^ new_fb) & gi->gi_l2_input_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ l2input_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 0);
+ }));
+ /* *INDENT-ON* */
+
+ gi->gi_l2_input_fb = new_fb;
+}
+
+void
+gbp_itf_set_l2_output_feature (index_t gii,
+ index_t useri, l2output_feat_masks_t feats)
+{
+ u32 diff_fb, new_fb, *fb, feat;
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_get (gii);
+
+ if (gi->gi_bd_index == ~0)
+ return;
+
+ vec_validate (gi->gi_l2_output_fbs, useri);
+ gi->gi_l2_output_fbs[useri] = feats;
+
+ new_fb = 0;
+ vec_foreach (fb, gi->gi_l2_output_fbs)
+ {
+ new_fb |= *fb;
+ }
+
+ /* add new features */
+ diff_fb = (gi->gi_l2_output_fb ^ new_fb) & new_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ l2output_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 1);
+ }));
+ /* *INDENT-ON* */
+
+ /* remove unneeded features */
+ diff_fb = (gi->gi_l2_output_fb ^ new_fb) & gi->gi_l2_output_fb;
+
+ /* *INDENT-OFF* */
+ foreach_set_bit (feat, diff_fb,
+ ({
+ l2output_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 0);
+ }));
+ /* *INDENT-ON* */
+
+ gi->gi_l2_output_fb = new_fb;
+}
+
+u8 *
+format_gbp_itf (u8 * s, va_list * args)
+{
+ index_t gii = va_arg (*args, index_t);
+ gbp_itf_t *gi;
+
+ gi = gbp_itf_get (gii);
+
+ s = format (s, "%U locks:%d input-feats:%U output-feats:%U",
+ format_vnet_sw_if_index_name, vnet_get_main (),
+ gi->gi_sw_if_index, gi->gi_locks, format_l2_input_features,
+ gi->gi_l2_input_fb, format_l2_output_features,
+ gi->gi_l2_output_fb);
+
+ return (s);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_itf.h b/src/plugins/gbp/gbp_itf.h
new file mode 100644
index 00000000000..6ece7b10c29
--- /dev/null
+++ b/src/plugins/gbp/gbp_itf.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_INTERFACE_H__
+#define __GBP_INTERFACE_H__
+
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+
+extern index_t gbp_itf_add_and_lock (u32 sw_if_index, u32 bd_index);
+extern void gbp_itf_unlock (index_t index);
+
+extern void gbp_itf_set_l2_input_feature (index_t gii,
+ index_t useri,
+ l2input_feat_masks_t feats);
+extern void gbp_itf_set_l2_output_feature (index_t gii,
+ index_t useri,
+ l2output_feat_masks_t feats);
+
+extern u8 *format_gbp_itf (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
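
A brief usage sketch (hypothetical user index and feature choices) of this per-interface API, matching how gbp_endpoint.c drives it: the first lock places the interface into the bridge-domain, per-user feature bits are OR-ed together before being applied, and the last unlock returns the interface to L3 mode.

#include <plugins/gbp/gbp_itf.h>

static void
example_itf_user (u32 sw_if_index, u32 bd_index, index_t useri)
{
  index_t gii;

  gii = gbp_itf_add_and_lock (sw_if_index, bd_index);

  /* request this user's input/output feature bits */
  gbp_itf_set_l2_input_feature (gii, useri, L2INPUT_FEAT_GBP_FWD);
  gbp_itf_set_l2_output_feature (gii, useri, L2OUTPUT_FEAT_GBP_POLICY_PORT);

  /* clear our bits and release the reference */
  gbp_itf_set_l2_input_feature (gii, useri, L2INPUT_FEAT_NONE);
  gbp_itf_set_l2_output_feature (gii, useri, L2OUTPUT_FEAT_NONE);
  gbp_itf_unlock (gii);
}
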
diff --git a/src/plugins/gbp/gbp_learn.c b/src/plugins/gbp/gbp_learn.c
new file mode 100644
index 00000000000..9239779dd99
--- /dev/null
+++ b/src/plugins/gbp/gbp_learn.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/util/throttle.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+/**
+ * Grouping of global data for the GBP endpoint learning feature
+ */
+typedef struct gbp_learn_main_t_
+{
+ /**
+   * Next nodes for L2 input features
+ */
+ u32 gl_l2_input_feat_next[32];
+
+ /**
+ * logger - VLIB log class
+ */
+ vlib_log_class_t gl_logger;
+
+ /**
+   * throttles for the DP learning
+ */
+ throttle_t gl_l2_throttle;
+ throttle_t gl_l3_throttle;
+} gbp_learn_main_t;
+
+/**
+ * The maximum learning rate per-hashed EP
+ */
+#define GBP_ENDPOINT_HASH_LEARN_RATE (1e-2)
+
+static gbp_learn_main_t gbp_learn_main;
+
+#define GBP_LEARN_DBG(...) \
+ vlib_log_debug (gbp_learn_main.gl_logger, __VA_ARGS__);
+
+#define foreach_gbp_learn \
+ _(DROP, "drop")
+
+typedef enum
+{
+#define _(sym,str) GBP_LEARN_ERROR_##sym,
+ foreach_gbp_learn
+#undef _
+ GBP_LEARN_N_ERROR,
+} gbp_learn_error_t;
+
+static char *gbp_learn_error_strings[] = {
+#define _(sym,string) string,
+ foreach_gbp_learn
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) GBP_LEARN_NEXT_##sym,
+ foreach_gbp_learn
+#undef _
+ GBP_LEARN_N_NEXT,
+} gbp_learn_next_t;
+
+typedef struct gbp_learn_l2_t_
+{
+ ip46_address_t ip;
+ mac_address_t mac;
+ u32 sw_if_index;
+ u32 bd_index;
+ epg_id_t epg;
+ ip46_address_t outer_src;
+ ip46_address_t outer_dst;
+} gbp_learn_l2_t;
+
+
+static void
+gbp_learn_l2_cp (const gbp_learn_l2_t * gl2)
+{
+ ip46_address_t *ips = NULL;
+
+ GBP_LEARN_DBG ("L2 EP: %U %U, %d",
+ format_mac_address_t, &gl2->mac,
+ format_ip46_address, &gl2->ip, IP46_TYPE_ANY, gl2->epg);
+
+ vec_add1 (ips, gl2->ip);
+
+ ASSERT (!ip46_address_is_zero (&gl2->outer_src));
+ ASSERT (!ip46_address_is_zero (&gl2->outer_dst));
+
+ /*
+   * flip the source and dst, since that is how the packet was received;
+   * this API takes the addresses in the direction they are sent
+ */
+ gbp_endpoint_update (gl2->sw_if_index, ips,
+ &gl2->mac, gl2->epg,
+ (GBP_ENDPOINT_FLAG_LEARNT |
+ GBP_ENDPOINT_FLAG_REMOTE),
+ &gl2->outer_dst, &gl2->outer_src, NULL);
+}
+
+static void
+gbp_learn_l2_ip4_dp (const u8 * mac, const ip4_address_t * ip,
+ u32 bd_index, u32 sw_if_index, epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ gbp_learn_l2_t gl2 = {
+ .sw_if_index = sw_if_index,
+ .bd_index = bd_index,
+ .epg = epg,
+ .ip.ip4 = *ip,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ mac_address_from_bytes (&gl2.mac, mac);
+
+ ASSERT (!ip46_address_is_zero (&gl2.outer_src));
+ ASSERT (!ip46_address_is_zero (&gl2.outer_dst));
+
+ vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+static void
+gbp_learn_l2_ip6_dp (const u8 * mac, const ip6_address_t * ip,
+ u32 bd_index, u32 sw_if_index, epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ gbp_learn_l2_t gl2 = {
+ .sw_if_index = sw_if_index,
+ .bd_index = bd_index,
+ .epg = epg,
+ .ip.ip6 = *ip,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ mac_address_from_bytes (&gl2.mac, mac);
+
+ vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+static void
+gbp_learn_l2_dp (const u8 * mac, u32 bd_index, u32 sw_if_index,
+ epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ gbp_learn_l2_t gl2 = {
+ .sw_if_index = sw_if_index,
+ .bd_index = bd_index,
+ .epg = epg,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ mac_address_from_bytes (&gl2.mac, mac);
+
+ vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_learn_l2_trace_t_
+{
+ /* per-pkt trace data */
+ mac_address_t mac;
+ u32 sw_if_index;
+ u32 new;
+ u32 throttled;
+ u32 epg;
+ u32 d_bit;
+} gbp_learn_l2_trace_t;
+
+always_inline void
+gbp_learn_get_outer (const ethernet_header_t * eh0,
+ ip4_address_t * outer_src, ip4_address_t * outer_dst)
+{
+ ip4_header_t *ip0;
+ u8 *buff;
+
+ /* rewind back to the ivxlan header */
+ buff = (u8 *) eh0;
+ buff -= (sizeof (vxlan_gbp_header_t) +
+ sizeof (udp_header_t) + sizeof (ip4_header_t));
+
+ ip0 = (ip4_header_t *) buff;
+
+ *outer_src = ip0->src_address;
+ *outer_dst = ip0->dst_address;
+}
+
+static uword
+gbp_learn_l2 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
+ gbp_learn_main_t *glm;
+ f64 time_now;
+
+ glm = &gbp_learn_main;
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+ time_now = vlib_time_now (vm);
+ thread_index = vm->thread_index;
+
+ seed = throttle_seed (&glm->gl_l2_throttle, thread_index, time_now);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ ip4_address_t outer_src, outer_dst;
+ u32 bi0, sw_if_index0, t0, epg0;
+ const ethernet_header_t *eh0;
+ gbp_learn_next_t next0;
+ gbp_endpoint_t *ge0;
+ vlib_buffer_t *b0;
+
+ next0 = GBP_LEARN_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ eh0 = vlib_buffer_get_current (b0);
+ epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+
+ next0 = vnet_l2_feature_next (b0, glm->gl_l2_input_feat_next,
+ L2INPUT_FEAT_GBP_LEARN);
+
+ ge0 = gbp_endpoint_find_mac (eh0->src_address,
+ vnet_buffer (b0)->l2.bd_index);
+
+ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D)
+ {
+ ge0 = NULL;
+ t0 = 1;
+ goto trace;
+ }
+
+ /*
+ * check for new EP or a moved EP
+ */
+ if (NULL == ge0 || ge0->ge_sw_if_index != sw_if_index0)
+
+ {
+ /*
+ * use the last 4 bytes of the mac address as the hash for the EP
+ */
+ t0 = throttle_check (&glm->gl_l2_throttle, thread_index,
+ *((u32 *) (eh0->src_address + 2)), seed);
+ if (!t0)
+ {
+ gbp_learn_get_outer (eh0, &outer_src, &outer_dst);
+
+ switch (clib_net_to_host_u16 (eh0->type))
+ {
+ case ETHERNET_TYPE_IP4:
+ {
+ const ip4_header_t *ip0;
+
+ ip0 = (ip4_header_t *) (eh0 + 1);
+
+ gbp_learn_l2_ip4_dp (eh0->src_address,
+ &ip0->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+
+ break;
+ }
+ case ETHERNET_TYPE_IP6:
+ {
+ const ip6_header_t *ip0;
+
+ ip0 = (ip6_header_t *) (eh0 + 1);
+
+ gbp_learn_l2_ip6_dp (eh0->src_address,
+ &ip0->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+
+ break;
+ }
+ default:
+ gbp_learn_l2_dp (eh0->src_address,
+ vnet_buffer (b0)->l2.bd_index,
+ sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+ break;
+ }
+ }
+ }
+ else
+ {
+ /*
+	       * this update could happen simultaneously from multiple workers
+	       * but that's OK; we are not interested in being very accurate.
+ */
+ t0 = 0;
+ ge0->ge_last_time = time_now;
+ }
+ trace:
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_learn_l2_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ clib_memcpy (t->mac.bytes, eh0->src_address, 6);
+ t->new = (NULL == ge0);
+ t->throttled = t0;
+ t->sw_if_index = sw_if_index0;
+ t->epg = epg0;
+ t->d_bit = ! !(vnet_buffer2 (b0)->gbp.flags &
+ VXLAN_GBP_GPFLAGS_D);
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_learn_l2_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_learn_l2_trace_t *t = va_arg (*args, gbp_learn_l2_trace_t *);
+
+ s = format (s, "new:%d throttled:%d d-bit:%d mac:%U itf:%d epg:%d",
+ t->new, t->throttled, t->d_bit,
+ format_mac_address_t, &t->mac, t->sw_if_index, t->epg);
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_learn_l2_node) = {
+ .function = gbp_learn_l2,
+ .name = "gbp-learn-l2",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_learn_l2_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(gbp_learn_error_strings),
+ .error_strings = gbp_learn_error_strings,
+
+ .n_next_nodes = GBP_LEARN_N_NEXT,
+
+ .next_nodes = {
+ [GBP_LEARN_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_l2_node, gbp_learn_l2);
+/* *INDENT-ON* */
+
+typedef struct gbp_learn_l3_t_
+{
+ ip46_address_t ip;
+ u32 fib_index;
+ u32 sw_if_index;
+ epg_id_t epg;
+ ip46_address_t outer_src;
+ ip46_address_t outer_dst;
+} gbp_learn_l3_t;
+
+static void
+gbp_learn_l3_cp (const gbp_learn_l3_t * gl3)
+{
+ ip46_address_t *ips = NULL;
+
+ GBP_LEARN_DBG ("L3 EP: %U, %d", format_ip46_address, &gl3->ip,
+ IP46_TYPE_ANY, gl3->epg);
+
+ vec_add1 (ips, gl3->ip);
+
+ gbp_endpoint_update (gl3->sw_if_index, ips, NULL, gl3->epg,
+ (GBP_ENDPOINT_FLAG_REMOTE |
+ GBP_ENDPOINT_FLAG_LEARNT),
+ &gl3->outer_dst, &gl3->outer_src, NULL);
+}
+
+static void
+gbp_learn_ip4_dp (const ip4_address_t * ip,
+ u32 fib_index, u32 sw_if_index, epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ /* *INDENT-OFF* */
+ gbp_learn_l3_t gl3 = {
+ .ip = {
+ .ip4 = *ip,
+ },
+ .sw_if_index = sw_if_index,
+ .fib_index = fib_index,
+ .epg = epg,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ /* *INDENT-ON* */
+
+ vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
+}
+
+static void
+gbp_learn_ip6_dp (const ip6_address_t * ip,
+ u32 fib_index, u32 sw_if_index, epg_id_t epg,
+ const ip4_address_t * outer_src,
+ const ip4_address_t * outer_dst)
+{
+ /* *INDENT-OFF* */
+ gbp_learn_l3_t gl3 = {
+ .ip = {
+ .ip6 = *ip,
+ },
+ .sw_if_index = sw_if_index,
+ .fib_index = fib_index,
+ .epg = epg,
+ .outer_src.ip4 = *outer_src,
+ .outer_dst.ip4 = *outer_dst,
+ };
+ /* *INDENT-ON* */
+
+ vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
+}
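+
+/*
+ * Note: both dispatchers above pass IPv4 outer (underlay) addresses only;
+ * the learnt endpoint's tunnel source/destination come from the VXLAN-GBP
+ * outer header captured by gbp_learn_get_outer() in the nodes.
+ */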
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_learn_l3_trace_t_
+{
+ /* per-pkt trace data */
+ ip46_address_t ip;
+ u32 sw_if_index;
+ u32 new;
+ u32 throttled;
+ u32 epg;
+} gbp_learn_l3_trace_t;
+
+static uword
+gbp_learn_l3 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame,
+ fib_protocol_t fproto)
+{
+ u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
+ gbp_learn_main_t *glm;
+ f64 time_now;
+
+ glm = &gbp_learn_main;
+ next_index = 0;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
+ time_now = vlib_time_now (vm);
+ thread_index = vm->thread_index;
+
+ seed = throttle_seed (&glm->gl_l3_throttle, thread_index, time_now);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sw_if_index0, t0, epg0, fib_index0;
+ CLIB_UNUSED (const ip4_header_t *) ip4_0;
+ CLIB_UNUSED (const ip6_header_t *) ip6_0;
+ ip4_address_t outer_src, outer_dst;
+ ethernet_header_t *eth0;
+ gbp_learn_next_t next0;
+ gbp_endpoint_t *ge0;
+ vlib_buffer_t *b0;
+
+ next0 = GBP_LEARN_NEXT_DROP;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+ ip6_0 = NULL;
+ ip4_0 = NULL;
+
+ vnet_feature_next (&next0, b0);
+
+ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D)
+ {
+ t0 = 1;
+ ge0 = NULL;
+ goto trace;
+ }
+
+ fib_index0 = fib_table_get_index_for_sw_if_index (fproto,
+ sw_if_index0);
+
+ if (FIB_PROTOCOL_IP6 == fproto)
+ {
+ ip6_0 = vlib_buffer_get_current (b0);
+ eth0 = (ethernet_header_t *) (((u8 *) ip6_0) - sizeof (*eth0));
+
+ gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
+
+ ge0 = gbp_endpoint_find_ip6 (&ip6_0->src_address, fib_index0);
+
+ if (NULL == ge0)
+ {
+ t0 = throttle_check (&glm->gl_l3_throttle,
+ thread_index,
+ ip6_address_hash_to_u32
+ (&ip6_0->src_address), seed);
+
+ if (!t0)
+ {
+ gbp_learn_ip6_dp (&ip6_0->src_address,
+ fib_index0, sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+ }
+ }
+ else
+ {
+ /*
+ * this update could happen simultaneously from multiple
+ * workers, but that's OK; we are not interested in being
+ * very accurate.
+ */
+ t0 = 0;
+ ge0->ge_last_time = time_now;
+ }
+ }
+ else
+ {
+ ip4_0 = vlib_buffer_get_current (b0);
+ eth0 = (ethernet_header_t *) (((u8 *) ip4_0) - sizeof (*eth0));
+
+ gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
+ ge0 = gbp_endpoint_find_ip4 (&ip4_0->src_address, fib_index0);
+
+ if (NULL == ge0)
+ {
+ t0 = throttle_check (&glm->gl_l3_throttle, thread_index,
+ ip4_0->src_address.as_u32, seed);
+
+ if (!t0)
+ {
+ gbp_learn_ip4_dp (&ip4_0->src_address,
+ fib_index0, sw_if_index0, epg0,
+ &outer_src, &outer_dst);
+ }
+ }
+ else
+ {
+ /*
+ * this update could happen simultaneously from multiple
+ * workers, but that's OK; we are not interested in being
+ * very accurate.
+ */
+ t0 = 0;
+ ge0->ge_last_time = time_now;
+ }
+ }
+ trace:
+ if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ gbp_learn_l3_trace_t *t;
+
+ t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ if (FIB_PROTOCOL_IP6 == fproto && ip6_0)
+ ip46_address_set_ip6 (&t->ip, &ip6_0->src_address);
+ if (FIB_PROTOCOL_IP4 == fproto && ip4_0)
+ ip46_address_set_ip4 (&t->ip, &ip4_0->src_address);
+ t->new = (NULL == ge0);
+ t->throttled = t0;
+ t->sw_if_index = sw_if_index0;
+ t->epg = epg0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_learn_l3_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_learn_l3_trace_t *t = va_arg (*args, gbp_learn_l3_trace_t *);
+
+ s = format (s, "new:%d throttled:%d ip:%U itf:%d epg:%d",
+ t->new, t->throttled,
+ format_ip46_address, &t->ip, IP46_TYPE_ANY, t->sw_if_index,
+ t->epg);
+
+ return s;
+}
+
+static uword
+gbp_learn_ip4 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP4));
+}
+
+static uword
+gbp_learn_ip6 (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP6));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_learn_ip4_node) = {
+ .function = gbp_learn_ip4,
+ .name = "gbp-learn-ip4",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_learn_l3_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_ip4_node, gbp_learn_ip4);
+
+VNET_FEATURE_INIT (gbp_learn_ip4, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "gbp-learn-ip4",
+};
+
+VLIB_REGISTER_NODE (gbp_learn_ip6_node) = {
+ .function = gbp_learn_ip6,
+ .name = "gbp-learn-ip6",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_learn_l3_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_ip6_node, gbp_learn_ip6);
+
+VNET_FEATURE_INIT (gbp_learn_ip6, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "gbp-learn-ip6",
+};
+
+/* *INDENT-ON* */
+
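+/*
+ * Enable/disable learning on an interface: L2 mode toggles the GBP-learn
+ * feature in the l2input feature bitmap, L3 mode toggles the
+ * gbp-learn-ip[46] features on the ip[46]-unicast arcs.
+ */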
+void
+gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode)
+{
+ if (GBP_LEARN_MODE_L2 == mode)
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 1);
+ else
+ {
+ vnet_feature_enable_disable ("ip4-unicast",
+ "gbp-learn-ip4", sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "gbp-learn-ip6", sw_if_index, 1, 0, 0);
+ }
+}
+
+void
+gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode)
+{
+ if (GBP_LEARN_MODE_L2 == mode)
+ l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 0);
+ else
+ {
+ vnet_feature_enable_disable ("ip4-unicast",
+ "gbp-learn-ip4", sw_if_index, 0, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast",
+ "gbp-learn-ip6", sw_if_index, 0, 0, 0);
+ }
+}
+
+static clib_error_t *
+gbp_learn_init (vlib_main_t * vm)
+{
+ gbp_learn_main_t *glm = &gbp_learn_main;
+ vlib_thread_main_t *tm = &vlib_thread_main;
+
+ /* Initialize the feature next-node indices */
+ feat_bitmap_init_next_nodes (vm,
+ gbp_learn_l2_node.index,
+ L2INPUT_N_FEAT,
+ l2input_get_feat_names (),
+ glm->gl_l2_input_feat_next);
+
+ throttle_init (&glm->gl_l2_throttle,
+ tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);
+
+ throttle_init (&glm->gl_l3_throttle,
+ tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);
+
+ glm->gl_logger = vlib_log_register_class ("gbp", "learn");
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (gbp_learn_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_learn.h b/src/plugins/gbp/gbp_learn.h
new file mode 100644
index 00000000000..836daf80886
--- /dev/null
+++ b/src/plugins/gbp/gbp_learn.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_LEARN_H__
+#define __GBP_LEARN_H__
+
+#include <plugins/gbp/gbp.h>
+
+typedef enum gbp_learn_mode_t_
+{
+ GBP_LEARN_MODE_L2,
+ GBP_LEARN_MODE_L3,
+} gbb_learn_mode_t;
+
+extern void gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode);
+extern void gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode);
+
+extern void gbp_learn_set_inactive_threshold (u32 max_age);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_policy.c b/src/plugins/gbp/gbp_policy.c
index f57aa07a853..6d84a994d41 100644
--- a/src/plugins/gbp/gbp_policy.c
+++ b/src/plugins/gbp/gbp_policy.c
@@ -15,6 +15,8 @@
#include <plugins/gbp/gbp.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
/**
* Grouping of global data for the GBP source EPG classification feature
*/
@@ -23,7 +25,7 @@ typedef struct gbp_policy_main_t_
/**
* Next nodes for L2 output features
*/
- u32 l2_output_feat_next[32];
+ u32 l2_output_feat_next[2][32];
} gbp_policy_main_t;
static gbp_policy_main_t gbp_policy_main;
@@ -59,14 +61,16 @@ typedef enum
typedef struct gbp_policy_trace_t_
{
/* per-pkt trace data */
- epg_id_t src_epg;
- epg_id_t dst_epg;
+ u32 src_epg;
+ u32 dst_epg;
u32 acl_index;
+ u32 allowed;
} gbp_policy_trace_t;
static uword
-gbp_policy (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+gbp_policy_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, u8 is_port_based)
{
gbp_main_t *gm = &gbp_main;
gbp_policy_main_t *gpm = &gbp_policy_main;
@@ -85,7 +89,8 @@ gbp_policy (vlib_main_t * vm,
while (n_left_from > 0 && n_left_to_next > 0)
{
- const gbp_endpoint_t *gep0;
+ const ethernet_header_t *h0;
+ const gbp_endpoint_t *ge0;
gbp_policy_next_t next0;
gbp_contract_key_t key0;
gbp_contract_value_t value0 = {
@@ -103,13 +108,39 @@ gbp_policy (vlib_main_t * vm,
n_left_to_next -= 1;
b0 = vlib_get_buffer (vm, bi0);
+ h0 = vlib_buffer_get_current (b0);
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
/*
+ * If the A-bit is set then policy has already been applied
+ * and we skip enforcement here.
+ */
+ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
+ {
+ next0 = vnet_l2_feature_next (b0,
+ gpm->l2_output_feat_next
+ [is_port_based],
+ (is_port_based ?
+ L2OUTPUT_FEAT_GBP_POLICY_PORT :
+ L2OUTPUT_FEAT_GBP_POLICY_MAC));
+ key0.as_u32 = ~0;
+ goto trace;
+ }
+ /*
* determine the src and dst EPG
*/
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
- gep0 = gbp_endpoint_get_itf (sw_if_index0);
- key0.gck_dst = gep0->ge_epg_id;
+ if (is_port_based)
+ ge0 = gbp_endpoint_find_itf (sw_if_index0);
+ else
+ ge0 = gbp_endpoint_find_mac (h0->dst_address,
+ vnet_buffer (b0)->l2.bd_index);
+
+ if (NULL != ge0)
+ key0.gck_dst = ge0->ge_epg_id;
+ else
+ /* If you cannot determine the destination EP then drop */
+ goto trace;
+
key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
if (EPG_INVALID != key0.gck_src)
@@ -119,8 +150,14 @@ gbp_policy (vlib_main_t * vm,
/*
* intra-epg allowed
*/
- next0 = vnet_l2_feature_next (b0, gpm->l2_output_feat_next,
- L2OUTPUT_FEAT_GBP_POLICY);
+ next0 =
+ vnet_l2_feature_next (b0,
+ gpm->l2_output_feat_next
+ [is_port_based],
+ (is_port_based ?
+ L2OUTPUT_FEAT_GBP_POLICY_PORT :
+ L2OUTPUT_FEAT_GBP_POLICY_MAC));
+ vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
}
else
{
@@ -163,9 +200,19 @@ gbp_policy (vlib_main_t * vm,
&trace_bitmap0);
if (action0 > 0)
- next0 =
- vnet_l2_feature_next (b0, gpm->l2_output_feat_next,
- L2OUTPUT_FEAT_GBP_POLICY);
+ {
+ vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+
+ next0 =
+ vnet_l2_feature_next (b0,
+ gpm->l2_output_feat_next
+ [is_port_based],
+ (is_port_based ?
+ L2OUTPUT_FEAT_GBP_POLICY_PORT
+ :
+ L2OUTPUT_FEAT_GBP_POLICY_MAC));
+ }
}
}
}
@@ -175,10 +222,15 @@ gbp_policy (vlib_main_t * vm,
* the src EPG is not set when the packet arrives on an EPG
* uplink interface and we do not need to apply policy
*/
- next0 = vnet_l2_feature_next (b0, gpm->l2_output_feat_next,
- L2OUTPUT_FEAT_GBP_POLICY);
+ next0 =
+ vnet_l2_feature_next (b0,
+ gpm->l2_output_feat_next[is_port_based],
+ (is_port_based ?
+ L2OUTPUT_FEAT_GBP_POLICY_PORT :
+ L2OUTPUT_FEAT_GBP_POLICY_MAC));
}
+ trace:
if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
{
gbp_policy_trace_t *t =
@@ -186,6 +238,7 @@ gbp_policy (vlib_main_t * vm,
t->src_epg = key0.gck_src;
t->dst_epg = key0.gck_dst;
t->acl_index = value0.gc_acl_index;
+ t->allowed = (next0 != GBP_POLICY_NEXT_DENY);
}
/* verify speculative enqueue, maybe switch current next frame */
@@ -200,6 +253,20 @@ gbp_policy (vlib_main_t * vm,
return frame->n_vectors;
}
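+/*
+ * Two flavours of the policy node: the port variant finds the destination
+ * endpoint from the TX sw_if_index, the MAC variant from the destination
+ * MAC address within the bridge-domain.
+ */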
+static uword
+gbp_policy_port (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_policy_inline (vm, node, frame, 1));
+}
+
+static uword
+gbp_policy_mac (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return (gbp_policy_inline (vm, node, frame, 0));
+}
+
/* packet trace format function */
static u8 *
format_gbp_policy_trace (u8 * s, va_list * args)
@@ -209,16 +276,16 @@ format_gbp_policy_trace (u8 * s, va_list * args)
gbp_policy_trace_t *t = va_arg (*args, gbp_policy_trace_t *);
s =
- format (s, "src:%d, dst:%d, acl:%d", t->src_epg, t->dst_epg,
- t->acl_index);
+ format (s, "src:%d, dst:%d, acl:%d allowed:%d",
+ t->src_epg, t->dst_epg, t->acl_index, t->allowed);
return s;
}
/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (gbp_policy_node) = {
- .function = gbp_policy,
- .name = "gbp-policy",
+VLIB_REGISTER_NODE (gbp_policy_port_node) = {
+ .function = gbp_policy_port,
+ .name = "gbp-policy-port",
.vector_size = sizeof (u32),
.format_trace = format_gbp_policy_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
@@ -233,7 +300,26 @@ VLIB_REGISTER_NODE (gbp_policy_node) = {
},
};
-VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_node, gbp_policy);
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_port_node, gbp_policy_port);
+
+VLIB_REGISTER_NODE (gbp_policy_mac_node) = {
+ .function = gbp_policy_mac,
+ .name = "gbp-policy-mac",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gbp_policy_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(gbp_policy_error_strings),
+ .error_strings = gbp_policy_error_strings,
+
+ .n_next_nodes = GBP_POLICY_N_NEXT,
+
+ .next_nodes = {
+ [GBP_POLICY_NEXT_DENY] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_mac_node, gbp_policy_mac);
/* *INDENT-ON* */
@@ -245,10 +331,15 @@ gbp_policy_init (vlib_main_t * vm)
/* Initialize the feature next-node indexes */
feat_bitmap_init_next_nodes (vm,
- gbp_policy_node.index,
+ gbp_policy_port_node.index,
+ L2OUTPUT_N_FEAT,
+ l2output_get_feat_names (),
+ gpm->l2_output_feat_next[1]);
+ feat_bitmap_init_next_nodes (vm,
+ gbp_policy_mac_node.index,
L2OUTPUT_N_FEAT,
l2output_get_feat_names (),
- gpm->l2_output_feat_next);
+ gpm->l2_output_feat_next[0]);
return error;
}
diff --git a/src/plugins/gbp/gbp_policy_dpo.c b/src/plugins/gbp/gbp_policy_dpo.c
index a2d9510b838..fd9dbce8bfa 100644
--- a/src/plugins/gbp/gbp_policy_dpo.c
+++ b/src/plugins/gbp/gbp_policy_dpo.c
@@ -17,6 +17,8 @@
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>
#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
#include <plugins/gbp/gbp.h>
#include <plugins/gbp/gbp_policy_dpo.h>
@@ -49,7 +51,7 @@ gbp_policy_dpo_alloc (void)
{
gbp_policy_dpo_t *gpd;
- pool_get (gbp_policy_dpo_pool, gpd);
+ pool_get_zero (gbp_policy_dpo_pool, gpd);
return (gpd);
}
@@ -110,19 +112,24 @@ gbp_policy_dpo_add_or_lock (dpo_proto_t dproto,
dpo_id_t parent = DPO_INVALID;
gpd = gbp_policy_dpo_alloc ();
- clib_memset (gpd, 0, sizeof (*gpd));
gpd->gpd_proto = dproto;
gpd->gpd_sw_if_index = sw_if_index;
gpd->gpd_epg = epg;
- /*
- * stack on the DVR DPO for the output interface
- */
- dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
+ if (~0 != sw_if_index)
+ {
+ /*
+ * stack on the DVR DPO for the output interface
+ */
+ dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
+ }
+ else
+ {
+ dpo_copy (&parent, drop_dpo_get (dproto));
+ }
dpo_stack (gbp_policy_dpo_type, dproto, &gpd->gpd_dpo, &parent);
-
dpo_set (dpo, gbp_policy_dpo_type, dproto, gbp_policy_dpo_get_index (gpd));
}
@@ -144,11 +151,36 @@ format_gbp_policy_dpo (u8 * s, va_list * ap)
return (s);
}
+/**
+ * Interpose a policy DPO
+ */
+static void
+gbp_policy_dpo_interpose (const dpo_id_t * original,
+ const dpo_id_t * parent, dpo_id_t * clone)
+{
+ gbp_policy_dpo_t *gpd, *gpd_clone;
+
+ gpd_clone = gbp_policy_dpo_alloc ();
+ gpd = gbp_policy_dpo_get (original->dpoi_index);
+
+ gpd_clone->gpd_proto = gpd->gpd_proto;
+ gpd_clone->gpd_epg = gpd->gpd_epg;
+ gpd_clone->gpd_sw_if_index = gpd->gpd_sw_if_index;
+
+ dpo_stack (gbp_policy_dpo_type,
+ gpd_clone->gpd_proto, &gpd_clone->gpd_dpo, parent);
+
+ dpo_set (clone,
+ gbp_policy_dpo_type,
+ gpd_clone->gpd_proto, gbp_policy_dpo_get_index (gpd_clone));
+}
+
const static dpo_vft_t gbp_policy_dpo_vft = {
.dv_lock = gbp_policy_dpo_lock,
.dv_unlock = gbp_policy_dpo_unlock,
.dv_format = format_gbp_policy_dpo,
.dv_get_urpf = gbp_policy_dpo_get_urpf,
+ .dv_mk_interpose = gbp_policy_dpo_interpose,
};
/**
@@ -195,6 +227,7 @@ typedef struct gbp_policy_dpo_trace_t_
u32 src_epg;
u32 dst_epg;
u32 acl_index;
+ u32 a_bit;
} gbp_policy_dpo_trace_t;
typedef enum
@@ -241,10 +274,18 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
next0 = GBP_POLICY_DROP;
b0 = vlib_get_buffer (vm, bi0);
+
gpd0 =
gbp_policy_dpo_get_i (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;
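+ /*
+ * If the A (allow) bit is already set, policy was applied elsewhere,
+ * so skip enforcement and forward via the stacked DPO.
+ */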
+ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
+ {
+ next0 = gpd0->gpd_dpo.dpoi_next_node;
+ key0.as_u32 = ~0;
+ goto trace;
+ }
+
key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
key0.gck_dst = gpd0->gpd_epg;
@@ -256,6 +297,7 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
* intra-epg allowed
*/
next0 = gpd0->gpd_dpo.dpoi_next_node;
+ vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
}
else
{
@@ -287,7 +329,10 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
&trace_bitmap0);
if (action0 > 0)
- next0 = gpd0->gpd_dpo.dpoi_next_node;
+ {
+ vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+ next0 = gpd0->gpd_dpo.dpoi_next_node;
+ }
}
}
}
@@ -299,7 +344,7 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
*/
next0 = gpd0->gpd_dpo.dpoi_next_node;
}
-
+ trace:
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
gbp_policy_dpo_trace_t *tr;
@@ -308,6 +353,7 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
tr->src_epg = key0.gck_src;
tr->dst_epg = key0.gck_dst;
tr->acl_index = value0.gc_acl_index;
+ tr->a_bit = vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A;
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
@@ -325,8 +371,8 @@ format_gbp_policy_dpo_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
gbp_policy_dpo_trace_t *t = va_arg (*args, gbp_policy_dpo_trace_t *);
- s = format (s, " src-epg:%d dst-epg:%d acl-index:%d",
- t->src_epg, t->dst_epg, t->acl_index);
+ s = format (s, " src-epg:%d dst-epg:%d acl-index:%d a-bit:%d",
+ t->src_epg, t->dst_epg, t->acl_index, t->a_bit);
return s;
}
diff --git a/src/plugins/gbp/gbp_recirc.c b/src/plugins/gbp/gbp_recirc.c
index 95e8066d443..57ba4083671 100644
--- a/src/plugins/gbp/gbp_recirc.c
+++ b/src/plugins/gbp/gbp_recirc.c
@@ -16,6 +16,7 @@
#include <plugins/gbp/gbp_recirc.h>
#include <plugins/gbp/gbp_endpoint_group.h>
#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_itf.h>
#include <vnet/dpo/dvr_dpo.h>
#include <vnet/fib/fib_table.h>
@@ -30,6 +31,25 @@ gbp_recirc_t *gbp_recirc_pool;
*/
index_t *gbp_recirc_db;
+/**
+ * logger
+ */
+vlib_log_class_t gr_logger;
+
+#define GBP_RECIRC_DBG(...) \
+ vlib_log_debug (gr_logger, __VA_ARGS__);
+
+u8 *
+format_gbp_recirc (u8 * s, va_list * args)
+{
+ gbp_recirc_t *gr = va_arg (*args, gbp_recirc_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+
+ return format (s, " %U, epg:%d, ext:%d",
+ format_vnet_sw_if_index_name, vnm,
+ gr->gr_sw_if_index, gr->gr_epg, gr->gr_is_ext);
+}
+
int
gbp_recirc_add (u32 sw_if_index, epg_id_t epg_id, u8 is_ext)
{
@@ -42,8 +62,14 @@ gbp_recirc_add (u32 sw_if_index, epg_id_t epg_id, u8 is_ext)
if (INDEX_INVALID == gri)
{
- gbp_endpoint_group_t *gepg;
+ gbp_endpoint_group_t *gg;
fib_protocol_t fproto;
+ index_t ggi;
+
+ ggi = gbp_endpoint_group_find_and_lock (epg_id);
+
+ if (INDEX_INVALID == ggi)
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
pool_get (gbp_recirc_pool, gr);
clib_memset (gr, 0, sizeof (*gr));
@@ -62,17 +88,21 @@ gbp_recirc_add (u32 sw_if_index, epg_id_t epg_id, u8 is_ext)
/*
* cache the FIB indicies of the EPG
*/
- gepg = gbp_endpoint_group_find (gr->gr_epg);
-
- if (NULL == gepg)
- return (VNET_API_ERROR_NO_SUCH_ENTRY);
+ gr->gr_epgi = ggi;
+ gg = gbp_endpoint_group_get (gr->gr_epgi);
FOR_EACH_FIB_IP_PROTOCOL (fproto)
{
- gr->gr_fib_index[fproto] = gepg->gepg_fib_index[fproto];
+ gr->gr_fib_index[fproto] =
+ gbp_endpoint_group_get_fib_index (gg, fproto);
}
/*
+ * bind to the bridge-domain of the EPG
+ */
+ gr->gr_itf = gbp_itf_add_and_lock (gr->gr_sw_if_index, gg->gg_bd_index);
+
+ /*
* Packets on the recirculation interface are subject to src-EPG
* classification. Recirc interfaces are L2-emulation mode.
* for internal EPGs this is via an LPM on all external subnets.
@@ -80,13 +110,19 @@ gbp_recirc_add (u32 sw_if_index, epg_id_t epg_id, u8 is_ext)
*/
if (gr->gr_is_ext)
{
+ mac_address_t mac;
/*
* recirc is for post-NAT translation packets going into
* the external EPG, these are classified to the NAT EPG
* based on its port
*/
+ mac_address_from_bytes (&mac,
+ vnet_sw_interface_get_hw_address
+ (vnet_get_main (), gr->gr_sw_if_index));
gbp_endpoint_update (gr->gr_sw_if_index,
- NULL, NULL, gr->gr_epg, &gr->gr_ep);
+ NULL, &mac, gr->gr_epg,
+ GBP_ENDPOINT_FLAG_NONE,
+ NULL, NULL, &gr->gr_ep);
vnet_feature_enable_disable ("ip4-unicast",
"ip4-gbp-src-classify",
gr->gr_sw_if_index, 1, 0, 0);
@@ -111,7 +147,12 @@ gbp_recirc_add (u32 sw_if_index, epg_id_t epg_id, u8 is_ext)
gbp_recirc_db[sw_if_index] = gri;
}
+ else
+ {
+ gr = gbp_recirc_get (gri);
+ }
+ GBP_RECIRC_DBG ("add: %U", format_gbp_recirc, gr);
return (0);
}
@@ -127,6 +168,8 @@ gbp_recirc_delete (u32 sw_if_index)
{
gr = pool_elt_at_index (gbp_recirc_pool, gri);
+ GBP_RECIRC_DBG ("del: %U", format_gbp_recirc, gr);
+
if (gr->gr_is_ext)
{
gbp_endpoint_delete (gr->gr_ep);
@@ -150,6 +193,9 @@ gbp_recirc_delete (u32 sw_if_index)
ip4_sw_interface_enable_disable (gr->gr_sw_if_index, 0);
ip6_sw_interface_enable_disable (gr->gr_sw_if_index, 0);
+ gbp_itf_unlock (gr->gr_itf);
+
+ gbp_endpoint_group_unlock (gr->gr_epgi);
gbp_recirc_db[sw_if_index] = INDEX_INVALID;
pool_put (gbp_recirc_pool, gr);
}
@@ -158,12 +204,12 @@ gbp_recirc_delete (u32 sw_if_index)
void
gbp_recirc_walk (gbp_recirc_cb_t cb, void *ctx)
{
- gbp_recirc_t *gbpe;
+ gbp_recirc_t *ge;
/* *INDENT-OFF* */
- pool_foreach(gbpe, gbp_recirc_pool,
+ pool_foreach(ge, gbp_recirc_pool,
{
- if (!cb(gbpe, ctx))
+ if (!cb(ge, ctx))
break;
});
/* *INDENT-ON* */
@@ -172,13 +218,7 @@ gbp_recirc_walk (gbp_recirc_cb_t cb, void *ctx)
static int
gbp_recirc_show_one (gbp_recirc_t * gr, void *ctx)
{
- vnet_main_t *vnm = vnet_get_main ();
- vlib_main_t *vm;
-
- vm = ctx;
- vlib_cli_output (vm, " %U, epg:%d, ext:%d",
- format_vnet_sw_if_index_name, vnm,
- gr->gr_sw_if_index, gr->gr_epg, gr->gr_is_ext);
+ vlib_cli_output (ctx, " %U", format_gbp_recirc, gr);
return (1);
}
@@ -193,7 +233,6 @@ gbp_recirc_show (vlib_main_t * vm,
return (NULL);
}
-
/*?
* Show Group Based Policy Recircs and derived information
*
@@ -209,6 +248,16 @@ VLIB_CLI_COMMAND (gbp_recirc_show_node, static) = {
};
/* *INDENT-ON* */
+static clib_error_t *
+gbp_recirc_init (vlib_main_t * vm)
+{
+ gr_logger = vlib_log_register_class ("gbp", "recirc");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_recirc_init);
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/plugins/gbp/gbp_recirc.h b/src/plugins/gbp/gbp_recirc.h
index 148a5beafcb..1d1a88a396b 100644
--- a/src/plugins/gbp/gbp_recirc.h
+++ b/src/plugins/gbp/gbp_recirc.h
@@ -30,6 +30,11 @@ typedef struct gpb_recirc_t_
epg_id_t gr_epg;
/**
+ * The index of the EPG
+ */
+ index_t gr_epgi;
+
+ /**
* FIB indices the EPG is mapped to
*/
u32 gr_fib_index[FIB_PROTOCOL_IP_MAX];
@@ -43,6 +48,7 @@ typedef struct gpb_recirc_t_
/**
*/
u32 gr_sw_if_index;
+ u32 gr_itf;
/**
* The endpoint created to represent the reric interface
@@ -62,7 +68,7 @@ extern void gbp_recirc_walk (gbp_recirc_cb_t bgpe, void *ctx);
extern gbp_recirc_t *gbp_recirc_pool;
extern index_t *gbp_recirc_db;
-always_inline const gbp_recirc_t *
+always_inline gbp_recirc_t *
gbp_recirc_get (u32 sw_if_index)
{
return (pool_elt_at_index (gbp_recirc_pool, gbp_recirc_db[sw_if_index]));
diff --git a/src/plugins/gbp/gbp_route_domain.c b/src/plugins/gbp/gbp_route_domain.c
new file mode 100644
index 00000000000..5518cc1ca36
--- /dev/null
+++ b/src/plugins/gbp/gbp_route_domain.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_endpoint.h>
+
+#include <vnet/dpo/dvr_dpo.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/ip/ip_neighbor.h>
+
+/**
+ * A fixed MAC address to use as the source MAC for packets L3 switched
+ * onto the routed uu-fwd interfaces.
+ * Magic values - origin lost to the mists of time...
+ */
+/* *INDENT-OFF* */
+const static mac_address_t GBP_ROUTED_SRC_MAC = {
+ .bytes = {
+ 0x0, 0x22, 0xBD, 0xF8, 0x19, 0xFF,
+ }
+};
+
+const static mac_address_t GBP_ROUTED_DST_MAC = {
+ .bytes = {
+ 0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ }
+};
+/* *INDENT-ON* */
+
+/**
+ * Pool of GBP route_domains
+ */
+gbp_route_domain_t *gbp_route_domain_pool;
+
+/**
+ * DB of route_domains
+ */
+typedef struct gbp_route_domain_db_t
+{
+ uword *gbd_by_rd_id;
+} gbp_route_domain_db_t;
+
+static gbp_route_domain_db_t gbp_route_domain_db;
+
+/**
+ * logger
+ */
+vlib_log_class_t grd_logger;
+
+#define GBP_BD_DBG(...) \
+ vlib_log_debug (grd_logger, __VA_ARGS__);
+
+gbp_route_domain_t *
+gbp_route_domain_get (index_t i)
+{
+ return (pool_elt_at_index (gbp_route_domain_pool, i));
+}
+
+static void
+gbp_route_domain_lock (index_t i)
+{
+ gbp_route_domain_t *grd;
+
+ grd = gbp_route_domain_get (i);
+ grd->grd_locks++;
+}
+
+index_t
+gbp_route_domain_find (u32 rd_id)
+{
+ uword *p;
+
+ p = hash_get (gbp_route_domain_db.gbd_by_rd_id, rd_id);
+
+ if (NULL != p)
+ return p[0];
+
+ return (INDEX_INVALID);
+}
+
+index_t
+gbp_route_domain_find_and_lock (u32 rd_id)
+{
+ index_t grdi;
+
+ grdi = gbp_route_domain_find (rd_id);
+
+ if (INDEX_INVALID != grdi)
+ {
+ gbp_route_domain_lock (grdi);
+ }
+ return (grdi);
+}
+
+static void
+gbp_route_domain_db_add (gbp_route_domain_t * grd)
+{
+ index_t grdi = grd - gbp_route_domain_pool;
+
+ hash_set (gbp_route_domain_db.gbd_by_rd_id, grd->grd_id, grdi);
+}
+
+static void
+gbp_route_domain_db_remove (gbp_route_domain_t * grd)
+{
+ hash_unset (gbp_route_domain_db.gbd_by_rd_id, grd->grd_id);
+}
+
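+/*
+ * Create, or take another lock on, a route-domain. On first creation the
+ * per-protocol FIB tables are found/created and, where a uu-fwd interface
+ * is supplied, a broadcast neighbour adjacency is built whose rewrite uses
+ * the fixed routed MACs above; subnet routes are later added via this
+ * adjacency.
+ */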
+int
+gbp_route_domain_add_and_lock (u32 rd_id,
+ u32 ip4_table_id,
+ u32 ip6_table_id,
+ u32 ip4_uu_sw_if_index, u32 ip6_uu_sw_if_index)
+{
+ gbp_route_domain_t *grd;
+ index_t grdi;
+
+ grdi = gbp_route_domain_find (rd_id);
+
+ if (INDEX_INVALID == grdi)
+ {
+ fib_protocol_t fproto;
+
+ pool_get_zero (gbp_route_domain_pool, grd);
+
+ grd->grd_id = rd_id;
+ grd->grd_table_id[FIB_PROTOCOL_IP4] = ip4_table_id;
+ grd->grd_table_id[FIB_PROTOCOL_IP6] = ip6_table_id;
+ grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP4] = ip4_uu_sw_if_index;
+ grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP6] = ip6_uu_sw_if_index;
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ grd->grd_fib_index[fproto] =
+ fib_table_find_or_create_and_lock (fproto,
+ grd->grd_table_id[fproto],
+ FIB_SOURCE_PLUGIN_HI);
+
+ if (~0 != grd->grd_uu_sw_if_index[fproto])
+ {
+ ethernet_header_t *eth;
+ u8 *rewrite;
+
+ rewrite = NULL;
+ vec_validate (rewrite, sizeof (*eth) - 1);
+ eth = (ethernet_header_t *) rewrite;
+
+ eth->type = clib_host_to_net_u16 ((fproto == FIB_PROTOCOL_IP4 ?
+ ETHERNET_TYPE_IP4 :
+ ETHERNET_TYPE_IP6));
+
+ mac_address_to_bytes (gbp_route_domain_get_local_mac (),
+ eth->src_address);
+ mac_address_to_bytes (gbp_route_domain_get_remote_mac (),
+ eth->dst_address);
+
+ /*
+ * create an adjacency out of the uu-fwd interfaces that will
+ * be used when adding subnet routes.
+ */
+ grd->grd_adj[fproto] =
+ adj_nbr_add_or_lock_w_rewrite (fproto,
+ fib_proto_to_link (fproto),
+ &ADJ_BCAST_ADDR,
+ grd->grd_uu_sw_if_index[fproto],
+ rewrite);
+ }
+ else
+ {
+ grd->grd_adj[fproto] = INDEX_INVALID;
+ }
+ }
+
+ gbp_route_domain_db_add (grd);
+ }
+ else
+ {
+ grd = gbp_route_domain_get (grdi);
+ }
+
+ grd->grd_locks++;
+ GBP_BD_DBG ("add: %U", format_gbp_route_domain, grd);
+
+ return (0);
+}
+
+void
+gbp_route_domain_unlock (index_t index)
+{
+ gbp_route_domain_t *grd;
+
+ grd = gbp_route_domain_get (index);
+
+ grd->grd_locks--;
+
+ if (0 == grd->grd_locks)
+ {
+ fib_protocol_t fproto;
+
+ GBP_BD_DBG ("destroy: %U", format_gbp_route_domain, grd);
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ fib_table_unlock (grd->grd_fib_index[fproto],
+ fproto, FIB_SOURCE_PLUGIN_HI);
+ if (INDEX_INVALID != grd->grd_adj[fproto])
+ adj_unlock (grd->grd_adj[fproto]);
+ }
+
+ gbp_route_domain_db_remove (grd);
+
+ pool_put (gbp_route_domain_pool, grd);
+ }
+}
+
+int
+gbp_route_domain_delete (u32 rd_id)
+{
+ index_t grdi;
+
+ GBP_BD_DBG ("del: %d", rd_id);
+ grdi = gbp_route_domain_find (rd_id);
+
+ if (INDEX_INVALID != grdi)
+ {
+ GBP_BD_DBG ("del: %U", format_gbp_route_domain,
+ gbp_route_domain_get (grdi));
+ gbp_route_domain_unlock (grdi);
+
+ return (0);
+ }
+
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+const mac_address_t *
+gbp_route_domain_get_local_mac (void)
+{
+ return (&GBP_ROUTED_SRC_MAC);
+}
+
+const mac_address_t *
+gbp_route_domain_get_remote_mac (void)
+{
+ return (&GBP_ROUTED_DST_MAC);
+}
+
+void
+gbp_route_domain_walk (gbp_route_domain_cb_t cb, void *ctx)
+{
+ gbp_route_domain_t *gbpe;
+
+ /* *INDENT-OFF* */
+ pool_foreach(gbpe, gbp_route_domain_pool,
+ {
+ if (!cb(gbpe, ctx))
+ break;
+ });
+ /* *INDENT-ON* */
+}
+
+static clib_error_t *
+gbp_route_domain_cli (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 ip4_uu_sw_if_index = ~0;
+ u32 ip6_uu_sw_if_index = ~0;
+ u32 ip4_table_id = ~0;
+ u32 ip6_table_id = ~0;
+ u32 rd_id = ~0;
+ u8 add = 1;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "ip4-uu %U", unformat_vnet_sw_interface,
+ vnm, &ip4_uu_sw_if_index))
+ ;
+ else if (unformat (input, "ip6-uu %U", unformat_vnet_sw_interface,
+ vnm, &ip6_uu_sw_if_index))
+ ;
+ else if (unformat (input, "ip4-table-id %d", ip4_table_id))
+ ;
+ else if (unformat (input, "ip6-table-id %d", ip6_table_id))
+ ;
+ else if (unformat (input, "add"))
+ add = 1;
+ else if (unformat (input, "del"))
+ add = 0;
+ else if (unformat (input, "rd %d", &rd_id))
+ ;
+ else
+ break;
+ }
+
+ if (~0 == rd_id)
+ return clib_error_return (0, "RD-ID must be specified");
+
+ if (add)
+ {
+ if (~0 == ip4_table_id)
+ return clib_error_return (0, "IP4 table-ID must be specified");
+ if (~0 == ip6_table_id)
+ return clib_error_return (0, "IP6 table-ID must be specified");
+
+ gbp_route_domain_add_and_lock (rd_id, ip4_table_id,
+ ip6_table_id,
+ ip4_uu_sw_if_index, ip6_uu_sw_if_index);
+ }
+ else
+ gbp_route_domain_delete (rd_id);
+
+ return (NULL);
+}
+
+/*?
+ * Configure a GBP route-domain
+ *
+ * @cliexpar
+ * @cliexstart{gbp route-domain [del] rd <ID> ip4-table-id <ID> ip6-table-id <ID> [ip4-uu <interface>] [ip6-uu <interface>]}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_route_domain_cli_node, static) = {
+ .path = "gbp route-domain",
+ .short_help = "gbp route-domain [del] epg bd <ID> bvi <interface> uu-flood <interface>",
+ .function = gbp_route_domain_cli,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_gbp_route_domain (u8 * s, va_list * args)
+{
+ gbp_route_domain_t *grd = va_arg (*args, gbp_route_domain_t*);
+ vnet_main_t *vnm = vnet_get_main ();
+
+ if (NULL != grd)
+ s = format (s, "[%d] rd:%d ip4-uu:%U ip6-uu:%U locks:%d",
+ grd - gbp_route_domain_pool,
+ grd->grd_id,
+ format_vnet_sw_if_index_name, vnm, grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP4],
+ format_vnet_sw_if_index_name, vnm, grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP6],
+ grd->grd_locks);
+ else
+ s = format (s, "NULL");
+
+ return (s);
+}
+
+static int
+gbp_route_domain_show_one (gbp_route_domain_t *gb, void *ctx)
+{
+ vlib_main_t *vm;
+
+ vm = ctx;
+ vlib_cli_output (vm, " %U",format_gbp_route_domain, gb);
+
+ return (1);
+}
+
+static clib_error_t *
+gbp_route_domain_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vlib_cli_output (vm, "Route-Domains:");
+ gbp_route_domain_walk (gbp_route_domain_show_one, vm);
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy route-domains and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp route_domain}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_route_domain_show_node, static) = {
+ .path = "show gbp route-domain",
+ .short_help = "show gbp route-domain\n",
+ .function = gbp_route_domain_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_route_domain_init (vlib_main_t * vm)
+{
+ grd_logger = vlib_log_register_class ("gbp", "rd");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_route_domain_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_route_domain.h b/src/plugins/gbp/gbp_route_domain.h
new file mode 100644
index 00000000000..f7fc4a46d0b
--- /dev/null
+++ b/src/plugins/gbp/gbp_route_domain.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_ROUTE_DOMAIN_H__
+#define __GBP_ROUTE_DOMAIN_H__
+
+#include <plugins/gbp/gbp_types.h>
+
+#include <vnet/fib/fib_types.h>
+#include <vnet/ethernet/mac_address.h>
+
+/**
+ * A route Domain Representation.
+ * This is a standard route-domain plus all the attributes it must
+ * have to support the GBP model.
+ */
+typedef struct gpb_route_domain_t_
+{
+ /**
+ * Route-domain ID
+ */
+ u32 grd_id;
+ u32 grd_fib_index[FIB_PROTOCOL_IP_MAX];
+ u32 grd_table_id[FIB_PROTOCOL_IP_MAX];
+
+ /**
+ * The RD's VNI interface on which packets from unknown endpoints
+ * arrive
+ */
+ u32 grd_vni_sw_if_index;
+
+ /**
+ * The interfaces on which to send packets to unknown EPs
+ */
+ u32 grd_uu_sw_if_index[FIB_PROTOCOL_IP_MAX];
+
+ /**
+ * adjacencies on the UU interfaces.
+ */
+ u32 grd_adj[FIB_PROTOCOL_IP_MAX];
+
+ u32 grd_locks;
+} gbp_route_domain_t;
+
+extern int gbp_route_domain_add_and_lock (u32 rd_id,
+ u32 ip4_table_id,
+ u32 ip6_table_id,
+ u32 ip4_uu_sw_if_index,
+ u32 ip6_uu_sw_if_index);
+extern void gbp_route_domain_unlock (index_t grdi);
+extern index_t gbp_route_domain_find_and_lock (u32 rd_id);
+extern index_t gbp_route_domain_find (u32 rd_id);
+
+extern int gbp_route_domain_delete (u32 rd_id);
+extern gbp_route_domain_t *gbp_route_domain_get (index_t i);
+
+typedef int (*gbp_route_domain_cb_t) (gbp_route_domain_t * gb, void *ctx);
+extern void gbp_route_domain_walk (gbp_route_domain_cb_t bgpe, void *ctx);
+
+extern const mac_address_t *gbp_route_domain_get_local_mac (void);
+extern const mac_address_t *gbp_route_domain_get_remote_mac (void);
+
+extern u8 *format_gbp_route_domain (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_scanner.c b/src/plugins/gbp/gbp_scanner.c
new file mode 100644
index 00000000000..a2d0c9a98cb
--- /dev/null
+++ b/src/plugins/gbp/gbp_scanner.c
@@ -0,0 +1,104 @@
+/*
+ * gbp_scanner.c : Group Based Policy endpoint scanner
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_scanner.h>
+#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_vxlan.h>
+
+vlib_log_class_t gs_logger;
+
+#define GBP_SCANNER_DBG(...) \
+ vlib_log_debug (gs_logger, __VA_ARGS__);
+
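+/*
+ * Background process: while enabled it wakes every
+ * gbp_endpoint_scan_threshold() seconds and runs gbp_endpoint_scan();
+ * it is switched on/off by the GBP_ENDPOINT_SCAN_START/STOP events.
+ */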
+static uword
+gbp_scanner (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ uword event_type, *event_data = 0;
+ bool enabled = 0, do_scan = 0;
+
+ while (1)
+ {
+ do_scan = 0;
+
+ if (enabled)
+ {
+ /* scan every 'inactive threshold' seconds */
+ vlib_process_wait_for_event_or_clock (vm,
+ gbp_endpoint_scan_threshold
+ ());
+ }
+ else
+ vlib_process_wait_for_event (vm);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+ vec_reset_length (event_data);
+
+ switch (event_type)
+ {
+ case ~0:
+ /* timer expired */
+ do_scan = 1;
+ break;
+
+ case GBP_ENDPOINT_SCAN_START:
+ enabled = 1;
+ break;
+
+ case GBP_ENDPOINT_SCAN_STOP:
+ enabled = 0;
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ if (do_scan)
+ {
+ GBP_SCANNER_DBG ("start");
+ gbp_endpoint_scan (vm);
+ GBP_SCANNER_DBG ("stop");
+ }
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_scanner_node) = {
+ .function = gbp_scanner,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "gbp-scanner",
+};
+/* *INDENT-ON* */
+
+
+static clib_error_t *
+gbp_scanner_init (vlib_main_t * vm)
+{
+ gs_logger = vlib_log_register_class ("gbp", "scan");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_scanner_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_scanner.h b/src/plugins/gbp/gbp_scanner.h
new file mode 100644
index 00000000000..070da3892ca
--- /dev/null
+++ b/src/plugins/gbp/gbp_scanner.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_SCANNER_H__
+#define __GBP_SCANNER_H__
+
+#include <vlib/vlib.h>
+
+typedef enum gbp_scan_event_t_
+{
+ GBP_ENDPOINT_SCAN_START,
+ GBP_ENDPOINT_SCAN_STOP,
+ GBP_VXLAN_SCAN_START,
+ GBP_VXLAN_SCAN_STOP,
+} gbp_scan_event_t;
+
+extern vlib_node_registration_t gbp_scanner_node;
+
+#endif
diff --git a/src/plugins/gbp/gbp_subnet.c b/src/plugins/gbp/gbp_subnet.c
index b3925110b7b..d9d42997aa7 100644
--- a/src/plugins/gbp/gbp_subnet.c
+++ b/src/plugins/gbp/gbp_subnet.c
@@ -16,19 +16,125 @@
#include <plugins/gbp/gbp.h>
#include <plugins/gbp/gbp_fwd_dpo.h>
#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_route_domain.h>
#include <vnet/fib/fib_table.h>
#include <vnet/dpo/load_balance.h>
+/**
+ * a key for the DB
+ */
+typedef struct gbp_subnet_key_t_
+{
+ fib_prefix_t gsk_pfx;
+ u32 gsk_fib_index;
+} gbp_subnet_key_t;
+
+/**
+ * Subnet
+ */
+typedef struct gbp_subnet_t_
+{
+ gbp_subnet_key_t *gs_key;
+ gbp_subnet_type_t gs_type;
+ index_t gs_rd;
+
+ union
+ {
+ struct
+ {
+ epg_id_t gs_epg;
+ u32 gs_sw_if_index;
+ } gs_stitched_external;
+ };
+} gbp_subnet_t;
+
+/**
+ * A DB of the subnets; key={pfx,fib-index}
+ */
+uword *gbp_subnet_db;
+
+/**
+ * pool of subnets
+ */
+gbp_subnet_t *gbp_subnet_pool;
+
+static index_t
+gbp_subnet_db_find (u32 fib_index, const fib_prefix_t * pfx)
+{
+ gbp_subnet_key_t key = {
+ .gsk_pfx = *pfx,
+ .gsk_fib_index = fib_index,
+ };
+ uword *p;
+
+ p = hash_get_mem (gbp_subnet_db, &key);
+
+ if (NULL != p)
+ return p[0];
+
+ return (INDEX_INVALID);
+}
+
+static void
+gbp_subnet_db_add (u32 fib_index, const fib_prefix_t * pfx, gbp_subnet_t * gs)
+{
+ gbp_subnet_key_t *key;
+
+ key = clib_mem_alloc (sizeof (*key));
+
+ clib_memcpy (&(key->gsk_pfx), pfx, sizeof (*pfx));
+ key->gsk_fib_index = fib_index;
+
+ hash_set_mem (gbp_subnet_db, key, (gs - gbp_subnet_pool));
+
+ gs->gs_key = key;
+}
+
+static void
+gbp_subnet_db_del (gbp_subnet_t * gs)
+{
+ hash_unset_mem (gbp_subnet_db, gs->gs_key);
+
+ clib_mem_free (gs->gs_key);
+ gs->gs_key = NULL;
+}
+
+
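+/*
+ * A transport subnet is installed as a regular FIB route with a broadcast
+ * next-hop out of the route-domain's uu-fwd interface (cf. the adjacency
+ * built in gbp_route_domain_add_and_lock()); internal subnets below use
+ * the exclusive gbp-fwd DPO instead.
+ */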
+static int
+gbp_subnet_transport_add (const gbp_subnet_t * gs)
+{
+ dpo_id_t gfd = DPO_INVALID;
+ gbp_route_domain_t *grd;
+ fib_protocol_t fproto;
+
+ fproto = gs->gs_key->gsk_pfx.fp_proto;
+ grd = gbp_route_domain_get (gs->gs_rd);
+
+ fib_table_entry_update_one_path (gs->gs_key->gsk_fib_index,
+ &gs->gs_key->gsk_pfx,
+ FIB_SOURCE_PLUGIN_HI,
+ FIB_ENTRY_FLAG_NONE,
+ fib_proto_to_dpo (fproto),
+ &ADJ_BCAST_ADDR,
+ grd->grd_uu_sw_if_index[fproto],
+ ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE);
+
+ dpo_reset (&gfd);
+
+ return (0);
+}
+
static int
-gbp_internal_subnet_add (u32 fib_index, const fib_prefix_t * pfx)
+gbp_subnet_internal_add (const gbp_subnet_t * gs)
{
dpo_id_t gfd = DPO_INVALID;
- gbp_fwd_dpo_add_or_lock (fib_proto_to_dpo (pfx->fp_proto), &gfd);
+ gbp_fwd_dpo_add_or_lock (fib_proto_to_dpo (gs->gs_key->gsk_pfx.fp_proto),
+ &gfd);
- fib_table_entry_special_dpo_update (fib_index,
- pfx,
+ fib_table_entry_special_dpo_update (gs->gs_key->gsk_fib_index,
+ &gs->gs_key->gsk_pfx,
FIB_SOURCE_PLUGIN_HI,
FIB_ENTRY_FLAG_EXCLUSIVE, &gfd);
@@ -38,17 +144,19 @@ gbp_internal_subnet_add (u32 fib_index, const fib_prefix_t * pfx)
}
static int
-gbp_external_subnet_add (u32 fib_index,
- const fib_prefix_t * pfx,
- u32 sw_if_index, epg_id_t epg)
+gbp_subnet_external_add (gbp_subnet_t * gs, u32 sw_if_index, epg_id_t epg)
{
dpo_id_t gpd = DPO_INVALID;
- gbp_policy_dpo_add_or_lock (fib_proto_to_dpo (pfx->fp_proto),
- epg, sw_if_index, &gpd);
+ gs->gs_stitched_external.gs_epg = epg;
+ gs->gs_stitched_external.gs_sw_if_index = sw_if_index;
+
+ gbp_policy_dpo_add_or_lock (fib_proto_to_dpo (gs->gs_key->gsk_pfx.fp_proto),
+ gs->gs_stitched_external.gs_epg,
+ gs->gs_stitched_external.gs_sw_if_index, &gpd);
- fib_table_entry_special_dpo_update (fib_index,
- pfx,
+ fib_table_entry_special_dpo_update (gs->gs_key->gsk_fib_index,
+ &gs->gs_key->gsk_pfx,
FIB_SOURCE_PLUGIN_HI,
(FIB_ENTRY_FLAG_EXCLUSIVE |
FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT),
@@ -59,114 +167,255 @@ gbp_external_subnet_add (u32 fib_index,
return (0);
}
-static int
-gbp_subnet_del (u32 fib_index, const fib_prefix_t * pfx)
+int
+gbp_subnet_del (u32 rd_id, const fib_prefix_t * pfx)
{
+ gbp_route_domain_t *grd;
+ index_t gsi, grdi;
+ gbp_subnet_t *gs;
+ u32 fib_index;
+
+ grdi = gbp_route_domain_find (rd_id);
+
+ if (~0 == grdi)
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+
+ grd = gbp_route_domain_get (grdi);
+ fib_index = grd->grd_fib_index[pfx->fp_proto];
+
+ gsi = gbp_subnet_db_find (fib_index, pfx);
+
+ if (INDEX_INVALID == gsi)
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+
+ gs = pool_elt_at_index (gbp_subnet_pool, gsi);
+
fib_table_entry_delete (fib_index, pfx, FIB_SOURCE_PLUGIN_HI);
+ gbp_subnet_db_del (gs);
+ gbp_route_domain_unlock (gs->gs_rd);
+
+ pool_put (gbp_subnet_pool, gs);
+
return (0);
}
int
-gbp_subnet_add_del (u32 table_id,
- const fib_prefix_t * pfx,
- u32 sw_if_index, epg_id_t epg, u8 is_add, u8 is_internal)
+gbp_subnet_add (u32 rd_id,
+ const fib_prefix_t * pfx,
+ gbp_subnet_type_t type, u32 sw_if_index, epg_id_t epg)
{
+ gbp_route_domain_t *grd;
+ index_t grdi, gsi;
+ gbp_subnet_t *gs;
u32 fib_index;
+ int rv;
- fib_index = fib_table_find (pfx->fp_proto, table_id);
+ grdi = gbp_route_domain_find_and_lock (rd_id);
- if (~0 == fib_index)
+ if (~0 == grdi)
return (VNET_API_ERROR_NO_SUCH_FIB);
- if (is_internal && is_add)
- return (gbp_internal_subnet_add (fib_index, pfx));
- else if (!is_internal && is_add)
- return (gbp_external_subnet_add (fib_index, pfx, sw_if_index, epg));
+ grd = gbp_route_domain_get (grdi);
+ fib_index = grd->grd_fib_index[pfx->fp_proto];
- return (gbp_subnet_del (fib_index, pfx));
-}
+ gsi = gbp_subnet_db_find (fib_index, pfx);
-typedef struct gbp_subnet_fib_table_walk_ctx_t_
-{
- gbp_subnet_cb_t cb;
- void *ctx;
-} gbp_subnet_fib_table_walk_ctx_t;
+ if (INDEX_INVALID != gsi)
+ return (VNET_API_ERROR_ENTRY_ALREADY_EXISTS);
-static fib_table_walk_rc_t
-gbp_subnet_fib_table_walk (fib_node_index_t fei, void *arg)
-{
- gbp_subnet_fib_table_walk_ctx_t *ctx = arg;
- const fib_prefix_t *pfx;
- const dpo_id_t *dpo;
- u32 table_id;
+ rv = -2;
+
+ pool_get (gbp_subnet_pool, gs);
- pfx = fib_entry_get_prefix (fei);
- table_id = fib_table_get_table_id (fib_entry_get_fib_index (fei),
- pfx->fp_proto);
- dpo = fib_entry_contribute_ip_forwarding (fei);
+ gs->gs_type = type;
+ gs->gs_rd = grdi;
+ gbp_subnet_db_add (fib_index, pfx, gs);
- if (DPO_LOAD_BALANCE == dpo->dpoi_type)
+ switch (type)
{
- dpo = load_balance_get_bucket (dpo->dpoi_index, 0);
-
- if (dpo->dpoi_type == gbp_policy_dpo_get_type ())
- {
- gbp_policy_dpo_t *gpd;
-
- gpd = gbp_policy_dpo_get (dpo->dpoi_index);
-
- /* *INDENT-OFF* */
- ctx->cb (table_id, pfx,
- gpd->gpd_sw_if_index,
- gpd->gpd_epg,
- 0, // is_internal
- ctx->ctx);
- /* *INDENT-ON* */
- }
- else if (dpo->dpoi_type == gbp_fwd_dpo_get_type ())
- {
- /* *INDENT-OFF* */
- ctx->cb (table_id, pfx,
- ~0, // sw_if_index
- EPG_INVALID, // epg
- 1, // is_internal
- ctx->ctx);
- /* *INDENT-ON* */
- }
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ rv = gbp_subnet_internal_add (gs);
+ break;
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ rv = gbp_subnet_external_add (gs, sw_if_index, epg);
+ break;
+ case GBP_SUBNET_TRANSPORT:
+ rv = gbp_subnet_transport_add (gs);
+ break;
}
- return (FIB_TABLE_WALK_CONTINUE);
+ return (rv);
}
void
gbp_subnet_walk (gbp_subnet_cb_t cb, void *ctx)
{
- fib_table_t *fib_table;
+ gbp_route_domain_t *grd;
+ gbp_subnet_t *gs;
+ u32 sw_if_index;
+ epg_id_t epg;
- gbp_subnet_fib_table_walk_ctx_t wctx = {
- .cb = cb,
- .ctx = ctx,
- };
+ epg = EPG_INVALID;
+ sw_if_index = ~0;
/* *INDENT-OFF* */
- pool_foreach (fib_table, ip4_main.fibs,
+ pool_foreach (gs, gbp_subnet_pool,
({
- fib_table_walk(fib_table->ft_index,
- FIB_PROTOCOL_IP4,
- gbp_subnet_fib_table_walk,
- &wctx);
- }));
- pool_foreach (fib_table, ip6_main.fibs,
- ({
- fib_table_walk(fib_table->ft_index,
- FIB_PROTOCOL_IP6,
- gbp_subnet_fib_table_walk,
- &wctx);
+ grd = gbp_route_domain_get(gs->gs_rd);
+
+ switch (gs->gs_type)
+ {
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ case GBP_SUBNET_TRANSPORT:
+ /* use defaults above */
+ break;
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ sw_if_index = gs->gs_stitched_external.gs_sw_if_index;
+ epg = gs->gs_stitched_external.gs_epg;
+ break;
+ }
+
+ if (WALK_STOP == cb (grd->grd_id, &gs->gs_key->gsk_pfx,
+ gs->gs_type, epg, sw_if_index, ctx))
+ break;
}));
/* *INDENT-ON* */
}
+typedef enum gsb_subnet_show_flags_t_
+{
+ GBP_SUBNET_SHOW_BRIEF,
+ GBP_SUBNET_SHOW_DETAILS,
+} gsb_subnet_show_flags_t;
+
+static u8 *
+format_gbp_subnet_type (u8 * s, va_list * args)
+{
+ gbp_subnet_type_t type = va_arg (*args, gbp_subnet_type_t);
+
+ switch (type)
+ {
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ return (format (s, "stitched-internal"));
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ return (format (s, "stitched-external"));
+ case GBP_SUBNET_TRANSPORT:
+ return (format (s, "transport"));
+ }
+
+ return (format (s, "unknown"));
+}
+
+u8 *
+format_gbp_subnet (u8 * s, va_list * args)
+{
+ index_t gsi = va_arg (*args, index_t);
+ gsb_subnet_show_flags_t flags = va_arg (*args, gsb_subnet_show_flags_t);
+ gbp_subnet_t *gs;
+ u32 table_id;
+
+ gs = pool_elt_at_index (gbp_subnet_pool, gsi);
+
+ table_id = fib_table_get_table_id (gs->gs_key->gsk_fib_index,
+ gs->gs_key->gsk_pfx.fp_proto);
+
+ s = format (s, "[%d] tbl:%d %U %U", gsi, table_id,
+ format_fib_prefix, &gs->gs_key->gsk_pfx,
+ format_gbp_subnet_type, gs->gs_type);
+
+ switch (gs->gs_type)
+ {
+ case GBP_SUBNET_STITCHED_INTERNAL:
+ case GBP_SUBNET_TRANSPORT:
+ break;
+ case GBP_SUBNET_STITCHED_EXTERNAL:
+ s = format (s, " {epg:%d %U}", gs->gs_stitched_external.gs_epg,
+ format_vnet_sw_if_index_name,
+ vnet_get_main (), gs->gs_stitched_external.gs_sw_if_index);
+ break;
+ }
+
+ switch (flags)
+ {
+ case GBP_SUBNET_SHOW_DETAILS:
+ {
+ fib_node_index_t fei;
+
+ fei = fib_table_lookup_exact_match (gs->gs_key->gsk_fib_index,
+ &gs->gs_key->gsk_pfx);
+
+ s =
+ format (s, "\n %U", format_fib_entry, fei,
+ FIB_ENTRY_FORMAT_DETAIL);
+ }
+ case GBP_SUBNET_SHOW_BRIEF:
+ break;
+ }
+ return (s);
+}
+
+static clib_error_t *
+gbp_subnet_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ u32 gsi;
+
+ gsi = INDEX_INVALID;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%d", &gsi))
+ ;
+ else
+ break;
+ }
+
+ if (INDEX_INVALID != gsi)
+ {
+ vlib_cli_output (vm, "%U", format_gbp_subnet, gsi,
+ GBP_SUBNET_SHOW_DETAILS);
+ }
+ else
+ {
+ /* *INDENT-OFF* */
+ pool_foreach_index(gsi, gbp_subnet_pool,
+ ({
+ vlib_cli_output (vm, "%U", format_gbp_subnet, gsi,
+ GBP_SUBNET_SHOW_BRIEF);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy Subnets
+ *
+ * @cliexpar
+ * @cliexstart{show gbp subnet}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_subnet_show_node, static) = {
+ .path = "show gbp subnet",
+ .short_help = "show gbp subnet\n",
+ .function = gbp_subnet_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_subnet_init (vlib_main_t * vm)
+{
+ gbp_subnet_db = hash_create_mem (0,
+ sizeof (gbp_subnet_key_t), sizeof (u32));
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_subnet_init);
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/plugins/gbp/gbp_subnet.h b/src/plugins/gbp/gbp_subnet.h
index 24b4f3a5816..b6906dee674 100644
--- a/src/plugins/gbp/gbp_subnet.h
+++ b/src/plugins/gbp/gbp_subnet.h
@@ -18,16 +18,26 @@
#include <plugins/gbp/gbp_types.h>
-extern int gbp_subnet_add_del (u32 table_id,
- const fib_prefix_t * pfx,
- u32 sw_if_index,
- epg_id_t epg, u8 is_add, u8 is_internal);
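+/*
+ * Transport subnets are routed out of the RD's uu-fwd interfaces;
+ * stitched-internal subnets resolve via the gbp-fwd DPO;
+ * stitched-external subnets resolve via a gbp-policy DPO toward a given
+ * interface and EPG.
+ */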
+typedef enum gbp_subnet_type_t_
+{
+ GBP_SUBNET_TRANSPORT,
+ GBP_SUBNET_STITCHED_INTERNAL,
+ GBP_SUBNET_STITCHED_EXTERNAL,
+} gbp_subnet_type_t;
+extern int gbp_subnet_add (u32 rd_id,
+ const fib_prefix_t * pfx,
+ gbp_subnet_type_t type,
+ u32 sw_if_index, epg_id_t epg);
+
+extern int gbp_subnet_del (u32 rd_id, const fib_prefix_t * pfx);
+
+typedef walk_rc_t (*gbp_subnet_cb_t) (u32 rd_id,
+ const fib_prefix_t * pfx,
+ gbp_subnet_type_t type,
+ u32 sw_if_index,
+ epg_id_t epg, void *ctx);
-typedef int (*gbp_subnet_cb_t) (u32 table_id,
- const fib_prefix_t * pfx,
- u32 sw_if_index,
- epg_id_t epg, u8 is_internal, void *ctx);
extern void gbp_subnet_walk (gbp_subnet_cb_t cb, void *ctx);
#endif
diff --git a/src/plugins/gbp/gbp_vxlan.c b/src/plugins/gbp/gbp_vxlan.c
new file mode 100644
index 00000000000..b29fc111131
--- /dev/null
+++ b/src/plugins/gbp/gbp_vxlan.c
@@ -0,0 +1,880 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_vxlan.h>
+#include <plugins/gbp/gbp_itf.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+#include <vlibmemory/api.h>
+#include <vnet/fib/fib_table.h>
+
+/**
+ * A reference to a VXLAN-GBP tunnel created as a child/dependent tunnel
+ * of the template GBP-VXLAN tunnel
+ */
+typedef struct vxlan_tunnel_ref_t_
+{
+ u32 vxr_sw_if_index;
+ index_t vxr_itf;
+ u32 vxr_locks;
+ index_t vxr_parent;
+ gbp_vxlan_tunnel_layer_t vxr_layer;
+} vxlan_tunnel_ref_t;
+
+/**
+ * DB of added tunnels
+ */
+uword *gv_db;
+
+/**
+ * Logger
+ */
+vlib_log_class_t gt_logger;
+
+/**
+ * Pool of template tunnels
+ */
+gbp_vxlan_tunnel_t *gbp_vxlan_tunnel_pool;
+
+/**
+ * Pool of child tunnels
+ */
+vxlan_tunnel_ref_t *vxlan_tunnel_ref_pool;
+
+/**
+ * DB of template interfaces by SW interface index
+ */
+index_t *gbp_vxlan_tunnel_db;
+
+/**
+ * DB of child interfaces by SW interface index
+ */
+index_t *vxlan_tunnel_ref_db;
+
+
+static char *gbp_vxlan_tunnel_layer_strings[] = {
+#define _(n,s) [GBP_VXLAN_TUN_##n] = s,
+ forecah_gbp_vxlan_tunnel_layer
+#undef _
+};
+
+#define GBP_VXLAN_TUN_DBG(...) \
+ vlib_log_debug (gt_logger, __VA_ARGS__);
+
+
+
+always_inline gbp_vxlan_tunnel_t *
+gbp_vxlan_tunnel_get (index_t gti)
+{
+ return (pool_elt_at_index (gbp_vxlan_tunnel_pool, gti));
+}
+
+static vxlan_tunnel_ref_t *
+vxlan_tunnel_ref_get (index_t vxri)
+{
+ return (pool_elt_at_index (vxlan_tunnel_ref_pool, vxri));
+}
+
+static u8 *
+format_vxlan_tunnel_ref (u8 * s, va_list * args)
+{
+ index_t vxri = va_arg (*args, u32);
+ vxlan_tunnel_ref_t *vxr;
+
+ vxr = vxlan_tunnel_ref_get (vxri);
+
+ s = format (s, "[%U locks:%d]", format_vnet_sw_if_index_name,
+ vnet_get_main (), vxr->vxr_sw_if_index, vxr->vxr_locks);
+
+ return (s);
+}
+
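+/*
+ * Create, or take another reference on, a concrete VXLAN-GBP child tunnel
+ * of the template. A new L2 child is placed in the parent's bridge-domain
+ * with the GBP policy/learn L2 features enabled; a new L3 child is bound
+ * into the parent route-domain's IP tables with L3 learning enabled.
+ */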
+static u32
+gdb_vxlan_dep_add (gbp_vxlan_tunnel_t * gt,
+ u32 vni,
+ const ip46_address_t * src, const ip46_address_t * dst)
+{
+ vnet_vxlan_gbp_tunnel_add_del_args_t args = {
+ .is_add = 1,
+ .is_ip6 = !ip46_address_is_ip4 (src),
+ .vni = vni,
+ .src = *src,
+ .dst = *dst,
+ .instance = ~0,
+ .mode = (GBP_VXLAN_TUN_L2 == gt->gt_layer ?
+ VXLAN_GBP_TUNNEL_MODE_L2 : VXLAN_GBP_TUNNEL_MODE_L3),
+ };
+ vxlan_tunnel_ref_t *vxr;
+ u32 sw_if_index;
+ index_t vxri;
+ int rv;
+
+ sw_if_index = ~0;
+ rv = vnet_vxlan_gbp_tunnel_add_del (&args, &sw_if_index);
+
+ if (VNET_API_ERROR_TUNNEL_EXIST == rv)
+ {
+ vxri = vxlan_tunnel_ref_db[sw_if_index];
+
+ vxr = vxlan_tunnel_ref_get (vxri);
+ vxr->vxr_locks++;
+ }
+ else if (0 == rv)
+ {
+ ASSERT (~0 != sw_if_index);
+ GBP_VXLAN_TUN_DBG ("add-dep:%U %U %U %d", format_vnet_sw_if_index_name,
+ vnet_get_main (), sw_if_index,
+ format_ip46_address, src, IP46_TYPE_ANY,
+ format_ip46_address, dst, IP46_TYPE_ANY, vni);
+
+ pool_get_zero (vxlan_tunnel_ref_pool, vxr);
+
+ vxri = (vxr - vxlan_tunnel_ref_pool);
+ vxr->vxr_parent = gt - gbp_vxlan_tunnel_pool;
+ vxr->vxr_sw_if_index = sw_if_index;
+ vxr->vxr_locks = 1;
+ vxr->vxr_layer = gt->gt_layer;
+
+ /*
+ * store the child both on the parent's list and the global DB
+ */
+ vec_add1 (gt->gt_tuns, vxri);
+
+ vec_validate_init_empty (vxlan_tunnel_ref_db,
+ vxr->vxr_sw_if_index, INDEX_INVALID);
+ vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = vxri;
+
+ if (GBP_VXLAN_TUN_L2 == vxr->vxr_layer)
+ {
+ vxr->vxr_itf = gbp_itf_add_and_lock (vxr->vxr_sw_if_index,
+ gt->gt_bd_index);
+
+ gbp_itf_set_l2_output_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
+ L2OUTPUT_FEAT_GBP_POLICY_MAC);
+ gbp_itf_set_l2_input_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
+ L2INPUT_FEAT_GBP_LEARN);
+ }
+ else
+ {
+ const gbp_route_domain_t *grd;
+ fib_protocol_t fproto;
+
+ grd = gbp_route_domain_get (gt->gt_grd);
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ ip_table_bind (fproto, vxr->vxr_sw_if_index,
+ grd->grd_table_id[fproto], 1);
+
+ gbp_learn_enable (vxr->vxr_sw_if_index, GBP_LEARN_MODE_L3);
+ }
+ }
+
+ return (sw_if_index);
+}
+
+u32
+vxlan_gbp_tunnel_get_parent (u32 sw_if_index)
+{
+ ASSERT ((sw_if_index < vec_len (vxlan_tunnel_ref_db)) &&
+ (INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index]));
+
+ gbp_vxlan_tunnel_t *gt;
+ vxlan_tunnel_ref_t *vxr;
+
+ vxr = vxlan_tunnel_ref_get (vxlan_tunnel_ref_db[sw_if_index]);
+ gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);
+
+ return (gt->gt_sw_if_index);
+}
+
+gbp_vxlan_tunnel_type_t
+gbp_vxlan_tunnel_get_type (u32 sw_if_index)
+{
+ if (sw_if_index < vec_len (vxlan_tunnel_ref_db) &&
+ INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index])
+ {
+ return (VXLAN_GBP_TUNNEL);
+ }
+ else if (sw_if_index < vec_len (gbp_vxlan_tunnel_db) &&
+ INDEX_INVALID != gbp_vxlan_tunnel_db[sw_if_index])
+ {
+ return (GBP_VXLAN_TEMPLATE_TUNNEL);
+ }
+
+ ASSERT (0);
+ return (GBP_VXLAN_TEMPLATE_TUNNEL);
+}
+
+u32
+gbp_vxlan_tunnel_clone_and_lock (u32 sw_if_index,
+ const ip46_address_t * src,
+ const ip46_address_t * dst)
+{
+ gbp_vxlan_tunnel_t *gt;
+ index_t gti;
+
+ gti = gbp_vxlan_tunnel_db[sw_if_index];
+
+ if (INDEX_INVALID == gti)
+ return (~0);
+
+ gt = pool_elt_at_index (gbp_vxlan_tunnel_pool, gti);
+
+ return (gdb_vxlan_dep_add (gt, gt->gt_vni, src, dst));
+}
+
+static void
+gdb_vxlan_dep_del (index_t vxri)
+{
+ vxlan_tunnel_ref_t *vxr;
+ gbp_vxlan_tunnel_t *gt;
+ u32 pos;
+
+ vxr = vxlan_tunnel_ref_get (vxri);
+ gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);
+
+ GBP_VXLAN_TUN_DBG ("del-dep:%U", format_vxlan_tunnel_ref, vxri);
+
+ vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = INDEX_INVALID;
+ pos = vec_search (gt->gt_tuns, vxri);
+
+ ASSERT (~0 != pos);
+ vec_del1 (gt->gt_tuns, pos);
+
+ if (GBP_VXLAN_TUN_L2 == vxr->vxr_layer)
+ {
+ gbp_itf_set_l2_output_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
+ L2OUTPUT_FEAT_NONE);
+ gbp_itf_set_l2_input_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
+ L2INPUT_FEAT_NONE);
+ gbp_itf_unlock (vxr->vxr_itf);
+ }
+ else
+ {
+ fib_protocol_t fproto;
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ ip_table_bind (fproto, vxr->vxr_sw_if_index, 0, 0);
+ }
+
+ vnet_vxlan_gbp_tunnel_del (vxr->vxr_sw_if_index);
+
+ pool_put (vxlan_tunnel_ref_pool, vxr);
+}
+
+void
+vxlan_gbp_tunnel_unlock (u32 sw_if_index)
+{
+ vxlan_tunnel_ref_t *vxr;
+ index_t vxri;
+
+ vxri = vxlan_tunnel_ref_db[sw_if_index];
+
+ ASSERT (vxri != INDEX_INVALID);
+
+ vxr = vxlan_tunnel_ref_get (vxri);
+ vxr->vxr_locks--;
+
+ if (0 == vxr->vxr_locks)
+ {
+ gdb_vxlan_dep_del (vxri);
+ }
+}
+
+void
+vxlan_gbp_tunnel_lock (u32 sw_if_index)
+{
+ vxlan_tunnel_ref_t *vxr;
+ index_t vxri;
+
+ vxri = vxlan_tunnel_ref_db[sw_if_index];
+
+ ASSERT (vxri != INDEX_INVALID);
+
+ vxr = vxlan_tunnel_ref_get (vxri);
+ vxr->vxr_locks++;
+}
+
+#define foreach_gbp_vxlan_input_next \
+ _(DROP, "error-drop") \
+ _(L2_INPUT, "l2-input") \
+ _(IP4_INPUT, "ip4-input") \
+ _(IP6_INPUT, "ip6-input")
+
+typedef enum
+{
+#define _(s,n) GBP_VXLAN_INPUT_NEXT_##s,
+ foreach_gbp_vxlan_input_next
+#undef _
+ GBP_VXLAN_INPUT_N_NEXT,
+} gbp_vxlan_input_next_t;
+
+#define foreach_gbp_vxlan_error \
+ _(DECAPPED, "decapped") \
+ _(LEARNED, "learned")
+
+typedef enum
+{
+#define _(s,n) GBP_VXLAN_ERROR_##s,
+ foreach_gbp_vxlan_error
+#undef _
+ GBP_VXLAN_N_ERROR,
+} gbp_vxlan_input_error_t;
+
+static char *gbp_vxlan_error_strings[] = {
+#define _(n,s) s,
+ foreach_gbp_vxlan_error
+#undef _
+};
+
+typedef struct gbp_vxlan_trace_t_
+{
+ u8 dropped;
+ u32 vni;
+ u32 sw_if_index;
+ u16 sclass;
+ u8 flags;
+} gbp_vxlan_trace_t;
+
+
+static uword
+gbp_vxlan_decap (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, u8 is_ip4)
+{
+ u32 n_left_to_next, n_left_from, next_index, *to_next, *from;
+
+ next_index = 0;
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vxlan_gbp_header_t *vxlan_gbp0;
+ gbp_vxlan_input_next_t next0;
+ gbp_vxlan_tunnel_t *gt0;
+ vlib_buffer_t *b0;
+ u32 bi0, vni0;
+ uword *p;
+
+ bi0 = to_next[0] = from[0];
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ next0 = GBP_VXLAN_INPUT_NEXT_DROP;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ vxlan_gbp0 =
+ vlib_buffer_get_current (b0) - sizeof (vxlan_gbp_header_t);
+
+ vni0 = vxlan_gbp_get_vni (vxlan_gbp0);
+ p = hash_get (gv_db, vni0);
+
+ if (PREDICT_FALSE (NULL == p))
+ {
+ gt0 = NULL;
+ next0 = GBP_VXLAN_INPUT_NEXT_DROP;
+ }
+ else
+ {
+ gt0 = gbp_vxlan_tunnel_get (p[0]);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = gt0->gt_sw_if_index;
+
+ if (GBP_VXLAN_TUN_L2 == gt0->gt_layer)
+ /*
+ * An L2 layer tunnel goes into the BD
+ */
+ next0 = GBP_VXLAN_INPUT_NEXT_L2_INPUT;
+ else
+ {
+ /*
+ * An L3 layer tunnel needs to strip the L2 header
+ * and inject into the RD
+ */
+ ethernet_header_t *e0;
+ u16 type0;
+
+ e0 = vlib_buffer_get_current (b0);
+ type0 = clib_net_to_host_u16 (e0->type);
+ switch (type0)
+ {
+ case ETHERNET_TYPE_IP4:
+ next0 = GBP_VXLAN_INPUT_NEXT_IP4_INPUT;
+ break;
+ case ETHERNET_TYPE_IP6:
+ next0 = GBP_VXLAN_INPUT_NEXT_IP6_INPUT;
+ break;
+ default:
+ goto trace;
+ }
+ vlib_buffer_advance (b0, sizeof (*e0));
+ }
+ }
+
+ trace:
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gbp_vxlan_trace_t *tr;
+
+ tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->dropped = (next0 == GBP_VXLAN_INPUT_NEXT_DROP);
+ tr->vni = vni0;
+ tr->sw_if_index = (gt0 ? gt0->gt_sw_if_index : ~0);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+ tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_gbp_vxlan_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_vxlan_trace_t *t = va_arg (*args, gbp_vxlan_trace_t *);
+
+ s = format (s, "vni:%d dropped:%d rx:%d sclass:%d flags:%U",
+ t->vni, t->dropped, t->sw_if_index,
+ t->sclass, format_vxlan_gbp_header_gpflags, t->flags);
+
+ return (s);
+}
+
+static uword
+gbp_vxlan4_decap (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ return gbp_vxlan_decap (vm, node, from_frame, 1);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_vxlan4_input_node) =
+{
+ .function = gbp_vxlan4_decap,
+ .name = "gbp-vxlan4",
+ .vector_size = sizeof (u32),
+ .n_errors = GBP_VXLAN_N_ERROR,
+ .error_strings = gbp_vxlan_error_strings,
+ .n_next_nodes = GBP_VXLAN_INPUT_N_NEXT,
+ .format_trace = format_gbp_vxlan_rx_trace,
+ .next_nodes = {
+#define _(s,n) [GBP_VXLAN_INPUT_NEXT_##s] = n,
+ foreach_gbp_vxlan_input_next
+#undef _
+ },
+};
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_vxlan4_input_node, gbp_vxlan4_decap)
+
+/* *INDENT-ON* */
+
+void
+gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx)
+{
+ gbp_vxlan_tunnel_t *gt;
+
+ /* *INDENT-OFF* */
+ pool_foreach (gt, gbp_vxlan_tunnel_pool,
+ ({
+ if (WALK_CONTINUE != cb(gt, ctx))
+ break;
+ }));
+ /* *INDENT-ON* */
+}
+
+static walk_rc_t
+gbp_vxlan_tunnel_show_one (gbp_vxlan_tunnel_t * gt, void *ctx)
+{
+ vlib_cli_output (ctx, "%U", format_gbp_vxlan_tunnel,
+ gt - gbp_vxlan_tunnel_pool);
+
+ return (WALK_CONTINUE);
+}
+
+static u8 *
+format_gbp_vxlan_tunnel_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+
+ return format (s, "gbp-vxlan-%d", dev_instance);
+}
+
+u8 *
+format_gbp_vxlan_tunnel_layer (u8 * s, va_list * args)
+{
+ gbp_vxlan_tunnel_layer_t gl = va_arg (*args, gbp_vxlan_tunnel_layer_t);
+ s = format (s, "%s", gbp_vxlan_tunnel_layer_strings[gl]);
+
+ return (s);
+}
+
+u8 *
+format_gbp_vxlan_tunnel (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ CLIB_UNUSED (int verbose) = va_arg (*args, int);
+ gbp_vxlan_tunnel_t *gt = gbp_vxlan_tunnel_get (dev_instance);
+ index_t *vxri;
+
+ s = format (s, "GBP VXLAN tunnel: hw:%d sw:%d vni:%d %U",
+ gt->gt_hw_if_index, gt->gt_sw_if_index, gt->gt_vni,
+ format_gbp_vxlan_tunnel_layer, gt->gt_layer);
+ if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
+ s = format (s, " BD:%d bd-index:%d", gt->gt_bd_rd_id, gt->gt_bd_index);
+ else
+ s = format (s, " RD:%d fib-index:[%d,%d]",
+ gt->gt_bd_rd_id,
+ gt->gt_fib_index[FIB_PROTOCOL_IP4],
+ gt->gt_fib_index[FIB_PROTOCOL_IP6]);
+
+ s = format (s, " children:[");
+ vec_foreach (vxri, gt->gt_tuns)
+ {
+ s = format (s, "%U, ", format_vxlan_tunnel_ref, *vxri);
+ }
+ s = format (s, "]");
+
+ return s;
+}
+
+typedef struct gbp_vxlan_tx_trace_t_
+{
+ u32 vni;
+} gbp_vxlan_tx_trace_t;
+
+u8 *
+format_gbp_vxlan_tx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gbp_vxlan_tx_trace_t *t = va_arg (*args, gbp_vxlan_tx_trace_t *);
+
+ s = format (s, "GBP-VXLAN: vni:%d", t->vni);
+
+ return (s);
+}
+
+clib_error_t *
+gbp_vxlan_interface_admin_up_down (vnet_main_t * vnm,
+ u32 hw_if_index, u32 flags)
+{
+ vnet_hw_interface_t *hi;
+ u32 ti;
+
+ hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ if (NULL == gbp_vxlan_tunnel_db ||
+ hi->sw_if_index >= vec_len (gbp_vxlan_tunnel_db))
+ return (NULL);
+
+ ti = gbp_vxlan_tunnel_db[hi->sw_if_index];
+
+ if (~0 == ti)
+ /* not one of ours */
+ return (NULL);
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ vnet_hw_interface_set_flags (vnm, hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ else
+ vnet_hw_interface_set_flags (vnm, hw_if_index, 0);
+
+ return (NULL);
+}
+
+static uword
+gbp_vxlan_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ clib_warning ("you shouldn't be here, leaking buffers...");
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (gbp_vxlan_device_class) = {
+ .name = "GBP VXLAN tunnel-template",
+ .format_device_name = format_gbp_vxlan_tunnel_name,
+ .format_device = format_gbp_vxlan_tunnel,
+ .format_tx_trace = format_gbp_vxlan_tx_trace,
+ .admin_up_down_function = gbp_vxlan_interface_admin_up_down,
+ .tx_function = gbp_vxlan_interface_tx,
+};
+
+VNET_HW_INTERFACE_CLASS (gbp_vxlan_hw_interface_class) = {
+ .name = "GBP-VXLAN",
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+/* *INDENT-ON* */
+
+int
+gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
+ u32 bd_rd_id, u32 * sw_if_indexp)
+{
+ gbp_vxlan_tunnel_t *gt;
+ index_t gti;
+ uword *p;
+ int rv;
+
+ rv = 0;
+ p = hash_get (gv_db, vni);
+
+ GBP_VXLAN_TUN_DBG ("add: %d %d %d", vni, layer, bd_rd_id);
+
+ if (NULL == p)
+ {
+ vnet_sw_interface_t *si;
+ vnet_hw_interface_t *hi;
+ index_t gbi, grdi;
+ vnet_main_t *vnm;
+
+ gbi = grdi = INDEX_INVALID;
+
+ if (layer == GBP_VXLAN_TUN_L2)
+ {
+ gbi = gbp_bridge_domain_find_and_lock (bd_rd_id);
+
+ if (INDEX_INVALID == gbi)
+ {
+ return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
+ }
+ }
+ else
+ {
+ grdi = gbp_route_domain_find_and_lock (bd_rd_id);
+
+ if (INDEX_INVALID == grdi)
+ {
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+ }
+ }
+
+ vnm = vnet_get_main ();
+ pool_get (gbp_vxlan_tunnel_pool, gt);
+ gti = gt - gbp_vxlan_tunnel_pool;
+
+ gt->gt_vni = vni;
+ gt->gt_layer = layer;
+ gt->gt_bd_rd_id = bd_rd_id;
+ gt->gt_hw_if_index = vnet_register_interface (vnm,
+ gbp_vxlan_device_class.index,
+ gti,
+ gbp_vxlan_hw_interface_class.index,
+ gti);
+
+ hi = vnet_get_hw_interface (vnm, gt->gt_hw_if_index);
+
+ gt->gt_sw_if_index = hi->sw_if_index;
+
+ /* don't flood packets in a BD to these interfaces */
+ si = vnet_get_sw_interface (vnm, gt->gt_sw_if_index);
+ si->flood_class = VNET_FLOOD_CLASS_NO_FLOOD;
+
+ if (layer == GBP_VXLAN_TUN_L2)
+ {
+ gbp_bridge_domain_t *gb;
+
+ gb = gbp_bridge_domain_get (gbi);
+
+ gt->gt_gbd = gbi;
+ gt->gt_bd_index = gb->gb_bd_id;
+ gb->gb_vni_sw_if_index = gt->gt_sw_if_index;
+ /* set it up as a GBP interface */
+ gt->gt_itf = gbp_itf_add_and_lock (gt->gt_sw_if_index,
+ gt->gt_bd_index);
+ gbp_learn_enable (gt->gt_sw_if_index, GBP_LEARN_MODE_L2);
+ }
+ else
+ {
+ gbp_route_domain_t *grd;
+ fib_protocol_t fproto;
+
+ grd = gbp_route_domain_get (grdi);
+
+ gt->gt_grd = grdi;
+ grd->grd_vni_sw_if_index = gt->gt_sw_if_index;
+
+ gbp_learn_enable (gt->gt_sw_if_index, GBP_LEARN_MODE_L3);
+
+ ip4_sw_interface_enable_disable (gt->gt_sw_if_index, 1);
+ ip6_sw_interface_enable_disable (gt->gt_sw_if_index, 1);
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ {
+ gt->gt_fib_index[fproto] = grd->grd_fib_index[fproto];
+
+ ip_table_bind (fproto, gt->gt_sw_if_index,
+ grd->grd_table_id[fproto], 1);
+ }
+ }
+
+ /*
+ * save the tunnel by VNI and by sw_if_index
+ */
+ hash_set (gv_db, vni, gti);
+
+ vec_validate_init_empty (gbp_vxlan_tunnel_db, gt->gt_sw_if_index, INDEX_INVALID);
+ gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = gti;
+
+ if (sw_if_indexp)
+ *sw_if_indexp = gt->gt_sw_if_index;
+
+ vxlan_gbp_register_udp_ports ();
+ }
+ else
+ {
+ gti = p[0];
+ rv = VNET_API_ERROR_IF_ALREADY_EXISTS;
+ }
+
+ GBP_VXLAN_TUN_DBG ("add: %U", format_gbp_vxlan_tunnel, gti);
+
+ return (rv);
+}
+
+int
+gbp_vxlan_tunnel_del (u32 vni)
+{
+ gbp_vxlan_tunnel_t *gt;
+ uword *p;
+
+ p = hash_get (gv_db, vni);
+
+ if (NULL != p)
+ {
+ vnet_main_t *vnm;
+
+ vnm = vnet_get_main ();
+ gt = gbp_vxlan_tunnel_get (p[0]);
+
+ vxlan_gbp_unregister_udp_ports ();
+
+ GBP_VXLAN_TUN_DBG ("del: %U", format_gbp_vxlan_tunnel,
+ gt - gbp_vxlan_tunnel_pool);
+
+ gbp_endpoint_flush (gt->gt_sw_if_index);
+ ASSERT (0 == vec_len (gt->gt_tuns));
+ vec_free (gt->gt_tuns);
+
+ if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
+ {
+ gbp_learn_disable (gt->gt_sw_if_index, GBP_LEARN_MODE_L2);
+ gbp_itf_unlock (gt->gt_itf);
+ gbp_bridge_domain_unlock (gt->gt_gbd);
+ }
+ else
+ {
+ fib_protocol_t fproto;
+
+ FOR_EACH_FIB_IP_PROTOCOL (fproto)
+ ip_table_bind (fproto, gt->gt_sw_if_index, 0, 0);
+
+ ip4_sw_interface_enable_disable (gt->gt_sw_if_index, 0);
+ ip6_sw_interface_enable_disable (gt->gt_sw_if_index, 0);
+
+ gbp_learn_disable (gt->gt_sw_if_index, GBP_LEARN_MODE_L3);
+ gbp_route_domain_unlock (gt->gt_grd);
+ }
+
+ vnet_sw_interface_set_flags (vnm, gt->gt_sw_if_index, 0);
+ vnet_delete_hw_interface (vnm, gt->gt_hw_if_index);
+
+ hash_unset (gv_db, vni);
+ gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = INDEX_INVALID;
+
+ pool_put (gbp_vxlan_tunnel_pool, gt);
+ }
+ else
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ return (0);
+}
+
+static clib_error_t *
+gbp_vxlan_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ gbp_vxlan_walk (gbp_vxlan_tunnel_show_one, vm);
+
+ return (NULL);
+}
+
+/*?
+ * Show Group Based Policy VXLAN tunnels
+ *
+ * @cliexpar
+ * @cliexstart{show gbp vxlan}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_vxlan_show_node, static) = {
+ .path = "show gbp vxlan",
+ .short_help = "show gbp vxlan\n",
+ .function = gbp_vxlan_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_vxlan_init (vlib_main_t * vm)
+{
+ u32 slot4;
+
+ /*
+ * insert ourselves into the VXLAN-GBP arc to collect the no-tunnel
+ * packets.
+ */
+ slot4 = vlib_node_add_next_with_slot (vm,
+ vxlan4_gbp_input_node.index,
+ gbp_vxlan4_input_node.index,
+ VXLAN_GBP_INPUT_NEXT_NO_TUNNEL);
+ ASSERT (slot4 == VXLAN_GBP_INPUT_NEXT_NO_TUNNEL);
+
+ /* slot6 = vlib_node_add_next_with_slot (vm, */
+ /* vxlan6_gbp_input_node.index, */
+ /* gbp_vxlan6_input_node.index, */
+ /* VXLAN_GBP_INPUT_NEXT_NO_TUNNEL); */
+ /* ASSERT (slot6 == VXLAN_GBP_INPUT_NEXT_NO_TUNNEL); */
+
+ gt_logger = vlib_log_register_class ("gbp", "tun");
+
+ return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_vxlan_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
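
A minimal control-plane sketch of how the API added in gbp_vxlan.c above is intended to be driven (not part of the patch; the VNI, bridge-domain id and VTEP addresses are illustrative and error handling is omitted):

    /* sketch only: VNI 99 bound to bridge-domain 1, addresses supplied by the caller */
    u32 tmpl_sw_if_index = ~0, child_sw_if_index;
    ip46_address_t src, dst;          /* filled in by the caller */

    /* create the L2 template tunnel; it can only receive, never send */
    gbp_vxlan_tunnel_add (99, GBP_VXLAN_TUN_L2, 1, &tmpl_sw_if_index);

    /* clone a concrete vxlan-gbp tunnel for a learned src/dst VTEP pair */
    child_sw_if_index = gbp_vxlan_tunnel_clone_and_lock (tmpl_sw_if_index,
                                                         &src, &dst);

    /* learned endpoints hold locks; the child is deleted on the last unlock */
    vxlan_gbp_tunnel_unlock (child_sw_if_index);

    /* remove the template once the VNI is no longer needed */
    gbp_vxlan_tunnel_del (99);
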
diff --git a/src/plugins/gbp/gbp_vxlan.h b/src/plugins/gbp/gbp_vxlan.h
new file mode 100644
index 00000000000..7aa22e351a2
--- /dev/null
+++ b/src/plugins/gbp/gbp_vxlan.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_VXLAN_H__
+#define __GBP_VXLAN_H__
+
+#include <vnet/fib/fib_types.h>
+
+#define foreach_gbp_vxlan_tunnel_layer \
+ _(L2, "l2") \
+ _(L3, "l3")
+
+typedef enum gbp_vxlan_tunnel_layer_t_
+{
+#define _(s,n) GBP_VXLAN_TUN_##s,
+ foreach_gbp_vxlan_tunnel_layer
+#undef _
+} gbp_vxlan_tunnel_layer_t;
+
+/**
+ * GBP VXLAN (template) tunnel.
+ * A template tunnel has only a VNI; it does not have src/dst addresses.
+ * As such it cannot be used to send traffic. It is used in the RX path
+ * to receive vxlan-gbp packets that do not match an existing tunnel.
+ */
+typedef struct gbp_vxlan_tunnel_t_
+{
+ u32 gt_hw_if_index;
+ u32 gt_sw_if_index;
+ u32 gt_vni;
+
+ /**
+ * The BD or RD value (depending on the layer) that the tunnel is bound to
+ */
+ u32 gt_bd_rd_id;
+ gbp_vxlan_tunnel_layer_t gt_layer;
+
+ union
+ {
+ struct
+ {
+ /**
+ * BD index (if L2)
+ */
+ u32 gt_bd_index;
+ /**
+ * Reference to the GBP-BD
+ */
+ index_t gt_gbd;
+ };
+ struct
+ {
+ /**
+ * FIB indices (if L3)
+ */
+ u32 gt_fib_index[FIB_PROTOCOL_IP_MAX];
+ /**
+ * References to the GBP-RD
+ */
+ index_t gt_grd;
+ };
+ };
+
+ /**
+ * gbp-itf config for this interface
+ */
+ index_t gt_itf;
+
+ /**
+ * list of child vxlan-gbp tunnels built from this template
+ */
+ index_t *gt_tuns;
+} gbp_vxlan_tunnel_t;
+
+/**
+ * The different types of interfaces that endpoints are learned on
+ */
+typedef enum gbp_vxlan_tunnel_type_t_
+{
+ /**
+ * This is the object type defined above.
+ * A template representation of a vxlan-gbp tunnel. From this tunnel
+ * type, real vxlan-gbp tunnels are created (by cloning the VNI)
+ */
+ GBP_VXLAN_TEMPLATE_TUNNEL,
+
+ /**
+ * A real VXLAN-GBP tunnel (from vnet/vxlan-gbp/...)
+ */
+ VXLAN_GBP_TUNNEL,
+} gbp_vxlan_tunnel_type_t;
+
+extern int gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
+ u32 bd_rd_id, u32 * sw_if_indexp);
+extern int gbp_vxlan_tunnel_del (u32 vni);
+
+extern gbp_vxlan_tunnel_type_t gbp_vxlan_tunnel_get_type (u32 sw_if_index);
+
+extern u32 gbp_vxlan_tunnel_clone_and_lock (u32 parent_tunnel,
+ const ip46_address_t * src,
+ const ip46_address_t * dst);
+
+extern void vxlan_gbp_tunnel_lock (u32 sw_if_index);
+extern void vxlan_gbp_tunnel_unlock (u32 sw_if_index);
+extern u32 vxlan_gbp_tunnel_get_parent (u32 sw_if_index);
+
+typedef walk_rc_t (*gbp_vxlan_cb_t) (gbp_vxlan_tunnel_t * gt, void *ctx);
+extern void gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx);
+
+extern u8 *format_gbp_vxlan_tunnel (u8 * s, va_list * args);
+extern u8 *format_gbp_vxlan_tunnel_layer (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
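
A short sketch of the walk API declared above, e.g. from a hypothetical show/debug routine (the callback name and counter are illustrative, not part of the patch):

    static walk_rc_t
    count_one_tunnel (gbp_vxlan_tunnel_t * gt, void *ctx)
    {
      u32 *count = ctx;

      (*count)++;                 /* count every template tunnel */
      return (WALK_CONTINUE);
    }

    u32 n_tunnels = 0;
    gbp_vxlan_walk (count_one_tunnel, &n_tunnels);
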
diff --git a/src/vnet/CMakeLists.txt b/src/vnet/CMakeLists.txt
index a45f921fb08..6d26b5ade67 100644
--- a/src/vnet/CMakeLists.txt
+++ b/src/vnet/CMakeLists.txt
@@ -679,6 +679,7 @@ list(APPEND VNET_SOURCES
vxlan-gbp/encap.c
vxlan-gbp/vxlan_gbp_api.c
vxlan-gbp/vxlan_gbp.c
+ vxlan-gbp/vxlan_gbp_packet.c
)
list(APPEND VNET_HEADERS
diff --git a/src/vnet/ethernet/mac_address.h b/src/vnet/ethernet/mac_address.h
index 7b4390dfb48..a249cb58e7c 100644
--- a/src/vnet/ethernet/mac_address.h
+++ b/src/vnet/ethernet/mac_address.h
@@ -37,6 +37,13 @@ mac_address_from_bytes (mac_address_t * mac, const u8 * bytes)
clib_memcpy (mac->bytes, bytes, 6);
}
+static_always_inline void
+mac_address_to_bytes (const mac_address_t * mac, u8 * bytes)
+{
+ /* copy only the 6 MAC address bytes into the destination */
+ clib_memcpy (bytes, mac->bytes, 6);
+}
+
static_always_inline int
mac_address_is_zero (const mac_address_t * mac)
{
@@ -57,6 +64,12 @@ mac_address_from_u64 (u64 u, mac_address_t * mac)
mac->bytes[5] = 0;
}
+static_always_inline void
+mac_address_copy (mac_address_t * dst, const mac_address_t * src)
+{
+ mac_address_from_bytes (dst, src->bytes);
+}
+
extern uword unformat_mac_address_t (unformat_input_t * input,
va_list * args);
extern u8 *format_mac_address_t (u8 * s, va_list * args);
diff --git a/src/vnet/interface_funcs.h b/src/vnet/interface_funcs.h
index a3bfdc970d4..b7d900740d5 100644
--- a/src/vnet/interface_funcs.h
+++ b/src/vnet/interface_funcs.h
@@ -271,6 +271,13 @@ vnet_sw_interface_is_api_valid (vnet_main_t * vnm, u32 sw_if_index)
&& vnet_sw_interface_is_api_visible (vnm, sw_if_index);
}
+always_inline const u8 *
+vnet_sw_interface_get_hw_address (vnet_main_t * vnm, u32 sw_if_index)
+{
+ vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ return hw->hw_address;
+}
+
always_inline uword
vnet_hw_interface_get_flags (vnet_main_t * vnm, u32 hw_if_index)
{
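
The new interface and MAC helpers above compose naturally; a minimal sketch assuming vnm and sw_if_index are already in scope:

    /* snapshot an interface's hardware address into a mac_address_t */
    mac_address_t mac;
    mac_address_from_bytes (&mac,
                            vnet_sw_interface_get_hw_address (vnm, sw_if_index));

    /* write it back out as raw bytes, e.g. when filling an API reply */
    u8 bytes[6];
    mac_address_to_bytes (&mac, bytes);
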
diff --git a/src/vnet/ip/ip_types_api.c b/src/vnet/ip/ip_types_api.c
index 11b52760fad..3d1f8065f5d 100644
--- a/src/vnet/ip/ip_types_api.c
+++ b/src/vnet/ip/ip_types_api.c
@@ -107,6 +107,7 @@ ip_prefix_decode (const vl_api_prefix_t * in, fib_prefix_t * out)
break;
}
out->fp_len = in->address_length;
+ out->___fp___pad = 0;
ip_address_decode (&in->address, &out->fp_addr);
}
diff --git a/src/vnet/l2/l2.api b/src/vnet/l2/l2.api
index 8b65bc36afc..7c71ea6e151 100644
--- a/src/vnet/l2/l2.api
+++ b/src/vnet/l2/l2.api
@@ -483,7 +483,6 @@ autoreply define bd_ip_mac_add_del
u32 context;
u32 bd_id;
u8 is_add;
- u8 is_ipv6;
vl_api_address_t ip;
vl_api_mac_address_t mac;
};
diff --git a/src/vnet/l2/l2_input.h b/src/vnet/l2/l2_input.h
index f55e70371e0..57fca57fc29 100644
--- a/src/vnet/l2/l2_input.h
+++ b/src/vnet/l2/l2_input.h
@@ -104,12 +104,13 @@ l2input_bd_config (u32 bd_index)
_(FLOOD, "l2-flood") \
_(ARP_TERM, "arp-term-l2bd") \
_(UU_FLOOD, "l2-flood") \
- _(UU_FWD, "l2-uu-fwd") \
_(GBP_FWD, "gbp-fwd") \
+ _(UU_FWD, "l2-uu-fwd") \
_(FWD, "l2-fwd") \
_(RW, "l2-rw") \
_(LEARN, "l2-learn") \
_(L2_EMULATION, "l2-emulation") \
+ _(GBP_LEARN, "gbp-learn-l2") \
_(GBP_NULL_CLASSIFY, "gbp-null-classify") \
_(GBP_SRC_CLASSIFY, "gbp-src-classify") \
_(VTR, "l2-input-vtr") \
diff --git a/src/vnet/l2/l2_output.h b/src/vnet/l2/l2_output.h
index a6db776841d..33eeb8e6e9b 100644
--- a/src/vnet/l2/l2_output.h
+++ b/src/vnet/l2/l2_output.h
@@ -81,7 +81,8 @@ extern vlib_node_registration_t l2output_node;
#define foreach_l2output_feat \
_(OUTPUT, "interface-output") \
_(SPAN, "span-l2-output") \
- _(GBP_POLICY, "gbp-policy") \
+ _(GBP_POLICY_PORT, "gbp-policy-port") \
+ _(GBP_POLICY_MAC, "gbp-policy-mac") \
_(CFM, "feature-bitmap-drop") \
_(QOS, "feature-bitmap-drop") \
_(ACL, "l2-output-acl") \
diff --git a/src/vnet/vxlan-gbp/decap.c b/src/vnet/vxlan-gbp/decap.c
index 1602e940f3f..0d361a37751 100644
--- a/src/vnet/vxlan-gbp/decap.c
+++ b/src/vnet/vxlan-gbp/decap.c
@@ -29,6 +29,7 @@ typedef struct
u32 error;
u32 vni;
u16 sclass;
+ u8 flags;
} vxlan_gbp_rx_trace_t;
static u8 *
@@ -44,8 +45,10 @@ format_vxlan_gbp_rx_trace (u8 * s, va_list * args)
t->vni);
return format (s,
"VXLAN_GBP decap from vxlan_gbp_tunnel%d vni %d sclass %d"
- " next %d error %d",
- t->tunnel_index, t->vni, t->sclass, t->next_index, t->error);
+ " flags %U next %d error %d",
+ t->tunnel_index, t->vni, t->sclass,
+ format_vxlan_gbp_header_gpflags, t->flags,
+ t->next_index, t->error);
}
always_inline u32
@@ -161,10 +164,34 @@ vxlan6_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache6 * cache,
return t0;
}
+always_inline vxlan_gbp_input_next_t
+vxlan_gbp_tunnel_get_next (const vxlan_gbp_tunnel_t * t, vlib_buffer_t * b0)
+{
+ if (VXLAN_GBP_TUNNEL_MODE_L2 == t->mode)
+ return (VXLAN_GBP_INPUT_NEXT_L2_INPUT);
+ else
+ {
+ ethernet_header_t *e0;
+ u16 type0;
+
+ e0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, sizeof (*e0));
+ type0 = clib_net_to_host_u16 (e0->type);
+ switch (type0)
+ {
+ case ETHERNET_TYPE_IP4:
+ return (VXLAN_GBP_INPUT_NEXT_IP4_INPUT);
+ case ETHERNET_TYPE_IP6:
+ return (VXLAN_GBP_INPUT_NEXT_IP6_INPUT);
+ }
+ }
+ return (VXLAN_GBP_INPUT_NEXT_DROP);
+}
+
always_inline uword
vxlan_gbp_input (vlib_main_t * vm,
vlib_node_runtime_t * node,
- vlib_frame_t * from_frame, u32 is_ip4)
+ vlib_frame_t * from_frame, u8 is_ip4)
{
vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
vnet_main_t *vnm = vxm->vnet_main;
@@ -239,10 +266,6 @@ vxlan_gbp_input (vlib_main_t * vm,
ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
}
- /* pop vxlan_gbp */
- vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
- vlib_buffer_advance (b1, sizeof *vxlan_gbp1);
-
u32 fi0 = buf_fib_index (b0, is_ip4);
u32 fi1 = buf_fib_index (b1, is_ip4);
@@ -270,16 +293,19 @@ vxlan_gbp_input (vlib_main_t * vm,
u32 len0 = vlib_buffer_length_in_chain (vm, b0);
u32 len1 = vlib_buffer_length_in_chain (vm, b1);
- u32 next0, next1;
+ vxlan_gbp_input_next_t next0, next1;
u8 error0 = 0, error1 = 0;
u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
u8 flags1 = vxlan_gbp_get_flags (vxlan_gbp1);
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ /* pop vxlan_gbp */
+ vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
+ vlib_buffer_advance (b1, sizeof *vxlan_gbp1);
+
/* Validate VXLAN_GBP tunnel encap-fib index against packet */
if (PREDICT_FALSE
(t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
{
- next0 = VXLAN_GBP_INPUT_NEXT_DROP;
-
if (t0 != 0
&& flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
{
@@ -287,22 +313,18 @@ vxlan_gbp_input (vlib_main_t * vm,
vlib_increment_combined_counter
(drop_counter, thread_index, stats_t0->sw_if_index, 1,
len0);
+ next0 = VXLAN_GBP_INPUT_NEXT_DROP;
}
else
- error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ {
+ error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
+ }
b0->error = node->errors[error0];
}
else
{
- next0 = t0->decap_next_index;
- vnet_buffer2 (b0)->gbp.flags =
- vxlan_gbp_get_gpflags (vxlan_gbp0);
- vnet_buffer2 (b0)->gbp.src_epg =
- vxlan_gbp_get_sclass (vxlan_gbp0);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
- vnet_update_l2_len (b0);
+ next0 = vxlan_gbp_tunnel_get_next (t0, b0);
/* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
@@ -311,12 +333,13 @@ vxlan_gbp_input (vlib_main_t * vm,
pkts_decapsulated++;
}
- /* Validate VXLAN_GBP tunnel encap-fib index against packet */
+ vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+ vnet_buffer2 (b0)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp0);
+
+
if (PREDICT_FALSE
(t1 == 0 || flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
{
- next1 = VXLAN_GBP_INPUT_NEXT_DROP;
-
if (t1 != 0
&& flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
{
@@ -324,22 +347,18 @@ vxlan_gbp_input (vlib_main_t * vm,
vlib_increment_combined_counter
(drop_counter, thread_index, stats_t1->sw_if_index, 1,
len1);
+ next1 = VXLAN_GBP_INPUT_NEXT_DROP;
}
else
- error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ {
+ error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ next1 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
+ }
b1->error = node->errors[error1];
}
else
{
- next1 = t1->decap_next_index;
- vnet_buffer2 (b1)->gbp.flags =
- vxlan_gbp_get_gpflags (vxlan_gbp1);
- vnet_buffer2 (b1)->gbp.src_epg =
- vxlan_gbp_get_sclass (vxlan_gbp1);
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- if (PREDICT_TRUE (next1 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
- vnet_update_l2_len (b1);
+ next1 = vxlan_gbp_tunnel_get_next (t1, b1);
/* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
@@ -349,6 +368,12 @@ vxlan_gbp_input (vlib_main_t * vm,
(rx_counter, thread_index, stats_t1->sw_if_index, 1, len1);
}
+ vnet_buffer2 (b1)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
+ vnet_buffer2 (b1)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp1);
+
+ vnet_update_l2_len (b0);
+ vnet_update_l2_len (b1);
+
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
vxlan_gbp_rx_trace_t *tr =
@@ -358,6 +383,7 @@ vxlan_gbp_input (vlib_main_t * vm,
tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
}
if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -368,6 +394,7 @@ vxlan_gbp_input (vlib_main_t * vm,
tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels;
tr->vni = vxlan_gbp_get_vni (vxlan_gbp1);
tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
}
vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
@@ -395,9 +422,6 @@ vxlan_gbp_input (vlib_main_t * vm,
else
ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
- /* pop (ip, udp, vxlan_gbp) */
- vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));
-
u32 fi0 = buf_fib_index (b0, is_ip4);
vxlan_gbp_tunnel_t *t0, *stats_t0 = 0;
@@ -412,15 +436,16 @@ vxlan_gbp_input (vlib_main_t * vm,
uword len0 = vlib_buffer_length_in_chain (vm, b0);
- u32 next0;
+ vxlan_gbp_input_next_t next0;
u8 error0 = 0;
u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
+
+ /* pop (ip, udp, vxlan_gbp) */
+ vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));
/* Validate VXLAN_GBP tunnel encap-fib index against packet */
if (PREDICT_FALSE
(t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
{
- next0 = VXLAN_GBP_INPUT_NEXT_DROP;
-
if (t0 != 0
&& flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
{
@@ -428,24 +453,18 @@ vxlan_gbp_input (vlib_main_t * vm,
vlib_increment_combined_counter
(drop_counter, thread_index, stats_t0->sw_if_index, 1,
len0);
+ next0 = VXLAN_GBP_INPUT_NEXT_DROP;
}
else
- error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ {
+ error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+ next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
+ }
b0->error = node->errors[error0];
}
else
{
- next0 = t0->decap_next_index;
- vnet_buffer2 (b0)->gbp.flags =
- vxlan_gbp_get_gpflags (vxlan_gbp0);
- vnet_buffer2 (b0)->gbp.src_epg =
- vxlan_gbp_get_sclass (vxlan_gbp0);
-
-
- /* Required to make the l2 tag push / pop code work on l2 subifs */
- if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
- vnet_update_l2_len (b0);
-
+ next0 = vxlan_gbp_tunnel_get_next (t0, b0);
/* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
pkts_decapsulated++;
@@ -453,6 +472,11 @@ vxlan_gbp_input (vlib_main_t * vm,
vlib_increment_combined_counter
(rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
}
+ vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+ vnet_buffer2 (b0)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -463,6 +487,7 @@ vxlan_gbp_input (vlib_main_t * vm,
tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+ tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.api b/src/vnet/vxlan-gbp/vxlan_gbp.api
index 6e41ec8b2b9..3e213ddc563 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.api
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.api
@@ -23,7 +23,6 @@ import "vnet/ip/ip_types.api";
@param dst - Destination IP address, can be multicast
@param mcast_sw_if_index - Interface for multicast destination
@param encap_table_id - Encap route table
- @param decap_next_index - Name of decap next graph node
@param vni - The VXLAN Network Identifier, uint24
@param sw_ifindex - Ignored in add message, set in details
*/
@@ -34,7 +33,6 @@ typedef vxlan_gbp_tunnel
vl_api_address_t dst;
u32 mcast_sw_if_index;
u32 encap_table_id;
- u32 decap_next_index;
u32 vni;
u32 sw_if_index;
};
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.c b/src/vnet/vxlan-gbp/vxlan_gbp.c
index ec4f923bdbc..691cc762549 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.c
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.c
@@ -32,16 +32,21 @@
vxlan_gbp_main_t vxlan_gbp_main;
-static u8 *
-format_decap_next (u8 * s, va_list * args)
+u8 *
+format_vxlan_gbp_tunnel_mode (u8 * s, va_list * args)
{
- u32 next_index = va_arg (*args, u32);
+ vxlan_gbp_tunnel_mode_t mode = va_arg (*args, vxlan_gbp_tunnel_mode_t);
- if (next_index == VXLAN_GBP_INPUT_NEXT_DROP)
- return format (s, "drop");
- else
- return format (s, "index %d", next_index);
- return s;
+ switch (mode)
+ {
+ case VXLAN_GBP_TUNNEL_MODE_L2:
+ s = format (s, "L2");
+ break;
+ case VXLAN_GBP_TUNNEL_MODE_L3:
+ s = format (s, "L3");
+ break;
+ }
+ return (s);
}
u8 *
@@ -51,17 +56,15 @@ format_vxlan_gbp_tunnel (u8 * s, va_list * args)
s = format (s,
"[%d] instance %d src %U dst %U vni %d fib-idx %d"
- " sw-if-idx %d ",
+ " sw-if-idx %d mode %U ",
t->dev_instance, t->user_instance,
format_ip46_address, &t->src, IP46_TYPE_ANY,
format_ip46_address, &t->dst, IP46_TYPE_ANY,
- t->vni, t->encap_fib_index, t->sw_if_index);
+ t->vni, t->encap_fib_index, t->sw_if_index,
+ format_vxlan_gbp_tunnel_mode, t->mode);
s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index);
- if (PREDICT_FALSE (t->decap_next_index != VXLAN_GBP_INPUT_NEXT_L2_INPUT))
- s = format (s, "decap-next-%U ", format_decap_next, t->decap_next_index);
-
if (PREDICT_FALSE (ip46_address_is_multicast (&t->dst)))
s = format (s, "mcast-sw-if-idx %d ", t->mcast_sw_if_index);
@@ -210,9 +213,9 @@ const static fib_node_vft_t vxlan_gbp_vft = {
#define foreach_copy_field \
_(vni) \
+_(mode) \
_(mcast_sw_if_index) \
_(encap_fib_index) \
-_(decap_next_index) \
_(src) \
_(dst)
@@ -267,18 +270,6 @@ vxlan_gbp_rewrite (vxlan_gbp_tunnel_t * t, bool is_ip6)
vnet_rewrite_set_data (*t, &h, len);
}
-static bool
-vxlan_gbp_decap_next_is_valid (vxlan_gbp_main_t * vxm, u32 is_ip6,
- u32 decap_next_index)
-{
- vlib_main_t *vm = vxm->vlib_main;
- u32 input_idx = (!is_ip6) ?
- vxlan4_gbp_input_node.index : vxlan6_gbp_input_node.index;
- vlib_node_runtime_t *r = vlib_node_get_runtime (vm, input_idx);
-
- return decap_next_index < r->n_next_nodes;
-}
-
static uword
vtep_addr_ref (ip46_address_t * ip)
{
@@ -434,14 +425,11 @@ int vnet_vxlan_gbp_tunnel_add_del
/* adding a tunnel: tunnel must not already exist */
if (p)
- return VNET_API_ERROR_TUNNEL_EXIST;
-
- /* if not set explicitly, default to l2 */
- if (a->decap_next_index == ~0)
- a->decap_next_index = VXLAN_GBP_INPUT_NEXT_L2_INPUT;
- if (!vxlan_gbp_decap_next_is_valid (vxm, is_ip6, a->decap_next_index))
- return VNET_API_ERROR_INVALID_DECAP_NEXT;
-
+ {
+ t = pool_elt_at_index (vxm->tunnels, *p);
+ *sw_if_indexp = t->sw_if_index;
+ return VNET_API_ERROR_TUNNEL_EXIST;
+ }
pool_get_aligned (vxm->tunnels, t, CLIB_CACHE_LINE_BYTES);
clib_memset (t, 0, sizeof (*t));
dev_instance = t - vxm->tunnels;
@@ -505,6 +493,12 @@ int vnet_vxlan_gbp_tunnel_add_del
t->sw_if_index = sw_if_index = hi->sw_if_index;
+ if (VXLAN_GBP_TUNNEL_MODE_L3 == t->mode)
+ {
+ ip4_sw_interface_enable_disable (t->sw_if_index, 1);
+ ip6_sw_interface_enable_disable (t->sw_if_index, 1);
+ }
+
vec_validate_init_empty (vxm->tunnel_index_by_sw_if_index, sw_if_index,
~0);
vxm->tunnel_index_by_sw_if_index[sw_if_index] = dev_instance;
@@ -626,6 +620,12 @@ int vnet_vxlan_gbp_tunnel_add_del
sw_if_index = t->sw_if_index;
vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */ );
+ if (VXLAN_GBP_TUNNEL_MODE_L3 == t->mode)
+ {
+ ip4_sw_interface_enable_disable (t->sw_if_index, 0);
+ ip6_sw_interface_enable_disable (t->sw_if_index, 0);
+ }
+
vxm->tunnel_index_by_sw_if_index[sw_if_index] = ~0;
if (!is_ip6)
@@ -660,6 +660,36 @@ int vnet_vxlan_gbp_tunnel_add_del
return 0;
}
+int
+vnet_vxlan_gbp_tunnel_del (u32 sw_if_index)
+{
+ vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+ vxlan_gbp_tunnel_t *t = 0;
+ u32 ti;
+
+ if (sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index))
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ ti = vxm->tunnel_index_by_sw_if_index[sw_if_index];
+ if (~0 != ti)
+ {
+ t = pool_elt_at_index (vxm->tunnels, ti);
+
+ vnet_vxlan_gbp_tunnel_add_del_args_t args = {
+ .is_add = 0,
+ .is_ip6 = !ip46_address_is_ip4 (&t->src),
+ .vni = t->vni,
+ .src = t->src,
+ .dst = t->dst,
+ .instance = ~0,
+ };
+
+ return (vnet_vxlan_gbp_tunnel_add_del (&args, NULL));
+ }
+
+ return VNET_API_ERROR_NO_SUCH_ENTRY;
+}
+
static uword
get_decap_next_for_node (u32 node_index, u32 ipv4_set)
{
@@ -700,6 +730,7 @@ vxlan_gbp_tunnel_add_del_command_fn (vlib_main_t * vm,
unformat_input_t _line_input, *line_input = &_line_input;
ip46_address_t src = ip46_address_initializer, dst =
ip46_address_initializer;
+ vxlan_gbp_tunnel_mode_t mode = VXLAN_GBP_TUNNEL_MODE_L2;
u8 is_add = 1;
u8 src_set = 0;
u8 dst_set = 0;
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.h b/src/vnet/vxlan-gbp/vxlan_gbp.h
index f9edcdcbd93..66f0cffd772 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.h
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.h
@@ -59,6 +59,14 @@ typedef clib_bihash_kv_16_8_t vxlan4_gbp_tunnel_key_t;
*/
typedef clib_bihash_kv_24_8_t vxlan6_gbp_tunnel_key_t;
+typedef enum vxlan_gbp_tunnel_mode_t_
+{
+ VXLAN_GBP_TUNNEL_MODE_L2,
+ VXLAN_GBP_TUNNEL_MODE_L3,
+} vxlan_gbp_tunnel_mode_t;
+
+extern u8 *format_vxlan_gbp_tunnel_mode (u8 * s, va_list * args);
+
typedef struct
{
/* Required for pool_get_aligned */
@@ -67,9 +75,6 @@ typedef struct
/* FIB DPO for IP forwarding of VXLAN encap packet */
dpo_id_t next_dpo;
- /* Group Policy ID */
- u16 sclass;
-
/* flags */
u16 flags;
@@ -83,9 +88,6 @@ typedef struct
/* mcast packet output intfc index (used only if dst is mcast) */
u32 mcast_sw_if_index;
- /* decap next index */
- u32 decap_next_index;
-
/* The FIB index for src/dst addresses */
u32 encap_fib_index;
@@ -97,6 +99,12 @@ typedef struct
uword encap_next_node;
/**
+ * Tunnel mode.
+ * L2 tunnels decap to L2 path, L3 tunnels to the L3 path
+ */
+ vxlan_gbp_tunnel_mode_t mode;
+
+ /**
* Linkage into the FIB object graph
*/
fib_node_t node;
@@ -122,9 +130,12 @@ typedef struct
vnet_declare_rewrite (VLIB_BUFFER_PRE_DATA_SIZE);
} vxlan_gbp_tunnel_t;
-#define foreach_vxlan_gbp_input_next \
-_(DROP, "error-drop") \
-_(L2_INPUT, "l2-input")
+#define foreach_vxlan_gbp_input_next \
+ _(DROP, "error-drop") \
+ _(NO_TUNNEL, "error-punt") \
+ _(L2_INPUT, "l2-input") \
+ _(IP4_INPUT, "ip4-input") \
+ _(IP6_INPUT, "ip6-input")
typedef enum
{
@@ -142,6 +153,13 @@ typedef enum
VXLAN_GBP_N_ERROR,
} vxlan_gbp_input_error_t;
+/**
+ * Callback function for packets that do not match a configured tunnel
+ */
+typedef vxlan_gbp_input_next_t (*vxlan_gbp_no_tunnel_t) (vlib_buffer_t * b,
+ u32 thread_index,
+ u8 is_ip6);
+
typedef struct
{
/* vector of encap tunnel instances */
@@ -189,20 +207,22 @@ typedef struct
u8 is_add;
u8 is_ip6;
u32 instance;
+ vxlan_gbp_tunnel_mode_t mode;
ip46_address_t src, dst;
u32 mcast_sw_if_index;
u32 encap_fib_index;
- u32 decap_next_index;
u32 vni;
} vnet_vxlan_gbp_tunnel_add_del_args_t;
int vnet_vxlan_gbp_tunnel_add_del
(vnet_vxlan_gbp_tunnel_add_del_args_t * a, u32 * sw_if_indexp);
+int vnet_vxlan_gbp_tunnel_del (u32 sw_if_index);
void vnet_int_vxlan_gbp_bypass_mode (u32 sw_if_index, u8 is_ip6,
u8 is_enable);
u32 vnet_vxlan_gbp_get_tunnel_index (u32 sw_if_index);
+
#endif /* included_vnet_vxlan_gbp_h */
/*
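
A usage sketch for the new mode field and the delete-by-sw_if_index helper (values are illustrative; src, dst and encap_fib_index are left to the caller):

    u32 sw_if_index = ~0;
    vnet_vxlan_gbp_tunnel_add_del_args_t args = {
      .is_add = 1,
      .is_ip6 = 0,
      .instance = ~0,
      .mode = VXLAN_GBP_TUNNEL_MODE_L3,   /* decap straight into the L3 path */
      .vni = 99,
      /* .src, .dst, .encap_fib_index, .mcast_sw_if_index set by the caller */
    };

    vnet_vxlan_gbp_tunnel_add_del (&args, &sw_if_index);
    /* ... use the tunnel ... */
    vnet_vxlan_gbp_tunnel_del (sw_if_index);
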
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_api.c b/src/vnet/vxlan-gbp/vxlan_gbp_api.c
index b7e6935b2f8..f5e97e5a364 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp_api.c
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_api.c
@@ -92,10 +92,10 @@ static void vl_api_vxlan_gbp_tunnel_add_del_t_handler
.instance = ntohl (mp->tunnel.instance),
.mcast_sw_if_index = ntohl (mp->tunnel.mcast_sw_if_index),
.encap_fib_index = fib_index,
- .decap_next_index = ntohl (mp->tunnel.decap_next_index),
.vni = ntohl (mp->tunnel.vni),
.dst = dst,
.src = src,
+ .mode = VXLAN_GBP_TUNNEL_MODE_L2,
};
/* Check src & dst are different */
@@ -142,7 +142,6 @@ static void send_vxlan_gbp_tunnel_details
rmp->tunnel.instance = htonl (t->user_instance);
rmp->tunnel.mcast_sw_if_index = htonl (t->mcast_sw_if_index);
rmp->tunnel.vni = htonl (t->vni);
- rmp->tunnel.decap_next_index = htonl (t->decap_next_index);
rmp->tunnel.sw_if_index = htonl (t->sw_if_index);
rmp->context = context;
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_packet.c b/src/vnet/vxlan-gbp/vxlan_gbp_packet.c
new file mode 100644
index 00000000000..01c7a19bfb9
--- /dev/null
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_packet.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+u8 *
+format_vxlan_gbp_header_flags (u8 * s, va_list * args)
+{
+ vxlan_gbp_flags_t flags = va_arg (*args, int);
+
+ if (VXLAN_GBP_FLAGS_NONE == flags)
+ {
+ s = format (s, "None");
+ }
+#define _(n,f) { \
+ if (VXLAN_GBP_FLAGS_##f & flags) \
+ s = format (s, #f); \
+ }
+ foreach_vxlan_gbp_flags
+#undef _
+ return (s);
+}
+
+u8 *
+format_vxlan_gbp_header_gpflags (u8 * s, va_list * args)
+{
+ vxlan_gbp_gpflags_t flags = va_arg (*args, int);
+
+ if (VXLAN_GBP_GPFLAGS_NONE == flags)
+ {
+ s = format (s, "None");
+ }
+#define _(n,f) { \
+ if (VXLAN_GBP_GPFLAGS_##f & flags) \
+ s = format (s, #f); \
+ }
+ foreach_vxlan_gbp_gpflags
+#undef _
+ return (s);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_packet.h b/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
index e1674a0dba8..33bccd6aed6 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
@@ -15,6 +15,8 @@
#ifndef __included_vxlan_gbp_packet_h__
#define __included_vxlan_gbp_packet_h__ 1
+#include <vlib/vlib.h>
+
/*
* From draft-smith-vxlan-group-policy-04.txt
*
@@ -85,8 +87,17 @@ typedef struct
u32 vni_reserved;
} vxlan_gbp_header_t;
-#define VXLAN_GBP_FLAGS_G 0x80
-#define VXLAN_GBP_FLAGS_I 0x08
+#define foreach_vxlan_gbp_flags \
+ _ (0x80, G) \
+ _ (0x08, I)
+
+typedef enum
+{
+ VXLAN_GBP_FLAGS_NONE = 0,
+#define _(n,f) VXLAN_GBP_FLAGS_##f = n,
+ foreach_vxlan_gbp_flags
+#undef _
+} __attribute__ ((packed)) vxlan_gbp_flags_t;
#define foreach_vxlan_gbp_gpflags \
_ (0x40, D) \
@@ -96,10 +107,11 @@ _ (0x08, A)
typedef enum
{
+ VXLAN_GBP_GPFLAGS_NONE = 0,
#define _(n,f) VXLAN_GBP_GPFLAGS_##f = n,
foreach_vxlan_gbp_gpflags
#undef _
-} vxlan_gbp_gpflag_t;
+} __attribute__ ((packed)) vxlan_gbp_gpflags_t;
static inline u32
vxlan_gbp_get_vni (vxlan_gbp_header_t * h)
@@ -119,13 +131,13 @@ vxlan_gbp_get_sclass (vxlan_gbp_header_t * h)
return sclass_host_byte_order;
}
-static inline u8
+static inline vxlan_gbp_gpflags_t
vxlan_gbp_get_gpflags (vxlan_gbp_header_t * h)
{
return h->gpflags;
}
-static inline u8
+static inline vxlan_gbp_flags_t
vxlan_gbp_get_flags (vxlan_gbp_header_t * h)
{
return h->flag_g_i;
@@ -139,6 +151,9 @@ vxlan_gbp_set_header (vxlan_gbp_header_t * h, u32 vni)
h->flag_g_i = VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G;
}
+extern u8 *format_vxlan_gbp_header_flags (u8 * s, va_list * args);
+extern u8 *format_vxlan_gbp_header_gpflags (u8 * s, va_list * args);
+
#endif /* __included_vxlan_gbp_packet_h__ */
/*
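
The new flag formatters pair with the existing header accessors; a small sketch (the VNI is illustrative):

    vxlan_gbp_header_t h = { 0 };
    u8 *s = NULL;

    vxlan_gbp_set_header (&h, 99);   /* sets the G and I bits for VNI 99 */
    s = format (s, "flags:%U gpflags:%U",
                format_vxlan_gbp_header_flags, vxlan_gbp_get_flags (&h),
                format_vxlan_gbp_header_gpflags, vxlan_gbp_get_gpflags (&h));
    vec_free (s);
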
diff --git a/test/framework.py b/test/framework.py
index fbe6c370a3f..5bbd56a6ef7 100644
--- a/test/framework.py
+++ b/test/framework.py
@@ -929,7 +929,35 @@ class VppTestCase(unittest.TestCase):
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx = output.get_capture(len(pkts))
+ if isinstance(output, list):
+ rx = []
+ for o in output:
+ rx.append(o.get_capture(len(pkts)))
+ else:
+ rx = output.get_capture(len(pkts))
+ return rx
+
+ def send_and_expect_only(self, input, pkts, output, timeout=None):
+ self.vapi.cli("clear trace")
+ input.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ if isinstance(output, list):
+ outputs = output
+ rx = []
+ for o in outputs:
+ rx.append(o.get_capture(len(pkts)))
+ else:
+ rx = output.get_capture(len(pkts))
+ outputs = [output]
+ if not timeout:
+ timeout = 1
+ for i in self.pg_interfaces:
+ if i not in outputs:
+ i.get_capture(0, timeout=timeout)
+ i.assert_nothing_captured()
+ timeout = 0.1
+
return rx
diff --git a/test/test_gbp.py b/test/test_gbp.py
index ef4bf7071eb..a45b2f845b8 100644
--- a/test/test_gbp.py
+++ b/test/test_gbp.py
@@ -5,11 +5,16 @@ import unittest
from framework import VppTestCase, VppTestRunner
from vpp_object import VppObject
from vpp_neighbor import VppNeighbor
-from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, \
+ VppIpInterfaceAddress, VppIpInterfaceBind, find_route
+from vpp_l2 import VppBridgeDomain, VppBridgeDomainPort, \
+ VppBridgeDomainArpEntry, VppL2FibEntry, find_bridge_domain_port
+from vpp_vxlan_gbp_tunnel import *
from vpp_ip import *
from vpp_mac import *
from vpp_papi_provider import L2_PORT_TYPE
+from vpp_papi import VppEnum
from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP
@@ -17,25 +22,43 @@ from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6, ICMPv6ND_NS, ICMPv6NDOptSrcLLAddr, \
ICMPv6ND_NA
from scapy.utils6 import in6_getnsma, in6_getnsmac
+from scapy.layers.vxlan import VXLAN
from socket import AF_INET, AF_INET6
from scapy.utils import inet_pton, inet_ntop
from util import mactobinary
-def find_gbp_endpoint(test, sw_if_index, ip=None, mac=None):
- vip = VppIpAddress(ip)
+def find_gbp_endpoint(test, sw_if_index=None, ip=None, mac=None):
+ if ip:
+ vip = VppIpAddress(ip)
+ if mac:
+ vmac = VppMacAddress(mac)
eps = test.vapi.gbp_endpoint_dump()
+
for ep in eps:
- if ep.endpoint.sw_if_index != sw_if_index:
- continue
- for eip in ep.endpoint.ips:
- if vip == eip:
+ if sw_if_index:
+ if ep.endpoint.sw_if_index != sw_if_index:
+ continue
+ if ip:
+ for eip in ep.endpoint.ips:
+ if vip == eip:
+ return True
+ if mac:
+ if vmac == ep.endpoint.mac:
return True
return False
+def find_gbp_vxlan(test, vni):
+ ts = test.vapi.gbp_vxlan_tunnel_dump()
+ for t in ts:
+ if t.tunnel.vni == vni:
+ return True
+ return False
+
+
class VppGbpEndpoint(VppObject):
"""
GBP Endpoint
@@ -43,7 +66,11 @@ class VppGbpEndpoint(VppObject):
@property
def bin_mac(self):
- return mactobinary(self.itf.remote_mac)
+ return self.vmac.bytes
+
+ @property
+ def mac(self):
+ return self.vmac.address
@property
def mac(self):
@@ -73,7 +100,11 @@ class VppGbpEndpoint(VppObject):
def fips(self):
return [self.fip4, self.fip6]
- def __init__(self, test, itf, epg, recirc, ip4, fip4, ip6, fip6):
+ def __init__(self, test, itf, epg, recirc, ip4, fip4, ip6, fip6,
+ flags=0,
+ tun_src="0.0.0.0",
+ tun_dst="0.0.0.0",
+ mac=True):
self._test = test
self.itf = itf
self.epg = epg
@@ -84,14 +115,24 @@ class VppGbpEndpoint(VppObject):
self._ip6 = VppIpAddress(ip6)
self._fip6 = VppIpAddress(fip6)
- self.vmac = VppMacAddress(self.itf.remote_mac)
+ if mac:
+ self.vmac = VppMacAddress(self.itf.remote_mac)
+ else:
+ self.vmac = VppMacAddress("00:00:00:00:00:00")
+
+ self.flags = flags
+ self.tun_src = VppIpAddress(tun_src)
+ self.tun_dst = VppIpAddress(tun_dst)
def add_vpp_config(self):
res = self._test.vapi.gbp_endpoint_add(
self.itf.sw_if_index,
[self.ip4.encode(), self.ip6.encode()],
self.vmac.encode(),
- self.epg.epg)
+ self.epg.epg,
+ self.flags,
+ self.tun_src.encode(),
+ self.tun_dst.encode())
self.handle = res.handle
self._test.registry.register(self, self._test.logger)
@@ -102,9 +143,10 @@ class VppGbpEndpoint(VppObject):
return self.object_id()
def object_id(self):
- return "gbp-endpoint;[%d:%s:%d]" % (self.itf.sw_if_index,
- self.ip4.address,
- self.epg.epg)
+ return "gbp-endpoint:[%d==%d:%s:%d]" % (self.handle,
+ self.itf.sw_if_index,
+ self.ip4.address,
+ self.epg.epg)
def query_vpp_config(self):
return find_gbp_endpoint(self._test,
@@ -142,7 +184,7 @@ class VppGbpRecirc(VppObject):
return self.object_id()
def object_id(self):
- return "gbp-recirc;[%d]" % (self.recirc.sw_if_index)
+ return "gbp-recirc:[%d]" % (self.recirc.sw_if_index)
def query_vpp_config(self):
rs = self._test.vapi.gbp_recirc_dump()
@@ -156,23 +198,21 @@ class VppGbpSubnet(VppObject):
"""
GBP Subnet
"""
-
- def __init__(self, test, table_id, address, address_len,
- is_internal=True,
- sw_if_index=None, epg=None):
+ def __init__(self, test, rd, address, address_len,
+ type, sw_if_index=None, epg=None):
self._test = test
- self.table_id = table_id
+ self.rd_id = rd.rd_id
self.prefix = VppIpPrefix(address, address_len)
- self.is_internal = is_internal
+ self.type = type
self.sw_if_index = sw_if_index
self.epg = epg
def add_vpp_config(self):
self._test.vapi.gbp_subnet_add_del(
1,
- self.table_id,
- self.is_internal,
+ self.rd_id,
self.prefix.encode(),
+ self.type,
sw_if_index=self.sw_if_index if self.sw_if_index else 0xffffffff,
epg_id=self.epg if self.epg else 0xffff)
self._test.registry.register(self, self._test.logger)
@@ -180,21 +220,21 @@ class VppGbpSubnet(VppObject):
def remove_vpp_config(self):
self._test.vapi.gbp_subnet_add_del(
0,
- self.table_id,
- self.is_internal,
- self.prefix.encode())
+ self.rd_id,
+ self.prefix.encode(),
+ self.type)
def __str__(self):
return self.object_id()
def object_id(self):
- return "gbp-subnet;[%d-%s]" % (self.table_id,
- self.prefix)
+ return "gbp-subnet:[%d-%s]" % (self.rd_id, self.prefix)
def query_vpp_config(self):
ss = self._test.vapi.gbp_subnet_dump()
for s in ss:
- if s.subnet.table_id == self.table_id and \
+ if s.subnet.rd_id == self.rd_id and \
+ s.subnet.type == self.type and \
s.subnet.prefix == self.prefix:
return True
return False
@@ -217,29 +257,22 @@ class VppGbpEndpointGroup(VppObject):
self.rd = rd
def add_vpp_config(self):
- self._test.vapi.gbp_endpoint_group_add_del(
- 1,
+ self._test.vapi.gbp_endpoint_group_add(
self.epg,
- self.bd,
- self.rd,
- self.rd,
- self.uplink.sw_if_index)
+ self.bd.bd.bd_id,
+ self.rd.rd_id,
+ self.uplink.sw_if_index if self.uplink else INDEX_INVALID)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
- self._test.vapi.gbp_endpoint_group_add_del(
- 0,
- self.epg,
- self.bd,
- self.rd,
- self.rd,
- self.uplink.sw_if_index)
+ self._test.vapi.gbp_endpoint_group_del(
+ self.epg)
def __str__(self):
return self.object_id()
def object_id(self):
- return "gbp-endpoint-group;[%d]" % (self.epg)
+ return "gbp-endpoint-group:[%d]" % (self.epg)
def query_vpp_config(self):
epgs = self._test.vapi.gbp_endpoint_group_dump()
@@ -249,6 +282,80 @@ class VppGbpEndpointGroup(VppObject):
return False
+class VppGbpBridgeDomain(VppObject):
+ """
+ GBP Bridge Domain
+ """
+
+ def __init__(self, test, bd, bvi, uu_flood=None):
+ self._test = test
+ self.bvi = bvi
+ self.uu_flood = uu_flood
+ self.bd = bd
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_bridge_domain_add(
+ self.bd.bd_id,
+ self.bvi.sw_if_index,
+ self.uu_flood.sw_if_index if self.uu_flood else INDEX_INVALID)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_bridge_domain_del(self.bd.bd_id)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "gbp-bridge-domain:[%d]" % (self.bd.bd_id)
+
+ def query_vpp_config(self):
+ bds = self._test.vapi.gbp_bridge_domain_dump()
+ for bd in bds:
+ if bd.bd.bd_id == self.bd.bd_id:
+ return True
+ return False
+
+
+class VppGbpRouteDomain(VppObject):
+ """
+ GBP Route Domain
+ """
+
+ def __init__(self, test, rd_id, t4, t6, ip4_uu=None, ip6_uu=None):
+ self._test = test
+ self.rd_id = rd_id
+ self.t4 = t4
+ self.t6 = t6
+ self.ip4_uu = ip4_uu
+ self.ip6_uu = ip6_uu
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_route_domain_add(
+ self.rd_id,
+ self.t4.table_id,
+ self.t6.table_id,
+ self.ip4_uu.sw_if_index if self.ip4_uu else INDEX_INVALID,
+ self.ip6_uu.sw_if_index if self.ip6_uu else INDEX_INVALID)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_route_domain_del(self.rd_id)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "gbp-route-domain:[%d]" % (self.rd_id)
+
+ def query_vpp_config(self):
+ rds = self._test.vapi.gbp_route_domain_dump()
+ for rd in rds:
+ if rd.rd.rd_id == self.rd_id:
+ return True
+ return False
+
+
class VppGbpContract(VppObject):
"""
GBP Contract
@@ -279,7 +386,7 @@ class VppGbpContract(VppObject):
return self.object_id()
def object_id(self):
- return "gbp-contract;[%d:%s:%d]" % (self.src_epg,
+ return "gbp-contract:[%d:%s:%d]" % (self.src_epg,
self.dst_epg,
self.acl_index)
@@ -292,6 +399,39 @@ class VppGbpContract(VppObject):
return False
+class VppGbpVxlanTunnel(VppInterface):
+ """
+ GBP VXLAN tunnel
+ """
+
+ def __init__(self, test, vni, bd_rd_id, mode):
+ super(VppGbpVxlanTunnel, self).__init__(test)
+ self._test = test
+ self.vni = vni
+ self.bd_rd_id = bd_rd_id
+ self.mode = mode
+
+ def add_vpp_config(self):
+ r = self._test.vapi.gbp_vxlan_tunnel_add(
+ self.vni,
+ self.bd_rd_id,
+ self.mode)
+ self.set_sw_if_index(r.sw_if_index)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_vxlan_tunnel_del(self.vni)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "gbp-vxlan:%d" % (self.vni)
+
+ def query_vpp_config(self):
+ return find_gbp_vxlan(self._test, self.vni)
+
+
class VppGbpAcl(VppObject):
"""
GBP Acl
@@ -337,7 +477,7 @@ class VppGbpAcl(VppObject):
return self.object_id()
def object_id(self):
- return "gbp-acl;[%d]" % (self.acl_index)
+ return "gbp-acl:[%d]" % (self.acl_index)
def query_vpp_config(self):
cs = self._test.vapi.acl_dump()
@@ -354,7 +494,7 @@ class TestGBP(VppTestCase):
super(TestGBP, self).setUp()
self.create_pg_interfaces(range(9))
- self.create_loopback_interfaces(9)
+ self.create_loopback_interfaces(8)
self.router_mac = VppMacAddress("00:11:22:33:44:55")
@@ -362,9 +502,6 @@ class TestGBP(VppTestCase):
i.admin_up()
for i in self.lo_interfaces:
i.admin_up()
- self.vapi.sw_interface_set_mac_address(
- i.sw_if_index,
- self.router_mac.bytes)
def tearDown(self):
for i in self.pg_interfaces:
@@ -465,42 +602,64 @@ class TestGBP(VppTestCase):
def test_gbp(self):
""" Group Based Policy """
- nat_table = VppIpTable(self, 20)
- nat_table.add_vpp_config()
- nat_table = VppIpTable(self, 20, is_ip6=True)
- nat_table.add_vpp_config()
-
#
# Bridge Domains
#
- self.vapi.bridge_domain_add_del(1, flood=1, uu_flood=1, forward=1,
- learn=0, arp_term=1, is_add=1)
- self.vapi.bridge_domain_add_del(2, flood=1, uu_flood=1, forward=1,
- learn=0, arp_term=1, is_add=1)
- self.vapi.bridge_domain_add_del(20, flood=1, uu_flood=1, forward=1,
- learn=0, arp_term=1, is_add=1)
+ bd1 = VppBridgeDomain(self, 1)
+ bd2 = VppBridgeDomain(self, 2)
+ bd20 = VppBridgeDomain(self, 20)
+
+ bd1.add_vpp_config()
+ bd2.add_vpp_config()
+ bd20.add_vpp_config()
+
+ gbd1 = VppGbpBridgeDomain(self, bd1, self.loop0)
+ gbd2 = VppGbpBridgeDomain(self, bd2, self.loop1)
+ gbd20 = VppGbpBridgeDomain(self, bd20, self.loop2)
+
+ gbd1.add_vpp_config()
+ gbd2.add_vpp_config()
+ gbd20.add_vpp_config()
+
+ #
+ # Route Domains
+ #
+ gt4 = VppIpTable(self, 0)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 0, is_ip6=True)
+ gt6.add_vpp_config()
+ nt4 = VppIpTable(self, 20)
+ nt4.add_vpp_config()
+ nt6 = VppIpTable(self, 20, is_ip6=True)
+ nt6.add_vpp_config()
+
+ rd0 = VppGbpRouteDomain(self, 0, gt4, gt6, None, None)
+ rd20 = VppGbpRouteDomain(self, 20, nt4, nt6, None, None)
+
+ rd0.add_vpp_config()
+ rd20.add_vpp_config()
#
# 3 EPGs, 2 of which share a BD.
# 2 NAT EPGs, one for floating-IP subnets, the other for internet
#
- epgs = [VppGbpEndpointGroup(self, 220, 0, 1, self.pg4,
+ epgs = [VppGbpEndpointGroup(self, 220, rd0, gbd1, self.pg4,
self.loop0,
"10.0.0.128",
"2001:10::128"),
- VppGbpEndpointGroup(self, 221, 0, 1, self.pg5,
+ VppGbpEndpointGroup(self, 221, rd0, gbd1, self.pg5,
self.loop0,
"10.0.1.128",
"2001:10:1::128"),
- VppGbpEndpointGroup(self, 222, 0, 2, self.pg6,
+ VppGbpEndpointGroup(self, 222, rd0, gbd2, self.pg6,
self.loop1,
"10.0.2.128",
"2001:10:2::128"),
- VppGbpEndpointGroup(self, 333, 20, 20, self.pg7,
+ VppGbpEndpointGroup(self, 333, rd20, gbd20, self.pg7,
self.loop2,
"11.0.0.128",
"3001::128"),
- VppGbpEndpointGroup(self, 444, 20, 20, self.pg8,
+ VppGbpEndpointGroup(self, 444, rd20, gbd20, self.pg8,
self.loop2,
"11.0.0.129",
"3001::129")]
@@ -513,7 +672,7 @@ class TestGBP(VppTestCase):
VppGbpRecirc(self, epgs[3],
self.loop6, is_ext=True),
VppGbpRecirc(self, epgs[4],
- self.loop8, is_ext=True)]
+ self.loop7, is_ext=True)]
epg_nat = epgs[3]
recirc_nat = recircs[3]
@@ -544,8 +703,11 @@ class TestGBP(VppTestCase):
for epg in epgs:
# IP config on the BVI interfaces
if epg != epgs[1] and epg != epgs[4]:
- epg.bvi.set_table_ip4(epg.rd)
- epg.bvi.set_table_ip6(epg.rd)
+ VppIpInterfaceBind(self, epg.bvi, epg.rd.t4).add_vpp_config()
+ VppIpInterfaceBind(self, epg.bvi, epg.rd.t6).add_vpp_config()
+ self.vapi.sw_interface_set_mac_address(
+ epg.bvi.sw_if_index,
+ self.router_mac.bytes)
# The BVIs are NAT inside interfaces
self.vapi.nat44_interface_add_del_feature(epg.bvi.sw_if_index,
@@ -555,60 +717,37 @@ class TestGBP(VppTestCase):
is_inside=1,
is_add=1)
- self.vapi.sw_interface_add_del_address(epg.bvi.sw_if_index,
- epg.bvi_ip4.bytes,
- 32)
- self.vapi.sw_interface_add_del_address(epg.bvi.sw_if_index,
- epg.bvi_ip6.bytes,
- 128,
- is_ipv6=True)
+ if_ip4 = VppIpInterfaceAddress(self, epg.bvi, epg.bvi_ip4, 32)
+ if_ip6 = VppIpInterfaceAddress(self, epg.bvi, epg.bvi_ip6, 128)
+ if_ip4.add_vpp_config()
+ if_ip6.add_vpp_config()
- # EPG uplink interfaces in the BD
- epg.uplink.set_table_ip4(epg.rd)
- epg.uplink.set_table_ip6(epg.rd)
- self.vapi.sw_interface_set_l2_bridge(epg.uplink.sw_if_index,
- epg.bd)
+ # EPG uplink interfaces in the RD
+ VppIpInterfaceBind(self, epg.uplink, epg.rd.t4).add_vpp_config()
+ VppIpInterfaceBind(self, epg.uplink, epg.rd.t6).add_vpp_config()
# add the BD ARP termination entry for BVI IP
- self.vapi.bd_ip_mac_add_del(bd_id=epg.bd,
- mac=self.router_mac.encode(),
- ip=epg.bvi_ip4.encode(),
- is_ipv6=0,
- is_add=1)
- self.vapi.bd_ip_mac_add_del(bd_id=epg.bd,
- mac=self.router_mac.encode(),
- ip=epg.bvi_ip6.encode(),
- is_ipv6=1,
- is_add=1)
-
- # epg[1] shares the same BVI to epg[0]
- if epg != epgs[1] and epg != epgs[4]:
- # BVI in BD
- self.vapi.sw_interface_set_l2_bridge(
- epg.bvi.sw_if_index,
- epg.bd,
- port_type=L2_PORT_TYPE.BVI)
-
- # BVI L2 FIB entry
- self.vapi.l2fib_add_del(self.router_mac.address,
- epg.bd,
- epg.bvi.sw_if_index,
- is_add=1, bvi_mac=1)
+ epg.bd_arp_ip4 = VppBridgeDomainArpEntry(self, epg.bd.bd,
+ self.router_mac.address,
+ epg.bvi_ip4)
+ epg.bd_arp_ip6 = VppBridgeDomainArpEntry(self, epg.bd.bd,
+ self.router_mac.address,
+ epg.bvi_ip6)
+ epg.bd_arp_ip4.add_vpp_config()
+ epg.bd_arp_ip6.add_vpp_config()
# EPG in VPP
epg.add_vpp_config()
for recirc in recircs:
# EPG's ingress recirculation interface maps to its RD
- recirc.recirc.set_table_ip4(recirc.epg.rd)
- recirc.recirc.set_table_ip6(recirc.epg.rd)
+ VppIpInterfaceBind(self, recirc.recirc,
+ recirc.epg.rd.t4).add_vpp_config()
+ VppIpInterfaceBind(self, recirc.recirc,
+ recirc.epg.rd.t6).add_vpp_config()
- # in the bridge to allow DVR. L2 emulation to punt to L3
- self.vapi.sw_interface_set_l2_bridge(recirc.recirc.sw_if_index,
- recirc.epg.bd)
self.vapi.sw_interface_set_l2_emulation(
recirc.recirc.sw_if_index)
-
self.vapi.nat44_interface_add_del_feature(
recirc.recirc.sw_if_index,
is_inside=0,
@@ -620,8 +759,11 @@ class TestGBP(VppTestCase):
recirc.add_vpp_config()
- ep_routes = []
- ep_arps = []
+ for recirc in recircs:
+ self.assertTrue(find_bridge_domain_port(self,
+ recirc.epg.bd.bd.bd_id,
+ recirc.recirc.sw_if_index))
+
for ep in eps:
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
@@ -631,32 +773,6 @@ class TestGBP(VppTestCase):
# the subnet is not attached.
#
for (ip, fip) in zip(ep.ips, ep.fips):
- r = VppIpRoute(self, ip.address, ip.length,
- [VppRoutePath(ip.address,
- ep.epg.bvi.sw_if_index,
- proto=ip.dpo_proto)],
- is_ip6=ip.is_ip6)
- r.add_vpp_config()
- ep_routes.append(r)
-
- #
- # ARP entries for the endpoints
- #
- a = VppNeighbor(self,
- ep.epg.bvi.sw_if_index,
- ep.itf.remote_mac,
- ip.address,
- af=ip.af)
- a.add_vpp_config()
- ep_arps.append(a)
-
- # add the BD ARP termination entry
- self.vapi.bd_ip_mac_add_del(bd_id=ep.epg.bd,
- mac=ep.vmac.encode(),
- ip=ip.encode(),
- is_ipv6=ip.is_ip6,
- is_add=1)
-
# Add static mappings for each EP from the 10/8 to 11/8 network
if ip.af == AF_INET:
self.vapi.nat44_add_del_static_mapping(ip.bytes,
@@ -668,16 +784,6 @@ class TestGBP(VppTestCase):
fip.bytes,
vrf_id=0)
- # add each EP itf to the its BD
- self.vapi.sw_interface_set_l2_bridge(ep.itf.sw_if_index,
- ep.epg.bd)
-
- # L2 FIB entry
- self.vapi.l2fib_add_del(ep.mac,
- ep.epg.bd,
- ep.itf.sw_if_index,
- is_add=1)
-
# VPP EP create ...
ep.add_vpp_config()
@@ -699,11 +805,8 @@ class TestGBP(VppTestCase):
# add the BD ARP termination entry for floating IP
for fip in ep.fips:
- self.vapi.bd_ip_mac_add_del(bd_id=epg_nat.bd,
- mac=ep.vmac.encode(),
- ip=fip.encode(),
- is_ipv6=fip.is_ip6,
- is_add=1)
+ ba = VppBridgeDomainArpEntry(self, epg_nat.bd.bd, ep.mac, fip)
+ ba.add_vpp_config()
# floating IPs route via EPG recirc
r = VppIpRoute(self, fip.address, fip.length,
@@ -714,27 +817,31 @@ class TestGBP(VppTestCase):
table_id=20,
is_ip6=fip.is_ip6)
r.add_vpp_config()
- ep_routes.append(r)
# L2 FIB entries in the NAT EPG BD to bridge the packets from
# the outside direct to the internal EPG
- self.vapi.l2fib_add_del(ep.mac,
- epg_nat.bd,
- ep.recirc.recirc.sw_if_index,
- is_add=1)
+ lf = VppL2FibEntry(self, epg_nat.bd.bd, ep.mac,
+ ep.recirc.recirc, bvi_mac=0)
+ lf.add_vpp_config()
#
- # ARP packets for unknown IP are flooded
+ # ARP packets for unknown IP are sent to the EPG uplink
#
pkt_arp = (Ether(dst="ff:ff:ff:ff:ff:ff",
src=self.pg0.remote_mac) /
ARP(op="who-has",
hwdst="ff:ff:ff:ff:ff:ff",
hwsrc=self.pg0.remote_mac,
- pdst=epgs[0].bvi_ip4.address,
- psrc="10.0.0.88"))
+ pdst="10.0.0.88",
+ psrc="10.0.0.99"))
- self.send_and_expect(self.pg0, [pkt_arp], self.pg0)
+ self.vapi.cli("clear trace")
+ self.pg0.add_stream(pkt_arp)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rxd = epgs[0].uplink.get_capture(1)
#
# ARP/ND packets get a response
@@ -751,7 +858,8 @@ class TestGBP(VppTestCase):
nsma = in6_getnsma(inet_pton(AF_INET6, eps[0].ip6.address))
d = inet_ntop(AF_INET6, nsma)
- pkt_nd = (Ether(dst=in6_getnsmac(nsma)) /
+ pkt_nd = (Ether(dst=in6_getnsmac(nsma),
+ src=self.pg0.remote_mac) /
IPv6(dst=d, src=eps[0].ip6.address) /
ICMPv6ND_NS(tgt=epgs[0].bvi_ip6.address) /
ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
@@ -806,28 +914,40 @@ class TestGBP(VppTestCase):
#
# Add the subnet routes
#
- s41 = VppGbpSubnet(self, 0, "10.0.0.0", 24)
- s42 = VppGbpSubnet(self, 0, "10.0.1.0", 24)
- s43 = VppGbpSubnet(self, 0, "10.0.2.0", 24)
+ s41 = VppGbpSubnet(
+ self, rd0, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s42 = VppGbpSubnet(
+ self, rd0, "10.0.1.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s43 = VppGbpSubnet(
+ self, rd0, "10.0.2.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s61 = VppGbpSubnet(
+ self, rd0, "2001:10::1", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s62 = VppGbpSubnet(
+ self, rd0, "2001:10:1::1", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s63 = VppGbpSubnet(
+ self, rd0, "2001:10:2::1", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
s41.add_vpp_config()
s42.add_vpp_config()
s43.add_vpp_config()
- s61 = VppGbpSubnet(self, 0, "2001:10::1", 64)
- s62 = VppGbpSubnet(self, 0, "2001:10:1::1", 64)
- s63 = VppGbpSubnet(self, 0, "2001:10:2::1", 64)
s61.add_vpp_config()
s62.add_vpp_config()
s63.add_vpp_config()
- self.send_and_expect_bridged(self.pg0,
+ self.send_and_expect_bridged(eps[0].itf,
pkt_intra_epg_220_ip4 * 65,
- self.pg4)
- self.send_and_expect_bridged(self.pg3,
+ eps[0].epg.uplink)
+ self.send_and_expect_bridged(eps[0].itf,
pkt_inter_epg_222_ip4 * 65,
- self.pg6)
- self.send_and_expect_bridged6(self.pg3,
+ eps[0].epg.uplink)
+ self.send_and_expect_bridged6(eps[0].itf,
pkt_inter_epg_222_ip6 * 65,
- self.pg6)
+ eps[0].epg.uplink)
self.logger.info(self.vapi.cli("sh ip fib 11.0.0.2"))
self.logger.info(self.vapi.cli("sh gbp endpoint-group"))
@@ -838,6 +958,7 @@ class TestGBP(VppTestCase):
self.logger.info(self.vapi.cli("sh int feat loop6"))
self.logger.info(self.vapi.cli("sh vlib graph ip4-gbp-src-classify"))
self.logger.info(self.vapi.cli("sh int feat loop3"))
+ self.logger.info(self.vapi.cli("sh int feat pg0"))
#
# Packet destined to unknown unicast is sent on the epg uplink ...
@@ -849,9 +970,9 @@ class TestGBP(VppTestCase):
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
- self.send_and_expect_bridged(self.pg0,
+ self.send_and_expect_bridged(eps[0].itf,
pkt_intra_epg_220_to_uplink * 65,
- self.pg4)
+ eps[0].epg.uplink)
# ... and nowhere else
self.pg1.get_capture(0, timeout=0.1)
self.pg1.assert_nothing_captured(remark="Flood onto other VMS")
@@ -863,9 +984,9 @@ class TestGBP(VppTestCase):
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
- self.send_and_expect_bridged(self.pg2,
+ self.send_and_expect_bridged(eps[2].itf,
pkt_intra_epg_221_to_uplink * 65,
- self.pg5)
+ eps[2].epg.uplink)
#
# Packets from the uplink are forwarded in the absence of a contract
@@ -917,9 +1038,9 @@ class TestGBP(VppTestCase):
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
- self.send_and_assert_no_replies(self.pg0,
+ self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_221 * 65)
- self.send_and_assert_no_replies(self.pg0,
+ self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_222 * 65)
#
@@ -932,10 +1053,10 @@ class TestGBP(VppTestCase):
c1 = VppGbpContract(self, 220, 221, acl_index)
c1.add_vpp_config()
- self.send_and_expect_bridged(self.pg0,
+ self.send_and_expect_bridged(eps[0].itf,
pkt_inter_epg_220_to_221 * 65,
- self.pg2)
- self.send_and_assert_no_replies(self.pg0,
+ eps[2].itf)
+ self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_222 * 65)
#
@@ -944,18 +1065,18 @@ class TestGBP(VppTestCase):
c2 = VppGbpContract(self, 221, 220, acl_index)
c2.add_vpp_config()
- self.send_and_expect_bridged(self.pg0,
+ self.send_and_expect_bridged(eps[0].itf,
pkt_inter_epg_220_to_221 * 65,
- self.pg2)
- self.send_and_expect_bridged(self.pg2,
+ eps[2].itf)
+ self.send_and_expect_bridged(eps[2].itf,
pkt_inter_epg_221_to_220 * 65,
- self.pg0)
+ eps[0].itf)
#
# check that inter group is still disabled for the groups
# not in the contract.
#
- self.send_and_assert_no_replies(self.pg0,
+ self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_222 * 65)
#
@@ -966,9 +1087,9 @@ class TestGBP(VppTestCase):
self.logger.info(self.vapi.cli("sh gbp contract"))
- self.send_and_expect_routed(self.pg0,
+ self.send_and_expect_routed(eps[0].itf,
pkt_inter_epg_220_to_222 * 65,
- self.pg3,
+ eps[3].itf,
self.router_mac.address)
#
@@ -979,45 +1100,53 @@ class TestGBP(VppTestCase):
c3.remove_vpp_config()
acl.remove_vpp_config()
- self.send_and_assert_no_replies(self.pg2,
+ self.send_and_assert_no_replies(eps[2].itf,
pkt_inter_epg_221_to_220 * 65)
- self.send_and_assert_no_replies(self.pg0,
+ self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_221 * 65)
- self.send_and_expect_bridged(self.pg0, pkt_intra_epg * 65, self.pg1)
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_intra_epg * 65,
+ eps[1].itf)
#
# EPs to the outside world
#
# in the EP's RD an external subnet via the NAT EPG's recirc
- se1 = VppGbpSubnet(self, 0, "0.0.0.0", 0,
- is_internal=False,
- sw_if_index=recirc_nat.recirc.sw_if_index,
- epg=epg_nat.epg)
+ se1 = VppGbpSubnet(
+ self, rd0, "0.0.0.0", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ epg=epg_nat.epg)
+ se2 = VppGbpSubnet(
+ self, rd0, "11.0.0.0", 8,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ epg=epg_nat.epg)
+ se16 = VppGbpSubnet(
+ self, rd0, "::", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ epg=epg_nat.epg)
+ # in the NAT RD an external subnet via the NAT EPG's uplink
+ se3 = VppGbpSubnet(
+ self, rd20, "0.0.0.0", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ epg=epg_nat.epg)
+ se36 = VppGbpSubnet(
+ self, rd20, "::", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ epg=epg_nat.epg)
+ se4 = VppGbpSubnet(
+ self, rd20, "11.0.0.0", 8,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ epg=epg_nat.epg)
se1.add_vpp_config()
- se2 = VppGbpSubnet(self, 0, "11.0.0.0", 8,
- is_internal=False,
- sw_if_index=recirc_nat.recirc.sw_if_index,
- epg=epg_nat.epg)
se2.add_vpp_config()
- se16 = VppGbpSubnet(self, 0, "::", 0,
- is_internal=False,
- sw_if_index=recirc_nat.recirc.sw_if_index,
- epg=epg_nat.epg)
se16.add_vpp_config()
- # in the NAT RD an external subnet via the NAT EPG's uplink
- se3 = VppGbpSubnet(self, 20, "0.0.0.0", 0,
- is_internal=False,
- sw_if_index=epg_nat.uplink.sw_if_index,
- epg=epg_nat.epg)
- se36 = VppGbpSubnet(self, 20, "::", 0,
- is_internal=False,
- sw_if_index=epg_nat.uplink.sw_if_index,
- epg=epg_nat.epg)
- se4 = VppGbpSubnet(self, 20, "11.0.0.0", 8,
- is_internal=False,
- sw_if_index=epg_nat.uplink.sw_if_index,
- epg=epg_nat.epg)
se3.add_vpp_config()
se36.add_vpp_config()
se4.add_vpp_config()
@@ -1039,7 +1168,7 @@ class TestGBP(VppTestCase):
Raw('\xa5' * 100))
# no policy yet
- self.send_and_assert_no_replies(self.pg0,
+ self.send_and_assert_no_replies(eps[0].itf,
pkt_inter_epg_220_to_global * 65)
acl2 = VppGbpAcl(self)
@@ -1053,7 +1182,7 @@ class TestGBP(VppTestCase):
c4 = VppGbpContract(self, 220, 333, acl_index2)
c4.add_vpp_config()
- self.send_and_expect_natted(self.pg0,
+ self.send_and_expect_natted(eps[0].itf,
pkt_inter_epg_220_to_global * 65,
self.pg7,
eps[0].fip4.address)
@@ -1150,24 +1279,7 @@ class TestGBP(VppTestCase):
for epg in epgs:
# IP config on the BVI interfaces
- self.vapi.sw_interface_add_del_address(epg.bvi.sw_if_index,
- epg.bvi_ip4.bytes,
- 32,
- is_add=0)
- self.vapi.sw_interface_add_del_address(epg.bvi.sw_if_index,
- epg.bvi_ip6.bytes,
- 128,
- is_add=0,
- is_ipv6=True)
- self.logger.info(self.vapi.cli("sh int addr"))
-
- epg.uplink.set_table_ip4(0)
- epg.uplink.set_table_ip6(0)
-
if epg != epgs[0] and epg != epgs[3]:
- epg.bvi.set_table_ip4(0)
- epg.bvi.set_table_ip6(0)
-
self.vapi.nat44_interface_add_del_feature(epg.bvi.sw_if_index,
is_inside=1,
is_add=0)
@@ -1176,9 +1288,8 @@ class TestGBP(VppTestCase):
is_add=0)
for recirc in recircs:
- recirc.recirc.set_table_ip4(0)
- recirc.recirc.set_table_ip6(0)
-
+ self.vapi.sw_interface_set_l2_emulation(
+ recirc.recirc.sw_if_index, enable=0)
self.vapi.nat44_interface_add_del_feature(
recirc.recirc.sw_if_index,
is_inside=0,
@@ -1188,6 +1299,818 @@ class TestGBP(VppTestCase):
is_inside=0,
is_add=0)
+ def test_gbp_learn_l2(self):
+ """ GBP L2 Endpoint Learning """
+
+ learnt = [{'mac': '00:00:11:11:11:01',
+ 'ip': '10.0.0.1',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.0.2',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # lower the inactive threshold so these tests pass in a
+ # reasonable amount of time
+ #
+ self.vapi.gbp_endpoint_learn_set_inactive_threshold(1)
+
+ #
+ # IP tables
+ #
+ gt4 = VppIpTable(self, 1)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 1, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 1, gt4, gt6)
+ rd1.add_vpp_config()
+
+ #
+ # Pg2 hosts the vxlan tunnel, hosts on pg2 to act as TEPs
+ # Pg3 hosts the IP4 UU-flood VXLAN tunnel
+ # Pg4 hosts the IP6 UU-flood VXLAN tunnel
+ #
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg2.generate_remote_hosts(4)
+ self.pg2.configure_ipv4_neighbors()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, self.loop0, self.pg3)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+
+ # ... and has a /32 applied
+ ip_addr = VppIpInterfaceAddress(self, gbd1.bvi, "10.0.0.128", 32)
+ ip_addr.add_vpp_config()
+
+ #
+ # The Endpoint-group in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128")
+ epg_220.add_vpp_config()
+ epg_330 = VppGbpEndpointGroup(self, 330, rd1, gbd1,
+ None, self.loop1,
+ "10.0.1.128",
+ "2001:11::128")
+ epg_330.add_vpp_config()
+
+ #
+ # The VXLAN GBP tunnel is a bridge-port and has L2 endpoint
+ # learning enabled
+ #
+ vx_tun_l2_1 = VppGbpVxlanTunnel(
+ self, 99, bd1.bd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L2)
+ vx_tun_l2_1.add_vpp_config()
+
+ #
+ # A static endpoint that the learnt endpoints are trying to
+ # talk to
+ #
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ self.assertTrue(find_route(self, ep.ip4.address, 32, table_id=1))
+
+ # a packet with an sclass from an unknown EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[0].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=88, flags=0x88) /
+ Ether(src=learnt[0]["mac"], dst=ep.mac) /
+ IP(src=learnt[0]["ip"], dst=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg2, p)
+
+ #
+ # we should not have learnt a new tunnel endpoint, since
+ # the source EPG is not known.
+ #
+ self.assertEqual(INDEX_INVALID,
+ find_vxlan_gbp_tunnel(self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[0].ip4,
+ 99))
+
+ # no EP is learnt, because the EPG is unknown
+ self.assertEqual(len(self.vapi.gbp_endpoint_dump()), 1)
+
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=220, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ 99)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ #
+ # the EP is learnt via the learnt TEP
+ # both from its MAC and its IP
+ #
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ ip=l['ip']))
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show gbp vxlan"))
+ self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+
+ #
+ # If we sleep for the threshold time, the learnt endpoints should
+ # age out
+ #
+ self.sleep(2)
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ tep1_sw_if_index,
+ mac=l['mac']))
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show gbp vxlan"))
+ self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+
+ #
+ # repeat; the do-not-learn bit is set so the EPs are not learnt
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=220, flags=0x88, gpflags="D") /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p*65, self.pg0)
+
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # repeat
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=220, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p*65, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # Static EP replies to dynamics
+ #
+ self.logger.info(self.vapi.cli("sh l2fib bd_id 1"))
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IP(dst=l['ip'], src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 17, self.pg2)
+
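+ # each reply should be VXLAN-GBP encapsulated towards the learnt
+ # TEP; the outer header carries the source EPG in gpid, with the
+ # G and Instance flags set, gpflag A (policy applied) set and
+ # D (do-not-learn) clear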
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 220)
+ self.assertEqual(rx[VXLAN].vni, 99)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ self.sleep(2)
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # repeat in the other EPG
+ # there's no contract between 220 and 330, but the A-bit is set
+ # so the packet is cleared for delivery
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=330, flags=0x88, gpflags='A') /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p*65, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # static EP cannot reach the learnt EPs since there is no contract
+ #
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show l2fib all"))
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IP(dst=l['ip'], src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, [p], timeout=0.2)
+
+ #
+ # refresh the entries after the check for no replies above
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=330, flags=0x88, gpflags='A') /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p*65, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # Add the contract so they can talk
+ #
+ acl = VppGbpAcl(self)
+ rule = acl.create_rule(permit_deny=1, proto=17)
+ rule2 = acl.create_rule(is_ipv6=1, permit_deny=1, proto=17)
+ acl_index = acl.add_vpp_config([rule, rule2])
+ c1 = VppGbpContract(self, 220, 330, acl_index)
+ c1.add_vpp_config()
+
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IP(dst=l['ip'], src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_expect(self.pg0, [p], self.pg2)
+
+ #
+ # send UU packets from the local EP
+ #
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ p_uu = (Ether(src=ep.mac, dst="00:11:11:11:11:11") /
+ IP(dst="10.0.0.133", src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ rxs = self.send_and_expect(ep.itf, [p_uu], gbd1.uu_flood)
+
+ #
+ # Add a mcast destination VXLAN-GBP tunnel for broadcast & multicast (B&M) traffic
+ #
+ tun_bm = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ "239.1.1.1", 88,
+ mcast_itf=self.pg4)
+ tun_bm.add_vpp_config()
+ bp_bm = VppBridgeDomainPort(self, bd1, tun_bm,
+ port_type=L2_PORT_TYPE.NORMAL)
+ bp_bm.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+
+ p_bm = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
+ IP(dst="10.0.0.133", src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ rxs = self.send_and_expect_only(ep.itf, [p_bm], tun_bm.mcast_itf)
+
+ #
+ # Check v6 Endpoints
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=330, flags=0x88, gpflags='A') /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IPv6(src=l['ip6'], dst=ep.ip6.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p*65, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # L3 Endpoint Learning
+ # - configured on the bridge's BVI
+ #
+
+ #
+ # clean up
+ #
+ self.sleep(2)
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ self.pg2.unconfig_ip4()
+ self.pg3.unconfig_ip4()
+ self.pg4.unconfig_ip4()
+
+ self.logger.info(self.vapi.cli("sh int"))
+ self.logger.info(self.vapi.cli("sh gbp vxlan"))
+
+ def test_gbp_learn_l3(self):
+ """ GBP L3 Endpoint Learning """
+
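+ # the MACs expected in the inner Ethernet header of packets routed
+ # onto the GBP-VXLAN tunnel (checked against inner[Ether] below)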
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ learnt = [{'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.1.2',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:03',
+ 'ip': '10.0.1.3',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # lower the inactive threshold so these tests pass in a
+ # reasonable amount of time
+ #
+ self.vapi.gbp_endpoint_learn_set_inactive_threshold(1)
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ tun_ip4_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ self.pg4.remote_ip4, 114)
+ tun_ip6_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ self.pg4.remote_ip4, 116)
+ tun_ip4_uu.add_vpp_config()
+ tun_ip6_uu.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, t4, t6, tun_ip4_uu, tun_ip6_uu)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac.address)
+
+ #
+ # Bind the BVI to the RD
+ #
+ VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+
+ #
+ # Pg2 hosts the vxlan tunnel
+ # hosts on pg2 to act as TEPs
+ # pg3 is BD uu-fwd
+ # pg4 is RD uu-fwd
+ #
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg2.generate_remote_hosts(4)
+ self.pg2.configure_ipv4_neighbors()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, self.loop0, self.pg3)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ self.logger.info(self.vapi.cli("sh gbp route"))
+ self.logger.info(self.vapi.cli("show l2fib all"))
+
+ # ... and has a /32 and /128 applied
+ ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi, "10.0.0.128", 32)
+ ip4_addr.add_vpp_config()
+ ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi, "2001:10::128", 128)
+ ip6_addr.add_vpp_config()
+
+ #
+ # The Endpoint-group in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128")
+ epg_220.add_vpp_config()
+
+ #
+ # The VXLAN GBP tunnel is in L3 mode, bound to the route-domain,
+ # with endpoint learning enabled
+ #
+ vx_tun_l3 = VppGbpVxlanTunnel(
+ self, 101, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3)
+ vx_tun_l3.add_vpp_config()
+
+ #
+ # A static endpoint that the learnt endpoints are trying to
+ # talk to
+ #
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ #
+ # learn some remote IPv4 EPs
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=220, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src=l['ip'], dst=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ vx_tun_l3.vni)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=l['ip']))
+
+ #
+ # Static IPv4 EP replies to learnt
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst=l['ip'], src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p*1, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 220)
+ self.assertEqual(rx[VXLAN].vni, 101)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IP].src, ep.ip4.address)
+ self.assertEqual(inner[IP].dst, l['ip'])
+
+ self.sleep(2)
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ tep1_sw_if_index,
+ ip=l['ip']))
+
+ #
+ # learn some remote IPv6 EPs
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=220, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IPv6(src=l['ip6'], dst=ep.ip6.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ vx_tun_l3.vni)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ self.logger.info(self.vapi.cli("show gbp bridge"))
+ self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+ self.logger.info(self.vapi.cli("show gbp vxlan"))
+ self.logger.info(self.vapi.cli("show int addr"))
+
+ # endpoint learnt via the TEP
+ self.assertTrue(find_gbp_endpoint(self, ip=l['ip6']))
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show ip fib index 1 %s" % l['ip']))
+
+ #
+ # Static EP replies to learnt
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IPv6(dst=l['ip6'], src=ep.ip6.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p*65, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 220)
+ self.assertEqual(rx[VXLAN].vni, 101)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IPv6].src, ep.ip6.address)
+ self.assertEqual(inner[IPv6].dst, l['ip6'])
+
+ self.logger.info(self.vapi.cli("sh gbp endpoint"))
+ self.sleep(2)
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ tep1_sw_if_index,
+ ip=l['ip']))
+
+ #
+ # Static sends to unknown EP with no route
+ #
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst="10.0.0.99", src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, [p])
+
+ #
+ # Add a route to the static EP's v4 and v6 subnets;
+ # packets should be sent on the v4/v6 uu-fwd interface resp.
+ #
+ se_10_24 = VppGbpSubnet(
+ self, rd1, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT)
+ se_10_24.add_vpp_config()
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst="10.0.0.99", src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, [p], self.pg4)
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg4.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg4.remote_ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 220)
+ self.assertEqual(rx[VXLAN].vni, 114)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # policy is not applied to packets sent to the uu-fwd interfaces
+ self.assertFalse(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ #
+ # learn some remote IPv4 EPs
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=220, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src=l['ip'], dst=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ vx_tun_l3.vni)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=l['ip']))
+
+ #
+ # Add a remote endpoint from the API
+ #
+ rep_88 = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ "10.0.0.88", "11.0.0.88",
+ "2001:10::88", "3001::88",
+ VppEnum.vl_api_gbp_endpoint_flags_t.REMOTE,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ mac=None)
+ rep_88.add_vpp_config()
+
+ #
+ # Add a remote endpoint from the API that matches an existing one
+ #
+ rep_2 = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ learnt[0]['ip'], "11.0.0.101",
+ learnt[0]['ip6'], "3001::101",
+ VppEnum.vl_api_gbp_endpoint_flags_t.REMOTE,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ mac=None)
+ rep_2.add_vpp_config()
+
+ #
+ # Add a route to the learnt EP's v4 subnet;
+ # packets should be sent on the v4/v6 uu-fwd interface resp.
+ #
+ se_10_1_24 = VppGbpSubnet(
+ self, rd1, "10.0.1.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT)
+ se_10_1_24.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+
+ ips = ["10.0.0.88", learnt[0]['ip']]
+ for ip in ips:
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst=ip, src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p*65, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 220)
+ self.assertEqual(rx[VXLAN].vni, 101)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IP].src, ep.ip4.address)
+ self.assertEqual(inner[IP].dst, ip)
+
+ #
+ # remove the API remote EPs; they are now reached via UU-fwd
+ #
+ rep_88.remove_vpp_config()
+ rep_2.remove_vpp_config()
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+
+ for ip in ips:
+ self.assertFalse(find_gbp_endpoint(self, ip=ip))
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst=ip, src=ep.ip4.address) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, [p], self.pg4)
+
+ #
+ # shutdown with learnt endpoint present
+ #
+ self.logger.info(self.vapi.cli("show gbp endpoint-group"))
+
+ #
+ # TODO
+ # remote endpoint becomes local
+ #
+ self.pg2.unconfig_ip4()
+ self.pg3.unconfig_ip4()
+ self.pg4.unconfig_ip4()
+
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_l2_flood.py b/test/test_l2_flood.py
index 5a2694cbb63..9f3ef533091 100644
--- a/test/test_l2_flood.py
+++ b/test/test_l2_flood.py
@@ -5,7 +5,7 @@ import socket
from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath
-from vpp_papi_provider import L2_PORT_TYPE, BRIDGE_FLAGS
+from vpp_l2 import L2_PORT_TYPE, BRIDGE_FLAGS
from scapy.packet import Raw
from scapy.layers.l2 import Ether
diff --git a/test/vpp_interface.py b/test/vpp_interface.py
index c7918fcfbd2..1fe6652cfc7 100644
--- a/test/vpp_interface.py
+++ b/test/vpp_interface.py
@@ -454,3 +454,6 @@ class VppInterface(object):
"admin state")
self.test.assert_equal(if_state.link_up_down, link_up_down,
"link state")
+
+ def __str__(self):
+ return self.name
diff --git a/test/vpp_ip.py b/test/vpp_ip.py
index e92c91943db..6d22c16ff60 100644
--- a/test/vpp_ip.py
+++ b/test/vpp_ip.py
@@ -120,6 +120,10 @@ class VppIpAddress():
return self.addr.bytes
@property
+ def bytes(self):
+ return self.addr.bytes
+
+ @property
def address(self):
return self.addr.address
@@ -169,9 +173,17 @@ class VppIpPrefix():
return self.addr.address
@property
+ def bytes(self):
+ return self.addr.bytes
+
+ @property
def length(self):
return self.len
+ @property
+ def is_ip6(self):
+ return self.addr.is_ip6
+
def __str__(self):
return "%s/%d" % (self.address, self.length)
diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py
index 00a79f44232..45609e80786 100644
--- a/test/vpp_ip_route.py
+++ b/test/vpp_ip_route.py
@@ -35,6 +35,13 @@ class MplsLspMode:
UNIFORM = 1
+def ip_to_dpo_proto(addr):
+ if addr.version == 6:
+ return DpoProto.DPO_PROTO_IP6
+ else:
+ return DpoProto.DPO_PROTO_IP4
+
+
def find_route(test, ip_addr, len, table_id=0, inet=AF_INET):
if inet == AF_INET:
s = 4
@@ -71,6 +78,23 @@ def find_mroute(test, grp_addr, src_addr, grp_addr_len,
return False
+def fib_interface_ip_prefix(test, address, length, sw_if_index):
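+ # dump the addresses on the interface and look for an exact match on
+ # prefix, length and sw_if_index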
+ vp = VppIpPrefix(address, length)
+ addrs = test.vapi.ip_address_dump(sw_if_index, is_ipv6=vp.is_ip6)
+
+ if vp.is_ip6:
+ n = 16
+ else:
+ n = 4
+
+ for a in addrs:
+ if a.prefix_length == length and \
+ a.sw_if_index == sw_if_index and \
+ a.ip[:n] == vp.bytes:
+ return True
+ return False
+
+
class VppIpTable(VppObject):
def __init__(self,
@@ -95,6 +119,9 @@ class VppIpTable(VppObject):
is_add=0)
def query_vpp_config(self):
+ if self.table_id == 0:
+ # the default table always exists
+ return False
# find the default route
return find_route(self._test,
"::" if self.is_ip6 else "0.0.0.0",
@@ -111,6 +138,79 @@ class VppIpTable(VppObject):
self.table_id))
+class VppIpInterfaceAddress(VppObject):
+
+ def __init__(self, test, intf, addr, len):
+ self._test = test
+ self.intf = intf
+ self.prefix = VppIpPrefix(addr, len)
+
+ def add_vpp_config(self):
+ self._test.vapi.sw_interface_add_del_address(
+ self.intf.sw_if_index,
+ self.prefix.bytes,
+ self.prefix.length,
+ is_add=1,
+ is_ipv6=self.prefix.is_ip6)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.sw_interface_add_del_address(
+ self.intf.sw_if_index,
+ self.prefix.bytes,
+ self.prefix.length,
+ is_add=0,
+ is_ipv6=self.prefix.is_ip6)
+
+ def query_vpp_config(self):
+ return fib_interface_ip_prefix(self._test,
+ self.prefix.address,
+ self.prefix.length,
+ self.intf.sw_if_index)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "interface-ip-%s-%s" % (self.intf, self.prefix)
+
+
+class VppIpInterfaceBind(VppObject):
+
+ def __init__(self, test, intf, table):
+ self._test = test
+ self.intf = intf
+ self.table = table
+
+ def add_vpp_config(self):
+ if self.table.is_ip6:
+ self.intf.set_table_ip6(self.table.table_id)
+ else:
+ self.intf.set_table_ip4(self.table.table_id)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
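+ # table 0 is the default binding, so there is nothing to undo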
+ if 0 == self.table.table_id:
+ return
+ if self.table.is_ip6:
+ self.intf.set_table_ip6(0)
+ else:
+ self.intf.set_table_ip4(0)
+
+ def query_vpp_config(self):
+ if 0 == self.table.table_id:
+ return False
+ return self._test.vapi.sw_interface_get_table(
+ self.intf.sw_if_index,
+ self.table.is_ip6).vrf_id == self.table.table_id
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "interface-bind-%s-%s" % (self.intf, self.table)
+
+
class VppMplsLabel(object):
def __init__(self, value, mode=MplsLspMode.PIPE, ttl=64, exp=0):
self.value = value
diff --git a/test/vpp_l2.py b/test/vpp_l2.py
new file mode 100644
index 00000000000..a6b43efe14c
--- /dev/null
+++ b/test/vpp_l2.py
@@ -0,0 +1,221 @@
+"""
+ L2/BD Types
+
+"""
+
+from vpp_object import *
+from util import mactobinary
+from vpp_ip import VppIpAddress
+from vpp_mac import VppMacAddress
+from vpp_lo_interface import VppLoInterface
+
+
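+# port-type and bridge-flag constants used with the L2 binary API
+# (moved here from vpp_papi_provider)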
+class L2_PORT_TYPE:
+ NORMAL = 0
+ BVI = 1
+ UU_FWD = 2
+
+
+class BRIDGE_FLAGS:
+ NONE = 0
+ LEARN = 1
+ FWD = 2
+ FLOOD = 4
+ UU_FLOOD = 8
+ ARP_TERM = 16
+
+
+def find_bridge_domain(test, bd_id):
+ bds = test.vapi.bridge_domain_dump(bd_id)
+ return len(bds) == 1
+
+
+def find_bridge_domain_port(test, bd_id, sw_if_index):
+ bds = test.vapi.bridge_domain_dump(bd_id)
+ for bd in bds:
+ for p in bd.sw_if_details:
+ if p.sw_if_index == sw_if_index:
+ return True
+ return False
+
+
+def find_bridge_domain_arp_entry(test, bd_id, mac, ip):
+ vmac = VppMacAddress(mac)
+ vip = VppIpAddress(ip)
+
+ if vip.version == 4:
+ n = 4
+ else:
+ n = 16
+
+ arps = test.vapi.bd_ip_mac_dump(bd_id)
+ for arp in arps:
+ # do IP addr comparison too once .api is fixed...
+ if vmac.bytes == arp.mac_address and \
+ vip.bytes == arp.ip_address[:n]:
+ return True
+ return False
+
+
+def find_l2_fib_entry(test, bd_id, mac, sw_if_index):
+ vmac = VppMacAddress(mac)
+ lfs = test.vapi.l2_fib_table_dump(bd_id)
+ for lf in lfs:
+ if vmac.bytes == lf.mac and sw_if_index == lf.sw_if_index:
+ return True
+ return False
+
+
+class VppBridgeDomain(VppObject):
+
+ def __init__(self, test, bd_id,
+ flood=1, uu_flood=1, forward=1,
+ learn=1, arp_term=1):
+ self._test = test
+ self.bd_id = bd_id
+ self.flood = flood
+ self.uu_flood = uu_flood
+ self.forward = forward
+ self.learn = learn
+ self.arp_term = arp_term
+
+ def add_vpp_config(self):
+ self._test.vapi.bridge_domain_add_del(
+ self.bd_id,
+ is_add=1,
+ flood=self.flood,
+ uu_flood=self.uu_flood,
+ forward=self.forward,
+ learn=self.learn,
+ arp_term=self.arp_term)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bridge_domain_add_del(self.bd_id, is_add=0)
+
+ def query_vpp_config(self):
+ return find_bridge_domain(self._test, self.bd_id)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "bridge-domain-%d" % (self.bd_id)
+
+
+class VppBridgeDomainPort(VppObject):
+
+ def __init__(self, test, bd, itf,
+ port_type=L2_PORT_TYPE.NORMAL):
+ self._test = test
+ self.bd = bd
+ self.itf = itf
+ self.port_type = port_type
+
+ def add_vpp_config(self):
+ self._test.vapi.sw_interface_set_l2_bridge(
+ self.itf.sw_if_index,
+ self.bd.bd_id,
+ port_type=self.port_type,
+ enable=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.sw_interface_set_l2_bridge(
+ self.itf.sw_if_index,
+ self.bd.bd_id,
+ port_type=self.port_type,
+ enable=0)
+
+ def query_vpp_config(self):
+ return find_bridge_domain_port(self._test,
+ self.bd.bd_id,
+ self.itf.sw_if_index)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "BD-Port-%s-%s" % (self.bd, self.itf)
+
+
+class VppBridgeDomainArpEntry(VppObject):
+
+ def __init__(self, test, bd, mac, ip):
+ self._test = test
+ self.bd = bd
+ self.mac = VppMacAddress(mac)
+ self.ip = VppIpAddress(ip)
+
+ def add_vpp_config(self):
+ self._test.vapi.bd_ip_mac_add_del(
+ self.bd.bd_id,
+ self.mac.encode(),
+ self.ip.encode(),
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bd_ip_mac_add_del(
+ self.bd.bd_id,
+ self.mac.encode(),
+ self.ip.encode(),
+ is_add=0)
+
+ def query_vpp_config(self):
+ return find_bridge_domain_arp_entry(self._test,
+ self.bd.bd_id,
+ self.mac.address,
+ self.ip.address)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "BD-Arp-Entry-%s-%s-%s" % (self.bd, self.mac, self.ip.address)
+
+
+class VppL2FibEntry(VppObject):
+
+ def __init__(self, test, bd, mac, itf,
+ static_mac=0, filter_mac=0, bvi_mac=-1):
+ self._test = test
+ self.bd = bd
+ self.mac = VppMacAddress(mac)
+ self.itf = itf
+ self.static_mac = static_mac
+ self.filter_mac = filter_mac
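+ # by default, entries on a loopback (BVI) interface are treated as
+ # BVI MACs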
+ if bvi_mac == -1:
+ self.bvi_mac = isinstance(self.itf, VppLoInterface)
+ else:
+ self.bvi_mac = bvi_mac
+
+ def add_vpp_config(self):
+ self._test.vapi.l2fib_add_del(
+ self.mac.address,
+ self.bd.bd_id,
+ self.itf.sw_if_index,
+ is_add=1,
+ static_mac=self.static_mac,
+ filter_mac=self.filter_mac,
+ bvi_mac=self.bvi_mac)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.l2fib_add_del(
+ self.mac.address,
+ self.bd.bd_id,
+ self.itf.sw_if_index,
+ is_add=0)
+
+ def query_vpp_config(self):
+ return find_l2_fib_entry(self._test,
+ self.bd.bd_id,
+ self.mac.address,
+ self.itf.sw_if_index)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "L2-Fib-Entry-%s-%s-%s" % (self.bd, self.mac, self.itf)
diff --git a/test/vpp_mac.py b/test/vpp_mac.py
index c9ee11e6658..c0e69746175 100644
--- a/test/vpp_mac.py
+++ b/test/vpp_mac.py
@@ -22,3 +22,18 @@ class VppMacAddress():
@property
def address(self):
return self.addr.address
+
+ def __str__(self):
+ return self.address
+
+ def __eq__(self, other):
+ if isinstance(other, self.__class__):
+ return self.address == other.address
+ elif hasattr(other, "bytes"):
+ # vl_api_mac_address_t
+ return self.bytes == other.bytes
+ else:
+ raise Exception("Comparing VppMacAddress:%s"
+ "with unknown type: %s" %
+ (self, other))
+ return False
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index c2192471437..1adcc1ba24b 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -18,6 +18,7 @@ except:
if do_import:
from vpp_papi import VPP
+ from vpp_l2 import L2_PORT_TYPE
# from vnet/vnet/mpls/mpls_types.h
MPLS_IETF_MAX_LABEL = 0xfffff
@@ -43,21 +44,6 @@ class QOS_SOURCE:
IP = 3
-class L2_PORT_TYPE:
- NORMAL = 0
- BVI = 1
- UU_FWD = 2
-
-
-class BRIDGE_FLAGS:
- NONE = 0
- LEARN = 1
- FWD = 2
- FLOOD = 4
- UU_FLOOD = 8
- ARP_TERM = 16
-
-
class UnexpectedApiReturnValueError(Exception):
""" exception raised when the API return value is unexpected """
pass
@@ -257,6 +243,17 @@ class VppPapiProvider(object):
{'sw_if_index': sw_if_index, 'is_ipv6': is_ipv6,
'vrf_id': table_id})
+ def sw_interface_get_table(self, sw_if_index, is_ipv6):
+ """ Get the IPvX Table-id for the Interface
+
+ :param sw_if_index:
+ :param is_ipv6:
+ :return table_id
+
+ """
+ return self.api(self.papi.sw_interface_get_table,
+ {'sw_if_index': sw_if_index, 'is_ipv6': is_ipv6})
+
def sw_interface_add_del_address(self, sw_if_index, addr, addr_len,
is_ipv6=0, is_add=1, del_all=0):
"""
@@ -277,6 +274,11 @@ class VppPapiProvider(object):
'address_length': addr_len,
'address': addr})
+ def ip_address_dump(self, sw_if_index, is_ipv6=0):
+ return self.api(self.papi.ip_address_dump,
+ {'sw_if_index': sw_if_index,
+ 'is_ipv6': is_ipv6})
+
def sw_interface_set_unnumbered(self, sw_if_index, ip_sw_if_index,
is_add=1):
""" Set the Interface to be unnumbered
@@ -477,10 +479,13 @@ class VppPapiProvider(object):
return self.api(self.papi.bd_ip_mac_add_del,
{'bd_id': bd_id,
'is_add': is_add,
- 'is_ipv6': is_ipv6,
'ip': ip,
'mac': mac})
+ def bd_ip_mac_dump(self, bd_id):
+ return self.api(self.papi.bd_ip_mac_dump,
+ {'bd_id': bd_id})
+
def want_ip4_arp_events(self, enable_disable=1, address=0):
return self.api(self.papi.want_ip4_arp_events,
{'enable_disable': enable_disable,
@@ -647,6 +652,11 @@ class VppPapiProvider(object):
"""
return self.api(self.papi.l2fib_flush_all, {})
+ def l2_fib_table_dump(self, bd_id):
+ """ Dump the L2 FIB """
+ return self.api(self.papi.l2_fib_table_dump,
+ {'bd_id': bd_id})
+
def sw_interface_set_l2_bridge(self, sw_if_index, bd_id,
shg=0, port_type=L2_PORT_TYPE.NORMAL,
enable=1):
@@ -2771,7 +2781,6 @@ class VppPapiProvider(object):
is_add=1,
is_ipv6=0,
encap_table_id=0,
- decap_next_index=0xFFFFFFFF,
vni=0,
instance=0xFFFFFFFF):
"""
@@ -2794,7 +2803,6 @@ class VppPapiProvider(object):
'dst': dst,
'mcast_sw_if_index': mcast_sw_if_index,
'encap_table_id': encap_table_id,
- 'decap_next_index': decap_next_index,
'vni': vni,
'instance': instance}})
@@ -3482,15 +3490,22 @@ class VppPapiProvider(object):
'enable_ip6': 1 if enable_ip6 else 0,
})
- def gbp_endpoint_add(self, sw_if_index, ips, mac, epg):
+ def gbp_endpoint_add(self, sw_if_index, ips, mac, epg, flags,
+ tun_src, tun_dst):
""" GBP endpoint Add """
return self.api(self.papi.gbp_endpoint_add,
{'endpoint': {
'sw_if_index': sw_if_index,
'ips': ips,
'n_ips': len(ips),
'mac': mac,
- 'epg_id': epg}})
+ 'epg_id': epg,
+ 'flags': flags,
+ 'tun': {
+ 'src': tun_src,
+ 'dst': tun_dst,
+ }}})
def gbp_endpoint_del(self, handle):
""" GBP endpoint Del """
@@ -3501,24 +3516,73 @@ class VppPapiProvider(object):
""" GBP endpoint Dump """
return self.api(self.papi.gbp_endpoint_dump, {})
- def gbp_endpoint_group_add_del(self, is_add, epg, bd,
- ip4_rd,
- ip6_rd,
- uplink_sw_if_index):
- """ GBP endpoint group Add/Del """
- return self.api(self.papi.gbp_endpoint_group_add_del,
- {'is_add': is_add,
- 'epg': {
+ def gbp_endpoint_group_add(self, epg, bd,
+ rd, uplink_sw_if_index):
+ """ GBP endpoint group Add """
+ return self.api(self.papi.gbp_endpoint_group_add,
+ {'epg':
+ {
'uplink_sw_if_index': uplink_sw_if_index,
'bd_id': bd,
- 'ip4_table_id': ip4_rd,
- 'ip6_table_id': ip6_rd,
- 'epg_id': epg}})
+ 'rd_id': rd,
+ 'epg_id': epg
+ }})
+
+ def gbp_endpoint_group_del(self, epg):
+ """ GBP endpoint group Del """
+ return self.api(self.papi.gbp_endpoint_group_del,
+ {'epg_id': epg})
def gbp_endpoint_group_dump(self):
""" GBP endpoint group Dump """
return self.api(self.papi.gbp_endpoint_group_dump, {})
+ def gbp_bridge_domain_add(self, bd_id,
+ bvi_sw_if_index,
+ uu_fwd_sw_if_index):
+ """ GBP bridge-domain Add """
+ return self.api(self.papi.gbp_bridge_domain_add,
+ {'bd':
+ {
+ 'bvi_sw_if_index': bvi_sw_if_index,
+ 'uu_fwd_sw_if_index': uu_fwd_sw_if_index,
+ 'bd_id': bd_id
+ }})
+
+ def gbp_bridge_domain_del(self, bd_id):
+ """ GBP bridge-domain Del """
+ return self.api(self.papi.gbp_bridge_domain_del,
+ {'bd_id': bd_id})
+
+ def gbp_bridge_domain_dump(self):
+ """ GBP Bridge Domain Dump """
+ return self.api(self.papi.gbp_bridge_domain_dump, {})
+
+ def gbp_route_domain_add(self, rd_id,
+ ip4_table_id,
+ ip6_table_id,
+ ip4_uu_sw_if_index,
+ ip6_uu_sw_if_index):
+ """ GBP route-domain Add """
+ return self.api(self.papi.gbp_route_domain_add,
+ {'rd':
+ {
+ 'ip4_table_id': ip4_table_id,
+ 'ip6_table_id': ip6_table_id,
+ 'ip4_uu_sw_if_index': ip4_uu_sw_if_index,
+ 'ip6_uu_sw_if_index': ip6_uu_sw_if_index,
+ 'rd_id': rd_id
+ }})
+
+ def gbp_route_domain_del(self, rd_id):
+ """ GBP route-domain Del """
+ return self.api(self.papi.gbp_route_domain_del,
+ {'rd_id': rd_id})
+
+ def gbp_route_domain_dump(self):
+ """ GBP Route Domain Dump """
+ return self.api(self.papi.gbp_route_domain_dump, {})
+
def gbp_recirc_add_del(self, is_add, sw_if_index, epg, is_ext):
""" GBP recirc Add/Del """
return self.api(self.papi.gbp_recirc_add_del,
@@ -3532,22 +3596,19 @@ class VppPapiProvider(object):
""" GBP recirc Dump """
return self.api(self.papi.gbp_recirc_dump, {})
- def gbp_subnet_add_del(self, is_add, table_id,
- is_internal,
- prefix,
+ def gbp_subnet_add_del(self, is_add, rd_id,
+ prefix, type,
sw_if_index=0xffffffff,
- epg_id=0xffff,
- is_ip6=False):
+ epg_id=0xffff):
""" GBP Subnet Add/Del """
return self.api(self.papi.gbp_subnet_add_del,
{'is_add': is_add,
'subnet': {
- 'is_internal': is_internal,
- 'is_ip6': is_ip6,
+ 'type': type,
'sw_if_index': sw_if_index,
'epg_id': epg_id,
'prefix': prefix,
- 'table_id': table_id}})
+ 'rd_id': rd_id}})
def gbp_subnet_dump(self):
""" GBP Subnet Dump """
@@ -3566,6 +3627,33 @@ class VppPapiProvider(object):
""" GBP contract Dump """
return self.api(self.papi.gbp_contract_dump, {})
+ def gbp_endpoint_learn_set_inactive_threshold(self, threshold):
+ """ GBP set inactive threshold """
+ return self.api(self.papi.gbp_endpoint_learn_set_inactive_threshold,
+ {'threshold': threshold})
+
+ def gbp_vxlan_tunnel_add(self, vni, bd_rd_id, mode):
+ """ GBP VXLAN tunnel add """
+ return self.api(self.papi.gbp_vxlan_tunnel_add,
+ {
+ 'tunnel': {
+ 'vni': vni,
+ 'mode': mode,
+ 'bd_rd_id': bd_rd_id
+ }
+ })
+
+ def gbp_vxlan_tunnel_del(self, vni):
+ """ GBP VXLAN tunnel del """
+ return self.api(self.papi.gbp_vxlan_tunnel_del,
+ {
+ 'vni': vni,
+ })
+
+ def gbp_vxlan_tunnel_dump(self):
+ """ GBP VXLAN tunnel add/del """
+ return self.api(self.papi.gbp_vxlan_tunnel_dump, {})
+
def ipip_6rd_add_tunnel(self, ip6_table_id, ip6_prefix, ip6_prefix_len,
ip4_table_id, ip4_prefix, ip4_prefix_len, ip4_src,
security_check):
diff --git a/test/vpp_vxlan_gbp_tunnel.py b/test/vpp_vxlan_gbp_tunnel.py
new file mode 100644
index 00000000000..805d4c5f3e2
--- /dev/null
+++ b/test/vpp_vxlan_gbp_tunnel.py
@@ -0,0 +1,69 @@
+
+from vpp_interface import VppInterface
+from vpp_ip import VppIpAddress
+
+
+INDEX_INVALID = 0xffffffff
+
+
+def find_vxlan_gbp_tunnel(test, src, dst, vni):
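+ # return the sw_if_index of the tunnel matching src/dst/vni, or
+ # INDEX_INVALID if no such tunnel exists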
+ vsrc = VppIpAddress(src)
+ vdst = VppIpAddress(dst)
+
+ ts = test.vapi.vxlan_gbp_tunnel_dump(INDEX_INVALID)
+ for t in ts:
+ if vsrc == t.tunnel.src and \
+ vdst == t.tunnel.dst and \
+ t.tunnel.vni == vni:
+ return t.tunnel.sw_if_index
+ return INDEX_INVALID
+
+
+class VppVxlanGbpTunnel(VppInterface):
+ """
+ VPP VXLAN GBP interface
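+ mcast_itf, when given, is the interface used to reach a multicast
+ destination address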
+ """
+
+ def __init__(self, test, src, dst, vni, mcast_itf=None):
+ """ Create VXLAN-GBP Tunnel interface """
+ super(VppVxlanGbpTunnel, self).__init__(test)
+ self.src = VppIpAddress(src)
+ self.dst = VppIpAddress(dst)
+ self.vni = vni
+ self.mcast_itf = mcast_itf
+
+ def add_vpp_config(self):
+ mcast_sw_if_index = INDEX_INVALID
+ if (self.mcast_itf):
+ mcast_sw_if_index = self.mcast_itf.sw_if_index
+ reply = self.test.vapi.vxlan_gbp_tunnel_add_del(
+ self.src.encode(),
+ self.dst.encode(),
+ vni=self.vni,
+ mcast_sw_if_index=mcast_sw_if_index)
+ self.set_sw_if_index(reply.sw_if_index)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ mcast_sw_if_index = INDEX_INVALID
+ if (self.mcast_itf):
+ mcast_sw_if_index = self.mcast_itf.sw_if_index
+ self.test.vapi.vxlan_gbp_tunnel_add_del(
+ self.src.encode(),
+ self.dst.encode(),
+ vni=self.vni,
+ is_add=0,
+ mcast_sw_if_index=mcast_sw_if_index)
+
+ def query_vpp_config(self):
+ return (INDEX_INVALID != find_vxlan_gbp_tunnel(self._test,
+ self.src.address,
+ self.dst.address,
+ self.vni))
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "vxlan-gbp-%d-%d-%s-%s" % (self.sw_if_index, self.vni,
+ self.src, self.dst)