summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--extras/vom/vom/api_types.cpp72
-rw-r--r--extras/vom/vom/api_types.hpp15
-rw-r--r--extras/vom/vom/interface.cpp4
-rw-r--r--extras/vom/vom/mroute_cmds.cpp69
-rw-r--r--extras/vom/vom/mroute_cmds.hpp42
-rw-r--r--extras/vom/vom/neighbour.hpp2
-rw-r--r--extras/vom/vom/neighbour_cmds.cpp8
-rw-r--r--extras/vom/vom/neighbour_cmds.hpp11
-rw-r--r--extras/vom/vom/prefix.cpp21
-rw-r--r--extras/vom/vom/prefix.hpp13
-rw-r--r--extras/vom/vom/route.cpp281
-rw-r--r--extras/vom/vom/route.hpp4
-rw-r--r--extras/vom/vom/route_api_types.cpp164
-rw-r--r--extras/vom/vom/route_api_types.hpp8
-rw-r--r--extras/vom/vom/route_cmds.cpp104
-rw-r--r--extras/vom/vom/route_cmds.hpp63
-rw-r--r--extras/vom/vom/route_domain.cpp32
-rw-r--r--extras/vom/vom/route_domain.hpp9
-rw-r--r--extras/vom/vom/route_domain_cmds.cpp37
-rw-r--r--extras/vom/vom/route_domain_cmds.hpp34
-rw-r--r--extras/vom/vom/srpc_cmd.hpp74
-rw-r--r--src/plugins/abf/abf.api1
-rw-r--r--src/plugins/abf/abf_api.c15
-rw-r--r--src/plugins/abf/abf_error.def1
-rw-r--r--src/plugins/abf/abf_itf_attach.c13
-rw-r--r--src/plugins/gtpu/gtpu.c12
-rw-r--r--src/plugins/igmp/igmp.c37
-rw-r--r--src/plugins/igmp/igmp_proxy.c4
-rw-r--r--src/plugins/l3xc/l3xc_api.c14
-rw-r--r--src/plugins/unittest/bier_test.c4
-rw-r--r--src/plugins/unittest/mfib_test.c227
-rwxr-xr-xsrc/tools/vppapigen/vppapigen.py2
-rw-r--r--src/vat/api_format.c1370
-rw-r--r--src/vnet/CMakeLists.txt1
-rw-r--r--src/vnet/api_errno.h5
-rw-r--r--src/vnet/bier/bier.api21
-rw-r--r--src/vnet/bier/bier_api.c64
-rw-r--r--src/vnet/bier/bier_entry.c2
-rw-r--r--src/vnet/bier/bier_fmask.c12
-rw-r--r--src/vnet/bier/bier_fmask.h2
-rw-r--r--src/vnet/bier/bier_table.c9
-rw-r--r--src/vnet/dhcp/dhcp6_proxy_node.c6
-rw-r--r--src/vnet/dpo/mpls_disposition.c30
-rw-r--r--src/vnet/fib/fib_api.c608
-rw-r--r--src/vnet/fib/fib_api.h73
-rw-r--r--src/vnet/fib/fib_entry.c24
-rw-r--r--src/vnet/fib/fib_entry.h9
-rw-r--r--src/vnet/fib/fib_entry_src.c67
-rw-r--r--src/vnet/fib/fib_path.c138
-rw-r--r--src/vnet/fib/fib_path.h28
-rw-r--r--src/vnet/fib/fib_path_list.c236
-rw-r--r--src/vnet/fib/fib_path_list.h4
-rw-r--r--src/vnet/fib/fib_table.c51
-rw-r--r--src/vnet/fib/fib_types.api127
-rw-r--r--src/vnet/fib/fib_types.c18
-rw-r--r--src/vnet/fib/fib_types.h52
-rw-r--r--src/vnet/fib/mpls_fib.c17
-rw-r--r--src/vnet/geneve/geneve.c12
-rw-r--r--src/vnet/ip/ip.api281
-rw-r--r--src/vnet/ip/ip_api.c1056
-rw-r--r--src/vnet/ip/ip_types_api.c3
-rw-r--r--src/vnet/ip/lookup.c108
-rw-r--r--src/vnet/mfib/ip6_mfib.c14
-rw-r--r--src/vnet/mfib/mfib_api.c119
-rw-r--r--src/vnet/mfib/mfib_api.h38
-rw-r--r--src/vnet/mfib/mfib_entry.c224
-rw-r--r--src/vnet/mfib/mfib_entry.h6
-rw-r--r--src/vnet/mfib/mfib_table.c95
-rw-r--r--src/vnet/mfib/mfib_table.h25
-rw-r--r--src/vnet/mfib/mfib_types.api35
-rw-r--r--src/vnet/mpls/mpls.api129
-rw-r--r--src/vnet/mpls/mpls_api.c352
-rw-r--r--src/vnet/mpls/mpls_tunnel.c14
-rw-r--r--src/vnet/udp/udp_encap.c2
-rw-r--r--src/vnet/vxlan-gbp/vxlan_gbp.c8
-rw-r--r--src/vnet/vxlan-gpe/vxlan_gpe.c10
-rw-r--r--src/vnet/vxlan/vxlan.c12
-rw-r--r--src/vpp/api/api.c2
-rw-r--r--src/vpp/api/custom_dump.c202
-rw-r--r--src/vpp/api/types.c47
-rw-r--r--src/vpp/api/types.h1
-rw-r--r--test/remote_test.py22
-rw-r--r--test/test_abf.py41
-rw-r--r--test/test_bfd.py8
-rw-r--r--test/test_bier.py38
-rw-r--r--test/test_classifier.py31
-rw-r--r--test/test_dhcp6.py44
-rw-r--r--test/test_dvr.py40
-rw-r--r--test/test_gbp.py17
-rw-r--r--test/test_geneve.py17
-rw-r--r--test/test_gre.py23
-rw-r--r--test/test_gtpu.py17
-rw-r--r--test/test_interface_crud.py6
-rw-r--r--test/test_ip4.py201
-rw-r--r--test/test_ip4_vrf_multi_instance.py20
-rw-r--r--test/test_ip6.py167
-rw-r--r--test/test_ip6_vrf_multi_instance.py33
-rw-r--r--test/test_ip_ecmp.py21
-rw-r--r--test/test_ip_mcast.py33
-rw-r--r--test/test_ipip.py10
-rw-r--r--test/test_ipsec_ah.py3
-rw-r--r--test/test_ipsec_esp.py3
-rw-r--r--test/test_ipsec_nat.py3
-rw-r--r--test/test_ipsec_tun_if_esp.py44
-rw-r--r--test/test_l3xc.py29
-rw-r--r--test/test_lb.py20
-rw-r--r--test/test_map.py20
-rw-r--r--test/test_memif.py13
-rw-r--r--test/test_mpls.py144
-rw-r--r--test/test_mtu.py2
-rw-r--r--test/test_nat.py212
-rw-r--r--test/test_neighbor.py14
-rw-r--r--test/test_p2p_ethernet.py34
-rw-r--r--test/test_punt.py3
-rw-r--r--test/test_qos.py10
-rw-r--r--test/test_reassembly.py9
-rw-r--r--test/test_sixrd.py10
-rw-r--r--test/test_srv6.py64
-rw-r--r--test/test_srv6_ad.py9
-rwxr-xr-xtest/test_srv6_as.py14
-rw-r--r--test/test_udp.py65
-rw-r--r--test/test_vcl.py12
-rw-r--r--test/test_vxlan.py21
-rw-r--r--test/test_vxlan6.py13
-rw-r--r--test/test_vxlan_gbp.py17
-rw-r--r--test/test_vxlan_gpe.py20
-rw-r--r--test/vpp_bier.py53
-rw-r--r--test/vpp_interface.py13
-rw-r--r--test/vpp_ip.py79
-rw-r--r--test/vpp_ip_route.py588
-rw-r--r--test/vpp_memif.py28
-rw-r--r--test/vpp_mpls_tunnel_interface.py51
-rw-r--r--test/vpp_papi_provider.py237
133 files changed, 4807 insertions, 5005 deletions
diff --git a/extras/vom/vom/api_types.cpp b/extras/vom/vom/api_types.cpp
index ea75d7fd8ee..721034fc810 100644
--- a/extras/vom/vom/api_types.cpp
+++ b/extras/vom/vom/api_types.cpp
@@ -45,6 +45,11 @@ from_api(vapi_enum_ip_neighbor_flags f)
return out;
}
+invalid_decode::invalid_decode(const std::string reason)
+ : reason(reason)
+{
+}
+
void
to_api(const boost::asio::ip::address_v4& a, vapi_type_ip4_address& v)
{
@@ -82,6 +87,16 @@ to_api(const ip_address_t& a,
}
}
+void
+to_api(const ip_address_t& a, vapi_union_address_union& u)
+{
+ if (a.is_v4()) {
+ memcpy(u.ip4, a.to_v4().to_bytes().data(), 4);
+ } else {
+ memcpy(u.ip6, a.to_v6().to_bytes().data(), 16);
+ }
+}
+
boost::asio::ip::address_v6
from_api(const vapi_type_ip6_address& v)
{
@@ -123,6 +138,26 @@ from_api(const vapi_type_address& v)
}
ip_address_t
+from_api(const vapi_union_address_union& u, vapi_enum_fib_path_nh_proto proto)
+{
+ boost::asio::ip::address addr;
+
+ if (FIB_API_PATH_NH_PROTO_IP6 == proto) {
+ std::array<uint8_t, 16> a;
+ std::copy(u.ip6, u.ip6 + 16, std::begin(a));
+ boost::asio::ip::address_v6 v6(a);
+ addr = v6;
+ } else if (FIB_API_PATH_NH_PROTO_IP4 == proto) {
+ std::array<uint8_t, 4> a;
+ std::copy(u.ip6, u.ip6 + 4, std::begin(a));
+ boost::asio::ip::address_v4 v4(a);
+ addr = v4;
+ }
+
+ return addr;
+}
+
+ip_address_t
from_api(const vapi_union_address_union& u, vapi_enum_address_family af)
{
boost::asio::ip::address addr;
@@ -187,7 +222,42 @@ to_api(const route::mprefix_t& p)
v.af = af;
return v;
}
-};
+
+vapi_enum_fib_path_nh_proto
+to_api(const nh_proto_t& p)
+{
+ if (p == nh_proto_t::IPV4) {
+ return FIB_API_PATH_NH_PROTO_IP4;
+ } else if (p == nh_proto_t::IPV6) {
+ return FIB_API_PATH_NH_PROTO_IP6;
+ } else if (p == nh_proto_t::ETHERNET) {
+ return FIB_API_PATH_NH_PROTO_ETHERNET;
+ } else if (p == nh_proto_t::MPLS) {
+ return FIB_API_PATH_NH_PROTO_MPLS;
+ }
+
+ return FIB_API_PATH_NH_PROTO_IP4;
+}
+const nh_proto_t&
+from_api(vapi_enum_fib_path_nh_proto p)
+{
+ switch (p) {
+ case FIB_API_PATH_NH_PROTO_IP4:
+ return nh_proto_t::IPV4;
+ case FIB_API_PATH_NH_PROTO_IP6:
+ return nh_proto_t::IPV6;
+ case FIB_API_PATH_NH_PROTO_ETHERNET:
+ return nh_proto_t::ETHERNET;
+ case FIB_API_PATH_NH_PROTO_MPLS:
+ return nh_proto_t::MPLS;
+ case FIB_API_PATH_NH_PROTO_BIER:
+ break;
+ }
+
+ return nh_proto_t::IPV4;
+}
+
+}; // VOM
/*
* fd.io coding-style-patch-verification: ON
diff --git a/extras/vom/vom/api_types.hpp b/extras/vom/vom/api_types.hpp
index 789bbb19401..b026ba38db2 100644
--- a/extras/vom/vom/api_types.hpp
+++ b/extras/vom/vom/api_types.hpp
@@ -22,6 +22,12 @@
namespace VOM {
+struct invalid_decode
+{
+ invalid_decode(const std::string reason);
+ const std::string reason;
+};
+
typedef boost::asio::ip::address ip_address_t;
vapi_enum_ip_neighbor_flags to_api(const neighbour::flags_t& f);
@@ -33,12 +39,15 @@ void to_api(const boost::asio::ip::address_v6& a, vapi_type_ip6_address& v);
void to_api(const boost::asio::ip::address& a,
vapi_union_address_union& u,
vapi_enum_address_family& af);
+void to_api(const boost::asio::ip::address& a, vapi_union_address_union& u);
boost::asio::ip::address_v4 from_api(const vapi_type_ip4_address& v);
boost::asio::ip::address_v6 from_api(const vapi_type_ip6_address& v);
ip_address_t from_api(const vapi_type_address& v);
ip_address_t from_api(const vapi_union_address_union& u,
vapi_enum_address_family af);
+ip_address_t from_api(const vapi_union_address_union& u,
+ vapi_enum_fib_path_nh_proto proto);
void to_api(const mac_address_t& a, vapi_type_mac_address& m);
@@ -49,7 +58,11 @@ route::mprefix_t from_api(const vapi_type_mprefix&);
vapi_type_prefix to_api(const route::prefix_t&);
vapi_type_mprefix to_api(const route::mprefix_t&);
-};
+
+vapi_enum_fib_path_nh_proto to_api(const nh_proto_t&);
+const nh_proto_t& from_api(vapi_enum_fib_path_nh_proto);
+
+}; // VOM
/*
* fd.io coding-style-patch-verification: ON
diff --git a/extras/vom/vom/interface.cpp b/extras/vom/vom/interface.cpp
index 40f960730d7..1e27d42c7f4 100644
--- a/extras/vom/vom/interface.cpp
+++ b/extras/vom/vom/interface.cpp
@@ -14,6 +14,7 @@
*/
#include "vom/interface.hpp"
+#include "vom/api_types.hpp"
#include "vom/bond_group_binding.hpp"
#include "vom/bond_group_binding_cmds.hpp"
#include "vom/bond_interface_cmds.hpp"
@@ -666,8 +667,7 @@ interface::event_handler::handle_populate(const client_db::key_t& key)
for (auto& l3_record : *dcmd) {
auto& payload = l3_record.get_payload();
- const route::prefix_t pfx(payload.is_ipv6, payload.ip,
- payload.prefix_length);
+ const route::prefix_t pfx = from_api(payload.prefix);
VOM_LOG(log_level_t::DEBUG) << "dump: " << pfx.to_string();
diff --git a/extras/vom/vom/mroute_cmds.cpp b/extras/vom/vom/mroute_cmds.cpp
index 232d786abeb..2f4dd6e1a86 100644
--- a/extras/vom/vom/mroute_cmds.cpp
+++ b/extras/vom/vom/mroute_cmds.cpp
@@ -45,18 +45,17 @@ update_cmd::operator==(const update_cmd& other) const
rc_t
update_cmd::issue(connection& con)
{
- msg_t req(con.ctx(), std::ref(*this));
+ msg_t req(con.ctx(), 1, std::ref(*this));
auto& payload = req.get_request().get_payload();
- payload.table_id = m_id;
payload.is_add = 1;
- m_mprefix.to_vpp(&payload.is_ipv6, payload.src_address, payload.grp_address,
- &payload.grp_address_length);
+ payload.route.table_id = m_id;
+ payload.route.prefix = to_api(m_mprefix);
- to_vpp(m_path, payload);
- payload.itf_flags = m_flags.value();
+ to_api(m_path, payload.route.paths[0].path);
+ payload.route.paths[0].itf_flags = to_api(m_flags);
VAPI_CALL(req.execute());
@@ -96,17 +95,16 @@ delete_cmd::operator==(const delete_cmd& other) const
rc_t
delete_cmd::issue(connection& con)
{
- msg_t req(con.ctx(), std::ref(*this));
+ msg_t req(con.ctx(), 1, std::ref(*this));
auto& payload = req.get_request().get_payload();
- payload.table_id = m_id;
- payload.is_add = 0;
+ payload.is_add = 1;
- m_mprefix.to_vpp(&payload.is_ipv6, payload.grp_address, payload.src_address,
- &payload.grp_address_length);
+ payload.route.table_id = m_id;
+ payload.route.prefix = to_api(m_mprefix);
- to_vpp(m_path, payload);
- payload.itf_flags = m_flags.value();
+ to_api(m_path, payload.route.paths[0].path);
+ payload.route.paths[0].itf_flags = to_api(m_flags);
VAPI_CALL(req.execute());
@@ -126,48 +124,27 @@ delete_cmd::to_string() const
return (s.str());
}
-dump_v4_cmd::dump_v4_cmd()
+dump_cmd::dump_cmd(route::table_id_t id, const l3_proto_t& proto)
+ : m_id(id)
+ , m_proto(proto)
{
}
bool
-dump_v4_cmd::operator==(const dump_v4_cmd& other) const
+dump_cmd::operator==(const dump_cmd& other) const
{
return (true);
}
rc_t
-dump_v4_cmd::issue(connection& con)
+dump_cmd::issue(connection& con)
{
m_dump.reset(new msg_t(con.ctx(), std::ref(*this)));
- VAPI_CALL(m_dump->execute());
-
- wait();
-
- return rc_t::OK;
-}
-
-std::string
-dump_v4_cmd::to_string() const
-{
- return ("ip-mroute-v4-dump");
-}
-
-dump_v6_cmd::dump_v6_cmd()
-{
-}
-
-bool
-dump_v6_cmd::operator==(const dump_v6_cmd& other) const
-{
- return (true);
-}
+ auto& payload = m_dump->get_request().get_payload();
-rc_t
-dump_v6_cmd::issue(connection& con)
-{
- m_dump.reset(new msg_t(con.ctx(), std::ref(*this)));
+ payload.table.table_id = m_id;
+ payload.table.is_ip6 = m_proto.is_ipv6();
VAPI_CALL(m_dump->execute());
@@ -177,10 +154,14 @@ dump_v6_cmd::issue(connection& con)
}
std::string
-dump_v6_cmd::to_string() const
+dump_cmd::to_string() const
{
- return ("ip-mroute-v6-dump");
+ std::ostringstream s;
+ s << "ip-mroute-dump: id:" << m_id << " proto:" << m_proto.to_string();
+
+ return (s.str());
}
+
} // namespace ip_mroute_cmds
} // namespace mroute
} // namespace vom
diff --git a/extras/vom/vom/mroute_cmds.hpp b/extras/vom/vom/mroute_cmds.hpp
index b8f18f6b45b..3392d4a3b2b 100644
--- a/extras/vom/vom/mroute_cmds.hpp
+++ b/extras/vom/vom/mroute_cmds.hpp
@@ -103,14 +103,13 @@ private:
/**
* A cmd class that Dumps ipv4 fib
*/
-class dump_v4_cmd : public VOM::dump_cmd<vapi::Ip_mfib_dump>
+class dump_cmd : public VOM::dump_cmd<vapi::Ip_mroute_dump>
{
public:
/**
* Constructor
*/
- dump_v4_cmd();
- dump_v4_cmd(const dump_cmd& d);
+ dump_cmd(route::table_id_t id, const l3_proto_t& proto);
/**
* Issue the command to VPP/HW
@@ -124,46 +123,15 @@ public:
/**
* Comparison operator - only used for UT
*/
- bool operator==(const dump_v4_cmd& i) const;
-
-private:
- /**
- * HW reutrn code
- */
- HW::item<bool> item;
-};
-
-/**
- * A cmd class that Dumps ipv6 fib
- */
-class dump_v6_cmd : public VOM::dump_cmd<vapi::Ip6_mfib_dump>
-{
-public:
- /**
- * Constructor
- */
- dump_v6_cmd();
- dump_v6_cmd(const dump_cmd& d);
-
- /**
- * Issue the command to VPP/HW
- */
- rc_t issue(connection& con);
- /**
- * convert to string format for debug purposes
- */
- std::string to_string() const;
-
- /**
- * Comparison operator - only used for UT
- */
- bool operator==(const dump_v6_cmd& i) const;
+ bool operator==(const dump_cmd& i) const;
private:
/**
* HW reutrn code
*/
HW::item<bool> item;
+ route::table_id_t m_id;
+ const l3_proto_t& m_proto;
};
}; // namespace ip_mroute_cmds
diff --git a/extras/vom/vom/neighbour.hpp b/extras/vom/vom/neighbour.hpp
index 4e074bf7f45..5b1f9c12847 100644
--- a/extras/vom/vom/neighbour.hpp
+++ b/extras/vom/vom/neighbour.hpp
@@ -181,7 +181,7 @@ private:
/**
* HW configuration for the result of creating the bridge_domain
*/
- HW::item<bool> m_hw;
+ HW::item<handle_t> m_hw;
/**
* The bridge_domain domain the bridge_domain is in.
diff --git a/extras/vom/vom/neighbour_cmds.cpp b/extras/vom/vom/neighbour_cmds.cpp
index 5f9e180b01d..758147c49c5 100644
--- a/extras/vom/vom/neighbour_cmds.cpp
+++ b/extras/vom/vom/neighbour_cmds.cpp
@@ -18,12 +18,12 @@
namespace VOM {
namespace neighbour_cmds {
-create_cmd::create_cmd(HW::item<bool>& item,
+create_cmd::create_cmd(HW::item<handle_t>& item,
handle_t itf,
const mac_address_t& mac,
const boost::asio::ip::address& ip_addr,
const neighbour::flags_t& flags)
- : rpc_cmd(item)
+ : srpc_cmd(item)
, m_itf(itf)
, m_mac(mac)
, m_ip_addr(ip_addr)
@@ -67,12 +67,12 @@ create_cmd::to_string() const
return (s.str());
}
-delete_cmd::delete_cmd(HW::item<bool>& item,
+delete_cmd::delete_cmd(HW::item<handle_t>& item,
handle_t itf,
const mac_address_t& mac,
const boost::asio::ip::address& ip_addr,
const neighbour::flags_t& flags)
- : rpc_cmd(item)
+ : srpc_cmd(item)
, m_itf(itf)
, m_mac(mac)
, m_ip_addr(ip_addr)
diff --git a/extras/vom/vom/neighbour_cmds.hpp b/extras/vom/vom/neighbour_cmds.hpp
index d43a6fe8f3b..ac1523d863d 100644
--- a/extras/vom/vom/neighbour_cmds.hpp
+++ b/extras/vom/vom/neighbour_cmds.hpp
@@ -17,6 +17,7 @@
#define __VOM_NEIGHBOUR_CMDS_H__
#include "vom/dump_cmd.hpp"
+#include "vom/srpc_cmd.hpp"
#include "neighbour.hpp"
#include <vapi/ip.api.vapi.hpp>
@@ -27,14 +28,13 @@ namespace neighbour_cmds {
/**
* A command class that creates or updates the bridge domain ARP Entry
*/
-class create_cmd : public rpc_cmd<HW::item<bool>,
- vapi::Ip_neighbor_add_del>
+class create_cmd : public srpc_cmd<vapi::Ip_neighbor_add_del>
{
public:
/**
* Constructor
*/
- create_cmd(HW::item<bool>& item,
+ create_cmd(HW::item<handle_t>& item,
handle_t itf,
const mac_address_t& mac,
const boost::asio::ip::address& ip_addr,
@@ -65,14 +65,13 @@ private:
/**
* A cmd class that deletes a bridge domain ARP entry
*/
-class delete_cmd : public rpc_cmd<HW::item<bool>,
- vapi::Ip_neighbor_add_del>
+class delete_cmd : public srpc_cmd<vapi::Ip_neighbor_add_del>
{
public:
/**
* Constructor
*/
- delete_cmd(HW::item<bool>& item,
+ delete_cmd(HW::item<handle_t>& item,
handle_t itf,
const mac_address_t& mac,
const boost::asio::ip::address& ip_addr,
diff --git a/extras/vom/vom/prefix.cpp b/extras/vom/vom/prefix.cpp
index fdad67998d1..a6305997f53 100644
--- a/extras/vom/vom/prefix.cpp
+++ b/extras/vom/vom/prefix.cpp
@@ -32,13 +32,13 @@ l3_proto_t::l3_proto_t(int v, const std::string& s)
}
bool
-l3_proto_t::is_ipv6()
+l3_proto_t::is_ipv6() const
{
return (*this == IPV6);
}
bool
-l3_proto_t::is_ipv4()
+l3_proto_t::is_ipv4() const
{
return (*this == IPV4);
}
@@ -492,15 +492,16 @@ route::mprefix_t::mask_width() const
return (m_len);
}
-void
-route::mprefix_t::to_vpp(uint8_t* is_ip6,
- uint8_t* saddr,
- uint8_t* gaddr,
- uint16_t* len) const
+l3_proto_t
+route::mprefix_t::l3_proto() const
{
- *len = m_len;
- to_bytes(m_saddr, is_ip6, saddr);
- to_bytes(m_gaddr, is_ip6, gaddr);
+ if (m_gaddr.is_v6()) {
+ return (l3_proto_t::IPV6);
+ } else {
+ return (l3_proto_t::IPV4);
+ }
+
+ return (l3_proto_t::IPV4);
}
route::mprefix_t&
diff --git a/extras/vom/vom/prefix.hpp b/extras/vom/vom/prefix.hpp
index 1b6a06874d3..b75dc66f86e 100644
--- a/extras/vom/vom/prefix.hpp
+++ b/extras/vom/vom/prefix.hpp
@@ -56,8 +56,8 @@ public:
const static l3_proto_t IPV6;
const static l3_proto_t MPLS;
- bool is_ipv4();
- bool is_ipv6();
+ bool is_ipv4() const;
+ bool is_ipv6() const;
static const l3_proto_t& from_address(const boost::asio::ip::address& addr);
@@ -233,8 +233,8 @@ public:
mprefix_t(const boost::asio::ip::address& gaddr, uint8_t len);
/**
-*Constructor for (S,G)
-*/
+ *Constructor for (S,G)
+ */
mprefix_t(const boost::asio::ip::address& saddr,
const boost::asio::ip::address& gaddr,
uint16_t len);
@@ -300,11 +300,6 @@ public:
*/
l3_proto_t l3_proto() const;
- void to_vpp(uint8_t* is_ip6,
- uint8_t* saddr,
- uint8_t* gaddr,
- uint16_t* len) const;
-
private:
/**
* The address
diff --git a/extras/vom/vom/route.cpp b/extras/vom/vom/route.cpp
index b136c251b94..f627629e2c7 100644
--- a/extras/vom/vom/route.cpp
+++ b/extras/vom/vom/route.cpp
@@ -54,14 +54,6 @@ itf_flags_t::itf_flags_t(int v, const std::string& s)
: enum_base<itf_flags_t>(v, s)
{
}
-const itf_flags_t&
-itf_flags_t::from_vpp(uint32_t val)
-{
- if (itf_flags_t::ACCEPT == (int)val)
- return itf_flags_t::ACCEPT;
- else
- return itf_flags_t::FORWARD;
-}
path::path(special_t special, const nh_proto_t& proto)
: m_type(special)
@@ -339,9 +331,8 @@ void
ip_route::sweep()
{
if (m_hw) {
- for (auto& p : m_paths)
- HW::enqueue(
- new ip_route_cmds::delete_cmd(m_hw, m_rd->table_id(), m_prefix, p));
+ HW::enqueue(
+ new ip_route_cmds::delete_cmd(m_hw, m_rd->table_id(), m_prefix));
}
HW::write();
}
@@ -350,9 +341,8 @@ void
ip_route::replay()
{
if (m_hw) {
- for (auto& p : m_paths)
- HW::enqueue(
- new ip_route_cmds::update_cmd(m_hw, m_rd->table_id(), m_prefix, p));
+ HW::enqueue(
+ new ip_route_cmds::update_cmd(m_hw, m_rd->table_id(), m_prefix, m_paths));
}
}
std::string
@@ -369,38 +359,9 @@ ip_route::to_string() const
void
ip_route::update(const ip_route& r)
{
- if (rc_t::OK != m_hw.rc()) {
- /*
- * route not yet installed. install each of the desired paths
- */
- m_paths = r.m_paths;
-
- for (auto& p : m_paths)
- HW::enqueue(
- new ip_route_cmds::update_cmd(m_hw, m_rd->table_id(), m_prefix, p));
- } else {
- /*
- * add each path that is not installed yet and remove each that is no longer
- * wanted
- */
- path_list_t to_add;
- set_difference(r.m_paths.begin(), r.m_paths.end(), m_paths.begin(),
- m_paths.end(), std::inserter(to_add, to_add.begin()));
-
- for (auto& p : to_add)
- HW::enqueue(
- new ip_route_cmds::update_cmd(m_hw, m_rd->table_id(), m_prefix, p));
-
- path_list_t to_del;
- set_difference(m_paths.begin(), m_paths.end(), r.m_paths.begin(),
- r.m_paths.end(), std::inserter(to_del, to_del.begin()));
-
- for (auto& p : to_del)
- HW::enqueue(
- new ip_route_cmds::delete_cmd(m_hw, m_rd->table_id(), m_prefix, p));
-
- m_paths = r.m_paths;
- }
+ m_paths = r.m_paths;
+ HW::enqueue(
+ new ip_route_cmds::update_cmd(m_hw, m_rd->table_id(), m_prefix, m_paths));
}
std::shared_ptr<ip_route>
@@ -442,65 +403,84 @@ ip_route::event_handler::handle_replay()
void
ip_route::event_handler::handle_populate(const client_db::key_t& key)
{
- std::shared_ptr<ip_route_cmds::dump_v4_cmd> cmd_v4 =
- std::make_shared<ip_route_cmds::dump_v4_cmd>();
- std::shared_ptr<ip_route_cmds::dump_v6_cmd> cmd_v6 =
- std::make_shared<ip_route_cmds::dump_v6_cmd>();
-
- HW::enqueue(cmd_v4);
- HW::enqueue(cmd_v6);
- HW::write();
-
- for (auto& record : *cmd_v4) {
- auto& payload = record.get_payload();
-
- prefix_t pfx(0, payload.address, payload.address_length);
-
- /**
- * populating the route domain here
- */
- route_domain rd_temp(payload.table_id);
- std::shared_ptr<route_domain> rd = route_domain::find(payload.table_id);
- if (!rd) {
- OM::commit(key, rd_temp);
- }
- ip_route ip_r(rd_temp, pfx);
-
- for (unsigned int i = 0; i < payload.count; i++) {
- ip_r.add(from_vpp(payload.path[i], nh_proto_t::IPV4));
- }
- VOM_LOG(log_level_t::DEBUG) << "ip-route-dump: " << ip_r.to_string();
-
- /*
- * Write each of the discovered interfaces into the OM,
- * but disable the HW Command q whilst we do, so that no
- * commands are sent to VPP
- */
- OM::commit(key, ip_r);
- }
-
- for (auto& record : *cmd_v6) {
- auto& payload = record.get_payload();
-
- prefix_t pfx(1, payload.address, payload.address_length);
- route_domain rd_temp(payload.table_id);
- std::shared_ptr<route_domain> rd = route_domain::find(payload.table_id);
- if (!rd) {
- OM::commit(key, rd_temp);
- }
- ip_route ip_r(rd_temp, pfx);
-
- for (unsigned int i = 0; i < payload.count; i++) {
- ip_r.add(from_vpp(payload.path[i], nh_proto_t::IPV6));
+ // for each known route-domain
+ auto it = route_domain::cbegin();
+
+ while (it != route_domain::cend()) {
+
+ std::vector<l3_proto_t> l3s = { l3_proto_t::IPV4, l3_proto_t::IPV4 };
+
+ for (auto l3 : l3s) {
+ std::shared_ptr<ip_route_cmds::dump_cmd> cmd =
+ std::make_shared<ip_route_cmds::dump_cmd>(it->second.lock()->table_id(),
+ l3);
+
+ HW::enqueue(cmd);
+ HW::write();
+
+ for (auto& record : *cmd) {
+ auto& payload = record.get_payload();
+
+ std::shared_ptr<route_domain> rd =
+ route_domain::find(payload.route.table_id);
+
+ if (!rd) {
+ continue;
+ }
+
+ prefix_t pfx = from_api(payload.route.prefix);
+ ip_route ip_r(*rd, pfx);
+
+ for (unsigned int i = 0; i < payload.route.n_paths; i++) {
+ ip_r.add(from_api(payload.route.paths[i]));
+
+ // vapi_type_fib_path& p = payload.route.paths[i];
+ /* if (p.is_local) { */
+ /* path path_v4(path::special_t::LOCAL); */
+ /* ip_r.add(path_v4); */
+ /* } */
+ /* } else if (p.is_drop) { */
+ /* path path_v4(path::special_t::DROP); */
+ /* ip_r.add(path_v4); */
+ /* } else if (p.is_unreach) { */
+ /* path path_v4(path::special_t::UNREACH); */
+ /* ip_r.add(path_v4); */
+ /* } else if (p.is_prohibit) { */
+ /* path path_v4(path::special_t::PROHIBIT); */
+ /* ip_r.add(path_v4); */
+ /* } else { */
+ /* boost::asio::ip::address address = from_bytes(0, p.next_hop);
+ */
+ /* std::shared_ptr<interface> itf =
+ * interface::find(p.sw_if_index); */
+ /* if (itf) { */
+ /* if (p.is_dvr) { */
+ /* path path_v4(*itf, nh_proto_t::IPV4,
+ * route::path::flags_t::DVR,
+ */
+ /* p.weight, p.preference); */
+ /* ip_r.add(path_v4); */
+ /* } else { */
+ /* path path_v4(address, *itf, p.weight, p.preference); */
+ /* ip_r.add(path_v4); */
+ /* } */
+ /* } else { */
+ /* path path_v4(rd_temp, address, p.weight, p.preference); */
+ /* ip_r.add(path_v4); */
+ /* } */
+ /* } */
+ }
+
+ VOM_LOG(log_level_t::DEBUG) << "ip-route-dump: " << ip_r.to_string();
+
+ /*
+ * Write each of the discovered interfaces into the OM,
+ * but disable the HW Command q whilst we do, so that no
+ * commands are sent to VPP
+ */
+ OM::commit(key, ip_r);
+ }
}
- VOM_LOG(log_level_t::DEBUG) << "ip-route-dump: " << ip_r.to_string();
-
- /*
- * Write each of the discovered interfaces into the OM,
- * but disable the HW Command q whilst we do, so that no
- * commands are sent to VPP
- */
- OM::commit(key, ip_r);
}
}
@@ -645,76 +625,51 @@ ip_mroute::event_handler::handle_replay()
void
ip_mroute::event_handler::handle_populate(const client_db::key_t& key)
{
- std::shared_ptr<ip_mroute_cmds::dump_v4_cmd> cmd_v4 =
- std::make_shared<ip_mroute_cmds::dump_v4_cmd>();
- std::shared_ptr<ip_mroute_cmds::dump_v6_cmd> cmd_v6 =
- std::make_shared<ip_mroute_cmds::dump_v6_cmd>();
+ // for each known route-domain
+ auto it = route_domain::cbegin();
- HW::enqueue(cmd_v4);
- HW::enqueue(cmd_v6);
- HW::write();
+ while (it != route_domain::cend()) {
- VOM_LOG(log_level_t::INFO) << "ip-mroute-dump: ";
+ std::vector<l3_proto_t> l3s = { l3_proto_t::IPV4, l3_proto_t::IPV4 };
- for (auto& record : *cmd_v4) {
- auto& payload = record.get_payload();
+ for (auto l3 : l3s) {
+ std::shared_ptr<ip_mroute_cmds::dump_cmd> cmd =
+ std::make_shared<ip_mroute_cmds::dump_cmd>(
+ it->second.lock()->table_id(), l3);
- ip_address_t gaddr = from_bytes(0, payload.grp_address);
- ip_address_t saddr = from_bytes(0, payload.src_address);
- mprefix_t pfx(saddr, gaddr, payload.address_length);
+ HW::enqueue(cmd);
+ HW::write();
- /**
- * populating the route domain here
- */
- route_domain rd_temp(payload.table_id);
- std::shared_ptr<route_domain> rd = route_domain::find(payload.table_id);
- if (!rd) {
- OM::commit(key, rd_temp);
- }
- ip_mroute ip_r(rd_temp, pfx);
+ VOM_LOG(log_level_t::DEBUG) << "ip-mroute-dump: ";
- for (unsigned int i = 0; i < payload.count; i++) {
- vapi_type_mfib_path& p = payload.path[i];
- ip_r.add(from_vpp(p.path, nh_proto_t::IPV4),
- itf_flags_t::from_vpp(p.itf_flags));
- }
- VOM_LOG(log_level_t::DEBUG) << "ip-mroute-dump: " << ip_r.to_string();
-
- /*
- * Write each of the discovered interfaces into the OM,
- * but disable the HW Command q whilst we do, so that no
- * commands are sent to VPP
- */
- OM::commit(key, ip_r);
- }
+ for (auto& record : *cmd) {
+ auto& payload = record.get_payload();
- for (auto& record : *cmd_v6) {
- auto& payload = record.get_payload();
+ std::shared_ptr<route_domain> rd =
+ route_domain::find(payload.route.table_id);
- ip_address_t gaddr = from_bytes(1, payload.grp_address);
- ip_address_t saddr = from_bytes(1, payload.src_address);
- mprefix_t pfx(saddr, gaddr, payload.address_length);
+ if (!rd) {
+ continue;
+ }
- route_domain rd_temp(payload.table_id);
- std::shared_ptr<route_domain> rd = route_domain::find(payload.table_id);
- if (!rd) {
- OM::commit(key, rd_temp);
- }
- ip_mroute ip_r(rd_temp, pfx);
+ mprefix_t pfx = from_api(payload.route.prefix);
+ ip_mroute ip_r(*rd, pfx);
+
+ for (unsigned int i = 0; i < payload.route.n_paths; i++) {
+ ip_r.add(from_api(payload.route.paths[i].path),
+ from_api(payload.route.paths[i].itf_flags));
+ }
+
+ VOM_LOG(log_level_t::DEBUG) << "ip-mroute-dump: " << ip_r.to_string();
- for (unsigned int i = 0; i < payload.count; i++) {
- vapi_type_mfib_path& p = payload.path[i];
- ip_r.add(from_vpp(p.path, nh_proto_t::IPV6),
- itf_flags_t::from_vpp(p.itf_flags));
+ /*
+ * Write each of the discovered interfaces into the OM,
+ * but disable the HW Command q whilst we do, so that no
+ * commands are sent to VPP
+ */
+ OM::commit(key, ip_r);
+ }
}
- VOM_LOG(log_level_t::DEBUG) << "ip-mroute-dump: " << ip_r.to_string();
-
- /*
- * Write each of the discovered interfaces into the OM,
- * but disable the HW Command q whilst we do, so that no
- * commands are sent to VPP
- */
- OM::commit(key, ip_r);
}
}
diff --git a/extras/vom/vom/route.hpp b/extras/vom/vom/route.hpp
index 746ceb29ebc..8b68015f7ba 100644
--- a/extras/vom/vom/route.hpp
+++ b/extras/vom/vom/route.hpp
@@ -218,8 +218,6 @@ public:
*/
const static itf_flags_t FORWARD;
- static const itf_flags_t& from_vpp(uint32_t val);
-
private:
/**
* Private constructor taking the value and the string name
@@ -398,7 +396,7 @@ private:
/**
* HW configuration for the result of creating the route
*/
- HW::item<bool> m_hw;
+ HW::item<handle_t> m_hw;
/**
* The route domain the route is in.
diff --git a/extras/vom/vom/route_api_types.cpp b/extras/vom/vom/route_api_types.cpp
index d2234796beb..31acc84b6fb 100644
--- a/extras/vom/vom/route_api_types.cpp
+++ b/extras/vom/vom/route_api_types.cpp
@@ -13,111 +13,121 @@
* limitations under the License.
*/
+#include <vom/api_types.hpp>
#include <vom/route.hpp>
#include <vom/route_api_types.hpp>
namespace VOM {
+const route::itf_flags_t&
+from_api(vapi_enum_mfib_itf_flags val)
+{
+ if (route::itf_flags_t::ACCEPT == val)
+ return route::itf_flags_t::ACCEPT;
+ else
+ return route::itf_flags_t::FORWARD;
+}
+
+vapi_enum_mfib_itf_flags
+to_api(const route::itf_flags_t& in)
+{
+ vapi_enum_mfib_itf_flags out = MFIB_API_ITF_FLAG_NONE;
+
+ if (route::itf_flags_t::ACCEPT & in)
+ out = static_cast<vapi_enum_mfib_itf_flags>(out | MFIB_API_ITF_FLAG_ACCEPT);
+ if (route::itf_flags_t::FORWARD & in)
+ out =
+ static_cast<vapi_enum_mfib_itf_flags>(out | MFIB_API_ITF_FLAG_FORWARD);
+
+ return (out);
+}
+
void
-to_vpp(const route::path& p, vapi_payload_ip_add_del_route& payload)
+to_api(const route::path& p, vapi_type_fib_path& payload)
{
- payload.is_drop = 0;
- payload.is_unreach = 0;
- payload.is_prohibit = 0;
- payload.is_local = 0;
- payload.is_classify = 0;
- payload.is_resolve_host = 0;
- payload.is_resolve_attached = 0;
+ payload.flags = FIB_API_PATH_FLAG_NONE;
+ payload.proto = to_api(p.nh_proto());
+ payload.sw_if_index = ~0;
if (route::path::flags_t::DVR & p.flags()) {
- payload.is_dvr = 1;
- }
-
- if (route::path::special_t::STANDARD == p.type()) {
- uint8_t path_v6;
- to_bytes(p.nh(), &path_v6, payload.next_hop_address);
- payload.next_hop_sw_if_index = 0xffffffff;
+ payload.type = FIB_API_PATH_TYPE_DVR;
+ } else if (route::path::special_t::STANDARD == p.type()) {
+ to_api(p.nh(), payload.nh.address);
if (p.rd()) {
- payload.next_hop_table_id = p.rd()->table_id();
+ payload.table_id = p.rd()->table_id();
}
if (p.itf()) {
- payload.next_hop_sw_if_index = p.itf()->handle().value();
+ payload.sw_if_index = p.itf()->handle().value();
}
} else if (route::path::special_t::DROP == p.type()) {
- payload.is_drop = 1;
+ payload.type = FIB_API_PATH_TYPE_DROP;
} else if (route::path::special_t::UNREACH == p.type()) {
- payload.is_unreach = 1;
+ payload.type = FIB_API_PATH_TYPE_ICMP_UNREACH;
} else if (route::path::special_t::PROHIBIT == p.type()) {
- payload.is_prohibit = 1;
+ payload.type = FIB_API_PATH_TYPE_ICMP_PROHIBIT;
} else if (route::path::special_t::LOCAL == p.type()) {
- payload.is_local = 1;
+ payload.type = FIB_API_PATH_TYPE_LOCAL;
}
- payload.next_hop_weight = p.weight();
- payload.next_hop_preference = p.preference();
- payload.next_hop_via_label = 0x100000;
- payload.classify_table_index = 0;
+
+ payload.weight = p.weight();
+ payload.preference = p.preference();
+ payload.n_labels = 0;
}
-void
-to_vpp(const route::path& p, vapi_payload_ip_mroute_add_del& payload)
+route::path
+from_api(const vapi_type_fib_path& p)
{
- payload.next_hop_afi = p.nh_proto();
-
- if (route::path::special_t::STANDARD == p.type()) {
- uint8_t path_v6;
- to_bytes(p.nh(), &path_v6, payload.nh_address);
+ switch (p.type) {
+ case FIB_API_PATH_TYPE_DVR: {
+ std::shared_ptr<interface> itf = interface::find(p.sw_if_index);
+ if (!itf)
+ throw invalid_decode("fib-path deocde no interface:" +
+ std::to_string(p.sw_if_index));
- if (p.itf()) {
- payload.next_hop_sw_if_index = p.itf()->handle().value();
+ return (route::path(*itf, from_api(p.proto), route::path::flags_t::DVR,
+ p.weight, p.preference));
}
+ case FIB_API_PATH_TYPE_NORMAL: {
+ boost::asio::ip::address address = from_api(p.nh.address, p.proto);
+ std::shared_ptr<interface> itf = interface::find(p.sw_if_index);
+ if (itf) {
+ return (route::path(address, *itf, p.weight, p.preference));
+ } else {
+ std::shared_ptr<route_domain> rd = route_domain::find(p.table_id);
- payload.next_hop_afi = p.nh_proto();
- } else if (route::path::special_t::LOCAL == p.type()) {
- payload.is_local = 1;
- }
-}
+ if (!rd)
+ throw invalid_decode("fib-path deocde no route-domain:" +
+ std::to_string(p.table_id));
-route::path
-from_vpp(const vapi_type_fib_path& p, const nh_proto_t& nhp)
-{
- if (p.is_local) {
- return route::path(route::path::special_t::LOCAL);
- } else if (p.is_drop) {
- return route::path(route::path::special_t::DROP);
- } else if (p.is_unreach) {
- return route::path(route::path::special_t::UNREACH);
- } else if (p.is_prohibit) {
- return route::path(route::path::special_t::PROHIBIT);
- } else {
- boost::asio::ip::address address =
- from_bytes(nh_proto_t::IPV6 == nhp, p.next_hop);
- std::shared_ptr<interface> itf = interface::find(p.sw_if_index);
- if (itf) {
- if (p.is_dvr) {
- return route::path(*itf, nhp, route::path::flags_t::DVR, p.weight,
- p.preference);
- } else {
- return route::path(address, *itf, p.weight, p.preference);
- }
- } else {
- std::shared_ptr<route_domain> rd = route_domain::find(p.table_id);
- if (rd) {
- return route::path(*rd, address, p.weight, p.preference);
+ return (route::path(*rd, address, p.weight, p.preference));
}
}
- }
-
- VOM_LOG(log_level_t::ERROR) << "cannot decode: ";
+ case FIB_API_PATH_TYPE_LOCAL:
+ return (route::path(route::path::special_t::LOCAL));
+ case FIB_API_PATH_TYPE_DROP:
+ return (route::path(route::path::special_t::DROP));
+ case FIB_API_PATH_TYPE_ICMP_UNREACH:
+ return (route::path(route::path::special_t::PROHIBIT));
+ case FIB_API_PATH_TYPE_ICMP_PROHIBIT:
+ return (route::path(route::path::special_t::UNREACH));
- return route::path(route::path::special_t::DROP);
-}
+ case FIB_API_PATH_TYPE_UDP_ENCAP:
+ case FIB_API_PATH_TYPE_BIER_IMP:
+ case FIB_API_PATH_TYPE_SOURCE_LOOKUP:
+ case FIB_API_PATH_TYPE_INTERFACE_RX:
+ case FIB_API_PATH_TYPE_CLASSIFY:
+ // not done yet
+ break;
+ }
+ return (route::path(route::path::special_t::DROP));
};
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "mozilla")
- * End:
- */
+}; // namespace VOM
+ /*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
diff --git a/extras/vom/vom/route_api_types.hpp b/extras/vom/vom/route_api_types.hpp
index 09015485cdb..25d0902cda1 100644
--- a/extras/vom/vom/route_api_types.hpp
+++ b/extras/vom/vom/route_api_types.hpp
@@ -19,10 +19,12 @@
namespace VOM {
-void to_vpp(const route::path& p, vapi_payload_ip_mroute_add_del& payload);
-void to_vpp(const route::path& p, vapi_payload_ip_add_del_route& payload);
+vapi_enum_mfib_itf_flags to_api(const route::itf_flags_t& flags);
+const route::itf_flags_t& from_api(vapi_enum_mfib_itf_flags flags);
-route::path from_vpp(const vapi_type_fib_path& p, const nh_proto_t& nh);
+void to_api(const route::path& p, vapi_type_fib_path& o);
+
+route::path from_api(const vapi_type_fib_path& p);
}; // namespace VOM
diff --git a/extras/vom/vom/route_cmds.cpp b/extras/vom/vom/route_cmds.cpp
index 24233d9e443..fb1fc2f9933 100644
--- a/extras/vom/vom/route_cmds.cpp
+++ b/extras/vom/vom/route_cmds.cpp
@@ -15,6 +15,7 @@
#include <sstream>
+#include <vom/api_types.hpp>
#include <vom/route_api_types.hpp>
#include <vom/route_cmds.hpp>
@@ -22,14 +23,14 @@ namespace VOM {
namespace route {
namespace ip_route_cmds {
-update_cmd::update_cmd(HW::item<bool>& item,
+update_cmd::update_cmd(HW::item<handle_t>& item,
table_id_t id,
const prefix_t& prefix,
- const path& path)
- : rpc_cmd(item)
+ const path_list_t& pl)
+ : srpc_cmd(item)
, m_id(id)
, m_prefix(prefix)
- , m_path(path)
+ , m_pl(pl)
{
}
@@ -37,23 +38,26 @@ bool
update_cmd::operator==(const update_cmd& other) const
{
return ((m_prefix == other.m_prefix) && (m_id == other.m_id) &&
- (m_path == other.m_path));
+ (m_pl == other.m_pl));
}
rc_t
update_cmd::issue(connection& con)
{
- msg_t req(con.ctx(), 0, std::ref(*this));
+ msg_t req(con.ctx(), m_pl.size(), std::ref(*this));
auto& payload = req.get_request().get_payload();
- payload.table_id = m_id;
+ payload.route.table_id = m_id;
payload.is_add = 1;
payload.is_multipath = 1;
- m_prefix.to_vpp(&payload.is_ipv6, payload.dst_address,
- &payload.dst_address_length);
- to_vpp(m_path, payload);
+ payload.route.table_id = m_id;
+ payload.route.prefix = to_api(m_prefix);
+
+ uint32_t ii = 0;
+ for (auto& p : m_pl)
+ to_api(p, payload.route.paths[ii++]);
VAPI_CALL(req.execute());
@@ -65,27 +69,26 @@ update_cmd::to_string() const
{
std::ostringstream s;
s << "ip-route-create: " << m_hw_item.to_string() << " table-id:" << m_id
- << " prefix:" << m_prefix.to_string() << " paths:" << m_path.to_string();
+ << " prefix:" << m_prefix.to_string() << " paths:";
+ for (auto p : m_pl)
+ s << p.to_string() << " ";
return (s.str());
}
-delete_cmd::delete_cmd(HW::item<bool>& item,
+delete_cmd::delete_cmd(HW::item<handle_t>& item,
table_id_t id,
- const prefix_t& prefix,
- const path& path)
+ const prefix_t& prefix)
: rpc_cmd(item)
, m_id(id)
, m_prefix(prefix)
- , m_path(path)
{
}
bool
delete_cmd::operator==(const delete_cmd& other) const
{
- return ((m_prefix == other.m_prefix) && (m_id == other.m_id) &&
- (m_path == other.m_path));
+ return ((m_prefix == other.m_prefix) && (m_id == other.m_id));
}
rc_t
@@ -94,12 +97,12 @@ delete_cmd::issue(connection& con)
msg_t req(con.ctx(), 0, std::ref(*this));
auto& payload = req.get_request().get_payload();
- payload.table_id = m_id;
+ payload.route.table_id = m_id;
payload.is_add = 0;
+ payload.is_multipath = 0;
- m_prefix.to_vpp(&payload.is_ipv6, payload.dst_address,
- &payload.dst_address_length);
- to_vpp(m_path, payload);
+ payload.route.table_id = m_id;
+ payload.route.prefix = to_api(m_prefix);
VAPI_CALL(req.execute());
@@ -114,53 +117,32 @@ delete_cmd::to_string() const
{
std::ostringstream s;
s << "ip-route-delete: " << m_hw_item.to_string() << " id:" << m_id
- << " prefix:" << m_prefix.to_string() << " paths:" << m_path.to_string();
+ << " prefix:" << m_prefix.to_string();
return (s.str());
}
-dump_v4_cmd::dump_v4_cmd()
+dump_cmd::dump_cmd(route::table_id_t id, const l3_proto_t& proto)
+ : m_id(id)
+ , m_proto(proto)
{
}
bool
-dump_v4_cmd::operator==(const dump_v4_cmd& other) const
+dump_cmd::operator==(const dump_cmd& other) const
{
return (true);
}
rc_t
-dump_v4_cmd::issue(connection& con)
+dump_cmd::issue(connection& con)
{
m_dump.reset(new msg_t(con.ctx(), std::ref(*this)));
- VAPI_CALL(m_dump->execute());
+ auto& payload = m_dump->get_request().get_payload();
- wait();
-
- return rc_t::OK;
-}
-
-std::string
-dump_v4_cmd::to_string() const
-{
- return ("ip-route-v4-dump");
-}
-
-dump_v6_cmd::dump_v6_cmd()
-{
-}
-
-bool
-dump_v6_cmd::operator==(const dump_v6_cmd& other) const
-{
- return (true);
-}
-
-rc_t
-dump_v6_cmd::issue(connection& con)
-{
- m_dump.reset(new msg_t(con.ctx(), std::ref(*this)));
+ payload.table.table_id = m_id;
+ payload.table.is_ip6 = m_proto.is_ipv6();
VAPI_CALL(m_dump->execute());
@@ -170,17 +152,19 @@ dump_v6_cmd::issue(connection& con)
}
std::string
-dump_v6_cmd::to_string() const
+dump_cmd::to_string() const
{
- return ("ip-route-v6-dump");
+ return ("ip-route-v4-dump");
}
+
} // namespace ip_route_cmds
} // namespace route
} // namespace vom
- /*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "mozilla")
- * End:
- */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
diff --git a/extras/vom/vom/route_cmds.hpp b/extras/vom/vom/route_cmds.hpp
index 6db7b5894ef..3c43208f120 100644
--- a/extras/vom/vom/route_cmds.hpp
+++ b/extras/vom/vom/route_cmds.hpp
@@ -18,7 +18,7 @@
#include "vom/dump_cmd.hpp"
#include "vom/route.hpp"
-#include "vom/rpc_cmd.hpp"
+#include "vom/srpc_cmd.hpp"
#include <vapi/ip.api.vapi.hpp>
@@ -29,16 +29,16 @@ namespace ip_route_cmds {
/**
* A command class that creates or updates the route
*/
-class update_cmd : public rpc_cmd<HW::item<bool>, vapi::Ip_add_del_route>
+class update_cmd : public srpc_cmd<vapi::Ip_route_add_del>
{
public:
/**
* Constructor
*/
- update_cmd(HW::item<bool>& item,
+ update_cmd(HW::item<handle_t>& item,
table_id_t id,
const prefix_t& prefix,
- const path& path);
+ const path_list_t& pl);
/**
* Issue the command to VPP/HW
@@ -58,22 +58,19 @@ public:
private:
route::table_id_t m_id;
prefix_t m_prefix;
- const path m_path;
+ const path_list_t& m_pl;
};
/**
* A cmd class that deletes a route
*/
-class delete_cmd : public rpc_cmd<HW::item<bool>, vapi::Ip_add_del_route>
+class delete_cmd : public rpc_cmd<HW::item<handle_t>, vapi::Ip_route_add_del>
{
public:
/**
* Constructor
*/
- delete_cmd(HW::item<bool>& item,
- table_id_t id,
- const prefix_t& prefix,
- const path& path);
+ delete_cmd(HW::item<handle_t>& item, table_id_t id, const prefix_t& prefix);
/**
* Issue the command to VPP/HW
@@ -93,20 +90,19 @@ public:
private:
route::table_id_t m_id;
prefix_t m_prefix;
- const path m_path;
};
/**
- * A cmd class that Dumps ipv4 fib
+ * A cmd class that Dumps ip fib routes
*/
-class dump_v4_cmd : public VOM::dump_cmd<vapi::Ip_fib_dump>
+class dump_cmd : public VOM::dump_cmd<vapi::Ip_route_dump>
{
public:
/**
* Constructor
*/
- dump_v4_cmd();
- dump_v4_cmd(const dump_cmd& d);
+ dump_cmd(route::table_id_t id, const l3_proto_t& proto);
+ dump_cmd(const dump_cmd& d);
/**
* Issue the command to VPP/HW
@@ -120,46 +116,15 @@ public:
/**
* Comparison operator - only used for UT
*/
- bool operator==(const dump_v4_cmd& i) const;
-
-private:
- /**
- * HW reutrn code
- */
- HW::item<bool> item;
-};
-
-/**
- * A cmd class that Dumps ipv6 fib
- */
-class dump_v6_cmd : public VOM::dump_cmd<vapi::Ip6_fib_dump>
-{
-public:
- /**
- * Constructor
- */
- dump_v6_cmd();
- dump_v6_cmd(const dump_cmd& d);
-
- /**
- * Issue the command to VPP/HW
- */
- rc_t issue(connection& con);
- /**
- * convert to string format for debug purposes
- */
- std::string to_string() const;
-
- /**
- * Comparison operator - only used for UT
- */
- bool operator==(const dump_v6_cmd& i) const;
+ bool operator==(const dump_cmd& i) const;
private:
/**
* HW reutrn code
*/
HW::item<bool> item;
+ route::table_id_t m_id;
+ const l3_proto_t& m_proto;
};
}; // namespace ip_route_cmds
diff --git a/extras/vom/vom/route_domain.cpp b/extras/vom/vom/route_domain.cpp
index b97faf6ae49..16bf5f36c90 100644
--- a/extras/vom/vom/route_domain.cpp
+++ b/extras/vom/vom/route_domain.cpp
@@ -62,6 +62,18 @@ route_domain::key() const
return (table_id());
}
+route_domain::const_iterator_t
+route_domain::cbegin()
+{
+ return m_db.begin();
+}
+
+route_domain::const_iterator_t
+route_domain::cend()
+{
+ return m_db.end();
+}
+
void
route_domain::sweep()
{
@@ -159,6 +171,26 @@ route_domain::dump(std::ostream& os)
void
route_domain::event_handler::handle_populate(const client_db::key_t& key)
{
+ std::shared_ptr<route_domain_cmds::dump_cmd> cmd =
+ std::make_shared<route_domain_cmds::dump_cmd>();
+
+ HW::enqueue(cmd);
+ HW::write();
+
+ for (auto& record : *cmd) {
+ auto& payload = record.get_payload();
+
+ route_domain rd(payload.table.table_id);
+
+ VOM_LOG(log_level_t::DEBUG) << "ip-table-dump: " << rd.to_string();
+
+ /*
+ * Write each of the discovered interfaces into the OM,
+ * but disable the HW Command q whilst we do, so that no
+ * commands are sent to VPP
+ */
+ OM::commit(key, rd);
+ }
}
route_domain::event_handler::event_handler()
diff --git a/extras/vom/vom/route_domain.hpp b/extras/vom/vom/route_domain.hpp
index 96e46ce575b..78db63bbe59 100644
--- a/extras/vom/vom/route_domain.hpp
+++ b/extras/vom/vom/route_domain.hpp
@@ -37,6 +37,15 @@ public:
typedef route::table_id_t key_t;
/**
+ * The iterator type
+ */
+ typedef singular_db<const key_t, route_domain>::const_iterator
+ const_iterator_t;
+
+ static const_iterator_t cbegin();
+ static const_iterator_t cend();
+
+ /**
* Construct a new object matching the desried state
*/
route_domain(route::table_id_t id);
diff --git a/extras/vom/vom/route_domain_cmds.cpp b/extras/vom/vom/route_domain_cmds.cpp
index 9eb50436cc2..8f135e50ef5 100644
--- a/extras/vom/vom/route_domain_cmds.cpp
+++ b/extras/vom/vom/route_domain_cmds.cpp
@@ -39,9 +39,9 @@ create_cmd::issue(connection& con)
msg_t req(con.ctx(), std::ref(*this));
auto& payload = req.get_request().get_payload();
- payload.table_id = m_id;
+ payload.table.table_id = m_id;
+ payload.table.is_ip6 = m_proto.is_ipv6();
payload.is_add = 1;
- payload.is_ipv6 = m_proto.is_ipv6();
VAPI_CALL(req.execute());
@@ -79,9 +79,9 @@ delete_cmd::issue(connection& con)
msg_t req(con.ctx(), std::ref(*this));
auto& payload = req.get_request().get_payload();
- payload.table_id = m_id;
+ payload.table.table_id = m_id;
+ payload.table.is_ip6 = m_proto.is_ipv6();
payload.is_add = 0;
- payload.is_ipv6 = m_proto.is_ipv6();
VAPI_CALL(req.execute());
@@ -100,6 +100,35 @@ delete_cmd::to_string() const
return (s.str());
}
+
+dump_cmd::dump_cmd()
+{
+}
+
+bool
+dump_cmd::operator==(const dump_cmd& other) const
+{
+ return (true);
+}
+
+rc_t
+dump_cmd::issue(connection& con)
+{
+ m_dump.reset(new msg_t(con.ctx(), std::ref(*this)));
+
+ VAPI_CALL(m_dump->execute());
+
+ wait();
+
+ return rc_t::OK;
+}
+
+std::string
+dump_cmd::to_string() const
+{
+ return ("ip-table-dump");
+}
+
} // namespace route_domain_cmds
} // namespace VOM
/*
diff --git a/extras/vom/vom/route_domain_cmds.hpp b/extras/vom/vom/route_domain_cmds.hpp
index 6ac679bd343..42546da91b4 100644
--- a/extras/vom/vom/route_domain_cmds.hpp
+++ b/extras/vom/vom/route_domain_cmds.hpp
@@ -16,6 +16,7 @@
#ifndef __VOM_ROUTE_DOMAIN_CMDS_H__
#define __VOM_ROUTE_DOMAIN_CMDS_H__
+#include "vom/dump_cmd.hpp"
#include "vom/route_domain.hpp"
#include "vom/rpc_cmd.hpp"
@@ -100,6 +101,39 @@ private:
l3_proto_t m_proto;
};
+/**
+ * A cmd class that Dumps IP fib tables
+ */
+class dump_cmd : public VOM::dump_cmd<vapi::Ip_table_dump>
+{
+public:
+ /**
+ * Constructor
+ */
+ dump_cmd();
+ dump_cmd(const dump_cmd& d);
+
+ /**
+ * Issue the command to VPP/HW
+ */
+ rc_t issue(connection& con);
+ /**
+ * convert to string format for debug purposes
+ */
+ std::string to_string() const;
+
+ /**
+ * Comparison operator - only used for UT
+ */
+ bool operator==(const dump_cmd& i) const;
+
+private:
+ /**
+ * HW reutrn code
+ */
+ HW::item<bool> item;
+};
+
}; // namespace route_domain_cmds
}; // namespace VOM
diff --git a/extras/vom/vom/srpc_cmd.hpp b/extras/vom/vom/srpc_cmd.hpp
new file mode 100644
index 00000000000..da6064dafba
--- /dev/null
+++ b/extras/vom/vom/srpc_cmd.hpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VOM_SRPC_CMD_H__
+#define __VOM_SRPC_CMD_H__
+
+#include "vom/hw.hpp"
+#include "vom/rpc_cmd.hpp"
+
+namespace VOM {
+template <typename MSG>
+class srpc_cmd : public rpc_cmd<HW::item<handle_t>, MSG>
+{
+public:
+ /**
+ * convenient typedef
+ */
+ typedef MSG msg_t;
+
+ /**
+ * Constructor taking the HW item that will be updated by the command
+ */
+ srpc_cmd(HW::item<handle_t>& item)
+ : rpc_cmd<HW::item<handle_t>, MSG>(item)
+ {
+ }
+
+ /**
+ * Desructor
+ */
+ virtual ~srpc_cmd() {}
+
+ virtual vapi_error_e operator()(MSG& reply)
+ {
+ int stats_index = reply.get_response().get_payload().stats_index;
+ int retval = reply.get_response().get_payload().retval;
+
+ VOM_LOG(log_level_t::DEBUG) << this->to_string() << " " << retval;
+
+ rc_t rc = rc_t::from_vpp_retval(retval);
+ handle_t handle = handle_t::INVALID;
+
+ if (rc_t::OK == rc) {
+ handle = stats_index;
+ }
+
+ this->fulfill(HW::item<handle_t>(handle, rc));
+
+ return (VAPI_OK);
+ }
+};
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "mozilla")
+ * End:
+ */
+
+#endif
diff --git a/src/plugins/abf/abf.api b/src/plugins/abf/abf.api
index 6716dce562f..03044ad5a6b 100644
--- a/src/plugins/abf/abf.api
+++ b/src/plugins/abf/abf.api
@@ -20,6 +20,7 @@
*/
option version = "1.0.0";
+import "vnet/ip/ip_types.api";
import "vnet/fib/fib_types.api";
/** \brief Get the plugin version
diff --git a/src/plugins/abf/abf_api.c b/src/plugins/abf/abf_api.c
index e5c3bfa6f3f..a14c9008332 100644
--- a/src/plugins/abf/abf_api.c
+++ b/src/plugins/abf/abf_api.c
@@ -101,7 +101,7 @@ vl_api_abf_policy_add_del_t_handler (vl_api_abf_policy_add_del_t * mp)
for (pi = 0; pi < mp->policy.n_paths; pi++)
{
path = &paths[pi];
- rv = fib_path_api_parse (&mp->policy.paths[pi], path);
+ rv = fib_api_path_decode (&mp->policy.paths[pi], path);
if (0 != rv)
{
@@ -158,9 +158,12 @@ typedef struct abf_dump_walk_ctx_t_
static int
abf_policy_send_details (u32 api, void *args)
{
- fib_route_path_encode_t *api_rpaths = NULL, *api_rpath;
+ fib_path_encode_ctx_t walk_ctx = {
+ .rpaths = NULL,
+ };
vl_api_abf_policy_details_t *mp;
abf_dump_walk_ctx_t *ctx;
+ fib_route_path_t *rpath;
vl_api_fib_path_t *fp;
size_t msg_size;
abf_policy_t *ap;
@@ -181,17 +184,19 @@ abf_policy_send_details (u32 api, void *args)
mp->policy.acl_index = htonl (ap->ap_acl);
mp->policy.policy_id = htonl (ap->ap_id);
- fib_path_list_walk_w_ext (ap->ap_pl, NULL, fib_path_encode, &api_rpaths);
+ fib_path_list_walk_w_ext (ap->ap_pl, NULL, fib_path_encode, &walk_ctx);
fp = mp->policy.paths;
- vec_foreach (api_rpath, api_rpaths)
+ vec_foreach (rpath, walk_ctx.rpaths)
{
- fib_api_path_encode (api_rpath, fp);
+ fib_api_path_encode (rpath, fp);
fp++;
}
vl_api_send_msg (ctx->rp, (u8 *) mp);
+ vec_free (walk_ctx.rpaths);
+
return (1);
}
diff --git a/src/plugins/abf/abf_error.def b/src/plugins/abf/abf_error.def
index 71e798beb71..83349eae42a 100644
--- a/src/plugins/abf/abf_error.def
+++ b/src/plugins/abf/abf_error.def
@@ -17,3 +17,4 @@
abf_error (NONE, "no match")
abf_error (MATCHED, "matched")
+abf_error (MISSED, "missed")
diff --git a/src/plugins/abf/abf_itf_attach.c b/src/plugins/abf/abf_itf_attach.c
index 9569306ec2a..337eed8697d 100644
--- a/src/plugins/abf/abf_itf_attach.c
+++ b/src/plugins/abf/abf_itf_attach.c
@@ -505,12 +505,12 @@ abf_input_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame, fib_protocol_t fproto)
{
- u32 n_left_from, *from, *to_next, next_index, matches;
+ u32 n_left_from, *from, *to_next, next_index, matches, misses;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- matches = 0;
+ matches = misses = 0;
while (n_left_from > 0)
{
@@ -530,6 +530,7 @@ abf_input_inline (vlib_main_t * vm,
u32 match_acl_pos = ~0;
u32 match_rule_index = ~0;
u32 trace_bitmap = 0;
+ u32 lc_index;
u8 action;
bi0 = from[0];
@@ -549,7 +550,7 @@ abf_input_inline (vlib_main_t * vm,
/*
* check if any of the policies attached to this interface matches.
*/
- u32 lc_index = abf_alctx_per_itf[fproto][sw_if_index0];
+ lc_index = abf_alctx_per_itf[fproto][sw_if_index0];
/*
A non-inline version looks like this:
@@ -589,6 +590,7 @@ abf_input_inline (vlib_main_t * vm,
* move on down the feature arc
*/
vnet_feature_next (&next0, b0);
+ misses++;
}
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
@@ -614,6 +616,11 @@ abf_input_inline (vlib_main_t * vm,
abf_ip4_node.index :
abf_ip6_node.index),
ABF_ERROR_MATCHED, matches);
+ vlib_node_increment_counter (vm,
+ (fproto = FIB_PROTOCOL_IP6 ?
+ abf_ip4_node.index :
+ abf_ip6_node.index),
+ ABF_ERROR_MISSED, misses);
return frame->n_vectors;
}
diff --git a/src/plugins/gtpu/gtpu.c b/src/plugins/gtpu/gtpu.c
index 7612055ed7f..dc5c689f5d3 100644
--- a/src/plugins/gtpu/gtpu.c
+++ b/src/plugins/gtpu/gtpu.c
@@ -520,8 +520,9 @@ int vnet_gtpu_add_del_tunnel
.frp_addr = zero_addr,
.frp_sw_if_index = 0xffffffff,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
const mfib_prefix_t mpfx = {
.fp_proto = fp,
@@ -535,17 +536,14 @@ int vnet_gtpu_add_del_tunnel
* - the accepting interface is that from the API
*/
mfib_table_entry_path_update (t->encap_fib_index,
- &mpfx,
- MFIB_SOURCE_GTPU,
- &path, MFIB_ITF_FLAG_FORWARD);
+ &mpfx, MFIB_SOURCE_GTPU, &path);
path.frp_sw_if_index = a->mcast_sw_if_index;
path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
mfei = mfib_table_entry_path_update (t->encap_fib_index,
&mpfx,
- MFIB_SOURCE_GTPU,
- &path,
- MFIB_ITF_FLAG_ACCEPT);
+ MFIB_SOURCE_GTPU, &path);
/*
* Create the mcast adjacency to send traffic to the group
diff --git a/src/plugins/igmp/igmp.c b/src/plugins/igmp/igmp.c
index 4e2fce1b916..1e9f647cd11 100644
--- a/src/plugins/igmp/igmp.c
+++ b/src/plugins/igmp/igmp.c
@@ -346,15 +346,6 @@ igmp_enable_disable (u32 sw_if_index, u8 enable, igmp_mode_t mode)
format_vnet_sw_if_index_name, vnet_get_main (), sw_if_index);
/* *INDENT-OFF* */
- fib_route_path_t for_us_path =
- {
- .frp_proto = fib_proto_to_dpo (FIB_PROTOCOL_IP4),
- .frp_addr = zero_addr,
- .frp_sw_if_index = 0xffffffff,
- .frp_fib_index = 0,
- .frp_weight = 1,
- .frp_flags = FIB_ROUTE_PATH_LOCAL,
- };
fib_route_path_t via_itf_path =
{
.frp_proto = fib_proto_to_dpo (FIB_PROTOCOL_IP4),
@@ -362,7 +353,18 @@ igmp_enable_disable (u32 sw_if_index, u8 enable, igmp_mode_t mode)
.frp_sw_if_index = sw_if_index,
.frp_fib_index = 0,
.frp_weight = 1,
+ .frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT,
};
+ fib_route_path_t for_us_path = {
+ .frp_proto = fib_proto_to_dpo (FIB_PROTOCOL_IP4),
+ .frp_addr = zero_addr,
+ .frp_sw_if_index = 0xffffffff,
+ .frp_fib_index = 1,
+ .frp_weight = 0,
+ .frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
+ };
+
/* *INDENT-ON* */
/* find configuration, if it doesn't exist, create new */
config = igmp_config_lookup (sw_if_index);
@@ -405,24 +407,19 @@ igmp_enable_disable (u32 sw_if_index, u8 enable, igmp_mode_t mode)
if (1 == im->n_configs_per_mfib_index[mfib_index])
{
/* first config in this FIB */
+ mfib_table_lock (mfib_index, FIB_PROTOCOL_IP4, MFIB_SOURCE_IGMP);
mfib_table_entry_path_update (mfib_index,
&mpfx_general_query,
- MFIB_SOURCE_IGMP,
- &for_us_path,
- MFIB_ITF_FLAG_FORWARD);
+ MFIB_SOURCE_IGMP, &for_us_path);
mfib_table_entry_path_update (mfib_index,
&mpfx_report,
- MFIB_SOURCE_IGMP,
- &for_us_path,
- MFIB_ITF_FLAG_FORWARD);
+ MFIB_SOURCE_IGMP, &for_us_path);
}
mfib_table_entry_path_update (mfib_index,
&mpfx_general_query,
- MFIB_SOURCE_IGMP,
- &via_itf_path, MFIB_ITF_FLAG_ACCEPT);
+ MFIB_SOURCE_IGMP, &via_itf_path);
mfib_table_entry_path_update (mfib_index, &mpfx_report,
- MFIB_SOURCE_IGMP, &via_itf_path,
- MFIB_ITF_FLAG_ACCEPT);
+ MFIB_SOURCE_IGMP, &via_itf_path);
}
}
else if (config && !enable)
@@ -438,6 +435,7 @@ igmp_enable_disable (u32 sw_if_index, u8 enable, igmp_mode_t mode)
mfib_table_entry_path_remove (mfib_index,
&mpfx_report,
MFIB_SOURCE_IGMP, &for_us_path);
+ mfib_table_unlock (mfib_index, FIB_PROTOCOL_IP4, MFIB_SOURCE_IGMP);
}
mfib_table_entry_path_remove (mfib_index,
@@ -482,7 +480,6 @@ igmp_init (vlib_main_t * vm)
igmp_main_t *im = &igmp_main;
im->igmp_api_client_by_client_index = hash_create (0, sizeof (u32));
-
im->logger = vlib_log_register_class ("igmp", 0);
IGMP_DBG ("initialized");
diff --git a/src/plugins/igmp/igmp_proxy.c b/src/plugins/igmp/igmp_proxy.c
index 690b38a8001..c2f3e06fb1f 100644
--- a/src/plugins/igmp/igmp_proxy.c
+++ b/src/plugins/igmp/igmp_proxy.c
@@ -49,13 +49,13 @@ igmp_proxy_device_mfib_path_add_del (igmp_group_t * group, u8 add)
.frp_sw_if_index = config->sw_if_index,
.frp_fib_index = 0,
.frp_weight = 1,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
/* *INDENT-ON* */
if (add)
mfib_table_entry_path_update (mfib_index, &mpfx_group_addr,
- MFIB_SOURCE_IGMP, &via_itf_path,
- MFIB_ITF_FLAG_FORWARD);
+ MFIB_SOURCE_IGMP, &via_itf_path);
else
mfib_table_entry_path_remove (mfib_index, &mpfx_group_addr,
MFIB_SOURCE_IGMP, &via_itf_path);
diff --git a/src/plugins/l3xc/l3xc_api.c b/src/plugins/l3xc/l3xc_api.c
index 45c01fa4e7a..0427cca5f1c 100644
--- a/src/plugins/l3xc/l3xc_api.c
+++ b/src/plugins/l3xc/l3xc_api.c
@@ -108,7 +108,7 @@ vl_api_l3xc_update_t_handler (vl_api_l3xc_update_t * mp)
for (pi = 0; pi < mp->l3xc.n_paths; pi++)
{
path = &paths[pi];
- rv = fib_path_api_parse (&mp->l3xc.paths[pi], path);
+ rv = fib_api_path_decode (&mp->l3xc.paths[pi], path);
if (0 != rv)
{
@@ -155,9 +155,12 @@ typedef struct l3xc_dump_walk_ctx_t_
static int
l3xc_send_details (u32 l3xci, void *args)
{
- fib_route_path_encode_t *api_rpaths = NULL, *api_rpath;
+ fib_path_encode_ctx_t path_ctx = {
+ .rpaths = NULL,
+ };
vl_api_l3xc_details_t *mp;
l3xc_dump_walk_ctx_t *ctx;
+ fib_route_path_t *rpath;
vl_api_fib_path_t *fp;
size_t msg_size;
l3xc_t *l3xc;
@@ -177,13 +180,12 @@ l3xc_send_details (u32 l3xci, void *args)
mp->l3xc.n_paths = n_paths;
mp->l3xc.sw_if_index = htonl (l3xc->l3xc_sw_if_index);
- fib_path_list_walk_w_ext (l3xc->l3xc_pl, NULL, fib_path_encode,
- &api_rpaths);
+ fib_path_list_walk_w_ext (l3xc->l3xc_pl, NULL, fib_path_encode, &path_ctx);
fp = mp->l3xc.paths;
- vec_foreach (api_rpath, api_rpaths)
+ vec_foreach (rpath, path_ctx.rpaths)
{
- fib_api_path_encode (api_rpath, fp);
+ fib_api_path_encode (rpath, fp);
fp++;
}
diff --git a/src/plugins/unittest/bier_test.c b/src/plugins/unittest/bier_test.c
index e6799eb4f42..8a609e5f20d 100644
--- a/src/plugins/unittest/bier_test.c
+++ b/src/plugins/unittest/bier_test.c
@@ -770,12 +770,12 @@ bier_test_mpls_imp (void)
.frp_bier_imp = bii,
.frp_weight = 0,
.frp_flags = FIB_ROUTE_PATH_BIER_IMP,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
mfib_table_entry_path_update(0, // default table
&pfx_1_1_1_1_c_239_1_1_1 ,
MFIB_SOURCE_API,
- &path_via_bier_imp_1,
- MFIB_ITF_FLAG_FORWARD);
+ &path_via_bier_imp_1);
mfib_table_entry_delete(0,
&pfx_1_1_1_1_c_239_1_1_1 ,
MFIB_SOURCE_API);
diff --git a/src/plugins/unittest/mfib_test.c b/src/plugins/unittest/mfib_test.c
index 0707a6f57c8..2497a299c50 100644
--- a/src/plugins/unittest/mfib_test.c
+++ b/src/plugins/unittest/mfib_test.c
@@ -233,7 +233,7 @@ mfib_test_entry (fib_node_index_t fei,
MFIB_TEST_REP((eflags == mfe->mfe_flags),
"%U has %U expect %U",
- format_mfib_prefix, &pfx,
+ format_mfib_prefix, pfx,
format_mfib_entry_flags, mfe->mfe_flags,
format_mfib_entry_flags, eflags);
@@ -241,7 +241,7 @@ mfib_test_entry (fib_node_index_t fei,
{
MFIB_TEST_REP((DPO_DROP == mfe->mfe_rep.dpoi_type),
"%U links to %U",
- format_mfib_prefix, &pfx,
+ format_mfib_prefix, pfx,
format_dpo_id, &mfe->mfe_rep, 0);
}
else
@@ -257,7 +257,7 @@ mfib_test_entry (fib_node_index_t fei,
MFIB_TEST_REP((DPO_REPLICATE == tmp.dpoi_type),
"%U links to %U",
- format_mfib_prefix, &pfx,
+ format_mfib_prefix, pfx,
format_dpo_type, tmp.dpoi_type);
va_start(ap, n_buckets);
@@ -288,11 +288,11 @@ mfib_test_entry_itf (fib_node_index_t fei,
MFIB_TEST_REP((NULL != mfi),
"%U has interface %d",
- format_mfib_prefix, &pfx, sw_if_index);
+ format_mfib_prefix, pfx, sw_if_index);
MFIB_TEST_REP((flags == mfi->mfi_flags),
"%U interface %d has flags %U expect %U",
- format_mfib_prefix, &pfx, sw_if_index,
+ format_mfib_prefix, pfx, sw_if_index,
format_mfib_itf_flags, flags,
format_mfib_itf_flags, mfi->mfi_flags);
@@ -408,15 +408,15 @@ mfib_test_i (fib_protocol_t PROTO,
.frp_addr = zero_addr,
.frp_sw_if_index = tm->hw[0]->sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = 0,
+ .frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT,
};
mfib_table_entry_path_update(fib_index,
pfx_no_forward,
MFIB_SOURCE_API,
- &path_via_if0,
- MFIB_ITF_FLAG_ACCEPT);
+ &path_via_if0);
mfei_no_f = mfib_table_lookup_exact_match(fib_index, pfx_no_forward);
MFIB_TEST(!mfib_test_entry(mfei_no_f,
@@ -424,41 +424,42 @@ mfib_test_i (fib_protocol_t PROTO,
0),
"%U no replcaitions",
format_mfib_prefix, pfx_no_forward);
- MFIB_TEST_NS(!mfib_test_entry_itf(mfei_no_f, tm->hw[0]->sw_if_index,
- MFIB_ITF_FLAG_ACCEPT));
+ MFIB_TEST(!mfib_test_entry_itf(mfei_no_f, tm->hw[0]->sw_if_index,
+ MFIB_ITF_FLAG_ACCEPT),
+ "%U interface not accepting",
+ format_mfib_prefix, pfx_no_forward);
fib_route_path_t path_via_if1 = {
.frp_proto = fib_proto_to_dpo(PROTO),
.frp_addr = zero_addr,
.frp_sw_if_index = tm->hw[1]->sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = 0,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
fib_route_path_t path_via_if2 = {
.frp_proto = fib_proto_to_dpo(PROTO),
.frp_addr = zero_addr,
.frp_sw_if_index = tm->hw[2]->sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = 0,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
fib_route_path_t path_via_if3 = {
.frp_proto = fib_proto_to_dpo(PROTO),
.frp_addr = zero_addr,
.frp_sw_if_index = tm->hw[3]->sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = 0,
+ .frp_mitf_flags = (MFIB_ITF_FLAG_FORWARD |
+ MFIB_ITF_FLAG_NEGATE_SIGNAL),
};
- fib_route_path_t path_for_us = {
- .frp_proto = fib_proto_to_dpo(PROTO),
- .frp_addr = zero_addr,
- .frp_sw_if_index = 0xffffffff,
- .frp_fib_index = ~0,
- .frp_weight = 0,
- .frp_flags = FIB_ROUTE_PATH_LOCAL,
- };
+ fib_route_path_t *two_paths = NULL;
+ vec_add1(two_paths, path_via_if2);
+ vec_add1(two_paths, path_via_if3);
/*
* An (S,G) with 1 accepting and 3 forwarding paths
@@ -466,24 +467,15 @@ mfib_test_i (fib_protocol_t PROTO,
mfib_table_entry_path_update(fib_index,
pfx_s_g,
MFIB_SOURCE_API,
- &path_via_if0,
- MFIB_ITF_FLAG_ACCEPT);
- mfib_table_entry_path_update(fib_index,
- pfx_s_g,
- MFIB_SOURCE_API,
- &path_via_if1,
- MFIB_ITF_FLAG_FORWARD);
- mfib_table_entry_path_update(fib_index,
- pfx_s_g,
- MFIB_SOURCE_API,
- &path_via_if2,
- MFIB_ITF_FLAG_FORWARD);
+ &path_via_if0);
mfib_table_entry_path_update(fib_index,
pfx_s_g,
MFIB_SOURCE_API,
- &path_via_if3,
- (MFIB_ITF_FLAG_FORWARD |
- MFIB_ITF_FLAG_NEGATE_SIGNAL));
+ &path_via_if1);
+ mfib_table_entry_paths_update(fib_index,
+ pfx_s_g,
+ MFIB_SOURCE_API,
+ two_paths);
mfei_s_g = mfib_table_lookup_exact_match(fib_index, pfx_s_g);
@@ -515,13 +507,11 @@ mfib_test_i (fib_protocol_t PROTO,
mfei_g_1 = mfib_table_entry_path_update(fib_index,
pfx_star_g_1,
MFIB_SOURCE_API,
- &path_via_if0,
- MFIB_ITF_FLAG_ACCEPT);
+ &path_via_if0);
mfib_table_entry_path_update(fib_index,
pfx_star_g_1,
MFIB_SOURCE_API,
- &path_via_if1,
- MFIB_ITF_FLAG_FORWARD);
+ &path_via_if1);
/*
* test we find the *,G and S,G via LPM and exact matches
@@ -583,16 +573,15 @@ mfib_test_i (fib_protocol_t PROTO,
* A (*,G/m), which the same root G as the (*,G).
* different paths. test our LPM.
*/
+ path_via_if2.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
mfei_g_m = mfib_table_entry_path_update(fib_index,
pfx_star_g_slash_m,
MFIB_SOURCE_API,
- &path_via_if2,
- MFIB_ITF_FLAG_ACCEPT);
+ &path_via_if2);
mfib_table_entry_path_update(fib_index,
pfx_star_g_slash_m,
MFIB_SOURCE_API,
- &path_via_if3,
- MFIB_ITF_FLAG_FORWARD);
+ &path_via_if3);
/*
* test we find the (*,G/m), (*,G) and (S,G) via LPM and exact matches
@@ -656,11 +645,20 @@ mfib_test_i (fib_protocol_t PROTO,
/*
* Add a for-us path
*/
+ fib_route_path_t path_for_us = {
+ .frp_proto = fib_proto_to_dpo(PROTO),
+ .frp_addr = zero_addr,
+ .frp_sw_if_index = 0xffffffff,
+ .frp_fib_index = ~0,
+ .frp_weight = 1,
+ .frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
+ };
+
mfei = mfib_table_entry_path_update(fib_index,
pfx_s_g,
MFIB_SOURCE_API,
- &path_for_us,
- MFIB_ITF_FLAG_FORWARD);
+ &path_for_us);
MFIB_TEST(!mfib_test_entry(mfei,
MFIB_ENTRY_FLAG_NONE,
@@ -693,11 +691,11 @@ mfib_test_i (fib_protocol_t PROTO,
* update an existing forwarding path to be only accepting
* - expect it to be removed from the replication set.
*/
+ path_via_if3.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
mfib_table_entry_path_update(fib_index,
pfx_s_g,
MFIB_SOURCE_API,
- &path_via_if3,
- MFIB_ITF_FLAG_ACCEPT);
+ &path_via_if3);
MFIB_TEST(!mfib_test_entry(mfei,
MFIB_ENTRY_FLAG_NONE,
@@ -718,13 +716,13 @@ mfib_test_i (fib_protocol_t PROTO,
* Make the path forwarding again
* - expect it to be added back to the replication set
*/
+ path_via_if3.frp_mitf_flags = (MFIB_ITF_FLAG_FORWARD |
+ MFIB_ITF_FLAG_ACCEPT |
+ MFIB_ITF_FLAG_NEGATE_SIGNAL);
mfib_table_entry_path_update(fib_index,
pfx_s_g,
MFIB_SOURCE_API,
- &path_via_if3,
- (MFIB_ITF_FLAG_FORWARD |
- MFIB_ITF_FLAG_ACCEPT |
- MFIB_ITF_FLAG_NEGATE_SIGNAL));
+ &path_via_if3);
mfei = mfib_table_lookup_exact_match(fib_index,
pfx_s_g);
@@ -806,32 +804,37 @@ mfib_test_i (fib_protocol_t PROTO,
MFIB_TEST_NS(!mfib_test_entry_no_itf(mfei, tm->hw[3]->sw_if_index));
/*
- * remove the accpeting only interface
+ * remove
*/
- mfib_table_entry_path_remove(fib_index,
- pfx_s_g,
- MFIB_SOURCE_API,
- &path_via_if0);
-
- MFIB_TEST(!mfib_test_entry(mfei,
- MFIB_ENTRY_FLAG_SIGNAL,
- 1,
- DPO_ADJACENCY_MCAST, ai_2),
- "%U replicate OK",
- format_mfib_prefix, pfx_s_g);
- MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[2]->sw_if_index,
- MFIB_ITF_FLAG_FORWARD));
- MFIB_TEST_NS(!mfib_test_entry_no_itf(mfei, tm->hw[0]->sw_if_index));
- MFIB_TEST_NS(!mfib_test_entry_no_itf(mfei, tm->hw[1]->sw_if_index));
- MFIB_TEST_NS(!mfib_test_entry_no_itf(mfei, tm->hw[3]->sw_if_index));
+ /* mfib_table_entry_path_remove(fib_index, */
+ /* pfx_s_g, */
+ /* MFIB_SOURCE_API, */
+ /* &path_via_if0); */
+
+ /* MFIB_TEST(!mfib_test_entry(mfei, */
+ /* MFIB_ENTRY_FLAG_SIGNAL, */
+ /* 1, */
+ /* DPO_ADJACENCY_MCAST, ai_2), */
+ /* "%U replicate OK", */
+ /* format_mfib_prefix, pfx_s_g); */
+ /* MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[2]->sw_if_index, */
+ /* MFIB_ITF_FLAG_FORWARD)); */
+ /* MFIB_TEST_NS(!mfib_test_entry_no_itf(mfei, tm->hw[0]->sw_if_index)); */
+ /* MFIB_TEST_NS(!mfib_test_entry_no_itf(mfei, tm->hw[1]->sw_if_index)); */
+ /* MFIB_TEST_NS(!mfib_test_entry_no_itf(mfei, tm->hw[3]->sw_if_index)); */
/*
- * remove the last path, the entry still has flags so it remains
+ * remove the last path and the accpeting only interface,
+ * the entry still has flags so it remains
*/
- mfib_table_entry_path_remove(fib_index,
- pfx_s_g,
- MFIB_SOURCE_API,
- &path_via_if2);
+ vec_reset_length(two_paths);
+ vec_add1(two_paths, path_via_if0);
+ vec_add1(two_paths, path_via_if2);
+
+ mfib_table_entry_paths_remove(fib_index,
+ pfx_s_g,
+ MFIB_SOURCE_API,
+ two_paths);
MFIB_TEST(!mfib_test_entry(mfei,
MFIB_ENTRY_FLAG_SIGNAL,
@@ -858,12 +861,12 @@ mfib_test_i (fib_protocol_t PROTO,
/*
* An entry with a NS interface
*/
+ path_via_if0.frp_mitf_flags = (MFIB_ITF_FLAG_ACCEPT |
+ MFIB_ITF_FLAG_NEGATE_SIGNAL);
mfei_g_2 = mfib_table_entry_path_update(fib_index,
pfx_star_g_2,
MFIB_SOURCE_API,
- &path_via_if0,
- (MFIB_ITF_FLAG_ACCEPT |
- MFIB_ITF_FLAG_NEGATE_SIGNAL));
+ &path_via_if0);
MFIB_TEST(!mfib_test_entry(mfei_g_2,
MFIB_ENTRY_FLAG_NONE,
0),
@@ -886,12 +889,12 @@ mfib_test_i (fib_protocol_t PROTO,
/*
* An entry with a NS interface
*/
+ path_via_if0.frp_mitf_flags = (MFIB_ITF_FLAG_ACCEPT |
+ MFIB_ITF_FLAG_NEGATE_SIGNAL);
mfei_g_3 = mfib_table_entry_path_update(fib_index,
pfx_star_g_3,
MFIB_SOURCE_API,
- &path_via_if0,
- (MFIB_ITF_FLAG_ACCEPT |
- MFIB_ITF_NEGATE_SIGNAL));
+ &path_via_if0);
MFIB_TEST(!mfib_test_entry(mfei_g_3,
MFIB_ENTRY_FLAG_NONE,
0),
@@ -1056,28 +1059,28 @@ mfib_test_i (fib_protocol_t PROTO,
.frp_addr = *addr_nbr1,
.frp_sw_if_index = tm->hw[0]->sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = 0,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
fib_route_path_t path_via_nbr2 = {
.frp_proto = fib_proto_to_dpo(PROTO),
.frp_addr = *addr_nbr2,
.frp_sw_if_index = tm->hw[0]->sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = 0,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
mfei_g_1 = mfib_table_entry_path_update(fib_index,
pfx_star_g_1,
MFIB_SOURCE_API,
- &path_via_nbr1,
- (MFIB_ITF_FLAG_FORWARD));
+ &path_via_nbr1);
mfei_g_1 = mfib_table_entry_path_update(fib_index,
pfx_star_g_1,
MFIB_SOURCE_API,
- &path_via_nbr2,
- (MFIB_ITF_FLAG_FORWARD));
+ &path_via_nbr2);
MFIB_TEST(!mfib_test_entry(mfei_g_1,
MFIB_ENTRY_FLAG_NONE,
2,
@@ -1230,6 +1233,7 @@ mfib_test_i (fib_protocol_t PROTO,
.frp_fib_index = 0,
.frp_weight = 1,
.frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
dpo_id_t mldp_dpo = DPO_INVALID;
@@ -1240,8 +1244,7 @@ mfib_test_i (fib_protocol_t PROTO,
mfei = mfib_table_entry_path_update(fib_index,
pfx_s_g,
MFIB_SOURCE_API,
- &path_via_mldp,
- MFIB_ITF_FLAG_FORWARD);
+ &path_via_mldp);
MFIB_TEST(!mfib_test_entry(mfei,
MFIB_ENTRY_FLAG_NONE,
@@ -1256,8 +1259,7 @@ mfib_test_i (fib_protocol_t PROTO,
mfei = mfib_table_entry_path_update(fib_index,
pfx_s_g,
MFIB_SOURCE_API,
- &path_for_us,
- MFIB_ITF_FLAG_FORWARD);
+ &path_for_us);
MFIB_TEST(!mfib_test_entry(mfei,
MFIB_ENTRY_FLAG_NONE,
2,
@@ -1321,6 +1323,7 @@ mfib_test_i (fib_protocol_t PROTO,
MFIB_TEST(n_itfs == pool_elts(mfib_itf_pool),
" No more Interfaces %d!=%d",
n_itfs, pool_elts(mfib_itf_pool));
+ vec_free(two_paths);
return (res);
}
@@ -1531,32 +1534,36 @@ mfib_test_rr_i (fib_protocol_t FPROTO,
.frp_addr = zero_addr,
.frp_sw_if_index = tm->hw[0]->sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = 0,
+ .frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT,
};
fib_route_path_t path_via_if1 = {
.frp_proto = DPROTO,
.frp_addr = zero_addr,
.frp_sw_if_index = tm->hw[1]->sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = 0,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
fib_route_path_t path_via_if2 = {
.frp_proto = DPROTO,
.frp_addr = zero_addr,
.frp_sw_if_index = tm->hw[2]->sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = 0,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
fib_route_path_t path_for_us = {
.frp_proto = DPROTO,
.frp_addr = zero_addr,
.frp_sw_if_index = 0xffffffff,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
/*
@@ -1581,8 +1588,7 @@ mfib_test_rr_i (fib_protocol_t FPROTO,
mfib_table_entry_path_update(fib_index,
pfx_cover,
MFIB_SOURCE_API,
- &path_via_if1,
- MFIB_ITF_FLAG_FORWARD);
+ &path_via_if1);
mfei_cover = mfib_table_lookup_exact_match(fib_index, pfx_cover);
@@ -1609,8 +1615,7 @@ mfib_test_rr_i (fib_protocol_t FPROTO,
mfib_table_entry_path_update(fib_index,
pfx_cover,
MFIB_SOURCE_API,
- &path_via_if2,
- MFIB_ITF_FLAG_FORWARD);
+ &path_via_if2);
/*
* expect the /32 and /28 to be via both boths
@@ -1667,8 +1672,7 @@ mfib_test_rr_i (fib_protocol_t FPROTO,
mfib_table_entry_path_update(fib_index,
pfx_cover,
MFIB_SOURCE_API,
- &path_via_if0,
- MFIB_ITF_FLAG_ACCEPT);
+ &path_via_if0);
/*
* expect the /32 and /28 to be via both boths
@@ -1706,8 +1710,7 @@ mfib_test_rr_i (fib_protocol_t FPROTO,
mfib_table_entry_path_update(fib_index,
pfx_cover,
MFIB_SOURCE_API,
- &path_for_us,
- MFIB_ITF_FLAG_FORWARD);
+ &path_for_us);
/*
* expect the /32 and /28 to be via all three paths
@@ -1775,11 +1778,10 @@ mfib_test_rr_i (fib_protocol_t FPROTO,
/*
* source the /32 with its own path
*/
- mfib_table_entry_path_update(fib_index,
- pfx_host1,
- MFIB_SOURCE_API,
- &path_via_if2,
- MFIB_ITF_FLAG_FORWARD);
+ mfei_host1 = mfib_table_entry_path_update(fib_index,
+ pfx_host1,
+ MFIB_SOURCE_API,
+ &path_via_if2);
MFIB_TEST(!mfib_test_entry(mfei_host1,
MFIB_ENTRY_FLAG_NONE,
1,
@@ -1809,17 +1811,16 @@ mfib_test_rr_i (fib_protocol_t FPROTO,
/*
* add the RR back then remove the path and RR
*/
- mfib_table_entry_path_update(fib_index,
- pfx_host1,
- MFIB_SOURCE_API,
- &path_via_if2,
- MFIB_ITF_FLAG_FORWARD);
+ mfei_host1 = mfib_table_entry_path_update(fib_index,
+ pfx_host1,
+ MFIB_SOURCE_API,
+ &path_via_if2);
MFIB_TEST(!mfib_test_entry(mfei_host1,
MFIB_ENTRY_FLAG_NONE,
1,
DPO_ADJACENCY_MCAST, ai_2),
"%U replicate OK",
- format_mfib_prefix, pfx_cover);
+ format_mfib_prefix, pfx_host1);
mfib_table_entry_delete(fib_index, pfx_host1,
MFIB_SOURCE_API);
diff --git a/src/tools/vppapigen/vppapigen.py b/src/tools/vppapigen/vppapigen.py
index fd87b18792b..576fa548424 100755
--- a/src/tools/vppapigen/vppapigen.py
+++ b/src/tools/vppapigen/vppapigen.py
@@ -23,8 +23,6 @@ global_types = {}
def global_type_add(name, obj):
'''Add new type to the dictionary of types '''
type_name = 'vl_api_' + name + '_t'
- if type_name in global_types:
- raise KeyError('Type is already defined: {}'.format(name))
global_types[type_name] = obj
diff --git a/src/vat/api_format.c b/src/vat/api_format.c
index 5eb44c99b5d..fe1a87f573e 100644
--- a/src/vat/api_format.c
+++ b/src/vat/api_format.c
@@ -710,8 +710,9 @@ format_ethernet_address (u8 * s, va_list * args)
#endif
static void
-increment_v4_address (ip4_address_t * a)
+increment_v4_address (vl_api_ip4_address_t * i)
{
+ ip4_address_t *a = (ip4_address_t *) i;
u32 v;
v = ntohl (a->as_u32) + 1;
@@ -719,27 +720,9 @@ increment_v4_address (ip4_address_t * a)
}
static void
-increment_vl_v4_address (vl_api_ip4_address_t * a)
-{
- u32 v;
-
- v = *(u32 *) a;
- v = ntohl (v);
- v++;
- v = ntohl (v);
- clib_memcpy (a, &v, sizeof (v));
-}
-
-static void
-increment_vl_address (vl_api_address_t * a)
-{
- if (ADDRESS_IP4 == a->af)
- increment_vl_v4_address (&a->un.ip4);
-}
-
-static void
-increment_v6_address (ip6_address_t * a)
+increment_v6_address (vl_api_ip6_address_t * i)
{
+ ip6_address_t *a = (ip6_address_t *) i;
u64 v0, v1;
v0 = clib_net_to_host_u64 (a->as_u64[0]);
@@ -753,6 +736,25 @@ increment_v6_address (ip6_address_t * a)
}
static void
+increment_address (vl_api_address_t * a)
+{
+ if (a->af == ADDRESS_IP4)
+ increment_v4_address (&a->un.ip4);
+ else if (a->af == ADDRESS_IP6)
+ increment_v6_address (&a->un.ip6);
+}
+
+static void
+set_ip4_address (vl_api_address_t * a, u32 v)
+{
+ if (a->af == ADDRESS_IP4)
+ {
+ ip4_address_t *i = (ip4_address_t *) & a->un.ip4;
+ i->as_u32 = v;
+ }
+}
+
+static void
increment_mac_address (u8 * mac)
{
u64 tmp = *((u64 *) mac);
@@ -763,6 +765,34 @@ increment_mac_address (u8 * mac)
clib_memcpy (mac, &tmp, 6);
}
+static void
+vat_json_object_add_address (vat_json_node_t * node,
+ const char *str, const vl_api_address_t * addr)
+{
+ if (ADDRESS_IP6 == addr->af)
+ {
+ struct in6_addr ip6;
+
+ clib_memcpy (&ip6, &addr->un.ip6, sizeof (ip6));
+ vat_json_object_add_ip6 (node, str, ip6);
+ }
+ else
+ {
+ struct in_addr ip4;
+
+ clib_memcpy (&ip4, &addr->un.ip4, sizeof (ip4));
+ vat_json_object_add_ip4 (node, str, ip4);
+ }
+}
+
+static void
+vat_json_object_add_prefix (vat_json_node_t * node,
+ const vl_api_prefix_t * prefix)
+{
+ vat_json_object_add_uint (node, "address_length", prefix->address_length);
+ vat_json_object_add_address (node, "prefix", &prefix->address);
+}
+
static void vl_api_create_loopback_reply_t_handler
(vl_api_create_loopback_reply_t * mp)
{
@@ -2634,8 +2664,8 @@ static void vl_api_ip_address_details_t_handler
address = vec_elt_at_index (addresses, vec_len (addresses) - 1);
- clib_memcpy (&address->ip, &mp->ip, sizeof (address->ip));
- address->prefix_length = mp->prefix_length;
+ clib_memcpy (&address->ip, &mp->prefix.address.un, sizeof (address->ip));
+ address->prefix_length = mp->prefix.address_length;
#undef addresses
}
@@ -2644,8 +2674,6 @@ static void vl_api_ip_address_details_t_handler_json
{
vat_main_t *vam = &vat_main;
vat_json_node_t *node = NULL;
- struct in6_addr ip6;
- struct in_addr ip4;
if (VAT_JSON_ARRAY != vam->json_tree.type)
{
@@ -2655,17 +2683,7 @@ static void vl_api_ip_address_details_t_handler_json
node = vat_json_array_add (&vam->json_tree);
vat_json_init_object (node);
- if (vam->is_ipv6)
- {
- clib_memcpy (&ip6, mp->ip, sizeof (ip6));
- vat_json_object_add_ip6 (node, "ip", ip6);
- }
- else
- {
- clib_memcpy (&ip4, mp->ip, sizeof (ip4));
- vat_json_object_add_ip4 (node, "ip", ip4);
- }
- vat_json_object_add_uint (node, "prefix_length", mp->prefix_length);
+ vat_json_object_add_prefix (node, &mp->prefix);
}
static void
@@ -5165,7 +5183,7 @@ _(sw_interface_set_l2_xconnect_reply) \
_(l2fib_add_del_reply) \
_(l2fib_flush_int_reply) \
_(l2fib_flush_bd_reply) \
-_(ip_add_del_route_reply) \
+_(ip_route_add_del_reply) \
_(ip_table_add_del_reply) \
_(ip_mroute_add_del_reply) \
_(mpls_route_add_del_reply) \
@@ -5366,7 +5384,7 @@ _(BOND_ENSLAVE_REPLY, bond_enslave_reply) \
_(BOND_DETACH_SLAVE_REPLY, bond_detach_slave_reply) \
_(SW_INTERFACE_BOND_DETAILS, sw_interface_bond_details) \
_(SW_INTERFACE_SLAVE_DETAILS, sw_interface_slave_details) \
-_(IP_ADD_DEL_ROUTE_REPLY, ip_add_del_route_reply) \
+_(IP_ROUTE_ADD_DEL_REPLY, ip_route_add_del_reply) \
_(IP_TABLE_ADD_DEL_REPLY, ip_table_add_del_reply) \
_(IP_MROUTE_ADD_DEL_REPLY, ip_mroute_add_del_reply) \
_(MPLS_TABLE_ADD_DEL_REPLY, mpls_table_add_del_reply) \
@@ -5557,7 +5575,8 @@ _(POLICER_CLASSIFY_DETAILS, policer_classify_details) \
_(NETMAP_CREATE_REPLY, netmap_create_reply) \
_(NETMAP_DELETE_REPLY, netmap_delete_reply) \
_(MPLS_TUNNEL_DETAILS, mpls_tunnel_details) \
-_(MPLS_FIB_DETAILS, mpls_fib_details) \
+_(MPLS_TABLE_DETAILS, mpls_table_details) \
+_(MPLS_ROUTE_DETAILS, mpls_route_details) \
_(CLASSIFY_TABLE_IDS_REPLY, classify_table_ids_reply) \
_(CLASSIFY_TABLE_BY_INTERFACE_REPLY, classify_table_by_interface_reply) \
_(CLASSIFY_TABLE_INFO_REPLY, classify_table_info_reply) \
@@ -5585,8 +5604,8 @@ _(IPSEC_GRE_TUNNEL_DETAILS, ipsec_gre_tunnel_details) \
_(DELETE_SUBIF_REPLY, delete_subif_reply) \
_(L2_INTERFACE_PBB_TAG_REWRITE_REPLY, l2_interface_pbb_tag_rewrite_reply) \
_(SET_PUNT_REPLY, set_punt_reply) \
-_(IP_FIB_DETAILS, ip_fib_details) \
-_(IP6_FIB_DETAILS, ip6_fib_details) \
+_(IP_TABLE_DETAILS, ip_table_details) \
+_(IP_ROUTE_DETAILS, ip_route_details) \
_(FEATURE_ENABLE_DISABLE_REPLY, feature_enable_disable_reply) \
_(SW_INTERFACE_TAG_ADD_DEL_REPLY, sw_interface_tag_add_del_reply) \
_(L2_XCONNECT_DETAILS, l2_xconnect_details) \
@@ -7999,8 +8018,8 @@ api_ip_table_add_del (vat_main_t * vam)
/* Construct the API message */
M (IP_TABLE_ADD_DEL, mp);
- mp->table_id = ntohl (table_id);
- mp->is_ipv6 = is_ipv6;
+ mp->table.table_id = ntohl (table_id);
+ mp->table.is_ip6 = is_ipv6;
mp->is_add = is_add;
/* send it... */
@@ -8012,181 +8031,223 @@ api_ip_table_add_del (vat_main_t * vam)
return ret;
}
-static int
-api_ip_add_del_route (vat_main_t * vam)
+uword
+unformat_fib_path (unformat_input_t * input, va_list * args)
{
- unformat_input_t *i = vam->input;
- vl_api_ip_add_del_route_t *mp;
- u32 sw_if_index = ~0, vrf_id = 0;
- u8 is_ipv6 = 0;
- u8 is_local = 0, is_drop = 0;
- u8 is_unreach = 0, is_prohibit = 0;
- u8 is_add = 1;
- u32 next_hop_weight = 1;
- u8 is_multipath = 0;
- u8 address_set = 0;
- u8 address_length_set = 0;
- u32 next_hop_table_id = 0;
- u32 resolve_attempts = 0;
- u32 dst_address_length = 0;
- u8 next_hop_set = 0;
- ip4_address_t v4_dst_address, v4_next_hop_address;
- ip6_address_t v6_dst_address, v6_next_hop_address;
- int count = 1;
- int j;
- f64 before = 0;
- u32 random_add_del = 0;
- u32 *random_vector = 0;
- uword *random_hash;
- u32 random_seed = 0xdeaddabe;
- u32 classify_table_index = ~0;
- u8 is_classify = 0;
- u8 resolve_host = 0, resolve_attached = 0;
- vl_api_fib_mpls_label_t *next_hop_out_label_stack = NULL;
- mpls_label_t next_hop_out_label = MPLS_LABEL_INVALID;
- mpls_label_t next_hop_via_label = MPLS_LABEL_INVALID;
+ vat_main_t *vam = va_arg (*args, vat_main_t *);
+ vl_api_fib_path_t *path = va_arg (*args, vl_api_fib_path_t *);
+ u32 weight, preference;
+ mpls_label_t out_label;
- clib_memset (&v4_next_hop_address, 0, sizeof (ip4_address_t));
- clib_memset (&v6_next_hop_address, 0, sizeof (ip6_address_t));
- /* Parse args required to build the message */
- while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ clib_memset (path, 0, sizeof (*path));
+ path->weight = 1;
+ path->sw_if_index = ~0;
+ path->rpf_id = ~0;
+ path->n_labels = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (i, "%U", api_unformat_sw_if_index, vam, &sw_if_index))
- ;
- else if (unformat (i, "sw_if_index %d", &sw_if_index))
- ;
- else if (unformat (i, "%U", unformat_ip4_address, &v4_dst_address))
+ if (unformat (input, "%U %U",
+ unformat_vl_api_ip4_address,
+ &path->nh.address.ip4,
+ api_unformat_sw_if_index, vam, &path->sw_if_index))
{
- address_set = 1;
- is_ipv6 = 0;
+ path->proto = FIB_API_PATH_NH_PROTO_IP4;
}
- else if (unformat (i, "%U", unformat_ip6_address, &v6_dst_address))
+ else if (unformat (input, "%U %U",
+ unformat_vl_api_ip6_address,
+ &path->nh.address.ip6,
+ api_unformat_sw_if_index, vam, &path->sw_if_index))
{
- address_set = 1;
- is_ipv6 = 1;
+ path->proto = FIB_API_PATH_NH_PROTO_IP6;
}
- else if (unformat (i, "/%d", &dst_address_length))
+ else if (unformat (input, "weight %u", &weight))
{
- address_length_set = 1;
+ path->weight = weight;
}
-
- else if (is_ipv6 == 0 && unformat (i, "via %U", unformat_ip4_address,
- &v4_next_hop_address))
+ else if (unformat (input, "preference %u", &preference))
{
- next_hop_set = 1;
+ path->preference = preference;
}
- else if (is_ipv6 == 1 && unformat (i, "via %U", unformat_ip6_address,
- &v6_next_hop_address))
+ else if (unformat (input, "%U next-hop-table %d",
+ unformat_vl_api_ip4_address,
+ &path->nh.address.ip4, &path->table_id))
{
- next_hop_set = 1;
+ path->proto = FIB_API_PATH_NH_PROTO_IP4;
}
- else
- if (unformat
- (i, "via %U", api_unformat_sw_if_index, vam, &sw_if_index))
+ else if (unformat (input, "%U next-hop-table %d",
+ unformat_vl_api_ip6_address,
+ &path->nh.address.ip6, &path->table_id))
{
- next_hop_set = 1;
+ path->proto = FIB_API_PATH_NH_PROTO_IP6;
}
- else if (unformat (i, "via sw_if_index %d", &sw_if_index))
+ else if (unformat (input, "%U",
+ unformat_vl_api_ip4_address, &path->nh.address.ip4))
{
- next_hop_set = 1;
+ /*
+ * the recursive next-hops are by default in the default table
+ */
+ path->table_id = 0;
+ path->sw_if_index = ~0;
+ path->proto = FIB_API_PATH_NH_PROTO_IP4;
}
- else if (unformat (i, "resolve-attempts %d", &resolve_attempts))
- ;
- else if (unformat (i, "weight %d", &next_hop_weight))
+ else if (unformat (input, "%U",
+ unformat_vl_api_ip6_address, &path->nh.address.ip6))
+ {
+ /*
+ * the recursive next-hops are by default in the default table
+ */
+ path->table_id = 0;
+ path->sw_if_index = ~0;
+ path->proto = FIB_API_PATH_NH_PROTO_IP6;
+ }
+ else if (unformat (input, "resolve-via-host"))
+ {
+ path->flags |= FIB_API_PATH_FLAG_RESOLVE_VIA_HOST;
+ }
+ else if (unformat (input, "resolve-via-attached"))
+ {
+ path->flags |= FIB_API_PATH_FLAG_RESOLVE_VIA_ATTACHED;
+ }
+ else if (unformat (input, "ip4-lookup-in-table %d", &path->table_id))
+ {
+ path->type = FIB_API_PATH_TYPE_LOCAL;
+ path->sw_if_index = ~0;
+ path->proto = FIB_API_PATH_NH_PROTO_IP4;
+ }
+ else if (unformat (input, "ip6-lookup-in-table %d", &path->table_id))
+ {
+ path->type = FIB_API_PATH_TYPE_LOCAL;
+ path->sw_if_index = ~0;
+ path->proto = FIB_API_PATH_NH_PROTO_IP6;
+ }
+ else if (unformat (input, "sw_if_index %d", &path->sw_if_index))
;
- else if (unformat (i, "drop"))
+ else if (unformat (input, "via-label %d", &path->nh.via_label))
{
- is_drop = 1;
+ path->proto = FIB_API_PATH_NH_PROTO_MPLS;
+ path->sw_if_index = ~0;
}
- else if (unformat (i, "null-send-unreach"))
+ else if (unformat (input, "l2-input-on %d", &path->sw_if_index))
{
- is_unreach = 1;
+ path->proto = FIB_API_PATH_NH_PROTO_ETHERNET;
+ path->type = FIB_API_PATH_TYPE_INTERFACE_RX;
}
- else if (unformat (i, "null-send-prohibit"))
+ else if (unformat (input, "local"))
{
- is_prohibit = 1;
+ path->type = FIB_API_PATH_TYPE_LOCAL;
}
- else if (unformat (i, "local"))
+ else if (unformat (input, "out-labels"))
+ {
+ while (unformat (input, "%d", &out_label))
+ {
+ path->label_stack[path->n_labels].label = out_label;
+ path->label_stack[path->n_labels].is_uniform = 0;
+ path->label_stack[path->n_labels].ttl = 64;
+ path->n_labels++;
+ }
+ }
+ else if (unformat (input, "via"))
{
- is_local = 1;
+ /* new path, back up and return */
+ unformat_put_input (input);
+ unformat_put_input (input);
+ unformat_put_input (input);
+ unformat_put_input (input);
+ break;
}
- else if (unformat (i, "classify %d", &classify_table_index))
+ else
{
- is_classify = 1;
+ return (0);
}
+ }
+
+ path->proto = ntohl (path->proto);
+ path->type = ntohl (path->type);
+ path->flags = ntohl (path->flags);
+ path->table_id = ntohl (path->table_id);
+ path->sw_if_index = ntohl (path->sw_if_index);
+
+ return (1);
+}
+
+static int
+api_ip_route_add_del (vat_main_t * vam)
+{
+ unformat_input_t *i = vam->input;
+ vl_api_ip_route_add_del_t *mp;
+ u32 vrf_id = 0;
+ u8 is_add = 1;
+ u8 is_multipath = 0;
+ u8 prefix_set = 0;
+ u8 path_count = 0;
+ vl_api_prefix_t pfx = { };
+ vl_api_fib_path_t paths[8];
+ int count = 1;
+ int j;
+ f64 before = 0;
+ u32 random_add_del = 0;
+ u32 *random_vector = 0;
+ u32 random_seed = 0xdeaddabe;
+
+ /* Parse args required to build the message */
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "%U", unformat_vl_api_prefix, &pfx))
+ prefix_set = 1;
else if (unformat (i, "del"))
is_add = 0;
else if (unformat (i, "add"))
is_add = 1;
- else if (unformat (i, "resolve-via-host"))
- resolve_host = 1;
- else if (unformat (i, "resolve-via-attached"))
- resolve_attached = 1;
- else if (unformat (i, "multipath"))
- is_multipath = 1;
else if (unformat (i, "vrf %d", &vrf_id))
;
else if (unformat (i, "count %d", &count))
;
- else if (unformat (i, "lookup-in-vrf %d", &next_hop_table_id))
- ;
- else if (unformat (i, "next-hop-table %d", &next_hop_table_id))
- ;
- else if (unformat (i, "out-label %d", &next_hop_out_label))
- {
- vl_api_fib_mpls_label_t fib_label = {
- .label = ntohl (next_hop_out_label),
- .ttl = 64,
- .exp = 0,
- };
- vec_add1 (next_hop_out_label_stack, fib_label);
- }
- else if (unformat (i, "via via-label %d", &next_hop_via_label))
- ;
else if (unformat (i, "random"))
random_add_del = 1;
+ else if (unformat (i, "multipath"))
+ is_multipath = 1;
else if (unformat (i, "seed %d", &random_seed))
;
else
+ if (unformat
+ (i, "via %U", unformat_fib_path, vam, &paths[path_count]))
+ {
+ path_count++;
+ if (8 == path_count)
+ {
+ errmsg ("max 8 paths");
+ return -99;
+ }
+ }
+ else
{
clib_warning ("parse error '%U'", format_unformat_error, i);
return -99;
}
}
- if (!next_hop_set && !is_drop && !is_local &&
- !is_classify && !is_unreach && !is_prohibit &&
- MPLS_LABEL_INVALID == next_hop_via_label)
- {
- errmsg
- ("next hop / local / drop / unreach / prohibit / classify not set");
- return -99;
- }
-
- if (next_hop_set && MPLS_LABEL_INVALID != next_hop_via_label)
- {
- errmsg ("next hop and next-hop via label set");
- return -99;
- }
- if (address_set == 0)
+ if (!path_count)
{
- errmsg ("missing addresses");
+ errmsg ("specify a path; via ...");
return -99;
}
-
- if (address_length_set == 0)
+ if (prefix_set == 0)
{
- errmsg ("missing address length");
+ errmsg ("missing prefix");
return -99;
}
/* Generate a pile of unique, random routes */
if (random_add_del)
{
+ ip4_address_t *i = (ip4_address_t *) & paths[0].nh.address.ip4;
u32 this_random_address;
+ uword *random_hash;
+
random_hash = hash_create (count, sizeof (uword));
- hash_set (random_hash, v4_next_hop_address.as_u32, 1);
+ hash_set (random_hash, i->as_u32, 1);
for (j = 0; j <= count; j++)
{
do
@@ -8200,7 +8261,7 @@ api_ip_add_del_route (vat_main_t * vam)
hash_set (random_hash, this_random_address, 1);
}
hash_free (random_hash);
- v4_dst_address.as_u32 = random_vector[0];
+ set_ip4_address (&pfx.address, random_vector[0]);
}
if (count > 1)
@@ -8214,59 +8275,21 @@ api_ip_add_del_route (vat_main_t * vam)
for (j = 0; j < count; j++)
{
/* Construct the API message */
- M2 (IP_ADD_DEL_ROUTE, mp, sizeof (vl_api_fib_mpls_label_t) *
- vec_len (next_hop_out_label_stack));
-
- mp->next_hop_sw_if_index = ntohl (sw_if_index);
- mp->table_id = ntohl (vrf_id);
+ M2 (IP_ROUTE_ADD_DEL, mp, sizeof (vl_api_fib_path_t) * path_count);
mp->is_add = is_add;
- mp->is_drop = is_drop;
- mp->is_unreach = is_unreach;
- mp->is_prohibit = is_prohibit;
- mp->is_ipv6 = is_ipv6;
- mp->is_local = is_local;
- mp->is_classify = is_classify;
mp->is_multipath = is_multipath;
- mp->is_resolve_host = resolve_host;
- mp->is_resolve_attached = resolve_attached;
- mp->next_hop_weight = next_hop_weight;
- mp->next_hop_preference = 0;
- mp->dst_address_length = dst_address_length;
- mp->next_hop_table_id = ntohl (next_hop_table_id);
- mp->classify_table_index = ntohl (classify_table_index);
- mp->next_hop_via_label = ntohl (next_hop_via_label);
- mp->next_hop_n_out_labels = vec_len (next_hop_out_label_stack);
- if (0 != mp->next_hop_n_out_labels)
- {
- memcpy (mp->next_hop_out_label_stack,
- next_hop_out_label_stack,
- (vec_len (next_hop_out_label_stack) *
- sizeof (vl_api_fib_mpls_label_t)));
- vec_free (next_hop_out_label_stack);
- }
-
- if (is_ipv6)
- {
- clib_memcpy (mp->dst_address, &v6_dst_address,
- sizeof (v6_dst_address));
- if (next_hop_set)
- clib_memcpy (mp->next_hop_address, &v6_next_hop_address,
- sizeof (v6_next_hop_address));
- increment_v6_address (&v6_dst_address);
- }
- else
- {
- clib_memcpy (mp->dst_address, &v4_dst_address,
- sizeof (v4_dst_address));
- if (next_hop_set)
- clib_memcpy (mp->next_hop_address, &v4_next_hop_address,
- sizeof (v4_next_hop_address));
- if (random_add_del)
- v4_dst_address.as_u32 = random_vector[j + 1];
- else
- increment_v4_address (&v4_dst_address);
- }
+
+ clib_memcpy (&mp->route.prefix, &pfx, sizeof (pfx));
+ mp->route.table_id = ntohl (vrf_id);
+ mp->route.n_paths = path_count;
+
+ clib_memcpy (&mp->route.paths, &paths, sizeof (paths[0]) * path_count);
+
+ if (random_add_del)
+ set_ip4_address (&pfx.address, random_vector[j + 1]);
+ else
+ increment_address (&pfx.address);
/* send it... */
S (mp);
/* If we receive SIGTERM, stop now... */
@@ -8329,59 +8352,21 @@ static int
api_ip_mroute_add_del (vat_main_t * vam)
{
unformat_input_t *i = vam->input;
+ u8 path_set = 0, prefix_set = 0, is_add = 1;
vl_api_ip_mroute_add_del_t *mp;
- u32 sw_if_index = ~0, vrf_id = 0;
- u8 is_ipv6 = 0;
- u8 is_local = 0;
- u8 is_add = 1;
- u8 address_set = 0;
- u32 grp_address_length = 0;
- ip4_address_t v4_grp_address, v4_src_address;
- ip6_address_t v6_grp_address, v6_src_address;
- mfib_itf_flags_t iflags = 0;
mfib_entry_flags_t eflags = 0;
+ vl_api_mfib_path_t path;
+ vl_api_mprefix_t pfx = { };
+ u32 vrf_id = 0;
int ret;
/* Parse args required to build the message */
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (i, "sw_if_index %d", &sw_if_index))
- ;
- else if (unformat (i, "%U %U",
- unformat_ip4_address, &v4_src_address,
- unformat_ip4_address, &v4_grp_address))
- {
- grp_address_length = 64;
- address_set = 1;
- is_ipv6 = 0;
- }
- else if (unformat (i, "%U %U",
- unformat_ip6_address, &v6_src_address,
- unformat_ip6_address, &v6_grp_address))
+ if (unformat (i, "%U", unformat_vl_api_mprefix, &pfx))
{
- grp_address_length = 256;
- address_set = 1;
- is_ipv6 = 1;
- }
- else if (unformat (i, "%U", unformat_ip4_address, &v4_grp_address))
- {
- clib_memset (&v4_src_address, 0, sizeof (v4_src_address));
- grp_address_length = 32;
- address_set = 1;
- is_ipv6 = 0;
- }
- else if (unformat (i, "%U", unformat_ip6_address, &v6_grp_address))
- {
- clib_memset (&v6_src_address, 0, sizeof (v6_src_address));
- grp_address_length = 128;
- address_set = 1;
- is_ipv6 = 1;
- }
- else if (unformat (i, "/%d", &grp_address_length))
- ;
- else if (unformat (i, "local"))
- {
- is_local = 1;
+ prefix_set = 1;
+ pfx.grp_address_length = htons (pfx.grp_address_length);
}
else if (unformat (i, "del"))
is_add = 0;
@@ -8389,10 +8374,12 @@ api_ip_mroute_add_del (vat_main_t * vam)
is_add = 1;
else if (unformat (i, "vrf %d", &vrf_id))
;
- else if (unformat (i, "%U", unformat_mfib_itf_flags, &iflags))
- ;
+ else if (unformat (i, "%U", unformat_mfib_itf_flags, &path.itf_flags))
+ path.itf_flags = htonl (path.itf_flags);
else if (unformat (i, "%U", unformat_mfib_entry_flags, &eflags))
;
+ else if (unformat (i, "via %U", unformat_fib_path, vam, &path.path))
+ path_set = 1;
else
{
clib_warning ("parse error '%U'", format_unformat_error, i);
@@ -8400,37 +8387,29 @@ api_ip_mroute_add_del (vat_main_t * vam)
}
}
- if (address_set == 0)
+ if (prefix_set == 0)
{
errmsg ("missing addresses\n");
return -99;
}
+ if (path_set == 0)
+ {
+ errmsg ("missing path\n");
+ return -99;
+ }
/* Construct the API message */
M (IP_MROUTE_ADD_DEL, mp);
- mp->next_hop_sw_if_index = ntohl (sw_if_index);
- mp->table_id = ntohl (vrf_id);
-
mp->is_add = is_add;
- mp->is_ipv6 = is_ipv6;
- mp->is_local = is_local;
- mp->itf_flags = ntohl (iflags);
- mp->entry_flags = ntohl (eflags);
- mp->grp_address_length = grp_address_length;
- mp->grp_address_length = ntohs (mp->grp_address_length);
+ mp->is_multipath = 1;
- if (is_ipv6)
- {
- clib_memcpy (mp->grp_address, &v6_grp_address, sizeof (v6_grp_address));
- clib_memcpy (mp->src_address, &v6_src_address, sizeof (v6_src_address));
- }
- else
- {
- clib_memcpy (mp->grp_address, &v4_grp_address, sizeof (v4_grp_address));
- clib_memcpy (mp->src_address, &v4_src_address, sizeof (v4_src_address));
+ clib_memcpy (&mp->route.prefix, &pfx, sizeof (pfx));
+ mp->route.table_id = htonl (vrf_id);
+ mp->route.n_paths = 1;
+ mp->route.entry_flags = htonl (eflags);
- }
+ clib_memcpy (&mp->route.paths, &path, sizeof (path));
/* send it... */
S (mp);
@@ -8473,7 +8452,7 @@ api_mpls_table_add_del (vat_main_t * vam)
/* Construct the API message */
M (MPLS_TABLE_ADD_DEL, mp);
- mp->mt_table_id = ntohl (table_id);
+ mp->mt_table.mt_table_id = ntohl (table_id);
mp->mt_is_add = is_add;
/* send it... */
@@ -8488,112 +8467,41 @@ api_mpls_table_add_del (vat_main_t * vam)
static int
api_mpls_route_add_del (vat_main_t * vam)
{
+ u8 is_add = 1, path_count = 0, is_multipath = 0, is_eos = 0;
+ mpls_label_t local_label = MPLS_LABEL_INVALID;
unformat_input_t *i = vam->input;
vl_api_mpls_route_add_del_t *mp;
- u32 sw_if_index = ~0, table_id = 0;
- u8 is_add = 1;
- u32 next_hop_weight = 1;
- u8 is_multipath = 0;
- u32 next_hop_table_id = 0;
- u8 next_hop_set = 0;
- ip4_address_t v4_next_hop_address = {
- .as_u32 = 0,
- };
- ip6_address_t v6_next_hop_address = { {0} };
- int count = 1;
- int j;
+ vl_api_fib_path_t paths[8];
+ int count = 1, j;
f64 before = 0;
- u32 classify_table_index = ~0;
- u8 is_classify = 0;
- u8 resolve_host = 0, resolve_attached = 0;
- u8 is_interface_rx = 0;
- mpls_label_t next_hop_via_label = MPLS_LABEL_INVALID;
- mpls_label_t next_hop_out_label = MPLS_LABEL_INVALID;
- vl_api_fib_mpls_label_t *next_hop_out_label_stack = NULL;
- mpls_label_t local_label = MPLS_LABEL_INVALID;
- u8 is_eos = 0;
- dpo_proto_t next_hop_proto = DPO_PROTO_MPLS;
/* Parse args required to build the message */
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (i, "%U", api_unformat_sw_if_index, vam, &sw_if_index))
- ;
- else if (unformat (i, "sw_if_index %d", &sw_if_index))
- ;
- else if (unformat (i, "%d", &local_label))
+ if (unformat (i, "%d", &local_label))
;
else if (unformat (i, "eos"))
is_eos = 1;
else if (unformat (i, "non-eos"))
is_eos = 0;
- else if (unformat (i, "via %U", unformat_ip4_address,
- &v4_next_hop_address))
- {
- next_hop_set = 1;
- next_hop_proto = DPO_PROTO_IP4;
- }
- else if (unformat (i, "via %U", unformat_ip6_address,
- &v6_next_hop_address))
- {
- next_hop_set = 1;
- next_hop_proto = DPO_PROTO_IP6;
- }
- else if (unformat (i, "weight %d", &next_hop_weight))
- ;
- else if (unformat (i, "classify %d", &classify_table_index))
- {
- is_classify = 1;
- }
else if (unformat (i, "del"))
is_add = 0;
else if (unformat (i, "add"))
is_add = 1;
- else if (unformat (i, "resolve-via-host"))
- resolve_host = 1;
- else if (unformat (i, "resolve-via-attached"))
- resolve_attached = 1;
else if (unformat (i, "multipath"))
is_multipath = 1;
else if (unformat (i, "count %d", &count))
;
- else if (unformat (i, "via lookup-in-ip4-table %d", &next_hop_table_id))
- {
- next_hop_set = 1;
- next_hop_proto = DPO_PROTO_IP4;
- }
- else if (unformat (i, "via lookup-in-ip6-table %d", &next_hop_table_id))
- {
- next_hop_set = 1;
- next_hop_proto = DPO_PROTO_IP6;
- }
else
if (unformat
- (i, "via l2-input-on %U", api_unformat_sw_if_index, vam,
- &sw_if_index))
- {
- next_hop_set = 1;
- next_hop_proto = DPO_PROTO_ETHERNET;
- is_interface_rx = 1;
- }
- else if (unformat (i, "via l2-input-on sw_if_index %d", &sw_if_index))
- {
- next_hop_set = 1;
- next_hop_proto = DPO_PROTO_ETHERNET;
- is_interface_rx = 1;
- }
- else if (unformat (i, "via next-hop-table %d", &next_hop_table_id))
- next_hop_set = 1;
- else if (unformat (i, "via via-label %d", &next_hop_via_label))
- next_hop_set = 1;
- else if (unformat (i, "out-label %d", &next_hop_out_label))
+ (i, "via %U", unformat_fib_path, vam, &paths[path_count]))
{
- vl_api_fib_mpls_label_t fib_label = {
- .label = ntohl (next_hop_out_label),
- .ttl = 64,
- .exp = 0,
- };
- vec_add1 (next_hop_out_label_stack, fib_label);
+ path_count++;
+ if (8 == path_count)
+ {
+ errmsg ("max 8 paths");
+ return -99;
+ }
}
else
{
@@ -8602,9 +8510,9 @@ api_mpls_route_add_del (vat_main_t * vam)
}
}
- if (!next_hop_set && !is_classify)
+ if (!path_count)
{
- errmsg ("next hop / classify not set");
+ errmsg ("specify a path; via ...");
return -99;
}
@@ -8625,53 +8533,19 @@ api_mpls_route_add_del (vat_main_t * vam)
for (j = 0; j < count; j++)
{
/* Construct the API message */
- M2 (MPLS_ROUTE_ADD_DEL, mp, sizeof (vl_api_fib_mpls_label_t) *
- vec_len (next_hop_out_label_stack));
-
- mp->mr_next_hop_sw_if_index = ntohl (sw_if_index);
- mp->mr_table_id = ntohl (table_id);
+ M2 (MPLS_ROUTE_ADD_DEL, mp, sizeof (vl_api_fib_path_t) * path_count);
mp->mr_is_add = is_add;
- mp->mr_next_hop_proto = next_hop_proto;
- mp->mr_is_classify = is_classify;
mp->mr_is_multipath = is_multipath;
- mp->mr_is_resolve_host = resolve_host;
- mp->mr_is_resolve_attached = resolve_attached;
- mp->mr_is_interface_rx = is_interface_rx;
- mp->mr_next_hop_weight = next_hop_weight;
- mp->mr_next_hop_preference = 0;
- mp->mr_next_hop_table_id = ntohl (next_hop_table_id);
- mp->mr_classify_table_index = ntohl (classify_table_index);
- mp->mr_next_hop_via_label = ntohl (next_hop_via_label);
- mp->mr_label = ntohl (local_label);
- mp->mr_eos = is_eos;
-
- mp->mr_next_hop_n_out_labels = vec_len (next_hop_out_label_stack);
- if (0 != mp->mr_next_hop_n_out_labels)
- {
- memcpy (mp->mr_next_hop_out_label_stack,
- next_hop_out_label_stack,
- vec_len (next_hop_out_label_stack) *
- sizeof (vl_api_fib_mpls_label_t));
- vec_free (next_hop_out_label_stack);
- }
-
- if (next_hop_set)
- {
- if (DPO_PROTO_IP4 == next_hop_proto)
- {
- clib_memcpy (mp->mr_next_hop,
- &v4_next_hop_address,
- sizeof (v4_next_hop_address));
- }
- else if (DPO_PROTO_IP6 == next_hop_proto)
- {
- clib_memcpy (mp->mr_next_hop,
- &v6_next_hop_address,
- sizeof (v6_next_hop_address));
- }
- }
+ mp->mr_route.mr_label = local_label;
+ mp->mr_route.mr_eos = is_eos;
+ mp->mr_route.mr_table_id = 0;
+ mp->mr_route.mr_n_paths = path_count;
+
+ clib_memcpy (&mp->mr_route.mr_paths, paths,
+ sizeof (paths[0]) * path_count);
+
local_label++;
/* send it... */
@@ -8730,6 +8604,7 @@ api_mpls_route_add_del (vat_main_t * vam)
/* Return the good/bad news */
return (vam->retval);
+ return (0);
}
static int
@@ -8739,29 +8614,16 @@ api_mpls_ip_bind_unbind (vat_main_t * vam)
vl_api_mpls_ip_bind_unbind_t *mp;
u32 ip_table_id = 0;
u8 is_bind = 1;
- u8 is_ip4 = 1;
- ip4_address_t v4_address;
- ip6_address_t v6_address;
- u32 address_length;
- u8 address_set = 0;
+ vl_api_prefix_t pfx;
+ u8 prefix_set = 0;
mpls_label_t local_label = MPLS_LABEL_INVALID;
int ret;
/* Parse args required to build the message */
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (i, "%U/%d", unformat_ip4_address,
- &v4_address, &address_length))
- {
- is_ip4 = 1;
- address_set = 1;
- }
- else if (unformat (i, "%U/%d", unformat_ip6_address,
- &v6_address, &address_length))
- {
- is_ip4 = 0;
- address_set = 1;
- }
+ if (unformat (i, "%U", unformat_vl_api_prefix, &pfx))
+ prefix_set = 1;
else if (unformat (i, "%d", &local_label))
;
else if (unformat (i, "table-id %d", &ip_table_id))
@@ -8777,9 +8639,9 @@ api_mpls_ip_bind_unbind (vat_main_t * vam)
}
}
- if (!address_set)
+ if (!prefix_set)
{
- errmsg ("IP address not set");
+ errmsg ("IP prefix not set");
return -99;
}
@@ -8793,16 +8655,10 @@ api_mpls_ip_bind_unbind (vat_main_t * vam)
M (MPLS_IP_BIND_UNBIND, mp);
mp->mb_is_bind = is_bind;
- mp->mb_is_ip4 = is_ip4;
mp->mb_ip_table_id = ntohl (ip_table_id);
mp->mb_mpls_table_id = 0;
mp->mb_label = ntohl (local_label);
- mp->mb_address_length = address_length;
-
- if (is_ip4)
- clib_memcpy (mp->mb_address, &v4_address, sizeof (v4_address));
- else
- clib_memcpy (mp->mb_address, &v6_address, sizeof (v6_address));
+ clib_memcpy (&mp->mb_prefix, &pfx, sizeof (pfx));
/* send it... */
S (mp);
@@ -8810,6 +8666,7 @@ api_mpls_ip_bind_unbind (vat_main_t * vam)
/* Wait for a reply... */
W (ret);
return ret;
+ return (0);
}
static int
@@ -9034,23 +8891,25 @@ api_bier_route_add_del (vat_main_t * vam)
M2 (BIER_ROUTE_ADD_DEL, mp, sizeof (vl_api_fib_path_t));
mp->br_is_add = is_add;
- mp->br_tbl_id.bt_set = set;
- mp->br_tbl_id.bt_sub_domain = sub_domain;
- mp->br_tbl_id.bt_hdr_len_id = hdr_len;
- mp->br_bp = ntohs (bp);
- mp->br_n_paths = 1;
- mp->br_paths[0].n_labels = 1;
- mp->br_paths[0].label_stack[0].label = ntohl (next_hop_out_label);
- mp->br_paths[0].afi = (next_hop_proto_is_ip4 ? 0 : 1);
+ mp->br_route.br_tbl_id.bt_set = set;
+ mp->br_route.br_tbl_id.bt_sub_domain = sub_domain;
+ mp->br_route.br_tbl_id.bt_hdr_len_id = hdr_len;
+ mp->br_route.br_bp = ntohs (bp);
+ mp->br_route.br_n_paths = 1;
+ mp->br_route.br_paths[0].n_labels = 1;
+ mp->br_route.br_paths[0].label_stack[0].label = ntohl (next_hop_out_label);
+ mp->br_route.br_paths[0].proto = (next_hop_proto_is_ip4 ?
+ FIB_API_PATH_NH_PROTO_IP4 :
+ FIB_API_PATH_NH_PROTO_IP6);
if (next_hop_proto_is_ip4)
{
- clib_memcpy (mp->br_paths[0].next_hop,
+ clib_memcpy (&mp->br_route.br_paths[0].nh.address.ip4,
&v4_next_hop_address, sizeof (v4_next_hop_address));
}
else
{
- clib_memcpy (mp->br_paths[0].next_hop,
+ clib_memcpy (&mp->br_route.br_paths[0].nh.address.ip6,
&v6_next_hop_address, sizeof (v6_next_hop_address));
}
@@ -9157,20 +9016,11 @@ api_mpls_tunnel_add_del (vat_main_t * vam)
unformat_input_t *i = vam->input;
vl_api_mpls_tunnel_add_del_t *mp;
- u8 is_add = 1;
- u8 l2_only = 0;
+ vl_api_fib_path_t paths[8];
u32 sw_if_index = ~0;
- u32 next_hop_sw_if_index = ~0;
- u32 next_hop_proto_is_ip4 = 1;
-
- u32 next_hop_table_id = 0;
- ip4_address_t v4_next_hop_address = {
- .as_u32 = 0,
- };
- ip6_address_t v6_next_hop_address = { {0} };
- vl_api_fib_mpls_label_t *next_hop_out_label_stack = NULL;
- mpls_label_t next_hop_via_label = MPLS_LABEL_INVALID;
- mpls_label_t next_hop_out_label = MPLS_LABEL_INVALID;
+ u8 path_count = 0;
+ u8 l2_only = 0;
+ u8 is_add = 1;
int ret;
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
@@ -9183,36 +9033,18 @@ api_mpls_tunnel_add_del (vat_main_t * vam)
is_add = 0;
else if (unformat (i, "del sw_if_index %d", &sw_if_index))
is_add = 0;
- else if (unformat (i, "via %U",
- unformat_ip4_address, &v4_next_hop_address))
- {
- next_hop_proto_is_ip4 = 1;
- }
- else if (unformat (i, "via %U",
- unformat_ip6_address, &v6_next_hop_address))
- {
- next_hop_proto_is_ip4 = 0;
- }
- else if (unformat (i, "via-label %d", &next_hop_via_label))
- ;
- else
- if (unformat
- (i, "%U", api_unformat_sw_if_index, vam, &next_hop_sw_if_index))
- ;
- else if (unformat (i, "sw_if_index %d", &next_hop_sw_if_index))
- ;
else if (unformat (i, "l2-only"))
l2_only = 1;
- else if (unformat (i, "next-hop-table %d", &next_hop_table_id))
- ;
- else if (unformat (i, "out-label %d", &next_hop_out_label))
+ else
+ if (unformat
+ (i, "via %U", unformat_fib_path, vam, &paths[path_count]))
{
- vl_api_fib_mpls_label_t fib_label = {
- .label = ntohl (next_hop_out_label),
- .ttl = 64,
- .exp = 0,
- };
- vec_add1 (next_hop_out_label_stack, fib_label);
+ path_count++;
+ if (8 == path_count)
+ {
+ errmsg ("max 8 paths");
+ return -99;
+ }
}
else
{
@@ -9221,40 +9053,16 @@ api_mpls_tunnel_add_del (vat_main_t * vam)
}
}
- M2 (MPLS_TUNNEL_ADD_DEL, mp, sizeof (vl_api_fib_mpls_label_t) *
- vec_len (next_hop_out_label_stack));
+ M2 (MPLS_TUNNEL_ADD_DEL, mp, sizeof (vl_api_fib_path_t) * path_count);
- mp->mt_next_hop_sw_if_index = ntohl (next_hop_sw_if_index);
- mp->mt_sw_if_index = ntohl (sw_if_index);
mp->mt_is_add = is_add;
- mp->mt_l2_only = l2_only;
- mp->mt_next_hop_table_id = ntohl (next_hop_table_id);
- mp->mt_next_hop_proto_is_ip4 = next_hop_proto_is_ip4;
- mp->mt_next_hop_via_label = ntohl (next_hop_via_label);
- mp->mt_next_hop_weight = 1;
- mp->mt_next_hop_preference = 0;
+ mp->mt_tunnel.mt_sw_if_index = ntohl (sw_if_index);
+ mp->mt_tunnel.mt_l2_only = l2_only;
+ mp->mt_tunnel.mt_is_multicast = 0;
+ mp->mt_tunnel.mt_n_paths = path_count;
- mp->mt_next_hop_n_out_labels = vec_len (next_hop_out_label_stack);
-
- if (0 != mp->mt_next_hop_n_out_labels)
- {
- clib_memcpy (mp->mt_next_hop_out_label_stack,
- next_hop_out_label_stack,
- (vec_len (next_hop_out_label_stack) *
- sizeof (vl_api_fib_mpls_label_t)));
- vec_free (next_hop_out_label_stack);
- }
-
- if (next_hop_proto_is_ip4)
- {
- clib_memcpy (mp->mt_next_hop,
- &v4_next_hop_address, sizeof (v4_next_hop_address));
- }
- else
- {
- clib_memcpy (mp->mt_next_hop,
- &v6_next_hop_address, sizeof (v6_next_hop_address));
- }
+ clib_memcpy (&mp->mt_tunnel.mt_paths, &paths,
+ sizeof (paths[0]) * path_count);
S (mp);
W (ret);
@@ -9324,6 +9132,7 @@ api_ip_neighbor_add_del (vat_main_t * vam)
flags = IP_NEIGHBOR_FLAG_NONE;
clib_memset (&ip_address, 0, sizeof (ip_address));
clib_memset (&mac_address, 0, sizeof (mac_address));
+
/* Parse args required to build the message */
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
@@ -13309,26 +13118,6 @@ static void vl_api_gre_tunnel_details_t_handler
ntohl (mp->tunnel.session_id));
}
-static void
-vat_json_object_add_address (vat_json_node_t * node,
- const char *str, const vl_api_address_t * addr)
-{
- if (ADDRESS_IP6 == addr->af)
- {
- struct in6_addr ip6;
-
- clib_memcpy (&ip6, &addr->un.ip6, sizeof (ip6));
- vat_json_object_add_ip6 (node, str, ip6);
- }
- else
- {
- struct in_addr ip4;
-
- clib_memcpy (&ip4, &addr->un.ip4, sizeof (ip4));
- vat_json_object_add_ip4 (node, str, ip4);
- }
-}
-
static void vl_api_gre_tunnel_details_t_handler_json
(vl_api_gre_tunnel_details_t * mp)
{
@@ -14997,7 +14786,7 @@ api_ipsec_tunnel_if_add_del (vat_main_t * vam)
mp->anti_replay = anti_replay;
if (jj > 0)
- increment_vl_address (&remote_ip);
+ increment_address (&remote_ip);
clib_memcpy (&mp->local_ip, &local_ip, sizeof (local_ip));
clib_memcpy (&mp->remote_ip, &remote_ip, sizeof (remote_ip));
@@ -18917,23 +18706,106 @@ api_netmap_delete (vat_main_t * vam)
return ret;
}
+static u8 *
+format_fib_api_path_nh_proto (u8 * s, va_list * args)
+{
+ vl_api_fib_path_nh_proto_t proto =
+ va_arg (*args, vl_api_fib_path_nh_proto_t);
+
+ switch (proto)
+ {
+ case FIB_API_PATH_NH_PROTO_IP4:
+ s = format (s, "ip4");
+ break;
+ case FIB_API_PATH_NH_PROTO_IP6:
+ s = format (s, "ip6");
+ break;
+ case FIB_API_PATH_NH_PROTO_MPLS:
+ s = format (s, "mpls");
+ break;
+ case FIB_API_PATH_NH_PROTO_BIER:
+ s = format (s, "bier");
+ break;
+ case FIB_API_PATH_NH_PROTO_ETHERNET:
+ s = format (s, "ethernet");
+ break;
+ }
+
+ return (s);
+}
+
+static u8 *
+format_vl_api_ip_address_union (u8 * s, va_list * args)
+{
+ vl_api_address_family_t af = va_arg (*args, vl_api_address_family_t);
+ const vl_api_address_union_t *u = va_arg (*args, vl_api_address_union_t *);
+
+ switch (af)
+ {
+ case ADDRESS_IP4:
+ s = format (s, "%U", format_ip4_address, u->ip4);
+ break;
+ case ADDRESS_IP6:
+ s = format (s, "%U", format_ip6_address, u->ip6);
+ break;
+ }
+ return (s);
+}
+
+static u8 *
+format_vl_api_fib_path_type (u8 * s, va_list * args)
+{
+ vl_api_fib_path_type_t t = va_arg (*args, vl_api_fib_path_type_t);
+
+ switch (t)
+ {
+ case FIB_API_PATH_TYPE_NORMAL:
+ s = format (s, "normal");
+ break;
+ case FIB_API_PATH_TYPE_LOCAL:
+ s = format (s, "local");
+ break;
+ case FIB_API_PATH_TYPE_DROP:
+ s = format (s, "drop");
+ break;
+ case FIB_API_PATH_TYPE_UDP_ENCAP:
+ s = format (s, "udp-encap");
+ break;
+ case FIB_API_PATH_TYPE_BIER_IMP:
+ s = format (s, "bier-imp");
+ break;
+ case FIB_API_PATH_TYPE_ICMP_UNREACH:
+ s = format (s, "unreach");
+ break;
+ case FIB_API_PATH_TYPE_ICMP_PROHIBIT:
+ s = format (s, "prohibit");
+ break;
+ case FIB_API_PATH_TYPE_SOURCE_LOOKUP:
+ s = format (s, "src-lookup");
+ break;
+ case FIB_API_PATH_TYPE_DVR:
+ s = format (s, "dvr");
+ break;
+ case FIB_API_PATH_TYPE_INTERFACE_RX:
+ s = format (s, "interface-rx");
+ break;
+ case FIB_API_PATH_TYPE_CLASSIFY:
+ s = format (s, "classify");
+ break;
+ }
+
+ return (s);
+}
+
static void
-vl_api_mpls_fib_path_print (vat_main_t * vam, vl_api_fib_path_t * fp)
+vl_api_fib_path_print (vat_main_t * vam, vl_api_fib_path_t * fp)
{
- if (fp->afi == IP46_TYPE_IP6)
- print (vam->ofp,
- " weight %d, sw_if_index %d, is_local %d, is_drop %d, "
- "is_unreach %d, is_prohitbit %d, afi %d, next_hop %U",
- fp->weight, ntohl (fp->sw_if_index), fp->is_local,
- fp->is_drop, fp->is_unreach, fp->is_prohibit, fp->afi,
- format_ip6_address, fp->next_hop);
- else if (fp->afi == IP46_TYPE_IP4)
- print (vam->ofp,
- " weight %d, sw_if_index %d, is_local %d, is_drop %d, "
- "is_unreach %d, is_prohitbit %d, afi %d, next_hop %U",
- fp->weight, ntohl (fp->sw_if_index), fp->is_local,
- fp->is_drop, fp->is_unreach, fp->is_prohibit, fp->afi,
- format_ip4_address, fp->next_hop);
+ print (vam->ofp,
+ " weight %d, sw_if_index %d, type %U, afi %U, next_hop %U",
+ ntohl (fp->weight), ntohl (fp->sw_if_index),
+ format_vl_api_fib_path_type, fp->type,
+ format_fib_api_path_nh_proto, fp->proto,
+ format_vl_api_ip_address_union, &fp->nh.address);
}
static void
@@ -18945,19 +18817,16 @@ vl_api_mpls_fib_path_json_print (vat_json_node_t * node,
vat_json_object_add_uint (node, "weight", ntohl (fp->weight));
vat_json_object_add_uint (node, "sw_if_index", ntohl (fp->sw_if_index));
- vat_json_object_add_uint (node, "is_local", fp->is_local);
- vat_json_object_add_uint (node, "is_drop", fp->is_drop);
- vat_json_object_add_uint (node, "is_unreach", fp->is_unreach);
- vat_json_object_add_uint (node, "is_prohibit", fp->is_prohibit);
- vat_json_object_add_uint (node, "next_hop_afi", fp->afi);
- if (fp->afi == IP46_TYPE_IP4)
- {
- clib_memcpy (&ip4, &fp->next_hop, sizeof (ip4));
+ vat_json_object_add_uint (node, "type", fp->type);
+ vat_json_object_add_uint (node, "next_hop_proto", fp->proto);
+ if (fp->proto == FIB_API_PATH_NH_PROTO_IP4)
+ {
+ clib_memcpy (&ip4, &fp->nh.address.ip4, sizeof (ip4));
vat_json_object_add_ip4 (node, "next_hop", ip4);
}
- else if (fp->afi == IP46_TYPE_IP6)
+ else if (fp->proto == FIB_API_PATH_NH_PROTO_IP4)
{
- clib_memcpy (&ip6, &fp->next_hop, sizeof (ip6));
+ clib_memcpy (&ip6, &fp->nh.address.ip6, sizeof (ip6));
vat_json_object_add_ip6 (node, "next_hop", ip6);
}
}
@@ -18966,16 +18835,16 @@ static void
vl_api_mpls_tunnel_details_t_handler (vl_api_mpls_tunnel_details_t * mp)
{
vat_main_t *vam = &vat_main;
- int count = ntohl (mp->mt_count);
+ int count = ntohl (mp->mt_tunnel.mt_n_paths);
vl_api_fib_path_t *fp;
i32 i;
- print (vam->ofp, "[%d]: sw_if_index %d via:",
- ntohl (mp->mt_tunnel_index), ntohl (mp->mt_sw_if_index));
- fp = mp->mt_paths;
+ print (vam->ofp, "sw_if_index %d via:",
+ ntohl (mp->mt_tunnel.mt_sw_if_index));
+ fp = mp->mt_tunnel.mt_paths;
for (i = 0; i < count; i++)
{
- vl_api_mpls_fib_path_print (vam, fp);
+ vl_api_fib_path_print (vam, fp);
fp++;
}
@@ -18990,7 +18859,7 @@ vl_api_mpls_tunnel_details_t_handler_json (vl_api_mpls_tunnel_details_t * mp)
{
vat_main_t *vam = &vat_main;
vat_json_node_t *node = NULL;
- int count = ntohl (mp->mt_count);
+ int count = ntohl (mp->mt_tunnel.mt_n_paths);
vl_api_fib_path_t *fp;
i32 i;
@@ -19002,13 +18871,12 @@ vl_api_mpls_tunnel_details_t_handler_json (vl_api_mpls_tunnel_details_t * mp)
node = vat_json_array_add (&vam->json_tree);
vat_json_init_object (node);
- vat_json_object_add_uint (node, "tunnel_index",
- ntohl (mp->mt_tunnel_index));
- vat_json_object_add_uint (node, "sw_if_index", ntohl (mp->mt_sw_if_index));
+ vat_json_object_add_uint (node, "sw_if_index",
+ ntohl (mp->mt_tunnel.mt_sw_if_index));
- vat_json_object_add_uint (node, "l2_only", mp->mt_l2_only);
+ vat_json_object_add_uint (node, "l2_only", mp->mt_tunnel.mt_l2_only);
- fp = mp->mt_paths;
+ fp = mp->mt_tunnel.mt_paths;
for (i = 0; i < count; i++)
{
vl_api_mpls_fib_path_json_print (node, fp);
@@ -19021,20 +18889,57 @@ api_mpls_tunnel_dump (vat_main_t * vam)
{
vl_api_mpls_tunnel_dump_t *mp;
vl_api_control_ping_t *mp_ping;
- u32 sw_if_index = ~0;
int ret;
- /* Parse args required to build the message */
- while (unformat_check_input (vam->input) != UNFORMAT_END_OF_INPUT)
+ M (MPLS_TUNNEL_DUMP, mp);
+
+ S (mp);
+
+ /* Use a control ping for synchronization */
+ MPING (CONTROL_PING, mp_ping);
+ S (mp_ping);
+
+ W (ret);
+ return ret;
+}
+
+#define vl_api_mpls_table_details_t_endian vl_noop_handler
+#define vl_api_mpls_table_details_t_print vl_noop_handler
+
+
+static void
+vl_api_mpls_table_details_t_handler (vl_api_mpls_table_details_t * mp)
+{
+ vat_main_t *vam = &vat_main;
+
+ print (vam->ofp, "table-id %d,", ntohl (mp->mt_table.mt_table_id));
+}
+
+static void vl_api_mpls_table_details_t_handler_json
+ (vl_api_mpls_table_details_t * mp)
+{
+ vat_main_t *vam = &vat_main;
+ vat_json_node_t *node = NULL;
+
+ if (VAT_JSON_ARRAY != vam->json_tree.type)
{
- if (unformat (vam->input, "sw_if_index %d", &sw_if_index))
- ;
+ ASSERT (VAT_JSON_NONE == vam->json_tree.type);
+ vat_json_init_array (&vam->json_tree);
}
+ node = vat_json_array_add (&vam->json_tree);
- print (vam->ofp, " sw_if_index %d", sw_if_index);
+ vat_json_init_object (node);
+ vat_json_object_add_uint (node, "table", ntohl (mp->mt_table.mt_table_id));
+}
- M (MPLS_TUNNEL_DUMP, mp);
- mp->sw_if_index = htonl (sw_if_index);
+static int
+api_mpls_table_dump (vat_main_t * vam)
+{
+ vl_api_mpls_table_dump_t *mp;
+ vl_api_control_ping_t *mp_ping;
+ int ret;
+
+ M (MPLS_TABLE_DUMP, mp);
S (mp);
/* Use a control ping for synchronization */
@@ -19045,34 +18950,34 @@ api_mpls_tunnel_dump (vat_main_t * vam)
return ret;
}
-#define vl_api_mpls_fib_details_t_endian vl_noop_handler
-#define vl_api_mpls_fib_details_t_print vl_noop_handler
-
+#define vl_api_mpls_route_details_t_endian vl_noop_handler
+#define vl_api_mpls_route_details_t_print vl_noop_handler
static void
-vl_api_mpls_fib_details_t_handler (vl_api_mpls_fib_details_t * mp)
+vl_api_mpls_route_details_t_handler (vl_api_mpls_route_details_t * mp)
{
vat_main_t *vam = &vat_main;
- int count = ntohl (mp->count);
+ int count = ntohl (mp->mr_route.mr_n_paths);
vl_api_fib_path_t *fp;
int i;
print (vam->ofp,
"table-id %d, label %u, ess_bit %u",
- ntohl (mp->table_id), ntohl (mp->label), mp->eos_bit);
- fp = mp->path;
+ ntohl (mp->mr_route.mr_table_id),
+ ntohl (mp->mr_route.mr_label), mp->mr_route.mr_eos);
+ fp = mp->mr_route.mr_paths;
for (i = 0; i < count; i++)
{
- vl_api_mpls_fib_path_print (vam, fp);
+ vl_api_fib_path_print (vam, fp);
fp++;
}
}
-static void vl_api_mpls_fib_details_t_handler_json
- (vl_api_mpls_fib_details_t * mp)
+static void vl_api_mpls_route_details_t_handler_json
+ (vl_api_mpls_route_details_t * mp)
{
vat_main_t *vam = &vat_main;
- int count = ntohl (mp->count);
+ int count = ntohl (mp->mr_route.mr_n_paths);
vat_json_node_t *node = NULL;
vl_api_fib_path_t *fp;
int i;
@@ -19085,11 +18990,11 @@ static void vl_api_mpls_fib_details_t_handler_json
node = vat_json_array_add (&vam->json_tree);
vat_json_init_object (node);
- vat_json_object_add_uint (node, "table", ntohl (mp->table_id));
- vat_json_object_add_uint (node, "s_bit", mp->eos_bit);
- vat_json_object_add_uint (node, "label", ntohl (mp->label));
+ vat_json_object_add_uint (node, "table", ntohl (mp->mr_route.mr_table_id));
+ vat_json_object_add_uint (node, "s_bit", mp->mr_route.mr_eos);
+ vat_json_object_add_uint (node, "label", ntohl (mp->mr_route.mr_label));
vat_json_object_add_uint (node, "path_count", count);
- fp = mp->path;
+ fp = mp->mr_route.mr_paths;
for (i = 0; i < count; i++)
{
vl_api_mpls_fib_path_json_print (node, fp);
@@ -19098,13 +19003,30 @@ static void vl_api_mpls_fib_details_t_handler_json
}
static int
-api_mpls_fib_dump (vat_main_t * vam)
+api_mpls_route_dump (vat_main_t * vam)
{
- vl_api_mpls_fib_dump_t *mp;
+ unformat_input_t *input = vam->input;
+ vl_api_mpls_route_dump_t *mp;
vl_api_control_ping_t *mp_ping;
+ u32 table_id;
int ret;
- M (MPLS_FIB_DUMP, mp);
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table_id %d", &table_id))
+ ;
+ else
+ break;
+ }
+ if (table_id == ~0)
+ {
+ errmsg ("missing table id");
+ return -99;
+ }
+
+ M (MPLS_ROUTE_DUMP, mp);
+
+ mp->table.mt_table_id = ntohl (table_id);
S (mp);
/* Use a control ping for synchronization */
@@ -19115,54 +19037,25 @@ api_mpls_fib_dump (vat_main_t * vam)
return ret;
}
-#define vl_api_ip_fib_details_t_endian vl_noop_handler
-#define vl_api_ip_fib_details_t_print vl_noop_handler
+#define vl_api_ip_table_details_t_endian vl_noop_handler
+#define vl_api_ip_table_details_t_print vl_noop_handler
static void
-vl_api_ip_fib_details_t_handler (vl_api_ip_fib_details_t * mp)
+vl_api_ip_table_details_t_handler (vl_api_ip_table_details_t * mp)
{
vat_main_t *vam = &vat_main;
- int count = ntohl (mp->count);
- vl_api_fib_path_t *fp;
- int i;
print (vam->ofp,
- "table-id %d, prefix %U/%d stats-index %d",
- ntohl (mp->table_id), format_ip4_address, mp->address,
- mp->address_length, ntohl (mp->stats_index));
- fp = mp->path;
- for (i = 0; i < count; i++)
- {
- if (fp->afi == IP46_TYPE_IP6)
- print (vam->ofp,
- " weight %d, sw_if_index %d, is_local %d, is_drop %d, "
- "is_unreach %d, is_prohitbit %d, afi %d, next_hop %U, "
- "next_hop_table %d",
- ntohl (fp->weight), ntohl (fp->sw_if_index), fp->is_local,
- fp->is_drop, fp->is_unreach, fp->is_prohibit, fp->afi,
- format_ip6_address, fp->next_hop, ntohl (fp->table_id));
- else if (fp->afi == IP46_TYPE_IP4)
- print (vam->ofp,
- " weight %d, sw_if_index %d, is_local %d, is_drop %d, "
- "is_unreach %d, is_prohitbit %d, afi %d, next_hop %U, "
- "next_hop_table %d",
- ntohl (fp->weight), ntohl (fp->sw_if_index), fp->is_local,
- fp->is_drop, fp->is_unreach, fp->is_prohibit, fp->afi,
- format_ip4_address, fp->next_hop, ntohl (fp->table_id));
- fp++;
- }
+ "%s; table-id %d, prefix %U/%d",
+ mp->table.name, ntohl (mp->table.table_id));
}
-static void vl_api_ip_fib_details_t_handler_json
- (vl_api_ip_fib_details_t * mp)
+
+static void vl_api_ip_table_details_t_handler_json
+ (vl_api_ip_table_details_t * mp)
{
vat_main_t *vam = &vat_main;
- int count = ntohl (mp->count);
vat_json_node_t *node = NULL;
- struct in_addr ip4;
- struct in6_addr ip6;
- vl_api_fib_path_t *fp;
- int i;
if (VAT_JSON_ARRAY != vam->json_tree.type)
{
@@ -19172,42 +19065,17 @@ static void vl_api_ip_fib_details_t_handler_json
node = vat_json_array_add (&vam->json_tree);
vat_json_init_object (node);
- vat_json_object_add_uint (node, "table", ntohl (mp->table_id));
- clib_memcpy (&ip4, &mp->address, sizeof (ip4));
- vat_json_object_add_ip4 (node, "prefix", ip4);
- vat_json_object_add_uint (node, "mask_length", mp->address_length);
- vat_json_object_add_uint (node, "path_count", count);
- fp = mp->path;
- for (i = 0; i < count; i++)
- {
- vat_json_object_add_uint (node, "weight", ntohl (fp->weight));
- vat_json_object_add_uint (node, "sw_if_index", ntohl (fp->sw_if_index));
- vat_json_object_add_uint (node, "is_local", fp->is_local);
- vat_json_object_add_uint (node, "is_drop", fp->is_drop);
- vat_json_object_add_uint (node, "is_unreach", fp->is_unreach);
- vat_json_object_add_uint (node, "is_prohibit", fp->is_prohibit);
- vat_json_object_add_uint (node, "next_hop_afi", fp->afi);
- if (fp->afi == IP46_TYPE_IP4)
- {
- clib_memcpy (&ip4, &fp->next_hop, sizeof (ip4));
- vat_json_object_add_ip4 (node, "next_hop", ip4);
- }
- else if (fp->afi == IP46_TYPE_IP6)
- {
- clib_memcpy (&ip6, &fp->next_hop, sizeof (ip6));
- vat_json_object_add_ip6 (node, "next_hop", ip6);
- }
- }
+ vat_json_object_add_uint (node, "table", ntohl (mp->table.table_id));
}
static int
-api_ip_fib_dump (vat_main_t * vam)
+api_ip_table_dump (vat_main_t * vam)
{
- vl_api_ip_fib_dump_t *mp;
+ vl_api_ip_table_dump_t *mp;
vl_api_control_ping_t *mp_ping;
int ret;
- M (IP_FIB_DUMP, mp);
+ M (IP_TABLE_DUMP, mp);
S (mp);
/* Use a control ping for synchronization */
@@ -19219,13 +19087,53 @@ api_ip_fib_dump (vat_main_t * vam)
}
static int
-api_ip_mfib_dump (vat_main_t * vam)
+api_ip_mtable_dump (vat_main_t * vam)
{
- vl_api_ip_mfib_dump_t *mp;
+ vl_api_ip_mtable_dump_t *mp;
vl_api_control_ping_t *mp_ping;
int ret;
- M (IP_MFIB_DUMP, mp);
+ M (IP_MTABLE_DUMP, mp);
+ S (mp);
+
+ /* Use a control ping for synchronization */
+ MPING (CONTROL_PING, mp_ping);
+ S (mp_ping);
+
+ W (ret);
+ return ret;
+}
+
+static int
+api_ip_mroute_dump (vat_main_t * vam)
+{
+ unformat_input_t *input = vam->input;
+ vl_api_control_ping_t *mp_ping;
+ vl_api_ip_mroute_dump_t *mp;
+ int ret, is_ip6;
+ u32 table_id;
+
+ is_ip6 = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table_id %d", &table_id))
+ ;
+ else if (unformat (input, "ip6"))
+ is_ip6 = 1;
+ else if (unformat (input, "ip4"))
+ is_ip6 = 0;
+ else
+ break;
+ }
+ if (table_id == ~0)
+ {
+ errmsg ("missing table id");
+ return -99;
+ }
+
+ M (IP_MROUTE_DUMP, mp);
+ mp->table.table_id = table_id;
+ mp->table.is_ip6 = is_ip6;
S (mp);
/* Use a control ping for synchronization */
@@ -19315,47 +19223,36 @@ api_ip_neighbor_dump (vat_main_t * vam)
return ret;
}
-#define vl_api_ip6_fib_details_t_endian vl_noop_handler
-#define vl_api_ip6_fib_details_t_print vl_noop_handler
+#define vl_api_ip_route_details_t_endian vl_noop_handler
+#define vl_api_ip_route_details_t_print vl_noop_handler
static void
-vl_api_ip6_fib_details_t_handler (vl_api_ip6_fib_details_t * mp)
+vl_api_ip_route_details_t_handler (vl_api_ip_route_details_t * mp)
{
vat_main_t *vam = &vat_main;
- int count = ntohl (mp->count);
+ u8 count = mp->route.n_paths;
vl_api_fib_path_t *fp;
int i;
print (vam->ofp,
- "table-id %d, prefix %U/%d stats-index %d",
- ntohl (mp->table_id), format_ip6_address, mp->address,
- mp->address_length, ntohl (mp->stats_index));
- fp = mp->path;
+ "table-id %d, prefix %U/%d",
+ ntohl (mp->route.table_id),
+ format_ip46_address,
+ mp->route.prefix.address, mp->route.prefix.address_length);
for (i = 0; i < count; i++)
{
- if (fp->afi == IP46_TYPE_IP6)
- print (vam->ofp,
- " weight %d, sw_if_index %d, is_local %d, is_drop %d, "
- "is_unreach %d, is_prohitbit %d, afi %d, next_hop %U",
- ntohl (fp->weight), ntohl (fp->sw_if_index), fp->is_local,
- fp->is_drop, fp->is_unreach, fp->is_prohibit, fp->afi,
- format_ip6_address, fp->next_hop);
- else if (fp->afi == IP46_TYPE_IP4)
- print (vam->ofp,
- " weight %d, sw_if_index %d, is_local %d, is_drop %d, "
- "is_unreach %d, is_prohitbit %d, afi %d, next_hop %U",
- ntohl (fp->weight), ntohl (fp->sw_if_index), fp->is_local,
- fp->is_drop, fp->is_unreach, fp->is_prohibit, fp->afi,
- format_ip4_address, fp->next_hop);
+ fp = &mp->route.paths[i];
+
+ vl_api_fib_path_print (vam, fp);
fp++;
}
}
-static void vl_api_ip6_fib_details_t_handler_json
- (vl_api_ip6_fib_details_t * mp)
+static void vl_api_ip_route_details_t_handler_json
+ (vl_api_ip_route_details_t * mp)
{
vat_main_t *vam = &vat_main;
- int count = ntohl (mp->count);
+ u8 count = mp->route.n_paths;
vat_json_node_t *node = NULL;
struct in_addr ip4;
struct in6_addr ip6;
@@ -19370,60 +19267,60 @@ static void vl_api_ip6_fib_details_t_handler_json
node = vat_json_array_add (&vam->json_tree);
vat_json_init_object (node);
- vat_json_object_add_uint (node, "table", ntohl (mp->table_id));
- clib_memcpy (&ip6, &mp->address, sizeof (ip6));
- vat_json_object_add_ip6 (node, "prefix", ip6);
- vat_json_object_add_uint (node, "mask_length", mp->address_length);
+ vat_json_object_add_uint (node, "table", ntohl (mp->route.table_id));
+ if (ADDRESS_IP6 == mp->route.prefix.address.af)
+ {
+ clib_memcpy (&ip6, &mp->route.prefix.address.un.ip6, sizeof (ip6));
+ vat_json_object_add_ip6 (node, "prefix", ip6);
+ }
+ else
+ {
+ clib_memcpy (&ip4, &mp->route.prefix.address.un.ip4, sizeof (ip4));
+ vat_json_object_add_ip4 (node, "prefix", ip4);
+ }
+ vat_json_object_add_uint (node, "mask_length",
+ mp->route.prefix.address_length);
vat_json_object_add_uint (node, "path_count", count);
- fp = mp->path;
for (i = 0; i < count; i++)
{
- vat_json_object_add_uint (node, "weight", ntohl (fp->weight));
- vat_json_object_add_uint (node, "sw_if_index", ntohl (fp->sw_if_index));
- vat_json_object_add_uint (node, "is_local", fp->is_local);
- vat_json_object_add_uint (node, "is_drop", fp->is_drop);
- vat_json_object_add_uint (node, "is_unreach", fp->is_unreach);
- vat_json_object_add_uint (node, "is_prohibit", fp->is_prohibit);
- vat_json_object_add_uint (node, "next_hop_afi", fp->afi);
- if (fp->afi == IP46_TYPE_IP4)
- {
- clib_memcpy (&ip4, &fp->next_hop, sizeof (ip4));
- vat_json_object_add_ip4 (node, "next_hop", ip4);
- }
- else if (fp->afi == IP46_TYPE_IP6)
- {
- clib_memcpy (&ip6, &fp->next_hop, sizeof (ip6));
- vat_json_object_add_ip6 (node, "next_hop", ip6);
- }
+ fp = &mp->route.paths[i];
+ vl_api_mpls_fib_path_json_print (node, fp);
}
}
static int
-api_ip6_fib_dump (vat_main_t * vam)
+api_ip_route_dump (vat_main_t * vam)
{
- vl_api_ip6_fib_dump_t *mp;
+ unformat_input_t *input = vam->input;
+ vl_api_ip_route_dump_t *mp;
vl_api_control_ping_t *mp_ping;
+ u32 table_id;
+ u8 is_ip6;
int ret;
- M (IP6_FIB_DUMP, mp);
- S (mp);
-
- /* Use a control ping for synchronization */
- MPING (CONTROL_PING, mp_ping);
- S (mp_ping);
+ is_ip6 = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table_id %d", &table_id))
+ ;
+ else if (unformat (input, "ip6"))
+ is_ip6 = 1;
+ else if (unformat (input, "ip4"))
+ is_ip6 = 0;
+ else
+ break;
+ }
+ if (table_id == ~0)
+ {
+ errmsg ("missing table id");
+ return -99;
+ }
- W (ret);
- return ret;
-}
+ M (IP_ROUTE_DUMP, mp);
-static int
-api_ip6_mfib_dump (vat_main_t * vam)
-{
- vl_api_ip6_mfib_dump_t *mp;
- vl_api_control_ping_t *mp_ping;
- int ret;
+ mp->table.table_id = table_id;
+ mp->table.is_ip6 = is_ip6;
- M (IP6_MFIB_DUMP, mp);
S (mp);
/* Use a control ping for synchronization */
@@ -22161,7 +22058,7 @@ _(sw_interface_slave_dump, \
"<vpp-if-name> | sw_if_index <id>") \
_(ip_table_add_del, \
"table <n> [ipv6] [add | del]\n") \
-_(ip_add_del_route, \
+_(ip_route_add_del, \
"<addr>/<mask> via <<addr>|<intfc>|sw_if_index <id>|via-label <n>>\n" \
"[table-id <n>] [<intfc> | sw_if_index <id>] [resolve-attempts <n>]\n"\
"[weight <n>] [drop] [local] [classify <n>] [out-label <n>]\n" \
@@ -22487,7 +22384,8 @@ _(netmap_create, "name <interface name> [hw-addr <mac>] [pipe] " \
"[master|slave]") \
_(netmap_delete, "name <interface name>") \
_(mpls_tunnel_dump, "tunnel_index <tunnel-id>") \
-_(mpls_fib_dump, "") \
+_(mpls_table_dump, "") \
+_(mpls_route_dump, "table-id <ID>") \
_(classify_table_ids, "") \
_(classify_table_by_interface, "sw_if_index <sw_if_index>") \
_(classify_table_info, "table_id <nn>") \
@@ -22523,10 +22421,10 @@ _(set_punt, "protocol <l4-protocol> [ip <ver>] [port <l4-port>] [del]") \
_(flow_classify_set_interface, \
"<intfc> | sw_if_index <nn> [ip4-table <nn>] [ip6-table <nn>] [del]") \
_(flow_classify_dump, "type [ip4|ip6]") \
-_(ip_fib_dump, "") \
-_(ip_mfib_dump, "") \
-_(ip6_fib_dump, "") \
-_(ip6_mfib_dump, "") \
+_(ip_table_dump, "") \
+_(ip_route_dump, "table-id [ip4|ip6]") \
+_(ip_mtable_dump, "") \
+_(ip_mroute_dump, "table-id [ip4|ip6]") \
_(feature_enable_disable, "arc_name <arc_name> " \
"feature_name <feature_name> <intfc> | sw_if_index <nn> [disable]") \
_(sw_interface_tag_add_del, "<intfc> | sw_if_index <nn> tag <text>" \
@@ -22578,7 +22476,7 @@ _(quit, "usage: quit") \
_(search_node_table, "usage: search_node_table <name>...") \
_(set, "usage: set <variable-name> <value>") \
_(script, "usage: script <file-name>") \
-_(statseg, "usage: statseg"); \
+_(statseg, "usage: statseg") \
_(unset, "usage: unset <variable-name>")
#define _(N,n) \
diff --git a/src/vnet/CMakeLists.txt b/src/vnet/CMakeLists.txt
index 1d7d4988002..5465d717f8c 100644
--- a/src/vnet/CMakeLists.txt
+++ b/src/vnet/CMakeLists.txt
@@ -1455,6 +1455,7 @@ list(APPEND VNET_SOURCES
mfib/mfib_forward.c
mfib/ip4_mfib.c
mfib/ip6_mfib.c
+ mfib/mfib_api.c
mfib/mfib_types.c
mfib/mfib_signal.c
mfib/mfib_itf.c
diff --git a/src/vnet/api_errno.h b/src/vnet/api_errno.h
index be42086e668..8771d2c1a88 100644
--- a/src/vnet/api_errno.h
+++ b/src/vnet/api_errno.h
@@ -44,7 +44,7 @@ _(FEATURE_DISABLED, -30, "Feature disabled by configuration") \
_(INVALID_REGISTRATION, -31, "Invalid registration") \
_(NEXT_HOP_NOT_IN_FIB, -50, "Next hop not in FIB") \
_(UNKNOWN_DESTINATION, -51, "Unknown destination") \
-_(PREFIX_MATCHES_NEXT_HOP, -52, "Prefix matches next hop") \
+_(NO_PATHS_IN_ROUTE, -52, "No paths specified in route") \
_(NEXT_HOP_NOT_FOUND_MP, -53, "Next hop not found (multipath)") \
_(NO_MATCHING_INTERFACE, -54, "No matching interface for probe") \
_(INVALID_VLAN, -55, "Invalid VLAN") \
@@ -148,7 +148,8 @@ _(BD_ALREADY_HAS_BVI, -152, "Bridge domain already has a BVI interface") \
_(INVALID_PROTOCOL, -153, "Invalid Protocol") \
_(INVALID_ALGORITHM, -154, "Invalid Algorithm") \
_(RSRC_IN_USE, -155, "Resource In Use") \
-_(KEY_LENGTH, -156, "invalid Key Length")
+_(KEY_LENGTH, -156, "invalid Key Length") \
+_(FIB_PATH_UNSUPPORTED_NH_PROTO, -157, "Unsupported FIB Path protocol")
typedef enum
{
diff --git a/src/vnet/bier/bier.api b/src/vnet/bier/bier.api
index b5ac8cabd39..0cc56cab50a 100644
--- a/src/vnet/bier/bier.api
+++ b/src/vnet/bier/bier.api
@@ -1,3 +1,4 @@
+/* Hey Emacs use -*- mode: C -*- */
/*
* Copyright (c) 2016 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,7 +19,7 @@
This file defines vpp BIER control-plane API messages which are generally
called through a shared memory interface.
*/
-option version = "1.1.0";
+option version = "1.2.0";
import "vnet/fib/fib_types.api";
/** \brief BIER Table Identifier
@@ -77,16 +78,21 @@ define bier_table_details
@param br_n_paths - The number of paths
@param br_paths - The array of paths
*/
+typedef bier_route
+{
+ u32 br_bp;
+ vl_api_bier_table_id_t br_tbl_id;
+ u8 br_n_paths;
+ vl_api_fib_path_t br_paths[br_n_paths];
+};
+
autoreply define bier_route_add_del
{
u32 client_index;
u32 context;
- u32 br_bp;
u8 br_is_add;
u8 br_is_replace;
- vl_api_bier_table_id_t br_tbl_id;
- u8 br_n_paths;
- vl_api_fib_path_t br_paths[br_n_paths];
+ vl_api_bier_route_t br_route;
};
define bier_route_dump
@@ -99,10 +105,7 @@ define bier_route_dump
define bier_route_details
{
u32 context;
- u16 br_bp;
- vl_api_bier_table_id_t br_tbl_id;
- u32 br_n_paths;
- vl_api_fib_path_t br_paths[br_n_paths];
+ vl_api_bier_route_t br_route;
};
/** \brief BIER Imposition Add
diff --git a/src/vnet/bier/bier_api.c b/src/vnet/bier/bier_api.c
index d8248b1b2bd..66f6b422810 100644
--- a/src/vnet/bier/bier_api.c
+++ b/src/vnet/bier/bier_api.c
@@ -169,10 +169,10 @@ vl_api_bier_route_add_del_t_handler (vl_api_bier_route_add_del_t * mp)
vnm = vnet_get_main ();
vnm->api_errno = 0;
- bp = ntohl(mp->br_bp);
+ bp = ntohl(mp->br_route.br_bp);
brpaths = NULL;
- if (mp->br_tbl_id.bt_hdr_len_id >= BIER_HDR_LEN_2048)
+ if (mp->br_route.br_tbl_id.bt_hdr_len_id >= BIER_HDR_LEN_2048)
{
rv = VNET_API_ERROR_BIER_BSL_UNSUP;
goto done;
@@ -184,19 +184,19 @@ vl_api_bier_route_add_del_t_handler (vl_api_bier_route_add_del_t * mp)
}
bier_table_id_t bti = {
- .bti_set = mp->br_tbl_id.bt_set,
- .bti_sub_domain = mp->br_tbl_id.bt_sub_domain,
- .bti_hdr_len = mp->br_tbl_id.bt_hdr_len_id,
+ .bti_set = mp->br_route.br_tbl_id.bt_set,
+ .bti_sub_domain = mp->br_route.br_tbl_id.bt_sub_domain,
+ .bti_hdr_len = mp->br_route.br_tbl_id.bt_hdr_len_id,
.bti_type = BIER_TABLE_MPLS_SPF,
.bti_ecmp = BIER_ECMP_TABLE_ID_MAIN,
};
- vec_validate(brpaths, mp->br_n_paths - 1);
+ vec_validate(brpaths, mp->br_route.br_n_paths - 1);
vec_foreach_index(ii, brpaths)
{
brpath = &brpaths[ii];
- rv = fib_path_api_parse(&mp->br_paths[ii], brpath);
+ rv = fib_api_path_decode(&mp->br_route.br_paths[ii], brpath);
if (0 != rv)
{
@@ -242,9 +242,12 @@ send_bier_route_details (const bier_table_t *bt,
const bier_entry_t *be,
void *args)
{
- fib_route_path_encode_t *api_rpaths = NULL, *api_rpath;
bier_route_details_walk_t *ctx = args;
vl_api_bier_route_details_t *mp;
+ fib_path_encode_ctx_t path_ctx = {
+ .rpaths = NULL,
+ };
+ fib_route_path_t *rpath;
vl_api_fib_path_t *fp;
u32 n_paths, m_size;
@@ -258,24 +261,25 @@ send_bier_route_details (const bier_table_t *bt,
mp->_vl_msg_id = ntohs(VL_API_BIER_ROUTE_DETAILS);
mp->context = ctx->context;
- mp->br_tbl_id.bt_set = bt->bt_id.bti_set;
- mp->br_tbl_id.bt_sub_domain = bt->bt_id.bti_sub_domain;
- mp->br_tbl_id.bt_hdr_len_id = bt->bt_id.bti_hdr_len;
- mp->br_bp = htons(be->be_bp);
- mp->br_n_paths = htonl(n_paths);
+ mp->br_route.br_tbl_id.bt_set = bt->bt_id.bti_set;
+ mp->br_route.br_tbl_id.bt_sub_domain = bt->bt_id.bti_sub_domain;
+ mp->br_route.br_tbl_id.bt_hdr_len_id = bt->bt_id.bti_hdr_len;
+ mp->br_route.br_bp = htonl(be->be_bp);
+ mp->br_route.br_n_paths = htonl(n_paths);
fib_path_list_walk_w_ext(be->be_path_list,
NULL,
fib_path_encode,
- &api_rpaths);
+ &path_ctx);
- fp = mp->br_paths;
- vec_foreach (api_rpath, api_rpaths)
+ fp = mp->br_route.br_paths;
+ vec_foreach (rpath, path_ctx.rpaths)
{
- fib_api_path_encode(api_rpath, fp);
+ fib_api_path_encode(rpath, fp);
fp++;
}
+ vec_free(path_ctx.rpaths);
vl_api_send_msg (ctx->reg, (u8 *) mp);
}
@@ -506,16 +510,16 @@ vl_api_bier_disp_entry_add_del_t_handler (vl_api_bier_disp_entry_add_del_t * mp)
brp->frp_rpf_id = ntohl(mp->bde_paths[ii].rpf_id);
}
- if (0 == mp->bde_paths[ii].afi)
+ if (FIB_API_PATH_NH_PROTO_IP4 == mp->bde_paths[ii].proto)
{
- clib_memcpy_fast (&brp->frp_addr.ip4,
- mp->bde_paths[ii].next_hop,
+ clib_memcpy (&brp->frp_addr.ip4,
+ &mp->bde_paths[ii].nh.address.ip4,
sizeof (brp->frp_addr.ip4));
}
- else
+ else if (FIB_API_PATH_NH_PROTO_IP6 == mp->bde_paths[ii].proto)
{
- clib_memcpy_fast (&brp->frp_addr.ip6,
- mp->bde_paths[ii].next_hop,
+ clib_memcpy (&brp->frp_addr.ip6,
+ &mp->bde_paths[ii].nh.address.ip6,
sizeof (brp->frp_addr.ip6));
}
if (ip46_address_is_zero(&brp->frp_addr))
@@ -601,7 +605,6 @@ send_bier_disp_entry_details (const bier_disp_table_t *bdt,
u16 bp,
void *args)
{
- fib_route_path_encode_t *api_rpaths = NULL, *api_rpath;
bier_disp_entry_details_walk_t *ctx = args;
vl_api_bier_disp_entry_details_t *mp;
bier_hdr_proto_id_t pproto;
@@ -611,8 +614,14 @@ send_bier_disp_entry_details (const bier_disp_table_t *bdt,
FOR_EACH_BIER_HDR_PROTO(pproto)
{
fib_node_index_t pl = bde->bde_pl[pproto];
+
if (INDEX_INVALID != pl)
{
+ fib_path_encode_ctx_t path_ctx = {
+ .rpaths = NULL,
+ };
+ fib_route_path_t *rpath;
+
n_paths = fib_path_list_get_n_paths(pl);
m_size = sizeof(*mp) + (n_paths * sizeof(vl_api_fib_path_t));
mp = vl_msg_api_alloc(m_size);
@@ -631,16 +640,17 @@ send_bier_disp_entry_details (const bier_disp_table_t *bdt,
fib_path_list_walk_w_ext(pl,
NULL,
fib_path_encode,
- &api_rpaths);
+ &path_ctx);
fp = mp->bde_paths;
- vec_foreach (api_rpath, api_rpaths)
+ vec_foreach (rpath, path_ctx.rpaths)
{
- fib_api_path_encode(api_rpath, fp);
+ fib_api_path_encode(rpath, fp);
fp++;
}
vl_api_send_msg (ctx->reg, (u8 *) mp);
+ vec_free(path_ctx.rpaths);
}
}
}
diff --git a/src/vnet/bier/bier_entry.c b/src/vnet/bier/bier_entry.c
index 77d96b80782..e8bf722d88f 100644
--- a/src/vnet/bier/bier_entry.c
+++ b/src/vnet/bier/bier_entry.c
@@ -147,7 +147,7 @@ bier_entry_delete (index_t bei)
be = bier_entry_get(bei);
/*
- * if we still ahve a path-list, unlink from it
+ * if we still have a path-list, unlink from it
*/
if (FIB_NODE_INDEX_INVALID != be->be_path_list)
{
diff --git a/src/vnet/bier/bier_fmask.c b/src/vnet/bier/bier_fmask.c
index b6169d3c4bb..a58a77ff281 100644
--- a/src/vnet/bier/bier_fmask.c
+++ b/src/vnet/bier/bier_fmask.c
@@ -404,7 +404,7 @@ bier_fmask_get_stats (index_t bfmi, u64 * packets, u64 * bytes)
void
bier_fmask_encode (index_t bfmi,
bier_table_id_t *btid,
- fib_route_path_encode_t *rpath)
+ fib_route_path_t *rpath)
{
bier_fmask_t *bfm;
@@ -413,17 +413,17 @@ bier_fmask_encode (index_t bfmi,
clib_memset(rpath, 0, sizeof(*rpath));
- rpath->rpath.frp_sw_if_index = ~0;
+ rpath->frp_sw_if_index = ~0;
switch (bfm->bfm_id->bfmi_nh_type)
{
case BIER_NH_UDP:
- rpath->rpath.frp_flags = FIB_ROUTE_PATH_UDP_ENCAP;
- rpath->rpath.frp_udp_encap_id = bfm->bfm_id->bfmi_id;
+ rpath->frp_flags = FIB_ROUTE_PATH_UDP_ENCAP;
+ rpath->frp_udp_encap_id = bfm->bfm_id->bfmi_id;
break;
case BIER_NH_IP:
- memcpy(&rpath->rpath.frp_addr, &bfm->bfm_id->bfmi_nh,
- sizeof(rpath->rpath.frp_addr));
+ memcpy(&rpath->frp_addr, &bfm->bfm_id->bfmi_nh,
+ sizeof(rpath->frp_addr));
break;
}
}
diff --git a/src/vnet/bier/bier_fmask.h b/src/vnet/bier/bier_fmask.h
index e522b0350e7..87845bb031b 100644
--- a/src/vnet/bier/bier_fmask.h
+++ b/src/vnet/bier/bier_fmask.h
@@ -166,7 +166,7 @@ extern void bier_fmask_child_remove (fib_node_index_t fib_entry_index,
extern void bier_fmask_get_stats (index_t bfmi, u64 * packets, u64 * bytes);
extern void bier_fmask_encode (index_t bfmi,
bier_table_id_t *btid,
- fib_route_path_encode_t *rpath);
+ fib_route_path_t *rpath);
/*
* provided for fast data-path access
diff --git a/src/vnet/bier/bier_table.c b/src/vnet/bier/bier_table.c
index a9f8a6d338f..0e8cc1e88b4 100644
--- a/src/vnet/bier/bier_table.c
+++ b/src/vnet/bier/bier_table.c
@@ -779,9 +779,12 @@ bier_table_ecmp_walk (index_t bti,
bt = bier_table_get(bti);
- fib_path_list_walk(bt->bt_pl,
- bier_table_ecmp_walk_path_list,
- &ewc);
+ if (FIB_NODE_INDEX_INVALID != bt->bt_pl)
+ {
+ fib_path_list_walk(bt->bt_pl,
+ bier_table_ecmp_walk_path_list,
+ &ewc);
+ }
}
void
diff --git a/src/vnet/dhcp/dhcp6_proxy_node.c b/src/vnet/dhcp/dhcp6_proxy_node.c
index a199b7a34e4..a253fa1959f 100644
--- a/src/vnet/dhcp/dhcp6_proxy_node.c
+++ b/src/vnet/dhcp/dhcp6_proxy_node.c
@@ -893,16 +893,16 @@ dhcp6_proxy_set_server (ip46_address_t * addr,
.frp_addr = zero_addr,
.frp_sw_if_index = 0xffffffff,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
if (dhcp_proxy_server_add (FIB_PROTOCOL_IP6, addr, src_addr,
rx_fib_index, server_table_id))
{
mfib_table_entry_path_update (rx_fib_index,
&all_dhcp_servers,
- MFIB_SOURCE_DHCP,
- &path_for_us, MFIB_ITF_FLAG_FORWARD);
+ MFIB_SOURCE_DHCP, &path_for_us);
/*
* Each interface that is enabled in this table, needs to be added
* as an accepting interface, but this is not easily doable in VPP.
diff --git a/src/vnet/dpo/mpls_disposition.c b/src/vnet/dpo/mpls_disposition.c
index cf0b5fcf1ef..7bc2cb65f87 100644
--- a/src/vnet/dpo/mpls_disposition.c
+++ b/src/vnet/dpo/mpls_disposition.c
@@ -88,8 +88,12 @@ format_mpls_disp_dpo (u8 *s, va_list *args)
mdd = mpls_disp_dpo_get(index);
- s = format(s, "mpls-disposition:[%d]:[%U, %U]",
- index,
+ s = format(s, "mpls-disposition:[%d]:[", index);
+
+ if (0 != mdd->mdd_rpf_id)
+ s = format(s, "rpf-id:%d ", mdd->mdd_rpf_id);
+
+ s = format(s, "%U, %U]",
format_dpo_proto, mdd->mdd_payload_proto,
format_fib_mpls_lsp_mode, mdd->mdd_mode);
@@ -132,7 +136,9 @@ mpls_disp_dpo_unlock (dpo_id_t *dpo)
*/
typedef struct mpls_label_disposition_trace_t_
{
- index_t mdd;
+ dpo_proto_t mddt_payload_proto;
+ fib_rpf_id_t mddt_rpf_id;
+ fib_mpls_lsp_mode_t mddt_mode;
} mpls_label_disposition_trace_t;
extern vlib_node_registration_t ip4_mpls_label_disposition_pipe_node;
@@ -293,13 +299,17 @@ mpls_label_disposition_inline (vlib_main_t * vm,
mpls_label_disposition_trace_t *tr =
vlib_add_trace(vm, node, b0, sizeof(*tr));
- tr->mdd = mddi0;
+ tr->mddt_payload_proto = mdd0->mdd_payload_proto;
+ tr->mddt_rpf_id = mdd0->mdd_rpf_id;
+ tr->mddt_mode = mdd0->mdd_mode;
}
if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
{
mpls_label_disposition_trace_t *tr =
vlib_add_trace(vm, node, b1, sizeof(*tr));
- tr->mdd = mddi1;
+ tr->mddt_payload_proto = mdd1->mdd_payload_proto;
+ tr->mddt_rpf_id = mdd1->mdd_rpf_id;
+ tr->mddt_mode = mdd1->mdd_mode;
}
vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
@@ -383,7 +393,9 @@ mpls_label_disposition_inline (vlib_main_t * vm,
{
mpls_label_disposition_trace_t *tr =
vlib_add_trace(vm, node, b0, sizeof(*tr));
- tr->mdd = mddi0;
+ tr->mddt_payload_proto = mdd0->mdd_payload_proto;
+ tr->mddt_rpf_id = mdd0->mdd_rpf_id;
+ tr->mddt_mode = mdd0->mdd_mode;
}
vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
@@ -403,7 +415,11 @@ format_mpls_label_disposition_trace (u8 * s, va_list * args)
t = va_arg(*args, mpls_label_disposition_trace_t *);
- s = format(s, "disp:%d", t->mdd);
+ s = format(s, "rpf-id:%d %U, %U",
+ t->mddt_rpf_id,
+ format_dpo_proto, t->mddt_payload_proto,
+ format_fib_mpls_lsp_mode, t->mddt_mode);
+
return (s);
}
diff --git a/src/vnet/fib/fib_api.c b/src/vnet/fib/fib_api.c
index 12c4f0de13f..5aa5c4ec875 100644
--- a/src/vnet/fib/fib_api.c
+++ b/src/vnet/fib/fib_api.c
@@ -16,10 +16,11 @@
#include <vnet/vnet.h>
#include <vlibmemory/api.h>
#include <vnet/fib/fib_api.h>
+#include <vnet/ip/ip_types_api.h>
#include <vnet/fib/fib_table.h>
#include <vnet/mfib/mfib_table.h>
#include <vnet/bier/bier_disp_table.h>
-#include <vnet/dpo/ip_null_dpo.h>
+#include <vpp/api/types.h>
#include <vnet/vnet_msg_enum.h>
@@ -40,288 +41,485 @@
#include <vlibapi/api_helper_macros.h>
int
-fib_path_api_parse (const vl_api_fib_path_t *in,
- fib_route_path_t *out)
+fib_api_table_id_decode (fib_protocol_t fproto,
+ u32 table_id,
+ u32 *fib_index)
{
- fib_route_path_flags_t path_flags;
- mpls_label_t next_hop_via_label;
- int rv = 0, n_labels;
- u8 ii;
+ *fib_index = fib_table_find(fproto, table_id);
- path_flags = FIB_ROUTE_PATH_FLAG_NONE;
- next_hop_via_label = ntohl (in->via_label);
- clib_memset(out, 0, sizeof(*out));
- out->frp_sw_if_index = ~0;
+ if (INDEX_INVALID == *fib_index)
+ {
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ }
- out->frp_proto = in->afi;
- // .frp_addr = (NULL == next_hop ? zero_addr : *next_hop),
- out->frp_sw_if_index = ntohl(in->sw_if_index);
- out->frp_weight = in->weight;
- out->frp_preference = in->preference;
+ return (0);
+}
- if (DPO_PROTO_IP4 == out->frp_proto ||
- DPO_PROTO_IP6 == out->frp_proto ||
- DPO_PROTO_MPLS == out->frp_proto)
- {
- out->frp_fib_index = fib_table_find (dpo_proto_to_fib(out->frp_proto),
- ntohl (in->table_id));
+int
+fib_api_mtable_id_decode (fib_protocol_t fproto,
+ u32 table_id,
+ u32 *fib_index)
+{
+ *fib_index = mfib_table_find(fproto, table_id);
- if (~0 == out->frp_fib_index)
- return (VNET_API_ERROR_NO_SUCH_FIB);
+ if (~0 == *fib_index)
+ {
+ return VNET_API_ERROR_NO_SUCH_FIB;
}
- /*
- * the special INVALID label means we are not recursing via a
- * label. Exp-null value is never a valid via-label so that
- * also means it's not a via-label and means clients that set
- * it to 0 by default get the expected behaviour
- */
- if ((MPLS_LABEL_INVALID != next_hop_via_label) &&
- (0 != next_hop_via_label))
+ return (0);
+}
+
+static void
+fib_api_next_hop_decode (const vl_api_fib_path_t *in,
+ ip46_address_t *out)
+{
+ if (in->proto == FIB_API_PATH_NH_PROTO_IP4)
+ memcpy (&out->ip4, &in->nh.address.ip4, sizeof (out->ip4));
+ else if (in->proto == FIB_API_PATH_NH_PROTO_IP6)
+ memcpy (&out->ip6, &in->nh.address.ip6, sizeof (out->ip6));
+}
+
+static vl_api_fib_path_nh_proto_t
+fib_api_path_dpo_proto_to_nh (dpo_proto_t dproto)
+{
+ switch (dproto)
{
- out->frp_proto = DPO_PROTO_MPLS;
- out->frp_local_label = next_hop_via_label;
- out->frp_eos = MPLS_NON_EOS;
+ case DPO_PROTO_IP4:
+ return (FIB_API_PATH_NH_PROTO_IP4);
+ case DPO_PROTO_IP6:
+ return (FIB_API_PATH_NH_PROTO_IP6);
+ case DPO_PROTO_MPLS:
+ return (FIB_API_PATH_NH_PROTO_MPLS);
+ case DPO_PROTO_BIER:
+ return (FIB_API_PATH_NH_PROTO_BIER);
+ case DPO_PROTO_ETHERNET:
+ return (FIB_API_PATH_NH_PROTO_ETHERNET);
+ case DPO_PROTO_NSH:
+ ASSERT(0);
+ break;
}
+ return (FIB_API_PATH_NH_PROTO_IP4);
+}
- n_labels = in->n_labels;
- if (n_labels == 0)
- ;
- else
+
+static void
+fib_api_next_hop_encode (const fib_route_path_t *rpath,
+ vl_api_fib_path_t *fp)
+{
+ fp->proto = fib_api_path_dpo_proto_to_nh(rpath->frp_proto);
+
+ if (rpath->frp_proto == DPO_PROTO_IP4)
+ memcpy (&fp->nh.address.ip4,
+ &rpath->frp_addr.ip4,
+ sizeof (rpath->frp_addr.ip4));
+ else if (rpath->frp_proto == DPO_PROTO_IP6)
+ memcpy (&fp->nh.address.ip6,
+ &rpath->frp_addr.ip6,
+ sizeof (rpath->frp_addr.ip6));
+}
+
+static int
+fib_api_path_nh_proto_to_dpo (vl_api_fib_path_nh_proto_t pp,
+ dpo_proto_t *dproto)
+{
+ switch (pp)
{
- vec_validate (out->frp_label_stack, n_labels - 1);
- for (ii = 0; ii < n_labels; ii++)
- {
- out->frp_label_stack[ii].fml_value =
- ntohl(in->label_stack[ii].label);
- out->frp_label_stack[ii].fml_ttl =
- in->label_stack[ii].ttl;
- out->frp_label_stack[ii].fml_exp =
- in->label_stack[ii].exp;
- out->frp_label_stack[ii].fml_mode =
- (in->label_stack[ii].is_uniform ?
- FIB_MPLS_LSP_MODE_UNIFORM :
- FIB_MPLS_LSP_MODE_PIPE);
- }
+ case FIB_API_PATH_NH_PROTO_IP4:
+ *dproto = DPO_PROTO_IP4;
+ break;
+ case FIB_API_PATH_NH_PROTO_IP6:
+ *dproto = DPO_PROTO_IP6;
+ break;
+ case FIB_API_PATH_NH_PROTO_MPLS:
+ *dproto = DPO_PROTO_MPLS;
+ break;
+ case FIB_API_PATH_NH_PROTO_BIER:
+ *dproto = DPO_PROTO_BIER;
+ break;
+ case FIB_API_PATH_NH_PROTO_ETHERNET:
+ *dproto = DPO_PROTO_ETHERNET;
+ break;
+ default:
+ return (-1);
}
+ return (0);
+}
+
+int
+fib_api_path_decode (vl_api_fib_path_t *in,
+ fib_route_path_t *out)
+{
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ int rv = 0, n_labels;
+ vnet_main_t *vnm;
+ u8 ii;
+
+ vnm = vnet_get_main ();
+ clib_memset(&out->frp_dpo, 0, sizeof(out->frp_dpo));
+
+ /* enums are u32 */
+ in->flags = ntohl (in->flags);
+ in->type = ntohl (in->type);
+ in->proto = ntohl (in->proto);
- if (in->is_dvr)
- path_flags |= FIB_ROUTE_PATH_DVR;
- if (in->is_resolve_host)
- path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_HOST;
- if (in->is_resolve_attached)
- path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED;
- /* if (in->is_interface_rx) */
- /* path_flags |= FIB_ROUTE_PATH_INTF_RX; */
- /* if (in->is_rpf_id) */
- /* path_flags |= FIB_ROUTE_PATH_RPF_ID; */
- if (in->is_source_lookup)
- path_flags |= FIB_ROUTE_PATH_SOURCE_LOOKUP;
-
- if (in->is_udp_encap)
+ /*
+ * attributes that apply to all path types
+ */
+ out->frp_flags = 0;
+ out->frp_weight = in->weight;
+ if (0 == out->frp_weight)
{
- path_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
- out->frp_udp_encap_id = ntohl(in->next_hop_id);
+ out->frp_weight = 1;
}
- else
+ out->frp_preference = in->preference;
+
+ rv = fib_api_path_nh_proto_to_dpo(in->proto, &out->frp_proto);
+
+ if (0 != rv)
+ return (rv);
+
+ /*
+ * convert the flags and the AFI to determine the path type
+ */
+ if (in->flags & FIB_API_PATH_FLAG_RESOLVE_VIA_HOST)
+ out->frp_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_HOST;
+ if (in->flags & FIB_API_PATH_FLAG_RESOLVE_VIA_ATTACHED)
+ out->frp_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED;
+
+ switch (in->type)
{
- if (DPO_PROTO_IP4 == in->afi)
- {
- clib_memcpy (&out->frp_addr.ip4,
- in->next_hop,
- sizeof (out->frp_addr.ip4));
- }
- else if (DPO_PROTO_IP6 == in->afi)
+ case FIB_API_PATH_TYPE_DVR:
+ out->frp_sw_if_index = ntohl(in->sw_if_index);
+ out->frp_flags |= FIB_ROUTE_PATH_DVR;
+ break;
+ case FIB_API_PATH_TYPE_INTERFACE_RX:
+ out->frp_sw_if_index = ntohl(in->sw_if_index);
+ out->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
+ break;
+ case FIB_API_PATH_TYPE_DROP:
+ out->frp_flags |= FIB_ROUTE_PATH_DROP;
+ break;
+ case FIB_API_PATH_TYPE_LOCAL:
+ out->frp_flags |= FIB_ROUTE_PATH_LOCAL;
+ out->frp_sw_if_index = ntohl(in->sw_if_index);
+ break;
+ case FIB_API_PATH_TYPE_ICMP_UNREACH:
+ out->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
+ break;
+ case FIB_API_PATH_TYPE_ICMP_PROHIBIT:
+ out->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
+ break;
+ case FIB_API_PATH_TYPE_CLASSIFY:
+ out->frp_flags |= FIB_ROUTE_PATH_CLASSIFY;
+
+ if (pool_is_free_index (cm->tables, ntohl (in->nh.classify_table_index)))
{
- clib_memcpy (&out->frp_addr.ip6,
- in->next_hop,
- sizeof (out->frp_addr.ip6));
+ return VNET_API_ERROR_NO_SUCH_TABLE;
}
+ out->frp_classify_table_id = ntohl (in->nh.classify_table_index);
+ break;
+ case FIB_API_PATH_TYPE_UDP_ENCAP:
+ out->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
+ out->frp_udp_encap_id = ntohl(in->nh.obj_id);
+ break;
+ case FIB_API_PATH_TYPE_BIER_IMP:
+ out->frp_flags |= FIB_ROUTE_PATH_BIER_IMP;
+ out->frp_bier_imp = ntohl (in->nh.obj_id);
+ break;
- if (ip46_address_is_zero(&out->frp_addr))
+ case FIB_API_PATH_TYPE_SOURCE_LOOKUP:
+ out->frp_flags |= FIB_ROUTE_PATH_SOURCE_LOOKUP;
+ /* fall through */
+ case FIB_API_PATH_TYPE_NORMAL:
+ switch (out->frp_proto)
{
- if (DPO_PROTO_BIER == in->afi)
+ case DPO_PROTO_IP4:
+ case DPO_PROTO_IP6:
+ fib_api_next_hop_decode(in, &out->frp_addr);
+ out->frp_sw_if_index = ntohl(in->sw_if_index);
+ out->frp_rpf_id = ntohl(in->rpf_id);
+
+ if (0 == out->frp_rpf_id)
{
- index_t bdti;
+ /* allow 0 to be an unset value on the API */
+ out->frp_rpf_id = ~0;
+ }
- bdti = bier_disp_table_find(ntohl(in->table_id));
+ if (~0 != out->frp_rpf_id)
+ {
+ out->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
+ }
- if (INDEX_INVALID != bdti)
+ if (~0 == out->frp_sw_if_index)
+ {
+ /* recursive or deag, validate the next-hop FIB */
+ if (~0 != out->frp_rpf_id)
{
- out->frp_fib_index = bdti;
- out->frp_proto = DPO_PROTO_BIER;
+ rv = fib_api_mtable_id_decode(
+ dpo_proto_to_fib(out->frp_proto),
+ ntohl(in->table_id),
+ &out->frp_fib_index);
}
else
{
- rv = VNET_API_ERROR_NO_SUCH_FIB;
+ rv = fib_api_table_id_decode(
+ dpo_proto_to_fib(out->frp_proto),
+ ntohl(in->table_id),
+ &out->frp_fib_index);
+ }
+ if (0 != rv)
+ {
+ return (rv);
}
}
- else if (out->frp_sw_if_index == ~0 &&
- out->frp_fib_index != ~0)
+ else
{
- path_flags |= FIB_ROUTE_PATH_DEAG;
+ if (pool_is_free_index (vnm->interface_main.sw_interfaces,
+ out->frp_sw_if_index))
+ {
+ return VNET_API_ERROR_NO_MATCHING_INTERFACE;
+ }
}
- }
- }
- out->frp_flags = path_flags;
+ if (ip46_address_is_zero(&out->frp_addr))
+ {
+ if (~0 == out->frp_sw_if_index &&
+ ~0 != out->frp_fib_index)
+ {
+ out->frp_flags |= FIB_ROUTE_PATH_DEAG;
+ }
+ }
- return (rv);
-}
+ break;
+ case DPO_PROTO_MPLS:
+ out->frp_local_label = ntohl (in->nh.via_label);
+ out->frp_eos = MPLS_NON_EOS;
+ out->frp_sw_if_index = ~0;
+ break;
+ case DPO_PROTO_BIER:
+ out->frp_sw_if_index = ntohl(in->sw_if_index);
+ out->frp_rpf_id = ntohl(in->rpf_id);
-void
-fib_prefix_to_api (const fib_prefix_t *pfx,
- u8 address[16],
- u8 *length,
- u8 *is_ip6)
-{
- *length = pfx->fp_len;
- *is_ip6 = (FIB_PROTOCOL_IP6 == pfx->fp_proto ? 1 : 0);
+ if (!(out->frp_flags & FIB_ROUTE_PATH_BIER_IMP))
+ {
+ fib_api_next_hop_decode(in, &out->frp_addr);
- if (FIB_PROTOCOL_IP6 == pfx->fp_proto)
- {
- memcpy (address, &pfx->fp_addr.ip6, sizeof (pfx->fp_addr.ip6));
- }
- else
- {
- memcpy (address, &pfx->fp_addr.ip4, sizeof (pfx->fp_addr.ip4));
+ if (ip46_address_is_zero(&out->frp_addr))
+ {
+ index_t bdti;
+
+ bdti = bier_disp_table_find(ntohl(in->table_id));
+
+ if (INDEX_INVALID != bdti)
+ {
+ out->frp_fib_index = bdti;
+ }
+ else
+ {
+ return (VNET_API_ERROR_NO_SUCH_FIB);
+ }
+ }
+ }
+ break;
+ case DPO_PROTO_ETHERNET:
+ out->frp_sw_if_index = ntohl(in->sw_if_index);
+ break;
+ case DPO_PROTO_NSH:
+ break;
+ }
}
-}
-static void
-fib_api_path_copy_next_hop (const fib_route_path_encode_t * api_rpath, void *fp_arg)
-{
- int is_ip4;
- vl_api_fib_path_t *fp = (vl_api_fib_path_t *) fp_arg;
-
- if (api_rpath->rpath.frp_proto == DPO_PROTO_IP4)
- fp->afi = IP46_TYPE_IP4;
- else if (api_rpath->rpath.frp_proto == DPO_PROTO_IP6)
- fp->afi = IP46_TYPE_IP6;
- else
+ n_labels = in->n_labels;
+ if (n_labels != 0)
{
- is_ip4 = ip46_address_is_ip4 (&api_rpath->rpath.frp_addr);
- if (is_ip4)
- fp->afi = IP46_TYPE_IP4;
- else
- fp->afi = IP46_TYPE_IP6;
+ vec_validate (out->frp_label_stack, n_labels - 1);
+ for (ii = 0; ii < n_labels; ii++)
+ {
+ out->frp_label_stack[ii].fml_value =
+ ntohl(in->label_stack[ii].label);
+ out->frp_label_stack[ii].fml_ttl =
+ in->label_stack[ii].ttl;
+ out->frp_label_stack[ii].fml_exp =
+ in->label_stack[ii].exp;
+ out->frp_label_stack[ii].fml_mode =
+ (in->label_stack[ii].is_uniform ?
+ FIB_MPLS_LSP_MODE_UNIFORM :
+ FIB_MPLS_LSP_MODE_PIPE);
+ }
}
- if (fp->afi == IP46_TYPE_IP4)
- memcpy (fp->next_hop, &api_rpath->rpath.frp_addr.ip4,
- sizeof (api_rpath->rpath.frp_addr.ip4));
- else
- memcpy (fp->next_hop, &api_rpath->rpath.frp_addr.ip6,
- sizeof (api_rpath->rpath.frp_addr.ip6));
+
+ return (0);
}
void
-fib_api_path_encode (const fib_route_path_encode_t * api_rpath,
+fib_api_path_encode (const fib_route_path_t * rpath,
vl_api_fib_path_t *out)
{
- int ii;
+ memset (out, 0, sizeof (*out));
- clib_memset (out, 0, sizeof (*out));
- switch (api_rpath->dpo.dpoi_type)
- {
- case DPO_RECEIVE:
- out->is_local = true;
- break;
- case DPO_DROP:
- out->is_drop = true;
- break;
- case DPO_IP_NULL:
- switch (ip_null_dpo_get_action(api_rpath->dpo.dpoi_index))
- {
- case IP_NULL_ACTION_NONE:
- out->is_drop = true;
- break;
- case IP_NULL_ACTION_SEND_ICMP_UNREACH:
- out->is_unreach = true;
- break;
- case IP_NULL_ACTION_SEND_ICMP_PROHIBIT:
- out->is_prohibit = true;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- out->weight = api_rpath->rpath.frp_weight;
- out->preference = api_rpath->rpath.frp_preference;
- out->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index);
- out->afi = api_rpath->rpath.frp_proto;
- fib_api_path_copy_next_hop (api_rpath, out);
+ out->weight = rpath->frp_weight;
+ out->preference = rpath->frp_preference;
+ out->sw_if_index = htonl (rpath->frp_sw_if_index);
+ out->proto = fib_api_path_dpo_proto_to_nh(rpath->frp_proto);
+ out->rpf_id = rpath->frp_rpf_id;
+ fib_api_next_hop_encode (rpath, out);
- if (0 != api_rpath->rpath.frp_fib_index)
+ if (0 != rpath->frp_fib_index)
{
- if ((DPO_PROTO_IP6 == api_rpath->rpath.frp_proto) ||
- (DPO_PROTO_IP4 == api_rpath->rpath.frp_proto))
+ if ((DPO_PROTO_IP6 == rpath->frp_proto) ||
+ (DPO_PROTO_IP4 == rpath->frp_proto))
{
- if (api_rpath->rpath.frp_flags & FIB_ROUTE_PATH_RPF_ID)
+ if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
{
- out->table_id =
- htonl(mfib_table_get_table_id(
- api_rpath->rpath.frp_fib_index,
- dpo_proto_to_fib(api_rpath->rpath.frp_proto)));
+ out->table_id = htonl (mfib_table_get_table_id(
+ rpath->frp_fib_index,
+ dpo_proto_to_fib(rpath->frp_proto)));
}
else
{
- out->table_id =
- htonl(fib_table_get_table_id(
- api_rpath->rpath.frp_fib_index,
- dpo_proto_to_fib(api_rpath->rpath.frp_proto)));
+ out->table_id = htonl (fib_table_get_table_id(
+ rpath->frp_fib_index,
+ dpo_proto_to_fib(rpath->frp_proto)));
}
}
}
- if (api_rpath->rpath.frp_flags & FIB_ROUTE_PATH_DVR)
+ if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
{
- out->is_dvr = 1;
+ out->type = FIB_API_PATH_TYPE_DVR;
}
- if (api_rpath->rpath.frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
+ else if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
{
- out->is_udp_encap = 1;
- out->next_hop_id = api_rpath->rpath.frp_udp_encap_id;
+ out->type = FIB_API_PATH_TYPE_ICMP_UNREACH;
}
- if (api_rpath->rpath.frp_flags & FIB_ROUTE_PATH_INTF_RX)
+ else if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
{
- out->is_interface_rx = 1;
+ out->type = FIB_API_PATH_TYPE_ICMP_PROHIBIT;
}
- if (api_rpath->rpath.frp_flags & FIB_ROUTE_PATH_LOCAL)
+ else if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
{
- out->is_local = 1;
+ out->type = FIB_API_PATH_TYPE_LOCAL;
}
- if (api_rpath->rpath.frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
+ else if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
{
- out->is_resolve_host = 1;
+ out->type = FIB_API_PATH_TYPE_DROP;
}
- if (api_rpath->rpath.frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
+ else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
{
- out->is_resolve_attached = 1;
+ out->type = FIB_API_PATH_TYPE_UDP_ENCAP;
+ out->nh.obj_id = rpath->frp_udp_encap_id;
}
- /* if (api_rpath->rpath.frp_flags & FIB_ROUTE_PATH_ATTACHED) { */
- /* out->is_attached = 1; */
- /* } */
- /* if (api_rpath->rpath.frp_flags & FIB_ROUTE_PATH_CONNECTED) { */
- /* out->is_connected = 1; */
- /* } */
- if (api_rpath->rpath.frp_label_stack)
+ else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
+ {
+ out->type = FIB_API_PATH_TYPE_BIER_IMP;
+ out->nh.obj_id = rpath->frp_bier_imp;
+ }
+ else
+ {
+ out->type = FIB_API_PATH_TYPE_NORMAL;
+ }
+ if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
+ {
+ out->flags |= FIB_API_PATH_FLAG_RESOLVE_VIA_HOST;
+ }
+ if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
+ {
+ out->flags |= FIB_API_PATH_FLAG_RESOLVE_VIA_ATTACHED;
+ }
+
+ out->flags = htonl (out->flags);
+ out->type = htonl (out->type);
+ out->proto = htonl (out->proto);
+
+ if (rpath->frp_label_stack)
{
- for (ii = 0; ii < vec_len(api_rpath->rpath.frp_label_stack); ii++)
+ int ii;
+
+ for (ii = 0; ii < vec_len(rpath->frp_label_stack); ii++)
{
out->label_stack[ii].label =
- htonl(api_rpath->rpath.frp_label_stack[ii].fml_value);
+ htonl(rpath->frp_label_stack[ii].fml_value);
out->label_stack[ii].ttl =
- api_rpath->rpath.frp_label_stack[ii].fml_ttl;
+ rpath->frp_label_stack[ii].fml_ttl;
out->label_stack[ii].exp =
- api_rpath->rpath.frp_label_stack[ii].fml_exp;
+ rpath->frp_label_stack[ii].fml_exp;
}
out->n_labels = ii;
}
}
+void
+fib_api_route_add_del (u8 is_add,
+ u8 is_multipath,
+ u32 fib_index,
+ const fib_prefix_t * prefix,
+ fib_entry_flag_t entry_flags,
+ fib_route_path_t *rpaths)
+{
+ if (is_multipath)
+ {
+ /* Iterative path add/remove */
+ if (is_add)
+ fib_table_entry_path_add2 (fib_index,
+ prefix,
+ FIB_SOURCE_API,
+ entry_flags,
+ rpaths);
+ else
+ fib_table_entry_path_remove2 (fib_index,
+ prefix,
+ FIB_SOURCE_API,
+ rpaths);
+ }
+ else
+ {
+ if (is_add)
+ /* path replacement */
+ fib_table_entry_update (fib_index,
+ prefix,
+ FIB_SOURCE_API,
+ entry_flags,
+ rpaths);
+ else
+ /* entry delete */
+ fib_table_entry_delete (fib_index,
+ prefix,
+ FIB_SOURCE_API);
+ }
+}
+
+u8*
+format_vl_api_fib_path (u8 * s, va_list * args)
+{
+ const vl_api_fib_path_t *path = va_arg (*args, vl_api_fib_path_t*);
+
+ s = format (s, "sw_if_index %d", ntohl (path->sw_if_index));
+ switch (clib_net_to_host_u32(path->proto))
+ {
+ case FIB_API_PATH_NH_PROTO_IP4:
+ s = format (s, " %U", format_vl_api_address_union,
+ &path->nh.address, ADDRESS_IP4);
+ break;
+ case FIB_API_PATH_NH_PROTO_IP6:
+ s = format (s, " %U", format_vl_api_address_union,
+ &path->nh.address, ADDRESS_IP6);
+ break;
+ default:
+ break;
+ }
+ s = format (s, " weight %d", path->weight);
+ s = format (s, " preference %d", path->preference);
+ s = format (s, " type %d", ntohl(path->type));
+ s = format (s, " proto %d", ntohl(path->proto));
+ s = format (s, " flags %d", ntohl(path->flags));
+ s = format (s, " n_labels %d", ntohl(path->n_labels));
+ s = format (s, " table-id %d", ntohl(path->table_id));
+ s = format (s, " rpf-id %d", ntohl(path->rpf_id));
+
+ return (s);
+}
+
fib_protocol_t
fib_proto_from_api_address_family (int af)
{
diff --git a/src/vnet/fib/fib_api.h b/src/vnet/fib/fib_api.h
index 041f962e3d7..ffff2289b37 100644
--- a/src/vnet/fib/fib_api.h
+++ b/src/vnet/fib/fib_api.h
@@ -17,61 +17,38 @@
#define __FIB_API_H__
#include <vnet/fib/fib_types.h>
+#include <vnet/fib/fib_entry.h>
-int
-add_del_route_check (fib_protocol_t table_proto,
- u32 table_id,
- u32 next_hop_sw_if_index,
- dpo_proto_t next_hop_table_proto,
- u32 next_hop_table_id,
- u8 is_rpf_id,
- u32 * fib_index, u32 * next_hop_fib_index);
-
-int
-add_del_route_t_handler (u8 is_multipath,
- u8 is_add,
- u8 is_drop,
- u8 is_unreach,
- u8 is_prohibit,
- u8 is_local,
- u8 is_multicast,
- u8 is_classify,
- u32 classify_table_index,
- u8 is_resolve_host,
- u8 is_resolve_attached,
- u8 is_interface_rx,
- u8 is_rpf_id,
- u8 is_dvr,
- u8 is_source_lookup,
- u8 is_udp_encap,
- u32 fib_index,
- const fib_prefix_t * prefix,
- dpo_proto_t next_hop_proto,
- const ip46_address_t * next_hop,
- u32 next_hop_id,
- u32 next_hop_sw_if_index,
- u8 next_hop_fib_index,
- u16 next_hop_weight,
- u16 next_hop_preference,
- mpls_label_t next_hop_via_label,
- fib_mpls_label_t * next_hop_out_label_stack);
-
+/**
+ * Forward declare the API type, no need to include the generated api headers
+ */
struct _vl_api_fib_path;
+struct _vl_api_fib_prefix;
-extern void fib_api_path_encode (const fib_route_path_encode_t * api_rpath,
- struct _vl_api_fib_path *out);
+/**
+ * Encode and decode functions from the API types to internal types
+ */
+extern void fib_api_path_encode(const fib_route_path_t * api_rpath,
+ struct _vl_api_fib_path *out);
+extern int fib_api_path_decode(struct _vl_api_fib_path *in,
+ fib_route_path_t *out);
-void
-fib_prefix_to_api (const fib_prefix_t *pfx,
- u8 address[16],
- u8 *length,
- u8 *is_ip6);
+extern int fib_api_table_id_decode(fib_protocol_t fproto,
+ u32 table_id,
+ u32 *fib_index);
+/**
+ * Adding routes from the API
+ */
+extern void fib_api_route_add_del (u8 is_add,
+ u8 is_multipath,
+ u32 fib_index,
+ const fib_prefix_t * prefix,
+ fib_entry_flag_t entry_flags,
+ fib_route_path_t *rpaths);
-struct _vl_api_fib_path;
+extern u8* format_vl_api_fib_path(u8 * s, va_list * args);
-extern int fib_path_api_parse(const struct _vl_api_fib_path *in,
- fib_route_path_t *out);
extern fib_protocol_t fib_proto_from_api_address_family (int af);
extern int fib_proto_to_api_address_family (fib_protocol_t fproto);
diff --git a/src/vnet/fib/fib_entry.c b/src/vnet/fib/fib_entry.c
index edbfdf6e3a4..6ff692dea98 100644
--- a/src/vnet/fib/fib_entry.c
+++ b/src/vnet/fib/fib_entry.c
@@ -906,21 +906,19 @@ void
fib_entry_path_add (fib_node_index_t fib_entry_index,
fib_source_t source,
fib_entry_flag_t flags,
- const fib_route_path_t *rpath)
+ const fib_route_path_t *rpaths)
{
fib_source_t best_source;
fib_entry_t *fib_entry;
fib_entry_src_t *bsrc;
- ASSERT(1 == vec_len(rpath));
-
fib_entry = fib_entry_get(fib_entry_index);
ASSERT(NULL != fib_entry);
bsrc = fib_entry_get_best_src_i(fib_entry);
best_source = fib_entry_src_get_source(bsrc);
- fib_entry = fib_entry_src_action_path_add(fib_entry, source, flags, rpath);
+ fib_entry = fib_entry_src_action_path_add(fib_entry, source, flags, rpaths);
fib_entry_source_change(fib_entry, best_source, source);
@@ -1003,7 +1001,7 @@ fib_entry_source_removed (fib_entry_t *fib_entry,
fib_entry_src_flag_t
fib_entry_path_remove (fib_node_index_t fib_entry_index,
fib_source_t source,
- const fib_route_path_t *rpath)
+ const fib_route_path_t *rpaths)
{
fib_entry_src_flag_t sflag;
fib_source_t best_source;
@@ -1011,8 +1009,6 @@ fib_entry_path_remove (fib_node_index_t fib_entry_index,
fib_entry_t *fib_entry;
fib_entry_src_t *bsrc;
- ASSERT(1 == vec_len(rpath));
-
fib_entry = fib_entry_get(fib_entry_index);
ASSERT(NULL != fib_entry);
@@ -1020,7 +1016,7 @@ fib_entry_path_remove (fib_node_index_t fib_entry_index,
best_source = fib_entry_src_get_source(bsrc);
bflags = fib_entry_src_get_flags(bsrc);
- sflag = fib_entry_src_action_path_remove(fib_entry, source, rpath);
+ sflag = fib_entry_src_action_path_remove(fib_entry, source, rpaths);
FIB_ENTRY_DBG(fib_entry, "path remove:%U", format_fib_source, source);
@@ -1648,11 +1644,13 @@ fib_entry_module_init (void)
fib_entry_logger = vlib_log_register_class("fib", "entry");
}
-void
-fib_entry_encode (fib_node_index_t fib_entry_index,
- fib_route_path_encode_t **api_rpaths)
+fib_route_path_t *
+fib_entry_encode (fib_node_index_t fib_entry_index)
{
fib_path_ext_list_t *ext_list;
+ fib_path_encode_ctx_t ctx = {
+ .rpaths = NULL,
+ };
fib_entry_t *fib_entry;
fib_entry_src_t *bsrc;
@@ -1670,8 +1668,10 @@ fib_entry_encode (fib_node_index_t fib_entry_index,
fib_path_list_walk_w_ext(fib_entry->fe_parent,
ext_list,
fib_path_encode,
- api_rpaths);
+ &ctx);
}
+
+ return (ctx.rpaths);
}
const fib_prefix_t *
diff --git a/src/vnet/fib/fib_entry.h b/src/vnet/fib/fib_entry.h
index 8ede39c1e9d..5d0fb24bcb2 100644
--- a/src/vnet/fib/fib_entry.h
+++ b/src/vnet/fib/fib_entry.h
@@ -539,7 +539,7 @@ extern void fib_entry_update (fib_node_index_t fib_entry_index,
extern void fib_entry_path_add(fib_node_index_t fib_entry_index,
fib_source_t source,
fib_entry_flag_t flags,
- const fib_route_path_t *rpath);
+ const fib_route_path_t *rpaths);
extern void fib_entry_special_add(fib_node_index_t fib_entry_index,
fib_source_t source,
fib_entry_flag_t flags,
@@ -553,7 +553,7 @@ extern fib_entry_src_flag_t fib_entry_special_remove(fib_node_index_t fib_entry_
extern fib_entry_src_flag_t fib_entry_path_remove(fib_node_index_t fib_entry_index,
fib_source_t source,
- const fib_route_path_t *rpath);
+ const fib_route_path_t *rpaths);
extern void fib_entry_inherit(fib_node_index_t cover,
fib_node_index_t covered);
@@ -601,9 +601,8 @@ extern u32 fib_entry_get_resolving_interface_for_source(
fib_node_index_t fib_entry_index,
fib_source_t source);
-extern void fib_entry_encode(fib_node_index_t fib_entry_index,
- fib_route_path_encode_t **api_rpaths);
-extern const fib_prefix_t *fib_entry_get_prefix(fib_node_index_t fib_entry_index);
+extern fib_route_path_t* fib_entry_encode(fib_node_index_t fib_entry_index);
+extern const fib_prefix_t* fib_entry_get_prefix(fib_node_index_t fib_entry_index);
extern u32 fib_entry_get_fib_index(fib_node_index_t fib_entry_index);
extern void fib_entry_set_source_data(fib_node_index_t fib_entry_index,
fib_source_t source,
diff --git a/src/vnet/fib/fib_entry_src.c b/src/vnet/fib/fib_entry_src.c
index c6c2a04bed7..1766ec765ea 100644
--- a/src/vnet/fib/fib_entry_src.c
+++ b/src/vnet/fib/fib_entry_src.c
@@ -1490,34 +1490,39 @@ fib_entry_src_flags_2_path_list_flags (fib_entry_flag_t eflags)
static void
fib_entry_flags_update (const fib_entry_t *fib_entry,
- const fib_route_path_t *rpath,
+ const fib_route_path_t *rpaths,
fib_path_list_flags_t *pl_flags,
fib_entry_src_t *esrc)
{
- if ((esrc->fes_src == FIB_SOURCE_API) ||
- (esrc->fes_src == FIB_SOURCE_CLI))
- {
- if (fib_path_is_attached(rpath))
- {
- esrc->fes_entry_flags |= FIB_ENTRY_FLAG_ATTACHED;
- }
- else
- {
- esrc->fes_entry_flags &= ~FIB_ENTRY_FLAG_ATTACHED;
- }
- if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
- {
- esrc->fes_entry_flags |= FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT;
- }
- }
- if (fib_route_attached_cross_table(fib_entry, rpath) &&
- !(esrc->fes_entry_flags & FIB_ENTRY_FLAG_NO_ATTACHED_EXPORT))
- {
- esrc->fes_entry_flags |= FIB_ENTRY_FLAG_IMPORT;
- }
- else
+ const fib_route_path_t *rpath;
+
+ vec_foreach(rpath, rpaths)
{
- esrc->fes_entry_flags &= ~FIB_ENTRY_FLAG_IMPORT;
+ if ((esrc->fes_src == FIB_SOURCE_API) ||
+ (esrc->fes_src == FIB_SOURCE_CLI))
+ {
+ if (fib_path_is_attached(rpath))
+ {
+ esrc->fes_entry_flags |= FIB_ENTRY_FLAG_ATTACHED;
+ }
+ else
+ {
+ esrc->fes_entry_flags &= ~FIB_ENTRY_FLAG_ATTACHED;
+ }
+ if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
+ {
+ esrc->fes_entry_flags |= FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT;
+ }
+ }
+ if (fib_route_attached_cross_table(fib_entry, rpath) &&
+ !(esrc->fes_entry_flags & FIB_ENTRY_FLAG_NO_ATTACHED_EXPORT))
+ {
+ esrc->fes_entry_flags |= FIB_ENTRY_FLAG_IMPORT;
+ }
+ else
+ {
+ esrc->fes_entry_flags &= ~FIB_ENTRY_FLAG_IMPORT;
+ }
}
}
@@ -1533,7 +1538,7 @@ fib_entry_t*
fib_entry_src_action_path_add (fib_entry_t *fib_entry,
fib_source_t source,
fib_entry_flag_t flags,
- const fib_route_path_t *rpath)
+ const fib_route_path_t *rpaths)
{
fib_node_index_t old_path_list, fib_entry_index;
fib_path_list_flags_t pl_flags;
@@ -1550,7 +1555,7 @@ fib_entry_src_action_path_add (fib_entry_t *fib_entry,
const dpo_id_t *dpo;
if (flags == FIB_ENTRY_FLAG_EXCLUSIVE) {
- dpo = &rpath->dpo;
+ dpo = &rpaths->dpo;
} else {
dpo = drop_dpo_get(fib_entry_get_dpo_proto(fib_entry));
}
@@ -1574,10 +1579,10 @@ fib_entry_src_action_path_add (fib_entry_t *fib_entry,
ASSERT(FIB_ENTRY_SRC_VFT_EXISTS(esrc, fesv_path_add));
pl_flags = fib_entry_src_flags_2_path_list_flags(fib_entry_get_flags_i(fib_entry));
- fib_entry_flags_update(fib_entry, rpath, &pl_flags, esrc);
+ fib_entry_flags_update(fib_entry, rpaths, &pl_flags, esrc);
FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_path_add,
- (esrc, fib_entry, pl_flags, rpath));
+ (esrc, fib_entry, pl_flags, rpaths));
fib_entry = fib_entry_get(fib_entry_index);
fib_path_list_lock(esrc->fes_pl);
@@ -1670,7 +1675,7 @@ fib_entry_src_action_path_swap (fib_entry_t *fib_entry,
fib_entry_src_flag_t
fib_entry_src_action_path_remove (fib_entry_t *fib_entry,
fib_source_t source,
- const fib_route_path_t *rpath)
+ const fib_route_path_t *rpaths)
{
fib_path_list_flags_t pl_flags;
fib_node_index_t old_path_list;
@@ -1692,10 +1697,10 @@ fib_entry_src_action_path_remove (fib_entry_t *fib_entry,
ASSERT(FIB_ENTRY_SRC_VFT_EXISTS(esrc, fesv_path_remove));
pl_flags = fib_entry_src_flags_2_path_list_flags(fib_entry_get_flags_i(fib_entry));
- fib_entry_flags_update(fib_entry, rpath, &pl_flags, esrc);
+ fib_entry_flags_update(fib_entry, rpaths, &pl_flags, esrc);
FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_path_remove,
- (esrc, pl_flags, rpath));
+ (esrc, pl_flags, rpaths));
/*
* lock the new path-list, unlock the old if it had one
diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c
index 67a4bc1d4de..eebba1b1548 100644
--- a/src/vnet/fib/fib_path.c
+++ b/src/vnet/fib/fib_path.c
@@ -24,7 +24,8 @@
#include <vnet/dpo/interface_rx_dpo.h>
#include <vnet/dpo/mpls_disposition.h>
#include <vnet/dpo/dvr_dpo.h>
-#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/ip_null_dpo.h>
+#include <vnet/dpo/classify_dpo.h>
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_mcast.h>
@@ -353,6 +354,12 @@ typedef struct fib_path_t_ {
} udp_encap;
struct {
/**
+ * The UDP Encap object this path resolves through
+ */
+ u32 fp_classify_table_id;
+ } classify;
+ struct {
+ /**
* The interface
*/
u32 fp_interface;
@@ -882,8 +889,8 @@ fib_path_unresolve (fib_path_t *path)
{
fib_entry_child_remove(path->fp_via_fib,
path->fp_sibling);
- fib_table_entry_special_remove(path->recursive.fp_tbl_id,
- fib_entry_get_prefix(path->fp_via_fib),
+ fib_table_entry_special_remove(path->recursive.fp_tbl_id,
+ fib_entry_get_prefix(path->fp_via_fib),
FIB_SOURCE_RR);
fib_table_unlock(path->recursive.fp_tbl_id,
dpo_proto_to_fib(path->fp_nh_proto),
@@ -1244,6 +1251,10 @@ fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
+ if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
+ cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_UNREACH;
+ if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
+ cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_PROHIBIT;
return (cfg_flags);
}
@@ -1337,6 +1348,16 @@ fib_path_create (fib_node_index_t pl_index,
path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
}
+ else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
+ (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH))
+ {
+ path->fp_type = FIB_PATH_TYPE_SPECIAL;
+ }
+ else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY))
+ {
+ path->fp_type = FIB_PATH_TYPE_SPECIAL;
+ path->classify.fp_classify_table_id = rpath->frp_classify_table_id;
+ }
else if (~0 != rpath->frp_sw_if_index)
{
if (ip46_address_is_zero(&rpath->frp_addr))
@@ -1730,8 +1751,17 @@ fib_path_cmp_w_route_path (fib_node_index_t path_index,
case FIB_PATH_TYPE_EXCLUSIVE:
res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
break;
- case FIB_PATH_TYPE_SPECIAL:
case FIB_PATH_TYPE_RECEIVE:
+ if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
+ {
+ res = 0;
+ }
+ else
+ {
+ res = 1;
+ }
+ break;
+ case FIB_PATH_TYPE_SPECIAL:
res = 0;
break;
}
@@ -2006,11 +2036,33 @@ fib_path_resolve (fib_node_index_t path_index)
break;
}
case FIB_PATH_TYPE_SPECIAL:
- /*
- * Resolve via the drop
- */
- dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
- break;
+ if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
+ {
+ ip_null_dpo_add_and_lock (path->fp_nh_proto,
+ IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
+ &path->fp_dpo);
+ }
+ else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
+ {
+ ip_null_dpo_add_and_lock (path->fp_nh_proto,
+ IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ &path->fp_dpo);
+ }
+ else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY)
+ {
+ dpo_set (&path->fp_dpo, DPO_CLASSIFY,
+ path->fp_nh_proto,
+ classify_dpo_create (path->fp_nh_proto,
+ path->classify.fp_classify_table_id));
+ }
+ else
+ {
+ /*
+ * Resolve via the drop
+ */
+ dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
+ }
+ break;
case FIB_PATH_TYPE_DEAG:
{
if (DPO_PROTO_BIER == path->fp_nh_proto)
@@ -2459,10 +2511,10 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
- dpo_copy(dpo, &path->fp_dpo);
- break;
case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
+ dpo_copy(dpo, &path->fp_dpo);
+ break;
case FIB_FORW_CHAIN_TYPE_BIER:
break;
case FIB_FORW_CHAIN_TYPE_ETHERNET:
@@ -2627,73 +2679,83 @@ fib_path_list_walk_rc_t
fib_path_encode (fib_node_index_t path_list_index,
fib_node_index_t path_index,
const fib_path_ext_t *path_ext,
- void *ctx)
+ void *args)
{
- fib_route_path_encode_t **api_rpaths = ctx;
- fib_route_path_encode_t *api_rpath;
+ fib_path_encode_ctx_t *ctx = args;
+ fib_route_path_t *rpath;
fib_path_t *path;
path = fib_path_get(path_index);
if (!path)
return (FIB_PATH_LIST_WALK_CONTINUE);
- vec_add2(*api_rpaths, api_rpath, 1);
- api_rpath->rpath.frp_weight = path->fp_weight;
- api_rpath->rpath.frp_preference = path->fp_preference;
- api_rpath->rpath.frp_proto = path->fp_nh_proto;
- api_rpath->rpath.frp_sw_if_index = ~0;
- api_rpath->rpath.frp_fib_index = 0;
- api_rpath->dpo = path->fp_dpo;
+
+ vec_add2(ctx->rpaths, rpath, 1);
+ rpath->frp_weight = path->fp_weight;
+ rpath->frp_preference = path->fp_preference;
+ rpath->frp_proto = path->fp_nh_proto;
+ rpath->frp_sw_if_index = ~0;
+ rpath->frp_fib_index = 0;
switch (path->fp_type)
{
case FIB_PATH_TYPE_RECEIVE:
- api_rpath->rpath.frp_addr = path->receive.fp_addr;
- api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
+ rpath->frp_addr = path->receive.fp_addr;
+ rpath->frp_sw_if_index = path->receive.fp_interface;
+ rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
break;
case FIB_PATH_TYPE_ATTACHED:
- api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
+ rpath->frp_sw_if_index = path->attached.fp_interface;
break;
case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
- api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
- api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
+ rpath->frp_sw_if_index = path->attached_next_hop.fp_interface;
+ rpath->frp_addr = path->attached_next_hop.fp_nh;
break;
case FIB_PATH_TYPE_BIER_FMASK:
- api_rpath->rpath.frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
+ rpath->frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
break;
case FIB_PATH_TYPE_SPECIAL:
break;
case FIB_PATH_TYPE_DEAG:
- api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
+ rpath->frp_fib_index = path->deag.fp_tbl_id;
if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
{
- api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_RPF_ID;
+ rpath->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
}
break;
case FIB_PATH_TYPE_RECURSIVE:
- api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
- api_rpath->rpath.frp_fib_index = path->recursive.fp_tbl_id;
+ rpath->frp_addr = path->recursive.fp_nh.fp_ip;
+ rpath->frp_fib_index = path->recursive.fp_tbl_id;
break;
case FIB_PATH_TYPE_DVR:
- api_rpath->rpath.frp_sw_if_index = path->dvr.fp_interface;
- api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_DVR;
+ rpath->frp_sw_if_index = path->dvr.fp_interface;
+ rpath->frp_flags |= FIB_ROUTE_PATH_DVR;
break;
case FIB_PATH_TYPE_UDP_ENCAP:
- api_rpath->rpath.frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
- api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
+ rpath->frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
+ rpath->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
break;
case FIB_PATH_TYPE_INTF_RX:
- api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
- api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_INTF_RX;
+ rpath->frp_sw_if_index = path->receive.fp_interface;
+ rpath->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
break;
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ rpath->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
default:
break;
}
if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS)
{
- api_rpath->rpath.frp_label_stack = path_ext->fpe_path.frp_label_stack;
+ rpath->frp_label_stack = path_ext->fpe_path.frp_label_stack;
}
+ if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP)
+ rpath->frp_flags |= FIB_ROUTE_PATH_DROP;
+ if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
+ rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
+ if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
+ rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
+
return (FIB_PATH_LIST_WALK_CONTINUE);
}
diff --git a/src/vnet/fib/fib_path.h b/src/vnet/fib/fib_path.h
index 57dec6d90b4..50aca9e1cf5 100644
--- a/src/vnet/fib/fib_path.h
+++ b/src/vnet/fib/fib_path.h
@@ -80,6 +80,18 @@ typedef enum fib_path_cfg_attribute_t_ {
*/
FIB_PATH_CFG_ATTRIBUTE_LOCAL,
/**
+ * The path reolves via an ICMP unreachable
+ */
+ FIB_PATH_CFG_ATTRIBUTE_ICMP_UNREACH,
+ /**
+ * The path reolves via an ICMP prohibit
+ */
+ FIB_PATH_CFG_ATTRIBUTE_ICMP_PROHIBIT,
+ /**
+ * The path reolves via a classify
+ */
+ FIB_PATH_CFG_ATTRIBUTE_CLASSIFY,
+ /**
* The deag path does a source lookup
*/
FIB_PATH_CFG_ATTRIBUTE_DEAG_SRC,
@@ -100,6 +112,9 @@ typedef enum fib_path_cfg_attribute_t_ {
[FIB_PATH_CFG_ATTRIBUTE_RESOLVE_HOST] = "resolve-host", \
[FIB_PATH_CFG_ATTRIBUTE_RESOLVE_ATTACHED] = "resolve-attached", \
[FIB_PATH_CFG_ATTRIBUTE_LOCAL] = "local", \
+ [FIB_PATH_CFG_ATTRIBUTE_ICMP_UNREACH] = "icmp-unreach", \
+ [FIB_PATH_CFG_ATTRIBUTE_ICMP_PROHIBIT] = "icmp-prohibit", \
+ [FIB_PATH_CFG_ATTRIBUTE_CLASSIFY] = "classify", \
[FIB_PATH_CFG_ATTRIBUTE_ATTACHED] = "attached", \
[FIB_PATH_CFG_ATTRIBUTE_INTF_RX] = "interface-rx", \
[FIB_PATH_CFG_ATTRIBUTE_RPF_ID] = "rpf-id", \
@@ -121,6 +136,9 @@ typedef enum fib_path_cfg_flags_t_ {
FIB_PATH_CFG_FLAG_RESOLVE_HOST = (1 << FIB_PATH_CFG_ATTRIBUTE_RESOLVE_HOST),
FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED = (1 << FIB_PATH_CFG_ATTRIBUTE_RESOLVE_ATTACHED),
FIB_PATH_CFG_FLAG_LOCAL = (1 << FIB_PATH_CFG_ATTRIBUTE_LOCAL),
+ FIB_PATH_CFG_FLAG_ICMP_UNREACH = (1 << FIB_PATH_CFG_ATTRIBUTE_ICMP_UNREACH),
+ FIB_PATH_CFG_FLAG_ICMP_PROHIBIT = (1 << FIB_PATH_CFG_ATTRIBUTE_ICMP_PROHIBIT),
+ FIB_PATH_CFG_FLAG_CLASSIFY = (1 << FIB_PATH_CFG_ATTRIBUTE_CLASSIFY),
FIB_PATH_CFG_FLAG_ATTACHED = (1 << FIB_PATH_CFG_ATTRIBUTE_ATTACHED),
FIB_PATH_CFG_FLAG_INTF_RX = (1 << FIB_PATH_CFG_ATTRIBUTE_INTF_RX),
FIB_PATH_CFG_FLAG_RPF_ID = (1 << FIB_PATH_CFG_ATTRIBUTE_RPF_ID),
@@ -181,6 +199,16 @@ extern u16 fib_path_get_preference(fib_node_index_t path_index);
extern u32 fib_path_get_rpf_id(fib_node_index_t path_index);
extern void fib_path_module_init(void);
+
+/**
+ * Path encode context to use when walking a path-list
+ * to encode paths
+ */
+typedef struct fib_path_encode_ctx_t_
+{
+ fib_route_path_t *rpaths;
+} fib_path_encode_ctx_t;
+
extern fib_path_list_walk_rc_t fib_path_encode(fib_node_index_t path_list_index,
fib_node_index_t path_index,
const struct fib_path_ext_t_ *ext_list,
diff --git a/src/vnet/fib/fib_path_list.c b/src/vnet/fib/fib_path_list.c
index 47170adf864..7c57c807327 100644
--- a/src/vnet/fib/fib_path_list.c
+++ b/src/vnet/fib/fib_path_list.c
@@ -830,12 +830,14 @@ fib_path_list_find_rpath (fib_node_index_t path_list_index,
* The path-list returned could either have been newly created, or
* can be a shared path-list from the data-base.
*/
-fib_node_index_t
-fib_path_list_path_add (fib_node_index_t path_list_index,
- const fib_route_path_t *rpaths)
+fib_node_index_t*
+fib_path_list_paths_add (fib_node_index_t path_list_index,
+ const fib_route_path_t *rpaths)
{
- fib_node_index_t new_path_index, *orig_path_index;
+ fib_node_index_t *new_path_indices, *path_index;
+ const fib_route_path_t *rpath;
fib_path_list_t *path_list;
+ u32 ii;
/*
* alloc the new list before we retrieve the old one, lest
@@ -843,40 +845,65 @@ fib_path_list_path_add (fib_node_index_t path_list_index,
*/
path_list = fib_path_list_get(path_list_index);
- ASSERT(1 == vec_len(rpaths));
ASSERT(!(path_list->fpl_flags & FIB_PATH_LIST_FLAG_SHARED));
- FIB_PATH_LIST_DBG(path_list, "path-add");
+ FIB_PATH_LIST_DBG(path_list, "paths-add");
- new_path_index = fib_path_create(path_list_index,
- rpaths);
+ new_path_indices = NULL;
+ vec_validate_init_empty(new_path_indices,
+ vec_len(rpaths) - 1,
+ FIB_NODE_INDEX_INVALID);
- vec_foreach (orig_path_index, path_list->fpl_paths)
+ vec_foreach (path_index, path_list->fpl_paths)
{
/*
* don't add duplicate paths
*/
- if (0 == fib_path_cmp(new_path_index, *orig_path_index))
+ int found = 0;
+
+ vec_foreach_index(ii, rpaths)
{
- fib_path_destroy(new_path_index);
- return (*orig_path_index);
+ rpath = &rpaths[ii];
+ if (0 == fib_path_cmp_w_route_path(*path_index, rpath))
+ {
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ {
+ new_path_indices[ii] = *path_index;
}
}
/*
- * Add the new path - no sort, no sharing, no key..
+ * new_path_indices array contains INVALID for each path not found
+ * and something valid for matches
*/
- vec_add1(path_list->fpl_paths, new_path_index);
+ vec_foreach_index (ii, new_path_indices)
+ {
+ path_index = &new_path_indices[ii];
+ rpath = &rpaths[ii];
- FIB_PATH_LIST_DBG(path_list, "path-added");
+ if (FIB_NODE_INDEX_INVALID == *path_index)
+ {
+ *path_index = fib_path_create(path_list_index, rpath);
+ /*
+ * Add the new path - no sort, no sharing, no key..
+ */
+ vec_add1(path_list->fpl_paths, *path_index);
- /*
- * no shared path list requested. resolve and use the one
- * just created.
- */
- fib_path_resolve(new_path_index);
+ /*
+ * no shared path list requested. resolve and use the one
+ * just created.
+ */
+ fib_path_resolve(*path_index);
+ }
+ }
+
+ FIB_PATH_LIST_DBG(path_list, "paths-added");
- return (new_path_index);
+ return (new_path_indices);
}
fib_node_index_t
@@ -884,14 +911,13 @@ fib_path_list_copy_and_path_add (fib_node_index_t orig_path_list_index,
fib_path_list_flags_t flags,
const fib_route_path_t *rpaths)
{
- fib_node_index_t path_index, new_path_index, *orig_path_index;
+ fib_node_index_t new_path_index, *orig_path_index;
fib_path_list_t *path_list, *orig_path_list;
fib_node_index_t exist_path_list_index;
fib_node_index_t path_list_index;
+ const fib_route_path_t *rpath;
fib_node_index_t pi;
- ASSERT(1 == vec_len(rpaths));
-
/*
* alloc the new list before we retrieve the old one, lest
* the alloc result in a realloc
@@ -905,32 +931,50 @@ fib_path_list_copy_and_path_add (fib_node_index_t orig_path_list_index,
flags = fib_path_list_flags_fixup(flags);
path_list->fpl_flags = flags;
- vec_validate(path_list->fpl_paths, vec_len(orig_path_list->fpl_paths));
+ vec_validate(path_list->fpl_paths,
+ (vec_len(orig_path_list->fpl_paths) +
+ vec_len(rpaths) - 1));
pi = 0;
- new_path_index = fib_path_create(path_list_index,
- rpaths);
-
- vec_foreach (orig_path_index, orig_path_list->fpl_paths)
+ vec_foreach(orig_path_index, orig_path_list->fpl_paths)
{
/*
- * don't add duplicate paths
- * In the unlikely event the path is a duplicate, then we'll
- * find a matching path-list later and this one will be toast.
+ * copy the original paths over to the new list
*/
- if (0 != fib_path_cmp(new_path_index, *orig_path_index))
+ path_list->fpl_paths[pi++] = fib_path_copy(*orig_path_index,
+ path_list_index);
+ }
+ vec_foreach(rpath, rpaths)
+ {
+ int duplicate = 0;
+
+ new_path_index = fib_path_create(path_list_index, rpath);
+
+ vec_foreach(orig_path_index, orig_path_list->fpl_paths)
{
- path_index = fib_path_copy(*orig_path_index, path_list_index);
- path_list->fpl_paths[pi++] = path_index;
+ /*
+ * don't add duplicate paths
+ * In the unlikely event the path is a duplicate, then we'll
+ * find a matching path-list later and this one will be toast.
+ */
+ if (0 == fib_path_cmp(new_path_index, *orig_path_index))
+ {
+ duplicate = 1;
+ break;
+ }
+ }
+ if (duplicate)
+ {
+ _vec_len(path_list->fpl_paths) =
+ vec_len(path_list->fpl_paths) - 1;
+ fib_path_destroy(new_path_index);
}
else
{
- _vec_len(path_list->fpl_paths) = vec_len(orig_path_list->fpl_paths);
+ path_list->fpl_paths[pi++] = new_path_index;
}
}
- path_list->fpl_paths[pi] = new_path_index;
-
/*
* we sort the paths since the key for the path-list is
* the description of the paths it contains. The paths need to
@@ -978,51 +1022,60 @@ fib_path_list_copy_and_path_add (fib_node_index_t orig_path_list_index,
}
/*
- * fib_path_list_path_remove
+ * fib_path_list_paths_remove
*/
-fib_node_index_t
-fib_path_list_path_remove (fib_node_index_t path_list_index,
+fib_node_index_t*
+fib_path_list_paths_remove (fib_node_index_t path_list_index,
const fib_route_path_t *rpaths)
{
- fib_node_index_t match_path_index, tmp_path_index;
+ fib_node_index_t *match_path_indices;
fib_path_list_t *path_list;
- fib_node_index_t pi;
+ i32 ii, jj;
path_list = fib_path_list_get(path_list_index);
+ match_path_indices = NULL;
+ vec_validate_init_empty(match_path_indices,
+ vec_len(rpaths) - 1,
+ FIB_NODE_INDEX_INVALID);
- ASSERT(1 == vec_len(rpaths));
ASSERT(!(path_list->fpl_flags & FIB_PATH_LIST_FLAG_SHARED));
FIB_PATH_LIST_DBG(path_list, "path-remove");
/*
- * create a representation of the path to be removed, so it
- * can be used as a comparison object during the copy.
+ * the number of existing paths is likely to be larger than the
+ * number of paths being added.
+ * walk in reverse so the vec_del is ok
*/
- tmp_path_index = fib_path_create(path_list_index,
- rpaths);
- match_path_index = FIB_NODE_INDEX_INVALID;
-
- vec_foreach_index (pi, path_list->fpl_paths)
+ vec_foreach_index_backwards(ii, path_list->fpl_paths)
{
- if (0 == fib_path_cmp(tmp_path_index,
- path_list->fpl_paths[pi]))
+ int found = ~0;
+
+ vec_foreach_index(jj, rpaths)
{
+ if (0 == fib_path_cmp_w_route_path(path_list->fpl_paths[ii],
+ &rpaths[jj]))
+ {
+ found = jj;
+ break;
+ }
+ }
+ if (~0 != found)
+ {
+ fib_node_index_t match_path_index;
/*
* match - remove it
*/
- match_path_index = path_list->fpl_paths[pi];
+ match_path_index = path_list->fpl_paths[ii];
+ vec_del1(path_list->fpl_paths, ii);
fib_path_destroy(match_path_index);
- vec_del1(path_list->fpl_paths, pi);
- }
+ match_path_indices[jj] = match_path_index;
+ }
}
- /*
- * done with the temporary now
- */
- fib_path_destroy(tmp_path_index);
+ FIB_PATH_LIST_DBG(path_list, "paths-removed");
- return (match_path_index);
+ return (match_path_indices);
}
/*
@@ -1035,10 +1088,11 @@ fib_path_list_path_remove (fib_node_index_t path_list_index,
fib_node_index_t
fib_path_list_copy_and_path_remove (fib_node_index_t orig_path_list_index,
fib_path_list_flags_t flags,
- const fib_route_path_t *rpath)
+ const fib_route_path_t *rpaths)
{
- fib_node_index_t path_index, *orig_path_index, path_list_index, tmp_path_index;
+ fib_node_index_t *orig_path_index, path_list_index, tmp_path_index;
fib_path_list_t *path_list, *orig_path_list;
+ const fib_route_path_t *rpath;
fib_node_index_t pi;
path_list = fib_path_list_alloc(&path_list_index);
@@ -1053,44 +1107,42 @@ fib_path_list_copy_and_path_remove (fib_node_index_t orig_path_list_index,
* allocate as many paths as we might need in one go, rather than
* using vec_add to do a few at a time.
*/
- if (vec_len(orig_path_list->fpl_paths) > 1)
- {
- vec_validate(path_list->fpl_paths, vec_len(orig_path_list->fpl_paths) - 2);
- }
+ vec_validate(path_list->fpl_paths,
+ vec_len(orig_path_list->fpl_paths) - 1);
pi = 0;
/*
* create a representation of the path to be removed, so it
* can be used as a comparison object during the copy.
*/
- tmp_path_index = fib_path_create(path_list_index, rpath);
-
- vec_foreach (orig_path_index, orig_path_list->fpl_paths)
+ vec_foreach(orig_path_index, orig_path_list->fpl_paths)
{
- if (0 != fib_path_cmp(tmp_path_index, *orig_path_index)) {
- path_index = fib_path_copy(*orig_path_index, path_list_index);
- if (pi < vec_len(path_list->fpl_paths))
- {
- path_list->fpl_paths[pi++] = path_index;
- }
- else
- {
- /*
- * this is the unlikely case that the path being
- * removed does not match one in the path-list, so
- * we end up with as many paths as we started with.
- * the paths vector was sized above with the expectation
- * that we would have 1 less.
- */
- vec_add1(path_list->fpl_paths, path_index);
- }
- }
+ /*
+ * copy the original paths over to the new list
+ */
+ path_list->fpl_paths[pi++] = fib_path_copy(*orig_path_index,
+ path_list_index);
}
+ vec_foreach(rpath, rpaths)
+ {
+ int found = 0;
+ tmp_path_index = fib_path_create(path_list_index, rpath);
- /*
- * done with the temporary now
- */
- fib_path_destroy(tmp_path_index);
+ vec_foreach_index(pi, path_list->fpl_paths)
+ {
+ if (0 == fib_path_cmp(tmp_path_index, path_list->fpl_paths[pi]))
+ {
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ {
+ fib_path_destroy(path_list->fpl_paths[pi]);
+ vec_del1(path_list->fpl_paths, pi);
+ }
+ fib_path_destroy(tmp_path_index);
+ }
/*
* if there are no paths, then the new path-list is aborted
diff --git a/src/vnet/fib/fib_path_list.h b/src/vnet/fib/fib_path_list.h
index 380eb1a6864..06c1b14204b 100644
--- a/src/vnet/fib/fib_path_list.h
+++ b/src/vnet/fib/fib_path_list.h
@@ -118,10 +118,10 @@ extern fib_node_index_t fib_path_list_copy_and_path_remove(
fib_node_index_t pl_index,
fib_path_list_flags_t flags,
const fib_route_path_t *path);
-extern fib_node_index_t fib_path_list_path_add (
+extern fib_node_index_t* fib_path_list_paths_add (
fib_node_index_t path_list_index,
const fib_route_path_t *rpaths);
-extern fib_node_index_t fib_path_list_path_remove (
+extern fib_node_index_t* fib_path_list_paths_remove (
fib_node_index_t path_list_index,
const fib_route_path_t *rpaths);
diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c
index 56c8f030fda..3778fa9a944 100644
--- a/src/vnet/fib/fib_table.c
+++ b/src/vnet/fib/fib_table.c
@@ -481,7 +481,7 @@ fib_table_entry_special_remove (u32 fib_index,
*/
static void
fib_table_route_path_fixup (const fib_prefix_t *prefix,
- fib_entry_flag_t eflags,
+ fib_entry_flag_t *eflags,
fib_route_path_t *path)
{
/*
@@ -496,7 +496,8 @@ fib_table_route_path_fixup (const fib_prefix_t *prefix,
/* Prefix recurses via itse;f */
path->frp_flags |= FIB_ROUTE_PATH_DROP;
}
- if (fib_prefix_is_host(prefix) &&
+ if (!(path->frp_flags & FIB_ROUTE_PATH_LOCAL) &&
+ fib_prefix_is_host(prefix) &&
ip46_address_is_zero(&path->frp_addr) &&
path->frp_sw_if_index != ~0 &&
path->frp_proto != DPO_PROTO_ETHERNET)
@@ -504,18 +505,27 @@ fib_table_route_path_fixup (const fib_prefix_t *prefix,
path->frp_addr = prefix->fp_addr;
path->frp_flags |= FIB_ROUTE_PATH_ATTACHED;
}
- if (eflags & FIB_ENTRY_FLAG_DROP)
+ if (*eflags & FIB_ENTRY_FLAG_DROP)
{
path->frp_flags |= FIB_ROUTE_PATH_DROP;
}
- if (eflags & FIB_ENTRY_FLAG_LOCAL)
+ if (*eflags & FIB_ENTRY_FLAG_LOCAL)
{
path->frp_flags |= FIB_ROUTE_PATH_LOCAL;
}
- if (eflags & FIB_ENTRY_FLAG_EXCLUSIVE)
+ if (*eflags & FIB_ENTRY_FLAG_EXCLUSIVE)
{
path->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
}
+ if (path->frp_flags & FIB_ROUTE_PATH_LOCAL)
+ {
+ *eflags |= FIB_ENTRY_FLAG_LOCAL;
+
+ if (path->frp_sw_if_index != ~0)
+ {
+ *eflags |= FIB_ENTRY_FLAG_CONNECTED;
+ }
+ }
}
fib_node_index_t
@@ -538,6 +548,7 @@ fib_table_entry_path_add (u32 fib_index,
.frp_fib_index = next_hop_fib_index,
.frp_weight = next_hop_weight,
.frp_flags = path_flags,
+ .frp_rpf_id = INDEX_INVALID,
.frp_label_stack = next_hop_labels,
};
fib_node_index_t fib_entry_index;
@@ -557,7 +568,7 @@ fib_table_entry_path_add2 (u32 fib_index,
const fib_prefix_t *prefix,
fib_source_t source,
fib_entry_flag_t flags,
- fib_route_path_t *rpath)
+ fib_route_path_t *rpaths)
{
fib_node_index_t fib_entry_index;
fib_table_t *fib_table;
@@ -566,16 +577,16 @@ fib_table_entry_path_add2 (u32 fib_index,
fib_table = fib_table_get(fib_index, prefix->fp_proto);
fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
- for (ii = 0; ii < vec_len(rpath); ii++)
+ for (ii = 0; ii < vec_len(rpaths); ii++)
{
- fib_table_route_path_fixup(prefix, flags, &rpath[ii]);
+ fib_table_route_path_fixup(prefix, &flags, &rpaths[ii]);
}
if (FIB_NODE_INDEX_INVALID == fib_entry_index)
{
fib_entry_index = fib_entry_create(fib_index, prefix,
source, flags,
- rpath);
+ rpaths);
fib_table_entry_insert(fib_table, prefix, fib_entry_index);
fib_table->ft_src_route_counts[source]++;
@@ -585,7 +596,7 @@ fib_table_entry_path_add2 (u32 fib_index,
int was_sourced;
was_sourced = fib_entry_is_sourced(fib_entry_index, source);
- fib_entry_path_add(fib_entry_index, source, flags, rpath);;
+ fib_entry_path_add(fib_entry_index, source, flags, rpaths);;
if (was_sourced != fib_entry_is_sourced(fib_entry_index, source))
{
@@ -600,7 +611,7 @@ void
fib_table_entry_path_remove2 (u32 fib_index,
const fib_prefix_t *prefix,
fib_source_t source,
- fib_route_path_t *rpath)
+ fib_route_path_t *rpaths)
{
/*
* 1 is it present
@@ -609,8 +620,8 @@ fib_table_entry_path_remove2 (u32 fib_index,
* no => cover walk
*/
fib_node_index_t fib_entry_index;
+ fib_route_path_t *rpath;
fib_table_t *fib_table;
- u32 ii;
fib_table = fib_table_get(fib_index, prefix->fp_proto);
fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
@@ -640,16 +651,16 @@ fib_table_entry_path_remove2 (u32 fib_index,
*/
fib_entry_lock(fib_entry_index);
- for (ii = 0; ii < vec_len(rpath); ii++)
+ vec_foreach(rpath, rpaths)
{
- fib_table_route_path_fixup(
- prefix,
- fib_entry_get_flags_for_source(fib_entry_index,
- source),
- &rpath[ii]);
+ fib_entry_flag_t eflags;
+
+ eflags = fib_entry_get_flags_for_source(fib_entry_index,
+ source);
+ fib_table_route_path_fixup(prefix, &eflags, rpath);
}
- src_flag = fib_entry_path_remove(fib_entry_index, source, rpath);
+ src_flag = fib_entry_path_remove(fib_entry_index, source, rpaths);
if (!(FIB_ENTRY_SRC_FLAG_ADDED & src_flag))
{
@@ -735,7 +746,7 @@ fib_table_entry_update (u32 fib_index,
for (ii = 0; ii < vec_len(paths); ii++)
{
- fib_table_route_path_fixup(prefix, flags, &paths[ii]);
+ fib_table_route_path_fixup(prefix, &flags, &paths[ii]);
}
/*
* sort the paths provided by the control plane. this means
diff --git a/src/vnet/fib/fib_types.api b/src/vnet/fib/fib_types.api
index 8268870c0a7..9073192c3c4 100644
--- a/src/vnet/fib/fib_types.api
+++ b/src/vnet/fib/fib_types.api
@@ -1,5 +1,6 @@
+/* Hey Emacs use -*- mode: C -*- */
/*
- * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Copyright (c) 2018 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -12,10 +13,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
+
+option version = "2.0.0";
+import "vnet/ip/ip_types.api";
+
/** \brief MPLS label
*/
-typeonly define fib_mpls_label
+typedef fib_mpls_label
{
u8 is_uniform;
u32 label;
@@ -23,48 +27,101 @@ typeonly define fib_mpls_label
u8 exp;
};
+/** brief A path's nexthop protocol
+ */
+enum fib_path_nh_proto
+{
+ FIB_API_PATH_NH_PROTO_IP4 = 0,
+ FIB_API_PATH_NH_PROTO_IP6,
+ FIB_API_PATH_NH_PROTO_MPLS,
+ FIB_API_PATH_NH_PROTO_ETHERNET,
+ FIB_API_PATH_NH_PROTO_BIER,
+};
+
+/** \brief Flags for the path
+ */
+enum fib_path_flags
+{
+ FIB_API_PATH_FLAG_NONE = 0,
+ /* the path must resolve via an attached route */
+ FIB_API_PATH_FLAG_RESOLVE_VIA_ATTACHED,
+ /* the path must resolve via a host route */
+ FIB_API_PATH_FLAG_RESOLVE_VIA_HOST,
+};
+
+/* \brief A description of the 'next-hop' for a path
+ * this can be something that needs resolving like an IP address
+ * (into an adjacency or another FIB entry) or the index of another
+ * VPP object that was previously created (i.e. a UDP encap object)
+ */
+typedef fib_path_nh
+{
+ /* proto = IP[46] */
+ vl_api_address_union_t address;
+ /* proto = MPLS */
+ u32 via_label;
+ /* proto = ANY, determined by path type */
+ u32 obj_id;
+ /* path-type = CLASSIFY */
+ u32 classify_table_index;
+};
+
+enum fib_path_type
+{
+ /* Normal Paths */
+ FIB_API_PATH_TYPE_NORMAL = 0,
+ /* local/for-us/receive = packet sent to VPP's L4 stack */
+ FIB_API_PATH_TYPE_LOCAL,
+ /* packet is dropped */
+ FIB_API_PATH_TYPE_DROP,
+ /* Packet is UDP encapped - set obj_id in fib_path_nh_id */
+ FIB_API_PATH_TYPE_UDP_ENCAP,
+ /* Packet is BIER encapped - set obj_id in fib_path_nh_id */
+ FIB_API_PATH_TYPE_BIER_IMP,
+ /* packet will generated ICMP unreach to sender */
+ FIB_API_PATH_TYPE_ICMP_UNREACH,
+ /* packet will generated ICMP prohibt to sender */
+ FIB_API_PATH_TYPE_ICMP_PROHIBIT,
+ /* perform a lookup based on the packet's source address */
+ FIB_API_PATH_TYPE_SOURCE_LOOKUP,
+ /* Distributed Virtual router, packet is forwarded with the original
+ L2 header unchanged */
+ FIB_API_PATH_TYPE_DVR,
+ /* packet's RX interface is changed */
+ FIB_API_PATH_TYPE_INTERFACE_RX,
+ /* packet will be sent to a classify table */
+ FIB_API_PATH_TYPE_CLASSIFY,
+};
+
/** \brief FIB path
@param sw_if_index - index of the interface
+ @param table_id - The table ID in which to find the next-hop address
+ (for recursive routes, i.e. when the interface is
+ not given)
@param weight - The weight, for UCMP
@param preference - The preference of the path. lowest preference
- is prefered
- @param is_local - local if non-zero, else remote
- @param is_drop - Drop the packet
- @param is_unreach - Drop the packet and rate limit send ICMP unreachable
- @param is_prohibit - Drop the packet and rate limit send ICMP prohibited
- @param is_udp_encap - The path describes a UDP-o-IP encapsulation.
- @param is_dvr - Does the route resolve via a DVR interface.
- @param is_source_lookup - The the path is a deaggregate path (i.e. a lookup
- in another table) is the lookup on the packet's
- source address or destination.
- @param afi - dpo_proto_t protocol that describes the next-hop address
- @param via_label - The next-hop is a resolved via a local label
- @param next_hop[16] - the next hop address
- @param next_hop_id - Used when the path resolves via an object
- that has a unique identifier. e.g. the UDP
- encap object
+ is prefered
+ @param rpf-id - For paths that pop to multicast, this the the
+ RPF ID the packet will be given (0 and ~0 => unset)
+ @param type - the path type
+ @param flags - path flags
+ @param proto - protocol that describes the next-hop address
+ @param nh - the next-hop/net resolving object
+ @param n_labels - the number of labels present in the stack
+ @param label_stack - a stack of MPLS labels
*/
-typeonly define fib_path
+typedef fib_path
{
u32 sw_if_index;
u32 table_id;
+ u32 rpf_id;
u8 weight;
u8 preference;
- u8 is_local;
- u8 is_drop;
- u8 is_udp_encap;
- u8 is_unreach;
- u8 is_prohibit;
- u8 is_resolve_host;
- u8 is_resolve_attached;
- u8 is_dvr;
- u8 is_source_lookup;
- u8 is_interface_rx;
- u8 afi;
- u8 next_hop[16];
- u32 next_hop_id;
- u32 rpf_id;
- u32 via_label;
+
+ vl_api_fib_path_type_t type;
+ vl_api_fib_path_flags_t flags;
+ vl_api_fib_path_nh_proto_t proto;
+ vl_api_fib_path_nh_t nh;
u8 n_labels;
vl_api_fib_mpls_label_t label_stack[16];
};
diff --git a/src/vnet/fib/fib_types.c b/src/vnet/fib/fib_types.c
index 7f0f13079ee..4b1280f6fa8 100644
--- a/src/vnet/fib/fib_types.c
+++ b/src/vnet/fib/fib_types.c
@@ -598,7 +598,14 @@ unformat_fib_route_path (unformat_input_t * input, va_list * args)
rpath->frp_proto = DPO_PROTO_IP4;
rpath->frp_flags = FIB_ROUTE_PATH_INTF_RX;
}
- else if (unformat (input, "out-labels"))
+ else if (unformat (input, "local"))
+ {
+ clib_memset (&rpath->frp_addr, 0, sizeof (rpath->frp_addr));
+ rpath->frp_sw_if_index = ~0;
+ rpath->frp_weight = 1;
+ rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
+ }
+ else if (unformat (input, "out-labels"))
{
while (unformat (input, "%U",
unformat_mpls_unicast_label, &out_label))
@@ -615,6 +622,15 @@ unformat_fib_route_path (unformat_input_t * input, va_list * args)
{
rpath->frp_proto = *payload_proto;
}
+ else if (unformat (input, "via"))
+ {
+ /* new path, back up and return */
+ unformat_put_input (input);
+ unformat_put_input (input);
+ unformat_put_input (input);
+ unformat_put_input (input);
+ break;
+ }
else
{
return (0);
diff --git a/src/vnet/fib/fib_types.h b/src/vnet/fib/fib_types.h
index 472ce888b4e..77b133fa9db 100644
--- a/src/vnet/fib/fib_types.h
+++ b/src/vnet/fib/fib_types.h
@@ -379,6 +379,10 @@ typedef enum fib_route_path_flags_t_
* A path that resolves via a DVR DPO
*/
FIB_ROUTE_PATH_DVR = (1 << 14),
+
+ FIB_ROUTE_PATH_ICMP_UNREACH = (1 << 15),
+ FIB_ROUTE_PATH_ICMP_PROHIBIT = (1 << 16),
+ FIB_ROUTE_PATH_CLASSIFY = (1 << 17),
} fib_route_path_flags_t;
/**
@@ -496,18 +500,24 @@ typedef struct fib_route_path_t_ {
*/
mpls_eos_bit_t frp_eos;
};
- };
- union {
/**
- * The interface.
- * Will be invalid for recursive paths.
+ * A path via a BIER imposition object.
+ * Present in an mfib path list
*/
- u32 frp_sw_if_index;
- /**
- * The RPF-ID
- */
- fib_rpf_id_t frp_rpf_id;
+ index_t frp_bier_imp;
};
+
+ /**
+ * The interface.
+ * Will be invalid for recursive paths.
+ */
+ u32 frp_sw_if_index;
+
+ /**
+ * The RPF-ID
+ */
+ fib_rpf_id_t frp_rpf_id;
+
union {
/**
* The FIB index to lookup the nexthop
@@ -523,7 +533,6 @@ typedef struct fib_route_path_t_ {
* The outgoing MPLS label Stack. NULL implies no label.
*/
fib_mpls_label_t *frp_label_stack;
-
/**
* Exclusive DPO
*/
@@ -540,20 +549,24 @@ typedef struct fib_route_path_t_ {
bier_table_id_t frp_bier_tbl;
/**
- * A path via a BIER imposition object.
- * Present in an mfib path list
+ * UDP encap ID
*/
- index_t frp_bier_imp;
+ u32 frp_udp_encap_id;
/**
- * UDP encap ID
+ * Classify table ID
*/
- u32 frp_udp_encap_id;
+ u32 frp_classify_table_id;
/**
* Resolving via a BIER Fmask
*/
index_t frp_bier_fmask;
+
+ /**
+ * The DPO for use with exclusive paths
+ */
+ dpo_id_t frp_dpo;
};
/**
* [un]equal cost path weight
@@ -582,15 +595,6 @@ extern uword unformat_fib_route_path(unformat_input_t * input, va_list * args);
#define FIB_ROUTE_PATH_HELP "[next-hop-address] [next-hop-interface] [next-hop-table <value>] [weight <value>] [preference <value>] [udp-encap-id <value>] [ip4-lookup-in-table <value>] [ip6-lookup-in-table <value>] [mpls-lookup-in-table <value>] [resolve-via-host] [resolve-via-connected] [rx-ip4 <interface>] [out-labels <value value value>]"
/**
- * @brief
- * A representation of a fib path for fib_path_encode to convey the information to the caller
- */
-typedef struct fib_route_path_encode_t_ {
- fib_route_path_t rpath;
- dpo_id_t dpo;
-} fib_route_path_encode_t;
-
-/**
* return code to control pat-hlist walk
*/
typedef enum fib_path_list_walk_rc_t_
diff --git a/src/vnet/fib/mpls_fib.c b/src/vnet/fib/mpls_fib.c
index 6440689eab5..6f59eb3ee44 100644
--- a/src/vnet/fib/mpls_fib.c
+++ b/src/vnet/fib/mpls_fib.c
@@ -442,11 +442,24 @@ mpls_fib_show (vlib_main_t * vm,
pool_foreach (fib_table, mpls_main.fibs,
({
+ fib_source_t source;
+ u8 *s = NULL;
+
if (table_id >= 0 && table_id != fib_table->ft_table_id)
continue;
- vlib_cli_output (vm, "%v, fib_index %d",
- fib_table->ft_desc, mpls_main.fibs - fib_table);
+ s = format (s, "%v, fib_index:%d locks:[",
+ fib_table->ft_desc, mpls_main.fibs - fib_table);
+ FOR_EACH_FIB_SOURCE(source)
+ {
+ if (0 != fib_table->ft_locks[source])
+ {
+ s = format(s, "%U:%d, ",
+ format_fib_source, source,
+ fib_table->ft_locks[source]);
+ }
+ }
+ vlib_cli_output (vm, "%v]", s);
if (MPLS_LABEL_INVALID == label)
{
diff --git a/src/vnet/geneve/geneve.c b/src/vnet/geneve/geneve.c
index 8b773c1e009..b0c17e2a988 100644
--- a/src/vnet/geneve/geneve.c
+++ b/src/vnet/geneve/geneve.c
@@ -524,8 +524,9 @@ int vnet_geneve_add_del_tunnel
.frp_addr = zero_addr,
.frp_sw_if_index = 0xffffffff,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
const mfib_prefix_t mpfx = {
.fp_proto = fp,
@@ -539,17 +540,14 @@ int vnet_geneve_add_del_tunnel
* - the accepting interface is that from the API
*/
mfib_table_entry_path_update (t->encap_fib_index,
- &mpfx,
- MFIB_SOURCE_GENEVE,
- &path, MFIB_ITF_FLAG_FORWARD);
+ &mpfx, MFIB_SOURCE_GENEVE, &path);
path.frp_sw_if_index = a->mcast_sw_if_index;
path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
mfei = mfib_table_entry_path_update (t->encap_fib_index,
&mpfx,
- MFIB_SOURCE_GENEVE,
- &path,
- MFIB_ITF_FLAG_ACCEPT);
+ MFIB_SOURCE_GENEVE, &path);
/*
* Create the mcast adjacency to send traffic to the group
diff --git a/src/vnet/ip/ip.api b/src/vnet/ip/ip.api
index afb0960c78a..2dae4385602 100644
--- a/src/vnet/ip/ip.api
+++ b/src/vnet/ip/ip.api
@@ -20,15 +20,13 @@
called through a shared memory interface.
*/
-option version = "2.0.1";
-import "vnet/ip/ip_types.api";
+option version = "3.0.0";
+
import "vnet/fib/fib_types.api";
import "vnet/ethernet/ethernet_types.api";
+import "vnet/mfib/mfib_types.api";
-/** \brief Add / del table request
- A table can be added multiple times, but need be deleted only once.
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
+/** \brief An IP table
@param is_ipv6 - V4 or V6 table
@param table_id - table ID associated with the route
This table ID will apply to both the unicats
@@ -37,70 +35,104 @@ import "vnet/ethernet/ethernet_types.api";
not set by the client, then VPP will generate something
meaningfull.
*/
+typeonly define ip_table
+{
+ u32 table_id;
+ u8 is_ip6;
+ u8 name[64];
+};
+
+/** \brief Add / del table request
+ A table can be added multiple times, but need be deleted only once.
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
autoreply define ip_table_add_del
{
u32 client_index;
u32 context;
- u32 table_id;
- u8 is_ipv6;
u8 is_add;
- u8 name[64];
+ vl_api_ip_table_t table;
};
-/** \brief Dump IP fib table
+/** \brief Dump IP all fib tables
@param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
*/
-define ip_fib_dump
+define ip_table_dump
{
u32 client_index;
u32 context;
};
/** \brief IP FIB table response
- @param table_id - IP fib table id
- @address_length - mask length
- @address - ip4 prefix
- @param count - the number of fib_path in path
- @param path - array of of fib_path structures
+ @param context - sender context
+ @param table - description of the table
*/
-manual_endian manual_print define ip_fib_details
+manual_endian manual_print define ip_table_details
{
u32 context;
+ vl_api_ip_table_t table;
+};
+
+/** \brief An IP route
+ @param table_id The IP table the route is in
+ @param stats_index The index of the route in the stats segment
+ @param prefix the prefix for the route
+ @param n_paths The number of paths the route has
+ @param paths The paths of the route
+*/
+typeonly define ip_route
+{
u32 table_id;
- u8 table_name[64];
- u8 address_length;
- u8 address[4];
- u32 count;
u32 stats_index;
- vl_api_fib_path_t path[count];
+ vl_api_prefix_t prefix;
+ u8 n_paths;
+ vl_api_fib_path_t paths[n_paths];
};
-/** \brief Dump IP6 fib table
+/** \brief Add / del route request
@param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param is_multipath - Set to 1 if these paths will be added/removed
+ to/from the existing set, or 0 to replace
+ the existing set.
+ is_add=0 & is_multipath=0 implies delete all paths
+ @param is_add - Are the paths being added or removed
*/
-define ip6_fib_dump
+define ip_route_add_del
{
u32 client_index;
u32 context;
+ u8 is_add;
+ u8 is_multipath;
+ vl_api_ip_route_t route;
+};
+define ip_route_add_del_reply
+{
+ u32 context;
+ i32 retval;
+ u32 stats_index;
};
-/** \brief IP6 FIB table entry response
- @param table_id - IP6 fib table id
- @param address_length - mask length
- @param address - ip6 prefix
- @param count - the number of fib_path in path
- @param path - array of of fib_path structures
+/** \brief Dump IP routes from a table
+ @param client_index - opaque cookie to identify the sender
+ @param table - The table from which to dump routes (ony ID an AF are needed)
*/
-manual_endian manual_print define ip6_fib_details
+define ip_route_dump
{
+ u32 client_index;
u32 context;
- u32 table_id;
- u8 table_name[64];
- u8 address_length;
- u8 address[16];
- u32 count;
- u32 stats_index;
- vl_api_fib_path_t path[count];
+ vl_api_ip_table_t table;
+};
+
+/** \brief IP FIB table entry response
+ @param route The route entry in the table
+*/
+manual_endian manual_print define ip_route_details
+{
+ u32 context;
+ vl_api_ip_route_t route;
};
/** \brief IP neighbor flags
@@ -359,76 +391,33 @@ autoreply define sw_interface_ip6_enable_disable
u8 enable; /* set to true if enable */
};
-/** \brief Add / del route request
+/** \brief IPv6 set link local address on interface request
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
- @param sw_if_index - software index of the new vlan's parent interface
- @param vrf_id - fib table /vrf associated with the route
- @param lookup_in_vrf -
- @param classify_table_index -
- @param is_add - 1 if adding the route, 0 if deleting
- @param is_drop - Drop the packet
- @param is_unreach - Drop the packet and rate limit send ICMP unreachable
- @param is_prohibit - Drop the packet and rate limit send ICMP prohibited
- @param is_ipv6 - 0 if an ip4 route, else ip6
- @param is_local - The route will result in packets sent to VPP IP stack
- @param is_udp_encap - The path describes a UDP-o-IP encapsulation.
- @param is_classify -
- @param is_multipath - Set to 1 if this is a multipath route, else 0
- @param is_dvr - Does the route resolve via a DVR interface.
- @param is_source_lookup - The the path is a deaggregate path (i.e. a lookup
- in another table) is the lookup on the packet's
- source address or destination.
- @param next_hop_weight - Weight for Unequal cost multi-path
- @param next_hop_preference - Path that are up that have the best preference are
- are used for forwarding. lower value is better.
- @param next_hop_id - Used when the path resolves via an object that has a unique
- identifier.
- @param dst_address_length -
- @param dst_address[16] -
- @param next_hop_address[16] -
- @param next_hop_n_out_labels - the number of labels in the label stack
- @param next_hop_out_label_stack - the next-hop output label stack, outer most first
- @param next_hop_via_label - The next-hop is a resolved via a local label
+ @param sw_if_index - interface to set link local on
+ @param address[] - the new link local address
*/
-define ip_add_del_route
+autoreply define sw_interface_ip6_set_link_local_address
{
u32 client_index;
u32 context;
- u32 next_hop_sw_if_index;
- u32 table_id;
- u32 classify_table_index;
- u32 next_hop_table_id;
- u32 next_hop_id;
- u8 is_add;
- u8 is_drop;
- u8 is_unreach;
- u8 is_prohibit;
- u8 is_ipv6;
- u8 is_local;
- u8 is_classify;
- u8 is_multipath;
- u8 is_resolve_host;
- u8 is_resolve_attached;
- u8 is_dvr;
- u8 is_source_lookup;
- u8 is_udp_encap;
- u8 next_hop_weight;
- u8 next_hop_preference;
- u8 next_hop_proto;
- u8 dst_address_length;
- u8 dst_address[16];
- u8 next_hop_address[16];
- u8 next_hop_n_out_labels;
- u32 next_hop_via_label;
- vl_api_fib_mpls_label_t next_hop_out_label_stack[next_hop_n_out_labels];
-};
-
-define ip_add_del_route_reply
+ u32 sw_if_index;
+ u8 address[16];
+};
+
+/** \brief Dump IP multicast fib table
+ @param client_index - opaque cookie to identify the sender
+*/
+define ip_mtable_dump
{
+ u32 client_index;
u32 context;
- i32 retval;
- u32 stats_index;
+};
+define ip_mtable_details
+{
+ u32 client_index;
+ u32 context;
+ vl_api_ip_table_t table;
};
/** \brief Add / del route request
@@ -459,105 +448,55 @@ define ip_add_del_route_reply
FIXME not complete yet
*/
-define ip_mroute_add_del
+typedef ip_mroute
{
- u32 client_index;
- u32 context;
- u32 next_hop_sw_if_index;
u32 table_id;
u32 entry_flags;
- u32 itf_flags;
u32 rpf_id;
- u32 bier_imp;
- u16 grp_address_length;
- u8 next_hop_afi;
- u8 is_add;
- u8 is_ipv6;
- u8 is_local;
- u8 grp_address[16];
- u8 src_address[16];
- u8 nh_address[16];
-};
-
-define ip_mroute_add_del_reply
-{
- u32 context;
- i32 retval;
- u32 stats_index;
+ vl_api_mprefix_t prefix;
+ u8 n_paths;
+ vl_api_mfib_path_t paths[n_paths];
};
-/** \brief Dump IP multicast fib table
- @param client_index - opaque cookie to identify the sender
-*/
-define ip_mfib_dump
+define ip_mroute_add_del
{
u32 client_index;
u32 context;
+ u8 is_add;
+ u8 is_multipath;
+ vl_api_ip_mroute_t route;
};
-
-/** \brief IP Multicast FIB table response
- @param table_id - IP fib table id
- @address_length - mask length
- @grp_address - Group address/prefix
- @src_address - Source address
- @param count - the number of fib_path in path
- @param path - array of of fib_path structures
-*/
-typedef mfib_path
-{
- vl_api_fib_path_t path;
- u32 itf_flags;
-};
-
-manual_endian manual_print define ip_mfib_details
+define ip_mroute_add_del_reply
{
u32 context;
- u32 table_id;
- u32 entry_flags;
- u32 rpf_id;
- u8 address_length;
- u8 grp_address[4];
- u8 src_address[4];
- u32 count;
+ i32 retval;
u32 stats_index;
- vl_api_mfib_path_t path[count];
};
-/** \brief Dump IP6 multicast fib table
- @param client_index - opaque cookie to identify the sender
+/** \brief Dump IP multicast fib table
+ @param table - The table from which to dump routes (ony ID an AF are needed)
*/
-define ip6_mfib_dump
+define ip_mroute_dump
{
u32 client_index;
u32 context;
+ vl_api_ip_table_t table;
};
-/** \brief IP6 Multicast FIB table response
- @param table_id - IP fib table id
- @address_length - mask length
- @grp_address - Group address/prefix
- @src_address - Source address
- @param count - the number of fib_path in path
- @param path - array of of fib_path structures
+/** \brief IP Multicast Route Details
+ @param route - Details of the route
*/
-manual_endian manual_print define ip6_mfib_details
+manual_endian manual_print define ip_mroute_details
{
u32 context;
- u32 table_id;
- u8 address_length;
- u8 grp_address[16];
- u8 src_address[16];
- u32 count;
- vl_api_mfib_path_t path[count];
+ vl_api_ip_mroute_t route;
};
define ip_address_details
{
u32 context;
- u8 ip[16];
- u8 prefix_length;
u32 sw_if_index;
- u8 is_ipv6;
+ vl_api_prefix_t prefix;
};
define ip_address_dump
@@ -614,9 +553,7 @@ define mfib_signal_details
u32 context;
u32 sw_if_index;
u32 table_id;
- u16 grp_address_len;
- u8 grp_address[16];
- u8 src_address[16];
+ vl_api_mprefix_t prefix;
u16 ip_packet_len;
u8 ip_packet_data[256];
};
diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c
index bcbcf5ac8d4..4d2f0704ca3 100644
--- a/src/vnet/ip/ip_api.c
+++ b/src/vnet/ip/ip_api.c
@@ -29,18 +29,15 @@
#include <vnet/ip/ip_types_api.h>
#include <vnet/ip/ip6_neighbor.h>
#include <vnet/ip/ip_punt_drop.h>
+#include <vnet/ip/ip_types_api.h>
#include <vnet/fib/fib_table.h>
#include <vnet/fib/fib_api.h>
-#include <vnet/dpo/drop_dpo.h>
-#include <vnet/dpo/receive_dpo.h>
-#include <vnet/dpo/lookup_dpo.h>
-#include <vnet/dpo/classify_dpo.h>
-#include <vnet/dpo/ip_null_dpo.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/mfib/ip6_mfib.h>
#include <vnet/mfib/ip4_mfib.h>
#include <vnet/mfib/mfib_signal.h>
#include <vnet/mfib/mfib_entry.h>
+#include <vnet/mfib/mfib_api.h>
#include <vnet/ip/ip_source_and_port_range_check.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>
@@ -71,10 +68,10 @@
#define foreach_ip_api_msg \
-_(IP_FIB_DUMP, ip_fib_dump) \
-_(IP6_FIB_DUMP, ip6_fib_dump) \
-_(IP_MFIB_DUMP, ip_mfib_dump) \
-_(IP6_MFIB_DUMP, ip6_mfib_dump) \
+_(IP_TABLE_DUMP, ip_table_dump) \
+_(IP_ROUTE_DUMP, ip_route_dump) \
+_(IP_MTABLE_DUMP, ip_mtable_dump) \
+_(IP_MROUTE_DUMP, ip_mroute_dump) \
_(IP_NEIGHBOR_DUMP, ip_neighbor_dump) \
_(IP_MROUTE_ADD_DEL, ip_mroute_add_del) \
_(MFIB_SIGNAL_DUMP, mfib_signal_dump) \
@@ -93,7 +90,7 @@ _(PROXY_ARP_DUMP, proxy_arp_dump) \
_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \
_(PROXY_ARP_INTFC_DUMP, proxy_arp_intfc_dump) \
_(RESET_FIB, reset_fib) \
-_(IP_ADD_DEL_ROUTE, ip_add_del_route) \
+_(IP_ROUTE_ADD_DEL, ip_route_add_del) \
_(IP_TABLE_ADD_DEL, ip_table_add_del) \
_(IP_PUNT_POLICE, ip_punt_police) \
_(IP_PUNT_REDIRECT, ip_punt_redirect) \
@@ -211,137 +208,104 @@ vl_api_ip_neighbor_dump_t_handler (vl_api_ip_neighbor_dump_t * mp)
}
static void
-send_ip_fib_details (vpe_api_main_t * am,
- vl_api_registration_t * reg,
- const fib_table_t * table,
- const fib_prefix_t * pfx,
- fib_route_path_encode_t * api_rpaths, u32 context)
-{
- vl_api_ip_fib_details_t *mp;
- fib_route_path_encode_t *api_rpath;
- vl_api_fib_path_t *fp;
- int path_count;
+send_ip_table_details (vpe_api_main_t * am,
+ vl_api_registration_t * reg,
+ u32 context, const fib_table_t * table)
+{
+ vl_api_ip_table_details_t *mp;
- path_count = vec_len (api_rpaths);
- mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
+ mp = vl_msg_api_alloc (sizeof (*mp));
if (!mp)
return;
clib_memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_IP_FIB_DETAILS);
+ mp->_vl_msg_id = ntohs (VL_API_IP_TABLE_DETAILS);
mp->context = context;
- mp->table_id = htonl (table->ft_table_id);
- memcpy (mp->table_name, table->ft_desc,
- clib_min (vec_len (table->ft_desc), sizeof (mp->table_name)));
- mp->address_length = pfx->fp_len;
- memcpy (mp->address, &pfx->fp_addr.ip4, sizeof (pfx->fp_addr.ip4));
- mp->stats_index =
- htonl (fib_table_entry_get_stats_index (table->ft_index, pfx));
-
- mp->count = htonl (path_count);
- fp = mp->path;
- vec_foreach (api_rpath, api_rpaths)
- {
- fib_api_path_encode (api_rpath, fp);
- fp++;
- }
+ mp->table.is_ip6 = (table->ft_proto == FIB_PROTOCOL_IP6);
+ mp->table.table_id = htonl (table->ft_table_id);
+ memcpy (mp->table.name, table->ft_desc,
+ clib_min (vec_len (table->ft_desc), sizeof (mp->table.name)));
vl_api_send_msg (reg, (u8 *) mp);
}
-typedef struct vl_api_ip_fib_dump_walk_ctx_t_
-{
- fib_node_index_t *feis;
-} vl_api_ip_fib_dump_walk_ctx_t;
-
-static fib_table_walk_rc_t
-vl_api_ip_fib_dump_walk (fib_node_index_t fei, void *arg)
-{
- vl_api_ip_fib_dump_walk_ctx_t *ctx = arg;
-
- vec_add1 (ctx->feis, fei);
-
- return (FIB_TABLE_WALK_CONTINUE);
-}
-
static void
-vl_api_ip_fib_dump_t_handler (vl_api_ip_fib_dump_t * mp)
+vl_api_ip_table_dump_t_handler (vl_api_ip_table_dump_t * mp)
{
vpe_api_main_t *am = &vpe_api_main;
vl_api_registration_t *reg;
- ip4_main_t *im = &ip4_main;
fib_table_t *fib_table;
- fib_node_index_t *lfeip;
- const fib_prefix_t *pfx;
- u32 fib_index;
- fib_route_path_encode_t *api_rpaths;
- vl_api_ip_fib_dump_walk_ctx_t ctx = {
- .feis = NULL,
- };
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
/* *INDENT-OFF* */
- pool_foreach (fib_table, im->fibs,
+ pool_foreach (fib_table, ip4_main.fibs,
({
- fib_table_walk(fib_table->ft_index,
- FIB_PROTOCOL_IP4,
- vl_api_ip_fib_dump_walk,
- &ctx);
+ send_ip_table_details(am, reg, mp->context, fib_table);
+ }));
+ pool_foreach (fib_table, ip6_main.fibs,
+ ({
+ /* don't send link locals */
+ if (fib_table->ft_flags & FIB_TABLE_FLAG_IP6_LL)
+ continue;
+ send_ip_table_details(am, reg, mp->context, fib_table);
}));
/* *INDENT-ON* */
+}
- vec_sort_with_function (ctx.feis, fib_entry_cmp_for_sort);
+typedef struct vl_api_ip_fib_dump_walk_ctx_t_
+{
+ fib_node_index_t *feis;
+} vl_api_ip_fib_dump_walk_ctx_t;
- vec_foreach (lfeip, ctx.feis)
- {
- pfx = fib_entry_get_prefix (*lfeip);
- fib_index = fib_entry_get_fib_index (*lfeip);
- fib_table = fib_table_get (fib_index, pfx->fp_proto);
- api_rpaths = NULL;
- fib_entry_encode (*lfeip, &api_rpaths);
- send_ip_fib_details (am, reg, fib_table, pfx, api_rpaths, mp->context);
- vec_free (api_rpaths);
- }
+static fib_table_walk_rc_t
+vl_api_ip_fib_dump_walk (fib_node_index_t fei, void *arg)
+{
+ vl_api_ip_fib_dump_walk_ctx_t *ctx = arg;
- vec_free (ctx.feis);
+ vec_add1 (ctx->feis, fei);
+
+ return (FIB_TABLE_WALK_CONTINUE);
}
static void
-send_ip6_fib_details (vpe_api_main_t * am,
- vl_api_registration_t * reg,
- const fib_table_t * table,
- const fib_prefix_t * pfx,
- fib_route_path_encode_t * api_rpaths, u32 context)
-{
- vl_api_ip6_fib_details_t *mp;
- fib_route_path_encode_t *api_rpath;
+send_ip_route_details (vpe_api_main_t * am,
+ vl_api_registration_t * reg,
+ u32 context, fib_node_index_t fib_entry_index)
+{
+ fib_route_path_t *rpaths, *rpath;
+ vl_api_ip_route_details_t *mp;
+ const fib_prefix_t *pfx;
vl_api_fib_path_t *fp;
int path_count;
- path_count = vec_len (api_rpaths);
+ rpaths = NULL;
+ pfx = fib_entry_get_prefix (fib_entry_index);
+ rpaths = fib_entry_encode (fib_entry_index);
+
+ path_count = vec_len (rpaths);
mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
if (!mp)
return;
clib_memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_IP6_FIB_DETAILS);
+ mp->_vl_msg_id = ntohs (VL_API_IP_ROUTE_DETAILS);
mp->context = context;
- mp->table_id = htonl (table->ft_table_id);
- mp->address_length = pfx->fp_len;
- memcpy (mp->address, &pfx->fp_addr.ip6, sizeof (pfx->fp_addr.ip6));
- memcpy (mp->table_name, table->ft_desc,
- clib_min (vec_len (table->ft_desc), sizeof (mp->table_name)));
- mp->stats_index =
- htonl (fib_table_entry_get_stats_index (table->ft_index, pfx));
-
- mp->count = htonl (path_count);
- fp = mp->path;
- vec_foreach (api_rpath, api_rpaths)
+ ip_prefix_encode (pfx, &mp->route.prefix);
+ mp->route.table_id =
+ htonl (fib_table_get_table_id
+ (fib_entry_get_fib_index (fib_entry_index), pfx->fp_proto));
+ mp->route.n_paths = path_count;
+ mp->route.stats_index =
+ htonl (fib_table_entry_get_stats_index
+ (fib_entry_get_fib_index (fib_entry_index), pfx));
+
+ fp = mp->route.paths;
+ vec_foreach (rpath, rpaths)
{
- fib_api_path_encode (api_rpath, fp);
+ fib_api_path_encode (rpath, fp);
fp++;
}
@@ -353,232 +317,144 @@ typedef struct apt_ip6_fib_show_ctx_t_
fib_node_index_t *entries;
} api_ip6_fib_show_ctx_t;
-static fib_table_walk_rc_t
-api_ip6_fib_table_put_entries (fib_node_index_t fei, void *arg)
-{
- api_ip6_fib_show_ctx_t *ctx = arg;
-
- vec_add1 (ctx->entries, fei);
-
- return (FIB_TABLE_WALK_CONTINUE);
-}
-
static void
-api_ip6_fib_table_get_all (vl_api_registration_t * reg,
- vl_api_ip6_fib_dump_t * mp,
- fib_table_t * fib_table)
+vl_api_ip_route_dump_t_handler (vl_api_ip_route_dump_t * mp)
{
vpe_api_main_t *am = &vpe_api_main;
fib_node_index_t *fib_entry_index;
- api_ip6_fib_show_ctx_t ctx = {
- .entries = NULL,
- };
- fib_route_path_encode_t *api_rpaths;
- const fib_prefix_t *pfx;
-
- ip6_fib_table_walk (fib_table->ft_index,
- api_ip6_fib_table_put_entries, &ctx);
-
- vec_sort_with_function (ctx.entries, fib_entry_cmp_for_sort);
-
- vec_foreach (fib_entry_index, ctx.entries)
- {
- pfx = fib_entry_get_prefix (*fib_entry_index);
- api_rpaths = NULL;
- fib_entry_encode (*fib_entry_index, &api_rpaths);
- send_ip6_fib_details (am, reg, fib_table, pfx, api_rpaths, mp->context);
- vec_free (api_rpaths);
- }
-
- vec_free (ctx.entries);
-}
-
-static void
-vl_api_ip6_fib_dump_t_handler (vl_api_ip6_fib_dump_t * mp)
-{
vl_api_registration_t *reg;
- ip6_main_t *im6 = &ip6_main;
- fib_table_t *fib_table;
+ fib_protocol_t fproto;
+ u32 fib_index;
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
- /* *INDENT-OFF* */
- pool_foreach (fib_table, im6->fibs,
- ({
- /* don't send link locals */
- if (fib_table->ft_flags & FIB_TABLE_FLAG_IP6_LL)
- continue;
-
- api_ip6_fib_table_get_all(reg, mp, fib_table);
- }));
- /* *INDENT-ON* */
-}
-
-static void
-send_ip_mfib_details (vl_api_registration_t * reg,
- u32 context, u32 table_id, fib_node_index_t mfei)
-{
- fib_route_path_encode_t *api_rpath, *api_rpaths = NULL;
- vl_api_ip_mfib_details_t *mp;
- const mfib_prefix_t *pfx;
- mfib_entry_t *mfib_entry;
- vl_api_mfib_path_t *fp;
- int path_count;
+ vl_api_ip_fib_dump_walk_ctx_t ctx = {
+ .feis = NULL,
+ };
- mfib_entry = mfib_entry_get (mfei);
- pfx = mfib_entry_get_prefix (mfei);
- mfib_entry_encode (mfei, &api_rpaths);
+ fproto = (mp->table.is_ip6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4);
+ fib_index = fib_table_find (fproto, ntohl (mp->table.table_id));
- path_count = vec_len (api_rpaths);
- mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
- if (!mp)
+ if (INDEX_INVALID == fib_index)
return;
- clib_memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_IP_MFIB_DETAILS);
- mp->context = context;
- mp->rpf_id = mfib_entry->mfe_rpf_id;
- mp->entry_flags = mfib_entry->mfe_flags;
- mp->table_id = htonl (table_id);
- mp->address_length = pfx->fp_len;
- memcpy (mp->grp_address, &pfx->fp_grp_addr.ip4,
- sizeof (pfx->fp_grp_addr.ip4));
- memcpy (mp->src_address, &pfx->fp_src_addr.ip4,
- sizeof (pfx->fp_src_addr.ip4));
-
- mp->count = htonl (path_count);
- fp = mp->path;
- vec_foreach (api_rpath, api_rpaths)
+ fib_table_walk (fib_index, fproto, vl_api_ip_fib_dump_walk, &ctx);
+
+ vec_foreach (fib_entry_index, ctx.feis)
{
- fib_api_path_encode (api_rpath, &fp->path);
- fp->itf_flags = ntohl (api_rpath->rpath.frp_mitf_flags);
- fp++;
+ send_ip_route_details (am, reg, mp->context, *fib_entry_index);
}
- vec_free (api_rpaths);
- vl_api_send_msg (reg, (u8 *) mp);
+ vec_free (ctx.feis);
}
-typedef struct vl_api_ip_mfib_dump_ctc_t_
+static void
+send_ip_mtable_details (vl_api_registration_t * reg,
+ u32 context, const mfib_table_t * mfib_table)
{
- fib_node_index_t *entries;
-} vl_api_ip_mfib_dump_ctc_t;
+ vl_api_ip_mtable_details_t *mp;
-static int
-vl_api_ip_mfib_table_dump_walk (fib_node_index_t fei, void *arg)
-{
- vl_api_ip_mfib_dump_ctc_t *ctx = arg;
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ if (!mp)
+ return;
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IP_MTABLE_DETAILS);
+ mp->context = context;
- vec_add1 (ctx->entries, fei);
+ mp->table.table_id = htonl (mfib_table->mft_table_id);
+ mp->table.is_ip6 = (FIB_PROTOCOL_IP6 == mfib_table->mft_proto);
- return (0);
+ vl_api_send_msg (reg, (u8 *) mp);
}
static void
-vl_api_ip_mfib_dump_t_handler (vl_api_ip_mfib_dump_t * mp)
+vl_api_ip_mtable_dump_t_handler (vl_api_ip_mtable_dump_t * mp)
{
vl_api_registration_t *reg;
- ip4_main_t *im = &ip4_main;
mfib_table_t *mfib_table;
- fib_node_index_t *mfeip;
- vl_api_ip_mfib_dump_ctc_t ctx = {
- .entries = NULL,
- };
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
/* *INDENT-OFF* */
- pool_foreach (mfib_table, im->mfibs,
+ pool_foreach (mfib_table, ip4_main.mfibs,
+ ({
+ send_ip_mtable_details (reg, mp->context, mfib_table);
+ }));
+ pool_foreach (mfib_table, ip6_main.mfibs,
({
- ip4_mfib_table_walk(&mfib_table->v4,
- vl_api_ip_mfib_table_dump_walk,
- &ctx);
+ send_ip_mtable_details (reg, mp->context, mfib_table);
+ }));
+ /* *INDENT-ON* */
+}
- vec_sort_with_function (ctx.entries, mfib_entry_cmp_for_sort);
+typedef struct vl_api_ip_mfib_dump_ctx_t_
+{
+ fib_node_index_t *entries;
+} vl_api_ip_mfib_dump_ctx_t;
- vec_foreach (mfeip, ctx.entries)
- {
- send_ip_mfib_details (reg, mp->context,
- mfib_table->mft_table_id,
- *mfeip);
- }
- vec_reset_length (ctx.entries);
+static int
+mfib_route_dump_walk (fib_node_index_t fei, void *arg)
+{
+ vl_api_ip_mfib_dump_ctx_t *ctx = arg;
- }));
- /* *INDENT-ON* */
+ vec_add1 (ctx->entries, fei);
- vec_free (ctx.entries);
+ return (0);
}
static void
-send_ip6_mfib_details (vpe_api_main_t * am,
- vl_api_registration_t * reg,
- u32 table_id,
- const mfib_prefix_t * pfx,
- fib_route_path_encode_t * api_rpaths, u32 context)
+send_ip_mroute_details (vpe_api_main_t * am,
+ vl_api_registration_t * reg,
+ u32 context, fib_node_index_t mfib_entry_index)
{
- vl_api_ip6_mfib_details_t *mp;
- fib_route_path_encode_t *api_rpath;
+ fib_route_path_t *rpaths, *rpath;
+ vl_api_ip_mroute_details_t *mp;
+ const mfib_prefix_t *pfx;
vl_api_mfib_path_t *fp;
int path_count;
- path_count = vec_len (api_rpaths);
+ rpaths = NULL;
+ pfx = mfib_entry_get_prefix (mfib_entry_index);
+ rpaths = mfib_entry_encode (mfib_entry_index);
+
+ path_count = vec_len (rpaths);
mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
if (!mp)
return;
clib_memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_IP6_MFIB_DETAILS);
+ mp->_vl_msg_id = ntohs (VL_API_IP_MROUTE_DETAILS);
mp->context = context;
- mp->table_id = htonl (table_id);
- mp->address_length = pfx->fp_len;
- memcpy (mp->grp_address, &pfx->fp_grp_addr.ip6,
- sizeof (pfx->fp_grp_addr.ip6));
- memcpy (mp->src_address, &pfx->fp_src_addr.ip6,
- sizeof (pfx->fp_src_addr.ip6));
-
- mp->count = htonl (path_count);
- fp = mp->path;
- vec_foreach (api_rpath, api_rpaths)
+ ip_mprefix_encode (pfx, &mp->route.prefix);
+ mp->route.table_id =
+ htonl (mfib_table_get_table_id
+ (mfib_entry_get_fib_index (mfib_entry_index), pfx->fp_proto));
+ mp->route.n_paths = htonl (path_count);
+ fp = mp->route.paths;
+ vec_foreach (rpath, rpaths)
{
- fib_api_path_encode (api_rpath, &fp->path);
- fp->itf_flags = ntohl (api_rpath->rpath.frp_mitf_flags);
+ mfib_api_path_encode (rpath, fp);
fp++;
}
vl_api_send_msg (reg, (u8 *) mp);
-}
-
-typedef struct vl_api_ip6_mfib_dump_ctc_t_
-{
- fib_node_index_t *entries;
-} vl_api_ip6_mfib_dump_ctc_t;
-
-static int
-vl_api_ip6_mfib_table_dump_walk (fib_node_index_t fei, void *arg)
-{
- vl_api_ip6_mfib_dump_ctc_t *ctx = arg;
-
- vec_add1 (ctx->entries, fei);
-
- return (0);
+ vec_free (rpaths);
}
static void
-vl_api_ip6_mfib_dump_t_handler (vl_api_ip6_mfib_dump_t * mp)
+vl_api_ip_mroute_dump_t_handler (vl_api_ip_mroute_dump_t * mp)
{
vpe_api_main_t *am = &vpe_api_main;
vl_api_registration_t *reg;
- ip6_main_t *im = &ip6_main;
- mfib_table_t *mfib_table;
- const mfib_prefix_t *pfx;
fib_node_index_t *mfeip;
- fib_route_path_encode_t *api_rpaths = NULL;
- vl_api_ip6_mfib_dump_ctc_t ctx = {
+ fib_protocol_t fproto;
+ u32 fib_index;
+
+ vl_api_ip_mfib_dump_ctx_t ctx = {
.entries = NULL,
};
@@ -586,33 +462,22 @@ vl_api_ip6_mfib_dump_t_handler (vl_api_ip6_mfib_dump_t * mp)
if (!reg)
return;
+ fproto = fib_ip_proto (mp->table.is_ip6);
+ fib_index = mfib_table_find (fproto, ntohl (mp->table.table_id));
- /* *INDENT-OFF* */
- pool_foreach (mfib_table, im->mfibs,
- ({
- ip6_mfib_table_walk(&mfib_table->v6,
- vl_api_ip6_mfib_table_dump_walk,
- &ctx);
+ if (INDEX_INVALID == fib_index)
+ return;
- vec_sort_with_function (ctx.entries, mfib_entry_cmp_for_sort);
+ mfib_table_walk (fib_index, fproto, mfib_route_dump_walk, &ctx);
- vec_foreach(mfeip, ctx.entries)
- {
- pfx = mfib_entry_get_prefix (*mfeip);
- mfib_entry_encode (*mfeip, &api_rpaths);
- send_ip6_mfib_details (am, reg,
- mfib_table->mft_table_id,
- pfx, api_rpaths,
- mp->context);
- }
- vec_reset_length (api_rpaths);
- vec_reset_length (ctx.entries);
+ vec_sort_with_function (ctx.entries, mfib_entry_cmp_for_sort);
- }));
- /* *INDENT-ON* */
+ vec_foreach (mfeip, ctx.entries)
+ {
+ send_ip_mroute_details (am, reg, mp->context, *mfeip);
+ }
vec_free (ctx.entries);
- vec_free (api_rpaths);
}
static void
@@ -698,7 +563,7 @@ vl_api_ip_neighbor_add_del_t_handler (vl_api_ip_neighbor_add_del_t * mp,
ip46_address_t ip;
mac_address_t mac;
ip46_type_t type;
- int rv = 0;
+ int rv;
VALIDATE_SW_IF_INDEX ((&mp->neighbor));
@@ -767,13 +632,14 @@ void
vl_api_ip_table_add_del_t_handler (vl_api_ip_table_add_del_t * mp)
{
vl_api_ip_table_add_del_reply_t *rmp;
- fib_protocol_t fproto = (mp->is_ipv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4);
- u32 table_id = ntohl (mp->table_id);
+ fib_protocol_t fproto = (mp->table.is_ip6 ?
+ FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4);
+ u32 table_id = ntohl (mp->table.table_id);
int rv = 0;
if (mp->is_add)
{
- ip_table_create (fproto, table_id, 1, mp->name);
+ ip_table_create (fproto, table_id, 1, mp->table.name);
}
else
{
@@ -783,398 +649,71 @@ vl_api_ip_table_add_del_t_handler (vl_api_ip_table_add_del_t * mp)
REPLY_MACRO (VL_API_IP_TABLE_ADD_DEL_REPLY);
}
-int
-add_del_route_t_handler (u8 is_multipath,
- u8 is_add,
- u8 is_drop,
- u8 is_unreach,
- u8 is_prohibit,
- u8 is_local,
- u8 is_multicast,
- u8 is_classify,
- u32 classify_table_index,
- u8 is_resolve_host,
- u8 is_resolve_attached,
- u8 is_interface_rx,
- u8 is_rpf_id,
- u8 is_dvr,
- u8 is_source_lookup,
- u8 is_udp_encap,
- u32 fib_index,
- const fib_prefix_t * prefix,
- dpo_proto_t next_hop_proto,
- const ip46_address_t * next_hop,
- u32 next_hop_id,
- u32 next_hop_sw_if_index,
- u8 next_hop_fib_index,
- u16 next_hop_weight,
- u16 next_hop_preference,
- mpls_label_t next_hop_via_label,
- fib_mpls_label_t * next_hop_out_label_stack)
-{
- vnet_classify_main_t *cm = &vnet_classify_main;
- fib_route_path_flags_t path_flags = FIB_ROUTE_PATH_FLAG_NONE;
- fib_route_path_t path = {
- .frp_proto = next_hop_proto,
- .frp_addr = (NULL == next_hop ? zero_addr : *next_hop),
- .frp_sw_if_index = next_hop_sw_if_index,
- .frp_fib_index = next_hop_fib_index,
- .frp_weight = next_hop_weight,
- .frp_preference = next_hop_preference,
- .frp_label_stack = next_hop_out_label_stack,
- };
- fib_route_path_t *paths = NULL;
- fib_entry_flag_t entry_flags = FIB_ENTRY_FLAG_NONE;
-
- /*
- * the special INVALID label means we are not recursing via a
- * label. Exp-null value is never a valid via-label so that
- * also means it's not a via-label and means clients that set
- * it to 0 by default get the expected behaviour
- */
- if ((MPLS_LABEL_INVALID != next_hop_via_label) && (0 != next_hop_via_label))
- {
- path.frp_proto = DPO_PROTO_MPLS;
- path.frp_local_label = next_hop_via_label;
- path.frp_eos = MPLS_NON_EOS;
- }
- if (is_local)
- {
- path_flags |= FIB_ROUTE_PATH_LOCAL;
- if (~0 != next_hop_sw_if_index)
- {
- entry_flags |= (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL);
- }
- }
- if (is_dvr)
- path_flags |= FIB_ROUTE_PATH_DVR;
- if (is_resolve_host)
- path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_HOST;
- if (is_resolve_attached)
- path_flags |= FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED;
- if (is_interface_rx)
- path_flags |= FIB_ROUTE_PATH_INTF_RX;
- if (is_rpf_id)
- path_flags |= FIB_ROUTE_PATH_RPF_ID;
- if (is_source_lookup)
- path_flags |= FIB_ROUTE_PATH_SOURCE_LOOKUP;
- if (is_multicast)
- entry_flags |= FIB_ENTRY_FLAG_MULTICAST;
- if (is_udp_encap)
- {
- path_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
- path.frp_udp_encap_id = next_hop_id;
- }
- if (path.frp_sw_if_index == ~0 && ip46_address_is_zero (&path.frp_addr)
- && path.frp_fib_index != ~0)
- {
- path_flags |= FIB_ROUTE_PATH_DEAG;
- }
-
- path.frp_flags = path_flags;
-
- if (is_drop || (is_local && (~0 == next_hop_sw_if_index)) ||
- is_classify || is_unreach || is_prohibit)
- {
- /*
- * special route types that link directly to the adj
- */
- if (is_add)
- {
- dpo_id_t dpo = DPO_INVALID;
- dpo_proto_t dproto;
-
- dproto = fib_proto_to_dpo (prefix->fp_proto);
-
- if (is_drop)
- ip_null_dpo_add_and_lock (dproto, IP_NULL_ACTION_NONE, &dpo);
- else if (is_local)
- receive_dpo_add_or_lock (dproto, ~0, NULL, &dpo);
- else if (is_unreach)
- ip_null_dpo_add_and_lock (dproto,
- IP_NULL_ACTION_SEND_ICMP_UNREACH, &dpo);
- else if (is_prohibit)
- ip_null_dpo_add_and_lock (dproto,
- IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
- &dpo);
- else if (is_classify)
- {
- if (pool_is_free_index (cm->tables,
- ntohl (classify_table_index)))
- {
- return VNET_API_ERROR_NO_SUCH_TABLE;
- }
-
- dpo_set (&dpo, DPO_CLASSIFY, dproto,
- classify_dpo_create (dproto,
- ntohl (classify_table_index)));
- }
- else
- {
- return VNET_API_ERROR_NO_SUCH_TABLE;
- }
-
- fib_table_entry_special_dpo_update (fib_index,
- prefix,
- FIB_SOURCE_API,
- FIB_ENTRY_FLAG_EXCLUSIVE, &dpo);
- dpo_reset (&dpo);
- }
- else
- {
- fib_table_entry_special_remove (fib_index, prefix, FIB_SOURCE_API);
- }
- }
- else if (is_multipath)
- {
- vec_add1 (paths, path);
-
- if (is_add)
- fib_table_entry_path_add2 (fib_index,
- prefix,
- FIB_SOURCE_API, entry_flags, paths);
- else
- fib_table_entry_path_remove2 (fib_index,
- prefix, FIB_SOURCE_API, paths);
-
- vec_free (paths);
- }
- else
- {
- if (is_add)
- {
- vec_add1 (paths, path);
- fib_table_entry_update (fib_index,
- prefix, FIB_SOURCE_API, entry_flags, paths);
- vec_free (paths);
- }
- else
- {
- fib_table_entry_delete (fib_index, prefix, FIB_SOURCE_API);
- }
- }
-
- return (0);
-}
-
-int
-add_del_route_check (fib_protocol_t table_proto,
- u32 table_id,
- u32 next_hop_sw_if_index,
- dpo_proto_t next_hop_table_proto,
- u32 next_hop_table_id,
- u8 is_rpf_id, u32 * fib_index, u32 * next_hop_fib_index)
-{
- vnet_main_t *vnm = vnet_get_main ();
-
- *fib_index = fib_table_find (table_proto, ntohl (table_id));
- if (~0 == *fib_index)
- {
- /* No such VRF, and we weren't asked to create one */
- return VNET_API_ERROR_NO_SUCH_FIB;
- }
-
- if (!is_rpf_id && ~0 != ntohl (next_hop_sw_if_index))
- {
- if (pool_is_free_index (vnm->interface_main.sw_interfaces,
- ntohl (next_hop_sw_if_index)))
- {
- return VNET_API_ERROR_NO_MATCHING_INTERFACE;
- }
- }
- else
- {
- fib_protocol_t fib_nh_proto;
-
- if (next_hop_table_proto > DPO_PROTO_MPLS)
- return (0);
-
- fib_nh_proto = dpo_proto_to_fib (next_hop_table_proto);
-
- if (is_rpf_id)
- *next_hop_fib_index = mfib_table_find (fib_nh_proto,
- ntohl (next_hop_table_id));
- else
- *next_hop_fib_index = fib_table_find (fib_nh_proto,
- ntohl (next_hop_table_id));
-
- if (~0 == *next_hop_fib_index)
- {
- /* No such VRF, and we weren't asked to create one */
- return VNET_API_ERROR_NO_SUCH_FIB;
- }
- }
-
- return (0);
-}
-
static int
-ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp,
- u32 * stats_index)
+ip_route_add_del_t_handler (vl_api_ip_route_add_del_t * mp, u32 * stats_index)
{
- u32 fib_index, next_hop_fib_index;
- fib_mpls_label_t *label_stack = NULL;
- int rv, ii, n_labels;;
+ fib_route_path_t *rpaths = NULL, *rpath;
+ fib_entry_flag_t entry_flags;
+ vl_api_fib_path_t *apath;
+ fib_prefix_t pfx;
+ u32 fib_index;
+ int rv, ii;
- rv = add_del_route_check (FIB_PROTOCOL_IP4,
- mp->table_id,
- mp->next_hop_sw_if_index,
- DPO_PROTO_IP4,
- mp->next_hop_table_id,
- 0, &fib_index, &next_hop_fib_index);
+ entry_flags = FIB_ENTRY_FLAG_NONE;
+ ip_prefix_decode (&mp->route.prefix, &pfx);
+ rv = fib_api_table_id_decode (pfx.fp_proto,
+ ntohl (mp->route.table_id), &fib_index);
if (0 != rv)
- return (rv);
-
- fib_prefix_t pfx = {
- .fp_len = mp->dst_address_length,
- .fp_proto = FIB_PROTOCOL_IP4,
- };
- clib_memcpy (&pfx.fp_addr.ip4, mp->dst_address, sizeof (pfx.fp_addr.ip4));
-
- ip46_address_t nh;
- clib_memset (&nh, 0, sizeof (nh));
- memcpy (&nh.ip4, mp->next_hop_address, sizeof (nh.ip4));
+ goto out;
- n_labels = mp->next_hop_n_out_labels;
- if (n_labels == 0)
- ;
- else
+ if (0 == mp->route.n_paths)
{
- vec_validate (label_stack, n_labels - 1);
- for (ii = 0; ii < n_labels; ii++)
- {
- label_stack[ii].fml_value =
- ntohl (mp->next_hop_out_label_stack[ii].label);
- label_stack[ii].fml_ttl = mp->next_hop_out_label_stack[ii].ttl;
- label_stack[ii].fml_exp = mp->next_hop_out_label_stack[ii].exp;
- label_stack[ii].fml_mode =
- (mp->next_hop_out_label_stack[ii].is_uniform ?
- FIB_MPLS_LSP_MODE_UNIFORM : FIB_MPLS_LSP_MODE_PIPE);
- }
+ rv = VNET_API_ERROR_NO_PATHS_IN_ROUTE;
+ goto out;
}
- rv = add_del_route_t_handler (mp->is_multipath,
- mp->is_add,
- mp->is_drop,
- mp->is_unreach,
- mp->is_prohibit,
- mp->is_local, 0,
- mp->is_classify,
- mp->classify_table_index,
- mp->is_resolve_host,
- mp->is_resolve_attached, 0, 0,
- mp->is_dvr,
- mp->is_source_lookup,
- mp->is_udp_encap,
- fib_index, &pfx, DPO_PROTO_IP4,
- &nh,
- ntohl (mp->next_hop_id),
- ntohl (mp->next_hop_sw_if_index),
- next_hop_fib_index,
- mp->next_hop_weight,
- mp->next_hop_preference,
- ntohl (mp->next_hop_via_label), label_stack);
-
- if (mp->is_add && 0 == rv)
- *stats_index = fib_table_entry_get_stats_index (fib_index, &pfx);
-
- return (rv);
-}
-
-static int
-ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp,
- u32 * stats_index)
-{
- fib_mpls_label_t *label_stack = NULL;
- u32 fib_index, next_hop_fib_index;
- int rv, ii, n_labels;;
+ vec_validate (rpaths, mp->route.n_paths - 1);
- rv = add_del_route_check (FIB_PROTOCOL_IP6,
- mp->table_id,
- mp->next_hop_sw_if_index,
- DPO_PROTO_IP6,
- mp->next_hop_table_id,
- 0, &fib_index, &next_hop_fib_index);
+ for (ii = 0; ii < mp->route.n_paths; ii++)
+ {
+ apath = &mp->route.paths[ii];
+ rpath = &rpaths[ii];
- if (0 != rv)
- return (rv);
+ rv = fib_api_path_decode (apath, rpath);
- fib_prefix_t pfx = {
- .fp_len = mp->dst_address_length,
- .fp_proto = FIB_PROTOCOL_IP6,
- };
- clib_memcpy (&pfx.fp_addr.ip6, mp->dst_address, sizeof (pfx.fp_addr.ip6));
-
- ip46_address_t nh;
- clib_memset (&nh, 0, sizeof (nh));
- memcpy (&nh.ip6, mp->next_hop_address, sizeof (nh.ip6));
+ if ((rpath->frp_flags & FIB_ROUTE_PATH_LOCAL) &&
+ (~0 == rpath->frp_sw_if_index))
+ entry_flags |= (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL);
- n_labels = mp->next_hop_n_out_labels;
- if (n_labels == 0)
- ;
- else
- {
- vec_validate (label_stack, n_labels - 1);
- for (ii = 0; ii < n_labels; ii++)
- {
- label_stack[ii].fml_value =
- ntohl (mp->next_hop_out_label_stack[ii].label);
- label_stack[ii].fml_ttl = mp->next_hop_out_label_stack[ii].ttl;
- label_stack[ii].fml_exp = mp->next_hop_out_label_stack[ii].exp;
- label_stack[ii].fml_mode =
- (mp->next_hop_out_label_stack[ii].is_uniform ?
- FIB_MPLS_LSP_MODE_UNIFORM : FIB_MPLS_LSP_MODE_PIPE);
- }
+ if (0 != rv)
+ goto out;
}
- rv = add_del_route_t_handler (mp->is_multipath,
- mp->is_add,
- mp->is_drop,
- mp->is_unreach,
- mp->is_prohibit,
- mp->is_local, 0,
- mp->is_classify,
- mp->classify_table_index,
- mp->is_resolve_host,
- mp->is_resolve_attached, 0, 0,
- mp->is_dvr,
- mp->is_source_lookup,
- mp->is_udp_encap,
- fib_index, &pfx, DPO_PROTO_IP6,
- &nh, ntohl (mp->next_hop_id),
- ntohl (mp->next_hop_sw_if_index),
- next_hop_fib_index,
- mp->next_hop_weight,
- mp->next_hop_preference,
- ntohl (mp->next_hop_via_label), label_stack);
+ fib_api_route_add_del (mp->is_add,
+ mp->is_multipath,
+ fib_index, &pfx, entry_flags, rpaths);
if (mp->is_add && 0 == rv)
*stats_index = fib_table_entry_get_stats_index (fib_index, &pfx);
+out:
+ vec_free (rpaths);
+
return (rv);
}
void
-vl_api_ip_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
+vl_api_ip_route_add_del_t_handler (vl_api_ip_route_add_del_t * mp)
{
- vl_api_ip_add_del_route_reply_t *rmp;
- u32 stats_index;
+ vl_api_ip_route_add_del_reply_t *rmp;
+ u32 stats_index = ~0;
int rv;
- vnet_main_t *vnm = vnet_get_main ();
-
- vnm->api_errno = 0;
- stats_index = ~0;
- if (mp->is_ipv6)
- rv = ip6_add_del_route_t_handler (mp, &stats_index);
- else
- rv = ip4_add_del_route_t_handler (mp, &stats_index);
-
- rv = (rv == 0) ? vnm->api_errno : rv;
+ rv = ip_route_add_del_t_handler (mp, &stats_index);
/* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_IP_ADD_DEL_ROUTE_REPLY,
+ REPLY_MACRO2 (VL_API_IP_ROUTE_ADD_DEL_REPLY,
({
rmp->stats_index = htonl (stats_index);
}))
@@ -1221,80 +760,37 @@ ip_table_create (fib_protocol_t fproto,
}
}
-static int
-add_del_mroute_check (fib_protocol_t table_proto,
- u32 table_id,
- u32 next_hop_sw_if_index, u8 is_local, u32 * fib_index)
-{
- vnet_main_t *vnm = vnet_get_main ();
-
- *fib_index = mfib_table_find (table_proto, ntohl (table_id));
- if (~0 == *fib_index)
- {
- /* No such table */
- return VNET_API_ERROR_NO_SUCH_FIB;
- }
-
- if (~0 != ntohl (next_hop_sw_if_index))
- {
- if (pool_is_free_index (vnm->interface_main.sw_interfaces,
- ntohl (next_hop_sw_if_index)))
- {
- return VNET_API_ERROR_NO_MATCHING_INTERFACE;
- }
- }
-
- return (0);
-}
-
-static fib_node_index_t
+static u32
mroute_add_del_handler (u8 is_add,
- u8 is_local,
+ u8 is_multipath,
u32 fib_index,
const mfib_prefix_t * prefix,
- dpo_proto_t nh_proto,
u32 entry_flags,
- fib_rpf_id_t rpf_id,
- u32 next_hop_sw_if_index,
- ip46_address_t * nh, u32 itf_flags, u32 bier_imp)
+ u32 rpf_id, fib_route_path_t * rpaths)
{
- fib_node_index_t mfib_entry_index = ~0;
-
- fib_route_path_t path = {
- .frp_sw_if_index = next_hop_sw_if_index,
- .frp_proto = nh_proto,
- .frp_addr = *nh,
- };
+ u32 mfib_entry_index = ~0;
- if (is_local)
- path.frp_flags |= FIB_ROUTE_PATH_LOCAL;
-
- if (DPO_PROTO_BIER == nh_proto)
- {
- path.frp_bier_imp = bier_imp;
- path.frp_flags = FIB_ROUTE_PATH_BIER_IMP;
- }
- else if (!is_local && ~0 == next_hop_sw_if_index)
+ if (0 == vec_len (rpaths))
{
mfib_entry_index = mfib_table_entry_update (fib_index, prefix,
MFIB_SOURCE_API,
rpf_id, entry_flags);
- goto done;
- }
-
- if (is_add)
- {
- mfib_entry_index = mfib_table_entry_path_update (fib_index, prefix,
- MFIB_SOURCE_API,
- &path, itf_flags);
}
else
{
- mfib_table_entry_path_remove (fib_index, prefix,
- MFIB_SOURCE_API, &path);
+ if (is_add)
+ {
+ mfib_entry_index =
+ mfib_table_entry_paths_update (fib_index, prefix,
+ MFIB_SOURCE_API, rpaths);
+ }
+ else
+ {
+ mfib_table_entry_paths_remove (fib_index, prefix,
+ MFIB_SOURCE_API, rpaths);
+ }
}
-done:
return (mfib_entry_index);
}
@@ -1302,64 +798,43 @@ static int
api_mroute_add_del_t_handler (vl_api_ip_mroute_add_del_t * mp,
u32 * stats_index)
{
+ fib_route_path_t *rpath, *rpaths = NULL;
fib_node_index_t mfib_entry_index;
- fib_protocol_t fproto;
- dpo_proto_t nh_proto;
- ip46_address_t nh;
+ mfib_prefix_t pfx;
u32 fib_index;
int rv;
+ u16 ii;
- nh_proto = mp->next_hop_afi;
- fproto = (mp->is_ipv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4);
- rv = add_del_mroute_check (fproto,
- mp->table_id,
- mp->next_hop_sw_if_index,
- mp->is_local, &fib_index);
+ ip_mprefix_decode (&mp->route.prefix, &pfx);
+ rv = mfib_api_table_id_decode (pfx.fp_proto,
+ ntohl (mp->route.table_id), &fib_index);
if (0 != rv)
- return (rv);
+ goto out;
- mfib_prefix_t pfx = {
- .fp_len = ntohs (mp->grp_address_length),
- .fp_proto = fproto,
- };
+ vec_validate (rpaths, mp->route.n_paths - 1);
- if (FIB_PROTOCOL_IP4 == fproto)
- {
- clib_memcpy (&pfx.fp_grp_addr.ip4, mp->grp_address,
- sizeof (pfx.fp_grp_addr.ip4));
- clib_memcpy (&pfx.fp_src_addr.ip4, mp->src_address,
- sizeof (pfx.fp_src_addr.ip4));
- clib_memset (&nh.ip6, 0, sizeof (nh.ip6));
- clib_memcpy (&nh.ip4, mp->nh_address, sizeof (nh.ip4));
- if (!ip46_address_is_zero (&pfx.fp_src_addr))
- pfx.fp_len = 64;
- }
- else
+ for (ii = 0; ii < mp->route.n_paths; ii++)
{
- clib_memcpy (&pfx.fp_grp_addr.ip6, mp->grp_address,
- sizeof (pfx.fp_grp_addr.ip6));
- clib_memcpy (&pfx.fp_src_addr.ip6, mp->src_address,
- sizeof (pfx.fp_src_addr.ip6));
- clib_memcpy (&nh.ip6, mp->nh_address, sizeof (nh.ip6));
- if (!ip46_address_is_zero (&pfx.fp_src_addr))
- pfx.fp_len = 256;
+ rpath = &rpaths[ii];
+
+ rv = mfib_api_path_decode (&mp->route.paths[ii], rpath);
+
+ if (0 != rv)
+ goto out;
}
mfib_entry_index = mroute_add_del_handler (mp->is_add,
- mp->is_local,
+ mp->is_add,
fib_index, &pfx,
- nh_proto,
- ntohl (mp->entry_flags),
- ntohl (mp->rpf_id),
- ntohl (mp->next_hop_sw_if_index),
- &nh,
- ntohl (mp->itf_flags),
- ntohl (mp->bier_imp));
+ ntohl (mp->route.entry_flags),
+ ntohl (mp->route.rpf_id),
+ rpaths);
if (~0 != mfib_entry_index)
*stats_index = mfib_entry_get_stats_index (mfib_entry_index);
+out:
return (rv);
}
@@ -1367,14 +842,9 @@ void
vl_api_ip_mroute_add_del_t_handler (vl_api_ip_mroute_add_del_t * mp)
{
vl_api_ip_mroute_add_del_reply_t *rmp;
- vnet_main_t *vnm;
- u32 stats_index;
+ u32 stats_index = ~0;
int rv;
- vnm = vnet_get_main ();
- vnm->api_errno = 0;
- stats_index = ~0;
-
rv = api_mroute_add_del_t_handler (mp, &stats_index);
/* *INDENT-OFF* */
@@ -1406,8 +876,8 @@ send_ip_details (vpe_api_main_t * am,
static void
send_ip_address_details (vpe_api_main_t * am,
vl_api_registration_t * reg,
- u8 * ip, u16 prefix_length,
- u32 sw_if_index, u8 is_ipv6, u32 context)
+ const fib_prefix_t * pfx,
+ u32 sw_if_index, u32 context)
{
vl_api_ip_address_details_t *mp;
@@ -1415,19 +885,9 @@ send_ip_address_details (vpe_api_main_t * am,
clib_memset (mp, 0, sizeof (*mp));
mp->_vl_msg_id = ntohs (VL_API_IP_ADDRESS_DETAILS);
- if (is_ipv6)
- {
- clib_memcpy (&mp->ip, ip, sizeof (mp->ip));
- }
- else
- {
- u32 *tp = (u32 *) mp->ip;
- *tp = *(u32 *) ip;
- }
- mp->prefix_length = prefix_length;
+ ip_prefix_encode (pfx, &mp->prefix);
mp->context = context;
mp->sw_if_index = htonl (sw_if_index);
- mp->is_ipv6 = is_ipv6;
vl_api_send_msg (reg, (u8 *) mp);
}
@@ -1437,8 +897,6 @@ vl_api_ip_address_dump_t_handler (vl_api_ip_address_dump_t * mp)
{
vpe_api_main_t *am = &vpe_api_main;
vl_api_registration_t *reg;
- ip6_address_t *r6;
- ip4_address_t *r4;
ip6_main_t *im6 = &ip6_main;
ip4_main_t *im4 = &ip4_main;
ip_lookup_main_t *lm6 = &im6->lookup_main;
@@ -1464,10 +922,12 @@ vl_api_ip_address_dump_t_handler (vl_api_ip_address_dump_t * mp)
* than one interface */
foreach_ip_interface_address (lm6, ia, sw_if_index, 0,
({
- r6 = ip_interface_address_get_address (lm6, ia);
- u16 prefix_length = ia->address_length;
- send_ip_address_details(am, reg, (u8*)r6, prefix_length,
- sw_if_index, 1, mp->context);
+ fib_prefix_t pfx = {
+ .fp_addr.ip6 = *(ip6_address_t *)ip_interface_address_get_address (lm6, ia),
+ .fp_len = ia->address_length,
+ .fp_proto = FIB_PROTOCOL_IP6,
+ };
+ send_ip_address_details(am, reg, &pfx, sw_if_index, mp->context);
}));
/* *INDENT-ON* */
}
@@ -1476,10 +936,13 @@ vl_api_ip_address_dump_t_handler (vl_api_ip_address_dump_t * mp)
/* *INDENT-OFF* */
foreach_ip_interface_address (lm4, ia, sw_if_index, 0,
({
- r4 = ip_interface_address_get_address (lm4, ia);
- u16 prefix_length = ia->address_length;
- send_ip_address_details(am, reg, (u8*)r4, prefix_length,
- sw_if_index, 0, mp->context);
+ fib_prefix_t pfx = {
+ .fp_addr.ip4 = *(ip4_address_t *)ip_interface_address_get_address (lm4, ia),
+ .fp_len = ia->address_length,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ };
+
+ send_ip_address_details(am, reg, &pfx, sw_if_index, mp->context);
}));
/* *INDENT-ON* */
}
@@ -1883,22 +1346,7 @@ vl_mfib_signal_send_one (vl_api_registration_t * reg,
mp->table_id = ntohl (mfib->mft_table_id);
mp->sw_if_index = ntohl (mfi->mfi_sw_if_index);
- if (FIB_PROTOCOL_IP4 == prefix->fp_proto)
- {
- mp->grp_address_len = ntohs (prefix->fp_len);
-
- memcpy (mp->grp_address, &prefix->fp_grp_addr.ip4, 4);
- if (prefix->fp_len > 32)
- {
- memcpy (mp->src_address, &prefix->fp_src_addr.ip4, 4);
- }
- }
- else
- {
- mp->grp_address_len = ntohs (prefix->fp_len);
-
- ASSERT (0);
- }
+ ip_mprefix_encode (prefix, &mp->prefix);
if (0 != mfs->mfs_buffer_len)
{
@@ -3369,9 +2817,11 @@ static walk_rc_t
send_ip_punt_redirect_details (u32 rx_sw_if_index,
const ip_punt_redirect_rx_t * ipr, void *arg)
{
- fib_route_path_encode_t *api_rpaths = NULL;
ip_punt_redirect_walk_ctx_t *ctx = arg;
vl_api_ip_punt_redirect_details_t *mp;
+ fib_path_encode_ctx_t path_ctx = {
+ .rpaths = NULL,
+ };
mp = vl_msg_api_alloc (sizeof (*mp));
if (!mp)
@@ -3381,17 +2831,17 @@ send_ip_punt_redirect_details (u32 rx_sw_if_index,
mp->_vl_msg_id = ntohs (VL_API_IP_PUNT_REDIRECT_DETAILS);
mp->context = ctx->context;
- fib_path_list_walk_w_ext (ipr->pl, NULL, fib_path_encode, &api_rpaths);
+ fib_path_list_walk_w_ext (ipr->pl, NULL, fib_path_encode, &path_ctx);
mp->punt.rx_sw_if_index = htonl (rx_sw_if_index);
- mp->punt.tx_sw_if_index = htonl (api_rpaths[0].rpath.frp_sw_if_index);
+ mp->punt.tx_sw_if_index = htonl (path_ctx.rpaths[0].frp_sw_if_index);
- ip_address_encode (&api_rpaths[0].rpath.frp_addr,
+ ip_address_encode (&path_ctx.rpaths[0].frp_addr,
fib_proto_to_ip46 (ipr->fproto), &mp->punt.nh);
vl_api_send_msg (ctx->reg, (u8 *) mp);
- vec_free (api_rpaths);
+ vec_free (path_ctx.rpaths);
return (WALK_CONTINUE);
}
@@ -3461,8 +2911,8 @@ ip_api_hookup (vlib_main_t * vm)
/*
* Mark the route add/del API as MP safe
*/
- am->is_mp_safe[VL_API_IP_ADD_DEL_ROUTE] = 1;
- am->is_mp_safe[VL_API_IP_ADD_DEL_ROUTE_REPLY] = 1;
+ am->is_mp_safe[VL_API_IP_ROUTE_ADD_DEL] = 1;
+ am->is_mp_safe[VL_API_IP_ROUTE_ADD_DEL_REPLY] = 1;
/*
* Set up the (msg_name, crc, message-id) table
diff --git a/src/vnet/ip/ip_types_api.c b/src/vnet/ip/ip_types_api.c
index fd8d24f36fe..6ad2c366dff 100644
--- a/src/vnet/ip/ip_types_api.c
+++ b/src/vnet/ip/ip_types_api.c
@@ -229,6 +229,9 @@ ip_mprefix_decode (const vl_api_mprefix_t * in, mfib_prefix_t * out)
ip_address_union_decode (&in->grp_address, in->af, &out->fp_grp_addr);
ip_address_union_decode (&in->src_address, in->af, &out->fp_src_addr);
+
+ if (!ip46_address_is_zero (&out->fp_src_addr))
+ out->fp_len = (out->fp_proto == FIB_PROTOCOL_IP6 ? 256 : 64);
}
/*
diff --git a/src/vnet/ip/lookup.c b/src/vnet/ip/lookup.c
index 5c6fec1810b..8c89ed4f490 100644
--- a/src/vnet/ip/lookup.c
+++ b/src/vnet/ip/lookup.c
@@ -441,7 +441,7 @@ vnet_ip_route_cmd (vlib_main_t * vm,
}
else if (0 < vec_len (rpaths))
{
- u32 k, j, n, incr;
+ u32 k, n, incr;
ip46_address_t dst = prefixs[i].fp_addr;
f64 t[2];
n = count;
@@ -451,25 +451,20 @@ vnet_ip_route_cmd (vlib_main_t * vm,
for (k = 0; k < n; k++)
{
- for (j = 0; j < vec_len (rpaths); j++)
- {
- fib_prefix_t rpfx = {
- .fp_len = prefixs[i].fp_len,
- .fp_proto = prefixs[i].fp_proto,
- .fp_addr = dst,
- };
-
- if (is_del)
- fib_table_entry_path_remove2 (fib_index,
- &rpfx,
- FIB_SOURCE_CLI, &rpaths[j]);
- else
- fib_table_entry_path_add2 (fib_index,
- &rpfx,
- FIB_SOURCE_CLI,
- FIB_ENTRY_FLAG_NONE,
- &rpaths[j]);
- }
+ fib_prefix_t rpfx = {
+ .fp_len = prefixs[i].fp_len,
+ .fp_proto = prefixs[i].fp_proto,
+ .fp_addr = dst,
+ };
+
+ if (is_del)
+ fib_table_entry_path_remove2 (fib_index,
+ &rpfx, FIB_SOURCE_CLI, rpaths);
+ else
+ fib_table_entry_path_add2 (fib_index,
+ &rpfx,
+ FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_NONE, rpaths);
if (FIB_PROTOCOL_IP4 == prefixs[0].fp_proto)
{
@@ -485,9 +480,9 @@ vnet_ip_route_cmd (vlib_main_t * vm,
clib_host_to_net_u64 (incr +
clib_net_to_host_u64 (dst.ip6.as_u64
[bucket]));
-
}
}
+
t[1] = vlib_time_now (vm);
if (count > 1)
vlib_cli_output (vm, "%.6e routes/sec", count / (t[1] - t[0]));
@@ -499,7 +494,6 @@ vnet_ip_route_cmd (vlib_main_t * vm,
}
}
-
done:
vec_free (dpos);
vec_free (prefixs);
@@ -805,20 +799,17 @@ vnet_ip_mroute_cmd (vlib_main_t * vm,
unformat_input_t * main_input, vlib_cli_command_t * cmd)
{
unformat_input_t _line_input, *line_input = &_line_input;
+ fib_route_path_t rpath, *rpaths = NULL;
clib_error_t *error = NULL;
- fib_route_path_t rpath;
- u32 table_id, is_del;
- vnet_main_t *vnm;
+ u32 table_id, is_del, payload_proto;
mfib_prefix_t pfx;
u32 fib_index;
- mfib_itf_flags_t iflags = 0;
mfib_entry_flags_t eflags = 0;
u32 gcount, scount, ss, gg, incr;
f64 timet[2];
u32 rpf_id = MFIB_RPF_ID_NONE;
gcount = scount = 1;
- vnm = vnet_get_main ();
is_del = 0;
table_id = 0;
clib_memset (&pfx, 0, sizeof (pfx));
@@ -887,51 +878,6 @@ vnet_ip_mroute_cmd (vlib_main_t * vm,
pfx.fp_proto = FIB_PROTOCOL_IP6;
pfx.fp_len = 128;
}
- else if (unformat (line_input, "via %U %U %U",
- unformat_ip4_address, &rpath.frp_addr.ip4,
- unformat_vnet_sw_interface, vnm,
- &rpath.frp_sw_if_index,
- unformat_mfib_itf_flags, &iflags))
- {
- rpath.frp_weight = 1;
- }
- else if (unformat (line_input, "via %U %U %U",
- unformat_ip6_address, &rpath.frp_addr.ip6,
- unformat_vnet_sw_interface, vnm,
- &rpath.frp_sw_if_index,
- unformat_mfib_itf_flags, &iflags))
- {
- rpath.frp_weight = 1;
- }
- else if (unformat (line_input, "via %U %U",
- unformat_vnet_sw_interface, vnm,
- &rpath.frp_sw_if_index,
- unformat_mfib_itf_flags, &iflags))
- {
- clib_memset (&rpath.frp_addr, 0, sizeof (rpath.frp_addr));
- rpath.frp_weight = 1;
- }
- else if (unformat (line_input, "via %U %U",
- unformat_ip4_address, &rpath.frp_addr.ip4,
- unformat_vnet_sw_interface, vnm,
- &rpath.frp_sw_if_index))
- {
- rpath.frp_weight = 1;
- }
- else if (unformat (line_input, "via %U %U",
- unformat_ip6_address, &rpath.frp_addr.ip6,
- unformat_vnet_sw_interface, vnm,
- &rpath.frp_sw_if_index))
- {
- rpath.frp_weight = 1;
- }
- else if (unformat (line_input, "via %U",
- unformat_vnet_sw_interface, vnm,
- &rpath.frp_sw_if_index))
- {
- clib_memset (&rpath.frp_addr, 0, sizeof (rpath.frp_addr));
- rpath.frp_weight = 1;
- }
else if (unformat (line_input, "via local Forward"))
{
clib_memset (&rpath.frp_addr, 0, sizeof (rpath.frp_addr));
@@ -942,9 +888,17 @@ vnet_ip_mroute_cmd (vlib_main_t * vm,
* set the path proto appropriately for the prefix
*/
rpath.frp_proto = fib_proto_to_dpo (pfx.fp_proto);
- iflags = MFIB_ITF_FLAG_FORWARD;
+ rpath.frp_mitf_flags = MFIB_ITF_FLAG_FORWARD;
+ }
+ else if (unformat (line_input, "via %U",
+ unformat_fib_route_path, &rpath, &payload_proto))
+ {
+ vec_add1 (rpaths, rpath);
}
else if (unformat (line_input, "%U",
+ unformat_mfib_itf_flags, &rpath.frp_mitf_flags))
+ ;
+ else if (unformat (line_input, "%U",
unformat_mfib_entry_flags, &eflags))
;
else
@@ -987,7 +941,7 @@ vnet_ip_mroute_cmd (vlib_main_t * vm,
{
for (gg = 0; gg < gcount; gg++)
{
- if (is_del && 0 == rpath.frp_weight)
+ if (is_del && 0 == vec_len (rpaths))
{
/* no path provided => route delete */
mfib_table_entry_delete (fib_index, &pfx, MFIB_SOURCE_CLI);
@@ -1001,11 +955,10 @@ vnet_ip_mroute_cmd (vlib_main_t * vm,
{
if (is_del)
mfib_table_entry_path_remove (fib_index,
- &pfx, MFIB_SOURCE_CLI, &rpath);
+ &pfx, MFIB_SOURCE_CLI, rpaths);
else
mfib_table_entry_path_update (fib_index,
- &pfx, MFIB_SOURCE_CLI, &rpath,
- iflags);
+ &pfx, MFIB_SOURCE_CLI, rpaths);
}
if (FIB_PROTOCOL_IP4 == pfx.fp_proto)
@@ -1050,6 +1003,7 @@ vnet_ip_mroute_cmd (vlib_main_t * vm,
(scount * gcount) / (timet[1] - timet[0]));
done:
+ vec_free (rpaths);
unformat_free (line_input);
return error;
diff --git a/src/vnet/mfib/ip6_mfib.c b/src/vnet/mfib/ip6_mfib.c
index 31a92687e1a..5b15c8d1736 100644
--- a/src/vnet/mfib/ip6_mfib.c
+++ b/src/vnet/mfib/ip6_mfib.c
@@ -141,8 +141,9 @@ ip6_create_mfib_with_table_id (u32 table_id,
.frp_addr = zero_addr,
.frp_sw_if_index = 0xffffffff,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
pool_get_aligned(ip6_main.mfibs, mfib_table, CLIB_CACHE_LINE_BYTES);
@@ -180,8 +181,7 @@ ip6_create_mfib_with_table_id (u32 table_id,
mfib_table_entry_path_update(mfib_table->mft_index,
&pfx,
MFIB_SOURCE_SPECIAL,
- &path_for_us,
- MFIB_ITF_FLAG_FORWARD);
+ &path_for_us);
}));
return (mfib_table->mft_index);
@@ -200,7 +200,7 @@ ip6_mfib_table_destroy (ip6_mfib_t *mfib)
.frp_addr = zero_addr,
.frp_sw_if_index = 0xffffffff,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = FIB_ROUTE_PATH_LOCAL,
};
@@ -236,7 +236,8 @@ ip6_mfib_interface_enable_disable (u32 sw_if_index, int is_enable)
.frp_addr = zero_addr,
.frp_sw_if_index = sw_if_index,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
+ .frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT,
};
mfib_prefix_t pfx = {
.fp_proto = FIB_PROTOCOL_IP6,
@@ -253,8 +254,7 @@ ip6_mfib_interface_enable_disable (u32 sw_if_index, int is_enable)
mfib_table_entry_path_update(mfib_index,
&pfx,
MFIB_SOURCE_SPECIAL,
- &path,
- MFIB_ITF_FLAG_ACCEPT);
+ &path);
});
}
else
diff --git a/src/vnet/mfib/mfib_api.c b/src/vnet/mfib/mfib_api.c
new file mode 100644
index 00000000000..bcab83ba0e5
--- /dev/null
+++ b/src/vnet/mfib/mfib_api.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+#include <vnet/mfib/mfib_api.h>
+#include <vnet/mfib/mfib_table.h>
+#include <vnet/fib/fib_api.h>
+#include <vnet/ip/ip_types_api.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+static vl_api_mfib_itf_flags_t
+mfib_api_path_itf_flags_encode (mfib_itf_flags_t flags)
+{
+ vl_api_mfib_itf_flags_t out = MFIB_API_ITF_FLAG_NONE;
+
+ switch (flags)
+ {
+ case MFIB_ITF_FLAG_NONE:
+ out = MFIB_API_ITF_FLAG_NONE;
+ break;
+ case MFIB_ITF_FLAG_NEGATE_SIGNAL:
+ out = MFIB_API_ITF_FLAG_NEGATE_SIGNAL;
+ break;
+ case MFIB_ITF_FLAG_ACCEPT:
+ out = MFIB_API_ITF_FLAG_ACCEPT;
+ break;
+ case MFIB_ITF_FLAG_FORWARD:
+ out = MFIB_API_ITF_FLAG_FORWARD;
+ break;
+ case MFIB_ITF_FLAG_SIGNAL_PRESENT:
+ out = MFIB_API_ITF_FLAG_SIGNAL_PRESENT;
+ break;
+ case MFIB_ITF_FLAG_DONT_PRESERVE:
+ out = MFIB_API_ITF_FLAG_DONT_PRESERVE;
+ break;
+ }
+ return (ntohl(out));
+}
+
+void
+mfib_api_path_encode (const fib_route_path_t *in,
+ vl_api_mfib_path_t *out)
+{
+ out->itf_flags = mfib_api_path_itf_flags_encode(in->frp_mitf_flags);
+
+ fib_api_path_encode(in, &out->path);
+}
+
+static void
+mfib_api_path_itf_flags_decode (vl_api_mfib_itf_flags_t in,
+ mfib_itf_flags_t *out)
+{
+ in = clib_net_to_host_u32(in);
+
+ if (in & MFIB_API_ITF_FLAG_NONE)
+ *out |= MFIB_ITF_FLAG_NONE;
+ if (in & MFIB_API_ITF_FLAG_NEGATE_SIGNAL)
+ *out |= MFIB_ITF_FLAG_NEGATE_SIGNAL;
+ if (in & MFIB_API_ITF_FLAG_ACCEPT)
+ *out |= MFIB_ITF_FLAG_ACCEPT;
+ if (in & MFIB_API_ITF_FLAG_FORWARD)
+ *out |= MFIB_ITF_FLAG_FORWARD;
+ if (in & MFIB_API_ITF_FLAG_SIGNAL_PRESENT)
+ *out |= MFIB_ITF_FLAG_SIGNAL_PRESENT;
+ if (in & MFIB_API_ITF_FLAG_DONT_PRESERVE)
+ *out |= MFIB_ITF_FLAG_DONT_PRESERVE;
+}
+
+int
+mfib_api_path_decode (vl_api_mfib_path_t *in,
+ fib_route_path_t *out)
+{
+ mfib_api_path_itf_flags_decode(in->itf_flags, &out->frp_mitf_flags);
+
+ return (fib_api_path_decode(&in->path, out));
+}
+
+int
+mfib_api_table_id_decode (fib_protocol_t fproto,
+ u32 table_id,
+ u32 *fib_index)
+{
+ *fib_index = mfib_table_find(fproto, table_id);
+
+ if (INDEX_INVALID == *fib_index)
+ {
+ return VNET_API_ERROR_NO_SUCH_FIB;
+ }
+
+ return (0);
+}
diff --git a/src/vnet/mfib/mfib_api.h b/src/vnet/mfib/mfib_api.h
new file mode 100644
index 00000000000..f9c0a74bedb
--- /dev/null
+++ b/src/vnet/mfib/mfib_api.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MFIB_API_H__
+#define __MFIB_API_H__
+
+#include <vnet/mfib/mfib_types.h>
+
+/**
+ * Forward declare the API type, no need to include the generated api headers
+ */
+struct _vl_api_mfib_path;
+
+/**
+ * Encode and decode functions from the API types to internal types
+ */
+extern void mfib_api_path_encode(const fib_route_path_t *in,
+ struct _vl_api_mfib_path *out);
+extern int mfib_api_path_decode(struct _vl_api_mfib_path *in,
+ fib_route_path_t *out);
+
+extern int mfib_api_table_id_decode(fib_protocol_t fproto,
+ u32 table_id,
+ u32 *fib_index);
+
+#endif /* __MFIB_API_H__ */
diff --git a/src/vnet/mfib/mfib_entry.c b/src/vnet/mfib/mfib_entry.c
index 18562219ce2..f169dc0886e 100644
--- a/src/vnet/mfib/mfib_entry.c
+++ b/src/vnet/mfib/mfib_entry.c
@@ -673,21 +673,12 @@ mfib_entry_stack (mfib_entry_t *mfib_entry,
&bw_ctx);
}
-static fib_node_index_t
-mfib_entry_src_path_add (mfib_entry_src_t *msrc,
- const fib_route_path_t *rpath)
+static fib_node_index_t*
+mfib_entry_src_paths_add (mfib_entry_src_t *msrc,
+ const fib_route_path_t *rpaths)
{
- fib_node_index_t path_index;
- fib_route_path_t *rpaths;
-
ASSERT(!(MFIB_ENTRY_FLAG_EXCLUSIVE & msrc->mfes_flags));
- /*
- * path-lists require a vector of paths
- */
- rpaths = NULL;
- vec_add1(rpaths, rpath[0]);
-
if (FIB_NODE_INDEX_INVALID == msrc->mfes_pl)
{
/* A non-shared path-list */
@@ -696,33 +687,16 @@ mfib_entry_src_path_add (mfib_entry_src_t *msrc,
fib_path_list_lock(msrc->mfes_pl);
}
- path_index = fib_path_list_path_add(msrc->mfes_pl, rpaths);
-
- vec_free(rpaths);
-
- return (path_index);
+ return (fib_path_list_paths_add(msrc->mfes_pl, rpaths));
}
-static fib_node_index_t
-mfib_entry_src_path_remove (mfib_entry_src_t *msrc,
- const fib_route_path_t *rpath)
+static fib_node_index_t*
+mfib_entry_src_paths_remove (mfib_entry_src_t *msrc,
+ const fib_route_path_t *rpaths)
{
- fib_node_index_t path_index;
- fib_route_path_t *rpaths;
-
ASSERT(!(MFIB_ENTRY_FLAG_EXCLUSIVE & msrc->mfes_flags));
- /*
- * path-lists require a vector of paths
- */
- rpaths = NULL;
- vec_add1(rpaths, rpath[0]);
-
- path_index = fib_path_list_path_remove(msrc->mfes_pl, rpaths);
-
- vec_free(rpaths);
-
- return (path_index);
+ return (fib_path_list_paths_remove(msrc->mfes_pl, rpaths));
}
static void
@@ -819,7 +793,12 @@ mfib_entry_src_ok_for_delete (const mfib_entry_src_t *msrc)
{
return ((INDEX_INVALID == msrc->mfes_cover &&
MFIB_ENTRY_FLAG_NONE == msrc->mfes_flags &&
- 0 == fib_path_list_get_n_paths(msrc->mfes_pl)));
+ 0 == fib_path_list_get_n_paths(msrc->mfes_pl)) &&
+ (0 == hash_elts(msrc->mfes_itfs)));
+
+ /* return ((MFIB_ENTRY_FLAG_NONE == msrc->mfes_flags) && */
+ /* (0 == fib_path_list_get_n_paths(msrc->mfes_pl)) && */
+ /* (0 == hash_elts(msrc->mfes_itfs))); */
}
@@ -931,18 +910,26 @@ mfib_entry_itf_remove (mfib_entry_src_t *msrc,
hash_unset(msrc->mfes_itfs, sw_if_index);
}
+static int
+mfib_entry_path_itf_based (const fib_route_path_t *rpath)
+{
+ return (!(rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP) &&
+ ~0 != rpath->frp_sw_if_index);
+}
+
void
mfib_entry_path_update (fib_node_index_t mfib_entry_index,
mfib_source_t source,
- const fib_route_path_t *rpath,
- mfib_itf_flags_t itf_flags)
+ const fib_route_path_t *rpaths)
{
- fib_node_index_t path_index;
+ fib_node_index_t* path_indices, path_index;
+ const fib_route_path_t *rpath;
mfib_source_t current_best;
mfib_path_ext_t *path_ext;
mfib_entry_t *mfib_entry;
mfib_entry_src_t *msrc;
mfib_itf_flags_t old;
+ u32 ii;
mfib_entry = mfib_entry_get(mfib_entry_index);
ASSERT(NULL != mfib_entry);
@@ -953,61 +940,73 @@ mfib_entry_path_update (fib_node_index_t mfib_entry_index,
* add the path to the path-list. If it's a duplicate we'll get
* back the original path.
*/
- path_index = mfib_entry_src_path_add(msrc, rpath);
+ path_indices = mfib_entry_src_paths_add(msrc, rpaths);
- /*
- * find the path extension for that path
- */
- path_ext = mfib_entry_path_ext_find(msrc->mfes_exts, path_index);
-
- if (NULL == path_ext)
+ vec_foreach_index(ii, path_indices)
{
- old = MFIB_ITF_FLAG_NONE;
- path_ext = mfib_path_ext_add(msrc, path_index, itf_flags);
- }
- else
- {
- old = path_ext->mfpe_flags;
- path_ext->mfpe_flags = itf_flags;
- }
+ path_index = path_indices[ii];
+ rpath = &rpaths[ii];
- /*
- * Has the path changed its contribution to the input interface set.
- * Which only paths with interfaces can do...
- */
- if (~0 != rpath[0].frp_sw_if_index)
- {
- mfib_itf_t *mfib_itf;
+ if (FIB_NODE_INDEX_INVALID == path_index)
+ continue;
+
+ /*
+ * find the path extension for that path
+ */
+ path_ext = mfib_entry_path_ext_find(msrc->mfes_exts, path_index);
- if (old != itf_flags)
+ if (NULL == path_ext)
{
- /*
- * change of flag contributions
- */
- mfib_itf = mfib_entry_itf_find(msrc->mfes_itfs,
- rpath[0].frp_sw_if_index);
+ old = MFIB_ITF_FLAG_NONE;
+ path_ext = mfib_path_ext_add(msrc, path_index,
+ rpath->frp_mitf_flags);
+ }
+ else
+ {
+ old = path_ext->mfpe_flags;
+ path_ext->mfpe_flags = rpath->frp_mitf_flags;
+ }
- if (NULL == mfib_itf)
- {
- mfib_entry_itf_add(msrc,
- rpath[0].frp_sw_if_index,
- mfib_itf_create(path_index, itf_flags));
- }
- else
+ /*
+ * Has the path changed its contribution to the input interface set.
+ * Which only paths with interfaces can do...
+ */
+ if (mfib_entry_path_itf_based(rpath))
+ {
+ mfib_itf_t *mfib_itf;
+
+ if (old != rpath->frp_mitf_flags)
{
- if (mfib_itf_update(mfib_itf,
- path_index,
- itf_flags))
+ /*
+ * change of flag contributions
+ */
+ mfib_itf = mfib_entry_itf_find(msrc->mfes_itfs,
+ rpath->frp_sw_if_index);
+
+ if (NULL == mfib_itf)
{
- /*
- * no more interface flags on this path, remove
- * from the data-plane set
- */
- mfib_entry_itf_remove(msrc, rpath[0].frp_sw_if_index);
+ mfib_entry_itf_add(msrc,
+ rpath->frp_sw_if_index,
+ mfib_itf_create(path_index,
+ rpath->frp_mitf_flags));
+ }
+ else
+ {
+ if (mfib_itf_update(mfib_itf,
+ path_index,
+ rpath->frp_mitf_flags))
+ {
+ /*
+ * no more interface flags on this path, remove
+ * from the data-plane set
+ */
+ mfib_entry_itf_remove(msrc, rpath->frp_sw_if_index);
+ }
}
}
}
}
+ vec_free(path_indices);
mfib_entry_recalculate_forwarding(mfib_entry, current_best);
}
@@ -1021,12 +1020,14 @@ mfib_entry_path_update (fib_node_index_t mfib_entry_index,
int
mfib_entry_path_remove (fib_node_index_t mfib_entry_index,
mfib_source_t source,
- const fib_route_path_t *rpath)
+ const fib_route_path_t *rpaths)
{
- fib_node_index_t path_index;
+ fib_node_index_t path_index, *path_indices;
+ const fib_route_path_t *rpath;
mfib_source_t current_best;
mfib_entry_t *mfib_entry;
mfib_entry_src_t *msrc;
+ u32 ii;
mfib_entry = mfib_entry_get(mfib_entry_index);
ASSERT(NULL != mfib_entry);
@@ -1042,23 +1043,29 @@ mfib_entry_path_remove (fib_node_index_t mfib_entry_index,
}
/*
- * remove the path from the path-list. If it's not there we'll get
- * back invalid
+ * remove the paths from the path-list. If it's not there we'll get
+ * back an empty vector
*/
- path_index = mfib_entry_src_path_remove(msrc, rpath);
+ path_indices = mfib_entry_src_paths_remove(msrc, rpaths);
- if (FIB_NODE_INDEX_INVALID != path_index)
+ vec_foreach_index(ii, path_indices)
{
+ path_index = path_indices[ii];
+ rpath = &rpaths[ii];
+
+ if (FIB_NODE_INDEX_INVALID == path_index)
+ continue;
+
/*
* don't need the extension, nor the interface anymore
*/
mfib_path_ext_remove(msrc, path_index);
- if (~0 != rpath[0].frp_sw_if_index)
+ if (mfib_entry_path_itf_based(rpath))
{
mfib_itf_t *mfib_itf;
mfib_itf = mfib_entry_itf_find(msrc->mfes_itfs,
- rpath[0].frp_sw_if_index);
+ rpath->frp_sw_if_index);
if (mfib_itf_update(mfib_itf,
path_index,
@@ -1068,19 +1075,20 @@ mfib_entry_path_remove (fib_node_index_t mfib_entry_index,
* no more interface flags on this path, remove
* from the data-plane set
*/
- mfib_entry_itf_remove(msrc, rpath[0].frp_sw_if_index);
+ mfib_entry_itf_remove(msrc, rpath->frp_sw_if_index);
}
}
- }
- if (mfib_entry_src_ok_for_delete(msrc))
- {
- /*
- * this source has no interfaces and no flags.
- * it has nothing left to give - remove it
- */
- mfib_entry_src_remove(mfib_entry, source);
+ if (mfib_entry_src_ok_for_delete(msrc))
+ {
+ /*
+ * this source has no interfaces and no flags.
+ * it has nothing left to give - remove it
+ */
+ mfib_entry_src_remove(mfib_entry, source);
+ }
}
+ vec_free(path_indices);
mfib_entry_recalculate_forwarding(mfib_entry, current_best);
@@ -1321,12 +1329,14 @@ mfib_entry_module_init (void)
mfib_entry_logger = vlib_log_register_class("mfib", "entry");
}
-void
-mfib_entry_encode (fib_node_index_t mfib_entry_index,
- fib_route_path_encode_t **api_rpaths)
+fib_route_path_t*
+mfib_entry_encode (fib_node_index_t mfib_entry_index)
{
- fib_route_path_encode_t *api_rpath;
+ fib_path_encode_ctx_t ctx = {
+ .rpaths = NULL,
+ };
mfib_entry_t *mfib_entry;
+ fib_route_path_t *rpath;
mfib_entry_src_t *bsrc;
mfib_entry = mfib_entry_get(mfib_entry_index);
@@ -1337,20 +1347,22 @@ mfib_entry_encode (fib_node_index_t mfib_entry_index,
fib_path_list_walk_w_ext(bsrc->mfes_pl,
NULL,
fib_path_encode,
- api_rpaths);
+ &ctx);
}
- vec_foreach(api_rpath, *api_rpaths)
+ vec_foreach(rpath, ctx.rpaths)
{
mfib_itf_t *mfib_itf;
mfib_itf = mfib_entry_itf_find(bsrc->mfes_itfs,
- api_rpath->rpath.frp_sw_if_index);
+ rpath->frp_sw_if_index);
if (mfib_itf)
{
- api_rpath->rpath.frp_mitf_flags = mfib_itf->mfi_flags;
+ rpath->frp_mitf_flags = mfib_itf->mfi_flags;
}
}
+
+ return (ctx.rpaths);
}
const mfib_prefix_t *
diff --git a/src/vnet/mfib/mfib_entry.h b/src/vnet/mfib/mfib_entry.h
index 8ab7cee5ea7..4a1121bc9d5 100644
--- a/src/vnet/mfib/mfib_entry.h
+++ b/src/vnet/mfib/mfib_entry.h
@@ -135,8 +135,7 @@ extern int mfib_entry_special_add(fib_node_index_t fib_entry_index,
extern void mfib_entry_path_update(fib_node_index_t fib_entry_index,
mfib_source_t source,
- const fib_route_path_t *rpath,
- mfib_itf_flags_t itf_flags);
+ const fib_route_path_t *rpath);
extern int mfib_entry_path_remove(fib_node_index_t fib_entry_index,
@@ -188,8 +187,7 @@ extern void mfib_entry_contribute_forwarding(
mfib_entry_fwd_flags_t flags,
dpo_id_t *dpo);
-extern void mfib_entry_encode(fib_node_index_t fib_entry_index,
- fib_route_path_encode_t **api_rpaths);
+extern fib_route_path_t* mfib_entry_encode(fib_node_index_t fib_entry_index);
extern void mfib_entry_module_init(void);
diff --git a/src/vnet/mfib/mfib_table.c b/src/vnet/mfib/mfib_table.c
index 68154b37ff8..504333a2474 100644
--- a/src/vnet/mfib/mfib_table.c
+++ b/src/vnet/mfib/mfib_table.c
@@ -286,12 +286,11 @@ mfib_table_entry_update (u32 fib_index,
return (mfib_entry_index);
}
-fib_node_index_t
-mfib_table_entry_path_update (u32 fib_index,
- const mfib_prefix_t *prefix,
- mfib_source_t source,
- const fib_route_path_t *rpath,
- mfib_itf_flags_t itf_flags)
+static fib_node_index_t
+mfib_table_entry_paths_update_i (u32 fib_index,
+ const mfib_prefix_t *prefix,
+ mfib_source_t source,
+ const fib_route_path_t *rpaths)
{
fib_node_index_t mfib_entry_index;
mfib_table_t *mfib_table;
@@ -308,30 +307,53 @@ mfib_table_entry_path_update (u32 fib_index,
MFIB_ENTRY_FLAG_NONE,
INDEX_INVALID);
- mfib_entry_path_update(mfib_entry_index,
- source,
- rpath,
- itf_flags);
+ mfib_entry_path_update(mfib_entry_index, source, rpaths);
mfib_table_entry_insert(mfib_table, prefix, mfib_entry_index);
}
else
{
- mfib_entry_path_update(mfib_entry_index,
- source,
- rpath,
- itf_flags);
+ mfib_entry_path_update(mfib_entry_index, source, rpaths);
}
return (mfib_entry_index);
}
-void
-mfib_table_entry_path_remove (u32 fib_index,
+
+fib_node_index_t
+mfib_table_entry_path_update (u32 fib_index,
const mfib_prefix_t *prefix,
mfib_source_t source,
const fib_route_path_t *rpath)
{
fib_node_index_t mfib_entry_index;
+ fib_route_path_t *rpaths = NULL;
+
+ vec_add1(rpaths, *rpath);
+
+ mfib_entry_index = mfib_table_entry_paths_update_i(fib_index, prefix,
+ source, rpaths);
+
+ vec_free(rpaths);
+ return (mfib_entry_index);
+}
+
+fib_node_index_t
+mfib_table_entry_paths_update (u32 fib_index,
+ const mfib_prefix_t *prefix,
+ mfib_source_t source,
+ const fib_route_path_t *rpaths)
+{
+ return (mfib_table_entry_paths_update_i(fib_index, prefix,
+ source, rpaths));
+}
+
+static void
+mfib_table_entry_paths_remove_i (u32 fib_index,
+ const mfib_prefix_t *prefix,
+ mfib_source_t source,
+ const fib_route_path_t *rpaths)
+{
+ fib_node_index_t mfib_entry_index;
mfib_table_t *mfib_table;
mfib_table = mfib_table_get(fib_index, prefix->fp_proto);
@@ -340,7 +362,7 @@ mfib_table_entry_path_remove (u32 fib_index,
if (FIB_NODE_INDEX_INVALID == mfib_entry_index)
{
/*
- * removing an etry that does not exist. i'll allow it.
+ * removing an entry that does not exist. i'll allow it.
*/
}
else
@@ -354,7 +376,7 @@ mfib_table_entry_path_remove (u32 fib_index,
no_more_sources = mfib_entry_path_remove(mfib_entry_index,
source,
- rpath);
+ rpaths);
if (no_more_sources)
{
@@ -367,6 +389,35 @@ mfib_table_entry_path_remove (u32 fib_index,
mfib_entry_unlock(mfib_entry_index);
}
}
+void
+mfib_table_entry_paths_remove (u32 fib_index,
+ const mfib_prefix_t *prefix,
+ mfib_source_t source,
+ const fib_route_path_t *rpaths)
+{
+ mfib_table_entry_paths_remove_i(fib_index,
+ prefix,
+ source,
+ rpaths);
+}
+
+void
+mfib_table_entry_path_remove (u32 fib_index,
+ const mfib_prefix_t *prefix,
+ mfib_source_t source,
+ const fib_route_path_t *rpath)
+{
+ fib_route_path_t *rpaths = NULL;
+
+ vec_add1(rpaths, *rpath);
+
+ mfib_table_entry_paths_remove_i(fib_index,
+ prefix,
+ source,
+ rpaths);
+
+ vec_free(rpaths);
+}
fib_node_index_t
mfib_table_entry_special_add (u32 fib_index,
@@ -464,12 +515,10 @@ void
mfib_table_entry_delete_index (fib_node_index_t mfib_entry_index,
mfib_source_t source)
{
- const mfib_prefix_t *prefix;
-
- prefix = mfib_entry_get_prefix(mfib_entry_index);
-
mfib_table_entry_delete_i(mfib_entry_get_fib_index(mfib_entry_index),
- mfib_entry_index, prefix, source);
+ mfib_entry_index,
+ mfib_entry_get_prefix(mfib_entry_index),
+ source);
}
u32
diff --git a/src/vnet/mfib/mfib_table.h b/src/vnet/mfib/mfib_table.h
index 6be4f798cd7..47461375a10 100644
--- a/src/vnet/mfib/mfib_table.h
+++ b/src/vnet/mfib/mfib_table.h
@@ -164,8 +164,11 @@ extern fib_node_index_t mfib_table_entry_update(u32 fib_index,
extern fib_node_index_t mfib_table_entry_path_update(u32 fib_index,
const mfib_prefix_t *prefix,
mfib_source_t source,
- const fib_route_path_t *rpath,
- mfib_itf_flags_t flags);
+ const fib_route_path_t *rpath);
+extern fib_node_index_t mfib_table_entry_paths_update(u32 fib_index,
+ const mfib_prefix_t *prefix,
+ mfib_source_t source,
+ const fib_route_path_t *rpath);
/**
* @brief
@@ -190,6 +193,10 @@ extern void mfib_table_entry_path_remove(u32 fib_index,
const mfib_prefix_t *prefix,
mfib_source_t source,
const fib_route_path_t *paths);
+extern void mfib_table_entry_paths_remove(u32 fib_index,
+ const mfib_prefix_t *prefix,
+ mfib_source_t source,
+ const fib_route_path_t *paths);
@@ -320,6 +327,20 @@ extern u32 mfib_table_get_table_id(u32 fib_index, fib_protocol_t proto);
*/
extern u32 mfib_table_find(fib_protocol_t proto, u32 table_id);
+/**
+ * @brief
+ * Get the Table-ID of the FIB from protocol and index
+ *
+ * @param fib_index
+ * The FIB index
+ *
+ * @paran proto
+ * The protocol of the FIB (and thus the entries therein)
+ *
+ * @return fib_index
+ * The tableID of the FIB
+ */
+extern u32 mfib_table_get_table_id(u32 fib_index, fib_protocol_t proto);
/**
* @brief
diff --git a/src/vnet/mfib/mfib_types.api b/src/vnet/mfib/mfib_types.api
new file mode 100644
index 00000000000..b2ba4329ea5
--- /dev/null
+++ b/src/vnet/mfib/mfib_types.api
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import "vnet/fib/fib_types.api";
+import "vnet/ip/ip_types.api";
+
+enum mfib_itf_flags
+{
+ MFIB_API_ITF_FLAG_NONE = 0,
+ MFIB_API_ITF_FLAG_NEGATE_SIGNAL = 0x1,
+ MFIB_API_ITF_FLAG_ACCEPT = 0x2,
+ MFIB_API_ITF_FLAG_FORWARD = 0x4,
+ MFIB_API_ITF_FLAG_SIGNAL_PRESENT = 0x8,
+ MFIB_API_ITF_FLAG_DONT_PRESERVE = 0x10,
+};
+
+/** \brief mFIB path
+*/
+typeonly define mfib_path
+{
+ vl_api_mfib_itf_flags_t itf_flags;
+ vl_api_fib_path_t path;
+};
diff --git a/src/vnet/mpls/mpls.api b/src/vnet/mpls/mpls.api
index ca1aa3a8513..5d85812807a 100644
--- a/src/vnet/mpls/mpls.api
+++ b/src/vnet/mpls/mpls.api
@@ -15,6 +15,7 @@
option version = "1.1.0";
import "vnet/fib/fib_types.api";
+import "vnet/ip/ip_types.api";
/** \brief Bind/Unbind an MPLS local label to an IP prefix. i.e. create
a per-prefix label entry.
@@ -36,28 +37,24 @@ autoreply define mpls_ip_bind_unbind
u32 mb_label;
u32 mb_ip_table_id;
u8 mb_is_bind;
- u8 mb_is_ip4;
- u8 mb_address_length;
- u8 mb_address[16];
+ vl_api_prefix_t mb_prefix;
};
+typeonly define mpls_tunnel
+{
+ u32 mt_sw_if_index;
+ u32 mt_tunnel_index;
+ u8 mt_l2_only;
+ u8 mt_is_multicast;
+ u8 mt_n_paths;
+ vl_api_fib_path_t mt_paths[mt_n_paths];
+};
define mpls_tunnel_add_del
{
u32 client_index;
u32 context;
- u32 mt_sw_if_index;
u8 mt_is_add;
- u8 mt_l2_only;
- u8 mt_is_multicast;
- u8 mt_next_hop_proto_is_ip4;
- u8 mt_next_hop_weight;
- u8 mt_next_hop_preference;
- u8 mt_next_hop[16];
- u8 mt_next_hop_n_out_labels;
- u32 mt_next_hop_via_label;
- u32 mt_next_hop_sw_if_index;
- u32 mt_next_hop_table_id;
- vl_api_fib_mpls_label_t mt_next_hop_out_label_stack[mt_next_hop_n_out_labels];
+ vl_api_mpls_tunnel_t mt_tunnel;
};
/** \brief Reply for MPLS tunnel add / del request
@@ -90,12 +87,7 @@ define mpls_tunnel_dump
manual_endian manual_print define mpls_tunnel_details
{
u32 context;
- u32 mt_sw_if_index;
- u32 mt_tunnel_index;
- u8 mt_l2_only;
- u8 mt_is_multicast;
- u32 mt_count;
- vl_api_fib_path_t mt_paths[mt_count];
+ vl_api_mpls_tunnel_t mt_tunnel;
};
/** \brief MPLS Route Add / del route
@@ -107,68 +99,71 @@ manual_endian manual_print define mpls_tunnel_details
is not set by the client, then VPP will generate
something meaningfull.
*/
+typeonly define mpls_table
+{
+ u32 mt_table_id;
+ u8 mt_name[64];
+};
autoreply define mpls_table_add_del
{
u32 client_index;
u32 context;
- u32 mt_table_id;
u8 mt_is_add;
- u8 mt_name[64];
+ vl_api_mpls_table_t mt_table;
};
-/** \brief MPLS Route Add / del route
+/** \brief Dump MPLS fib table
@param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
+*/
+define mpls_table_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+define mpls_table_details
+{
+ u32 context;
+ vl_api_mpls_table_t mt_table;
+};
+
+/** \brief MPLS Route
@param mr_label - The MPLS label value
@param mr_eos - The End of stack bit
+ @param mr_eos_proto - If EOS then this is the DPO packect's proto post pop
@param mr_table_id - The MPLS table-id the route is added in
- @param mr_classify_table_index - If this is a classify route,
- this is the classify table index
- create them
@param mr_is_add - Is this a route add or delete
- @param mr_is_classify - Is this route result a classify
@param mr_is_multicast - Is this a multicast route
+ @param mr_n_paths - The number of paths
+ @param mr_paths - The paths
+*/
+typeonly define mpls_route
+{
+ u32 mr_table_id;
+ u32 mr_label;
+ u8 mr_eos;
+ u8 mr_eos_proto;
+ u8 mr_is_multicast;
+ u8 mr_n_paths;
+ vl_api_fib_path_t mr_paths[mr_n_paths];
+};
+
+/** \brief MPLS Route Add / del route
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mr_table_id - The MPLS table-id the route is added in
+ @param mr_is_add - Is this a route add or delete
@param mr_is_multipath - Is this route update a multipath - i.e. is this
a path addition to an existing route
- @param mr_is_resolve_host - Recurse resolution constraint via a host prefix
- @param mr_is_resolve_attached - Recurse resolution constraint via attached prefix
- @param mr_is_interface_rx - Interface Receive path
- @param mr_is_interface_rx - RPF-ID Receive path. The next-hop interface
- is used as the RPF-ID
- @param mr_next_hop_proto - The next-hop protocol, of type dpo_proto_t
- @param mr_next_hop_weight - The weight, for UCMP
- @param mr_next_hop[16] - the nextop address
- @param mr_next_hop_sw_if_index - the next-hop SW interface
- @param mr_next_hop_table_id - the next-hop table-id (if appropriate)
- @param mr_next_hop_n_out_labels - the number of labels in the label stack
- @param mr_next_hop_out_label_stack - the next-hop output label stack, outer most first
- @param next_hop_via_label - The next-hop is a resolved via a local label
+ @param mr_route - The Route
*/
define mpls_route_add_del
{
u32 client_index;
u32 context;
- u32 mr_label;
- u8 mr_eos;
- u32 mr_table_id;
- u32 mr_classify_table_index;
u8 mr_is_add;
- u8 mr_is_classify;
- u8 mr_is_multicast;
u8 mr_is_multipath;
- u8 mr_is_resolve_host;
- u8 mr_is_resolve_attached;
- u8 mr_is_interface_rx;
- u8 mr_is_rpf_id;
- u8 mr_next_hop_proto;
- u8 mr_next_hop_weight;
- u8 mr_next_hop_preference;
- u8 mr_next_hop[16];
- u8 mr_next_hop_n_out_labels;
- u32 mr_next_hop_sw_if_index;
- u32 mr_next_hop_table_id;
- u32 mr_next_hop_via_label;
- vl_api_fib_mpls_label_t mr_next_hop_out_label_stack[mr_next_hop_n_out_labels];
+ vl_api_mpls_route_t mr_route;
};
define mpls_route_add_del_reply
@@ -181,10 +176,11 @@ define mpls_route_add_del_reply
/** \brief Dump MPLS fib table
@param client_index - opaque cookie to identify the sender
*/
-define mpls_fib_dump
+define mpls_route_dump
{
u32 client_index;
u32 context;
+ vl_api_mpls_table_t table;
};
/** \brief mpls FIB table response
@@ -194,15 +190,10 @@ define mpls_fib_dump
@param count - the number of fib_path in path
@param path - array of of fib_path structures
*/
-manual_endian manual_print define mpls_fib_details
+manual_endian manual_print define mpls_route_details
{
u32 context;
- u32 table_id;
- u8 table_name[64];
- u8 eos_bit;
- u32 label;
- u32 count;
- vl_api_fib_path_t path[count];
+ vl_api_mpls_route_t mr_route;
};
/** \brief Enable or Disable MPLS on and interface
diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c
index 52434dae8aa..cb20df5695b 100644
--- a/src/vnet/mpls/mpls_api.c
+++ b/src/vnet/mpls/mpls_api.c
@@ -28,6 +28,7 @@
#include <vnet/fib/fib_api.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/fib/fib_path_list.h>
+#include <vnet/ip/ip_types_api.h>
#include <vnet/vnet_msg_enum.h>
@@ -54,7 +55,8 @@ _(MPLS_TABLE_ADD_DEL, mpls_table_add_del) \
_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \
_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \
_(SW_INTERFACE_SET_MPLS_ENABLE, sw_interface_set_mpls_enable) \
-_(MPLS_FIB_DUMP, mpls_fib_dump)
+_(MPLS_TABLE_DUMP, mpls_table_dump) \
+_(MPLS_ROUTE_DUMP, mpls_route_dump)
void
mpls_table_delete (u32 table_id, u8 is_api)
@@ -90,9 +92,10 @@ vl_api_mpls_table_add_del_t_handler (vl_api_mpls_table_add_del_t * mp)
vnm->api_errno = 0;
if (mp->mt_is_add)
- mpls_table_create (ntohl (mp->mt_table_id), 1, mp->mt_name);
+ mpls_table_create (ntohl (mp->mt_table.mt_table_id),
+ 1, mp->mt_table.mt_name);
else
- mpls_table_delete (ntohl (mp->mt_table_id), 1);
+ mpls_table_delete (ntohl (mp->mt_table.mt_table_id), 1);
// NB: Nothing sets rv; none of the above returns an error
@@ -104,6 +107,7 @@ mpls_ip_bind_unbind_handler (vnet_main_t * vnm,
vl_api_mpls_ip_bind_unbind_t * mp)
{
u32 mpls_fib_index, ip_fib_index;
+ fib_prefix_t pfx;
mpls_fib_index =
fib_table_find (FIB_PROTOCOL_MPLS, ntohl (mp->mb_mpls_table_id));
@@ -113,30 +117,12 @@ mpls_ip_bind_unbind_handler (vnet_main_t * vnm,
return VNET_API_ERROR_NO_SUCH_FIB;
}
- ip_fib_index = fib_table_find ((mp->mb_is_ip4 ?
- FIB_PROTOCOL_IP4 :
- FIB_PROTOCOL_IP6),
- ntohl (mp->mb_ip_table_id));
+ ip_prefix_decode (&mp->mb_prefix, &pfx);
+
+ ip_fib_index = fib_table_find (pfx.fp_proto, ntohl (mp->mb_ip_table_id));
if (~0 == ip_fib_index)
return VNET_API_ERROR_NO_SUCH_FIB;
- fib_prefix_t pfx = {
- .fp_len = mp->mb_address_length,
- };
-
- if (mp->mb_is_ip4)
- {
- pfx.fp_proto = FIB_PROTOCOL_IP4;
- clib_memcpy (&pfx.fp_addr.ip4, mp->mb_address,
- sizeof (pfx.fp_addr.ip4));
- }
- else
- {
- pfx.fp_proto = FIB_PROTOCOL_IP6;
- clib_memcpy (&pfx.fp_addr.ip6, mp->mb_address,
- sizeof (pfx.fp_addr.ip6));
- }
-
if (mp->mb_is_bind)
fib_table_entry_local_label_add (ip_fib_index, &pfx,
ntohl (mp->mb_label));
@@ -168,92 +154,58 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm,
vl_api_mpls_route_add_del_t * mp,
u32 * stats_index)
{
- fib_mpls_label_t *label_stack = NULL;
- u32 fib_index, next_hop_fib_index;
- int rv, ii, n_labels;;
+ fib_route_path_t *rpaths = NULL, *rpath;
+ vl_api_fib_path_t *apath;
+ u32 fib_index;
+ int rv, ii;
fib_prefix_t pfx = {
.fp_len = 21,
.fp_proto = FIB_PROTOCOL_MPLS,
- .fp_eos = mp->mr_eos,
- .fp_label = ntohl (mp->mr_label),
+ .fp_eos = mp->mr_route.mr_eos,
+ .fp_label = ntohl (mp->mr_route.mr_label),
};
if (pfx.fp_eos)
{
- pfx.fp_payload_proto = mp->mr_next_hop_proto;
+ pfx.fp_payload_proto = mp->mr_route.mr_eos_proto;
}
else
{
pfx.fp_payload_proto = DPO_PROTO_MPLS;
}
- rv = add_del_route_check (FIB_PROTOCOL_MPLS,
- mp->mr_table_id,
- mp->mr_next_hop_sw_if_index,
- pfx.fp_payload_proto,
- mp->mr_next_hop_table_id,
- mp->mr_is_rpf_id,
- &fib_index, &next_hop_fib_index);
-
+ rv = fib_api_table_id_decode (FIB_PROTOCOL_MPLS,
+ ntohl (mp->mr_route.mr_table_id), &fib_index);
if (0 != rv)
- return (rv);
-
- ip46_address_t nh;
- clib_memset (&nh, 0, sizeof (nh));
+ goto out;
- if (DPO_PROTO_IP4 == mp->mr_next_hop_proto)
- memcpy (&nh.ip4, mp->mr_next_hop, sizeof (nh.ip4));
- else if (DPO_PROTO_IP6 == mp->mr_next_hop_proto)
- memcpy (&nh.ip6, mp->mr_next_hop, sizeof (nh.ip6));
+ vec_validate (rpaths, mp->mr_route.mr_n_paths - 1);
- n_labels = mp->mr_next_hop_n_out_labels;
- if (n_labels == 0)
- ;
- else
+ for (ii = 0; ii < mp->mr_route.mr_n_paths; ii++)
{
- vec_validate (label_stack, n_labels - 1);
- for (ii = 0; ii < n_labels; ii++)
- {
- label_stack[ii].fml_value =
- ntohl (mp->mr_next_hop_out_label_stack[ii].label);
- label_stack[ii].fml_ttl = mp->mr_next_hop_out_label_stack[ii].ttl;
- label_stack[ii].fml_exp = mp->mr_next_hop_out_label_stack[ii].exp;
- label_stack[ii].fml_mode =
- (mp->mr_next_hop_out_label_stack[ii].is_uniform ?
- FIB_MPLS_LSP_MODE_UNIFORM : FIB_MPLS_LSP_MODE_PIPE);
- }
+ apath = &mp->mr_route.mr_paths[ii];
+ rpath = &rpaths[ii];
+
+ rv = fib_api_path_decode (apath, rpath);
+
+ if (0 != rv)
+ goto out;
}
- /* *INDENT-OFF* */
- rv = add_del_route_t_handler (mp->mr_is_multipath, mp->mr_is_add,
- 0, // mp->is_drop,
- 0, // mp->is_unreach,
- 0, // mp->is_prohibit,
- 0, // mp->is_local,
- mp->mr_is_multicast,
- mp->mr_is_classify,
- mp->mr_classify_table_index,
- mp->mr_is_resolve_host,
- mp->mr_is_resolve_attached,
- mp->mr_is_interface_rx,
- mp->mr_is_rpf_id,
- 0, // l2_bridged
- 0, // is source_lookup
- 0, // is_udp_encap
- fib_index, &pfx,
- mp->mr_next_hop_proto,
- &nh, ~0, // next_hop_id
- ntohl (mp->mr_next_hop_sw_if_index),
- next_hop_fib_index,
- mp->mr_next_hop_weight,
- mp->mr_next_hop_preference,
- ntohl (mp->mr_next_hop_via_label),
- label_stack);
- /* *INDENT-ON* */
+ fib_api_route_add_del (mp->mr_is_add,
+ mp->mr_is_multipath,
+ fib_index,
+ &pfx,
+ (mp->mr_route.mr_is_multicast ?
+ FIB_ENTRY_FLAG_MULTICAST :
+ FIB_ENTRY_FLAG_NONE), rpaths);
if (mp->mr_is_add && 0 == rv)
*stats_index = fib_table_entry_get_stats_index (fib_index, &pfx);
+out:
+ vec_free (rpaths);
+
return (rv);
}
@@ -308,73 +260,30 @@ mpls_table_create (u32 table_id, u8 is_api, const u8 * name)
static void
vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp)
{
- u32 tunnel_sw_if_index = ~0, tunnel_index = ~0, next_hop_via_label;
+ u32 tunnel_sw_if_index = ~0, tunnel_index = ~0;
vl_api_mpls_tunnel_add_del_reply_t *rmp;
- fib_route_path_t rpath, *rpaths = NULL;
+ fib_route_path_t *rpath, *rpaths = NULL;
int ii, rv = 0;
- clib_memset (&rpath, 0, sizeof (rpath));
+ vec_validate (rpaths, mp->mt_tunnel.mt_n_paths - 1);
- if (mp->mt_next_hop_proto_is_ip4)
+ for (ii = 0; ii < mp->mt_tunnel.mt_n_paths; ii++)
{
- rpath.frp_proto = DPO_PROTO_IP4;
- clib_memcpy (&rpath.frp_addr.ip4,
- mp->mt_next_hop, sizeof (rpath.frp_addr.ip4));
- }
- else
- {
- rpath.frp_proto = DPO_PROTO_IP6;
- clib_memcpy (&rpath.frp_addr.ip6,
- mp->mt_next_hop, sizeof (rpath.frp_addr.ip6));
- }
- rpath.frp_sw_if_index = ntohl (mp->mt_next_hop_sw_if_index);
- rpath.frp_weight = mp->mt_next_hop_weight;
- rpath.frp_preference = mp->mt_next_hop_preference;
+ rpath = &rpaths[ii];
- next_hop_via_label = ntohl (mp->mt_next_hop_via_label);
- if ((MPLS_LABEL_INVALID != next_hop_via_label) && (0 != next_hop_via_label))
- {
- rpath.frp_proto = DPO_PROTO_MPLS;
- rpath.frp_local_label = next_hop_via_label;
- rpath.frp_eos = MPLS_NON_EOS;
- }
+ rv = fib_api_path_decode (&mp->mt_tunnel.mt_paths[ii], rpath);
- if (rpath.frp_sw_if_index == ~0)
- { /* recursive path, set fib index */
- rpath.frp_fib_index =
- fib_table_find (dpo_proto_to_fib (rpath.frp_proto),
- ntohl (mp->mt_next_hop_table_id));
- if (rpath.frp_fib_index == ~0)
- {
- rv = VNET_API_ERROR_NO_SUCH_FIB;
- goto out;
- }
+ if (0 != rv)
+ goto out;
}
-
- if (mp->mt_is_add)
- {
- for (ii = 0; ii < mp->mt_next_hop_n_out_labels; ii++)
- {
- fib_mpls_label_t fml = {
- .fml_value = ntohl (mp->mt_next_hop_out_label_stack[ii].label),
- .fml_ttl = mp->mt_next_hop_out_label_stack[ii].ttl,
- .fml_exp = mp->mt_next_hop_out_label_stack[ii].exp,
- .fml_mode = (mp->mt_next_hop_out_label_stack[ii].is_uniform ?
- FIB_MPLS_LSP_MODE_UNIFORM : FIB_MPLS_LSP_MODE_PIPE),
- };
- vec_add1 (rpath.frp_label_stack, fml);
- }
- }
-
- vec_add1 (rpaths, rpath);
-
- tunnel_sw_if_index = ntohl (mp->mt_sw_if_index);
+ tunnel_sw_if_index = ntohl (mp->mt_tunnel.mt_sw_if_index);
if (mp->mt_is_add)
{
if (~0 == tunnel_sw_if_index)
- tunnel_sw_if_index = vnet_mpls_tunnel_create (mp->mt_l2_only,
- mp->mt_is_multicast);
+ tunnel_sw_if_index =
+ vnet_mpls_tunnel_create (mp->mt_tunnel.mt_l2_only,
+ mp->mt_tunnel.mt_is_multicast);
vnet_mpls_tunnel_path_add (tunnel_sw_if_index, rpaths);
tunnel_index = vnet_mpls_tunnel_get_index (tunnel_sw_if_index);
@@ -382,7 +291,7 @@ vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp)
else
{
tunnel_index = vnet_mpls_tunnel_get_index (tunnel_sw_if_index);
- tunnel_sw_if_index = ntohl (mp->mt_sw_if_index);
+ tunnel_sw_if_index = ntohl (mp->mt_tunnel.mt_sw_if_index);
if (!vnet_mpls_tunnel_path_remove (tunnel_sw_if_index, rpaths))
vnet_mpls_tunnel_del (tunnel_sw_if_index);
}
@@ -426,10 +335,13 @@ typedef struct mpls_tunnel_send_walk_ctx_t_
static void
send_mpls_tunnel_entry (u32 mti, void *arg)
{
- fib_route_path_encode_t *api_rpaths = NULL, *api_rpath;
mpls_tunnel_send_walk_ctx_t *ctx;
vl_api_mpls_tunnel_details_t *mp;
+ fib_path_encode_ctx_t path_ctx = {
+ .rpaths = NULL,
+ };
const mpls_tunnel_t *mt;
+ fib_route_path_t *rpath;
vl_api_fib_path_t *fp;
u32 n;
@@ -448,21 +360,25 @@ send_mpls_tunnel_entry (u32 mti, void *arg)
mp->_vl_msg_id = ntohs (VL_API_MPLS_TUNNEL_DETAILS);
mp->context = ctx->context;
- mp->mt_tunnel_index = ntohl (mti);
- mp->mt_sw_if_index = ntohl (mt->mt_sw_if_index);
- mp->mt_count = ntohl (n);
+ mp->mt_tunnel.mt_n_paths = ntohl (n);
+ mp->mt_tunnel.mt_sw_if_index = ntohl (mt->mt_sw_if_index);
+ mp->mt_tunnel.mt_tunnel_index = ntohl (mti);
+ mp->mt_tunnel.mt_l2_only = ! !(MPLS_TUNNEL_FLAG_L2 & mt->mt_flags);
+ mp->mt_tunnel.mt_is_multicast = ! !(MPLS_TUNNEL_FLAG_MCAST & mt->mt_flags);
fib_path_list_walk_w_ext (mt->mt_path_list,
- &mt->mt_path_exts, fib_path_encode, &api_rpaths);
+ &mt->mt_path_exts, fib_path_encode, &path_ctx);
- fp = mp->mt_paths;
- vec_foreach (api_rpath, api_rpaths)
+ fp = mp->mt_tunnel.mt_paths;
+ vec_foreach (rpath, path_ctx.rpaths)
{
- fib_api_path_encode (api_rpath, fp);
+ fib_api_path_encode (rpath, fp);
fp++;
}
vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ vec_free (path_ctx.rpaths);
}
static void
@@ -483,51 +399,95 @@ vl_api_mpls_tunnel_dump_t_handler (vl_api_mpls_tunnel_dump_t * mp)
}
static void
-send_mpls_fib_details (vpe_api_main_t * am,
- vl_api_registration_t * reg,
- const fib_table_t * table,
- const fib_prefix_t * pfx,
- fib_route_path_encode_t * api_rpaths, u32 context)
+send_mpls_table_details (vpe_api_main_t * am,
+ vl_api_registration_t * reg,
+ u32 context, const fib_table_t * table)
+{
+ vl_api_mpls_table_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_MPLS_TABLE_DETAILS);
+ mp->context = context;
+
+ mp->mt_table.mt_table_id = htonl (table->ft_table_id);
+ memcpy (mp->mt_table.mt_name,
+ table->ft_desc,
+ clib_min (vec_len (table->ft_desc), sizeof (mp->mt_table.mt_name)));
+
+ vl_api_send_msg (reg, (u8 *) mp);
+}
+
+static void
+vl_api_mpls_table_dump_t_handler (vl_api_mpls_table_dump_t * mp)
{
- vl_api_mpls_fib_details_t *mp;
- fib_route_path_encode_t *api_rpath;
+ vpe_api_main_t *am = &vpe_api_main;
+ vl_api_registration_t *reg;
+ mpls_main_t *mm = &mpls_main;
+ fib_table_t *fib_table;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ /* *INDENT-OFF* */
+ pool_foreach (fib_table, mm->fibs,
+ ({
+ send_mpls_table_details(am, reg, mp->context, fib_table);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+send_mpls_route_details (vpe_api_main_t * am,
+ vl_api_registration_t * reg,
+ u32 context, fib_node_index_t fib_entry_index)
+{
+ fib_route_path_t *rpaths, *rpath;
+ vl_api_mpls_route_details_t *mp;
+ const fib_prefix_t *pfx;
vl_api_fib_path_t *fp;
int path_count;
- path_count = vec_len (api_rpaths);
+ rpaths = fib_entry_encode (fib_entry_index);
+ pfx = fib_entry_get_prefix (fib_entry_index);
+
+ path_count = vec_len (rpaths);
mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
if (!mp)
return;
clib_memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_MPLS_FIB_DETAILS);
+ mp->_vl_msg_id = ntohs (VL_API_MPLS_ROUTE_DETAILS);
mp->context = context;
- mp->table_id = htonl (table->ft_table_id);
- memcpy (mp->table_name, table->ft_desc,
- clib_min (vec_len (table->ft_desc), sizeof (mp->table_name)));
- mp->eos_bit = pfx->fp_eos;
- mp->label = htonl (pfx->fp_label);
+ mp->mr_route.mr_table_id =
+ htonl (fib_table_get_table_id
+ (fib_entry_get_fib_index (fib_entry_index), pfx->fp_proto));
+ mp->mr_route.mr_eos = pfx->fp_eos;
+ mp->mr_route.mr_eos_proto = pfx->fp_payload_proto;
+ mp->mr_route.mr_label = htonl (pfx->fp_label);
- mp->count = htonl (path_count);
- fp = mp->path;
- vec_foreach (api_rpath, api_rpaths)
+ mp->mr_route.mr_n_paths = path_count;
+ fp = mp->mr_route.mr_paths;
+ vec_foreach (rpath, rpaths)
{
- fib_api_path_encode (api_rpath, fp);
+ fib_api_path_encode (rpath, fp);
fp++;
}
+ vec_free (rpaths);
vl_api_send_msg (reg, (u8 *) mp);
}
-typedef struct vl_api_mpls_fib_dump_table_walk_ctx_t_
+typedef struct vl_api_mpls_route_dump_table_walk_ctx_t_
{
fib_node_index_t *lfeis;
-} vl_api_mpls_fib_dump_table_walk_ctx_t;
+} vl_api_mpls_route_dump_table_walk_ctx_t;
static fib_table_walk_rc_t
-vl_api_mpls_fib_dump_table_walk (fib_node_index_t fei, void *arg)
+vl_api_mpls_route_dump_table_walk (fib_node_index_t fei, void *arg)
{
- vl_api_mpls_fib_dump_table_walk_ctx_t *ctx = arg;
+ vl_api_mpls_route_dump_table_walk_ctx_t *ctx = arg;
vec_add1 (ctx->lfeis, fei);
@@ -535,47 +495,37 @@ vl_api_mpls_fib_dump_table_walk (fib_node_index_t fei, void *arg)
}
static void
-vl_api_mpls_fib_dump_t_handler (vl_api_mpls_fib_dump_t * mp)
+vl_api_mpls_route_dump_t_handler (vl_api_mpls_route_dump_t * mp)
{
vpe_api_main_t *am = &vpe_api_main;
vl_api_registration_t *reg;
- mpls_main_t *mm = &mpls_main;
- fib_table_t *fib_table;
- mpls_fib_t *mpls_fib;
fib_node_index_t *lfeip = NULL;
- const fib_prefix_t *pfx;
- u32 fib_index;
- fib_route_path_encode_t *api_rpaths;
- vl_api_mpls_fib_dump_table_walk_ctx_t ctx = {
+ vl_api_mpls_route_dump_table_walk_ctx_t ctx = {
.lfeis = NULL,
};
+ u32 fib_index;
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
return;
- /* *INDENT-OFF* */
- pool_foreach (mpls_fib, mm->mpls_fibs,
- ({
- mpls_fib_table_walk (mpls_fib,
- vl_api_mpls_fib_dump_table_walk,
- &ctx);
- }));
- /* *INDENT-ON* */
- vec_sort_with_function (ctx.lfeis, fib_entry_cmp_for_sort);
+ fib_index = fib_table_find (FIB_PROTOCOL_MPLS,
+ ntohl (mp->table.mt_table_id));
- vec_foreach (lfeip, ctx.lfeis)
- {
- pfx = fib_entry_get_prefix (*lfeip);
- fib_index = fib_entry_get_fib_index (*lfeip);
- fib_table = fib_table_get (fib_index, pfx->fp_proto);
- api_rpaths = NULL;
- fib_entry_encode (*lfeip, &api_rpaths);
- send_mpls_fib_details (am, reg, fib_table, pfx, api_rpaths, mp->context);
- vec_free (api_rpaths);
- }
+ if (INDEX_INVALID != fib_index)
+ {
+ fib_table_walk (fib_index,
+ FIB_PROTOCOL_MPLS,
+ vl_api_mpls_route_dump_table_walk, &ctx);
+ vec_sort_with_function (ctx.lfeis, fib_entry_cmp_for_sort);
- vec_free (ctx.lfeis);
+ vec_foreach (lfeip, ctx.lfeis)
+ {
+ send_mpls_route_details (am, reg, mp->context, *lfeip);
+ }
+
+ vec_free (ctx.lfeis);
+ }
}
/*
diff --git a/src/vnet/mpls/mpls_tunnel.c b/src/vnet/mpls/mpls_tunnel.c
index 8db08c3a387..b7bcbfd14f7 100644
--- a/src/vnet/mpls/mpls_tunnel.c
+++ b/src/vnet/mpls/mpls_tunnel.c
@@ -242,7 +242,7 @@ mpls_tunnel_stack (adj_index_t ai)
mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
- if (NULL == mt)
+ if (NULL == mt || FIB_NODE_INDEX_INVALID == mt->mt_path_list)
return;
if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
@@ -654,6 +654,7 @@ void
vnet_mpls_tunnel_path_add (u32 sw_if_index,
fib_route_path_t *rpaths)
{
+ fib_route_path_t *rpath;
mpls_tunnel_t *mt;
u32 mti;
@@ -695,10 +696,13 @@ vnet_mpls_tunnel_path_add (u32 sw_if_index,
*/
fib_path_ext_list_resolve(&mt->mt_path_exts, mt->mt_path_list);
}
- fib_path_ext_list_insert(&mt->mt_path_exts,
- mt->mt_path_list,
- FIB_PATH_EXT_MPLS,
- rpaths);
+ vec_foreach(rpath, rpaths)
+ {
+ fib_path_ext_list_insert(&mt->mt_path_exts,
+ mt->mt_path_list,
+ FIB_PATH_EXT_MPLS,
+ rpath);
+ }
mpls_tunnel_restack(mt);
}
diff --git a/src/vnet/udp/udp_encap.c b/src/vnet/udp/udp_encap.c
index c8268276288..df4a811f3dd 100644
--- a/src/vnet/udp/udp_encap.c
+++ b/src/vnet/udp/udp_encap.c
@@ -211,7 +211,7 @@ format_udp_encap_i (u8 * s, va_list * args)
ue = udp_encap_get (uei);
// FIXME
- s = format (s, "udp-ecap:[%d]: ip-fib-index:%d ", uei, ue->ue_fib_index);
+ s = format (s, "udp-encap:[%d]: ip-fib-index:%d ", uei, ue->ue_fib_index);
if (FIB_PROTOCOL_IP4 == ue->ue_ip_proto)
{
s = format (s, "ip:[src:%U, dst:%U] udp:[src:%d, dst:%d]",
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.c b/src/vnet/vxlan-gbp/vxlan_gbp.c
index 001de73b840..7b09b57bd59 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.c
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.c
@@ -557,6 +557,7 @@ int vnet_vxlan_gbp_tunnel_add_del
.frp_fib_index = ~0,
.frp_weight = 0,
.frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
const mfib_prefix_t mpfx = {
.fp_proto = fp,
@@ -571,16 +572,15 @@ int vnet_vxlan_gbp_tunnel_add_del
*/
mfib_table_entry_path_update (t->encap_fib_index,
&mpfx,
- MFIB_SOURCE_VXLAN_GBP,
- &path, MFIB_ITF_FLAG_FORWARD);
+ MFIB_SOURCE_VXLAN_GBP, &path);
path.frp_sw_if_index = a->mcast_sw_if_index;
path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
mfei = mfib_table_entry_path_update (t->encap_fib_index,
&mpfx,
MFIB_SOURCE_VXLAN_GBP,
- &path,
- MFIB_ITF_FLAG_ACCEPT);
+ &path);
/*
* Create the mcast adjacency to send traffic to the group
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.c b/src/vnet/vxlan-gpe/vxlan_gpe.c
index ca17c12a87b..dd0e544352f 100644
--- a/src/vnet/vxlan-gpe/vxlan_gpe.c
+++ b/src/vnet/vxlan-gpe/vxlan_gpe.c
@@ -646,8 +646,9 @@ int vnet_vxlan_gpe_add_del_tunnel
.frp_addr = zero_addr,
.frp_sw_if_index = 0xffffffff,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
const mfib_prefix_t mpfx = {
.fp_proto = fp,
@@ -662,16 +663,15 @@ int vnet_vxlan_gpe_add_del_tunnel
*/
mfib_table_entry_path_update (t->encap_fib_index,
&mpfx,
- MFIB_SOURCE_VXLAN_GPE,
- &path, MFIB_ITF_FLAG_FORWARD);
+ MFIB_SOURCE_VXLAN_GPE, &path);
path.frp_sw_if_index = a->mcast_sw_if_index;
path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
mfei = mfib_table_entry_path_update (t->encap_fib_index,
&mpfx,
MFIB_SOURCE_VXLAN_GPE,
- &path,
- MFIB_ITF_FLAG_ACCEPT);
+ &path);
/*
* Create the mcast adjacency to send traffic to the group
diff --git a/src/vnet/vxlan/vxlan.c b/src/vnet/vxlan/vxlan.c
index 52d0812235f..def306a7846 100644
--- a/src/vnet/vxlan/vxlan.c
+++ b/src/vnet/vxlan/vxlan.c
@@ -538,8 +538,9 @@ int vnet_vxlan_add_del_tunnel
.frp_addr = zero_addr,
.frp_sw_if_index = 0xffffffff,
.frp_fib_index = ~0,
- .frp_weight = 0,
+ .frp_weight = 1,
.frp_flags = FIB_ROUTE_PATH_LOCAL,
+ .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
};
const mfib_prefix_t mpfx = {
.fp_proto = fp,
@@ -553,17 +554,14 @@ int vnet_vxlan_add_del_tunnel
* - the accepting interface is that from the API
*/
mfib_table_entry_path_update (t->encap_fib_index,
- &mpfx,
- MFIB_SOURCE_VXLAN,
- &path, MFIB_ITF_FLAG_FORWARD);
+ &mpfx, MFIB_SOURCE_VXLAN, &path);
path.frp_sw_if_index = a->mcast_sw_if_index;
path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+ path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
mfei = mfib_table_entry_path_update (t->encap_fib_index,
&mpfx,
- MFIB_SOURCE_VXLAN,
- &path,
- MFIB_ITF_FLAG_ACCEPT);
+ MFIB_SOURCE_VXLAN, &path);
/*
* Create the mcast adjacency to send traffic to the group
diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c
index b6e7c848764..9c2381189a2 100644
--- a/src/vpp/api/api.c
+++ b/src/vpp/api/api.c
@@ -540,7 +540,7 @@ vpe_api_hookup (vlib_main_t * vm)
*/
am->is_mp_safe[VL_API_CONTROL_PING] = 1;
am->is_mp_safe[VL_API_CONTROL_PING_REPLY] = 1;
- am->is_mp_safe[VL_API_IP_ADD_DEL_ROUTE] = 1;
+ am->is_mp_safe[VL_API_IP_ROUTE_ADD_DEL] = 1;
am->is_mp_safe[VL_API_GET_NODE_GRAPH] = 1;
/*
diff --git a/src/vpp/api/custom_dump.c b/src/vpp/api/custom_dump.c
index 4d764d40d16..d754c3ade1c 100644
--- a/src/vpp/api/custom_dump.c
+++ b/src/vpp/api/custom_dump.c
@@ -21,6 +21,7 @@
#include <vnet/ip/ip.h>
#include <vnet/ip/ip_neighbor.h>
#include <vnet/ip/ip_types_api.h>
+#include <vnet/fib/fib_api.h>
#include <vnet/unix/tuntap.h>
#include <vnet/mpls/mpls.h>
#include <vnet/dhcp/dhcp_proxy.h>
@@ -728,62 +729,19 @@ __clib_unused
FINISH;
}
-static void *vl_api_ip_add_del_route_t_print
- (vl_api_ip_add_del_route_t * mp, void *handle)
+static void *vl_api_ip_route_add_del_t_print
+ (vl_api_ip_route_add_del_t * mp, void *handle)
{
- u8 *s;
+ u8 *s, p;
- s = format (0, "SCRIPT: ip_add_del_route ");
+ s = format (0, "SCRIPT: ip_route_add_del ");
if (mp->is_add == 0)
s = format (s, "del ");
- if (mp->is_ipv6)
- s = format (s, "%U/%d ", format_ip6_address, mp->dst_address,
- mp->dst_address_length);
- else
- s = format (s, "%U/%d ", format_ip4_address, mp->dst_address,
- mp->dst_address_length);
-
- if (mp->table_id != 0)
- s = format (s, "vrf %d ", ntohl (mp->table_id));
-
- if (mp->is_local)
- s = format (s, "local ");
- else if (mp->is_drop)
- s = format (s, "drop ");
- else if (mp->is_classify)
- s = format (s, "classify %d", ntohl (mp->classify_table_index));
- else if (mp->next_hop_via_label != htonl (MPLS_LABEL_INVALID))
- s = format (s, "via via_label %d ", ntohl (mp->next_hop_via_label));
- else
- {
- if (mp->is_ipv6)
- s = format (s, "via %U ", format_ip6_address, mp->next_hop_address);
- else
- s = format (s, "via %U ", format_ip4_address, mp->next_hop_address);
- if (mp->next_hop_sw_if_index != ~0)
- s = format (s, "sw_if_index %d ", ntohl (mp->next_hop_sw_if_index));
+ s = format (s, "%U", format_vl_api_prefix, &mp->route.prefix);
- }
-
- if (mp->next_hop_weight != 1)
- s = format (s, "weight %d ", (u32) mp->next_hop_weight);
-
- if (mp->is_multipath)
- s = format (s, "multipath ");
-
- if (mp->next_hop_table_id)
- s = format (s, "lookup-in-vrf %d ", ntohl (mp->next_hop_table_id));
-
- if (mp->next_hop_n_out_labels)
- {
- u8 i;
- for (i = 0; i < mp->next_hop_n_out_labels; i++)
- {
- s = format (s, "out-label %d ",
- ntohl (mp->next_hop_out_label_stack[i].label));
- }
- }
+ for (p = 0; p < mp->route.n_paths; p++)
+ s = format (s, " [%U]", format_vl_api_fib_path, &mp->route.paths[p]);
FINISH;
}
@@ -791,7 +749,7 @@ static void *vl_api_ip_add_del_route_t_print
static void *vl_api_mpls_route_add_del_t_print
(vl_api_mpls_route_add_del_t * mp, void *handle)
{
- u8 *s;
+ u8 *s, p;
s = format (0, "SCRIPT: mpls_route_add_del ");
@@ -800,67 +758,21 @@ static void *vl_api_mpls_route_add_del_t_print
else
s = format (s, "del ");
- s = format (s, "%d ", ntohl (mp->mr_label));
+ s = format (s, "table %d ", ntohl (mp->mr_route.mr_table_id));
+ s = format (s, "%d ", ntohl (mp->mr_route.mr_label));
- if (mp->mr_eos)
+ if (mp->mr_route.mr_eos)
s = format (s, "eos ");
else
s = format (s, "non-eos ");
+ if (mp->mr_route.mr_is_multicast)
+ s = format (s, "multicast ");
- if (mp->mr_next_hop_proto == DPO_PROTO_IP4)
- {
- ip4_address_t ip4_null = {.as_u32 = 0, };
- if (memcmp (mp->mr_next_hop, &ip4_null, sizeof (ip4_null)))
- s = format (s, "via %U ", format_ip4_address, mp->mr_next_hop);
- else
- s = format (s, "via lookup-in-ip4-table %d ",
- ntohl (mp->mr_next_hop_table_id));
- }
- else if (mp->mr_next_hop_proto == DPO_PROTO_IP6)
- {
- ip6_address_t ip6_null = { {0}
- };
- if (memcmp (mp->mr_next_hop, &ip6_null, sizeof (ip6_null)))
- s = format (s, "via %U ", format_ip6_address, mp->mr_next_hop);
- else
- s = format (s, "via lookup-in-ip6-table %d ",
- ntohl (mp->mr_next_hop_table_id));
- }
- else if (mp->mr_next_hop_proto == DPO_PROTO_ETHERNET)
- {
- s = format (s, "via l2-input-on ");
- }
- else if (mp->mr_next_hop_proto == DPO_PROTO_MPLS)
- {
- if (mp->mr_next_hop_via_label != htonl (MPLS_LABEL_INVALID))
- s =
- format (s, "via via-label %d ", ntohl (mp->mr_next_hop_via_label));
- else
- s = format (s, "via next-hop-table %d ",
- ntohl (mp->mr_next_hop_table_id));
- }
- if (mp->mr_next_hop_sw_if_index != ~0)
- s = format (s, "sw_if_index %d ", ntohl (mp->mr_next_hop_sw_if_index));
-
- if (mp->mr_next_hop_weight != 1)
- s = format (s, "weight %d ", (u32) mp->mr_next_hop_weight);
+ for (p = 0; p < mp->mr_route.mr_n_paths; p++)
+ s =
+ format (s, " [%U]", format_vl_api_fib_path, &mp->mr_route.mr_paths[p]);
- if (mp->mr_is_multipath)
- s = format (s, "multipath ");
-
- if (mp->mr_is_classify)
- s = format (s, "classify %d", ntohl (mp->mr_classify_table_index));
-
- if (mp->mr_next_hop_n_out_labels)
- {
- u8 i;
- for (i = 0; i < mp->mr_next_hop_n_out_labels; i++)
- {
- s = format (s, "out-label %d ",
- ntohl (mp->mr_next_hop_out_label_stack[i].label));
- }
- }
FINISH;
}
@@ -875,9 +787,10 @@ static void *vl_api_ip_table_add_del_t_print
s = format (s, "add ");
else
s = format (s, "del ");
- if (mp->is_ipv6)
+ if (mp->table.is_ip6)
s = format (s, "ip6 ");
- s = format (s, "table %d ", ntohl (mp->table_id));
+ s = format (s, "table %d ", ntohl (mp->table.table_id));
+ s = format (s, "%s ", mp->table.name);
FINISH;
}
@@ -892,7 +805,7 @@ static void *vl_api_mpls_table_add_del_t_print
s = format (s, "add ");
else
s = format (s, "del ");
- s = format (s, "table %d ", ntohl (mp->mt_table_id));
+ s = format (s, "table %d ", ntohl (mp->mt_table.mt_table_id));
FINISH;
}
@@ -933,38 +846,27 @@ static void *vl_api_proxy_arp_intfc_enable_disable_t_print
static void *vl_api_mpls_tunnel_add_del_t_print
(vl_api_mpls_tunnel_add_del_t * mp, void *handle)
{
- u8 *s;
+ u8 *s, p;
s = format (0, "SCRIPT: mpls_tunnel_add_del ");
if (mp->mt_is_add == 0)
- s = format (s, "del sw_if_index %d ", ntohl (mp->mt_sw_if_index));
-
- mpls_label_t label = ntohl (mp->mt_next_hop_via_label);
- if (label != MPLS_LABEL_INVALID)
- s = format (s, "via-label %d ", label);
- else if (mp->mt_next_hop_proto_is_ip4)
- s = format (s, "via %U ", format_ip4_address, mp->mt_next_hop);
+ s =
+ format (s, "del sw_if_index %d ", ntohl (mp->mt_tunnel.mt_sw_if_index));
else
- s = format (s, "via %U ", format_ip6_address, mp->mt_next_hop);
+ s = format (s, "sw_if_index %d ", ntohl (mp->mt_tunnel.mt_sw_if_index));
- if (mp->mt_next_hop_sw_if_index != ~0)
- s = format (s, "sw_if_index %d ", ntohl (mp->mt_next_hop_sw_if_index));
- else if (mp->mt_next_hop_table_id)
- s = format (s, "next-hop-table %d ", ntohl (mp->mt_next_hop_table_id));
- if (mp->mt_l2_only)
+ if (mp->mt_tunnel.mt_l2_only)
s = format (s, "l2-only ");
+ if (mp->mt_tunnel.mt_is_multicast)
+ s = format (s, "multicast ");
+ if (mp->mt_tunnel.mt_tunnel_index)
+ s = format (s, "tunnel-index ");
- if (mp->mt_next_hop_n_out_labels)
- {
- u8 i;
- for (i = 0; i < mp->mt_next_hop_n_out_labels; i++)
- {
- s = format (s, "out-label %d ",
- ntohl (mp->mt_next_hop_out_label_stack[i].label));
- }
- }
+ for (p = 0; p < mp->mt_tunnel.mt_n_paths; p++)
+ s = format (s, " [%U]", format_vl_api_fib_path,
+ &mp->mt_tunnel.mt_paths[p]);
FINISH;
}
@@ -2624,38 +2526,47 @@ static void *vl_api_mpls_tunnel_dump_t_print
u8 *s;
s = format (0, "SCRIPT: mpls_tunnel_dump ");
-
s = format (s, "sw_if_index %d ", ntohl (mp->sw_if_index));
FINISH;
}
-static void *vl_api_mpls_fib_dump_t_print
- (vl_api_mpls_fib_dump_t * mp, void *handle)
+static void *vl_api_mpls_table_dump_t_print
+ (vl_api_mpls_table_dump_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: mpls_table_decap_dump ");
+
+ FINISH;
+}
+
+static void *vl_api_mpls_route_dump_t_print
+ (vl_api_mpls_route_dump_t * mp, void *handle)
{
u8 *s;
- s = format (0, "SCRIPT: mpls_fib_decap_dump ");
+ s = format (0, "SCRIPT: mpls_route_decap_dump ");
FINISH;
}
-static void *vl_api_ip_fib_dump_t_print
- (vl_api_ip_fib_dump_t * mp, void *handle)
+static void *vl_api_ip_table_dump_t_print
+ (vl_api_ip_table_dump_t * mp, void *handle)
{
u8 *s;
- s = format (0, "SCRIPT: ip_fib_dump ");
+ s = format (0, "SCRIPT: ip_table_dump ");
FINISH;
}
-static void *vl_api_ip6_fib_dump_t_print
- (vl_api_ip6_fib_dump_t * mp, void *handle)
+static void *vl_api_ip_route_dump_t_print
+ (vl_api_ip_route_dump_t * mp, void *handle)
{
u8 *s;
- s = format (0, "SCRIPT: ip6_fib_dump ");
+ s = format (0, "SCRIPT: ip_route_dump ");
FINISH;
}
@@ -3782,10 +3693,10 @@ _(BOND_DETACH_SLAVE, bond_detach_slave) \
_(TAP_CREATE_V2, tap_create_v2) \
_(TAP_DELETE_V2, tap_delete_v2) \
_(SW_INTERFACE_TAP_V2_DUMP, sw_interface_tap_v2_dump) \
-_(IP_ADD_DEL_ROUTE, ip_add_del_route) \
_(IP_TABLE_ADD_DEL, ip_table_add_del) \
_(MPLS_ROUTE_ADD_DEL, mpls_route_add_del) \
_(MPLS_TABLE_ADD_DEL, mpls_table_add_del) \
+_(IP_ROUTE_ADD_DEL, ip_route_add_del) \
_(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \
_(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \
_(MPLS_TUNNEL_ADD_DEL, mpls_tunnel_add_del) \
@@ -3876,7 +3787,8 @@ _(AF_PACKET_CREATE, af_packet_create) \
_(AF_PACKET_DELETE, af_packet_delete) \
_(AF_PACKET_DUMP, af_packet_dump) \
_(SW_INTERFACE_CLEAR_STATS, sw_interface_clear_stats) \
-_(MPLS_FIB_DUMP, mpls_fib_dump) \
+_(MPLS_TABLE_DUMP, mpls_table_dump) \
+_(MPLS_ROUTE_DUMP, mpls_route_dump) \
_(MPLS_TUNNEL_DUMP, mpls_tunnel_dump) \
_(CLASSIFY_TABLE_IDS,classify_table_ids) \
_(CLASSIFY_TABLE_BY_INTERFACE, classify_table_by_interface) \
@@ -3941,8 +3853,8 @@ _(FLOW_CLASSIFY_DUMP, flow_classify_dump) \
_(GET_FIRST_MSG_ID, get_first_msg_id) \
_(IOAM_ENABLE, ioam_enable) \
_(IOAM_DISABLE, ioam_disable) \
-_(IP_FIB_DUMP, ip_fib_dump) \
-_(IP6_FIB_DUMP, ip6_fib_dump) \
+_(IP_TABLE_DUMP, ip_table_dump) \
+_(IP_ROUTE_DUMP, ip_route_dump) \
_(FEATURE_ENABLE_DISABLE, feature_enable_disable) \
_(SW_INTERFACE_TAG_ADD_DEL, sw_interface_tag_add_del) \
_(HW_INTERFACE_SET_MTU, hw_interface_set_mtu) \
diff --git a/src/vpp/api/types.c b/src/vpp/api/types.c
index a4766c298c2..3d556b72bfd 100644
--- a/src/vpp/api/types.c
+++ b/src/vpp/api/types.c
@@ -172,3 +172,50 @@ unformat_vl_api_prefix (unformat_input_t * input, va_list * args)
return (0);
}
+uword
+unformat_vl_api_mprefix (unformat_input_t * input, va_list * args)
+{
+ vl_api_mprefix_t *pfx = va_arg (*args, vl_api_mprefix_t *);
+
+ if (unformat (input, "%U/%d",
+ unformat_vl_api_ip4_address, &pfx->grp_address.ip4,
+ &pfx->grp_address_length))
+ pfx->af = ADDRESS_IP4;
+ else if (unformat (input, "%U/%d",
+ unformat_vl_api_ip6_address, &pfx->grp_address.ip6,
+ &pfx->grp_address_length))
+ pfx->af = ADDRESS_IP6;
+ else if (unformat (input, "%U %U",
+ unformat_vl_api_ip4_address, &pfx->src_address.ip4,
+ unformat_vl_api_ip4_address, &pfx->grp_address.ip4))
+ {
+ pfx->af = ADDRESS_IP4;
+ pfx->grp_address_length = 64;
+ }
+ else if (unformat (input, "%U %U",
+ unformat_vl_api_ip6_address, &pfx->src_address.ip6,
+ unformat_vl_api_ip6_address, &pfx->grp_address.ip6))
+ {
+ pfx->af = ADDRESS_IP6;
+ pfx->grp_address_length = 256;
+ }
+ else if (unformat (input, "%U",
+ unformat_vl_api_ip4_address, &pfx->grp_address.ip4))
+ {
+ pfx->af = ADDRESS_IP4;
+ pfx->grp_address_length = 32;
+ clib_memset(&pfx->src_address, 0, sizeof(pfx->src_address));
+ }
+ else if (unformat (input, "%U",
+ unformat_vl_api_ip6_address, &pfx->grp_address.ip6))
+ {
+ pfx->af = ADDRESS_IP6;
+ pfx->grp_address_length = 128;
+ clib_memset(&pfx->src_address, 0, sizeof(pfx->src_address));
+ }
+ else
+ return (0);
+
+ return (1);
+}
+
diff --git a/src/vpp/api/types.h b/src/vpp/api/types.h
index 40d80a09546..95797b4da2e 100644
--- a/src/vpp/api/types.h
+++ b/src/vpp/api/types.h
@@ -32,6 +32,7 @@ extern uword unformat_vl_api_address (unformat_input_t * input, va_list * args);
extern uword unformat_vl_api_ip4_address (unformat_input_t * input, va_list * args);
extern uword unformat_vl_api_ip6_address (unformat_input_t * input, va_list * args);
extern uword unformat_vl_api_prefix (unformat_input_t * input, va_list * args);
+extern uword unformat_vl_api_mprefix (unformat_input_t * input, va_list * args);
extern u8 *format_vl_api_address (u8 * s, va_list * args);
extern u8 *format_vl_api_address_family (u8 * s, va_list * args);
diff --git a/test/remote_test.py b/test/remote_test.py
index 21913b6c959..092d3f8d2e7 100644
--- a/test/remote_test.py
+++ b/test/remote_test.py
@@ -10,7 +10,7 @@ import six
from six import moves
from framework import VppTestCase
-from enum import Enum
+from aenum import Enum
class SerializableClassCopy(object):
@@ -19,6 +19,9 @@ class SerializableClassCopy(object):
"""
pass
+ def __repr__(self):
+ return '<SerializableClassCopy dict=%s>' % self.__dict__
+
class RemoteClassAttr(object):
"""
@@ -44,7 +47,8 @@ class RemoteClassAttr(object):
def __getattr__(self, attr):
if attr[0] == '_':
if not (attr.startswith('__') and attr.endswith('__')):
- raise AttributeError
+ raise AttributeError('tried to get private attribute: %s ',
+ attr)
self._path.append(attr)
return self
@@ -58,8 +62,9 @@ class RemoteClassAttr(object):
True, value=val)
def __call__(self, *args, **kwargs):
+ ret = True if 'vapi' in self.path_to_str() else False
return self._remote._remote_exec(RemoteClass.CALL, self.path_to_str(),
- True, *args, **kwargs)
+ ret, *args, **kwargs)
class RemoteClass(Process):
@@ -119,7 +124,7 @@ class RemoteClass(Process):
if not (attr.startswith('__') and attr.endswith('__')):
if hasattr(super(RemoteClass, self), '__getattr__'):
return super(RemoteClass, self).__getattr__(attr)
- raise AttributeError
+ raise AttributeError('missing: %s', attr)
return RemoteClassAttr(self, attr)
def __setattr__(self, attr, val):
@@ -137,12 +142,12 @@ class RemoteClass(Process):
mutable_args = list(args)
for i, val in enumerate(mutable_args):
if isinstance(val, RemoteClass) or \
- isinstance(val, RemoteClassAttr):
+ isinstance(val, RemoteClassAttr):
mutable_args[i] = val.get_remote_value()
args = tuple(mutable_args)
for key, val in six.iteritems(kwargs):
if isinstance(val, RemoteClass) or \
- isinstance(val, RemoteClassAttr):
+ isinstance(val, RemoteClassAttr):
kwargs[key] = val.get_remote_value()
# send request
args = self._make_serializable(args)
@@ -244,7 +249,10 @@ class RemoteClass(Process):
# copy at least serializable attributes and properties
for name, member in inspect.getmembers(obj):
- if name[0] == '_': # skip private members
+ # skip private members and non-writable dunder methods.
+ if name[0] == '_':
+ if name in ['__weakref__']:
+ continue
if not (name.startswith('__') and name.endswith('__')):
continue
if callable(member) and not isinstance(member, property):
diff --git a/test/test_abf.py b/test/test_abf.py
index 350af0dce07..221a793fed3 100644
--- a/test/test_abf.py
+++ b/test/test_abf.py
@@ -5,7 +5,8 @@ import unittest
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
-from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsLabel, VppIpTable
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsLabel, \
+ VppIpTable, FibPathProto
from scapy.packet import Raw
from scapy.layers.l2 import Ether
@@ -45,30 +46,9 @@ class VppAbfPolicy(VppObject):
self.policy_id = policy_id
self.acl = acl
self.paths = paths
-
- def encode_paths(self):
- br_paths = []
- for p in self.paths:
- lstack = []
- for l in p.nh_labels:
- if type(l) == VppMplsLabel:
- lstack.append(l.encode())
- else:
- lstack.append({'label': l, 'ttl': 255})
- n_labels = len(lstack)
- while (len(lstack) < 16):
- lstack.append({})
- br_paths.append({'next_hop': p.nh_addr,
- 'weight': 1,
- 'afi': p.proto,
- 'sw_if_index': 0xffffffff,
- 'preference': 0,
- 'table_id': p.nh_table_id,
- 'next_hop_id': p.next_hop_id,
- 'is_udp_encap': p.is_udp_encap,
- 'n_labels': n_labels,
- 'label_stack': lstack})
- return br_paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
def add_vpp_config(self):
self._test.vapi.abf_policy_add_del(
@@ -76,7 +56,7 @@ class VppAbfPolicy(VppObject):
{'policy_id': self.policy_id,
'acl_index': self.acl.acl_index,
'n_paths': len(self.paths),
- 'paths': self.encode_paths()})
+ 'paths': self.encoded_paths})
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
@@ -85,7 +65,7 @@ class VppAbfPolicy(VppObject):
{'policy_id': self.policy_id,
'acl_index': self.acl.acl_index,
'n_paths': len(self.paths),
- 'paths': self.encode_paths()})
+ 'paths': self.encoded_paths})
def query_vpp_config(self):
return find_abf_policy(self._test, self.policy_id)
@@ -324,8 +304,7 @@ class TestAbf(VppTestCase):
#
abf_1 = VppAbfPolicy(self, 10, acl_1,
[VppRoutePath("3001::1",
- 0xffffffff,
- proto=DpoProto.DPO_PROTO_IP6)])
+ 0xffffffff)])
abf_1.add_vpp_config()
attach_1 = VppAbfAttach(self, 10, self.pg0.sw_if_index,
@@ -352,9 +331,7 @@ class TestAbf(VppTestCase):
#
route = VppIpRoute(self, "3001::1", 32,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route.add_vpp_config()
#
diff --git a/test/test_bfd.py b/test/test_bfd.py
index 4dca11247ce..9a37eb8fed7 100644
--- a/test/test_bfd.py
+++ b/test/test_bfd.py
@@ -1764,14 +1764,10 @@ class BFDFIBTestCase(VppTestCase):
# will have a BFD session
ip_2001_s_64 = VppIpRoute(self, "2001::", 64,
[VppRoutePath(self.pg0.remote_ip6,
- self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg0.sw_if_index)])
ip_2002_s_64 = VppIpRoute(self, "2002::", 64,
[VppRoutePath(self.pg0.remote_ip6,
- 0xffffffff,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ 0xffffffff)])
ip_2001_s_64.add_vpp_config()
ip_2002_s_64.add_vpp_config()
diff --git a/test/test_bier.py b/test/test_bier.py
index b8130ce93f2..793c8ca0362 100644
--- a/test/test_bier.py
+++ b/test/test_bier.py
@@ -7,7 +7,7 @@ from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath, \
VppMplsTable, VppIpMRoute, VppMRoutePath, VppIpTable, \
MRouteEntryFlags, MRouteItfFlags, MPLS_LABEL_INVALID, \
- VppMplsLabel
+ VppMplsLabel, FibPathProto, FibPathType
from vpp_bier import BIER_HDR_PAYLOAD, VppBierImp, VppBierDispEntry, \
VppBierDispTable, VppBierTable, VppBierTableID, VppBierRoute
from vpp_udp_encap import VppUdpEncap
@@ -278,6 +278,7 @@ class TestBier(VppTestCase):
labels=[VppMplsLabel(101)])])
rx = self.send_and_expect(self.pg0, pkts, self.pg1)
+
for nh in nhs:
self.assertTrue(sum(p[MPLS].label == nh['label'] for p in rx))
@@ -353,7 +354,8 @@ class TestBier(VppTestCase):
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(0xffffffff,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_BIER,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
bier_imp=bi.bi_index)])
route_ing_232_1_1_1.add_vpp_config()
@@ -418,7 +420,7 @@ class TestBier(VppTestCase):
self, bti, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
- proto=DpoProto.DPO_PROTO_BIER,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
nh_table_id=8)])
bier_route_1.add_vpp_config()
@@ -427,7 +429,7 @@ class TestBier(VppTestCase):
#
bier_de_1 = VppBierDispEntry(self, bdt.id, 99,
BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
- DpoProto.DPO_PROTO_BIER,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
"0.0.0.0", 0, rpf_id=8192)
bier_de_1.add_vpp_config()
@@ -477,7 +479,7 @@ class TestBier(VppTestCase):
#
bier_de_2 = VppBierDispEntry(self, bdt.id, 0,
BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
- DpoProto.DPO_PROTO_BIER,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
"0.0.0.0", 0, rpf_id=8192)
bier_de_2.add_vpp_config()
@@ -501,6 +503,7 @@ class TestBier(VppTestCase):
paths=[VppMRoutePath(0xffffffff,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
proto=DpoProto.DPO_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
bier_imp=bi.bi_index),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
@@ -552,7 +555,8 @@ class TestBier(VppTestCase):
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(0xffffffff,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_BIER,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
bier_imp=bi_low.bi_index)])
route_ing_232_1_1_1.add_vpp_config()
route_ing_232_1_1_2 = VppIpMRoute(
@@ -564,7 +568,8 @@ class TestBier(VppTestCase):
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(0xffffffff,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_BIER,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
bier_imp=bi_high.bi_index)])
route_ing_232_1_1_2.add_vpp_config()
@@ -582,15 +587,15 @@ class TestBier(VppTestCase):
self, bti, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
- proto=DpoProto.DPO_PROTO_BIER,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
nh_table_id=8)])
bier_route_1.add_vpp_config()
bier_route_max = VppBierRoute(
self, bti, max_bp,
[VppRoutePath("0.0.0.0",
0xffffffff,
- nh_table_id=8,
- proto=DpoProto.DPO_PROTO_BIER)])
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ nh_table_id=8)])
bier_route_max.add_vpp_config()
#
@@ -599,12 +604,12 @@ class TestBier(VppTestCase):
#
bier_de_1 = VppBierDispEntry(self, bdt.id, 333,
BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
- DpoProto.DPO_PROTO_BIER,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
"0.0.0.0", 10, rpf_id=8192)
bier_de_1.add_vpp_config()
bier_de_1 = VppBierDispEntry(self, bdt.id, 334,
BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
- DpoProto.DPO_PROTO_BIER,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
"0.0.0.0", 10, rpf_id=8193)
bier_de_1.add_vpp_config()
@@ -713,7 +718,7 @@ class TestBier(VppTestCase):
self, bti, 1,
[VppRoutePath("0.0.0.0",
0xFFFFFFFF,
- is_udp_encap=1,
+ type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
next_hop_id=udp_encap.id)])
bier_route.add_vpp_config()
@@ -739,7 +744,8 @@ class TestBier(VppTestCase):
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(0xffffffff,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_BIER,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
bier_imp=bi2.bi_index)])
route_ing_232_1_1_1.add_vpp_config()
@@ -793,7 +799,7 @@ class TestBier(VppTestCase):
self, bti, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
- proto=DpoProto.DPO_PROTO_BIER,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
nh_table_id=8)])
bier_route_1.add_vpp_config()
@@ -802,7 +808,7 @@ class TestBier(VppTestCase):
#
bier_de_1 = VppBierDispEntry(self, bdt.id, 99,
BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
- DpoProto.DPO_PROTO_BIER,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
"0.0.0.0", 0, rpf_id=8192)
bier_de_1.add_vpp_config()
diff --git a/test/test_classifier.py b/test/test_classifier.py
index 5b0eddb5bce..4892d26a515 100644
--- a/test/test_classifier.py
+++ b/test/test_classifier.py
@@ -10,6 +10,8 @@ from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, TCP
from util import ppp
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
class TestClassifier(VppTestCase):
@@ -95,18 +97,6 @@ class TestClassifier(VppTestCase):
self.logger.info(self.vapi.cli("show classify table verbose"))
self.logger.info(self.vapi.cli("show ip fib"))
- def config_pbr_fib_entry(self, intf, is_add=1):
- """Configure fib entry to route traffic toward PBR VRF table
-
- :param VppInterface intf: destination interface to be routed for PBR.
-
- """
- addr_len = 24
- self.vapi.ip_add_del_route(dst_address=intf.local_ip4n,
- dst_address_length=addr_len,
- next_hop_address=intf.remote_ip4n,
- table_id=self.pbr_vrfid, is_add=is_add)
-
def create_stream(self, src_if, dst_if, packet_sizes,
proto_l=UDP(sport=1234, dport=5678)):
"""Create input packet stream for defined interfaces.
@@ -183,11 +173,9 @@ class TestClassifier(VppTestCase):
:param int vrf_id: The FIB table / VRF ID to be verified.
:return: 1 if the FIB table / VRF ID is configured, otherwise return 0.
"""
- ip_fib_dump = self.vapi.ip_fib_dump()
- vrf_count = 0
- for ip_fib_details in ip_fib_dump:
- if ip_fib_details[2] == vrf_id:
- vrf_count += 1
+ ip_fib_dump = self.vapi.ip_route_dump(vrf_id, False)
+ vrf_count = len(ip_fib_dump)
+
if vrf_count == 0:
self.logger.info("IPv4 VRF ID %d is not configured" % vrf_id)
return 0
@@ -846,7 +834,12 @@ class TestClassifierPBR(TestClassifier):
self.build_ip_match(src_ip=self.pg0.remote_ip4),
pbr_option, self.pbr_vrfid)
self.assertTrue(self.verify_vrf(self.pbr_vrfid))
- self.config_pbr_fib_entry(self.pg3)
+ r = VppIpRoute(self, self.pg3.local_ip4, 24,
+ [VppRoutePath(self.pg3.remote_ip4,
+ INVALID_INDEX)],
+ table_id=self.pbr_vrfid)
+ r.add_vpp_config()
+
self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
self.pg_enable_capture(self.pg_interfaces)
@@ -860,7 +853,7 @@ class TestClassifierPBR(TestClassifier):
self.pg2.assert_nothing_captured(remark="packets forwarded")
# remove the classify session and the route
- self.config_pbr_fib_entry(self.pg3, is_add=0)
+ r.remove_vpp_config()
self.create_classify_session(
self.acl_tbl_idx.get(key),
self.build_ip_match(src_ip=self.pg0.remote_ip4),
diff --git a/test/test_dhcp6.py b/test/test_dhcp6.py
index e017feebffe..2037e62e9ea 100644
--- a/test/test_dhcp6.py
+++ b/test/test_dhcp6.py
@@ -226,7 +226,7 @@ class TestDHCPv6IANAControlPlane(VppTestCase):
self.T1 = 1
self.T2 = 2
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
self.initial_addresses = set(self.get_interface_addresses(fib,
self.pg0))
@@ -247,14 +247,14 @@ class TestDHCPv6IANAControlPlane(VppTestCase):
def get_interface_addresses(fib, pg):
lst = []
for entry in fib:
- if entry.address_length == 128:
- path = entry.path[0]
+ if entry.route.prefix.prefixlen == 128:
+ path = entry.route.paths[0]
if path.sw_if_index == pg.sw_if_index:
- lst.append(entry.address)
+ lst.append(str(entry.route.prefix.network_address))
return lst
def get_addresses(self):
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg0))
return addresses.difference(self.initial_addresses)
@@ -376,12 +376,12 @@ class TestDHCPv6IANAControlPlane(VppTestCase):
new_addresses = self.get_addresses()
self.assertEqual(len(new_addresses), 1)
addr = list(new_addresses)[0]
- self.assertEqual(inet_ntop(AF_INET6, addr), '7:8::2')
+ self.assertEqual(addr, '7:8::2')
self.sleep(2)
# check that the address is deleted
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg0))
new_addresses = addresses.difference(self.initial_addresses)
self.assertEqual(len(new_addresses), 0)
@@ -430,7 +430,7 @@ class TestDHCPv6IANAControlPlane(VppTestCase):
self.sleep(0.5)
# check FIB contains no addresses
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg0))
new_addresses = addresses.difference(self.initial_addresses)
self.assertEqual(len(new_addresses), 0)
@@ -447,7 +447,7 @@ class TestDHCPv6IANAControlPlane(VppTestCase):
self.sleep(0.5)
# check FIB contains no addresses
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg0))
new_addresses = addresses.difference(self.initial_addresses)
self.assertEqual(len(new_addresses), 0)
@@ -477,7 +477,7 @@ class TestDHCPv6PDControlPlane(VppTestCase):
self.T1 = 1
self.T2 = 2
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
self.initial_addresses = set(self.get_interface_addresses(fib,
self.pg1))
@@ -503,14 +503,14 @@ class TestDHCPv6PDControlPlane(VppTestCase):
def get_interface_addresses(fib, pg):
lst = []
for entry in fib:
- if entry.address_length == 128:
- path = entry.path[0]
+ if entry.route.prefix.prefixlen == 128:
+ path = entry.route.paths[0]
if path.sw_if_index == pg.sw_if_index:
- lst.append(entry.address)
+ lst.append(str(entry.route.prefix.network_address))
return lst
def get_addresses(self):
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg1))
return addresses.difference(self.initial_addresses)
@@ -642,7 +642,7 @@ class TestDHCPv6PDControlPlane(VppTestCase):
new_addresses = self.get_addresses()
self.assertEqual(len(new_addresses), 1)
addr = list(new_addresses)[0]
- self.assertEqual(inet_ntop(AF_INET6, addr), '7:8:0:2::405')
+ self.assertEqual(addr, '7:8:0:2::405')
self.sleep(1)
@@ -656,21 +656,21 @@ class TestDHCPv6PDControlPlane(VppTestCase):
self.sleep(1)
# check FIB contains 2 addresses
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg1))
new_addresses = addresses.difference(self.initial_addresses)
self.assertEqual(len(new_addresses), 2)
addr1 = list(new_addresses)[0]
addr2 = list(new_addresses)[1]
- if inet_ntop(AF_INET6, addr1) == '7:8:0:76::406':
+ if addr1 == '7:8:0:76::406':
addr1, addr2 = addr2, addr1
- self.assertEqual(inet_ntop(AF_INET6, addr1), '7:8:0:2::405')
- self.assertEqual(inet_ntop(AF_INET6, addr2), '7:8:0:76::406')
+ self.assertEqual(addr1, '7:8:0:2::405')
+ self.assertEqual(addr2, '7:8:0:76::406')
self.sleep(1)
# check that the addresses are deleted
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg1))
new_addresses = addresses.difference(self.initial_addresses)
self.assertEqual(len(new_addresses), 0)
@@ -738,7 +738,7 @@ class TestDHCPv6PDControlPlane(VppTestCase):
self.sleep(0.5)
# check FIB contains no addresses
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg1))
new_addresses = addresses.difference(self.initial_addresses)
self.assertEqual(len(new_addresses), 0)
@@ -771,7 +771,7 @@ class TestDHCPv6PDControlPlane(VppTestCase):
self.sleep(0.5)
# check FIB contains no addresses
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg1))
new_addresses = addresses.difference(self.initial_addresses)
self.assertEqual(len(new_addresses), 0)
diff --git a/test/test_dvr.py b/test/test_dvr.py
index ae7864cc3e6..62dcb611d55 100644
--- a/test/test_dvr.py
+++ b/test/test_dvr.py
@@ -2,7 +2,7 @@
import unittest
from framework import VppTestCase, VppTestRunner
-from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathType
from vpp_l2 import L2_PORT_TYPE
from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint
@@ -120,7 +120,7 @@ class TestDVR(VppTestCase):
self, ip_non_tag_bridged, 32,
[VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
- is_dvr=1)])
+ type=FibPathType.FIB_PATH_TYPE_DVR)])
route_no_tag.add_vpp_config()
#
@@ -138,7 +138,7 @@ class TestDVR(VppTestCase):
self, ip_tag_bridged, 32,
[VppRoutePath("0.0.0.0",
sub_if_on_pg3.sw_if_index,
- is_dvr=1)])
+ type=FibPathType.FIB_PATH_TYPE_DVR)])
route_with_tag.add_vpp_config()
#
@@ -235,17 +235,19 @@ class TestDVR(VppTestCase):
#
# Do a FIB dump to make sure the paths are correctly reported as DVR
#
- routes = self.vapi.ip_fib_dump()
+ routes = self.vapi.ip_route_dump(0)
for r in routes:
- if (inet_pton(AF_INET, ip_tag_bridged) == r.address):
- self.assertEqual(r.path[0].sw_if_index,
+ if (ip_tag_bridged == str(r.route.prefix.network_address)):
+ self.assertEqual(r.route.paths[0].sw_if_index,
sub_if_on_pg3.sw_if_index)
- self.assertEqual(r.path[0].is_dvr, 1)
- if (inet_pton(AF_INET, ip_non_tag_bridged) == r.address):
- self.assertEqual(r.path[0].sw_if_index,
+ self.assertEqual(r.route.paths[0].type,
+ FibPathType.FIB_PATH_TYPE_DVR)
+ if (ip_non_tag_bridged == str(r.route.prefix.network_address)):
+ self.assertEqual(r.route.paths[0].sw_if_index,
self.pg1.sw_if_index)
- self.assertEqual(r.path[0].is_dvr, 1)
+ self.assertEqual(r.route.paths[0].type,
+ FibPathType.FIB_PATH_TYPE_DVR)
#
# the explicit route delete is require so it happens before
@@ -332,14 +334,16 @@ class TestDVR(VppTestCase):
#
# Add a DVR route to steer traffic at L3
#
- route_1 = VppIpRoute(self, "1.1.1.1", 32,
- [VppRoutePath("0.0.0.0",
- self.pg1.sw_if_index,
- is_dvr=1)])
- route_2 = VppIpRoute(self, "1.1.1.2", 32,
- [VppRoutePath("0.0.0.0",
- sub_if_on_pg2.sw_if_index,
- is_dvr=1)])
+ route_1 = VppIpRoute(
+ self, "1.1.1.1", 32,
+ [VppRoutePath("0.0.0.0",
+ self.pg1.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_DVR)])
+ route_2 = VppIpRoute(
+ self, "1.1.1.2", 32,
+ [VppRoutePath("0.0.0.0",
+ sub_if_on_pg2.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_DVR)])
route_1.add_vpp_config()
route_2.add_vpp_config()
diff --git a/test/test_gbp.py b/test/test_gbp.py
index 19ca81b9cfe..42defbf0d5c 100644
--- a/test/test_gbp.py
+++ b/test/test_gbp.py
@@ -17,7 +17,8 @@ from framework import VppTestCase, VppTestRunner
from vpp_object import VppObject
from vpp_interface import VppInterface
from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, \
- VppIpInterfaceAddress, VppIpInterfaceBind, find_route
+ VppIpInterfaceAddress, VppIpInterfaceBind, find_route, FibPathProto, \
+ FibPathType
from vpp_l2 import VppBridgeDomain, VppBridgeDomainPort, \
VppBridgeDomainArpEntry, VppL2FibEntry, find_bridge_domain_port, VppL2Vtr
from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint
@@ -920,13 +921,13 @@ class TestGBP(VppTestCase):
ba.add_vpp_config()
# floating IPs route via EPG recirc
- r = VppIpRoute(self, fip.address, fip.length,
- [VppRoutePath(fip.address,
- ep.recirc.recirc.sw_if_index,
- is_dvr=1,
- proto=fip.dpo_proto)],
- table_id=20,
- is_ip6=fip.is_ip6)
+ r = VppIpRoute(
+ self, fip.address, fip.length,
+ [VppRoutePath(fip.address,
+ ep.recirc.recirc.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_DVR,
+ proto=fip.dpo_proto)],
+ table_id=20)
r.add_vpp_config()
# L2 FIB entries in the NAT EPG BD to bridge the packets from
diff --git a/test/test_geneve.py b/test/test_geneve.py
index 86515f40430..04271e31b69 100644
--- a/test/test_geneve.py
+++ b/test/test_geneve.py
@@ -10,6 +10,8 @@ from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.geneve import GENEVE
from scapy.utils import atol
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
class TestGeneve(BridgeDomain, VppTestCase):
@@ -85,13 +87,16 @@ class TestGeneve(BridgeDomain, VppTestCase):
# Create 10 ucast geneve tunnels under bd
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
- next_hop_address = cls.pg0.remote_ip4n
- for dest_ip4n in ip4n_range(next_hop_address, ip_range_start,
- ip_range_end):
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
+ ip_range_end):
# add host route so dest_ip4n will not be resolved
- cls.vapi.ip_add_del_route(dst_address=dest_ip4n,
- dst_address_length=32,
- next_hop_address=next_hop_address)
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+ dest_ip4n = socket.inet_pton(socket.AF_INET, dest_ip4)
r = cls.vapi.geneve_add_del_tunnel(
local_address=cls.pg0.local_ip4n, remote_address=dest_ip4n,
vni=vni)
diff --git a/test/test_gre.py b/test/test_gre.py
index 7936334ba77..c5239b2cf9d 100644
--- a/test/test_gre.py
+++ b/test/test_gre.py
@@ -13,7 +13,7 @@ from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint
from vpp_gre_interface import VppGreInterface
from vpp_ip import DpoProto
-from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, FibPathProto
from util import ppp, ppc
from vpp_papi import VppEnum
@@ -576,8 +576,7 @@ class TestGRE(VppTestCase):
self, "2001::1", 128,
[VppRoutePath("::",
gre_if.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=DpoProto.DPO_PROTO_IP6)])
route6_via_tun.add_vpp_config()
tx = self.create_stream_ip6(self.pg0, "2001::2", "2001::1")
@@ -615,12 +614,9 @@ class TestGRE(VppTestCase):
gre_if.admin_up()
gre_if.config_ip6()
- route_via_tun = VppIpRoute(
- self, "4004::1", 128,
- [VppRoutePath("0::0",
- gre_if.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ route_via_tun = VppIpRoute(self, "4004::1", 128,
+ [VppRoutePath("0::0",
+ gre_if.sw_if_index)])
route_via_tun.add_vpp_config()
@@ -638,12 +634,9 @@ class TestGRE(VppTestCase):
#
# Add a route that resolves the tunnel's destination
#
- route_tun_dst = VppIpRoute(
- self, "1002::1", 128,
- [VppRoutePath(self.pg2.remote_ip6,
- self.pg2.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ route_tun_dst = VppIpRoute(self, "1002::1", 128,
+ [VppRoutePath(self.pg2.remote_ip6,
+ self.pg2.sw_if_index)])
route_tun_dst.add_vpp_config()
#
diff --git a/test/test_gtpu.py b/test/test_gtpu.py
index 23f1bd8f694..957181a71e4 100644
--- a/test/test_gtpu.py
+++ b/test/test_gtpu.py
@@ -11,6 +11,8 @@ from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from scapy.contrib.gtp import GTP_U_Header
from scapy.utils import atol
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
class TestGtpuUDP(VppTestCase):
@@ -224,13 +226,16 @@ class TestGtpu(BridgeDomain, VppTestCase):
# Create 10 ucast gtpu tunnels under bd
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
- next_hop_address = cls.pg0.remote_ip4n
- for dest_ip4n in ip4n_range(next_hop_address, ip_range_start,
- ip_range_end):
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
+ ip_range_end):
# add host route so dest_ip4n will not be resolved
- cls.vapi.ip_add_del_route(dst_address=dest_ip4n,
- dst_address_length=32,
- next_hop_address=next_hop_address)
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+ dest_ip4n = socket.inet_pton(socket.AF_INET, dest_ip4)
r = cls.vapi.gtpu_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_ip4n,
diff --git a/test/test_interface_crud.py b/test/test_interface_crud.py
index b41b2fe5b7c..ae30a6d47ea 100644
--- a/test/test_interface_crud.py
+++ b/test/test_interface_crud.py
@@ -91,7 +91,7 @@ class TestLoopbackInterfaceCRUD(VppTestCase):
# read (check sw if dump, ip4 fib, ip6 fib)
if_dump = self.vapi.sw_interface_dump()
- fib4_dump = self.vapi.ip_fib_dump()
+ fib4_dump = self.vapi.ip_route_dump(0)
for i in loopbacks:
self.assertTrue(i.is_interface_config_in_dump(if_dump))
self.assertTrue(i.is_ip4_entry_in_fib_dump(fib4_dump))
@@ -111,7 +111,7 @@ class TestLoopbackInterfaceCRUD(VppTestCase):
# read (check not in sw if dump, ip4 fib, ip6 fib)
if_dump = self.vapi.sw_interface_dump()
- fib4_dump = self.vapi.ip_fib_dump()
+ fib4_dump = self.vapi.ip_route_dump(0)
for i in loopbacks:
self.assertFalse(i.is_interface_config_in_dump(if_dump))
self.assertFalse(i.is_ip4_entry_in_fib_dump(fib4_dump))
@@ -138,7 +138,7 @@ class TestLoopbackInterfaceCRUD(VppTestCase):
# read (check not in sw if dump, ip4 fib, ip6 fib)
if_dump = self.vapi.sw_interface_dump()
- fib4_dump = self.vapi.ip_fib_dump()
+ fib4_dump = self.vapi.ip_route_dump(0)
for i in loopbacks:
self.assertTrue(i.is_interface_config_in_dump(if_dump))
self.assertFalse(i.is_ip4_entry_in_fib_dump(fib4_dump))
diff --git a/test/test_ip4.py b/test/test_ip4.py
index 6d6aeb0a5d9..933958911fe 100644
--- a/test/test_ip4.py
+++ b/test/test_ip4.py
@@ -15,7 +15,7 @@ from framework import VppTestCase, VppTestRunner
from util import ppp
from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpMRoute, \
VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind, \
- VppMplsTable, VppIpTable
+ VppMplsTable, VppIpTable, FibPathType, find_route
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
from vpp_papi import VppEnum
@@ -80,7 +80,6 @@ class TestIPv4(VppTestCase):
i.resolve_arp()
# config 2M FIB entries
- self.config_fib_entries(200)
def tearDown(self):
"""Run standard test teardown and log ``show ip arp``."""
@@ -90,33 +89,6 @@ class TestIPv4(VppTestCase):
self.logger.info(self.vapi.cli("show ip arp"))
# info(self.vapi.cli("show ip fib")) # many entries
- def config_fib_entries(self, count):
- """For each interface add to the FIB table *count* routes to
- "10.0.0.1/32" destination with interface's local address as next-hop
- address.
-
- :param int count: Number of FIB entries.
-
- - *TODO:* check if the next-hop address shouldn't be remote address
- instead of local address.
- """
- n_int = len(self.interfaces)
- percent = 0
- counter = 0.0
- dest_addr = socket.inet_pton(socket.AF_INET, "10.0.0.1")
- dest_addr_len = 32
- for i in self.interfaces:
- next_hop_address = i.local_ip4n
- for j in range(count / n_int):
- self.vapi.ip_add_del_route(dst_address=dest_addr,
- dst_address_length=dest_addr_len,
- next_hop_address=next_hop_address)
- counter += 1
- if counter / count * 100 > percent:
- self.logger.info("Configure %d FIB entries .. %d%% done" %
- (count, percent))
- percent += 1
-
def modify_packet(self, src_if, packet_size, pkt):
"""Add load, set destination IP and extend packet to required packet
size for defined interface.
@@ -318,7 +290,8 @@ class TestIPv4FibCrud(VppTestCase):
..note:: Python API is too slow to add many routes, needs replacement.
"""
- def config_fib_many_to_one(self, start_dest_addr, next_hop_addr, count):
+ def config_fib_many_to_one(self, start_dest_addr, next_hop_addr,
+ count, start=0):
"""
:param start_dest_addr:
@@ -326,42 +299,30 @@ class TestIPv4FibCrud(VppTestCase):
:param count:
:return list: added ips with 32 prefix
"""
- added_ips = []
- dest_addr = int(binascii.hexlify(socket.inet_pton(socket.AF_INET,
- start_dest_addr)), 16)
- dest_addr_len = 32
- n_next_hop_addr = socket.inet_pton(socket.AF_INET, next_hop_addr)
- for _ in range(count):
- n_dest_addr = binascii.unhexlify('{:08x}'.format(dest_addr))
- self.vapi.ip_add_del_route(dst_address=n_dest_addr,
- dst_address_length=dest_addr_len,
- next_hop_address=n_next_hop_addr)
- added_ips.append(socket.inet_ntoa(n_dest_addr))
- dest_addr += 1
- return added_ips
-
- def unconfig_fib_many_to_one(self, start_dest_addr, next_hop_addr, count):
-
- removed_ips = []
- dest_addr = int(binascii.hexlify(socket.inet_pton(socket.AF_INET,
- start_dest_addr)), 16)
- dest_addr_len = 32
- n_next_hop_addr = socket.inet_pton(socket.AF_INET, next_hop_addr)
- for _ in range(count):
- n_dest_addr = binascii.unhexlify('{:08x}'.format(dest_addr))
- self.vapi.ip_add_del_route(dst_address=n_dest_addr,
- dst_address_length=dest_addr_len,
- next_hop_address=n_next_hop_addr,
- is_add=0)
- removed_ips.append(socket.inet_ntoa(n_dest_addr))
- dest_addr += 1
- return removed_ips
-
- def create_stream(self, src_if, dst_if, dst_ips, count):
+ routes = []
+ for i in range(count):
+ r = VppIpRoute(self, start_dest_addr % (i + start), 32,
+ [VppRoutePath(next_hop_addr, 0xffffffff)])
+ r.add_vpp_config()
+ routes.append(r)
+ return routes
+
+ def unconfig_fib_many_to_one(self, start_dest_addr, next_hop_addr,
+ count, start=0):
+
+ routes = []
+ for i in range(count):
+ r = VppIpRoute(self, start_dest_addr % (i + start), 32,
+ [VppRoutePath(next_hop_addr, 0xffffffff)])
+ r.remove_vpp_config()
+ routes.append(r)
+ return routes
+
+ def create_stream(self, src_if, dst_if, routes, count):
pkts = []
for _ in range(count):
- dst_addr = random.choice(dst_ips)
+ dst_addr = random.choice(routes).prefix.address
info = self.create_packet_info(src_if, dst_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
@@ -389,18 +350,6 @@ class TestIPv4FibCrud(VppTestCase):
return p
return None
- @staticmethod
- def _match_route_detail(route_detail, ip, address_length=32, table_id=0):
- if route_detail.address == socket.inet_pton(socket.AF_INET, ip):
- if route_detail.table_id != table_id:
- return False
- elif route_detail.address_length != address_length:
- return False
- else:
- return True
- else:
- return False
-
def verify_capture(self, dst_interface, received_pkts, expected_pkts):
self.assertEqual(len(received_pkts), len(expected_pkts))
to_verify = list(expected_pkts)
@@ -411,27 +360,13 @@ class TestIPv4FibCrud(VppTestCase):
to_verify.remove(x)
self.assertListEqual(to_verify, [])
- def verify_route_dump(self, fib_dump, ips):
-
- def _ip_in_route_dump(ip, fib_dump):
- return next((route for route in fib_dump
- if self._match_route_detail(route, ip)),
- False)
-
- for ip in ips:
- self.assertTrue(_ip_in_route_dump(ip, fib_dump),
- 'IP {!s} is not in fib dump.'.format(ip))
-
- def verify_not_in_route_dump(self, fib_dump, ips):
-
- def _ip_in_route_dump(ip, fib_dump):
- return next((route for route in fib_dump
- if self._match_route_detail(route, ip)),
- False)
+ def verify_route_dump(self, routes):
+ for r in routes:
+ self.assertTrue(find_route(self, r.prefix.address, r.prefix.len))
- for ip in ips:
- self.assertFalse(_ip_in_route_dump(ip, fib_dump),
- 'IP {!s} is in fib dump.'.format(ip))
+ def verify_not_in_route_dump(self, routes):
+ for r in routes:
+ self.assertFalse(find_route(self, r.prefix.address, r.prefix.len))
@classmethod
def setUpClass(cls):
@@ -474,16 +409,13 @@ class TestIPv4FibCrud(VppTestCase):
self.deleted_routes = []
def test_1_add_routes(self):
- """ Add 1k routes
+ """ Add 1k routes """
- - add 100 routes check with traffic script.
- """
- # config 1M FIB entries
+ # add 100 routes check with traffic script.
self.configured_routes.extend(self.config_fib_many_to_one(
- "10.0.0.0", self.pg0.remote_ip4, 100))
+ "10.0.0.%d", self.pg0.remote_ip4, 100))
- fib_dump = self.vapi.ip_fib_dump()
- self.verify_route_dump(fib_dump, self.configured_routes)
+ self.verify_route_dump(self.configured_routes)
self.stream_1 = self.create_stream(
self.pg1, self.pg0, self.configured_routes, 100)
@@ -505,14 +437,13 @@ class TestIPv4FibCrud(VppTestCase):
"""
# config 1M FIB entries
self.configured_routes.extend(self.config_fib_many_to_one(
- "10.0.0.0", self.pg0.remote_ip4, 100))
+ "10.0.0.%d", self.pg0.remote_ip4, 100))
self.deleted_routes.extend(self.unconfig_fib_many_to_one(
- "10.0.0.10", self.pg0.remote_ip4, 10))
+ "10.0.0.%d", self.pg0.remote_ip4, 10, start=10))
for x in self.deleted_routes:
self.configured_routes.remove(x)
- fib_dump = self.vapi.ip_fib_dump()
- self.verify_route_dump(fib_dump, self.configured_routes)
+ self.verify_route_dump(self.configured_routes)
self.stream_1 = self.create_stream(
self.pg1, self.pg0, self.configured_routes, 100)
@@ -538,23 +469,22 @@ class TestIPv4FibCrud(VppTestCase):
"""
# config 1M FIB entries
self.configured_routes.extend(self.config_fib_many_to_one(
- "10.0.0.0", self.pg0.remote_ip4, 100))
+ "10.0.0.%d", self.pg0.remote_ip4, 100))
self.deleted_routes.extend(self.unconfig_fib_many_to_one(
- "10.0.0.10", self.pg0.remote_ip4, 10))
+ "10.0.0.%d", self.pg0.remote_ip4, 10, start=10))
for x in self.deleted_routes:
self.configured_routes.remove(x)
tmp = self.config_fib_many_to_one(
- "10.0.0.10", self.pg0.remote_ip4, 5)
+ "10.0.0.%d", self.pg0.remote_ip4, 5, start=10)
self.configured_routes.extend(tmp)
for x in tmp:
self.deleted_routes.remove(x)
self.configured_routes.extend(self.config_fib_many_to_one(
- "10.0.1.0", self.pg0.remote_ip4, 100))
+ "10.0.1.%d", self.pg0.remote_ip4, 100))
- fib_dump = self.vapi.ip_fib_dump()
- self.verify_route_dump(fib_dump, self.configured_routes)
+ self.verify_route_dump(self.configured_routes)
self.stream_1 = self.create_stream(
self.pg1, self.pg0, self.configured_routes, 300)
@@ -573,20 +503,15 @@ class TestIPv4FibCrud(VppTestCase):
pkts = self.pg0.get_capture(len(self.stream_1) + len(self.stream_2))
self.verify_capture(self.pg0, pkts, self.stream_1 + self.stream_2)
- def test_4_del_routes(self):
- """ Delete 1.5k routes
-
- - delete 5 routes check with traffic script.
- - add 100 routes check with traffic script.
- """
+ # delete 5 routes check with traffic script.
+ # add 100 routes check with traffic script.
self.deleted_routes.extend(self.unconfig_fib_many_to_one(
- "10.0.0.0", self.pg0.remote_ip4, 15))
+ "10.0.0.%d", self.pg0.remote_ip4, 15))
self.deleted_routes.extend(self.unconfig_fib_many_to_one(
- "10.0.0.20", self.pg0.remote_ip4, 85))
+ "10.0.0.%d", self.pg0.remote_ip4, 85))
self.deleted_routes.extend(self.unconfig_fib_many_to_one(
- "10.0.1.0", self.pg0.remote_ip4, 100))
- fib_dump = self.vapi.ip_fib_dump()
- self.verify_not_in_route_dump(fib_dump, self.deleted_routes)
+ "10.0.1.%d", self.pg0.remote_ip4, 100))
+ self.verify_not_in_route_dump(self.deleted_routes)
class TestIPNull(VppTestCase):
@@ -623,7 +548,11 @@ class TestIPNull(VppTestCase):
#
# A route via IP NULL that will reply with ICMP unreachables
#
- ip_unreach = VppIpRoute(self, "10.0.0.1", 32, [], is_unreach=1)
+ ip_unreach = VppIpRoute(
+ self, "10.0.0.1", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ type=FibPathType.FIB_PATH_TYPE_ICMP_UNREACH)])
ip_unreach.add_vpp_config()
p_unreach = (Ether(src=self.pg0.remote_mac,
@@ -631,7 +560,6 @@ class TestIPNull(VppTestCase):
IP(src=self.pg0.remote_ip4, dst="10.0.0.1") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
-
self.pg0.add_stream(p_unreach)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
@@ -653,7 +581,11 @@ class TestIPNull(VppTestCase):
#
# A route via IP NULL that will reply with ICMP prohibited
#
- ip_prohibit = VppIpRoute(self, "10.0.0.2", 32, [], is_prohibit=1)
+ ip_prohibit = VppIpRoute(
+ self, "10.0.0.2", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ type=FibPathType.FIB_PATH_TYPE_ICMP_PROHIBIT)])
ip_prohibit.add_vpp_config()
p_prohibit = (Ether(src=self.pg0.remote_mac,
@@ -695,7 +627,10 @@ class TestIPNull(VppTestCase):
#
# insert a more specific as a drop
#
- r2 = VppIpRoute(self, "1.1.1.1", 32, [], is_drop=1)
+ r2 = VppIpRoute(self, "1.1.1.1", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ type=FibPathType.FIB_PATH_TYPE_DROP)])
r2.add_vpp_config()
self.send_and_assert_no_replies(self.pg0, p * NUM_PKTS, "Drop Route")
@@ -1452,11 +1387,12 @@ class TestIPDeag(VppTestCase):
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=1)])
- route_to_src = VppIpRoute(self, "1.1.1.2", 32,
- [VppRoutePath("0.0.0.0",
- 0xffffffff,
- nh_table_id=2,
- is_source_lookup=1)])
+ route_to_src = VppIpRoute(
+ self, "1.1.1.2", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_table_id=2,
+ type=FibPathType.FIB_PATH_TYPE_SOURCE_LOOKUP)])
route_to_dst.add_vpp_config()
route_to_src.add_vpp_config()
@@ -1490,6 +1426,7 @@ class TestIPDeag(VppTestCase):
self.pg1.sw_if_index)],
table_id=1)
route_in_dst.add_vpp_config()
+
self.send_and_expect(self.pg0, pkts_dst, self.pg1)
#
diff --git a/test/test_ip4_vrf_multi_instance.py b/test/test_ip4_vrf_multi_instance.py
index 38604a53474..1004814b8f1 100644
--- a/test/test_ip4_vrf_multi_instance.py
+++ b/test/test_ip4_vrf_multi_instance.py
@@ -337,17 +337,19 @@ class TestIp4VrfMultiInst(VppTestCase):
:param int vrf_id: The FIB table / VRF ID to be verified.
:return: 1 if the FIB table / VRF ID is configured, otherwise return 0.
"""
- ip_fib_dump = self.vapi.ip_fib_dump()
- vrf_exist = False
+ ip_fib_dump = self.vapi.ip_route_dump(vrf_id)
+ vrf_exist = len(ip_fib_dump)
vrf_count = 0
for ip_fib_details in ip_fib_dump:
- if ip_fib_details.table_id == vrf_id:
- if not vrf_exist:
- vrf_exist = True
- addr = socket.inet_ntoa(ip_fib_details.address)
- found = False
- for pg_if in self.pg_if_by_vrf_id[vrf_id]:
- if found:
+ addr = ip_fib_details.route.prefix.network_address
+ found = False
+ for pg_if in self.pg_if_by_vrf_id[vrf_id]:
+ if found:
+ break
+ for host in pg_if.remote_hosts:
+ if str(addr) == host.ip4:
+ vrf_count += 1
+ found = True
break
for host in pg_if.remote_hosts:
if scapy.compat.raw(addr) == \
diff --git a/test/test_ip6.py b/test/test_ip6.py
index 4f267b815c4..35061b0b53c 100644
--- a/test/test_ip6.py
+++ b/test/test_ip6.py
@@ -23,11 +23,11 @@ from util import ppp, ip6_normalize, mk_ll_addr
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, VppIpMRoute, \
VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind, \
- VppMplsRoute, VppMplsTable, VppIpTable
+ VppMplsRoute, VppMplsTable, VppIpTable, FibPathType
from vpp_neighbor import find_nbr, VppNeighbor
from vpp_pg_interface import is_ipv6_misc
from vpp_sub_interface import VppSubInterface, VppDot1QSubint
-from ipaddress import IPv6Network, IPv4Network
+from ipaddress import IPv6Network, IPv4Network, IPv6Address
AF_INET6 = socket.AF_INET6
@@ -217,9 +217,6 @@ class TestIPv6(TestIPv6ND):
i.config_ip6()
i.resolve_ndp()
- # config 2M FIB entries
- self.config_fib_entries(200)
-
def tearDown(self):
"""Run standard test teardown and log ``show ip6 neighbors``."""
for i in self.interfaces:
@@ -234,34 +231,6 @@ class TestIPv6(TestIPv6ND):
self.logger.info(self.vapi.cli("show ip6 neighbors"))
# info(self.vapi.cli("show ip6 fib")) # many entries
- def config_fib_entries(self, count):
- """For each interface add to the FIB table *count* routes to
- "fd02::1/128" destination with interface's local address as next-hop
- address.
-
- :param int count: Number of FIB entries.
-
- - *TODO:* check if the next-hop address shouldn't be remote address
- instead of local address.
- """
- n_int = len(self.interfaces)
- percent = 0
- counter = 0.0
- dest_addr = inet_pton(AF_INET6, "fd02::1")
- dest_addr_len = 128
- for i in self.interfaces:
- next_hop_address = i.local_ip6n
- for j in range(count / n_int):
- self.vapi.ip_add_del_route(dst_address=dest_addr,
- dst_address_length=dest_addr_len,
- next_hop_address=next_hop_address,
- is_ipv6=1)
- counter += 1
- if counter / count * 100 > percent:
- self.logger.info("Configure %d FIB entries .. %d%% done" %
- (count, percent))
- percent += 1
-
def modify_packet(self, src_if, packet_size, pkt):
"""Add load, set destination IP and extend packet to required packet
size for defined interface.
@@ -483,8 +452,7 @@ class TestIPv6(TestIPv6ND):
self.pg0._remote_hosts[2].ip6))
self.assertFalse(find_route(self,
self.pg0._remote_hosts[2].ip6,
- 128,
- inet=AF_INET6))
+ 128))
#
# send an NS from a link local address to the interface's global
@@ -511,8 +479,7 @@ class TestIPv6(TestIPv6ND):
self.pg0._remote_hosts[2].ip6_ll))
self.assertFalse(find_route(self,
self.pg0._remote_hosts[2].ip6_ll,
- 128,
- inet=AF_INET6))
+ 128))
#
# An NS to the router's own Link-local
@@ -538,8 +505,7 @@ class TestIPv6(TestIPv6ND):
self.pg0._remote_hosts[3].ip6_ll))
self.assertFalse(find_route(self,
self.pg0._remote_hosts[3].ip6_ll,
- 128,
- inet=AF_INET6))
+ 128))
def test_ns_duplicates(self):
""" ND Duplicates"""
@@ -1204,33 +1170,33 @@ class TestIPv6RDControlPlane(TestIPv6ND):
def get_default_routes(fib):
list = []
for entry in fib:
- if entry.address_length == 0:
- for path in entry.path:
+ if entry.route.prefix.prefixlen == 0:
+ for path in entry.route.paths:
if path.sw_if_index != 0xFFFFFFFF:
- default_route = {}
- default_route['sw_if_index'] = path.sw_if_index
- default_route['next_hop'] = path.next_hop
- list.append(default_route)
+ defaut_route = {}
+ defaut_route['sw_if_index'] = path.sw_if_index
+ defaut_route['next_hop'] = path.nh.address.ip6
+ list.append(defaut_route)
return list
@staticmethod
def get_interface_addresses(fib, pg):
list = []
for entry in fib:
- if entry.address_length == 128:
- path = entry.path[0]
+ if entry.route.prefix.prefixlen == 128:
+ path = entry.route.paths[0]
if path.sw_if_index == pg.sw_if_index:
- list.append(entry.address)
+ list.append(str(entry.route.prefix.network_address))
return list
def test_all(self):
""" Test handling of SLAAC addresses and default routes """
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
default_routes = self.get_default_routes(fib)
initial_addresses = set(self.get_interface_addresses(fib, self.pg0))
self.assertEqual(default_routes, [])
- router_address = self.pg0.remote_ip6n_ll
+ router_address = IPv6Address(text_type(self.pg0.remote_ip6_ll))
self.vapi.ip6_nd_address_autoconfig(self.pg0.sw_if_index, 1, 1)
@@ -1258,14 +1224,15 @@ class TestIPv6RDControlPlane(TestIPv6ND):
self.sleep(0.1)
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
# check FIB for new address
addresses = set(self.get_interface_addresses(fib, self.pg0))
new_addresses = addresses.difference(initial_addresses)
self.assertEqual(len(new_addresses), 1)
- prefix = list(new_addresses)[0][:8] + '\0\0\0\0\0\0\0\0'
- self.assertEqual(inet_ntop(AF_INET6, prefix), '1::')
+ prefix = IPv6Network(text_type("%s/%d" % (list(new_addresses)[0], 20)),
+ strict=False)
+ self.assertEqual(prefix, IPv6Network(text_type('1::/20')))
# check FIB for new default route
default_routes = self.get_default_routes(fib)
@@ -1282,7 +1249,7 @@ class TestIPv6RDControlPlane(TestIPv6ND):
self.sleep(0.1)
# check that default route is deleted
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
default_routes = self.get_default_routes(fib)
self.assertEqual(len(default_routes), 0)
@@ -1296,7 +1263,7 @@ class TestIPv6RDControlPlane(TestIPv6ND):
self.sleep(0.1)
# check FIB for new default route
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
default_routes = self.get_default_routes(fib)
self.assertEqual(len(default_routes), 1)
dr = default_routes[0]
@@ -1311,7 +1278,7 @@ class TestIPv6RDControlPlane(TestIPv6ND):
self.sleep(0.1)
# check that default route still exists
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
default_routes = self.get_default_routes(fib)
self.assertEqual(len(default_routes), 1)
dr = default_routes[0]
@@ -1321,7 +1288,7 @@ class TestIPv6RDControlPlane(TestIPv6ND):
self.sleep(1)
# check that default route is deleted
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
default_routes = self.get_default_routes(fib)
self.assertEqual(len(default_routes), 0)
@@ -1330,13 +1297,14 @@ class TestIPv6RDControlPlane(TestIPv6ND):
new_addresses = addresses.difference(initial_addresses)
self.assertEqual(len(new_addresses), 1)
- prefix = list(new_addresses)[0][:8] + '\0\0\0\0\0\0\0\0'
- self.assertEqual(inet_ntop(AF_INET6, prefix), '1::')
+ prefix = IPv6Network(text_type("%s/%d" % (list(new_addresses)[0], 20)),
+ strict=False)
+ self.assertEqual(prefix, IPv6Network(text_type('1::/20')))
self.sleep(1)
# check that SLAAC address is deleted
- fib = self.vapi.ip6_fib_dump()
+ fib = self.vapi.ip_route_dump(0, True)
addresses = set(self.get_interface_addresses(fib, self.pg0))
new_addresses = addresses.difference(initial_addresses)
self.assertEqual(len(new_addresses), 0)
@@ -1579,7 +1547,10 @@ class TestIPNull(VppTestCase):
#
# A route via IP NULL that will reply with ICMP unreachables
#
- ip_unreach = VppIpRoute(self, "2001::", 64, [], is_unreach=1, is_ip6=1)
+ ip_unreach = VppIpRoute(
+ self, "2001::", 64,
+ [VppRoutePath("::", 0xffffffff,
+ type=FibPathType.FIB_PATH_TYPE_ICMP_UNREACH)])
ip_unreach.add_vpp_config()
self.pg0.add_stream(p)
@@ -1599,8 +1570,10 @@ class TestIPNull(VppTestCase):
#
# A route via IP NULL that will reply with ICMP prohibited
#
- ip_prohibit = VppIpRoute(self, "2001::1", 128, [],
- is_prohibit=1, is_ip6=1)
+ ip_prohibit = VppIpRoute(
+ self, "2001::1", 128,
+ [VppRoutePath("::", 0xffffffff,
+ type=FibPathType.FIB_PATH_TYPE_ICMP_PROHIBIT)])
ip_prohibit.add_vpp_config()
self.pg0.add_stream(p)
@@ -1661,8 +1634,7 @@ class TestIPDisabled(VppTestCase):
[VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg0.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
- is_ip6=1)
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_ff_01.add_vpp_config()
pu = (Ether(src=self.pg1.remote_mac,
@@ -1821,12 +1793,9 @@ class TestIP6LoadBalance(VppTestCase):
#
route_3000_1 = VppIpRoute(self, "3000::1", 128,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6),
+ self.pg1.sw_if_index),
VppRoutePath(self.pg2.remote_ip6,
- self.pg2.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg2.sw_if_index)])
route_3000_1.add_vpp_config()
#
@@ -1841,12 +1810,10 @@ class TestIP6LoadBalance(VppTestCase):
route_67 = VppMplsRoute(self, 67, 0,
[VppRoutePath(self.pg1.remote_ip6,
self.pg1.sw_if_index,
- labels=[67],
- proto=DpoProto.DPO_PROTO_IP6),
+ labels=[67]),
VppRoutePath(self.pg2.remote_ip6,
self.pg2.sw_if_index,
- labels=[67],
- proto=DpoProto.DPO_PROTO_IP6)])
+ labels=[67])])
route_67.add_vpp_config()
#
@@ -1920,22 +1887,16 @@ class TestIP6LoadBalance(VppTestCase):
route_3000_2 = VppIpRoute(self, "3000::2", 128,
[VppRoutePath(self.pg3.remote_ip6,
- self.pg3.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6),
+ self.pg3.sw_if_index),
VppRoutePath(self.pg4.remote_ip6,
- self.pg4.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg4.sw_if_index)])
route_3000_2.add_vpp_config()
route_4000_1 = VppIpRoute(self, "4000::1", 128,
[VppRoutePath("3000::1",
- 0xffffffff,
- proto=DpoProto.DPO_PROTO_IP6),
+ 0xffffffff),
VppRoutePath("3000::2",
- 0xffffffff,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ 0xffffffff)])
route_4000_1.add_vpp_config()
#
@@ -1966,16 +1927,12 @@ class TestIP6LoadBalance(VppTestCase):
route_5000_2 = VppIpRoute(self, "5000::2", 128,
[VppRoutePath(self.pg3.remote_ip6,
- self.pg3.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg3.sw_if_index)])
route_5000_2.add_vpp_config()
route_6000_1 = VppIpRoute(self, "6000::1", 128,
[VppRoutePath("5000::2",
- 0xffffffff,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ 0xffffffff)])
route_6000_1.add_vpp_config()
#
@@ -2168,16 +2125,14 @@ class TestIPDeag(VppTestCase):
route_to_dst = VppIpRoute(self, "1::1", 128,
[VppRoutePath("::",
0xffffffff,
- nh_table_id=1,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
- route_to_src = VppIpRoute(self, "1::2", 128,
- [VppRoutePath("::",
- 0xffffffff,
- nh_table_id=2,
- is_source_lookup=1,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ nh_table_id=1)])
+ route_to_src = VppIpRoute(
+ self, "1::2", 128,
+ [VppRoutePath("::",
+ 0xffffffff,
+ nh_table_id=2,
+ type=FibPathType.FIB_PATH_TYPE_SOURCE_LOOKUP)])
+
route_to_dst.add_vpp_config()
route_to_src.add_vpp_config()
@@ -2208,9 +2163,7 @@ class TestIPDeag(VppTestCase):
#
route_in_dst = VppIpRoute(self, "1::1", 128,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1,
+ self.pg1.sw_if_index)],
table_id=1)
route_in_dst.add_vpp_config()
@@ -2221,9 +2174,7 @@ class TestIPDeag(VppTestCase):
#
route_in_src = VppIpRoute(self, "2::2", 128,
[VppRoutePath(self.pg2.remote_ip6,
- self.pg2.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1,
+ self.pg2.sw_if_index)],
table_id=2)
route_in_src.add_vpp_config()
self.send_and_expect(self.pg0, pkts_src, self.pg2)
@@ -2233,9 +2184,7 @@ class TestIPDeag(VppTestCase):
#
route_loop = VppIpRoute(self, "3::3", 128,
[VppRoutePath("::",
- 0xffffffff,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ 0xffffffff)])
route_loop.add_vpp_config()
p_l = (Ether(src=self.pg0.remote_mac,
diff --git a/test/test_ip6_vrf_multi_instance.py b/test/test_ip6_vrf_multi_instance.py
index c4b057ba29d..88aed978d97 100644
--- a/test/test_ip6_vrf_multi_instance.py
+++ b/test/test_ip6_vrf_multi_instance.py
@@ -17,7 +17,7 @@
- send IP6 packets between all pg-ip6 interfaces in all VRF groups
**verify 1**
- - check VRF data by parsing output of ip6_fib_dump API command
+ - check VRF data by parsing output of ip_route_dump API command
- all packets received correctly in case of pg-ip6 interfaces in the same
VRF
- no packet received in case of pg-ip6 interfaces not in VRF
@@ -30,7 +30,7 @@
- send IP6 packets between all pg-ip6 interfaces in all VRF groups
**verify 2**
- - check VRF data by parsing output of ip6_fib_dump API command
+ - check VRF data by parsing output of ip_route_dump API command
- all packets received correctly in case of pg-ip6 interfaces in the same
VRF
- no packet received in case of pg-ip6 interfaces not in VRF
@@ -43,7 +43,7 @@
- send IP6 packets between all pg-ip6 interfaces in all VRF groups
**verify 3**
- - check VRF data by parsing output of ip6_fib_dump API command
+ - check VRF data by parsing output of ip_route_dump API command
- all packets received correctly in case of pg-ip6 interfaces in the same
VRF
- no packet received in case of pg-ip6 interfaces not in VRF
@@ -56,7 +56,7 @@
- send IP6 packets between all pg-ip6 interfaces in all VRF groups
**verify 4**
- - check VRF data by parsing output of ip6_fib_dump API command
+ - check VRF data by parsing output of ip_route_dump API command
- all packets received correctly in case of pg-ip6 interfaces in the same
VRF
- no packet received in case of pg-ip6 interfaces not in VRF
@@ -349,23 +349,20 @@ class TestIP6VrfMultiInst(VppTestCase):
:param int vrf_id: The FIB table / VRF ID to be verified.
:return: 1 if the FIB table / VRF ID is configured, otherwise return 0.
"""
- ip6_fib_dump = self.vapi.ip6_fib_dump()
- vrf_exist = False
+ ip6_fib_dump = self.vapi.ip_route_dump(vrf_id, True)
+ vrf_exist = len(ip6_fib_dump)
vrf_count = 0
for ip6_fib_details in ip6_fib_dump:
- if ip6_fib_details.table_id == vrf_id:
- if not vrf_exist:
- vrf_exist = True
- addr = inet_ntop(socket.AF_INET6, ip6_fib_details.address)
- found = False
- for pg_if in self.pg_if_by_vrf_id[vrf_id]:
- if found:
+ addr = ip6_fib_details.route.prefix.network_address
+ found = False
+ for pg_if in self.pg_if_by_vrf_id[vrf_id]:
+ if found:
+ break
+ for host in pg_if.remote_hosts:
+ if str(addr) == host.ip6:
+ vrf_count += 1
+ found = True
break
- for host in pg_if.remote_hosts:
- if str(addr) == str(host.ip6):
- vrf_count += 1
- found = True
- break
if not vrf_exist and vrf_count == 0:
self.logger.info("IPv6 VRF ID %d is not configured" % vrf_id)
return VRFState.not_configured
diff --git a/test/test_ip_ecmp.py b/test/test_ip_ecmp.py
index e3ceb594826..2fe266023f7 100644
--- a/test/test_ip_ecmp.py
+++ b/test/test_ip_ecmp.py
@@ -12,6 +12,7 @@ from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
+from vpp_ip_route import VppIpRoute, VppRoutePath
try:
text_type = unicode
@@ -178,22 +179,18 @@ class TestECMP(VppTestCase):
:param int dst_prefix_len: IP address prefix length.
:param int is_ipv6: 0 if an ip4 route, else ip6
"""
- af = socket.AF_INET if is_ipv6 == 0 else socket.AF_INET6
- dst_ip = socket.inet_pton(af, dst_ip_net)
+ paths = []
for pg_if in self.pg_interfaces[1:]:
for nh_host in pg_if.remote_hosts:
nh_host_ip = nh_host.ip4 if is_ipv6 == 0 else nh_host.ip6
- next_hop_address = socket.inet_pton(af, nh_host_ip)
- next_hop_sw_if_index = pg_if.sw_if_index
- self.vapi.ip_add_del_route(
- dst_address=dst_ip,
- dst_address_length=dst_prefix_len,
- next_hop_address=next_hop_address,
- next_hop_sw_if_index=next_hop_sw_if_index,
- is_ipv6=is_ipv6, is_multipath=1)
- self.logger.info("Route via %s on %s created" %
- (nh_host_ip, pg_if.name))
+ paths.append(VppRoutePath(nh_host_ip,
+ pg_if.sw_if_index))
+
+ rip = VppIpRoute(self, dst_ip_net, dst_prefix_len, paths)
+ rip.add_vpp_config()
+ self.logger.info("Route via %s on %s created" %
+ (nh_host_ip, pg_if.name))
self.logger.debug(self.vapi.ppcli("show ip fib"))
self.logger.debug(self.vapi.ppcli("show ip6 fib"))
diff --git a/test/test_ip_mcast.py b/test/test_ip_mcast.py
index 21794d63c3e..b753f9a3354 100644
--- a/test/test_ip_mcast.py
+++ b/test/test_ip_mcast.py
@@ -5,7 +5,7 @@ import unittest
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
from vpp_ip_route import VppIpMRoute, VppMRoutePath, VppMFibSignal, \
- MRouteItfFlags, MRouteEntryFlags, VppIpTable
+ MRouteItfFlags, MRouteEntryFlags, VppIpTable, FibPathProto
from scapy.packet import Raw
from scapy.layers.l2 import Ether
@@ -421,17 +421,16 @@ class TestIPMcast(VppTestCase):
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT,
- proto=DpoProto.DPO_PROTO_IP6),
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_IP6),
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_IP6),
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6),
VppMRoutePath(self.pg3.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
route_ff01_1.add_vpp_config()
#
@@ -445,14 +444,13 @@ class TestIPMcast(VppTestCase):
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT,
- proto=DpoProto.DPO_PROTO_IP6),
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_IP6),
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
route_2001_ff01_1.add_vpp_config()
#
@@ -466,11 +464,10 @@ class TestIPMcast(VppTestCase):
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT,
- proto=DpoProto.DPO_PROTO_IP6),
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
route_ff01.add_vpp_config()
#
@@ -687,6 +684,7 @@ class TestIPMcast(VppTestCase):
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
+
route_232_1_1_1.update_entry_flags(
MRouteEntryFlags.MFIB_ENTRY_FLAG_SIGNAL)
@@ -811,15 +809,14 @@ class TestIPMcast(VppTestCase):
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg8.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT,
- proto=DpoProto.DPO_PROTO_IP6),
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_IP6),
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
- proto=DpoProto.DPO_PROTO_IP6)],
- table_id=10,
- is_ip6=1)
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)],
+ table_id=10)
route_2001_ff01_1.add_vpp_config()
#
diff --git a/test/test_ipip.py b/test/test_ipip.py
index 6f1d3b883c0..1887417a5e0 100644
--- a/test/test_ipip.py
+++ b/test/test_ipip.py
@@ -6,7 +6,7 @@ from scapy.layers.inet6 import IPv6, Ether, IP, UDP, IPv6ExtHdrFragment
from scapy.all import fragment, fragment6, RandShort, defragment6
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
-from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, FibPathProto
from socket import AF_INET, AF_INET6, inet_pton
from util import reassemble4
@@ -101,14 +101,14 @@ class TestIPIP(VppTestCase):
self, "130.67.0.0", 16,
[VppRoutePath("0.0.0.0",
sw_if_index,
- proto=DpoProto.DPO_PROTO_IP4)], is_ip6=0)
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)])
ip4_via_tunnel.add_vpp_config()
ip6_via_tunnel = VppIpRoute(
self, "dead::", 16,
[VppRoutePath("::",
sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1)
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
ip6_via_tunnel.add_vpp_config()
# IPv6 in to IPv4 tunnel
@@ -308,14 +308,14 @@ class TestIPIP6(VppTestCase):
self, "130.67.0.0", 16,
[VppRoutePath("0.0.0.0",
sw_if_index,
- proto=DpoProto.DPO_PROTO_IP4)], is_ip6=0)
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)])
ip4_via_tunnel.add_vpp_config()
ip6_via_tunnel = VppIpRoute(
self, "dead::", 16,
[VppRoutePath("::",
sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1)
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
ip6_via_tunnel.add_vpp_config()
self.tunnel_ip6_via_tunnel = ip6_via_tunnel
diff --git a/test/test_ipsec_ah.py b/test/test_ipsec_ah.py
index d6cbf58a46e..94c7ffc634e 100644
--- a/test/test_ipsec_ah.py
+++ b/test/test_ipsec_ah.py
@@ -93,8 +93,7 @@ class ConfigIpsecAH(TemplateIpsec):
r = VppIpRoute(self, p.remote_tun_if_host, p.addr_len,
[VppRoutePath(self.tun_if.remote_addr[p.addr_type],
0xffffffff,
- proto=d)],
- is_ip6=p.is_ipv6)
+ proto=d)])
r.add_vpp_config()
self.net_objs.append(r)
self.logger.info(self.vapi.ppcli("show ipsec all"))
diff --git a/test/test_ipsec_esp.py b/test/test_ipsec_esp.py
index 0abd96d4a10..8ed80c3d8de 100644
--- a/test/test_ipsec_esp.py
+++ b/test/test_ipsec_esp.py
@@ -74,8 +74,7 @@ class ConfigIpsecESP(TemplateIpsec):
r = VppIpRoute(self, p.remote_tun_if_host, p.addr_len,
[VppRoutePath(self.tun_if.remote_addr[p.addr_type],
0xffffffff,
- proto=d)],
- is_ip6=p.is_ipv6)
+ proto=d)])
r.add_vpp_config()
self.net_objs.append(r)
diff --git a/test/test_ipsec_nat.py b/test/test_ipsec_nat.py
index 3209def213b..07670d71b03 100644
--- a/test/test_ipsec_nat.py
+++ b/test/test_ipsec_nat.py
@@ -63,8 +63,7 @@ class IPSecNATTestCase(TemplateIpsec):
VppIpRoute(self, p.remote_tun_if_host, p.addr_len,
[VppRoutePath(self.tun_if.remote_addr[p.addr_type],
0xffffffff,
- proto=d)],
- is_ip6=p.is_ipv6).add_vpp_config()
+ proto=d)]).add_vpp_config()
def tearDown(self):
super(IPSecNATTestCase, self).tearDown()
diff --git a/test/test_ipsec_tun_if_esp.py b/test/test_ipsec_tun_if_esp.py
index 47b138fe486..5ef0bdbb8c6 100644
--- a/test/test_ipsec_tun_if_esp.py
+++ b/test/test_ipsec_tun_if_esp.py
@@ -47,14 +47,15 @@ class TemplateIpsec4TunIfEsp(TemplateIpsec):
p.tun_if.config_ip4()
p.tun_if.config_ip6()
- VppIpRoute(self, p.remote_tun_if_host, 32,
- [VppRoutePath(p.tun_if.remote_ip4,
- 0xffffffff)]).add_vpp_config()
- VppIpRoute(self, p.remote_tun_if_host6, 128,
- [VppRoutePath(p.tun_if.remote_ip6,
- 0xffffffff,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1).add_vpp_config()
+ r = VppIpRoute(self, p.remote_tun_if_host, 32,
+ [VppRoutePath(p.tun_if.remote_ip4,
+ 0xffffffff)])
+ r.add_vpp_config()
+ r = VppIpRoute(self, p.remote_tun_if_host6, 128,
+ [VppRoutePath(p.tun_if.remote_ip6,
+ 0xffffffff,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ r.add_vpp_config()
def tearDown(self):
if not self.vpp_dead:
@@ -119,14 +120,15 @@ class TemplateIpsec6TunIfEsp(TemplateIpsec):
tun_if.config_ip6()
tun_if.config_ip4()
- VppIpRoute(self, p.remote_tun_if_host, 128,
- [VppRoutePath(tun_if.remote_ip6,
- 0xffffffff,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1).add_vpp_config()
- VppIpRoute(self, p.remote_tun_if_host4, 32,
- [VppRoutePath(tun_if.remote_ip4,
- 0xffffffff)]).add_vpp_config()
+ r = VppIpRoute(self, p.remote_tun_if_host, 128,
+ [VppRoutePath(tun_if.remote_ip6,
+ 0xffffffff,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ r.add_vpp_config()
+ r = VppIpRoute(self, p.remote_tun_if_host4, 32,
+ [VppRoutePath(tun_if.remote_ip4,
+ 0xffffffff)])
+ r.add_vpp_config()
def tearDown(self):
if not self.vpp_dead:
@@ -433,11 +435,11 @@ class TestIpsec6MultiTunIfEsp(TemplateIpsec, IpsecTun6):
p.tun_if.admin_up()
p.tun_if.config_ip6()
- VppIpRoute(self, p.remote_tun_if_host, 128,
- [VppRoutePath(p.tun_if.remote_ip6,
- 0xffffffff,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1).add_vpp_config()
+ r = VppIpRoute(self, p.remote_tun_if_host, 128,
+ [VppRoutePath(p.tun_if.remote_ip6,
+ 0xffffffff,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ r.add_vpp_config()
def tearDown(self):
if not self.vpp_dead:
diff --git a/test/test_l3xc.py b/test/test_l3xc.py
index ceb95ce02dd..696e23507ac 100644
--- a/test/test_l3xc.py
+++ b/test/test_l3xc.py
@@ -34,30 +34,9 @@ class VppL3xc(VppObject):
self.intf = intf
self.is_ip6 = is_ip6
self.paths = paths
-
- def encode_paths(self):
- br_paths = []
- for p in self.paths:
- lstack = []
- for l in p.nh_labels:
- if type(l) == VppMplsLabel:
- lstack.append(l.encode())
- else:
- lstack.append({'label': l, 'ttl': 255})
- n_labels = len(lstack)
- while (len(lstack) < 16):
- lstack.append({})
- br_paths.append({'next_hop': p.nh_addr,
- 'weight': 1,
- 'afi': p.proto,
- 'sw_if_index': p.nh_itf,
- 'preference': 0,
- 'table_id': p.nh_table_id,
- 'next_hop_id': p.next_hop_id,
- 'is_udp_encap': p.is_udp_encap,
- 'n_labels': n_labels,
- 'label_stack': lstack})
- return br_paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
def add_vpp_config(self):
self._test.vapi.l3xc_update(
@@ -65,7 +44,7 @@ class VppL3xc(VppObject):
'is_ip6': self.is_ip6,
'sw_if_index': self.intf.sw_if_index,
'n_paths': len(self.paths),
- 'paths': self.encode_paths()
+ 'paths': self.encoded_paths
})
self._test.registry.register(self, self._test.logger)
diff --git a/test/test_lb.py b/test/test_lb.py
index 93b389a1444..4603bd10db8 100644
--- a/test/test_lb.py
+++ b/test/test_lb.py
@@ -9,6 +9,8 @@ from scapy.data import IP_PROTOS
from framework import VppTestCase
from util import ppp
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
""" TestLB is a subclass of VPPTestCase classes.
@@ -50,13 +52,17 @@ class TestLB(VppTestCase):
i.disable_ipv6_ra()
i.resolve_arp()
i.resolve_ndp()
- dst4 = socket.inet_pton(socket.AF_INET, "10.0.0.0")
- dst6 = socket.inet_pton(socket.AF_INET6, "2002::")
- cls.vapi.ip_add_del_route(dst_address=dst4, dst_address_length=24,
- next_hop_address=cls.pg1.remote_ip4n)
- cls.vapi.ip_add_del_route(dst_address=dst6, dst_address_length=16,
- next_hop_address=cls.pg1.remote_ip6n,
- is_ipv6=1)
+
+ dst4 = VppIpRoute(cls, "10.0.0.0", 24,
+ [VppRoutePath(cls.pg1.remote_ip4,
+ INVALID_INDEX)],
+ register=False)
+ dst4.add_vpp_config()
+ dst6 = VppIpRoute(cls, "2002::", 16,
+ [VppRoutePath(cls.pg1.remote_ip6,
+ INVALID_INDEX)],
+ register=False)
+ dst6.add_vpp_config()
cls.vapi.lb_conf(ip4_src_address="39.40.41.42",
ip6_src_address="2004::1")
except Exception:
diff --git a/test/test_map.py b/test/test_map.py
index 39698cd2f7d..f1388b39c65 100644
--- a/test/test_map.py
+++ b/test/test_map.py
@@ -2,6 +2,7 @@
import ipaddress
import unittest
+from ipaddress import IPv6Network, IPv4Network
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
@@ -105,9 +106,7 @@ class TestMAP(VppTestCase):
map_br_pfx,
map_br_pfx_len,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
map_route.add_vpp_config()
#
@@ -198,12 +197,9 @@ class TestMAP(VppTestCase):
# Add a route to 4001::1. Expect the encapped traffic to be
# sent via that routes next-hop
#
- pre_res_route = VppIpRoute(
- self, "4001::1", 128,
- [VppRoutePath(self.pg1.remote_hosts[2].ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ pre_res_route = VppIpRoute(self, "4001::1", 128,
+ [VppRoutePath(self.pg1.remote_hosts[2].ip6,
+ self.pg1.sw_if_index)])
pre_res_route.add_vpp_config()
self.send_and_assert_encapped(v4, "3000::1",
@@ -214,8 +210,7 @@ class TestMAP(VppTestCase):
# change the route to the pre-solved next-hop
#
pre_res_route.modify([VppRoutePath(self.pg1.remote_hosts[3].ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)])
+ self.pg1.sw_if_index)])
pre_res_route.add_vpp_config()
self.send_and_assert_encapped(v4, "3000::1",
@@ -289,8 +284,7 @@ class TestMAP(VppTestCase):
32,
[VppRoutePath(self.pg1.remote_ip6,
self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=DpoProto.DPO_PROTO_IP6)])
map_route.add_vpp_config()
#
diff --git a/test/test_memif.py b/test/test_memif.py
index d89e06bfb4b..c41d0aac983 100644
--- a/test/test_memif.py
+++ b/test/test_memif.py
@@ -9,8 +9,10 @@ from framework import VppTestCase, VppTestRunner, running_extended_tests
from remote_test import RemoteClass, RemoteVppTestCase
from vpp_memif import MEMIF_MODE, MEMIF_ROLE, remove_all_memif_vpp_config, \
VppSocketFilename, VppMemif
+from vpp_ip_route import VppIpRoute, VppRoutePath
+@unittest.skipIf(True, "doesn't work with VppEnums")
class TestMemif(VppTestCase):
""" Memif Test Case """
@@ -51,7 +53,7 @@ class TestMemif(VppTestCase):
return False
def test_memif_socket_filename_add_del(self):
- """ Memif socket filenale add/del """
+ """ Memif socket filename add/del """
# dump default socket filename
dump = self.vapi.memif_socket_filename_dump()
@@ -225,6 +227,7 @@ class TestMemif(VppTestCase):
def test_memif_ping(self):
""" Memif ping """
+
memif = VppMemif(self, MEMIF_ROLE.SLAVE, MEMIF_MODE.ETHERNET)
remote_socket = VppSocketFilename(self.remote_test, 1,
@@ -247,12 +250,8 @@ class TestMemif(VppTestCase):
self.assertTrue(remote_memif.wait_for_link_up(5))
# add routing to remote vpp
- dst_addr = socket.inet_pton(socket.AF_INET, self.pg0._local_ip4_subnet)
- dst_addr_len = 24
- next_hop_addr = socket.inet_pton(socket.AF_INET, memif.ip4_addr)
- self.remote_test.vapi.ip_add_del_route(dst_address=dst_addr,
- dst_address_length=dst_addr_len,
- next_hop_address=next_hop_addr)
+ VppIpRoute(self.remote_test, self.pg0._local_ip4_subnet, 24,
+ [VppRoutePath(memif.ip4_addr, 0xffffffff)]).add_vpp_config()
# create ICMP echo-request from local pg to remote memif
packet_num = 10
diff --git a/test/test_mpls.py b/test/test_mpls.py
index 79f3204c53e..d068bc37ee2 100644
--- a/test/test_mpls.py
+++ b/test/test_mpls.py
@@ -8,7 +8,8 @@ from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
MRouteItfFlags, MRouteEntryFlags, VppIpTable, VppMplsTable, \
- VppMplsLabel, MplsLspMode, find_mpls_route
+ VppMplsLabel, MplsLspMode, find_mpls_route, \
+ FibPathProto, FibPathType, FibPathFlags, VppMplsLabel, MplsLspMode
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
import scapy.compat
@@ -498,8 +499,8 @@ class TestMPLS(VppTestCase):
self, 333, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- labels=[],
- proto=DpoProto.DPO_PROTO_IP6)])
+ labels=[])],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
route_333_eos.add_vpp_config()
tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)])
@@ -523,8 +524,8 @@ class TestMPLS(VppTestCase):
self, 334, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- labels=[VppMplsLabel(3)],
- proto=DpoProto.DPO_PROTO_IP6)])
+ labels=[VppMplsLabel(3)])],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
route_334_eos.add_vpp_config()
tx = self.create_stream_labelled_ip6(self.pg0,
@@ -539,8 +540,8 @@ class TestMPLS(VppTestCase):
self, 335, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)],
- proto=DpoProto.DPO_PROTO_IP6)])
+ labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
route_335_eos.add_vpp_config()
tx = self.create_stream_labelled_ip6(
@@ -586,6 +587,7 @@ class TestMPLS(VppTestCase):
labels=[VppMplsLabel(44),
VppMplsLabel(45)])])
route_34_eos.add_vpp_config()
+ self.logger.info(self.vapi.cli("sh mpls fib 34"))
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(34, ttl=3)])
@@ -775,10 +777,8 @@ class TestMPLS(VppTestCase):
self, "2001::3", 128,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6,
labels=[VppMplsLabel(32,
- mode=MplsLspMode.UNIFORM)])],
- is_ip6=1)
+ mode=MplsLspMode.UNIFORM)])])
route_2001_3.add_vpp_config()
tx = self.create_stream_ip6(self.pg0, "2001::3",
@@ -968,7 +968,7 @@ class TestMPLS(VppTestCase):
VppMplsLabel(33, ttl=47)])
def test_mpls_tunnel_many(self):
- """ Multiple Tunnels """
+ """ MPLS Multiple Tunnels """
for ii in range(10):
mpls_tun = VppMPLSTunnelInterface(
@@ -1111,10 +1111,11 @@ class TestMPLS(VppTestCase):
# if the packet egresses, then we must have swapped to pg1
# so as to have matched the route in table 1
#
- route_34_eos = VppMplsRoute(self, 34, 1,
- [VppRoutePath("0.0.0.0",
- self.pg1.sw_if_index,
- is_interface_rx=1)])
+ route_34_eos = VppMplsRoute(
+ self, 34, 1,
+ [VppRoutePath("0.0.0.0",
+ self.pg1.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)])
route_34_eos.add_vpp_config()
#
@@ -1154,7 +1155,7 @@ class TestMPLS(VppTestCase):
labels=[VppMplsLabel(3402)]),
VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
- is_interface_rx=1)],
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)],
is_multicast=1)
route_3400_eos.add_vpp_config()
@@ -1235,6 +1236,7 @@ class TestMPLS(VppTestCase):
VppMRoutePath(mpls_tun._sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
+ self.logger.info(self.vapi.cli("sh ip mfib index 0"))
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
@@ -1273,12 +1275,14 @@ class TestMPLS(VppTestCase):
# if the packet egresses, then we must have matched the route in
# table 1
#
- route_34_eos = VppMplsRoute(self, 34, 1,
- [VppRoutePath("0.0.0.0",
- self.pg1.sw_if_index,
- nh_table_id=1,
- rpf_id=55)],
- is_multicast=1)
+ route_34_eos = VppMplsRoute(
+ self, 34, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_table_id=1,
+ rpf_id=55)],
+ is_multicast=1,
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)
route_34_eos.add_vpp_config()
@@ -1294,6 +1298,7 @@ class TestMPLS(VppTestCase):
# set the RPF-ID of the entry to match the input packet's
#
route_232_1_1_1.update_rpf_id(55)
+ self.logger.info(self.vapi.cli("sh ip mfib index 1 232.1.1.1"))
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1")
@@ -1330,8 +1335,8 @@ class TestMPLS(VppTestCase):
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
table_id=1,
paths=[VppMRoutePath(self.pg1.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
- is_ip6=1)
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
route_ff.add_vpp_config()
#
@@ -1345,11 +1350,11 @@ class TestMPLS(VppTestCase):
route_34_eos = VppMplsRoute(
self, 34, 1,
[VppRoutePath("::",
- self.pg1.sw_if_index,
+ 0xffffffff,
nh_table_id=1,
- rpf_id=55,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_multicast=1)
+ rpf_id=55)],
+ is_multicast=1,
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
route_34_eos.add_vpp_config()
@@ -1572,16 +1577,19 @@ class TestMPLSPIC(VppTestCase):
pkts = []
for ii in range(NUM_PKTS):
dst = "192.168.1.%d" % ii
- vpn_routes.append(VppIpRoute(self, dst, 32,
- [VppRoutePath("10.0.0.45",
- 0xffffffff,
- labels=[145],
- is_resolve_host=1),
- VppRoutePath("10.0.0.46",
- 0xffffffff,
- labels=[146],
- is_resolve_host=1)],
- table_id=1))
+ vpn_routes.append(VppIpRoute(
+ self, dst, 32,
+ [VppRoutePath(
+ "10.0.0.45",
+ 0xffffffff,
+ labels=[145],
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST),
+ VppRoutePath(
+ "10.0.0.46",
+ 0xffffffff,
+ labels=[146],
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST)],
+ table_id=1))
vpn_routes[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg2.local_mac,
@@ -1686,16 +1694,19 @@ class TestMPLSPIC(VppTestCase):
for ii in range(NUM_PKTS):
dst = "192.168.1.%d" % ii
local_label = 1600 + ii
- vpn_routes.append(VppIpRoute(self, dst, 32,
- [VppRoutePath(self.pg2.remote_ip4,
- 0xffffffff,
- nh_table_id=1,
- is_resolve_attached=1),
- VppRoutePath(self.pg3.remote_ip4,
- 0xffffffff,
- nh_table_id=1,
- is_resolve_attached=1)],
- table_id=1))
+ vpn_routes.append(VppIpRoute(
+ self, dst, 32,
+ [VppRoutePath(
+ self.pg2.remote_ip4,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+ VppRoutePath(
+ self.pg3.remote_ip4,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+ table_id=1))
vpn_routes[ii].add_vpp_config()
vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
@@ -1807,23 +1818,21 @@ class TestMPLSPIC(VppTestCase):
local_label = 1600 + ii
vpn_routes.append(VppIpRoute(
self, dst, 128,
- [VppRoutePath(self.pg2.remote_ip6,
- 0xffffffff,
- nh_table_id=1,
- is_resolve_attached=1,
- proto=DpoProto.DPO_PROTO_IP6),
- VppRoutePath(self.pg3.remote_ip6,
- 0xffffffff,
- nh_table_id=1,
- proto=DpoProto.DPO_PROTO_IP6,
- is_resolve_attached=1)],
- table_id=1,
- is_ip6=1))
+ [VppRoutePath(
+ self.pg2.remote_ip6,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+ VppRoutePath(
+ self.pg3.remote_ip6,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+ table_id=1))
vpn_routes[ii].add_vpp_config()
vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
- ip_table_id=1,
- is_ip6=1))
+ ip_table_id=1))
vpn_bindings[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg0.local_mac,
@@ -1832,6 +1841,7 @@ class TestMPLSPIC(VppTestCase):
IPv6(src=self.pg0.remote_ip6, dst=dst) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
+ self.logger.info(self.vapi.cli("sh ip6 fib %s" % dst))
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
@@ -1988,8 +1998,9 @@ class TestMPLSL2(VppTestCase):
self, 55, 1,
[VppRoutePath("0.0.0.0",
mpls_tun_1.sw_if_index,
- is_interface_rx=1,
- proto=DpoProto.DPO_PROTO_ETHERNET)])
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
route_55_eos.add_vpp_config()
#
@@ -2050,8 +2061,9 @@ class TestMPLSL2(VppTestCase):
self, 55, 1,
[VppRoutePath("0.0.0.0",
mpls_tun.sw_if_index,
- is_interface_rx=1,
- proto=DpoProto.DPO_PROTO_ETHERNET)])
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
route_55_eos.add_vpp_config()
#
diff --git a/test/test_mtu.py b/test/test_mtu.py
index 1c7f6413f61..568a147a9a4 100644
--- a/test/test_mtu.py
+++ b/test/test_mtu.py
@@ -13,7 +13,7 @@ from scapy.layers.inet6 import IPv6, Ether, IP, UDP, ICMPv6PacketTooBig
from scapy.layers.inet import ICMP
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
-from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathProto
from socket import AF_INET, AF_INET6, inet_pton
from util import reassemble4
diff --git a/test/test_nat.py b/test/test_nat.py
index f7364747efd..bf22602059e 100644
--- a/test/test_nat.py
+++ b/test/test_nat.py
@@ -25,6 +25,8 @@ from syslog_rfc5424_parser import SyslogMessage, ParseError
from syslog_rfc5424_parser.constants import SyslogFacility, SyslogSeverity
from io import BytesIO
from vpp_papi import VppEnum
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathType
+from vpp_neighbor import VppNeighbor
from scapy.all import bind_layers, Packet, ByteEnumField, ShortField, \
IPField, IntField, LongField, XByteField, FlagsField, FieldLenField, \
PacketListField
@@ -83,29 +85,6 @@ class MethodHolder(VppTestCase):
Clear NAT44 configuration.
"""
if hasattr(self, 'pg7') and hasattr(self, 'pg8'):
- # I found no elegant way to do this
- self.vapi.ip_add_del_route(
- dst_address=self.pg7.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg7.remote_ip4n,
- next_hop_sw_if_index=self.pg7.sw_if_index,
- is_add=0)
- self.vapi.ip_add_del_route(
- dst_address=self.pg8.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg8.remote_ip4n,
- next_hop_sw_if_index=self.pg8.sw_if_index,
- is_add=0)
-
- for intf in [self.pg7, self.pg8]:
- self.vapi.ip_neighbor_add_del(
- intf.sw_if_index,
- intf.remote_mac,
- intf.remote_ip4,
- flags=(VppEnum.vl_api_ip_neighbor_flags_t.
- IP_API_NEIGHBOR_FLAG_STATIC),
- is_add=0)
-
if self.pg7.has_ip4_config:
self.pg7.unconfig_ip4()
@@ -3159,31 +3138,32 @@ class TestNAT44(MethodHolder):
capture = self.pg2.get_capture(len(pkts))
self.verify_capture_out(capture, nat_ip1)
+ def create_routes_and_neigbors(self):
+ r1 = VppIpRoute(self, self.pg7.remote_ip4, 32,
+ [VppRoutePath(self.pg7.remote_ip4,
+ self.pg7.sw_if_index)])
+ r2 = VppIpRoute(self, self.pg8.remote_ip4, 32,
+ [VppRoutePath(self.pg8.remote_ip4,
+ self.pg8.sw_if_index)])
+ r1.add_vpp_config()
+ r2.add_vpp_config()
+
+ n1 = VppNeighbor(self,
+ self.pg7.sw_if_index,
+ self.pg7.remote_mac,
+ self.pg7.remote_ip4,
+ is_static=1)
+ n2 = VppNeighbor(self,
+ self.pg8.sw_if_index,
+ self.pg8.remote_mac,
+ self.pg8.remote_ip4,
+ is_static=1)
+ n1.add_vpp_config()
+ n2.add_vpp_config()
+
def test_dynamic_ipless_interfaces(self):
""" NAT44 interfaces without configured IP address """
-
- self.vapi.ip_neighbor_add_del(
- self.pg7.sw_if_index,
- self.pg7.remote_mac,
- self.pg7.remote_ip4,
- flags=(VppEnum.vl_api_ip_neighbor_flags_t.
- IP_API_NEIGHBOR_FLAG_STATIC))
- self.vapi.ip_neighbor_add_del(
- self.pg8.sw_if_index,
- self.pg8.remote_mac,
- self.pg8.remote_ip4,
- flags=(VppEnum.vl_api_ip_neighbor_flags_t.
- IP_API_NEIGHBOR_FLAG_STATIC))
-
- self.vapi.ip_add_del_route(dst_address=self.pg7.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg7.remote_ip4n,
- next_hop_sw_if_index=self.pg7.sw_if_index)
- self.vapi.ip_add_del_route(dst_address=self.pg8.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg8.remote_ip4n,
- next_hop_sw_if_index=self.pg8.sw_if_index)
-
+ self.create_routes_and_neigbors()
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
@@ -3212,28 +3192,7 @@ class TestNAT44(MethodHolder):
def test_static_ipless_interfaces(self):
""" NAT44 interfaces without configured IP address - 1:1 NAT """
- self.vapi.ip_neighbor_add_del(
- self.pg7.sw_if_index,
- self.pg7.remote_mac,
- self.pg7.remote_ip4,
- flags=(VppEnum.vl_api_ip_neighbor_flags_t.
- IP_API_NEIGHBOR_FLAG_STATIC))
- self.vapi.ip_neighbor_add_del(
- self.pg8.sw_if_index,
- self.pg8.remote_mac,
- self.pg8.remote_ip4,
- flags=(VppEnum.vl_api_ip_neighbor_flags_t.
- IP_API_NEIGHBOR_FLAG_STATIC))
-
- self.vapi.ip_add_del_route(dst_address=self.pg7.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg7.remote_ip4n,
- next_hop_sw_if_index=self.pg7.sw_if_index)
- self.vapi.ip_add_del_route(dst_address=self.pg8.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg8.remote_ip4n,
- next_hop_sw_if_index=self.pg8.sw_if_index)
-
+ self.create_routes_and_neigbors()
self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
@@ -3266,28 +3225,7 @@ class TestNAT44(MethodHolder):
self.udp_port_out = 30607
self.icmp_id_out = 30608
- self.vapi.ip_neighbor_add_del(
- self.pg7.sw_if_index,
- self.pg7.remote_mac,
- self.pg7.remote_ip4,
- flags=(VppEnum.vl_api_ip_neighbor_flags_t.
- IP_API_NEIGHBOR_FLAG_STATIC))
- self.vapi.ip_neighbor_add_del(
- self.pg8.sw_if_index,
- self.pg8.remote_mac,
- self.pg8.remote_ip4,
- flags=(VppEnum.vl_api_ip_neighbor_flags_t.
- IP_API_NEIGHBOR_FLAG_STATIC))
-
- self.vapi.ip_add_del_route(dst_address=self.pg7.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg7.remote_ip4n,
- next_hop_sw_if_index=self.pg7.sw_if_index)
- self.vapi.ip_add_del_route(dst_address=self.pg8.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg8.remote_ip4n,
- next_hop_sw_if_index=self.pg8.sw_if_index)
-
+ self.create_routes_and_neigbors()
self.nat44_add_address(self.nat_addr)
self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr,
self.tcp_port_in, self.tcp_port_out,
@@ -3476,16 +3414,16 @@ class TestNAT44(MethodHolder):
nat_ip_vrf10 = "10.0.0.10"
nat_ip_vrf20 = "10.0.0.20"
- self.vapi.ip_add_del_route(dst_address=self.pg3.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg3.remote_ip4n,
- next_hop_sw_if_index=self.pg3.sw_if_index,
- table_id=10)
- self.vapi.ip_add_del_route(dst_address=self.pg3.remote_ip4n,
- dst_address_length=32,
- next_hop_address=self.pg3.remote_ip4n,
- next_hop_sw_if_index=self.pg3.sw_if_index,
- table_id=20)
+ r1 = VppIpRoute(self, self.pg3.remote_ip4, 32,
+ [VppRoutePath(self.pg3.remote_ip4,
+ self.pg3.sw_if_index)],
+ table_id=10)
+ r2 = VppIpRoute(self, self.pg3.remote_ip4, 32,
+ [VppRoutePath(self.pg3.remote_ip4,
+ self.pg3.sw_if_index)],
+ table_id=20)
+ r1.add_vpp_config()
+ r2.add_vpp_config()
self.nat44_add_address(nat_ip_vrf10, vrf_id=10)
self.nat44_add_address(nat_ip_vrf20, vrf_id=20)
@@ -4499,11 +4437,12 @@ class TestNAT44EndpointDependent(MethodHolder):
cls.pg5.set_table_ip4(1)
cls.pg5.config_ip4()
cls.pg5.admin_up()
- cls.vapi.ip_add_del_route(dst_address=cls.pg5.remote_ip4n,
- dst_address_length=32,
- next_hop_address=zero_ip4n,
- next_hop_sw_if_index=cls.pg5.sw_if_index,
- table_id=1)
+ r1 = VppIpRoute(cls, cls.pg5.remote_ip4, 32,
+ [VppRoutePath("0.0.0.0",
+ cls.pg5.sw_if_index)],
+ table_id=1,
+ register=False)
+ r1.add_vpp_config()
cls.pg6._local_ip4 = "10.1.2.1"
cls.pg6._local_ip4n = socket.inet_pton(socket.AF_INET,
@@ -4514,25 +4453,31 @@ class TestNAT44EndpointDependent(MethodHolder):
cls.pg6.set_table_ip4(1)
cls.pg6.config_ip4()
cls.pg6.admin_up()
- cls.vapi.ip_add_del_route(dst_address=cls.pg6.remote_ip4n,
- dst_address_length=32,
- next_hop_address=zero_ip4n,
- next_hop_sw_if_index=cls.pg6.sw_if_index,
- table_id=1)
-
- cls.vapi.ip_add_del_route(dst_address=cls.pg6.remote_ip4n,
- dst_address_length=16,
- next_hop_address=zero_ip4n, table_id=0,
- next_hop_table_id=1)
- cls.vapi.ip_add_del_route(dst_address=zero_ip4n,
- dst_address_length=0,
- next_hop_address=zero_ip4n, table_id=1,
- next_hop_table_id=0)
- cls.vapi.ip_add_del_route(dst_address=zero_ip4n,
- dst_address_length=0,
- next_hop_address=cls.pg1.local_ip4n,
- next_hop_sw_if_index=cls.pg1.sw_if_index,
- table_id=0)
+
+ r2 = VppIpRoute(cls, cls.pg6.remote_ip4, 32,
+ [VppRoutePath("0.0.0.0",
+ cls.pg6.sw_if_index)],
+ table_id=1,
+ register=False)
+ r3 = VppIpRoute(cls, cls.pg6.remote_ip4, 16,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_table_id=1)],
+ table_id=0,
+ register=False)
+ r4 = VppIpRoute(cls, "0.0.0.0", 0,
+ [VppRoutePath("0.0.0.0", 0xffffffff,
+ nh_table_id=0)],
+ table_id=1,
+ register=False)
+ r5 = VppIpRoute(cls, "0.0.0.0", 0,
+ [VppRoutePath(cls.pg1.local_ip4,
+ cls.pg1.sw_if_index)],
+ register=False)
+ r2.add_vpp_config()
+ r3.add_vpp_config()
+ r4.add_vpp_config()
+ r5.add_vpp_config()
cls.pg5.resolve_arp()
cls.pg6.resolve_arp()
@@ -6938,11 +6883,11 @@ class TestNAT44Out2InDPO(MethodHolder):
cls.pg1.config_ip6()
cls.pg1.resolve_ndp()
- cls.vapi.ip_add_del_route(dst_address=b'\x00' * 16,
- dst_address_length=0,
- next_hop_address=cls.pg1.remote_ip6n,
- next_hop_sw_if_index=cls.pg1.sw_if_index,
- is_ipv6=True)
+ r1 = VppIpRoute(cls, "::", 0,
+ [VppRoutePath(cls.pg1.remote_ip6,
+ cls.pg1.sw_if_index)],
+ register=False)
+ r1.add_vpp_config()
except Exception:
super(TestNAT44Out2InDPO, cls).tearDownClass()
@@ -9386,11 +9331,10 @@ class TestDSliteCE(MethodHolder):
aftr_ip6_n = socket.inet_pton(socket.AF_INET6, aftr_ip6)
self.vapi.dslite_set_aftr_addr(ip4_addr=aftr_ip4, ip6_addr=aftr_ip6)
- self.vapi.ip_add_del_route(dst_address=aftr_ip6_n,
- dst_address_length=128,
- next_hop_address=self.pg1.remote_ip6n,
- next_hop_sw_if_index=self.pg1.sw_if_index,
- is_ipv6=1)
+ r1 = VppIpRoute(self, aftr_ip6, 128,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ r1.add_vpp_config()
# UDP encapsulation
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
diff --git a/test/test_neighbor.py b/test/test_neighbor.py
index 6f781ff54c2..69b00ea6ff2 100644
--- a/test/test_neighbor.py
+++ b/test/test_neighbor.py
@@ -6,7 +6,7 @@ from socket import AF_INET, AF_INET6, inet_pton
from framework import VppTestCase, VppTestRunner
from vpp_neighbor import VppNeighbor, find_nbr
from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, \
- VppIpTable, DpoProto
+ VppIpTable, DpoProto, FibPathType
from vpp_papi import VppEnum
import scapy.compat
@@ -1362,8 +1362,7 @@ class ARPTestCase(VppTestCase):
ip_10_1 = VppIpRoute(self, "10::1", 128,
[VppRoutePath(self.pg0.remote_hosts[1].ip6,
self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=DpoProto.DPO_PROTO_IP6)])
ip_10_1.add_vpp_config()
p1 = (Ether(dst=self.pg1.local_mac,
@@ -1396,10 +1395,11 @@ class ARPTestCase(VppTestCase):
#
self.pg0.generate_remote_hosts(2)
- forus = VppIpRoute(self, self.pg0.remote_hosts[1].ip4, 32,
- [VppRoutePath(self.pg0.remote_hosts[1].ip4,
- self.pg0.sw_if_index)],
- is_local=1)
+ forus = VppIpRoute(
+ self, self.pg0.remote_hosts[1].ip4, 32,
+ [VppRoutePath("0.0.0.0",
+ self.pg0.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_LOCAL)])
forus.add_vpp_config()
p = (Ether(dst="ff:ff:ff:ff:ff:ff",
diff --git a/test/test_p2p_ethernet.py b/test/test_p2p_ethernet.py
index 6719400cb5c..8ae6cb26fe0 100644
--- a/test/test_p2p_ethernet.py
+++ b/test/test_p2p_ethernet.py
@@ -192,9 +192,7 @@ class P2PEthernetIPV6(VppTestCase):
route_8000 = VppIpRoute(self, "8000::", 64,
[VppRoutePath(self.pg0.remote_ip6,
- self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg0.sw_if_index)])
route_8000.add_vpp_config()
self.packets = [(Ether(dst=self.pg1.local_mac,
@@ -212,9 +210,7 @@ class P2PEthernetIPV6(VppTestCase):
route_9001 = VppIpRoute(self, "9001::", 64,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route_9001.add_vpp_config()
self.packets.append(
@@ -237,9 +233,7 @@ class P2PEthernetIPV6(VppTestCase):
route_3 = VppIpRoute(self, "9000::", 64,
[VppRoutePath(self.pg1._remote_hosts[0].ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route_3.add_vpp_config()
self.packets.append(
@@ -262,9 +256,7 @@ class P2PEthernetIPV6(VppTestCase):
route_9001 = VppIpRoute(self, "9000::", 64,
[VppRoutePath(self.pg1._remote_hosts[0].ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route_9001.add_vpp_config()
self.packets.append(
@@ -283,21 +275,17 @@ class P2PEthernetIPV6(VppTestCase):
route_8000 = VppIpRoute(self, "8000::", 64,
[VppRoutePath(self.pg0.remote_ip6,
- self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg0.sw_if_index)])
route_8000.add_vpp_config()
route_8001 = VppIpRoute(self, "8001::", 64,
- [VppRoutePath(self.p2p_sub_ifs[0].remote_ip6,
- self.p2p_sub_ifs[0].sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ [VppRoutePath(
+ self.p2p_sub_ifs[0].remote_ip6,
+ self.p2p_sub_ifs[0].sw_if_index)])
route_8001.add_vpp_config()
route_8002 = VppIpRoute(self, "8002::", 64,
- [VppRoutePath(self.p2p_sub_ifs[1].remote_ip6,
- self.p2p_sub_ifs[1].sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ [VppRoutePath(
+ self.p2p_sub_ifs[1].remote_ip6,
+ self.p2p_sub_ifs[1].sw_if_index)])
route_8002.add_vpp_config()
for i in range(0, 3):
diff --git a/test/test_punt.py b/test/test_punt.py
index b93188e4fe2..e7ac4dd3fd6 100644
--- a/test/test_punt.py
+++ b/test/test_punt.py
@@ -1080,8 +1080,7 @@ class TestPunt(VppTestCase):
ip_1_2 = VppIpRoute(self, "1::2", 128,
[VppRoutePath(self.pg3.remote_ip6,
self.pg3.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=DpoProto.DPO_PROTO_IP6)])
ip_1_2.add_vpp_config()
p4 = (Ether(src=self.pg2.remote_mac,
diff --git a/test/test_qos.py b/test/test_qos.py
index d4bd0872243..94062b89ae2 100644
--- a/test/test_qos.py
+++ b/test/test_qos.py
@@ -6,7 +6,7 @@ from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppDot1QSubint
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
- VppMplsLabel, VppMplsTable
+ VppMplsLabel, VppMplsTable, FibPathProto
import scapy.compat
from scapy.packet import Raw
@@ -521,15 +521,11 @@ class TestQOS(VppTestCase):
route_10_0_0_2.add_vpp_config()
route_2001_1 = VppIpRoute(self, "2001::1", 128,
[VppRoutePath(sub_if.remote_ip6,
- sub_if.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ sub_if.sw_if_index)])
route_2001_1.add_vpp_config()
route_2001_2 = VppIpRoute(self, "2001::2", 128,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route_2001_2.add_vpp_config()
p_v1 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
diff --git a/test/test_reassembly.py b/test/test_reassembly.py
index 26ff7486b92..6a356a00b0c 100644
--- a/test/test_reassembly.py
+++ b/test/test_reassembly.py
@@ -17,7 +17,7 @@ from framework import VppTestCase, VppTestRunner
from util import ppp, fragment_rfc791, fragment_rfc8200
from vpp_gre_interface import VppGreInterface
from vpp_ip import DpoProto
-from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathProto
# 35 is enough to have >257 400-byte fragments
test_packet_count = 35
@@ -1203,10 +1203,9 @@ class TestFIFReassembly(VppTestCase):
sw_if_index=self.gre6.sw_if_index, enable_ip6=True)
self.route6 = VppIpRoute(self, self.tun_ip6, 128,
- [VppRoutePath(self.src_if.remote_ip6,
- self.src_if.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ [VppRoutePath(
+ self.src_if.remote_ip6,
+ self.src_if.sw_if_index)])
self.route6.add_vpp_config()
self.reset_packet_infos()
diff --git a/test/test_sixrd.py b/test/test_sixrd.py
index 9eeae57d37f..ae4af15c2e3 100644
--- a/test/test_sixrd.py
+++ b/test/test_sixrd.py
@@ -7,7 +7,7 @@ from scapy.layers.inet6 import IPv6
from scapy.packet import Raw
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
-from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, FibPathProto
from socket import AF_INET, AF_INET6, inet_pton
""" Test6rd is a subclass of VPPTestCase classes.
@@ -335,11 +335,9 @@ class Test6RD(VppTestCase):
self.tunnel_index = rv.sw_if_index
- default_route = VppIpRoute(
- self, "DEAD::", 16, [VppRoutePath("2002:0808:0808::",
- self.tunnel_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ default_route = VppIpRoute(self, "DEAD::", 16,
+ [VppRoutePath("2002:0808:0808::",
+ self.tunnel_index)])
default_route.add_vpp_config()
ip4_route = VppIpRoute(self, "8.0.0.0", 8,
diff --git a/test/test_srv6.py b/test/test_srv6.py
index de98ff28a03..b3e69724028 100644
--- a/test/test_srv6.py
+++ b/test/test_srv6.py
@@ -5,7 +5,7 @@ import binascii
from socket import AF_INET6
from framework import VppTestCase, VppTestRunner
-from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto, VppIpTable
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathProto, VppIpTable
from vpp_srv6 import SRv6LocalSIDBehaviors, VppSRv6LocalSID, VppSRv6Policy, \
SRv6PolicyType, VppSRv6Steering, SRv6PolicySteeringTypes
@@ -146,9 +146,7 @@ class TestSRv6(VppTestCase):
# configure FIB entries
route = VppIpRoute(self, "a4::", 64,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route.add_vpp_config()
# configure encaps IPv6 source address
@@ -248,9 +246,7 @@ class TestSRv6(VppTestCase):
# configure FIB entries
route = VppIpRoute(self, "a4::", 64,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route.add_vpp_config()
# configure encaps IPv6 source address
@@ -341,9 +337,7 @@ class TestSRv6(VppTestCase):
# configure FIB entries
route = VppIpRoute(self, "a4::", 64,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route.add_vpp_config()
# configure encaps IPv6 source address
@@ -425,9 +419,7 @@ class TestSRv6(VppTestCase):
# configure FIB entries
route = VppIpRoute(self, "a4::", 64,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route.add_vpp_config()
# configure encaps IPv6 source address
@@ -512,9 +504,7 @@ class TestSRv6(VppTestCase):
# configure FIB entries
route = VppIpRoute(self, "a4::", 64,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route.add_vpp_config()
# configure SRv6 localSID End without PSP behavior
@@ -584,9 +574,7 @@ class TestSRv6(VppTestCase):
# configure FIB entries
route = VppIpRoute(self, "a4::", 64,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg1.sw_if_index)])
route.add_vpp_config()
# configure SRv6 localSID End with PSP behavior
@@ -655,12 +643,9 @@ class TestSRv6(VppTestCase):
# a4::/64 via pg1 and pg2
route = VppIpRoute(self, "a4::", 64,
[VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6),
+ self.pg1.sw_if_index),
VppRoutePath(self.pg2.remote_ip6,
- self.pg2.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg2.sw_if_index)])
route.add_vpp_config()
self.logger.debug(self.vapi.cli("show ip6 fib"))
@@ -734,13 +719,11 @@ class TestSRv6(VppTestCase):
# configure FIB entries
# a4::/64 via pg1 and pg2
route = VppIpRoute(self, "a4::", 64,
- [VppRoutePath(self.pg1.remote_ip6,
- self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6),
+ [VppRoutePath(
+ self.pg1.remote_ip6,
+ self.pg1.sw_if_index),
VppRoutePath(self.pg2.remote_ip6,
- self.pg2.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg2.sw_if_index)])
route.add_vpp_config()
# configure SRv6 localSID End with PSP behavior
@@ -880,18 +863,14 @@ class TestSRv6(VppTestCase):
route0 = VppIpRoute(self, "a4::", 64,
[VppRoutePath(self.pg1.remote_ip6,
self.pg1.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6,
nh_table_id=0)],
- table_id=0,
- is_ip6=1)
+ table_id=0)
route0.add_vpp_config()
route1 = VppIpRoute(self, "a4::", 64,
[VppRoutePath(self.pg2.remote_ip6,
self.pg2.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6,
nh_table_id=vrf_1)],
- table_id=vrf_1,
- is_ip6=1)
+ table_id=vrf_1)
route1.add_vpp_config()
self.logger.debug(self.vapi.cli("show ip6 fib"))
@@ -1038,15 +1017,13 @@ class TestSRv6(VppTestCase):
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index,
nh_table_id=0)],
- table_id=0,
- is_ip6=0)
+ table_id=0)
route0.add_vpp_config()
route1 = VppIpRoute(self, "4.1.1.0", 24,
[VppRoutePath(self.pg2.remote_ip4,
self.pg2.sw_if_index,
nh_table_id=vrf_1)],
- table_id=vrf_1,
- is_ip6=0)
+ table_id=vrf_1)
route1.add_vpp_config()
self.logger.debug(self.vapi.cli("show ip fib"))
@@ -1200,10 +1177,9 @@ class TestSRv6(VppTestCase):
# configure FIB entries
route = VppIpRoute(self, "a4::", 64,
- [VppRoutePath(self.pg4.remote_ip6,
- self.pg4.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ [VppRoutePath(
+ self.pg4.remote_ip6,
+ self.pg4.sw_if_index)])
route.add_vpp_config()
# configure encaps IPv6 source address
diff --git a/test/test_srv6_ad.py b/test/test_srv6_ad.py
index a788f1e4974..aa4b8d3c088 100644
--- a/test/test_srv6_ad.py
+++ b/test/test_srv6_ad.py
@@ -151,8 +151,7 @@ class TestSRv6(VppTestCase):
route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=DpoProto.DPO_PROTO_IP6)])
route.add_vpp_config()
# configure SRv6 localSID behavior
@@ -286,8 +285,7 @@ class TestSRv6(VppTestCase):
route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=DpoProto.DPO_PROTO_IP6)])
route.add_vpp_config()
# configure SRv6 localSID behavior
@@ -429,8 +427,7 @@ class TestSRv6(VppTestCase):
route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ proto=DpoProto.DPO_PROTO_IP6)])
route.add_vpp_config()
# configure SRv6 localSID behavior
diff --git a/test/test_srv6_as.py b/test/test_srv6_as.py
index 108fcdb5d27..2be7865d5bd 100755
--- a/test/test_srv6_as.py
+++ b/test/test_srv6_as.py
@@ -5,7 +5,7 @@ import binascii
from socket import AF_INET6
from framework import VppTestCase, VppTestRunner
-from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto, VppIpTable
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathProto, VppIpTable
from vpp_srv6 import SRv6LocalSIDBehaviors, VppSRv6LocalSID, VppSRv6Policy, \
SRv6PolicyType, VppSRv6Steering, SRv6PolicySteeringTypes
@@ -196,9 +196,7 @@ class TestSRv6(VppTestCase):
# configure route to next segment
route = VppIpRoute(self, sid_list[test_sid_index + 1], 128,
[VppRoutePath(self.pg0.remote_ip6,
- self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg0.sw_if_index)])
route.add_vpp_config()
# configure SRv6 localSID behavior
@@ -267,9 +265,7 @@ class TestSRv6(VppTestCase):
# configure route to next segment
route = VppIpRoute(self, sid_list[test_sid_index + 1], 128,
[VppRoutePath(self.pg0.remote_ip6,
- self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg0.sw_if_index)])
route.add_vpp_config()
# configure SRv6 localSID behavior
@@ -338,9 +334,7 @@ class TestSRv6(VppTestCase):
# configure route to next segment
route = VppIpRoute(self, sid_list[test_sid_index + 1], 128,
[VppRoutePath(self.pg0.remote_ip6,
- self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_ip6=1)
+ self.pg0.sw_if_index)])
route.add_vpp_config()
# configure SRv6 localSID behavior
diff --git a/test/test_udp.py b/test/test_udp.py
index c714f25e27f..fc77434184c 100644
--- a/test/test_udp.py
+++ b/test/test_udp.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python
import unittest
from framework import VppTestCase, VppTestRunner
+
from vpp_udp_encap import find_udp_encap, VppUdpEncap
-from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, VppMplsLabel
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, VppMplsLabel, \
+ FibPathType
from scapy.packet import Raw
from scapy.layers.l2 import Ether
@@ -131,32 +133,34 @@ class TestUdpEncap(VppTestCase):
#
# Routes via each UDP encap object - all combinations of v4 and v6.
#
- route_4o4 = VppIpRoute(self, "1.1.0.1", 32,
- [VppRoutePath("0.0.0.0",
- 0xFFFFFFFF,
- is_udp_encap=1,
- next_hop_id=udp_encap_0.id)])
- route_4o6 = VppIpRoute(self, "1.1.2.1", 32,
- [VppRoutePath("0.0.0.0",
- 0xFFFFFFFF,
- is_udp_encap=1,
- next_hop_id=udp_encap_2.id)])
- route_6o4 = VppIpRoute(self, "2001::1", 128,
- [VppRoutePath("0.0.0.0",
- 0xFFFFFFFF,
- is_udp_encap=1,
- next_hop_id=udp_encap_1.id)],
- is_ip6=1)
- route_6o6 = VppIpRoute(self, "2001::3", 128,
- [VppRoutePath("0.0.0.0",
- 0xFFFFFFFF,
- is_udp_encap=1,
- next_hop_id=udp_encap_3.id)],
- is_ip6=1)
- route_4o4.add_vpp_config()
+ route_4o4 = VppIpRoute(
+ self, "1.1.0.1", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
+ next_hop_id=udp_encap_0.id)])
+ route_4o6 = VppIpRoute(
+ self, "1.1.2.1", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
+ next_hop_id=udp_encap_2.id)])
+ route_6o4 = VppIpRoute(
+ self, "2001::1", 128,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
+ next_hop_id=udp_encap_1.id)])
+ route_6o6 = VppIpRoute(
+ self, "2001::3", 128,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
+ next_hop_id=udp_encap_3.id)])
route_4o6.add_vpp_config()
route_6o6.add_vpp_config()
route_6o4.add_vpp_config()
+ route_4o4.add_vpp_config()
#
# 4o4 encap
@@ -222,12 +226,13 @@ class TestUdpEncap(VppTestCase):
# A route with an output label
# the TTL of the inner packet is decremented on LSP ingress
#
- route_4oMPLSo4 = VppIpRoute(self, "1.1.2.22", 32,
- [VppRoutePath("0.0.0.0",
- 0xFFFFFFFF,
- is_udp_encap=1,
- next_hop_id=1,
- labels=[VppMplsLabel(66)])])
+ route_4oMPLSo4 = VppIpRoute(
+ self, "1.1.2.22", 32,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
+ next_hop_id=1,
+ labels=[VppMplsLabel(66)])])
route_4oMPLSo4.add_vpp_config()
p_4omo4 = (Ether(src=self.pg0.remote_mac,
diff --git a/test/test_vcl.py b/test/test_vcl.py
index 3e10764c1e3..18279e9f226 100644
--- a/test/test_vcl.py
+++ b/test/test_vcl.py
@@ -7,7 +7,7 @@ import subprocess
import signal
from framework import VppTestCase, VppTestRunner, running_extended_tests, \
Worker
-from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath, DpoProto
+from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath, FibPathProto
class VCLAppWorker(Worker):
@@ -152,14 +152,12 @@ class VCLTestCase(VppTestCase):
# Add inter-table routes
ip_t01 = VppIpRoute(self, self.loop1.local_ip6, 128,
[VppRoutePath("::0", 0xffffffff,
- nh_table_id=2,
- proto=DpoProto.DPO_PROTO_IP6)],
- table_id=1, is_ip6=1)
+ nh_table_id=2)],
+ table_id=1)
ip_t10 = VppIpRoute(self, self.loop0.local_ip6, 128,
[VppRoutePath("::0", 0xffffffff,
- nh_table_id=1,
- proto=DpoProto.DPO_PROTO_IP6)],
- table_id=2, is_ip6=1)
+ nh_table_id=1)],
+ table_id=2)
ip_t01.add_vpp_config()
ip_t10.add_vpp_config()
self.logger.debug(self.vapi.cli("show interface addr"))
diff --git a/test/test_vxlan.py b/test/test_vxlan.py
index aa069dc6e7b..c74efe7d062 100644
--- a/test/test_vxlan.py
+++ b/test/test_vxlan.py
@@ -10,6 +10,8 @@ from scapy.layers.l2 import Ether, Raw
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from scapy.utils import atol
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
class TestVxlan(BridgeDomain, VppTestCase):
@@ -84,17 +86,20 @@ class TestVxlan(BridgeDomain, VppTestCase):
# Create 10 ucast vxlan tunnels under bd
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
- next_hop_address = cls.pg0.remote_ip4n
- for dest_ip4n in ip4n_range(next_hop_address, ip_range_start,
- ip_range_end):
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
+ ip_range_end):
# add host route so dest_ip4n will not be resolved
- cls.vapi.ip_add_del_route(dst_address=dest_ip4n,
- dst_address_length=32,
- next_hop_address=next_hop_address)
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+ dest_ip4n = socket.inet_pton(socket.AF_INET, dest_ip4)
+
r = cls.vapi.vxlan_add_del_tunnel(src_address=cls.pg0.local_ip4n,
dst_address=dest_ip4n, vni=vni)
- cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
- bd_id=vni)
+ cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni)
@classmethod
def add_del_shared_mcast_dst_load(cls, is_add):
diff --git a/test/test_vxlan6.py b/test/test_vxlan6.py
index 4053fadff8b..1e382e349c7 100644
--- a/test/test_vxlan6.py
+++ b/test/test_vxlan6.py
@@ -9,6 +9,8 @@ from scapy.layers.l2 import Ether
from scapy.layers.inet6 import IPv6, UDP
from scapy.layers.vxlan import VXLAN
from scapy.utils import atol
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
class TestVxlan6(BridgeDomain, VppTestCase):
@@ -85,18 +87,17 @@ class TestVxlan6(BridgeDomain, VppTestCase):
# Create 10 ucast vxlan tunnels under bd
start = 10
end = start + n_ucast_tunnels
- next_hop = cls.pg0.remote_ip6n
for dest_ip6 in cls.ip_range(start, end):
dest_ip6n = socket.inet_pton(socket.AF_INET6, dest_ip6)
# add host route so dest ip will not be resolved
- cls.vapi.ip_add_del_route(dst_address=dest_ip6n,
- dst_address_length=128,
- next_hop_address=next_hop, is_ipv6=1)
+ rip = VppIpRoute(cls, dest_ip6, 128,
+ [VppRoutePath(cls.pg0.remote_ip6, INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
r = cls.vapi.vxlan_add_del_tunnel(src_address=cls.pg0.local_ip6n,
dst_address=dest_ip6n, is_ipv6=1,
vni=vni)
- cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
- bd_id=vni)
+ cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni)
@classmethod
def add_mcast_tunnels_load(cls):
diff --git a/test/test_vxlan_gbp.py b/test/test_vxlan_gbp.py
index b4eb069cc89..9abff19bb79 100644
--- a/test/test_vxlan_gbp.py
+++ b/test/test_vxlan_gbp.py
@@ -11,6 +11,8 @@ from scapy.layers.l2 import Ether, Raw
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from scapy.utils import atol
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
class TestVxlanGbp(VppTestCase):
@@ -90,18 +92,19 @@ class TestVxlanGbp(VppTestCase):
# Create 2 ucast vxlan tunnels under bd
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
- next_hop_address = cls.pg0.remote_ip4n
+ next_hop_address = cls.pg0.remote_ip4
for dest_ip4 in ip4_range(cls.pg0.remote_ip4,
ip_range_start,
ip_range_end):
# add host route so dest_ip4n will not be resolved
- vip = VppIpAddress(dest_ip4)
- cls.vapi.ip_add_del_route(dst_address=vip.bytes,
- dst_address_length=32,
- next_hop_address=next_hop_address)
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
r = cls.vapi.vxlan_gbp_tunnel_add_del(
- VppIpAddress(cls.pg0.local_ip4).encode(),
- vip.encode(),
+ cls.pg0.local_ip4,
+ dest_ip4,
vni=vni)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=vni)
diff --git a/test/test_vxlan_gpe.py b/test/test_vxlan_gpe.py
index 7ee1225dd8c..f13a5287172 100644
--- a/test/test_vxlan_gpe.py
+++ b/test/test_vxlan_gpe.py
@@ -10,6 +10,8 @@ from scapy.layers.l2 import Ether, Raw
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from scapy.utils import atol
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
@unittest.skipUnless(running_extended_tests, "part of extended tests")
@@ -80,16 +82,20 @@ class TestVxlanGpe(BridgeDomain, VppTestCase):
@classmethod
def create_vxlan_gpe_flood_test_bd(cls, vni, n_ucast_tunnels):
- # Create 10 ucast vxlan_gpe tunnels under bd
+ # Create 10 ucast vxlan tunnels under bd
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
- next_hop_address = cls.pg0.remote_ip4n
- for dest_ip4n in ip4n_range(next_hop_address, ip_range_start,
- ip_range_end):
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
+ ip_range_end):
# add host route so dest_ip4n will not be resolved
- cls.vapi.ip_add_del_route(dst_address=dest_ip4n,
- dst_address_length=32,
- next_hop_address=next_hop_address)
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+ dest_ip4n = socket.inet_pton(socket.AF_INET, dest_ip4)
+
r = cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_ip4n,
diff --git a/test/vpp_bier.py b/test/vpp_bier.py
index 8e27f25d10f..6e087a8ee0b 100644
--- a/test/vpp_bier.py
+++ b/test/vpp_bier.py
@@ -38,10 +38,10 @@ def find_bier_table(test, bti):
def find_bier_route(test, bti, bp):
routes = test.vapi.bier_route_dump(bti)
for r in routes:
- if bti.set_id == r.br_tbl_id.bt_set \
- and bti.sub_domain_id == r.br_tbl_id.bt_sub_domain \
- and bti.hdr_len_id == r.br_tbl_id.bt_hdr_len_id \
- and bp == r.br_bp:
+ if bti.set_id == r.br_route.br_tbl_id.bt_set \
+ and bti.sub_domain_id == r.br_route.br_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == r.br_route.br_tbl_id.bt_hdr_len_id \
+ and bp == r.br_route.br_bp:
return True
return False
@@ -116,39 +116,15 @@ class VppBierRoute(VppObject):
self.tbl_id = tbl_id
self.bp = bp
self.paths = paths
-
- def encode_path(self, p):
- lstack = []
- for l in p.nh_labels:
- if type(l) == VppMplsLabel:
- lstack.append(l.encode())
- else:
- lstack.append({'label': l, 'ttl': 255})
- n_labels = len(lstack)
- while (len(lstack) < 16):
- lstack.append({})
- return {'next_hop': p.nh_addr,
- 'weight': 1,
- 'afi': p.proto,
- 'sw_if_index': 0xffffffff,
- 'preference': 0,
- 'table_id': p.nh_table_id,
- 'next_hop_id': p.next_hop_id,
- 'is_udp_encap': p.is_udp_encap,
- 'n_labels': n_labels,
- 'label_stack': lstack}
-
- def encode_paths(self):
- br_paths = []
- for p in self.paths:
- br_paths.append(self.encode_path(p))
- return br_paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
def add_vpp_config(self):
self._test.vapi.bier_route_add_del(
self.tbl_id,
self.bp,
- self.encode_paths(),
+ self.encoded_paths,
is_add=1)
self._test.registry.register(self, self._test.logger)
@@ -156,32 +132,37 @@ class VppBierRoute(VppObject):
self._test.vapi.bier_route_add_del(
self.tbl_id,
self.bp,
- self.encode_paths(),
+ self.encoded_paths,
is_add=0)
def update_paths(self, paths):
self.paths = paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
self._test.vapi.bier_route_add_del(
self.tbl_id,
self.bp,
- self.encode_paths(),
+ self.encoded_paths,
is_replace=1)
def add_path(self, path):
+ self.encoded_paths.append(path.encode())
self._test.vapi.bier_route_add_del(
self.tbl_id,
self.bp,
- [self.encode_path(path)],
+ [path.encode()],
is_add=1,
is_replace=0)
self.paths.append(path)
self._test.registry.register(self, self._test.logger)
def remove_path(self, path):
+ self.encoded_paths.remove(path.encode())
self._test.vapi.bier_route_add_del(
self.tbl_id,
self.bp,
- [self.encode_path(path)],
+ [path.encode()],
is_add=0,
is_replace=0)
self.paths.remove(path)
diff --git a/test/vpp_interface.py b/test/vpp_interface.py
index 36118192915..7b9de828e6c 100644
--- a/test/vpp_interface.py
+++ b/test/vpp_interface.py
@@ -7,6 +7,12 @@ from six import moves
from util import Host, mk_ll_addr
from vpp_papi import mac_ntop
+from ipaddress import IPv4Network
+
+try:
+ text_type = unicode
+except NameError:
+ text_type = str
@six.add_metaclass(abc.ABCMeta)
@@ -406,9 +412,10 @@ class VppInterface(object):
def is_ip4_entry_in_fib_dump(self, dump):
for i in dump:
- if i.address == self.local_ip4n and \
- i.address_length == self.local_ip4_prefix_len and \
- i.table_id == self.ip4_table_id:
+ n = IPv4Network(text_type("%s/%d" % (self.local_ip4,
+ self.local_ip4_prefix_len)))
+ if i.route.prefix == n and \
+ i.route.table_id == self.ip4_table_id:
return True
return False
diff --git a/test/vpp_ip.py b/test/vpp_ip.py
index 8b7ea222a67..5396e8457e2 100644
--- a/test/vpp_ip.py
+++ b/test/vpp_ip.py
@@ -34,9 +34,9 @@ class VppIpAddressUnion():
def encode(self):
if self.version == 6:
- return {'ip6': self.ip_addr.packed}
+ return {'ip6': self.ip_addr}
else:
- return {'ip4': self.ip_addr.packed}
+ return {'ip4': self.ip_addr}
@property
def version(self):
@@ -69,6 +69,9 @@ class VppIpAddressUnion():
self, other)
return NotImplemented
+ def __str__(self):
+ return str(self.ip_addr)
+
class VppIpAddress():
def __init__(self, addr):
@@ -153,11 +156,20 @@ class VppIpPrefix():
self.addr = VppIpAddress(addr)
self.len = len
+ def __eq__(self, other):
+ if self.address == other.address and self.len == other.len:
+ return True
+ return False
+
def encode(self):
return {'address': self.addr.encode(),
'address_length': self.len}
@property
+ def version(self):
+ return self.addr.version
+
+ @property
def address(self):
return self.addr.address
@@ -191,12 +203,12 @@ class VppIpPrefix():
class VppIpMPrefix():
- def __init__(self, saddr, gaddr, len):
+ def __init__(self, saddr, gaddr, glen):
self.saddr = saddr
self.gaddr = gaddr
- self.len = len
- self.ip_saddr = ip_address(text_type(self.saddr))
- self.ip_gaddr = ip_address(text_type(self.gaddr))
+ self.glen = glen
+ self.ip_saddr = VppIpAddressUnion(text_type(self.saddr))
+ self.ip_gaddr = VppIpAddressUnion(text_type(self.gaddr))
if self.ip_saddr.version != self.ip_gaddr.version:
raise ValueError('Source and group addresses must be of the '
'same address family.')
@@ -205,15 +217,58 @@ class VppIpMPrefix():
if 6 == self.ip_saddr.version:
prefix = {
'af': VppEnum.vl_api_address_family_t.ADDRESS_IP6,
- 'grp_address': {'ip6': self.ip_gaddr.packed},
- 'src_address': {'ip6': self.ip_saddr.packed},
- 'grp_address_length': self.len,
+ 'grp_address': {
+ 'ip6': self.gaddr
+ },
+ 'src_address': {
+ 'ip6': self.saddr
+ },
+ 'grp_address_length': self.glen,
}
else:
prefix = {
'af': VppEnum.vl_api_address_family_t.ADDRESS_IP4,
- 'grp_address': {'ip4': self.ip_gaddr.packed},
- 'src_address': {'ip4': self.ip_saddr.packed},
- 'grp_address_length': self.len,
+ 'grp_address': {
+ 'ip4': self.gaddr
+ },
+ 'src_address': {
+ 'ip4': self.saddr
+ },
+ 'grp_address_length': self.glen,
}
return prefix
+
+ @property
+ def length(self):
+ return self.glen
+
+ @property
+ def version(self):
+ return self.ip_gaddr.version
+
+ def __str__(self):
+ return "(%s,%s)/%d" % (self.saddr, self.gaddr, self.glen)
+
+ def __eq__(self, other):
+ if isinstance(other, self.__class__):
+ return (self.glen == other.glen and
+ self.ip_saddr == other.ip_gaddr and
+ self.ip_saddr == other.ip_saddr)
+ elif (hasattr(other, "grp_address_length") and
+ hasattr(other, "grp_address") and
+ hasattr(other, "src_address")):
+ # vl_api_mprefix_t
+ if 4 == self.ip_saddr.version:
+ if self.glen == other.grp_address_length and \
+ self.gaddr == str(other.grp_address.ip4) and \
+ self.saddr == str(other.src_address.ip4):
+ return True
+ return False
+ else:
+ return (self.glen == other.grp_address_length and
+ self.gaddr == other.grp_address.ip6 and
+ self.saddr == other.src_address.ip6)
+ else:
+ raise Exception("Comparing VppIpPrefix:%s with unknown type: %s" %
+ (self, other))
+ return False
diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py
index db5f4b636bf..5175de7dd0f 100644
--- a/test/vpp_ip_route.py
+++ b/test/vpp_ip_route.py
@@ -6,12 +6,19 @@
from vpp_object import VppObject
from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
-from vpp_ip import DpoProto, VppIpPrefix
+from vpp_ip import DpoProto, VppIpPrefix, INVALID_INDEX, VppIpAddressUnion, \
+ VppIpMPrefix
+from ipaddress import ip_address, IPv4Network, IPv6Network
# from vnet/vnet/mpls/mpls_types.h
MPLS_IETF_MAX_LABEL = 0xfffff
MPLS_LABEL_INVALID = MPLS_IETF_MAX_LABEL + 1
+try:
+ text_type = unicode
+except NameError:
+ text_type = str
+
class MRouteItfFlags:
MFIB_ITF_FLAG_NONE = 0
@@ -30,6 +37,35 @@ class MRouteEntryFlags:
MFIB_ENTRY_FLAG_INHERIT_ACCEPT = 8
+class FibPathProto:
+ FIB_PATH_NH_PROTO_IP4 = 0
+ FIB_PATH_NH_PROTO_IP6 = 1
+ FIB_PATH_NH_PROTO_MPLS = 2
+ FIB_PATH_NH_PROTO_ETHERNET = 3
+ FIB_PATH_NH_PROTO_BIER = 4
+ FIB_PATH_NH_PROTO_NSH = 5
+
+
+class FibPathType:
+ FIB_PATH_TYPE_NORMAL = 0
+ FIB_PATH_TYPE_LOCAL = 1
+ FIB_PATH_TYPE_DROP = 2
+ FIB_PATH_TYPE_UDP_ENCAP = 3
+ FIB_PATH_TYPE_BIER_IMP = 4
+ FIB_PATH_TYPE_ICMP_UNREACH = 5
+ FIB_PATH_TYPE_ICMP_PROHIBIT = 6
+ FIB_PATH_TYPE_SOURCE_LOOKUP = 7
+ FIB_PATH_TYPE_DVR = 8
+ FIB_PATH_TYPE_INTERFACE_RX = 9
+ FIB_PATH_TYPE_CLASSIFY = 10
+
+
+class FibPathFlags:
+ FIB_PATH_FLAG_NONE = 0
+ FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED = 1
+ FIB_PATH_FLAG_RESOLVE_VIA_HOST = 2
+
+
class MplsLspMode:
PIPE = 0
UNIFORM = 1
@@ -42,73 +78,80 @@ def ip_to_dpo_proto(addr):
return DpoProto.DPO_PROTO_IP4
-def find_route(test, ip_addr, len, table_id=0, inet=AF_INET):
- if inet == AF_INET:
- s = 4
- routes = test.vapi.ip_fib_dump()
+def address_proto(ip_addr):
+ if ip_addr.ip_addr.version is 4:
+ return FibPathProto.FIB_PATH_NH_PROTO_IP4
else:
- s = 16
- routes = test.vapi.ip6_fib_dump()
+ return FibPathProto.FIB_PATH_NH_PROTO_IP6
+
+
+def find_route(test, addr, len, table_id=0):
+ ip_addr = ip_address(text_type(addr))
+
+ if 4 is ip_addr.version:
+ routes = test.vapi.ip_route_dump(table_id, False)
+ prefix = IPv4Network("%s/%d" % (text_type(addr), len), strict=False)
+ else:
+ routes = test.vapi.ip_route_dump(table_id, True)
+ prefix = IPv6Network("%s/%d" % (text_type(addr), len), strict=False)
- route_addr = inet_pton(inet, ip_addr)
for e in routes:
- if route_addr == e.address[:s] \
- and len == e.address_length \
- and table_id == e.table_id:
+ if table_id == e.route.table_id \
+ and prefix == e.route.prefix:
return True
return False
def find_mroute(test, grp_addr, src_addr, grp_addr_len,
- table_id=0, inet=AF_INET):
- if inet == AF_INET:
- s = 4
- routes = test.vapi.ip_mfib_dump()
+ table_id=0):
+ ip_mprefix = VppIpMPrefix(text_type(src_addr),
+ text_type(grp_addr),
+ grp_addr_len)
+
+ if 4 is ip_mprefix.version:
+ routes = test.vapi.ip_mroute_dump(table_id, False)
else:
- s = 16
- routes = test.vapi.ip6_mfib_dump()
- gaddr = inet_pton(inet, grp_addr)
- saddr = inet_pton(inet, src_addr)
+ routes = test.vapi.ip_mroute_dump(table_id, True)
+
for e in routes:
- if gaddr == e.grp_address[:s] \
- and grp_addr_len == e.address_length \
- and saddr == e.src_address[:s] \
- and table_id == e.table_id:
+ if table_id == e.route.table_id and ip_mprefix == e.route.prefix:
return True
return False
def find_mpls_route(test, table_id, label, eos_bit, paths=None):
- dump = test.vapi.mpls_fib_dump()
+ dump = test.vapi.mpls_route_dump(table_id)
for e in dump:
- if label == e.label \
- and eos_bit == e.eos_bit \
- and table_id == e.table_id:
+ if label == e.mr_route.mr_label \
+ and eos_bit == e.mr_route.mr_eos \
+ and table_id == e.mr_route.mr_table_id:
if not paths:
return True
else:
- if (len(paths) != len(e.path)):
+ if (len(paths) != len(e.mr_route.mr_paths)):
return False
for i in range(len(paths)):
- if (paths[i] != e.path[i]):
+ if (paths[i] != e.mr_route.mr_paths[i]):
return False
return True
return False
def fib_interface_ip_prefix(test, address, length, sw_if_index):
- vp = VppIpPrefix(address, length)
- addrs = test.vapi.ip_address_dump(sw_if_index, is_ipv6=vp.is_ip6)
+ ip_addr = ip_address(text_type(address))
- if vp.is_ip6:
- n = 16
+ if 4 is ip_addr.version:
+ addrs = test.vapi.ip_address_dump(sw_if_index)
+ prefix = IPv4Network("%s/%d" % (text_type(address), length),
+ strict=False)
else:
- n = 4
+ addrs = test.vapi.ip_address_dump(sw_if_index, is_ipv6=1)
+ prefix = IPv6Network("%s/%d" % (text_type(address), length),
+ strict=False)
for a in addrs:
- if a.prefix_length == length and \
- a.sw_if_index == sw_if_index and \
- a.ip[:n] == vp.bytes:
+ if a.sw_if_index == sw_if_index and \
+ a.prefix == prefix:
return True
return False
@@ -140,8 +183,7 @@ class VppIpTable(VppObject):
return find_route(self._test,
"::" if self.is_ip6 else "0.0.0.0",
0,
- self.table_id,
- inet=AF_INET6 if self.is_ip6 == 1 else AF_INET)
+ self.table_id)
def object_id(self):
return ("table-%s-%d" %
@@ -244,6 +286,37 @@ class VppMplsLabel(object):
return not (self == other)
+class VppFibPathNextHop(object):
+ def __init__(self, addr,
+ via_label=MPLS_LABEL_INVALID,
+ next_hop_id=INVALID_INDEX):
+ self.addr = VppIpAddressUnion(addr)
+ self.via_label = via_label
+ self.obj_id = next_hop_id
+
+ def encode(self):
+ if self.via_label is not MPLS_LABEL_INVALID:
+ return {'via_label': self.via_label}
+ if self.obj_id is not INVALID_INDEX:
+ return {'obj_id': self.obj_id}
+ else:
+ return {'address': self.addr.encode()}
+
+ def proto(self):
+ if self.via_label is MPLS_LABEL_INVALID:
+ return address_proto(self.addr)
+ else:
+ return FibPathProto.FIB_PATH_NH_PROTO_MPLS
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ # try the other instance's __eq__.
+ return NotImplemented
+ return (self.addr == other.addr and
+ self.via_label == other.via_label and
+ self.obj_id == other.obj_id)
+
+
class VppRoutePath(object):
def __init__(
@@ -254,40 +327,26 @@ class VppRoutePath(object):
labels=[],
nh_via_label=MPLS_LABEL_INVALID,
rpf_id=0,
- is_interface_rx=0,
- is_resolve_host=0,
- is_resolve_attached=0,
- is_source_lookup=0,
- is_udp_encap=0,
- is_dvr=0,
- next_hop_id=0xffffffff,
- proto=DpoProto.DPO_PROTO_IP4):
- self.proto = proto
+ next_hop_id=INVALID_INDEX,
+ proto=None,
+ flags=FibPathFlags.FIB_PATH_FLAG_NONE,
+ type=FibPathType.FIB_PATH_TYPE_NORMAL):
self.nh_itf = nh_sw_if_index
self.nh_table_id = nh_table_id
- self.nh_via_label = nh_via_label
self.nh_labels = labels
self.weight = 1
self.rpf_id = rpf_id
- if self.proto is DpoProto.DPO_PROTO_IP6:
- self.nh_addr = inet_pton(AF_INET6, nh_addr)
- elif self.proto is DpoProto.DPO_PROTO_IP4:
- self.nh_addr = inet_pton(AF_INET, nh_addr)
+ self.proto = proto
+ self.flags = flags
+ self.type = type
+ self.nh = VppFibPathNextHop(nh_addr, nh_via_label, next_hop_id)
+ if proto is None:
+ self.proto = self.nh.proto()
else:
- self.nh_addr = inet_pton(AF_INET6, "::")
- self.is_resolve_host = is_resolve_host
- self.is_resolve_attached = is_resolve_attached
- self.is_interface_rx = is_interface_rx
- self.is_source_lookup = is_source_lookup
- self.is_rpf_id = 0
- if rpf_id != 0:
- self.is_rpf_id = 1
- self.nh_itf = rpf_id
- self.is_udp_encap = is_udp_encap
+ self.proto = proto
self.next_hop_id = next_hop_id
- self.is_dvr = is_dvr
- def encode_labels(self, pad_labels=False):
+ def encode_labels(self):
lstack = []
for l in self.nh_labels:
if type(l) == VppMplsLabel:
@@ -295,26 +354,28 @@ class VppRoutePath(object):
else:
lstack.append({'label': l,
'ttl': 255})
- if (pad_labels):
- while (len(lstack) < 16):
- lstack.append({})
+ while (len(lstack) < 16):
+ lstack.append({})
+
return lstack
- def encode(self, pad_labels=False):
- return {'next_hop': self.nh_addr,
- 'weight': 1,
+ def encode(self):
+ return {'weight': 1,
'preference': 0,
'table_id': self.nh_table_id,
+ 'nh': self.nh.encode(),
'next_hop_id': self.next_hop_id,
'sw_if_index': self.nh_itf,
- 'afi': self.proto,
- 'is_udp_encap': self.is_udp_encap,
+ 'rpf_id': self.rpf_id,
+ 'proto': self.proto,
+ 'type': self.type,
+ 'flags': self.flags,
'n_labels': len(self.nh_labels),
- 'label_stack': self.encode_labels(pad_labels)}
+ 'label_stack': self.encode_labels()}
def __eq__(self, other):
if isinstance(other, self.__class__):
- return self.nh_addr == other.nh_addr
+ return self.nh == other.nh
elif hasattr(other, 'sw_if_index'):
# vl_api_fib_path_t
if (len(self.nh_labels) != other.n_labels):
@@ -334,16 +395,24 @@ class VppMRoutePath(VppRoutePath):
def __init__(self, nh_sw_if_index, flags,
nh=None,
- proto=DpoProto.DPO_PROTO_IP4,
- bier_imp=0):
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP4,
+ type=FibPathType.FIB_PATH_TYPE_NORMAL,
+ bier_imp=INVALID_INDEX):
if not nh:
- nh = "::" if proto is DpoProto.DPO_PROTO_IP6 else "0.0.0.0"
+ nh = "::" if proto is FibPathProto.FIB_PATH_NH_PROTO_IP6 \
+ else "0.0.0.0"
super(VppMRoutePath, self).__init__(nh,
nh_sw_if_index,
- proto=proto)
+ proto=proto,
+ type=type,
+ next_hop_id=bier_imp)
self.nh_i_flags = flags
self.bier_imp = bier_imp
+ def encode(self):
+ return {'path': super(VppMRoutePath, self).encode(),
+ 'itf_flags': self.nh_i_flags}
+
class VppIpRoute(VppObject):
"""
@@ -351,107 +420,72 @@ class VppIpRoute(VppObject):
"""
def __init__(self, test, dest_addr,
- dest_addr_len, paths, table_id=0, is_ip6=0, is_local=0,
- is_unreach=0, is_prohibit=0, is_drop=0):
+ dest_addr_len, paths, table_id=0, register=True):
self._test = test
self.paths = paths
- self.dest_addr_len = dest_addr_len
self.table_id = table_id
- self.is_ip6 = is_ip6
- self.is_local = is_local
- self.is_unreach = is_unreach
- self.is_prohibit = is_prohibit
- self.is_drop = is_drop
- self.dest_addr_p = dest_addr
- if is_ip6:
- self.dest_addr = inet_pton(AF_INET6, dest_addr)
- else:
- self.dest_addr = inet_pton(AF_INET, dest_addr)
+ self.prefix = VppIpPrefix(dest_addr, dest_addr_len)
+ self.register = register
+
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+
+ def __eq__(self, other):
+ if self.table_id == other.table_id and \
+ self.prefix == other.prefix:
+ return True
+ return False
- def modify(self, paths, is_local=0,
- is_unreach=0, is_prohibit=0):
+ def modify(self, paths):
self.paths = paths
- self.is_local = is_local
- self.is_unreach = is_unreach
- self.is_prohibit = is_prohibit
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+
+ self._test.vapi.ip_route_add_del(route={'table_id': self.table_id,
+ 'prefix': self.prefix.encode(),
+ 'n_paths': len(
+ self.encoded_paths),
+ 'paths': self.encoded_paths,
+ },
+ is_add=1,
+ is_multipath=0)
def add_vpp_config(self):
- if self.is_unreach or self.is_prohibit or self.is_drop:
- r = self._test.vapi.ip_add_del_route(
- dst_address=self.dest_addr,
- dst_address_length=self.dest_addr_len,
- next_hop_address=inet_pton(
- AF_INET6, "::"),
- next_hop_sw_if_index=0xffffffff,
- table_id=self.table_id,
- is_drop=self.is_drop,
- is_unreach=self.is_unreach,
- is_prohibit=self.is_prohibit,
- is_ipv6=self.is_ip6,
- is_local=self.is_local)
- else:
- for path in self.paths:
- lstack = path.encode_labels()
-
- r = self._test.vapi.ip_add_del_route(
- dst_address=self.dest_addr,
- dst_address_length=self.dest_addr_len,
- next_hop_address=path.nh_addr,
- next_hop_sw_if_index=path.nh_itf, table_id=self.table_id,
- next_hop_table_id=path.nh_table_id,
- next_hop_n_out_labels=len(lstack),
- next_hop_out_label_stack=lstack,
- next_hop_via_label=path.nh_via_label,
- next_hop_id=path.next_hop_id,
- is_resolve_host=path.is_resolve_host,
- is_resolve_attached=path.is_resolve_attached,
- is_ipv6=self.is_ip6, is_local=self.is_local,
- is_multipath=1 if len(self.paths) > 1 else 0,
- is_dvr=path.is_dvr, is_udp_encap=path.is_udp_encap,
- is_source_lookup=path.is_source_lookup)
+ r = self._test.vapi.ip_route_add_del(
+ route={'table_id': self.table_id,
+ 'prefix': self.prefix.encode(),
+ 'n_paths': len(self.encoded_paths),
+ 'paths': self.encoded_paths,
+ },
+ is_add=1,
+ is_multipath=0)
self.stats_index = r.stats_index
- self._test.registry.register(self, self._test.logger)
+ if self.register:
+ self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
- if self.is_unreach or self.is_prohibit or self.is_drop:
- self._test.vapi.ip_add_del_route(
- dst_address=self.dest_addr,
- dst_address_length=self.dest_addr_len,
- next_hop_address=inet_pton(
- AF_INET6, "::"),
- next_hop_sw_if_index=0xffffffff,
- table_id=self.table_id, is_add=0,
- is_unreach=self.is_unreach,
- is_prohibit=self.is_prohibit,
- is_ipv6=self.is_ip6,
- is_local=self.is_local)
- else:
- for path in self.paths:
- self._test.vapi.ip_add_del_route(
- dst_address=self.dest_addr,
- dst_address_length=self.dest_addr_len,
- next_hop_address=path.nh_addr,
- next_hop_sw_if_index=path.nh_itf,
- table_id=self.table_id,
- next_hop_table_id=path.nh_table_id,
- next_hop_via_label=path.nh_via_label,
- next_hop_id=path.next_hop_id,
- is_add=0, is_ipv6=self.is_ip6,
- is_dvr=path.is_dvr,
- is_udp_encap=path.is_udp_encap)
+ self._test.vapi.ip_route_add_del(route={'table_id': self.table_id,
+ 'prefix': self.prefix.encode(),
+ 'n_paths': len(
+ self.encoded_paths),
+ 'paths': self.encoded_paths,
+ },
+ is_add=0,
+ is_multipath=0)
def query_vpp_config(self):
return find_route(self._test,
- self.dest_addr_p,
- self.dest_addr_len,
- self.table_id,
- inet=AF_INET6 if self.is_ip6 == 1 else AF_INET)
+ self.prefix.address,
+ self.prefix.len,
+ self.table_id)
def object_id(self):
return ("%d:%s/%d"
% (self.table_id,
- self.dest_addr_p,
- self.dest_addr_len))
+ self.prefix.address,
+ self.prefix.len))
def get_stats_to(self):
c = self._test.statistics.get_counter("/net/route/to")
@@ -469,120 +503,81 @@ class VppIpMRoute(VppObject):
def __init__(self, test, src_addr, grp_addr,
grp_addr_len, e_flags, paths, table_id=0,
- rpf_id=0, is_ip6=0):
+ rpf_id=0):
self._test = test
self.paths = paths
- self.grp_addr_len = grp_addr_len
self.table_id = table_id
self.e_flags = e_flags
- self.is_ip6 = is_ip6
self.rpf_id = rpf_id
- self.grp_addr_p = grp_addr
- self.src_addr_p = src_addr
- if is_ip6:
- self.grp_addr = inet_pton(AF_INET6, grp_addr)
- self.src_addr = inet_pton(AF_INET6, src_addr)
- else:
- self.grp_addr = inet_pton(AF_INET, grp_addr)
- self.src_addr = inet_pton(AF_INET, src_addr)
+ self.prefix = VppIpMPrefix(src_addr, grp_addr, grp_addr_len)
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
def add_vpp_config(self):
- for path in self.paths:
- r = self._test.vapi.ip_mroute_add_del(self.src_addr,
- self.grp_addr,
- self.grp_addr_len,
- self.e_flags,
- path.proto,
- path.nh_itf,
- path.nh_addr,
- path.nh_i_flags,
- bier_imp=path.bier_imp,
- rpf_id=self.rpf_id,
- table_id=self.table_id,
- is_ipv6=self.is_ip6)
- self.stats_index = r.stats_index
+ r = self._test.vapi.ip_mroute_add_del(self.table_id,
+ self.prefix.encode(),
+ self.e_flags,
+ self.rpf_id,
+ self.encoded_paths,
+ is_add=1)
+ self.stats_index = r.stats_index
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
- for path in self.paths:
- self._test.vapi.ip_mroute_add_del(self.src_addr,
- self.grp_addr,
- self.grp_addr_len,
- self.e_flags,
- path.proto,
- path.nh_itf,
- path.nh_addr,
- path.nh_i_flags,
- table_id=self.table_id,
- bier_imp=path.bier_imp,
- is_add=0,
- is_ipv6=self.is_ip6)
+ self._test.vapi.ip_mroute_add_del(self.table_id,
+ self.prefix.encode(),
+ self.e_flags,
+ self.rpf_id,
+ self.encoded_paths,
+ is_add=0)
def update_entry_flags(self, flags):
self.e_flags = flags
- self._test.vapi.ip_mroute_add_del(self.src_addr,
- self.grp_addr,
- self.grp_addr_len,
+ self._test.vapi.ip_mroute_add_del(self.table_id,
+ self.prefix.encode(),
self.e_flags,
- 0,
- 0xffffffff,
- "",
- 0,
- table_id=self.table_id,
- is_ipv6=self.is_ip6)
+ self.rpf_id,
+ [],
+ is_add=1)
def update_rpf_id(self, rpf_id):
self.rpf_id = rpf_id
- self._test.vapi.ip_mroute_add_del(self.src_addr,
- self.grp_addr,
- self.grp_addr_len,
+ self._test.vapi.ip_mroute_add_del(self.table_id,
+ self.prefix.encode(),
self.e_flags,
- 0,
- 0xffffffff,
- "",
- 0,
- rpf_id=self.rpf_id,
- table_id=self.table_id,
- is_ipv6=self.is_ip6)
+ self.rpf_id,
+ [],
+ is_add=1)
def update_path_flags(self, itf, flags):
- for path in self.paths:
- if path.nh_itf == itf:
- path.nh_i_flags = flags
- break
- self._test.vapi.ip_mroute_add_del(self.src_addr,
- self.grp_addr,
- self.grp_addr_len,
+ for p in range(len(self.paths)):
+ if self.paths[p].nh_itf == itf:
+ self.paths[p].nh_i_flags = flags
+ self.encoded_paths[p] = self.paths[p].encode()
+ break
+
+ self._test.vapi.ip_mroute_add_del(self.table_id,
+ self.prefix.encode(),
self.e_flags,
- path.proto,
- path.nh_itf,
- path.nh_addr,
- path.nh_i_flags,
- table_id=self.table_id,
- is_ipv6=self.is_ip6)
+ self.rpf_id,
+ [self.encoded_paths[p]],
+ is_add=1,
+ is_multipath=0)
def query_vpp_config(self):
return find_mroute(self._test,
- self.grp_addr_p,
- self.src_addr_p,
- self.grp_addr_len,
- self.table_id,
- inet=AF_INET6 if self.is_ip6 == 1 else AF_INET)
+ self.prefix.gaddr,
+ self.prefix.saddr,
+ self.prefix.length,
+ self.table_id)
def object_id(self):
- if self.is_ip6:
- return ("%d:(%s,%s/%d)"
- % (self.table_id,
- inet_ntop(AF_INET6, self.src_addr),
- inet_ntop(AF_INET6, self.grp_addr),
- self.grp_addr_len))
- else:
- return ("%d:(%s,%s/%d)"
- % (self.table_id,
- inet_ntop(AF_INET, self.src_addr),
- inet_ntop(AF_INET, self.grp_addr),
- self.grp_addr_len))
+ return ("%d:(%s,%s/%d)" % (self.table_id,
+ self.prefix.saddr,
+ self.prefix.gaddr,
+ self.prefix.length))
def get_stats(self):
c = self._test.statistics.get_counter("/net/mroute")
@@ -599,15 +594,7 @@ class VppMFibSignal(object):
def compare(self, signal):
self.test.assertEqual(self.interface, signal.sw_if_index)
self.test.assertEqual(self.route.table_id, signal.table_id)
- self.test.assertEqual(self.route.grp_addr_len,
- signal.grp_address_len)
- for i in range(self.route.grp_addr_len / 8):
- self.test.assertEqual(self.route.grp_addr[i],
- signal.grp_address[i])
- if (self.route.grp_addr_len > 32):
- for i in range(4):
- self.test.assertEqual(self.route.src_addr[i],
- signal.src_address[i])
+ self.test.assertEqual(self.route.prefix, signal.prefix)
class VppMplsIpBind(VppObject):
@@ -620,38 +607,31 @@ class VppMplsIpBind(VppObject):
self._test = test
self.dest_addr_len = dest_addr_len
self.dest_addr = dest_addr
+ self.ip_addr = ip_address(text_type(dest_addr))
self.local_label = local_label
self.table_id = table_id
self.ip_table_id = ip_table_id
- self.is_ip6 = is_ip6
- if is_ip6:
- self.dest_addrn = inet_pton(AF_INET6, dest_addr)
- else:
- self.dest_addrn = inet_pton(AF_INET, dest_addr)
+ self.prefix = VppIpPrefix(dest_addr, dest_addr_len)
def add_vpp_config(self):
self._test.vapi.mpls_ip_bind_unbind(self.local_label,
- self.dest_addrn,
- self.dest_addr_len,
+ self.prefix.encode(),
table_id=self.table_id,
- ip_table_id=self.ip_table_id,
- is_ip4=(self.is_ip6 == 0))
+ ip_table_id=self.ip_table_id)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.mpls_ip_bind_unbind(self.local_label,
- self.dest_addrn,
- self.dest_addr_len,
+ self.prefix.encode(),
table_id=self.table_id,
ip_table_id=self.ip_table_id,
- is_bind=0,
- is_ip4=(self.is_ip6 == 0))
+ is_bind=0)
def query_vpp_config(self):
- dump = self._test.vapi.mpls_fib_dump()
+ dump = self._test.vapi.mpls_route_dump(self.table_id)
for e in dump:
- if self.local_label == e.label \
- and self.table_id == e.table_id:
+ if self.local_label == e.mr_route.mr_label \
+ and self.table_id == e.mr_route.mr_table_id:
return True
return False
@@ -684,10 +664,10 @@ class VppMplsTable(VppObject):
is_add=0)
def query_vpp_config(self):
- # find the default route
- dump = self._test.vapi.mpls_fib_dump()
- if len(dump):
- return True
+ dump = self._test.vapi.mpls_table_dump()
+ for d in dump:
+ if d.mt_table.mt_table_id == self.table_id:
+ return True
return False
def object_id(self):
@@ -700,49 +680,41 @@ class VppMplsRoute(VppObject):
"""
def __init__(self, test, local_label, eos_bit, paths, table_id=0,
- is_multicast=0):
+ is_multicast=0,
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4):
self._test = test
self.paths = paths
self.local_label = local_label
self.eos_bit = eos_bit
+ self.eos_proto = eos_proto
self.table_id = table_id
self.is_multicast = is_multicast
def add_vpp_config(self):
- is_multipath = len(self.paths) > 1
+ paths = []
for path in self.paths:
- lstack = path.encode_labels()
-
- r = self._test.vapi.mpls_route_add_del(
- mr_label=self.local_label,
- mr_eos=self.eos_bit,
- mr_next_hop_proto=path.proto,
- mr_next_hop=path.nh_addr,
- mr_next_hop_sw_if_index=path.nh_itf,
- mr_table_id=self.table_id,
- mr_next_hop_table_id=path.nh_table_id,
- mr_next_hop_n_out_labels=len(
- lstack),
- mr_next_hop_out_label_stack=lstack,
- mr_next_hop_via_label=path.nh_via_label,
- mr_is_interface_rx=path.is_interface_rx,
- mr_is_rpf_id=path.is_rpf_id,
- mr_is_multicast=self.is_multicast,
- mr_is_multipath=is_multipath)
+ paths.append(path.encode())
+
+ r = self._test.vapi.mpls_route_add_del(self.table_id,
+ self.local_label,
+ self.eos_bit,
+ self.eos_proto,
+ self.is_multicast,
+ paths, 1, 0)
self.stats_index = r.stats_index
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
+ paths = []
for path in self.paths:
- self._test.vapi.mpls_route_add_del(
- mr_label=self.local_label,
- mr_eos=self.eos_bit,
- mr_next_hop_proto=path.proto,
- mr_next_hop=path.nh_addr,
- mr_next_hop_sw_if_index=path.nh_itf,
- mr_table_id=self.table_id,
- mr_is_rpf_id=path.is_rpf_id,
- mr_is_add=0)
+ paths.append(path.encode())
+
+ self._test.vapi.mpls_route_add_del(self.table_id,
+ self.local_label,
+ self.eos_bit,
+ self.eos_proto,
+ self.is_multicast,
+ paths, 0, 0)
def query_vpp_config(self):
return find_mpls_route(self._test, self.table_id,
diff --git a/test/vpp_memif.py b/test/vpp_memif.py
index 7836dc14a09..7fa45092740 100644
--- a/test/vpp_memif.py
+++ b/test/vpp_memif.py
@@ -65,7 +65,7 @@ class VppSocketFilename(VppObject):
return self._test.vapi.memif_socket_filename_dump()
def object_id(self):
- return "%d" % (self.socket_id)
+ return "socket-filename-%d-%s" % (self.socket_id, self.socket_filename)
class VppMemif(VppObject):
@@ -88,12 +88,26 @@ class VppMemif(VppObject):
self.ip4_addr_len = 24
def add_vpp_config(self):
- rv = self._test.vapi.memif_create(self.role, self.mode, self.rx_queues,
- self.tx_queues, self.if_id,
- self.socket_id, self.secret,
- self.ring_size, self.buffer_size,
- self.hw_addr)
- self.sw_if_index = rv.sw_if_index
+ rv = self._test.vapi.memif_create(
+ role=self.role,
+ mode=self.mode,
+ rx_queues=self.rx_queues,
+ tx_queues=self.tx_queues,
+ id=self.if_id,
+ socket_id=self.socket_id,
+ secret=self.secret,
+ ring_size=self.ring_size,
+ buffer_size=self.buffer_size,
+ hw_addr=self.hw_addr)
+ try:
+ self.sw_if_index = 0
+ except AttributeError:
+ raise AttributeError('self: %s' % self.__dict__)
+ try:
+ self.sw_if_index = rv.sw_if_index
+ except AttributeError:
+ raise AttributeError("%s %s", self, rv)
+
return self.sw_if_index
def admin_up(self):
diff --git a/test/vpp_mpls_tunnel_interface.py b/test/vpp_mpls_tunnel_interface.py
index 12f62fb4b51..598936136ad 100644
--- a/test/vpp_mpls_tunnel_interface.py
+++ b/test/vpp_mpls_tunnel_interface.py
@@ -13,50 +13,31 @@ class VppMPLSTunnelInterface(VppInterface):
self.t_paths = paths
self.is_multicast = is_multicast
self.is_l2 = is_l2
-
- def add_vpp_config(self):
- sw_if_index = 0xffffffff
+ self.encoded_paths = []
for path in self.t_paths:
- lstack = path.encode_labels()
+ self.encoded_paths.append(path.encode())
- reply = self.test.vapi.mpls_tunnel_add_del(
- sw_if_index,
- 1, # IPv4 next-hop
- path.nh_addr,
- path.nh_itf,
- path.nh_table_id,
- path.weight,
- next_hop_via_label=path.nh_via_label,
- next_hop_out_label_stack=lstack,
- next_hop_n_out_labels=len(lstack),
- is_multicast=self.is_multicast,
- l2_only=self.is_l2)
- sw_if_index = reply.sw_if_index
- self.tunnel_index = reply.tunnel_index
- self.set_sw_if_index(sw_if_index)
+ def add_vpp_config(self):
+ reply = self.test.vapi.mpls_tunnel_add_del(
+ 0xffffffff,
+ self.encoded_paths,
+ is_multicast=self.is_multicast,
+ l2_only=self.is_l2)
+ self.set_sw_if_index(reply.sw_if_index)
+ self.tunnel_index = reply.tunnel_index
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
- for path in self.t_paths:
- lstack = path.encode_labels()
-
- self.test.vapi.mpls_tunnel_add_del(
- self.sw_if_index,
- 1, # IPv4 next-hop
- path.nh_addr,
- path.nh_itf,
- path.nh_table_id,
- path.weight,
- next_hop_via_label=path.nh_via_label,
- next_hop_out_label_stack=lstack,
- next_hop_n_out_labels=len(lstack),
- is_add=0)
+ reply = self.test.vapi.mpls_tunnel_add_del(
+ self.sw_if_index,
+ self.encoded_paths,
+ is_add=0)
def query_vpp_config(self):
dump = self._test.vapi.mpls_tunnel_dump()
for t in dump:
- if self.sw_if_index == t.mt_sw_if_index and \
- self.tunnel_index == t.mt_tunnel_index:
+ if self.sw_if_index == t.mt_tunnel.mt_sw_if_index and \
+ self.tunnel_index == t.mt_tunnel.mt_tunnel_index:
return True
return False
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index 6a6fb45529e..038a3718dde 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -67,6 +67,7 @@ defaultmapping = {
'ip_neighbor_add_del': {'is_add': 1, },
'ip_punt_police': {'is_add': 1, },
'ip_punt_redirect': {'is_add': 1, },
+ 'ip_route_add_del': {'is_add': 1, },
'ip_table_add_del': {'is_add': 1, },
'ip_unnumbered_dump': {'sw_if_index': 4294967295, },
'ipsec_interface_add_del_spd': {'is_add': 1, },
@@ -506,6 +507,37 @@ class VppPapiProvider(object):
return self.api(self.papi.create_loopback,
{'mac_address': mac})
+ def ip_table_add_del(self,
+ table_id,
+ is_add=1,
+ is_ipv6=0):
+ """
+
+ :param table_id
+ :param is_add: (Default value = 1)
+ :param is_ipv6: (Default value = 0)
+
+ """
+
+ return self.api(
+ self.papi.ip_table_add_del,
+ {'table':
+ {
+ 'table_id': table_id,
+ 'is_ip6': is_ipv6
+ },
+ 'is_add': is_add})
+
+ def ip_table_dump(self):
+ return self.api(self.papi.ip_table_dump, {})
+
+ def ip_route_dump(self, table_id, is_ip6=False):
+ return self.api(self.papi.ip_route_dump,
+ {'table': {
+ 'table_id': table_id,
+ 'is_ip6': is_ip6
+ }})
+
def ip_neighbor_add_del(self,
sw_if_index,
mac_address,
@@ -631,6 +663,26 @@ class VppPapiProvider(object):
}
})
+ def udp_encap_del(self, id):
+ return self.api(self.papi.udp_encap_del, {'id': id})
+
+ def udp_encap_dump(self):
+ return self.api(self.papi.udp_encap_dump, {})
+
+ def want_udp_encap_stats(self, enable=1):
+ return self.api(self.papi.want_udp_encap_stats,
+ {'enable': enable,
+ 'pid': os.getpid()})
+
+ def mpls_route_dump(self, table_id):
+ return self.api(self.papi.mpls_route_dump,
+ {'table': {
+ 'mt_table_id': table_id
+ }})
+
+ def mpls_table_dump(self):
+ return self.api(self.papi.mpls_table_dump, {})
+
def mpls_table_add_del(
self,
table_id,
@@ -644,17 +696,43 @@ class VppPapiProvider(object):
return self.api(
self.papi.mpls_table_add_del,
- {'mt_table_id': table_id,
+ {'mt_table':
+ {
+ 'mt_table_id': table_id,
+ },
'mt_is_add': is_add})
+ def mpls_route_add_del(self,
+ table_id,
+ label,
+ eos,
+ eos_proto,
+ is_multicast,
+ paths,
+ is_add,
+ is_multipath):
+ """ MPLS Route add/del """
+ return self.api(
+ self.papi.mpls_route_add_del,
+ {'mr_route':
+ {
+ 'mr_table_id': table_id,
+ 'mr_label': label,
+ 'mr_eos': eos,
+ 'mr_eos_proto': eos_proto,
+ 'mr_is_multicast': is_multicast,
+ 'mr_n_paths': len(paths),
+ 'mr_paths': paths,
+ },
+ 'mr_is_add': is_add,
+ 'mr_is_multipath': is_multipath})
+
def mpls_ip_bind_unbind(
self,
label,
- dst_address,
- dst_address_length,
+ prefix,
table_id=0,
ip_table_id=0,
- is_ip4=1,
is_bind=1):
"""
"""
@@ -664,60 +742,28 @@ class VppPapiProvider(object):
'mb_label': label,
'mb_ip_table_id': ip_table_id,
'mb_is_bind': is_bind,
- 'mb_is_ip4': is_ip4,
- 'mb_address_length': dst_address_length,
- 'mb_address': dst_address})
+ 'mb_prefix': prefix})
def mpls_tunnel_add_del(
self,
tun_sw_if_index,
- next_hop_proto_is_ip4,
- next_hop_address,
- next_hop_sw_if_index=0xFFFFFFFF,
- next_hop_table_id=0,
- next_hop_weight=1,
- next_hop_n_out_labels=0,
- next_hop_out_label_stack=[],
- next_hop_via_label=MPLS_LABEL_INVALID,
+ paths,
is_add=1,
l2_only=0,
is_multicast=0):
"""
-
- :param dst_address_length:
- :param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
- :param dst_address:
- :param next_hop_address:
- :param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
- :param vrf_id: (Default value = 0)
- :param lookup_in_vrf: (Default value = 0)
- :param classify_table_index: (Default value = 0xFFFFFFFF)
- :param is_add: (Default value = 1)
- :param is_drop: (Default value = 0)
- :param is_ipv6: (Default value = 0)
- :param is_local: (Default value = 0)
- :param is_classify: (Default value = 0)
- :param is_multipath: (Default value = 0)
- :param is_resolve_host: (Default value = 0)
- :param is_resolve_attached: (Default value = 0)
- :param next_hop_weight: (Default value = 1)
- :param is_multicast: (Default value = 0)
-
"""
return self.api(
self.papi.mpls_tunnel_add_del,
- {'mt_sw_if_index': tun_sw_if_index,
- 'mt_is_add': is_add,
- 'mt_l2_only': l2_only,
- 'mt_is_multicast': is_multicast,
- 'mt_next_hop_proto_is_ip4': next_hop_proto_is_ip4,
- 'mt_next_hop_weight': next_hop_weight,
- 'mt_next_hop': next_hop_address,
- 'mt_next_hop_n_out_labels': next_hop_n_out_labels,
- 'mt_next_hop_sw_if_index': next_hop_sw_if_index,
- 'mt_next_hop_table_id': next_hop_table_id,
- 'mt_next_hop_via_label': next_hop_via_label,
- 'mt_next_hop_out_label_stack': next_hop_out_label_stack})
+ {'mt_is_add': is_add,
+ 'mt_tunnel':
+ {
+ 'mt_sw_if_index': tun_sw_if_index,
+ 'mt_l2_only': l2_only,
+ 'mt_is_multicast': is_multicast,
+ 'mt_n_paths': len(paths),
+ 'mt_paths': paths,
+ }})
def bfd_udp_add(self, sw_if_index, desired_min_tx, required_min_rx,
detect_mult, local_addr, peer_addr, is_ipv6=0,
@@ -1004,39 +1050,40 @@ class VppPapiProvider(object):
})
def ip_mroute_add_del(self,
- src_address,
- grp_address,
- grp_address_length,
+ table_id,
+ prefix,
e_flags,
- next_hop_afi,
- next_hop_sw_if_index,
- next_hop_address,
- i_flags,
- bier_imp=0,
- rpf_id=0,
- table_id=0,
+ rpf_id,
+ paths,
is_add=1,
- is_ipv6=0,
- is_local=0):
+ is_multipath=1):
"""
IP Multicast Route add/del
"""
return self.api(
self.papi.ip_mroute_add_del,
- {'next_hop_sw_if_index': next_hop_sw_if_index,
- 'entry_flags': e_flags,
- 'itf_flags': i_flags,
- 'table_id': table_id,
- 'rpf_id': rpf_id,
- 'is_add': is_add,
- 'is_ipv6': is_ipv6,
- 'is_local': is_local,
- 'bier_imp': bier_imp,
- 'next_hop_afi': next_hop_afi,
- 'grp_address_length': grp_address_length,
- 'grp_address': grp_address,
- 'src_address': src_address,
- 'nh_address': next_hop_address})
+ {
+ 'is_add': is_add,
+ 'is_multipath': is_multipath,
+ 'route': {
+ 'table_id': table_id,
+ 'entry_flags': e_flags,
+ 'rpf_id': rpf_id,
+ 'prefix': prefix,
+ 'n_paths': len(paths),
+ 'paths': paths,
+ }
+ })
+
+ def mfib_signal_dump(self):
+ return self.api(self.papi.mfib_signal_dump, {})
+
+ def ip_mroute_dump(self, table_id, is_ip6=False):
+ return self.api(self.papi.ip_mroute_dump,
+ {'table': {
+ 'table_id': table_id,
+ 'is_ip6': is_ip6
+ }})
def lisp_enable_disable(self, is_enabled):
return self.api(
@@ -1633,14 +1680,18 @@ class VppPapiProvider(object):
""" BIER Route add/del """
return self.api(
self.papi.bier_route_add_del,
- {'br_tbl_id': {"bt_set": bti.set_id,
- "bt_sub_domain": bti.sub_domain_id,
- "bt_hdr_len_id": bti.hdr_len_id},
- 'br_bp': bp,
- 'br_n_paths': len(paths),
- 'br_paths': paths,
- 'br_is_add': is_add,
- 'br_is_replace': is_replace})
+ {
+ 'br_route': {
+ 'br_tbl_id': {"bt_set": bti.set_id,
+ "bt_sub_domain": bti.sub_domain_id,
+ "bt_hdr_len_id": bti.hdr_len_id},
+ 'br_bp': bp,
+ 'br_n_paths': len(paths),
+ 'br_paths': paths,
+ },
+ 'br_is_add': is_add,
+ 'br_is_replace': is_replace
+ })
def bier_route_dump(self, bti):
return self.api(
@@ -2238,30 +2289,6 @@ class VppPapiProvider(object):
return self.api(self.papi.pipe_delete,
{'parent_sw_if_index': parent_sw_if_index})
- def memif_create(
- self,
- role,
- mode,
- rx_queues=None,
- tx_queues=None,
- _id=None,
- socket_id=None,
- secret=None,
- ring_size=None,
- buffer_size=None,
- hw_addr=None):
- return self.api(self.papi.memif_create,
- {'role': role,
- 'mode': mode,
- 'rx_queues': rx_queues,
- 'tx_queues': tx_queues,
- 'id': _id,
- 'socket_id': socket_id,
- 'secret': secret,
- 'ring_size': ring_size,
- 'buffer_size': buffer_size,
- 'hw_addr': hw_addr})
-
def svs_table_add_del(self, af, table_id, is_add=1):
return self.api(self.papi.svs_table_add_del,
{