aboutsummaryrefslogtreecommitdiffstats
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rw-r--r--test/ext/vom_test.cpp27
-rw-r--r--test/test_gbp.py1029
-rw-r--r--test/vpp_papi_provider.py63
3 files changed, 934 insertions, 185 deletions
diff --git a/test/ext/vom_test.cpp b/test/ext/vom_test.cpp
index 29738e2437e..6958eb117a4 100644
--- a/test/ext/vom_test.cpp
+++ b/test/ext/vom_test.cpp
@@ -892,7 +892,11 @@ BOOST_AUTO_TEST_CASE(test_bridge) {
bridge_domain bd1(33);
HW::item<uint32_t> hw_bd(33, rc_t::OK);
- ADD_EXPECT(bridge_domain_cmds::create_cmd(hw_bd, bridge_domain::learning_mode_t::ON));
+ ADD_EXPECT(bridge_domain_cmds::create_cmd(hw_bd,
+ bridge_domain::learning_mode_t::ON,
+ bridge_domain::arp_term_mode_t::ON,
+ bridge_domain::flood_mode_t::ON,
+ bridge_domain::mac_age_mode_t::OFF));
TRY_CHECK_RC(OM::write(franz, bd1));
@@ -973,7 +977,11 @@ BOOST_AUTO_TEST_CASE(test_bridge) {
bridge_domain bd2(99);
HW::item<uint32_t> hw_bd2(99, rc_t::OK);
- ADD_EXPECT(bridge_domain_cmds::create_cmd(hw_bd2, bridge_domain::learning_mode_t::ON));
+ ADD_EXPECT(bridge_domain_cmds::create_cmd(hw_bd2,
+ bridge_domain::learning_mode_t::ON,
+ bridge_domain::arp_term_mode_t::ON,
+ bridge_domain::flood_mode_t::ON,
+ bridge_domain::mac_age_mode_t::OFF));
TRY_CHECK_RC(OM::write(jkr, bd2));
@@ -1020,8 +1028,8 @@ BOOST_AUTO_TEST_CASE(test_vxlan) {
// VXLAN create
vxlan_tunnel::endpoint_t ep(boost::asio::ip::address::from_string("10.10.10.10"),
- boost::asio::ip::address::from_string("10.10.10.11"),
- 322);
+ boost::asio::ip::address::from_string("10.10.10.11"),
+ 322);
vxlan_tunnel vxt(ep.src, ep.dst, ep.vni);
@@ -1031,10 +1039,17 @@ BOOST_AUTO_TEST_CASE(test_vxlan) {
TRY_CHECK_RC(OM::write(franz, vxt));
// bridge-domain create
- bridge_domain bd1(33, bridge_domain::learning_mode_t::OFF);
+ bridge_domain bd1(33, bridge_domain::learning_mode_t::OFF,
+ bridge_domain::arp_term_mode_t::OFF,
+ bridge_domain::flood_mode_t::OFF,
+ bridge_domain::mac_age_mode_t::ON);
HW::item<uint32_t> hw_bd(33, rc_t::OK);
- ADD_EXPECT(bridge_domain_cmds::create_cmd(hw_bd, bridge_domain::learning_mode_t::OFF));
+ ADD_EXPECT(bridge_domain_cmds::create_cmd(hw_bd,
+ bridge_domain::learning_mode_t::OFF,
+ bridge_domain::arp_term_mode_t::OFF,
+ bridge_domain::flood_mode_t::OFF,
+ bridge_domain::mac_age_mode_t::ON));
TRY_CHECK_RC(OM::write(franz, bd1));
diff --git a/test/test_gbp.py b/test/test_gbp.py
index 427b14de506..805f2610ea3 100644
--- a/test/test_gbp.py
+++ b/test/test_gbp.py
@@ -6,14 +6,19 @@ import struct
from framework import VppTestCase, VppTestRunner
from vpp_object import VppObject
+from vpp_neighbor import VppNeighbor
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, DpoProto
from scapy.packet import Raw
-from scapy.layers.l2 import Ether
+from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP, UDP
-from scapy.layers.inet6 import IPv6
+from scapy.layers.inet6 import IPv6, ICMPv6ND_NS, ICMPv6NDOptSrcLLAddr, \
+ ICMPv6NDOptDstLLAddr, ICMPv6ND_NA
+from scapy.utils6 import in6_getnsma, in6_getnsmac
from socket import AF_INET, AF_INET6
-from scapy.utils import inet_pton
+from scapy.utils import inet_pton, inet_ntop
+from util import Host, mactobinary
class VppGbpEndpoint(VppObject):
@@ -21,47 +26,221 @@ class VppGbpEndpoint(VppObject):
GDB Endpoint
"""
- def __init__(self, test, sw_if_index, addr, epg, is_ip6=0):
+ @property
+ def bin_mac(self):
+ return mactobinary(self.itf.remote_mac)
+
+ @property
+ def mac(self):
+ return self.itf.remote_mac
+
+ def __init__(self, test, itf, epg, recirc, ip, fip, is_ip6=False):
self._test = test
- self.sw_if_index = sw_if_index
+ self.itf = itf
self.epg = epg
- self.addr_p = addr
+ self.recirc = recirc
+ self.ip = ip
+ self.floating_ip = fip
self.is_ip6 = is_ip6
if is_ip6:
- self.addr = inet_pton(AF_INET6, addr)
+ self.proto = DpoProto.DPO_PROTO_IP6
+ self.af = AF_INET6
else:
- self.addr = inet_pton(AF_INET, addr)
+ self.proto = DpoProto.DPO_PROTO_IP4
+ self.af = AF_INET
+ self.ip_n = inet_pton(self.af, ip)
+ self.floating_ip_n = inet_pton(self.af, fip)
def add_vpp_config(self):
self._test.vapi.gbp_endpoint_add_del(
1,
- self.sw_if_index,
- self.addr,
+ self.itf.sw_if_index,
+ self.ip_n,
self.is_ip6,
- self.epg)
+ self.epg.epg)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.gbp_endpoint_add_del(
0,
- self.sw_if_index,
- self.addr,
+ self.itf.sw_if_index,
+ self.ip_n,
self.is_ip6,
- self.epg)
+ self.epg.epg)
def __str__(self):
return self.object_id()
def object_id(self):
- return "gbp-endpoint;[%d:%s:%d]" % (self.sw_if_index,
- self.addr_p,
- self.epg)
+ return "gbp-endpoint;[%d:%s:%d]" % (self.itf.sw_if_index,
+ self.ip,
+ self.epg.epg)
def query_vpp_config(self):
eps = self._test.vapi.gbp_endpoint_dump()
for ep in eps:
- if ep.endpoint.address == self.addr \
- and ep.endpoint.sw_if_index == self.sw_if_index:
+ if self.is_ip6:
+ if ep.endpoint.address == self.ip_n \
+ and ep.endpoint.sw_if_index == self.itf.sw_if_index:
+ return True
+ else:
+ if ep.endpoint.address[:4] == self.ip_n \
+ and ep.endpoint.sw_if_index == self.itf.sw_if_index:
+ return True
+ return False
+
+
+class VppGbpRecirc(VppObject):
+ """
+ GBP Recirculation Interface
+ """
+
+ def __init__(self, test, epg, recirc, is_ext=False):
+ self._test = test
+ self.recirc = recirc
+ self.epg = epg
+ self.is_ext = is_ext
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_recirc_add_del(
+ 1,
+ self.recirc.sw_if_index,
+ self.epg.epg,
+ self.is_ext)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_recirc_add_del(
+ 0,
+ self.recirc.sw_if_index,
+ self.epg.epg,
+ self.is_ext)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "gbp-recirc;[%d]" % (self.recirc.sw_if_index)
+
+ def query_vpp_config(self):
+ rs = self._test.vapi.gbp_recirc_dump()
+ for r in rs:
+ if r.recirc.sw_if_index == self.recirc.sw_if_index:
+ return True
+ return False
+
+
+class VppGbpSubnet(VppObject):
+ """
+ GBP Subnet
+ """
+
+ def __init__(self, test, table_id, address, address_len,
+ is_internal=True, is_ip6=False,
+ sw_if_index=None, epg=None):
+ self._test = test
+ self.table_id = table_id
+ self.address = address
+ self.address_len = address_len
+ self.is_ip6 = is_ip6
+ if is_ip6:
+ self.address_n = inet_pton(AF_INET6, address)
+ else:
+ self.address_n = inet_pton(AF_INET, address)
+ self.is_internal = is_internal
+ self.sw_if_index = sw_if_index
+ self.epg = epg
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_subnet_add_del(
+ 1,
+ self.table_id,
+ self.is_internal,
+ self.address_n,
+ self.address_len,
+ sw_if_index=self.sw_if_index if self.sw_if_index else 0xffffffff,
+ epg_id=self.epg if self.epg else 0xffffffff,
+ is_ip6=self.is_ip6)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_subnet_add_del(
+ 0,
+ self.table_id,
+ self.is_internal,
+ self.address_n,
+ self.address_len,
+ is_ip6=self.is_ip6)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "gbp-subnet;[%d:%s/%d]" % (self.table_id,
+ self.address,
+ self.address_len)
+
+ def query_vpp_config(self):
+ ss = self._test.vapi.gbp_subnet_dump()
+ for s in ss:
+ if s.subnet.table_id == self.table_id and \
+ s.subnet.address_length == self.address_len:
+ if self.is_ip6:
+ if s.subnet.address == self.address_n:
+ return True
+ else:
+ if s.subnet.address[:4] == self.address_n:
+ return True
+ return False
+
+
+class VppGbpEndpointGroup(VppObject):
+ """
+ GBP Endpoint Group
+ """
+
+ def __init__(self, test, epg, rd, bd, uplink,
+ bvi, bvi_ip4, bvi_ip6=None):
+ self._test = test
+ self.uplink = uplink
+ self.bvi = bvi
+ self.bvi_ip4 = bvi_ip4
+ self.bvi_ip4_n = inet_pton(AF_INET, bvi_ip4)
+ self.bvi_ip6 = bvi_ip6
+ self.bvi_ip6_n = inet_pton(AF_INET6, bvi_ip6)
+ self.epg = epg
+ self.bd = bd
+ self.rd = rd
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_endpoint_group_add_del(
+ 1,
+ self.epg,
+ self.bd,
+ self.rd,
+ self.rd,
+ self.uplink.sw_if_index)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_endpoint_group_add_del(
+ 0,
+ self.epg,
+ self.bd,
+ self.rd,
+ self.rd,
+ self.uplink.sw_if_index)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "gbp-endpoint-group;[%d]" % (self.epg)
+
+ def query_vpp_config(self):
+ epgs = self._test.vapi.gbp_endpoint_group_dump()
+ for epg in epgs:
+ if epg.epg.epg_id == self.epg:
return True
return False
@@ -101,10 +280,10 @@ class VppGbpContract(VppObject):
self.acl_index)
def query_vpp_config(self):
- eps = self._test.vapi.gbp_contract_dump()
- for ep in eps:
- if ep.contract.src_epg == self.src_epg \
- and ep.contract.dst_epg == self.dst_epg:
+ cs = self._test.vapi.gbp_contract_dump()
+ for c in cs:
+ if c.contract.src_epg == self.src_epg \
+ and c.contract.dst_epg == self.dst_epg:
return True
return False
@@ -115,233 +294,579 @@ class TestGBP(VppTestCase):
def setUp(self):
super(TestGBP, self).setUp()
- # create 6 pg interfaces for pg0 to pg5
- self.create_pg_interfaces(range(6))
+ self.create_pg_interfaces(range(9))
+ self.create_loopback_interfaces(range(9))
+
+ self.router_mac = "00:11:22:33:44:55"
for i in self.pg_interfaces:
i.admin_up()
- i.config_ip4()
- i.resolve_arp()
- i.config_ip6()
- i.resolve_ndp()
+ for i in self.lo_interfaces:
+ i.admin_up()
+ self.vapi.sw_interface_set_mac_address(
+ i.sw_if_index,
+ mactobinary(self.router_mac))
def tearDown(self):
for i in self.pg_interfaces:
- i.unconfig_ip4()
- i.unconfig_ip6()
+ i.admin_down()
super(TestGBP, self).tearDown()
- def test_gbp4(self):
- """ Group Based Policy v4 """
-
- ep1 = VppGbpEndpoint(self,
- self.pg0.sw_if_index,
- self.pg0.remote_ip4,
- 220)
- ep1.add_vpp_config()
- ep2 = VppGbpEndpoint(self,
- self.pg1.sw_if_index,
- self.pg1.remote_ip4,
- 220)
- ep2.add_vpp_config()
-
- ep3 = VppGbpEndpoint(self,
- self.pg2.sw_if_index,
- self.pg2.remote_ip4,
- 221)
- ep3.add_vpp_config()
- ep4 = VppGbpEndpoint(self,
- self.pg3.sw_if_index,
- self.pg3.remote_ip4,
- 222)
- ep4.add_vpp_config()
+ def send_and_expect_bridged(self, src, tx, dst):
+ rx = self.send_and_expect(src, tx, dst)
- self.logger.info(self.vapi.cli("sh gbp endpoint"))
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IP].src, tx[0][IP].src)
+ self.assertEqual(r[IP].dst, tx[0][IP].dst)
+ return rx
+
+ def send_and_expect_bridged6(self, src, tx, dst):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
+ self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
+ return rx
+
+ def send_and_expect_routed(self, src, tx, dst, src_mac):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, src_mac)
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IP].src, tx[0][IP].src)
+ self.assertEqual(r[IP].dst, tx[0][IP].dst)
+ return rx
+
+ def send_and_expect_natted(self, src, tx, dst, src_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IP].src, src_ip)
+ self.assertEqual(r[IP].dst, tx[0][IP].dst)
+ return rx
+
+ def send_and_expect_unnatted(self, src, tx, dst, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IP].dst, dst_ip)
+ self.assertEqual(r[IP].src, tx[0][IP].src)
+ return rx
+
+ def send_and_expect_double_natted(self, src, tx, dst, src_ip, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, self.router_mac)
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IP].dst, dst_ip)
+ self.assertEqual(r[IP].src, src_ip)
+ return rx
+
+ def test_gbp(self):
+ """ Group Based Policy """
+
+ nat_table = VppIpTable(self, 20)
+ nat_table.add_vpp_config()
#
- # in the abscense of policy, endpoints in the same EPG
- # can communicate
+ # Bridge Domains
#
- pkt_intra_epg = (Ether(src=self.pg0.remote_mac,
- dst=self.pg0.local_mac) /
- IP(src=self.pg0.remote_ip4,
- dst=self.pg1.remote_ip4) /
- UDP(sport=1234, dport=1234) /
- Raw('\xa5' * 100))
+ self.vapi.bridge_domain_add_del(1, flood=1, uu_flood=1, forward=1,
+ learn=0, arp_term=1, is_add=1)
+ self.vapi.bridge_domain_add_del(2, flood=1, uu_flood=1, forward=1,
+ learn=0, arp_term=1, is_add=1)
+ self.vapi.bridge_domain_add_del(20, flood=1, uu_flood=1, forward=1,
+ learn=0, arp_term=1, is_add=1)
- self.send_and_expect(self.pg0, pkt_intra_epg * 65, self.pg1)
+ #
+ # 3 EPGs, 2 of which share a BD.
+ #
+ epgs = []
+ recircs = []
+ epgs.append(VppGbpEndpointGroup(self, 220, 0, 1, self.pg4,
+ self.loop0,
+ "10.0.0.128",
+ "2001:10::128"))
+ recircs.append(VppGbpRecirc(self, epgs[0],
+ self.loop3))
+ epgs.append(VppGbpEndpointGroup(self, 221, 0, 1, self.pg5,
+ self.loop0,
+ "10.0.1.128",
+ "2001:10:1::128"))
+ recircs.append(VppGbpRecirc(self, epgs[1],
+ self.loop4))
+ epgs.append(VppGbpEndpointGroup(self, 222, 0, 2, self.pg6,
+ self.loop1,
+ "10.0.2.128",
+ "2001:10:2::128"))
+ recircs.append(VppGbpRecirc(self, epgs[2],
+ self.loop5))
#
- # in the abscense of policy, endpoints in the different EPG
- # cannot communicate
+ # 2 NAT EPGs, one for floating-IP subnets, the other for internet
#
- pkt_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
- dst=self.pg0.local_mac) /
- IP(src=self.pg0.remote_ip4,
- dst=self.pg2.remote_ip4) /
- UDP(sport=1234, dport=1234) /
- Raw('\xa5' * 100))
- pkt_inter_epg_220_to_222 = (Ether(src=self.pg0.remote_mac,
- dst=self.pg0.local_mac) /
- IP(src=self.pg0.remote_ip4,
- dst=self.pg3.remote_ip4) /
- UDP(sport=1234, dport=1234) /
- Raw('\xa5' * 100))
- pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
- dst=self.pg2.local_mac) /
- IP(src=self.pg2.remote_ip4,
- dst=self.pg0.remote_ip4) /
- UDP(sport=1234, dport=1234) /
- Raw('\xa5' * 100))
+ epgs.append(VppGbpEndpointGroup(self, 333, 20, 20, self.pg7,
+ self.loop2,
+ "11.0.0.128",
+ "3001::128"))
+ recircs.append(VppGbpRecirc(self, epgs[3],
+ self.loop6, is_ext=True))
+ epgs.append(VppGbpEndpointGroup(self, 444, 20, 20, self.pg8,
+ self.loop2,
+ "11.0.0.129",
+ "3001::129"))
+ recircs.append(VppGbpRecirc(self, epgs[4],
+ self.loop8, is_ext=True))
- self.send_and_assert_no_replies(self.pg0,
- pkt_inter_epg_220_to_221 * 65)
- self.send_and_assert_no_replies(self.pg0,
- pkt_inter_epg_221_to_220 * 65)
+ epg_nat = epgs[3]
+ recirc_nat = recircs[3]
#
- # A uni-directional contract from EPG 220 -> 221
+ # 4 end-points, 2 in the same subnet, 3 in the same BD
#
- c1 = VppGbpContract(self, 220, 221, 0xffffffff)
- c1.add_vpp_config()
+ eps = []
+ eps.append(VppGbpEndpoint(self, self.pg0,
+ epgs[0], recircs[0],
+ "10.0.0.1",
+ "11.0.0.1"))
+ eps.append(VppGbpEndpoint(self, self.pg1,
+ epgs[0], recircs[0],
+ "10.0.0.2",
+ "11.0.0.2"))
+ eps.append(VppGbpEndpoint(self, self.pg2,
+ epgs[1], recircs[1],
+ "10.0.1.1",
+ "11.0.0.3"))
+ eps.append(VppGbpEndpoint(self, self.pg3,
+ epgs[2], recircs[2],
+ "10.0.2.1",
+ "11.0.0.4"))
+ eps.append(VppGbpEndpoint(self, self.pg0,
+ epgs[0], recircs[0],
+ "2001:10::1",
+ "3001::1",
+ is_ip6=True))
+ eps.append(VppGbpEndpoint(self, self.pg1,
+ epgs[0], recircs[0],
+ "2001:10::2",
+ "3001::2",
+ is_ip6=True))
+ eps.append(VppGbpEndpoint(self, self.pg2,
+ epgs[1], recircs[1],
+ "2001:10:1::1",
+ "3001::3",
+ is_ip6=True))
+ eps.append(VppGbpEndpoint(self, self.pg3,
+ epgs[2], recircs[2],
+ "2001:10:2::1",
+ "3001::4",
+ is_ip6=True))
- self.send_and_expect(self.pg0,
- pkt_inter_epg_220_to_221 * 65,
- self.pg2)
- self.send_and_assert_no_replies(self.pg2,
- pkt_inter_epg_221_to_220 * 65)
+ #
+ # Config related to each of the EPGs
+ #
+ for epg in epgs:
+ # IP config on the BVI interfaces
+ if epg != epgs[1] and epg != epgs[4]:
+ epg.bvi.set_table_ip4(epg.rd)
+ epg.bvi.set_table_ip6(epg.rd)
+
+ # The BVIs are NAT inside interfaces
+ self.vapi.nat44_interface_add_del_feature(epg.bvi.sw_if_index,
+ is_inside=1,
+ is_add=1)
+ # self.vapi.nat66_add_del_interface(epg.bvi.sw_if_index,
+ # is_inside=1,
+ # is_add=1)
+
+ self.vapi.sw_interface_add_del_address(epg.bvi.sw_if_index,
+ epg.bvi_ip4_n,
+ 32)
+ self.vapi.sw_interface_add_del_address(epg.bvi.sw_if_index,
+ epg.bvi_ip6_n,
+ 128,
+ is_ipv6=1)
+
+ # EPG uplink interfaces in the BD
+ epg.uplink.set_table_ip4(epg.rd)
+ self.vapi.sw_interface_set_l2_bridge(epg.uplink.sw_if_index,
+ epg.bd)
+
+ # add the BD ARP termination entry for BVI IP
+ self.vapi.bd_ip_mac_add_del(bd_id=epg.bd,
+ mac=mactobinary(self.router_mac),
+ ip=epg.bvi_ip4_n,
+ is_ipv6=0,
+ is_add=1)
+ self.vapi.bd_ip_mac_add_del(bd_id=epg.bd,
+ mac=mactobinary(self.router_mac),
+ ip=epg.bvi_ip6_n,
+ is_ipv6=1,
+ is_add=1)
+
+ # epg[1] shares the same BVI to epg[0]
+ if epg != epgs[1] and epg != epgs[4]:
+ # BVI in BD
+ self.vapi.sw_interface_set_l2_bridge(epg.bvi.sw_if_index,
+ epg.bd,
+ bvi=1)
+ # BVI L2 FIB entry
+ self.vapi.l2fib_add_del(self.router_mac,
+ epg.bd,
+ epg.bvi.sw_if_index,
+ is_add=1, bvi_mac=1)
+
+ # EPG in VPP
+ epg.add_vpp_config()
+
+ for recirc in recircs:
+ # EPG's ingress recirculation interface maps to its RD
+ recirc.recirc.set_table_ip4(recirc.epg.rd)
+
+ # in the bridge to allow DVR. L2 emulation to punt to L3
+ self.vapi.sw_interface_set_l2_bridge(recirc.recirc.sw_if_index,
+ recirc.epg.bd)
+ self.vapi.sw_interface_set_l2_emulation(
+ recirc.recirc.sw_if_index)
+
+ if recirc.is_ext:
+ # recirc interfaces on NAT EPGs are outside and an
+ # output feature
+ self.vapi.nat44_interface_add_del_output_feature(
+ recirc.recirc.sw_if_index,
+ is_inside=0,
+ is_add=1)
+ else:
+ self.vapi.nat44_interface_add_del_feature(
+ recirc.recirc.sw_if_index,
+ is_inside=0,
+ is_add=1)
+ # self.vapi.nat66_add_del_interface(
+ # recirc.recirc.sw_if_index,
+ # is_inside=0,
+ # is_add=1)
+
+ recirc.add_vpp_config()
+
+ ep_routes = []
+ ep_arps = []
+ for ep in eps:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ #
+ # routes to the endpoints. We need these since there are no
+ # adj-fibs due to the fact that the BVI address has /32 and
+ # the subnet is not attached.
+ #
+ r = VppIpRoute(self, ep.ip, 32,
+ [VppRoutePath(ep.ip,
+ ep.epg.bvi.sw_if_index,
+ proto=ep.proto)],
+ is_ip6=ep.is_ip6)
+ r.add_vpp_config()
+ ep_routes.append(r)
+
+ #
+ # ARP entries for the endpoints
+ #
+ a = VppNeighbor(self,
+ ep.epg.bvi.sw_if_index,
+ ep.itf.remote_mac,
+ ep.ip, af=ep.af)
+ a.add_vpp_config()
+ ep_arps.append(a)
+
+ # add each EP itf to the its BD
+ self.vapi.sw_interface_set_l2_bridge(ep.itf.sw_if_index,
+ ep.epg.bd)
+
+ # add the BD ARP termination entry
+ self.vapi.bd_ip_mac_add_del(bd_id=ep.epg.bd,
+ mac=ep.bin_mac,
+ ip=ep.ip_n,
+ is_ipv6=0,
+ is_add=1)
+
+ # L2 FIB entry
+ self.vapi.l2fib_add_del(ep.mac,
+ ep.epg.bd,
+ ep.itf.sw_if_index,
+ is_add=1)
+
+ # Add static mappings for each EP from the 10/8 to 11/8 network
+ if ep.af == AF_INET:
+ self.vapi.nat44_add_del_static_mapping(ep.ip_n,
+ ep.floating_ip_n,
+ vrf_id=0,
+ addr_only=1)
+ # else:
+ # self.vapi.nat66_add_del_static_mapping(ep.ip_n,
+ # ep.floating_ip_n,
+ # vrf_id=20)
+
+ # VPP EP create ...
+ ep.add_vpp_config()
+
+ # ... results in a Gratuitous ARP/ND on the EPG's uplink
+ rx = ep.epg.uplink.get_capture(1, timeout=0.2)
+
+ if ep.is_ip6:
+ self.assertTrue(rx[0].haslayer(ICMPv6ND_NA))
+ self.assertEqual(rx[0][ICMPv6ND_NA].tgt, ep.ip)
+ else:
+ self.assertTrue(rx[0].haslayer(ARP))
+ self.assertEqual(rx[0][ARP].psrc, ep.ip)
+ self.assertEqual(rx[0][ARP].pdst, ep.ip)
+
+ # add the BD ARP termination entry for floating IP
+ self.vapi.bd_ip_mac_add_del(bd_id=epg_nat.bd,
+ mac=ep.bin_mac,
+ ip=ep.floating_ip_n,
+ is_ipv6=0,
+ is_add=1)
+
+ # floating IPs route via EPG recirc
+ r = VppIpRoute(self, ep.floating_ip, 32,
+ [VppRoutePath(ep.floating_ip,
+ ep.recirc.recirc.sw_if_index,
+ is_dvr=1,
+ proto=ep.proto)],
+ table_id=20,
+ is_ip6=ep.is_ip6)
+ r.add_vpp_config()
+ ep_routes.append(r)
+
+ # L2 FIB entries in the NAT EPG BD to bridge the packets from
+ # the outside direct to the internal EPG
+ self.vapi.l2fib_add_del(ep.mac,
+ epg_nat.bd,
+ ep.recirc.recirc.sw_if_index,
+ is_add=1)
#
- # contract for the return direction
+ # ARP packets for unknown IP are flooded
#
- c2 = VppGbpContract(self, 221, 220, 0xffffffff)
- c2.add_vpp_config()
+ pkt_arp = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ ARP(op="who-has",
+ hwdst="ff:ff:ff:ff:ff:ff",
+ hwsrc=self.pg0.remote_mac,
+ pdst=epgs[0].bvi_ip4,
+ psrc="10.0.0.88"))
- self.send_and_expect(self.pg0,
- pkt_inter_epg_220_to_221 * 65,
- self.pg2)
- self.send_and_expect(self.pg2,
- pkt_inter_epg_221_to_220 * 65,
- self.pg0)
+ self.send_and_expect(self.pg0, [pkt_arp], self.pg0)
#
- # check that inter group is still disabled for the groups
- # not in the contract.
+ # ARP/ND packets get a response
#
- self.send_and_assert_no_replies(self.pg0,
- pkt_inter_epg_220_to_222 * 65)
+ pkt_arp = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ ARP(op="who-has",
+ hwdst="ff:ff:ff:ff:ff:ff",
+ hwsrc=self.pg0.remote_mac,
+ pdst=epgs[0].bvi_ip4,
+ psrc=eps[0].ip))
- self.logger.info(self.vapi.cli("sh gbp contract"))
+ self.send_and_expect(self.pg0, [pkt_arp], self.pg0)
+
+ nsma = in6_getnsma(inet_pton(AF_INET6, eps[4].ip))
+ d = inet_ntop(AF_INET6, nsma)
+ pkt_nd = (Ether(dst=in6_getnsmac(nsma)) /
+ IPv6(dst=d, src=eps[4].ip) /
+ ICMPv6ND_NS(tgt=epgs[0].bvi_ip6) /
+ ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
+ self.send_and_expect(self.pg0, [pkt_nd], self.pg0)
#
- # remove both contracts, traffic stops in both directions
+ # broadcast packets are flooded
#
- c2.remove_vpp_config()
- c1.remove_vpp_config()
+ pkt_bcast = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ IP(src=eps[0].ip, dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
- self.send_and_assert_no_replies(self.pg2,
- pkt_inter_epg_221_to_220 * 65)
- self.send_and_assert_no_replies(self.pg0,
- pkt_inter_epg_220_to_221 * 65)
- self.send_and_expect(self.pg0, pkt_intra_epg * 65, self.pg1)
-
- def test_gbp6(self):
- """ Group Based Policy v6 """
-
- ep1 = VppGbpEndpoint(self,
- self.pg0.sw_if_index,
- self.pg0.remote_ip6,
- 220,
- is_ip6=1)
- ep1.add_vpp_config()
- ep2 = VppGbpEndpoint(self,
- self.pg1.sw_if_index,
- self.pg1.remote_ip6,
- 220,
- is_ip6=1)
- ep2.add_vpp_config()
-
- ep3 = VppGbpEndpoint(self,
- self.pg2.sw_if_index,
- self.pg2.remote_ip6,
- 221,
- is_ip6=1)
- ep3.add_vpp_config()
- ep4 = VppGbpEndpoint(self,
- self.pg3.sw_if_index,
- self.pg3.remote_ip6,
- 222,
- is_ip6=1)
- ep4.add_vpp_config()
+ self.vapi.cli("clear trace")
+ self.pg0.add_stream(pkt_bcast)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rxd = eps[1].itf.get_capture(1)
+ self.assertEqual(rxd[0][Ether].dst, pkt_bcast[Ether].dst)
+ rxd = epgs[0].uplink.get_capture(1)
+ self.assertEqual(rxd[0][Ether].dst, pkt_bcast[Ether].dst)
+
+ #
+ # packets to non-local L3 destinations dropped
+ #
+ pkt_intra_epg_220_ip4 = (Ether(src=self.pg0.remote_mac,
+ dst=self.router_mac) /
+ IP(src=eps[0].ip, dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ pkt_inter_epg_222_ip4 = (Ether(src=self.pg0.remote_mac,
+ dst=self.router_mac) /
+ IP(src=eps[0].ip, dst="10.0.1.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, pkt_intra_epg_220_ip4 * 65)
+
+ pkt_inter_epg_222_ip6 = (Ether(src=self.pg0.remote_mac,
+ dst=self.router_mac) /
+ IPv6(src=eps[4].ip, dst="2001:10::99") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ self.send_and_assert_no_replies(self.pg0, pkt_inter_epg_222_ip6 * 65)
+
+ #
+ # Add the subnet routes
+ #
+ s41 = VppGbpSubnet(self, 0, "10.0.0.0", 24)
+ s42 = VppGbpSubnet(self, 0, "10.0.1.0", 24)
+ s43 = VppGbpSubnet(self, 0, "10.0.2.0", 24)
+ s41.add_vpp_config()
+ s42.add_vpp_config()
+ s43.add_vpp_config()
+ s61 = VppGbpSubnet(self, 0, "2001:10::1", 64, is_ip6=True)
+ s62 = VppGbpSubnet(self, 0, "2001:10:1::1", 64, is_ip6=True)
+ s63 = VppGbpSubnet(self, 0, "2001:10:2::1", 64, is_ip6=True)
+ s61.add_vpp_config()
+ s62.add_vpp_config()
+ s63.add_vpp_config()
+
+ self.send_and_expect_bridged(self.pg0,
+ pkt_intra_epg_220_ip4 * 65,
+ self.pg4)
+ self.send_and_expect_bridged(self.pg3,
+ pkt_inter_epg_222_ip4 * 65,
+ self.pg6)
+ self.send_and_expect_bridged6(self.pg3,
+ pkt_inter_epg_222_ip6 * 65,
+ self.pg6)
+
+ self.logger.info(self.vapi.cli("sh ip fib 11.0.0.2"))
+ self.logger.info(self.vapi.cli("sh gbp endpoint-group"))
self.logger.info(self.vapi.cli("sh gbp endpoint"))
+ self.logger.info(self.vapi.cli("sh gbp recirc"))
+ self.logger.info(self.vapi.cli("sh int"))
+ self.logger.info(self.vapi.cli("sh int addr"))
+ self.logger.info(self.vapi.cli("sh int feat loop6"))
+ self.logger.info(self.vapi.cli("sh vlib graph ip4-gbp-src-classify"))
+ self.logger.info(self.vapi.cli("sh int feat loop3"))
+
+ #
+ # Packet destined to unknown unicast is sent on the epg uplink ...
+ #
+ pkt_intra_epg_220_to_uplink = (Ether(src=self.pg0.remote_mac,
+ dst="00:00:00:33:44:55") /
+ IP(src=eps[0].ip, dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_expect_bridged(self.pg0,
+ pkt_intra_epg_220_to_uplink * 65,
+ self.pg4)
+ # ... and nowhere else
+ self.pg1.get_capture(0, timeout=0.1)
+ self.pg1.assert_nothing_captured(remark="Flood onto other VMS")
+
+ pkt_intra_epg_221_to_uplink = (Ether(src=self.pg2.remote_mac,
+ dst="00:00:00:33:44:66") /
+ IP(src=eps[0].ip, dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ self.send_and_expect_bridged(self.pg2,
+ pkt_intra_epg_221_to_uplink * 65,
+ self.pg5)
+
+ #
+ # Packets from the uplink are forwarded in the absence of a contract
#
- # in the abscense of policy, endpoints in the same EPG
+ pkt_intra_epg_220_from_uplink = (Ether(src="00:00:00:33:44:55",
+ dst=self.pg0.remote_mac) /
+ IP(src=eps[0].ip, dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_expect_bridged(self.pg4,
+ pkt_intra_epg_220_from_uplink * 65,
+ self.pg0)
+
+ #
+ # in the absence of policy, endpoints in the same EPG
# can communicate
#
pkt_intra_epg = (Ether(src=self.pg0.remote_mac,
- dst=self.pg0.local_mac) /
- IPv6(src=self.pg0.remote_ip6,
- dst=self.pg1.remote_ip6) /
+ dst=self.pg1.remote_mac) /
+ IP(src=eps[0].ip, dst=eps[1].ip) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
- self.send_and_expect(self.pg0, pkt_intra_epg * 65, self.pg1)
+ self.send_and_expect_bridged(self.pg0, pkt_intra_epg * 65, self.pg1)
#
# in the abscense of policy, endpoints in the different EPG
# cannot communicate
#
pkt_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
- dst=self.pg0.local_mac) /
- IPv6(src=self.pg0.remote_ip6,
- dst=self.pg2.remote_ip6) /
+ dst=self.pg2.remote_mac) /
+ IP(src=eps[0].ip, dst=eps[2].ip) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
- pkt_inter_epg_220_to_222 = (Ether(src=self.pg0.remote_mac,
- dst=self.pg0.local_mac) /
- IPv6(src=self.pg0.remote_ip6,
- dst=self.pg3.remote_ip6) /
+ pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg0.remote_mac) /
+ IP(src=eps[2].ip, dst=eps[0].ip) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
- pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
- dst=self.pg2.local_mac) /
- IPv6(src=self.pg2.remote_ip6,
- dst=self.pg0.remote_ip6) /
+ pkt_inter_epg_220_to_222 = (Ether(src=self.pg0.remote_mac,
+ dst=self.router_mac) /
+ IP(src=eps[0].ip, dst=eps[3].ip) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.send_and_assert_no_replies(self.pg0,
pkt_inter_epg_220_to_221 * 65)
self.send_and_assert_no_replies(self.pg0,
- pkt_inter_epg_221_to_220 * 65)
+ pkt_inter_epg_220_to_222 * 65)
#
# A uni-directional contract from EPG 220 -> 221
#
- c1 = VppGbpContract(self, 220, 221, 0xffffffff)
+ c1 = VppGbpContract(self, 220, 221, 0)
c1.add_vpp_config()
- self.send_and_expect(self.pg0,
- pkt_inter_epg_220_to_221 * 65,
- self.pg2)
- self.send_and_assert_no_replies(self.pg2,
- pkt_inter_epg_221_to_220 * 65)
+ self.send_and_expect_bridged(self.pg0,
+ pkt_inter_epg_220_to_221 * 65,
+ self.pg2)
+ self.send_and_assert_no_replies(self.pg0,
+ pkt_inter_epg_220_to_222 * 65)
#
# contract for the return direction
#
- c2 = VppGbpContract(self, 221, 220, 0xffffffff)
+ c2 = VppGbpContract(self, 221, 220, 0)
c2.add_vpp_config()
- self.send_and_expect(self.pg0,
- pkt_inter_epg_220_to_221 * 65,
- self.pg2)
- self.send_and_expect(self.pg2,
- pkt_inter_epg_221_to_220 * 65,
- self.pg0)
+ self.send_and_expect_bridged(self.pg0,
+ pkt_inter_epg_220_to_221 * 65,
+ self.pg2)
+ self.send_and_expect_bridged(self.pg2,
+ pkt_inter_epg_221_to_220 * 65,
+ self.pg0)
#
# check that inter group is still disabled for the groups
@@ -350,19 +875,165 @@ class TestGBP(VppTestCase):
self.send_and_assert_no_replies(self.pg0,
pkt_inter_epg_220_to_222 * 65)
+ #
+ # A uni-directional contract from EPG 220 -> 222 'L3 routed'
+ #
+ c3 = VppGbpContract(self, 220, 222, 0)
+ c3.add_vpp_config()
+
self.logger.info(self.vapi.cli("sh gbp contract"))
+ self.send_and_expect_routed(self.pg0,
+ pkt_inter_epg_220_to_222 * 65,
+ self.pg3,
+ self.router_mac)
+
#
# remove both contracts, traffic stops in both directions
#
c2.remove_vpp_config()
c1.remove_vpp_config()
+ c3.remove_vpp_config()
self.send_and_assert_no_replies(self.pg2,
pkt_inter_epg_221_to_220 * 65)
self.send_and_assert_no_replies(self.pg0,
pkt_inter_epg_220_to_221 * 65)
- self.send_and_expect(self.pg0, pkt_intra_epg * 65, self.pg1)
+ self.send_and_expect_bridged(self.pg0, pkt_intra_epg * 65, self.pg1)
+
+ #
+ # EPs to the outside world
+ #
+
+ # in the EP's RD an external subnet via the NAT EPG's recirc
+ se1 = VppGbpSubnet(self, 0, "0.0.0.0", 0,
+ is_internal=False,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ epg=epg_nat.epg)
+ se1.add_vpp_config()
+ se2 = VppGbpSubnet(self, 0, "11.0.0.0", 8,
+ is_internal=False,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ epg=epg_nat.epg)
+ se2.add_vpp_config()
+ # in the NAT RD an external subnet via the NAT EPG's uplink
+ se3 = VppGbpSubnet(self, 20, "0.0.0.0", 0,
+ is_internal=False,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ epg=epg_nat.epg)
+ se4 = VppGbpSubnet(self, 20, "11.0.0.0", 8,
+ is_internal=False,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ epg=epg_nat.epg)
+ se3.add_vpp_config()
+ se4.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh ip fib 0.0.0.0/0"))
+ self.logger.info(self.vapi.cli("sh ip fib 11.0.0.1"))
+
+ pkt_inter_epg_220_to_global = (Ether(src=self.pg0.remote_mac,
+ dst=self.router_mac) /
+ IP(src=eps[0].ip, dst="1.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ # no policy yet
+ self.send_and_assert_no_replies(self.pg0,
+ pkt_inter_epg_220_to_global * 65)
+
+ c4 = VppGbpContract(self, 220, 333, 0)
+ c4.add_vpp_config()
+
+ self.send_and_expect_natted(self.pg0,
+ pkt_inter_epg_220_to_global * 65,
+ self.pg7,
+ "11.0.0.1")
+
+ pkt_inter_epg_220_from_global = (Ether(src=self.router_mac,
+ dst=self.pg0.remote_mac) /
+ IP(dst=eps[0].floating_ip,
+ src="1.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg7,
+ pkt_inter_epg_220_from_global * 65)
+
+ c5 = VppGbpContract(self, 333, 220, 0)
+ c5.add_vpp_config()
+
+ self.send_and_expect_unnatted(self.pg7,
+ pkt_inter_epg_220_from_global * 65,
+ self.pg0,
+ "10.0.0.1")
+
+ pkt_intra_epg_220_global = (Ether(src=self.pg0.remote_mac,
+ dst=self.router_mac) /
+ IP(src=eps[0].ip,
+ dst=eps[1].floating_ip) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.send_and_expect_double_natted(self.pg0,
+ pkt_intra_epg_220_global * 65,
+ self.pg1,
+ "11.0.0.1",
+ "10.0.0.2")
+
+ #
+ # cleanup
+ #
+ for ep in eps:
+ # del static mappings for each EP from the 10/8 to 11/8 network
+ if ep.af == AF_INET:
+ self.vapi.nat44_add_del_static_mapping(ep.ip_n,
+ ep.floating_ip_n,
+ vrf_id=0,
+ addr_only=1,
+ is_add=0)
+ # else:
+ # self.vapi.nat66_add_del_static_mapping(ep.ip_n,
+ # ep.floating_ip_n,
+ # vrf_id=0,
+ # is_add=0)
+
+ for epg in epgs:
+ # IP config on the BVI interfaces
+ self.vapi.sw_interface_add_del_address(epg.bvi.sw_if_index,
+ epg.bvi_ip4_n,
+ 32,
+ is_add=0)
+ self.logger.info(self.vapi.cli("sh int addr"))
+
+ epg.uplink.set_table_ip4(0)
+
+ if epg != epgs[0] and epg != epgs[3]:
+ epg.bvi.set_table_ip4(0)
+
+ self.vapi.nat44_interface_add_del_feature(epg.bvi.sw_if_index,
+ is_inside=1,
+ is_add=0)
+ # self.vapi.nat66_add_del_interface(epg.bvi.sw_if_index,
+ # is_inside=1,
+ # is_add=0)
+
+ for recirc in recircs:
+ recirc.recirc.set_table_ip4(0)
+
+ if recirc.is_ext:
+ self.vapi.nat44_interface_add_del_output_feature(
+ recirc.recirc.sw_if_index,
+ is_inside=0,
+ is_add=0)
+ else:
+ self.vapi.nat44_interface_add_del_feature(
+ recirc.recirc.sw_if_index,
+ is_inside=0,
+ is_add=0)
+ # self.vapi.nat66_add_del_interface(
+ # recirc.recirc.sw_if_index,
+ # is_inside=0,
+ # is_add=0)
if __name__ == '__main__':
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index 24483fe82b8..ad887e8c079 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -668,6 +668,16 @@ class VppPapiProvider(object):
{'sw_if_index': sw_if_index,
'mtu': mtu})
+ def sw_interface_set_promiscuous(self, sw_if_index, enable):
+ """
+ :param sw_if_index:
+ :param enable:
+
+ """
+ return self.api(self.papi.sw_interface_set_promiscuous,
+ {'sw_if_index': sw_if_index,
+ 'enable': enable})
+
def sw_interface_set_mac_address(self, sw_if_index, mac):
return self.api(self.papi.sw_interface_set_mac_address,
{'sw_if_index': sw_if_index,
@@ -3238,6 +3248,59 @@ class VppPapiProvider(object):
""" GBP endpoint Dump """
return self.api(self.papi.gbp_endpoint_dump, {})
+ def gbp_endpoint_group_add_del(self, is_add, epg, bd,
+ ip4_rd,
+ ip6_rd,
+ uplink_sw_if_index):
+ """ GBP endpoint group Add/Del """
+ return self.api(self.papi.gbp_endpoint_group_add_del,
+ {'is_add': is_add,
+ 'epg': {
+ 'uplink_sw_if_index': uplink_sw_if_index,
+ 'bd_id': bd,
+ 'ip4_table_id': ip4_rd,
+ 'ip6_table_id': ip6_rd,
+ 'epg_id': epg}})
+
+ def gbp_endpoint_group_dump(self):
+ """ GBP endpoint group Dump """
+ return self.api(self.papi.gbp_endpoint_group_dump, {})
+
+ def gbp_recirc_add_del(self, is_add, sw_if_index, epg, is_ext):
+ """ GBP recirc Add/Del """
+ return self.api(self.papi.gbp_recirc_add_del,
+ {'is_add': is_add,
+ 'recirc': {
+ 'is_ext': is_ext,
+ 'sw_if_index': sw_if_index,
+ 'epg_id': epg}})
+
+ def gbp_recirc_dump(self):
+ """ GBP recirc Dump """
+ return self.api(self.papi.gbp_recirc_dump, {})
+
+ def gbp_subnet_add_del(self, is_add, table_id,
+ is_internal,
+ addr, addr_len,
+ sw_if_index=0xffffffff,
+ epg_id=0xffffffff,
+ is_ip6=False):
+ """ GBP Subnet Add/Del """
+ return self.api(self.papi.gbp_subnet_add_del,
+ {'is_add': is_add,
+ 'subnet': {
+ 'is_internal': is_internal,
+ 'is_ip6': is_ip6,
+ 'sw_if_index': sw_if_index,
+ 'epg_id': epg_id,
+ 'address': addr,
+ 'address_length': addr_len,
+ 'table_id': table_id}})
+
+ def gbp_subnet_dump(self):
+ """ GBP Subnet Dump """
+ return self.api(self.papi.gbp_subnet_dump, {})
+
def gbp_contract_add_del(self, is_add, src_epg, dst_epg, acl_index):
""" GBP contract Add/Del """
return self.api(self.papi.gbp_contract_add_del,