author     Neale Ranns <nranns@cisco.com>          2017-09-10 04:39:11 -0700
committer  Damjan Marion <dmarion.lists@gmail.com> 2017-09-11 10:14:36 +0000
commit     1500254bee11355bbd69cc1dd9705be4f002f2bd (patch)
tree       c403642105f399baccb3a727020232b5732fe8f7 /test
parent     a7191840beeb2c3a0f2598707ed1051a9f23c45f (diff)
FIB table add/delete API
part 2;
- this adds the code to create an IP and MPLS table via the API.
- but the enforcement that the table must be created before it is used is still missing, this is so that CSIT can pass.

Change-Id: Id124d884ade6cb7da947225200e3bb193454c555
Signed-off-by: Neale Ranns <nranns@cisco.com>
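As a quick illustration of the new calls, here is a minimal sketch of creating and deleting tables through the test API provider (self.vapi inside a VppTestCase); the table IDs used below are illustrative, not taken from the patch:

    # the tables must exist before interfaces or routes reference them
    self.vapi.ip_table_add_del(10, is_add=1)              # IPv4 table 10
    self.vapi.ip_table_add_del(10, is_add=1, is_ipv6=1)   # IPv6 table 10
    self.vapi.mpls_table_add_del(0, is_add=1)             # MPLS table 0

    # ... bind interfaces and add routes in these tables ...

    # delete the tables once nothing references them any more
    self.vapi.ip_table_add_del(10, is_add=0)
    self.vapi.ip_table_add_del(10, is_add=0, is_ipv6=1)
    self.vapi.mpls_table_add_del(0, is_add=0)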
Diffstat (limited to 'test')
-rw-r--r--  test/test_dhcp.py                     24
-rw-r--r--  test/test_gre.py                       8
-rw-r--r--  test/test_ip4.py                      11
-rw-r--r--  test/test_ip4_vrf_multi_instance.py    4
-rw-r--r--  test/test_ip6.py                       7
-rw-r--r--  test/test_ip6_vrf_multi_instance.py    4
-rw-r--r--  test/test_ip_mcast.py                 98
-rw-r--r--  test/test_mpls.py                     48
-rw-r--r--  test/test_nat.py                      13
-rw-r--r--  test/test_neighbor.py                 66
-rw-r--r--  test/vpp_ip_route.py                  73
-rw-r--r--  test/vpp_papi_provider.py             46
12 files changed, 367 insertions, 35 deletions
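The per-file changes that follow all apply the same pattern, using the new VppIpTable and VppMplsTable helpers added to vpp_ip_route.py: create the tables in setUp() before any interface or route is bound to them, and unbind the interfaces again in tearDown() before the base-class tearDown() cleans up the registered objects (including the tables). A minimal sketch of that pattern, assuming a hypothetical test class (ExampleVrfTest and its single pg interface are illustrative, not part of the patch):

    from framework import VppTestCase
    from vpp_ip_route import VppIpTable, VppMplsTable


    class ExampleVrfTest(VppTestCase):
        """ Hypothetical test showing the explicit-table pattern """

        def setUp(self):
            super(ExampleVrfTest, self).setUp()
            self.create_pg_interfaces(range(1))

            # create the non-default IP tables and the default MPLS table
            # before anything refers to them
            self.tables = []
            tbl4 = VppIpTable(self, 1)
            tbl4.add_vpp_config()
            self.tables.append(tbl4)
            tbl6 = VppIpTable(self, 1, is_ip6=1)
            tbl6.add_vpp_config()
            self.tables.append(tbl6)
            mpls_tbl = VppMplsTable(self, 0)
            mpls_tbl.add_vpp_config()
            self.tables.append(mpls_tbl)

            # only now bind the interface into VRF 1
            self.pg0.admin_up()
            self.pg0.set_table_ip4(1)
            self.pg0.config_ip4()

        def tearDown(self):
            # unbind the interface from the VRF before the tables go away
            self.pg0.unconfig_ip4()
            self.pg0.set_table_ip4(0)
            self.pg0.admin_down()
            super(ExampleVrfTest, self).tearDown()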
diff --git a/test/test_dhcp.py b/test/test_dhcp.py
index 6fc291824ec..fe97f6c9a2b 100644
--- a/test/test_dhcp.py
+++ b/test/test_dhcp.py
@@ -6,7 +6,7 @@ import struct
from framework import VppTestCase, VppTestRunner
from vpp_neighbor import VppNeighbor
-from vpp_ip_route import find_route
+from vpp_ip_route import find_route, VppIpTable
from util import mk_ll_addr
from scapy.layers.l2 import Ether, getmacbyip, ARP
@@ -34,10 +34,20 @@ class TestDHCP(VppTestCase):
# create 3 pg interfaces
self.create_pg_interfaces(range(4))
+ self.tables = []
# pg0 and 1 are IP configured in VRF 0 and 1.
# pg2 and 3 are non IP-configured in VRF 0 and 1
table_id = 0
+ for table_id in range(1, 4):
+ tbl4 = VppIpTable(self, table_id)
+ tbl4.add_vpp_config()
+ self.tables.append(tbl4)
+ tbl6 = VppIpTable(self, table_id, is_ip6=1)
+ tbl6.add_vpp_config()
+ self.tables.append(tbl6)
+
+ table_id = 0
for i in self.pg_interfaces[:2]:
i.admin_up()
i.set_table_ip4(table_id)
@@ -56,11 +66,15 @@ class TestDHCP(VppTestCase):
table_id += 1
def tearDown(self):
- super(TestDHCP, self).tearDown()
- for i in self.pg_interfaces:
+ for i in self.pg_interfaces[:2]:
i.unconfig_ip4()
i.unconfig_ip6()
+
+ for i in self.pg_interfaces:
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
i.admin_down()
+ super(TestDHCP, self).tearDown()
def send_and_assert_no_replies(self, intf, pkts, remark):
intf.add_stream(pkts)
@@ -667,6 +681,8 @@ class TestDHCP(VppTestCase):
"DHCP cleanup VRF 0")
self.send_and_assert_no_replies(self.pg3, pkts_disc_vrf1,
"DHCP cleanup VRF 1")
+ self.pg2.unconfig_ip4()
+ self.pg3.unconfig_ip4()
def test_dhcp6_proxy(self):
""" DHCPv6 Proxy"""
@@ -1045,6 +1061,8 @@ class TestDHCP(VppTestCase):
server_table_id=0,
is_ipv6=1,
is_add=0)
+ self.pg2.unconfig_ip6()
+ self.pg3.unconfig_ip6()
def test_dhcp_client(self):
""" DHCP Client"""
diff --git a/test/test_gre.py b/test/test_gre.py
index 1afc44fb423..9046b05f2f6 100644
--- a/test/test_gre.py
+++ b/test/test_gre.py
@@ -6,7 +6,7 @@ from logging import *
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppDot1QSubint
from vpp_gre_interface import VppGreInterface, VppGre6Interface
-from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto, VppIpTable
from vpp_papi_provider import L2_VTR_OP
from scapy.packet import Raw
@@ -30,6 +30,9 @@ class TestGRE(VppTestCase):
# create 3 pg interfaces - set one in a non-default table.
self.create_pg_interfaces(range(3))
+
+ self.tbl = VppIpTable(self, 1)
+ self.tbl.add_vpp_config()
self.pg1.set_table_ip4(1)
for i in self.pg_interfaces:
@@ -43,11 +46,12 @@ class TestGRE(VppTestCase):
self.pg2.resolve_ndp()
def tearDown(self):
- super(TestGRE, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.admin_down()
+ self.pg1.set_table_ip4(0)
+ super(TestGRE, self).tearDown()
def create_stream_ip4(self, src_if, src_ip, dst_ip):
pkts = []
diff --git a/test/test_ip4.py b/test/test_ip4.py
index 7a7098c36f0..55d16735a01 100644
--- a/test/test_ip4.py
+++ b/test/test_ip4.py
@@ -6,7 +6,8 @@ import unittest
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpMRoute, \
- VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind
+ VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind, \
+ VppMplsTable
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q, ARP
@@ -774,6 +775,8 @@ class TestIPLoadBalance(VppTestCase):
super(TestIPLoadBalance, self).setUp()
self.create_pg_interfaces(range(5))
+ mpls_tbl = VppMplsTable(self, 0)
+ mpls_tbl.add_vpp_config()
for i in self.pg_interfaces:
i.admin_up()
@@ -782,11 +785,11 @@ class TestIPLoadBalance(VppTestCase):
i.enable_mpls()
def tearDown(self):
- super(TestIPLoadBalance, self).tearDown()
for i in self.pg_interfaces:
i.disable_mpls()
i.unconfig_ip4()
i.admin_down()
+ super(TestIPLoadBalance, self).tearDown()
def send_and_expect_load_balancing(self, input, pkts, outputs):
input.add_stream(pkts)
@@ -966,6 +969,8 @@ class TestIPVlan0(VppTestCase):
super(TestIPVlan0, self).setUp()
self.create_pg_interfaces(range(2))
+ mpls_tbl = VppMplsTable(self, 0)
+ mpls_tbl.add_vpp_config()
for i in self.pg_interfaces:
i.admin_up()
@@ -974,11 +979,11 @@ class TestIPVlan0(VppTestCase):
i.enable_mpls()
def tearDown(self):
- super(TestIPVlan0, self).tearDown()
for i in self.pg_interfaces:
i.disable_mpls()
i.unconfig_ip4()
i.admin_down()
+ super(TestIPVlan0, self).tearDown()
def send_and_expect(self, input, pkts, output):
input.add_stream(pkts)
diff --git a/test/test_ip4_vrf_multi_instance.py b/test/test_ip4_vrf_multi_instance.py
index b73ac9483c3..5a8d6760586 100644
--- a/test/test_ip4_vrf_multi_instance.py
+++ b/test/test_ip4_vrf_multi_instance.py
@@ -172,9 +172,10 @@ class TestIp4VrfMultiInst(VppTestCase):
pg_if = self.pg_if_by_vrf_id[vrf_id][0]
dest_addr = pg_if.remote_hosts[0].ip4n
dest_addr_len = 24
+ self.vapi.ip_table_add_del(vrf_id, is_add=1)
self.vapi.ip_add_del_route(
dest_addr, dest_addr_len, pg_if.local_ip4n,
- table_id=vrf_id, create_vrf_if_needed=1, is_multipath=1)
+ table_id=vrf_id, is_multipath=1)
self.logger.info("IPv4 VRF ID %d created" % vrf_id)
if vrf_id not in self.vrf_list:
self.vrf_list.append(vrf_id)
@@ -216,6 +217,7 @@ class TestIp4VrfMultiInst(VppTestCase):
self.logger.info("IPv4 VRF ID %d reset" % vrf_id)
self.logger.debug(self.vapi.ppcli("show ip fib"))
self.logger.debug(self.vapi.ppcli("show ip arp"))
+ self.vapi.ip_table_add_del(vrf_id, is_add=0)
def create_stream(self, src_if, packet_sizes):
"""
diff --git a/test/test_ip6.py b/test/test_ip6.py
index 285ce18150c..aad3713c4c0 100644
--- a/test/test_ip6.py
+++ b/test/test_ip6.py
@@ -8,7 +8,7 @@ from vpp_sub_interface import VppSubInterface, VppDot1QSubint
from vpp_pg_interface import is_ipv6_misc
from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, VppIpMRoute, \
VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind, \
- VppMplsRoute, DpoProto
+ VppMplsRoute, DpoProto, VppMplsTable
from vpp_neighbor import find_nbr, VppNeighbor
from scapy.packet import Raw
@@ -1260,6 +1260,9 @@ class TestIP6LoadBalance(VppTestCase):
self.create_pg_interfaces(range(5))
+ mpls_tbl = VppMplsTable(self, 0)
+ mpls_tbl.add_vpp_config()
+
for i in self.pg_interfaces:
i.admin_up()
i.config_ip6()
@@ -1267,11 +1270,11 @@ class TestIP6LoadBalance(VppTestCase):
i.enable_mpls()
def tearDown(self):
- super(TestIP6LoadBalance, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip6()
i.admin_down()
i.disable_mpls()
+ super(TestIP6LoadBalance, self).tearDown()
def send_and_expect_load_balancing(self, input, pkts, outputs):
input.add_stream(pkts)
diff --git a/test/test_ip6_vrf_multi_instance.py b/test/test_ip6_vrf_multi_instance.py
index af80b5ba065..769cb2e51b3 100644
--- a/test/test_ip6_vrf_multi_instance.py
+++ b/test/test_ip6_vrf_multi_instance.py
@@ -187,9 +187,10 @@ class TestIP6VrfMultiInst(VppTestCase):
pg_if = self.pg_if_by_vrf_id[vrf_id][0]
dest_addr = pg_if.remote_hosts[0].ip6n
dest_addr_len = 64
+ self.vapi.ip_table_add_del(vrf_id, is_add=1, is_ipv6=1)
self.vapi.ip_add_del_route(
dest_addr, dest_addr_len, pg_if.local_ip6n, is_ipv6=1,
- table_id=vrf_id, create_vrf_if_needed=1, is_multipath=1)
+ table_id=vrf_id, is_multipath=1)
self.logger.info("IPv6 VRF ID %d created" % vrf_id)
if vrf_id not in self.vrf_list:
self.vrf_list.append(vrf_id)
@@ -232,6 +233,7 @@ class TestIP6VrfMultiInst(VppTestCase):
self.logger.info("IPv6 VRF ID %d reset" % vrf_id)
self.logger.debug(self.vapi.ppcli("show ip6 fib"))
self.logger.debug(self.vapi.ppcli("show ip6 neighbors"))
+ self.vapi.ip_table_add_del(vrf_id, is_add=0, is_ipv6=1)
def create_stream(self, src_if, packet_sizes):
"""
diff --git a/test/test_ip_mcast.py b/test/test_ip_mcast.py
index 276555d6267..7cad683cac5 100644
--- a/test/test_ip_mcast.py
+++ b/test/test_ip_mcast.py
@@ -5,7 +5,7 @@ import unittest
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
from vpp_ip_route import VppIpMRoute, VppMRoutePath, VppMFibSignal, \
- MRouteItfFlags, MRouteEntryFlags
+ MRouteItfFlags, MRouteEntryFlags, VppIpTable
from scapy.packet import Raw
from scapy.layers.l2 import Ether
@@ -44,16 +44,37 @@ class TestIPMcast(VppTestCase):
super(TestIPMcast, self).setUp()
# create 8 pg interfaces
- self.create_pg_interfaces(range(8))
+ self.create_pg_interfaces(range(9))
# setup interfaces
- for i in self.pg_interfaces:
+ for i in self.pg_interfaces[:8]:
i.admin_up()
i.config_ip4()
i.config_ip6()
i.resolve_arp()
i.resolve_ndp()
+ # one more in a vrf
+ tbl4 = VppIpTable(self, 10)
+ tbl4.add_vpp_config()
+ self.pg8.set_table_ip4(10)
+ self.pg8.config_ip4()
+
+ tbl6 = VppIpTable(self, 10, is_ip6=1)
+ tbl6.add_vpp_config()
+ self.pg8.set_table_ip6(10)
+ self.pg8.config_ip6()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+
+ self.pg8.set_table_ip4(0)
+ self.pg8.set_table_ip6(0)
+ super(TestIPMcast, self).tearDown()
+
def create_stream_ip4(self, src_if, src_ip, dst_ip, payload_size=0):
pkts = []
# default to small packet sizes
@@ -663,6 +684,77 @@ class TestIPMcast(VppTestCase):
#
route_232_1_1_1.remove_vpp_config()
+ def test_ip_mcast_vrf(self):
+ """ IP Multicast Replication in non-default table"""
+
+ #
+ # An (S,G).
+ # one accepting interface, pg8, 2 forwarding interfaces
+ #
+ route_1_1_1_1_232_1_1_1 = VppIpMRoute(
+ self,
+ "1.1.1.1",
+ "232.1.1.1", 64,
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+ [VppMRoutePath(self.pg8.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+ VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
+ VppMRoutePath(self.pg2.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
+ table_id=10)
+ route_1_1_1_1_232_1_1_1.add_vpp_config()
+
+ #
+ # a stream that matches the route for (1.1.1.1,232.1.1.1)
+ # small packets
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg8, "1.1.1.1", "232.1.1.1")
+ self.pg8.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # We expect replications on Pg1 & 2
+ self.verify_capture_ip4(self.pg1, tx)
+ self.verify_capture_ip4(self.pg2, tx)
+
+ def test_ip6_mcast_vrf(self):
+ """ IPv6 Multicast Replication in non-default table"""
+
+ #
+ # An (S,G).
+ # one accepting interface, pg8, 2 forwarding interfaces
+ #
+ route_2001_ff01_1 = VppIpMRoute(
+ self,
+ "2001::1",
+ "ff01::1", 256,
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+ [VppMRoutePath(self.pg8.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+ VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
+ VppMRoutePath(self.pg2.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
+ table_id=10,
+ is_ip6=1)
+ route_2001_ff01_1.add_vpp_config()
+
+ #
+ # a stream that matches the route for (2001::1, ff01::1)
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip6(self.pg8, "2001::1", "ff01::1")
+ self.pg8.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # We expect replications on Pg1 & 2
+ self.verify_capture_ip6(self.pg1, tx)
+ self.verify_capture_ip6(self.pg2, tx)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_mpls.py b/test/test_mpls.py
index b2226a74bdf..460a32d1fc1 100644
--- a/test/test_mpls.py
+++ b/test/test_mpls.py
@@ -6,7 +6,7 @@ import socket
from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
- MRouteItfFlags, MRouteEntryFlags, DpoProto
+ MRouteItfFlags, MRouteEntryFlags, DpoProto, VppIpTable, VppMplsTable
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
from scapy.packet import Raw
@@ -60,9 +60,23 @@ class TestMPLS(VppTestCase):
# setup both interfaces
# assign them different tables.
table_id = 0
+ self.tables = []
+
+ tbl = VppMplsTable(self, 0)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
for i in self.pg_interfaces:
i.admin_up()
+
+ if table_id != 0:
+ tbl = VppIpTable(self, table_id)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+ tbl = VppIpTable(self, table_id, is_ip6=1)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
i.set_table_ip4(table_id)
i.set_table_ip6(table_id)
i.config_ip4()
@@ -73,12 +87,15 @@ class TestMPLS(VppTestCase):
table_id += 1
def tearDown(self):
- super(TestMPLS, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.ip6_disable()
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+ i.disable_mpls()
i.admin_down()
+ super(TestMPLS, self).tearDown()
# the default of 64 matches the IP packet TTL default
def create_stream_labelled_ip4(
@@ -1092,6 +1109,9 @@ class TestMPLSDisabled(VppTestCase):
# create 2 pg interfaces
self.create_pg_interfaces(range(2))
+ self.tbl = VppMplsTable(self, 0)
+ self.tbl.add_vpp_config()
+
# PG0 is MPLS enabled
self.pg0.admin_up()
self.pg0.config_ip4()
@@ -1102,11 +1122,13 @@ class TestMPLSDisabled(VppTestCase):
self.pg1.admin_up()
def tearDown(self):
- super(TestMPLSDisabled, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
+ self.pg0.disable_mpls()
+ super(TestMPLSDisabled, self).tearDown()
+
def send_and_assert_no_replies(self, intf, pkts, remark):
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
@@ -1174,6 +1196,13 @@ class TestMPLSPIC(VppTestCase):
# create 2 pg interfaces
self.create_pg_interfaces(range(4))
+ mpls_tbl = VppMplsTable(self, 0)
+ mpls_tbl.add_vpp_config()
+ tbl4 = VppIpTable(self, 1)
+ tbl4.add_vpp_config()
+ tbl6 = VppIpTable(self, 1, is_ip6=1)
+ tbl6.add_vpp_config()
+
# core links
self.pg0.admin_up()
self.pg0.config_ip4()
@@ -1201,14 +1230,15 @@ class TestMPLSPIC(VppTestCase):
self.pg3.resolve_ndp()
def tearDown(self):
- super(TestMPLSPIC, self).tearDown()
self.pg0.disable_mpls()
+ self.pg1.disable_mpls()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.set_table_ip4(0)
i.set_table_ip6(0)
i.admin_down()
+ super(TestMPLSPIC, self).tearDown()
def test_mpls_ibgp_pic(self):
""" MPLS iBGP PIC edge convergence
@@ -1534,24 +1564,30 @@ class TestMPLSL2(VppTestCase):
# create 2 pg interfaces
self.create_pg_interfaces(range(2))
+ # create the default MPLS table
+ self.tables = []
+ tbl = VppMplsTable(self, 0)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
# use pg0 as the core facing interface
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
self.pg0.enable_mpls()
- # use the other 2 for customer facg L2 links
+ # use the other 2 for customer facing L2 links
for i in self.pg_interfaces[1:]:
i.admin_up()
def tearDown(self):
- super(TestMPLSL2, self).tearDown()
for i in self.pg_interfaces[1:]:
i.admin_down()
self.pg0.disable_mpls()
self.pg0.unconfig_ip4()
self.pg0.admin_down()
+ super(TestMPLSL2, self).tearDown()
def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels,
ttl=255, top=None):
diff --git a/test/test_nat.py b/test/test_nat.py
index 1f2d17ab101..73e9e217348 100644
--- a/test/test_nat.py
+++ b/test/test_nat.py
@@ -549,6 +549,8 @@ class TestNAT44(MethodHolder):
cls.pg0.configure_ipv4_neighbors()
cls.overlapping_interfaces = list(list(cls.pg_interfaces[4:7]))
+ cls.vapi.ip_table_add_del(10, is_add=1)
+ cls.vapi.ip_table_add_del(20, is_add=1)
cls.pg4._local_ip4 = "172.16.255.1"
cls.pg4._local_ip4n = socket.inet_pton(socket.AF_INET, i.local_ip4)
@@ -1797,6 +1799,8 @@ class TestNAT44(MethodHolder):
self.pg0.unconfig_ip4()
self.pg1.unconfig_ip4()
+ self.vapi.ip_table_add_del(vrf_id1, is_add=1)
+ self.vapi.ip_table_add_del(vrf_id2, is_add=1)
self.pg0.set_table_ip4(vrf_id1)
self.pg1.set_table_ip4(vrf_id2)
self.pg0.config_ip4()
@@ -1825,6 +1829,13 @@ class TestNAT44(MethodHolder):
capture = self.pg2.get_capture(len(pkts))
self.verify_capture_out(capture, nat_ip2)
+ self.pg0.unconfig_ip4()
+ self.pg1.unconfig_ip4()
+ self.pg0.set_table_ip4(0)
+ self.pg1.set_table_ip4(0)
+ self.vapi.ip_table_add_del(vrf_id1, is_add=0)
+ self.vapi.ip_table_add_del(vrf_id2, is_add=0)
+
def test_vrf_feature_independent(self):
""" NAT44 tenant VRF independent address pool mode """
@@ -3042,6 +3053,8 @@ class TestNAT64(MethodHolder):
cls.ip6_interfaces.append(cls.pg_interfaces[2])
cls.ip4_interfaces = list(cls.pg_interfaces[1:2])
+ cls.vapi.ip_table_add_del(cls.vrf1_id, is_add=1, is_ipv6=1)
+
cls.pg_interfaces[2].set_table_ip6(cls.vrf1_id)
cls.pg0.generate_remote_hosts(2)
diff --git a/test/test_neighbor.py b/test/test_neighbor.py
index 1c7cc2678ff..68dde2fb840 100644
--- a/test/test_neighbor.py
+++ b/test/test_neighbor.py
@@ -5,7 +5,8 @@ from socket import AF_INET, AF_INET6, inet_pton
from framework import VppTestCase, VppTestRunner
from vpp_neighbor import VppNeighbor, find_nbr
-from vpp_ip_route import VppIpRoute, VppRoutePath, find_route
+from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, \
+ VppIpTable
from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP, Dot1Q
@@ -39,11 +40,13 @@ class ARPTestCase(VppTestCase):
self.pg1.config_ip6()
# pg3 in a different VRF
+ self.tbl = VppIpTable(self, 1)
+ self.tbl.add_vpp_config()
+
self.pg3.set_table_ip4(1)
self.pg3.config_ip4()
def tearDown(self):
- super(ARPTestCase, self).tearDown()
self.pg0.unconfig_ip4()
self.pg0.unconfig_ip6()
@@ -51,10 +54,13 @@ class ARPTestCase(VppTestCase):
self.pg1.unconfig_ip6()
self.pg3.unconfig_ip4()
+ self.pg3.set_table_ip4(0)
for i in self.pg_interfaces:
i.admin_down()
+ super(ARPTestCase, self).tearDown()
+
def verify_arp_req(self, rx, smac, sip, dip):
ether = rx[Ether]
self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
@@ -1080,6 +1086,62 @@ class ARPTestCase(VppTestCase):
self.pg0.remote_ip4,
self.pg1.remote_hosts[1].ip4)
+ def test_arp_static(self):
+ """ ARP Static"""
+ self.pg2.generate_remote_hosts(3)
+
+ #
+ # Add a static ARP entry
+ #
+ static_arp = VppNeighbor(self,
+ self.pg2.sw_if_index,
+ self.pg2.remote_hosts[1].mac,
+ self.pg2.remote_hosts[1].ip4,
+ is_static=1)
+ static_arp.add_vpp_config()
+
+ #
+ # Add the connected prefix to the interface
+ #
+ self.pg2.config_ip4()
+
+ #
+ # We should now find the adj-fib
+ #
+ self.assertTrue(find_nbr(self,
+ self.pg2.sw_if_index,
+ self.pg2.remote_hosts[1].ip4,
+ is_static=1))
+ self.assertTrue(find_route(self,
+ self.pg2.remote_hosts[1].ip4,
+ 32))
+
+ #
+ # remove the connected
+ #
+ self.pg2.unconfig_ip4()
+
+ #
+ # put the interface into table 1
+ #
+ self.pg2.set_table_ip4(1)
+
+ #
+ # configure the same connected and expect to find the
+ # adj fib in the new table
+ #
+ self.pg2.config_ip4()
+ self.assertTrue(find_route(self,
+ self.pg2.remote_hosts[1].ip4,
+ 32,
+ table_id=1))
+
+ #
+ # clean-up
+ #
+ self.pg2.unconfig_ip4()
+ self.pg2.set_table_ip4(0)
+
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py
index 2c489e3ccb0..b79937939c0 100644
--- a/test/vpp_ip_route.py
+++ b/test/vpp_ip_route.py
@@ -54,6 +54,46 @@ def find_route(test, ip_addr, len, table_id=0, inet=AF_INET):
return False
+class VppIpTable(VppObject):
+
+ def __init__(self,
+ test,
+ table_id,
+ is_ip6=0):
+ self._test = test
+ self.table_id = table_id
+ self.is_ip6 = is_ip6
+
+ def add_vpp_config(self):
+ self._test.vapi.ip_table_add_del(
+ self.table_id,
+ is_ipv6=self.is_ip6,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.ip_table_add_del(
+ self.table_id,
+ is_ipv6=self.is_ip6,
+ is_add=0)
+
+ def query_vpp_config(self):
+ # find the default route
+ return find_route(self._test,
+ "::" if self.is_ip6 else "0.0.0.0",
+ 0,
+ self.table_id,
+ inet=AF_INET6 if self.is_ip6 == 1 else AF_INET)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return ("table-%s-%d" %
+ ("v6" if self.is_ip6 == 1 else "v4",
+ self.table_id))
+
+
class VppRoutePath(object):
def __init__(
@@ -391,6 +431,39 @@ class VppMplsIpBind(VppObject):
self.dest_addr_len))
+class VppMplsTable(VppObject):
+
+ def __init__(self,
+ test,
+ table_id):
+ self._test = test
+ self.table_id = table_id
+
+ def add_vpp_config(self):
+ self._test.vapi.mpls_table_add_del(
+ self.table_id,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.mpls_table_add_del(
+ self.table_id,
+ is_add=0)
+
+ def query_vpp_config(self):
+ # find the default route
+ dump = self._test.vapi.mpls_fib_dump()
+ if len(dump):
+ return True
+ return False
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return ("table-mpls-%d" % (self.table_id))
+
+
class VppMplsRoute(VppObject):
"""
MPLS Route/LSP
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index b70da026901..519aff80899 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -652,6 +652,24 @@ class VppPapiProvider(object):
return self.api(self.papi.delete_loopback,
{'sw_if_index': sw_if_index, })
+ def ip_table_add_del(self,
+ table_id,
+ is_add=1,
+ is_ipv6=0):
+ """
+
+ :param table_id
+ :param is_add: (Default value = 1)
+ :param is_ipv6: (Default value = 0)
+
+ """
+
+ return self.api(
+ self.papi.ip_table_add_del,
+ {'table_id': table_id,
+ 'is_add': is_add,
+ 'is_ipv6': is_ipv6})
+
def ip_add_del_route(
self,
dst_address,
@@ -664,7 +682,6 @@ class VppPapiProvider(object):
next_hop_n_out_labels=0,
next_hop_out_label_stack=[],
next_hop_via_label=MPLS_LABEL_INVALID,
- create_vrf_if_needed=0,
is_resolve_host=0,
is_resolve_attached=0,
classify_table_index=0xFFFFFFFF,
@@ -687,7 +704,6 @@ class VppPapiProvider(object):
:param vrf_id: (Default value = 0)
:param lookup_in_vrf: (Default value = 0)
:param classify_table_index: (Default value = 0xFFFFFFFF)
- :param create_vrf_if_needed: (Default value = 0)
:param is_add: (Default value = 1)
:param is_drop: (Default value = 0)
:param is_ipv6: (Default value = 0)
@@ -707,7 +723,6 @@ class VppPapiProvider(object):
'table_id': table_id,
'classify_table_index': classify_table_index,
'next_hop_table_id': next_hop_table_id,
- 'create_vrf_if_needed': create_vrf_if_needed,
'is_add': is_add,
'is_drop': is_drop,
'is_unreach': is_unreach,
@@ -912,6 +927,22 @@ class VppPapiProvider(object):
def mpls_fib_dump(self):
return self.api(self.papi.mpls_fib_dump, {})
+ def mpls_table_add_del(
+ self,
+ table_id,
+ is_add=1):
+ """
+
+ :param table_id
+ :param is_add: (Default value = 1)
+
+ """
+
+ return self.api(
+ self.papi.mpls_table_add_del,
+ {'mt_table_id': table_id,
+ 'mt_is_add': is_add})
+
def mpls_route_add_del(
self,
label,
@@ -925,7 +956,6 @@ class VppPapiProvider(object):
next_hop_n_out_labels=0,
next_hop_out_label_stack=[],
next_hop_via_label=MPLS_LABEL_INVALID,
- create_vrf_if_needed=0,
is_resolve_host=0,
is_resolve_attached=0,
is_interface_rx=0,
@@ -947,7 +977,6 @@ class VppPapiProvider(object):
:param vrf_id: (Default value = 0)
:param lookup_in_vrf: (Default value = 0)
:param classify_table_index: (Default value = 0xFFFFFFFF)
- :param create_vrf_if_needed: (Default value = 0)
:param is_add: (Default value = 1)
:param is_drop: (Default value = 0)
:param is_ipv6: (Default value = 0)
@@ -968,7 +997,6 @@ class VppPapiProvider(object):
'mr_eos': eos,
'mr_table_id': table_id,
'mr_classify_table_index': classify_table_index,
- 'mr_create_table_if_needed': create_vrf_if_needed,
'mr_is_add': is_add,
'mr_is_classify': is_classify,
'mr_is_multipath': is_multipath,
@@ -994,7 +1022,6 @@ class VppPapiProvider(object):
table_id=0,
ip_table_id=0,
is_ip4=1,
- create_vrf_if_needed=0,
is_bind=1):
"""
"""
@@ -1003,7 +1030,6 @@ class VppPapiProvider(object):
{'mb_mpls_table_id': table_id,
'mb_label': label,
'mb_ip_table_id': ip_table_id,
- 'mb_create_table_if_needed': create_vrf_if_needed,
'mb_is_bind': is_bind,
'mb_is_ip4': is_ip4,
'mb_address_length': dst_address_length,
@@ -1020,7 +1046,6 @@ class VppPapiProvider(object):
next_hop_n_out_labels=0,
next_hop_out_label_stack=[],
next_hop_via_label=MPLS_LABEL_INVALID,
- create_vrf_if_needed=0,
is_add=1,
l2_only=0,
is_multicast=0):
@@ -1034,7 +1059,6 @@ class VppPapiProvider(object):
:param vrf_id: (Default value = 0)
:param lookup_in_vrf: (Default value = 0)
:param classify_table_index: (Default value = 0xFFFFFFFF)
- :param create_vrf_if_needed: (Default value = 0)
:param is_add: (Default value = 1)
:param is_drop: (Default value = 0)
:param is_ipv6: (Default value = 0)
@@ -1844,7 +1868,6 @@ class VppPapiProvider(object):
i_flags,
rpf_id=0,
table_id=0,
- create_vrf_if_needed=0,
is_add=1,
is_ipv6=0,
is_local=0):
@@ -1857,7 +1880,6 @@ class VppPapiProvider(object):
'itf_flags': i_flags,
'table_id': table_id,
'rpf_id': rpf_id,
- 'create_vrf_if_needed': create_vrf_if_needed,
'is_add': is_add,
'is_ipv6': is_ipv6,
'is_local': is_local,