Diffstat (limited to 'test')
-rw-r--r--  test/patches/scapy-2.3.3/bier.patch    |  45
-rw-r--r--  test/patches/scapy-2.3.3/mpls.py.patch |  35
-rw-r--r--  test/test_bier.py                      | 390
-rw-r--r--  test/test_ip_mcast.py                  |  57
-rw-r--r--  test/vpp_bier.py                       | 267
-rw-r--r--  test/vpp_ip_route.py                   |  28
-rw-r--r--  test/vpp_papi_provider.py              | 119
7 files changed, 899 insertions(+), 42 deletions(-)
diff --git a/test/patches/scapy-2.3.3/bier.patch b/test/patches/scapy-2.3.3/bier.patch
new file mode 100644
index 00000000000..024805d0501
--- /dev/null
+++ b/test/patches/scapy-2.3.3/bier.patch
@@ -0,0 +1,45 @@
+diff --git a/scapy/contrib/bier.py b/scapy/contrib/bier.py
+new file mode 100644
+index 0000000..e173cdb
+--- /dev/null
++++ b/scapy/contrib/bier.py
+@@ -0,0 +1,39 @@
++# http://trac.secdev.org/scapy/ticket/31
++
++# scapy.contrib.description = BIER
++# scapy.contrib.status = loads
++
++from scapy.packet import *
++from scapy.fields import *
++from scapy.layers.inet import IP
++from scapy.layers.inet6 import IPv6
++
++class BIERLength:
++ BIER_LEN_64 = 0
++ BIER_LEN_128 = 1
++ BIER_LEN_256 = 2
++
++
++
++BIERnhcls = { 1: "MPLS",
++ 2: "MPLS",
++ 4: "IPv4",
++ 5: "IPv6" }
++
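++# Note: the first nibble ("id") defaults to 5, which identifies a BIER
++# header following the MPLS label stack; the default BitString of 32
++# bytes gives a 256-bit bit-string (BSL 256).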
++class BIER(Packet):
++ name = "BIER"
++ fields_desc = [ BitField("id", 5, 4),
++ BitField("version", 0, 4),
++ BitField("length", 0, 4),
++ BitField("entropy", 0, 20),
++ BitField("OAM", 0, 2),
++ BitField("RSV", 0, 2),
++ BitField("DSCP", 0, 6),
++ BitEnumField("Proto", 2, 6, BIERnhcls),
++ ShortField("BFRID", 0),
++ StrFixedLenField("BitString",
++ chr(255)*32, 32) ]
++
++
++bind_layers(BIER, IP, Proto=4)
++bind_layers(BIER, IPv6, Proto=5)
diff --git a/test/patches/scapy-2.3.3/mpls.py.patch b/test/patches/scapy-2.3.3/mpls.py.patch
index f63a70a3cd7..f3293e8eee2 100644
--- a/test/patches/scapy-2.3.3/mpls.py.patch
+++ b/test/patches/scapy-2.3.3/mpls.py.patch
@@ -1,16 +1,35 @@
diff --git a/scapy/contrib/mpls.py b/scapy/contrib/mpls.py
-index 640a0c5..6af1d4a 100644
+index 640a0c5..944723a 100644
--- a/scapy/contrib/mpls.py
+++ b/scapy/contrib/mpls.py
-@@ -18,6 +18,8 @@ class MPLS(Packet):
+@@ -6,6 +6,7 @@
+ from scapy.packet import Packet, bind_layers, Padding
+ from scapy.fields import BitField,ByteField
+ from scapy.layers.inet import IP
++from scapy.contrib.bier import BIER
+ from scapy.layers.inet6 import IPv6
+ from scapy.layers.l2 import Ether, GRE
+
+@@ -17,9 +18,12 @@ class MPLS(Packet):
def guess_payload_class(self, payload):
- if len(payload) >= 1:
-+ if not self.s:
-+ return MPLS
- ip_version = (ord(payload[0]) >> 4) & 0xF
- if ip_version == 4:
- return IP
+- if len(payload) >= 1:
+- ip_version = (ord(payload[0]) >> 4) & 0xF
+- if ip_version == 4:
+- return IP
+- elif ip_version == 6:
+- return IPv6
+- return Padding
++ if not self.s:
++ return MPLS
++ ip_version = (ord(payload[0]) >> 4) & 0xF
++ if ip_version == 4:
++ return IP
++ elif ip_version == 5:
++ return BIER
++ elif ip_version == 6:
++ return IPv6
++ return Padding
@@ -27,3 +29,4 @@ class MPLS(Packet):
bind_layers(Ether, MPLS, type=0x8847)
diff --git a/test/test_bier.py b/test/test_bier.py
new file mode 100644
index 00000000000..1a4567bd656
--- /dev/null
+++ b/test/test_bier.py
@@ -0,0 +1,390 @@
+#!/usr/bin/env python
+
+import unittest
+import socket
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
+ VppMplsTable, VppIpMRoute, VppMRoutePath, VppIpTable, \
+ MRouteEntryFlags, MRouteItfFlags, MPLS_LABEL_INVALID, DpoProto
+from vpp_bier import *
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.layers.inet6 import IPv6
+from scapy.contrib.mpls import MPLS
+from scapy.contrib.bier import *
+
+
+class TestBFIB(VppTestCase):
+ """ BIER FIB Test Case """
+
+ def test_bfib(self):
+ """ BFIB Unit Tests """
+ error = self.vapi.cli("test bier")
+
+ if error:
+ self.logger.critical(error)
+ self.assertEqual(error.find("Failed"), -1)
+
+
+class TestBier(VppTestCase):
+ """ BIER Test Case """
+
+ def setUp(self):
+ super(TestBier, self).setUp()
+
+ # create 2 pg interfaces
+ self.create_pg_interfaces(range(3))
+
+ # create the default MPLS table
+ self.tables = []
+ tbl = VppMplsTable(self, 0)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
+ tbl = VppIpTable(self, 10)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
+ # setup both interfaces
+ for i in self.pg_interfaces:
+ if i == self.pg2:
+ i.set_table_ip4(10)
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.enable_mpls()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.disable_mpls()
+ i.unconfig_ip4()
+ i.set_table_ip4(0)
+ i.admin_down()
+ super(TestBier, self).tearDown()
+
+ def send_and_assert_no_replies(self, intf, pkts, remark):
+ intf.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ for i in self.pg_interfaces:
+ i.assert_nothing_captured(remark=remark)
+
+ def send_and_expect(self, input, pkts, output):
+ self.vapi.cli("trace add bier-mpls-lookup 10")
+ input.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
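+ # get_capture() raises if the expected number of packets is not seen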
+ rx = output.get_capture(len(pkts))
+
+ def test_bier_midpoint(self):
+ """BIER midpoint"""
+
+ #
+ # Add a BIER table for sub-domain 0, set 0, and BSL 256
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # A packet with no bits set gets dropped
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256,
+ BitString=chr(0)*64) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+ pkts = [p]
+
+ self.send_and_assert_no_replies(self.pg0, pkts,
+ "Empty Bit-String")
+
+ #
+ # Add a BIER route for each bit-position in the table, each via a
+ # different next-hop. This tests that the BIER walk-and-replicate
+ # forwarding works for all bit-positions.
+ #
+ nh_routes = []
+ bier_routes = []
+ for i in range(1, 256):
+ nh = "10.0.%d.%d" % (i / 255, i % 255)
+ nh_routes.append(VppIpRoute(self, nh, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[2000+i])]))
+ nh_routes[-1].add_vpp_config()
+
+ bier_routes.append(VppBierRoute(self, bti, i, nh, 100+i))
+ bier_routes[-1].add_vpp_config()
+
+ #
+ # A packet with all bits set is replicated once per bit-position
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+ pkts = [p]
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(255)
+
+ for rxp in rx:
+ #
+ # The packets are not required to arrive in bit-position order.
+ # When we set up the routes above we used the bit-position to
+ # construct the out-label, so use that here to determine the BP
+ #
+ olabel = rxp[MPLS]
+ bp = olabel.label - 2000
+
+ blabel = olabel[MPLS].payload
+ self.assertEqual(blabel.label, 100+bp)
+
+ bier_hdr = blabel[MPLS].payload
+
+ self.assertEqual(bier_hdr.id, 5)
+ self.assertEqual(bier_hdr.version, 0)
+ self.assertEqual(bier_hdr.length, BIERLength.BIER_LEN_256)
+ self.assertEqual(bier_hdr.entropy, 0)
+ self.assertEqual(bier_hdr.OAM, 0)
+ self.assertEqual(bier_hdr.RSV, 0)
+ self.assertEqual(bier_hdr.DSCP, 0)
+ self.assertEqual(bier_hdr.Proto, 5)
+
+ # The bit-string should have only the bit for this BP set.
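+ # e.g. for bp=10 the string should end ...\x02\x00: BP 1 maps to
+ # bit 0 of the last byte, BP 9 to bit 0 of the byte before it,
+ # and so on.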
+ i = 0
+ bitstring = ""
+ bpi = bp - 1
+ while (i < bpi/8):
+ bitstring = chr(0) + bitstring
+ i += 1
+ bitstring = chr(1 << bpi % 8) + bitstring
+
+ while len(bitstring) < 32:
+ bitstring = chr(0) + bitstring
+
+ self.assertEqual(len(bitstring), len(bier_hdr.BitString))
+ self.assertEqual(bitstring, bier_hdr.BitString)
+
+ def test_bier_head(self):
+ """BIER head"""
+
+ #
+ # Add a BIER table for sub-domain 0, set 0, and BSL 256
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # 2 bit positions via two next hops
+ #
+ nh1 = "10.0.0.1"
+ nh2 = "10.0.0.2"
+ ip_route_1 = VppIpRoute(self, nh1, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[2001])])
+ ip_route_2 = VppIpRoute(self, nh2, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[2002])])
+ ip_route_1.add_vpp_config()
+ ip_route_2.add_vpp_config()
+
+ bier_route_1 = VppBierRoute(self, bti, 1, nh1, 101)
+ bier_route_2 = VppBierRoute(self, bti, 2, nh2, 102)
+ bier_route_1.add_vpp_config()
+ bier_route_2.add_vpp_config()
+
+ #
+ # An imposition object with both bit-positions set
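+ # (0x3 in the last byte of the 32-byte string covers BPs 1 and 2)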
+ #
+ bi = VppBierImp(self, bti, 333, chr(0x3) * 32)
+ bi.add_vpp_config()
+
+ #
+ # Add a multicast route that will forward into the BIER domain
+ #
+ route_ing_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_BIER,
+ bier_imp=bi.bi_index)])
+ route_ing_232_1_1_1.add_vpp_config()
+
+ #
+ # Inject an IP packet. We expect it to be BIER encapsulated and
+ # replicated.
+ #
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234))
+
+ self.pg0.add_stream([p])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
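+ # expect one replica per configured bit-position (BP 1 and BP 2)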
+ rx = self.pg1.get_capture(2)
+
+ def test_bier_tail(self):
+ """BIER Tail"""
+
+ #
+ # Add a BIER table for sub-domain 0, set 0, and BSL 256
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # disposition table
+ #
+ bdt = VppBierDispTable(self, 8)
+ bdt.add_vpp_config()
+
+ #
+ # BIER route in table that's for-us
+ #
+ bier_route_1 = VppBierRoute(self, bti, 1, "0.0.0.0", 0,
+ disp_table=8)
+ bier_route_1.add_vpp_config()
+
+ #
+ # An entry in the disposition table
+ #
+ bier_de_1 = VppBierDispEntry(self, bdt.id, 99,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ "0.0.0.0", 0, rpf_id=8192)
+ bier_de_1.add_vpp_config()
+
+ #
+ # A multicast route to forward post BIER disposition
+ #
+ route_eg_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_1.add_vpp_config()
+ route_eg_232_1_1_1.update_rpf_id(8192)
+
+ #
+ # A BIER packet with BFR-ID 99 matches the disposition entry and is
+ # forwarded post-disposition
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256, BFRID=99) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+
+ self.send_and_expect(self.pg0, [p], self.pg1)
+
+ def test_bier_e2e(self):
+ """ BIER end-to-end """
+
+ #
+ # Add a BIER table for sub-domain 0, set 0, and BSL 256
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # Imposition object with bit-string bytes 0x5,
+ # sender BFR-id 333
+ #
+ bi = VppBierImp(self, bti, 333, chr(0x5) * 32)
+ bi.add_vpp_config()
+
+ #
+ # Add a multicast route that will forward into the BIER domain
+ #
+ route_ing_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_BIER,
+ bier_imp=bi.bi_index)])
+ route_ing_232_1_1_1.add_vpp_config()
+
+ #
+ # disposition table 8
+ #
+ bdt = VppBierDispTable(self, 8)
+ bdt.add_vpp_config()
+
+ #
+ # BIER route in table that's for-us, resolving through
+ # disp table 8.
+ #
+ bier_route_1 = VppBierRoute(self, bti, 1, "0.0.0.0",
+ MPLS_LABEL_INVALID,
+ disp_table=8)
+ bier_route_1.add_vpp_config()
+
+ #
+ # An entry in the disposition table for sender 333
+ # lookup in VRF 10
+ #
+ bier_de_1 = VppBierDispEntry(self, bdt.id, 333,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ "0.0.0.0", 10, rpf_id=8192)
+ bier_de_1.add_vpp_config()
+
+ #
+ # Add a multicast route that will forward the traffic
+ # post-disposition
+ #
+ route_eg_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+ table_id=10,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_1.add_vpp_config()
+ route_eg_232_1_1_1.update_rpf_id(8192)
+
+ #
+ # Inject a packet in VRF 0. We expect it to be BIER encapsulated,
+ # replicated, then hit the disposition entry and be forwarded
+ # out of VRF 10, i.e. on pg1
+ #
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234))
+
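+ # only BP 1 has a route, so each of the 65 packets yields one copy on pg1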
+ self.send_and_expect(self.pg0, p*65, self.pg1)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_ip_mcast.py b/test/test_ip_mcast.py
index 7cad683cac5..8ca92df26b2 100644
--- a/test/test_ip_mcast.py
+++ b/test/test_ip_mcast.py
@@ -5,7 +5,7 @@ import unittest
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
from vpp_ip_route import VppIpMRoute, VppMRoutePath, VppMFibSignal, \
- MRouteItfFlags, MRouteEntryFlags, VppIpTable
+ MRouteItfFlags, MRouteEntryFlags, VppIpTable, DpoProto
from scapy.packet import Raw
from scapy.layers.l2 import Ether
@@ -324,10 +324,6 @@ class TestIPMcast(VppTestCase):
self.verify_capture_ip4(self.pg6, tx)
self.verify_capture_ip4(self.pg7, tx)
- route_232_1_1_1.remove_vpp_config()
- route_1_1_1_1_232_1_1_1.remove_vpp_config()
- route_232.remove_vpp_config()
-
def test_ip6_mcast(self):
""" IPv6 Multicast Replication """
@@ -354,13 +350,17 @@ class TestIPMcast(VppTestCase):
"ff01::1", 128,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+ MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT,
+ proto=DpoProto.DPO_PROTO_IP6),
VppMRoutePath(self.pg1.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_IP6),
VppMRoutePath(self.pg2.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_IP6),
VppMRoutePath(self.pg3.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_IP6)],
is_ip6=1)
route_ff01_1.add_vpp_config()
@@ -374,11 +374,14 @@ class TestIPMcast(VppTestCase):
"ff01::1", 256,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+ MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT,
+ proto=DpoProto.DPO_PROTO_IP6),
VppMRoutePath(self.pg1.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_IP6),
VppMRoutePath(self.pg2.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_IP6)],
is_ip6=1)
route_2001_ff01_1.add_vpp_config()
@@ -392,9 +395,11 @@ class TestIPMcast(VppTestCase):
"ff01::", 16,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+ MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT,
+ proto=DpoProto.DPO_PROTO_IP6),
VppMRoutePath(self.pg1.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_IP6)],
is_ip6=1)
route_ff01.add_vpp_config()
@@ -474,10 +479,6 @@ class TestIPMcast(VppTestCase):
self.pg3.assert_nothing_captured(
remark="IP multicast packets forwarded on PG3")
- route_ff01.remove_vpp_config()
- route_ff01_1.remove_vpp_config()
- route_2001_ff01_1.remove_vpp_config()
-
def _mcast_connected_send_stream(self, dst_ip):
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0,
@@ -584,8 +585,10 @@ class TestIPMcast(VppTestCase):
signal_232_1_1_1_itf_0.compare(signals[1])
signal_232_1_1_2_itf_0.compare(signals[0])
- route_232_1_1_1.remove_vpp_config()
- route_232_1_1_2.remove_vpp_config()
+ route_232_1_1_1.update_entry_flags(
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE)
+ route_232_1_1_2.update_entry_flags(
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE)
def test_ip_mcast_signal(self):
""" IP Multicast Signal """
@@ -679,11 +682,6 @@ class TestIPMcast(VppTestCase):
signals = self.vapi.mfib_signal_dump()
self.assertEqual(0, len(signals))
- #
- # Cleanup
- #
- route_232_1_1_1.remove_vpp_config()
-
def test_ip_mcast_vrf(self):
""" IP Multicast Replication in non-default table"""
@@ -733,11 +731,14 @@ class TestIPMcast(VppTestCase):
"ff01::1", 256,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg8.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+ MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT,
+ proto=DpoProto.DPO_PROTO_IP6),
VppMRoutePath(self.pg1.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_IP6),
VppMRoutePath(self.pg2.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_IP6)],
table_id=10,
is_ip6=1)
route_2001_ff01_1.add_vpp_config()
diff --git a/test/vpp_bier.py b/test/vpp_bier.py
new file mode 100644
index 00000000000..58c4f7248da
--- /dev/null
+++ b/test/vpp_bier.py
@@ -0,0 +1,267 @@
+"""
+ BIER Tables and Routes
+"""
+
+import socket
+from vpp_object import VppObject
+
+
+class BIER_HDR_PAYLOAD:
+ BIER_HDR_PROTO_MPLS_DOWN_STREAM = 1
+ BIER_HDR_PROTO_MPLS_UP_STREAM = 2
+ BIER_HDR_PROTO_ETHERNET = 3
+ BIER_HDR_PROTO_IPV4 = 4
+ BIER_HDR_PROTO_IPV6 = 5
+ BIER_HDR_PROTO_VXLAN = 6
+ BIER_HDR_PROTO_CTRL = 7
+ BIER_HDR_PROTO_OAM = 8
+
+
+class VppBierTableID():
+ def __init__(self, set_id, sub_domain_id, hdr_len_id):
+ self.set_id = set_id
+ self.sub_domain_id = sub_domain_id
+ self.hdr_len_id = hdr_len_id
+
+
+def find_bier_table(test, bti):
+ tables = test.vapi.bier_table_dump()
+ for t in tables:
+ if bti.set_id == t.bt_tbl_id.bt_set \
+ and bti.sub_domain_id == t.bt_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == t.bt_tbl_id.bt_hdr_len_id:
+ return True
+ return False
+
+
+def find_bier_route(test, bti, bp):
+ routes = test.vapi.bier_route_dump(bti)
+ for r in routes:
+ if bti.set_id == r.br_tbl_id.bt_set \
+ and bti.sub_domain_id == r.br_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == r.br_tbl_id.bt_hdr_len_id \
+ and bp == r.br_bp:
+ return True
+ return False
+
+
+def find_bier_disp_table(test, bdti):
+ tables = test.vapi.bier_disp_table_dump()
+ for t in tables:
+ if bdti == t.bdt_tbl_id:
+ return True
+ return False
+
+
+def find_bier_disp_entry(test, bdti, bp):
+ entries = test.vapi.bier_disp_entry_dump(bdti)
+ for e in entries:
+ if bp == e.bde_bp \
+ and bdti == e.bde_tbl_id:
+ return True
+ return False
+
+
+def find_bier_imp(test, bti, bp):
+ imps = test.vapi.bier_imp_dump()
+ for i in imps:
+ if bti.set_id == i.bi_tbl_id.bt_set \
+ and bti.sub_domain_id == i.bi_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == i.bi_tbl_id.bt_hdr_len_id \
+ and bp == i.bi_src:
+ return True
+ return False
+
+
+class VppBierTable(VppObject):
+ """
+ BIER Table
+ """
+
+ def __init__(self, test, id, mpls_label):
+ self._test = test
+ self.id = id
+ self.mpls_label = mpls_label
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_table_add_del(
+ self.id,
+ self.mpls_label,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_table_add_del(
+ self.id,
+ self.mpls_label,
+ is_add=0)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "bier-table;[%d:%d:%d]" % (self.id.set_id,
+ self.id.sub_domain_id,
+ self.id.hdr_len_id)
+
+ def query_vpp_config(self):
+ return find_bier_table(self._test, self.id)
+
+
+class VppBierRoute(VppObject):
+ """
+ BIER route
+ """
+
+ def __init__(self, test, tbl_id, bp, nh, out_label,
+ disp_table=0):
+ self._test = test
+ self.tbl_id = tbl_id
+ self.out_label = out_label
+ self.bp = bp
+ self.disp_table = disp_table
+ self.nh = socket.inet_pton(socket.AF_INET, nh)
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ self.nh,
+ self.out_label,
+ self.disp_table,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ self.nh,
+ self.out_label,
+ self.disp_table,
+ is_add=0)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "bier-route;[%d:%d:%d:%d]" % (self.tbl_id.set_id,
+ self.tbl_id.sub_domain_id,
+ self.tbl_id.hdr_len_id,
+ self.bp)
+
+ def query_vpp_config(self):
+ return find_bier_route(self._test, self.tbl_id, self.bp)
+
+
+class VppBierImp(VppObject):
+ """
+ BIER Imposition
+ """
+
+ def __init__(self, test, tbl_id, src, ibytes):
+ self._test = test
+ self.tbl_id = tbl_id
+ self.ibytes = ibytes
+ self.src = src
+
+ def add_vpp_config(self):
+ res = self._test.vapi.bier_imp_add(
+ self.tbl_id,
+ self.src,
+ self.ibytes)
+ self.bi_index = res.bi_index
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_imp_del(
+ self.bi_index)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "bier-imp;[%d:%d:%d:%d]" % (self.tbl_id.set_id,
+ self.tbl_id.sub_domain_id,
+ self.tbl_id.hdr_len_id,
+ self.src)
+
+ def query_vpp_config(self):
+ return find_bier_imp(self._test, self.tbl_id, self.src)
+
+
+class VppBierDispTable(VppObject):
+ """
+ BIER Disposition Table
+ """
+
+ def __init__(self, test, id):
+ self._test = test
+ self.id = id
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_disp_table_add_del(
+ self.id,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_disp_table_add_del(
+ self.id,
+ is_add=0)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "bier-disp-table;[%d]" % (self.id)
+
+ def query_vpp_config(self):
+ return find_bier_disp_table(self._test, self.id)
+
+
+class VppBierDispEntry(VppObject):
+ """
+ BIER Disposition Entry
+ """
+
+ def __init__(self, test, tbl_id, bp, payload_proto, nh, nh_tbl,
+ rpf_id=~0):
+ self._test = test
+ self.tbl_id = tbl_id
+ self.nh_tbl = nh_tbl
+ self.bp = bp
+ self.payload_proto = payload_proto
+ self.rpf_id = rpf_id
+ self.nh = socket.inet_pton(socket.AF_INET, nh)
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_disp_entry_add_del(
+ self.tbl_id,
+ self.bp,
+ self.payload_proto,
+ self.nh,
+ self.nh_tbl,
+ self.rpf_id,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_disp_entry_add_del(
+ self.tbl_id,
+ self.bp,
+ self.payload_proto,
+ self.nh,
+ self.nh_tbl,
+ self.rpf_id,
+ is_add=0)
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "bier-disp-entry;[%d:%d]" % (self.tbl_id,
+ self.bp)
+
+ def query_vpp_config(self):
+ return find_bier_disp_entry(self._test, self.tbl_id, self.bp)
diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py
index 34aa5428d18..3bc3b65111c 100644
--- a/test/vpp_ip_route.py
+++ b/test/vpp_ip_route.py
@@ -34,7 +34,8 @@ class DpoProto:
DPO_PROTO_IP6 = 1
DPO_PROTO_MPLS = 2
DPO_PROTO_ETHERNET = 3
- DPO_PROTO_NSH = 4
+ DPO_PROTO_BIER = 4
+ DPO_PROTO_NSH = 5
def find_route(test, ip_addr, len, table_id=0, inet=AF_INET):
@@ -138,10 +139,15 @@ class VppRoutePath(object):
class VppMRoutePath(VppRoutePath):
- def __init__(self, nh_sw_if_index, flags):
- super(VppMRoutePath, self).__init__("0.0.0.0",
- nh_sw_if_index)
+ def __init__(self, nh_sw_if_index, flags,
+ proto=DpoProto.DPO_PROTO_IP4,
+ bier_imp=0):
+ super(VppMRoutePath, self).__init__(
+ "::" if proto is DpoProto.DPO_PROTO_IP6 else "0.0.0.0",
+ nh_sw_if_index,
+ proto=proto)
self.nh_i_flags = flags
+ self.bier_imp = bier_imp
class VppIpRoute(VppObject):
@@ -283,8 +289,10 @@ class VppIpMRoute(VppObject):
self.grp_addr,
self.grp_addr_len,
self.e_flags,
+ path.proto,
path.nh_itf,
path.nh_i_flags,
+ bier_imp=path.bier_imp,
rpf_id=self.rpf_id,
table_id=self.table_id,
is_ipv6=self.is_ip6)
@@ -296,6 +304,7 @@ class VppIpMRoute(VppObject):
self.grp_addr,
self.grp_addr_len,
self.e_flags,
+ path.proto,
path.nh_itf,
path.nh_i_flags,
table_id=self.table_id,
@@ -308,6 +317,7 @@ class VppIpMRoute(VppObject):
self.grp_addr,
self.grp_addr_len,
self.e_flags,
+ 0,
0xffffffff,
0,
table_id=self.table_id,
@@ -319,6 +329,7 @@ class VppIpMRoute(VppObject):
self.grp_addr,
self.grp_addr_len,
self.e_flags,
+ 0,
0xffffffff,
0,
rpf_id=self.rpf_id,
@@ -334,16 +345,21 @@ class VppIpMRoute(VppObject):
self.grp_addr,
self.grp_addr_len,
self.e_flags,
+ path.proto,
path.nh_itf,
path.nh_i_flags,
table_id=self.table_id,
is_ipv6=self.is_ip6)
def query_vpp_config(self):
- dump = self._test.vapi.ip_fib_dump()
+ if self.is_ip6:
+ dump = self._test.vapi.ip6_mfib_dump()
+ else:
+ dump = self._test.vapi.ip_mfib_dump()
for e in dump:
- if self.grp_addr == e.address \
+ if self.grp_addr == e.grp_address \
and self.grp_addr_len == e.address_length \
+ and self.src_addr == e.src_address \
and self.table_id == e.table_id:
return True
return False
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index 5085ff2801e..893ef0d89b9 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -2057,14 +2057,17 @@ class VppPapiProvider(object):
grp_address,
grp_address_length,
e_flags,
+ next_hop_afi,
next_hop_sw_if_index,
i_flags,
+ bier_imp=0,
rpf_id=0,
table_id=0,
is_add=1,
is_ipv6=0,
is_local=0):
"""
+ IP Multicast Route add/del
"""
return self.api(
self.papi.ip_mroute_add_del,
@@ -2076,6 +2079,8 @@ class VppPapiProvider(object):
'is_add': is_add,
'is_ipv6': is_ipv6,
'is_local': is_local,
+ 'bier_imp': bier_imp,
+ 'next_hop_afi': next_hop_afi,
'grp_address_length': grp_address_length,
'grp_address': grp_address,
'src_address': src_address})
@@ -2086,6 +2091,9 @@ class VppPapiProvider(object):
def ip_mfib_dump(self):
return self.api(self.papi.ip_mfib_dump, {})
+ def ip6_mfib_dump(self):
+ return self.api(self.papi.ip6_mfib_dump, {})
+
def lisp_enable_disable(self, is_enabled):
return self.api(
self.papi.lisp_enable_disable,
@@ -2634,3 +2642,114 @@ class VppPapiProvider(object):
'nh': nh,
'is_add': is_add,
'is_ip6': is_ip6})
+
+ def bier_table_add_del(self,
+ bti,
+ mpls_label,
+ is_add=1):
+ """ BIER Table add/del """
+ return self.api(
+ self.papi.bier_table_add_del,
+ {'bt_tbl_id': {"bt_set": bti.set_id,
+ "bt_sub_domain": bti.sub_domain_id,
+ "bt_hdr_len_id": bti.hdr_len_id},
+ 'bt_label': mpls_label,
+ 'bt_is_add': is_add})
+
+ def bier_table_dump(self):
+ return self.api(self.papi.bier_table_dump, {})
+
+ def bier_route_add_del(self,
+ bti,
+ bp,
+ next_hop,
+ next_hop_label,
+ next_hop_table_id,
+ next_hop_is_ip4=1,
+ is_add=1):
+ """ BIER Route add/del """
+ return self.api(
+ self.papi.bier_route_add_del,
+ {'br_tbl_id': {"bt_set": bti.set_id,
+ "bt_sub_domain": bti.sub_domain_id,
+ "bt_hdr_len_id": bti.hdr_len_id},
+ 'br_bp': bp,
+ 'br_n_paths': 1,
+ 'br_paths': [{'next_hop': next_hop,
+ 'afi': 0,
+ 'n_labels': 1,
+ 'table_id': next_hop_table_id,
+ 'label_stack': [next_hop_label]}],
+ 'br_is_add': is_add})
+
+ def bier_route_dump(self, bti):
+ return self.api(
+ self.papi.bier_route_dump,
+ {'br_tbl_id': {"bt_set": bti.set_id,
+ "bt_sub_domain": bti.sub_domain_id,
+ "bt_hdr_len_id": bti.hdr_len_id}})
+
+ def bier_imp_add(self,
+ bti,
+ src,
+ ibytes,
+ is_add=1):
+ """ BIER Imposition Add """
+ return self.api(
+ self.papi.bier_imp_add,
+ {'bi_tbl_id': {"bt_set": bti.set_id,
+ "bt_sub_domain": bti.sub_domain_id,
+ "bt_hdr_len_id": bti.hdr_len_id},
+ 'bi_src': src,
+ 'bi_n_bytes': len(ibytes),
+ 'bi_bytes': ibytes})
+
+ def bier_imp_del(self, bi_index):
+ """ BIER Imposition del """
+ return self.api(
+ self.papi.bier_imp_del,
+ {'bi_index': bi_index})
+
+ def bier_imp_dump(self):
+ return self.api(self.papi.bier_imp_dump, {})
+
+ def bier_disp_table_add_del(self,
+ bdti,
+ is_add=1):
+ """ BIER Disposition Table add/del """
+ return self.api(
+ self.papi.bier_disp_table_add_del,
+ {'bdt_tbl_id': bdti,
+ 'bdt_is_add': is_add})
+
+ def bier_disp_table_dump(self):
+ return self.api(self.papi.bier_disp_table_dump, {})
+
+ def bier_disp_entry_add_del(self,
+ bdti,
+ bp,
+ payload_proto,
+ next_hop,
+ next_hop_tbl_id=0,
+ next_hop_rpf_id=~0,
+ next_hop_is_ip4=1,
+ is_add=1):
+ """ BIER Route add/del """
+ return self.api(
+ self.papi.bier_disp_entry_add_del,
+ {'bde_tbl_id': bdti,
+ 'bde_bp': bp,
+ 'bde_payload_proto': payload_proto,
+ 'bde_n_paths': 1,
+ 'bde_paths': [{'next_hop': next_hop,
+ 'table_id': next_hop_tbl_id,
+ 'afi': 0,
+ 'rpf_id': next_hop_rpf_id,
+ 'n_labels': 0,
+ 'label_stack': [0]}],
+ 'bde_is_add': is_add})
+
+ def bier_disp_entry_dump(self, bdti):
+ return self.api(
+ self.papi.bier_disp_entry_dump,
+ {'bde_tbl_id': bdti})