author     Dave Wallace <dwallacelf@gmail.com>  2020-11-13 14:56:19 -0500
committer  Neale Ranns <nranns@cisco.com>       2020-12-23 09:45:48 +0000
commit     dd9299c68a02b7c476cb60b8d9ac4e145503a44f (patch)
tree       3db3b11b3b2778b8adc157b4ae30bb60c21c4460 /src/vnet
parent     4b8011e72ea22d17942f2e57a34a4fa3a3d81af5 (diff)
tests: move bier tests to src/vnet/bier/test
- Refactor make test code to be co-located with the vpp feature source code

Type: test
Signed-off-by: Dave Wallace <dwallacelf@gmail.com>
Change-Id: I17003925be06d1051f18f1c24ff081790a610c23
Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/bier/test/test_bier.py  862
-rw-r--r--  src/vnet/bier/test/vpp_bier.py   293
2 files changed, 1155 insertions, 0 deletions
diff --git a/src/vnet/bier/test/test_bier.py b/src/vnet/bier/test/test_bier.py
new file mode 100644
index 00000000000..2f649bbde53
--- /dev/null
+++ b/src/vnet/bier/test/test_bier.py
@@ -0,0 +1,862 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, \
+ VppMplsTable, VppIpMRoute, VppMRoutePath, VppIpTable, \
+ MPLS_LABEL_INVALID, \
+ VppMplsLabel, FibPathProto, FibPathType
+from vpp_bier import BIER_HDR_PAYLOAD, VppBierImp, VppBierDispEntry, \
+ VppBierDispTable, VppBierTable, VppBierTableID, VppBierRoute
+from vpp_udp_encap import VppUdpEncap
+from vpp_papi import VppEnum
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+from scapy.contrib.mpls import MPLS
+from scapy.contrib.bier import BIER, BIERLength, BIFT
+
+NUM_PKTS = 67
+
+
+class TestBFIB(VppTestCase):
+ """ BIER FIB Test Case """
+
+ def test_bfib(self):
+ """ BFIB Unit Tests """
+ error = self.vapi.cli("test bier")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn("Failed", error)
+
+
+class TestBier(VppTestCase):
+ """ BIER Test Case """
+
+ def setUp(self):
+ super(TestBier, self).setUp()
+
+        # create 3 pg interfaces
+ self.create_pg_interfaces(range(3))
+
+ # create the default MPLS table
+ self.tables = []
+ tbl = VppMplsTable(self, 0)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
+ tbl = VppIpTable(self, 10)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
+        # set up the interfaces; pg2 is placed in IP table 10
+ for i in self.pg_interfaces:
+ if i == self.pg2:
+ i.set_table_ip4(10)
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.enable_mpls()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.disable_mpls()
+ i.unconfig_ip4()
+ i.set_table_ip4(0)
+ i.admin_down()
+ super(TestBier, self).tearDown()
+
+ def bier_midpoint(self, hdr_len_id, n_bytes, max_bp):
+ """BIER midpoint"""
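+        # hdr_len_id: BIERLength value encoding the bit-string length (BSL)
+        # n_bytes:    the bit-string length in bytes (BSL / 8)
+        # max_bp:     the highest bit-position for this BSL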
+
+ #
+        # Add a BIER table for sub-domain 0, set 0, and the given BSL
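+        # (77 is the local MPLS label on which BIER packets arrive here)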
+ #
+ bti = VppBierTableID(0, 0, hdr_len_id)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # A packet with no bits set gets dropped
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=hdr_len_id) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+ pkts = [p]
+
+ self.send_and_assert_no_replies(self.pg0, pkts,
+ "Empty Bit-String")
+
+ #
+ # Add a BIER route for each bit-position in the table via a different
+        # next-hop, to test that the BIER walk-and-replicate forwarding
+        # works for all bit-positions.
+ #
+ nh_routes = []
+ bier_routes = []
+ for i in range(1, max_bp+1):
+ nh = "10.0.%d.%d" % (i / 255, i % 255)
+ nh_routes.append(
+ VppIpRoute(self, nh, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(2000+i)])]))
+ nh_routes[-1].add_vpp_config()
+
+ bier_routes.append(
+ VppBierRoute(self, bti, i,
+ [VppRoutePath(nh, 0xffffffff,
+ labels=[VppMplsLabel(100+i)])]))
+ bier_routes[-1].add_vpp_config()
+
+ #
+ # A packet with all bits set gets replicated once for each bit
+ #
+ pkt_sizes = [64, 1400]
+
+ for pkt_size in pkt_sizes:
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=hdr_len_id,
+ BitString=scapy.compat.chb(255)*n_bytes) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(scapy.compat.chb(5) * pkt_size))
+ pkts = p
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(max_bp)
+
+ for rxp in rx:
+ #
+                # The packets are not necessarily sent in bit-position order.
+                # When we set up the routes above we used the bit-position to
+                # construct the out-label, so use that here to recover the BP.
+ #
+ olabel = rxp[MPLS]
+ bp = olabel.label - 2000
+
+ blabel = olabel[MPLS].payload
+ self.assertEqual(blabel.label, 100+bp)
+ self.assertEqual(blabel.ttl, 254)
+
+ bier_hdr = blabel[MPLS].payload
+
+ self.assertEqual(bier_hdr.id, 5)
+ self.assertEqual(bier_hdr.version, 0)
+ self.assertEqual(bier_hdr.length, hdr_len_id)
+ self.assertEqual(bier_hdr.entropy, 0)
+ self.assertEqual(bier_hdr.OAM, 0)
+ self.assertEqual(bier_hdr.RSV, 0)
+ self.assertEqual(bier_hdr.DSCP, 0)
+ self.assertEqual(bier_hdr.Proto, 5)
+
+                # The bit-string should have only the bit for this BP set.
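+                # e.g. bp == 10: (bp - 1) // 8 == 1 selects the second-to-last
+                # byte and 1 << ((bp - 1) % 8) == 0x02 is the byte value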
+ byte_array = [b'\0'] * (n_bytes)
+ byte_val = scapy.compat.chb(1 << (bp - 1) % 8)
+ byte_pos = n_bytes - (((bp - 1) // 8) + 1)
+ byte_array[byte_pos] = byte_val
+ bitstring = b''.join(byte_array)
+
+ self.assertEqual(len(bitstring), len(bier_hdr.BitString))
+ self.assertEqual(bitstring, bier_hdr.BitString)
+
+ #
+        # Cleanup. Not strictly necessary, but much quicker this way because
+        # the bier_fib_dump and ip_fib_dump will then be empty when the
+        # auto-cleanup kicks in.
+ #
+ for br in bier_routes:
+ br.remove_vpp_config()
+ for nhr in nh_routes:
+ nhr.remove_vpp_config()
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_midpoint_1024(self):
+ """BIER midpoint BSL:1024"""
+ self.bier_midpoint(BIERLength.BIER_LEN_1024, 128, 1024)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_midpoint_512(self):
+ """BIER midpoint BSL:512"""
+ self.bier_midpoint(BIERLength.BIER_LEN_512, 64, 512)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_midpoint_256(self):
+ """BIER midpoint BSL:256"""
+ self.bier_midpoint(BIERLength.BIER_LEN_256, 32, 256)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_midpoint_128(self):
+ """BIER midpoint BSL:128"""
+ self.bier_midpoint(BIERLength.BIER_LEN_128, 16, 128)
+
+ def test_bier_midpoint_64(self):
+ """BIER midpoint BSL:64"""
+ self.bier_midpoint(BIERLength.BIER_LEN_64, 8, 64)
+
+ def test_bier_load_balance(self):
+ """BIER load-balance"""
+
+ #
+        # Add a BIER table for sub-domain 0, set 0, and BSL 64
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_64)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # packets with varying entropy
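+        # (the entropy field feeds the BIER load-balance hash, so the
+        #  replicas should spread across all configured paths)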
+ #
+ pkts = []
+ for ii in range(257):
+ pkts.append((Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_64,
+ entropy=ii,
+ BitString=scapy.compat.chb(255)*16) /
+ IPv6(src=self.pg0.remote_ip6,
+ dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw()))
+
+ #
+ # 4 next hops
+ #
+ nhs = [{'ip': "10.0.0.1", 'label': 201},
+ {'ip': "10.0.0.2", 'label': 202},
+ {'ip': "10.0.0.3", 'label': 203},
+ {'ip': "10.0.0.4", 'label': 204}]
+
+ for nh in nhs:
+ ipr = VppIpRoute(
+ self, nh['ip'], 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(nh['label'])])])
+ ipr.add_vpp_config()
+
+ bier_route = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath(nhs[0]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]),
+ VppRoutePath(nhs[1]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)])])
+ bier_route.add_vpp_config()
+
+ rx = self.send_and_expect(self.pg0, pkts, self.pg1)
+
+ #
+        # we should have received a packet from each neighbor
+ #
+ for nh in nhs[:2]:
+ self.assertTrue(sum(p[MPLS].label == nh['label'] for p in rx))
+
+ #
+ # add the other paths
+ #
+ bier_route.update_paths(
+ [VppRoutePath(nhs[0]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]),
+ VppRoutePath(nhs[1]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]),
+ VppRoutePath(nhs[2]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]),
+ VppRoutePath(nhs[3]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)])])
+
+ rx = self.send_and_expect(self.pg0, pkts, self.pg1)
+
+ for nh in nhs:
+ self.assertTrue(sum(p[MPLS].label == nh['label'] for p in rx))
+
+ #
+ # remove first two paths
+ #
+ bier_route.remove_path(VppRoutePath(nhs[0]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]))
+ bier_route.remove_path(VppRoutePath(nhs[1]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]))
+
+ rx = self.send_and_expect(self.pg0, pkts, self.pg1)
+ for nh in nhs[2:]:
+ self.assertTrue(sum(p[MPLS].label == nh['label'] for p in rx))
+
+ #
+        # remove the last of the paths, deleting the entry
+ #
+ bier_route.remove_all_paths()
+
+ self.send_and_assert_no_replies(self.pg0, pkts)
+
+ def test_bier_head(self):
+ """BIER head"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+ # Add a BIER table for sub-domain 0, set 0, and BSL 256
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # 2 bit positions via two next hops
+ #
+ nh1 = "10.0.0.1"
+ nh2 = "10.0.0.2"
+ ip_route_1 = VppIpRoute(self, nh1, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(2001)])])
+ ip_route_2 = VppIpRoute(self, nh2, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(2002)])])
+ ip_route_1.add_vpp_config()
+ ip_route_2.add_vpp_config()
+
+ bier_route_1 = VppBierRoute(self, bti, 1,
+ [VppRoutePath(nh1, 0xffffffff,
+ labels=[VppMplsLabel(101)])])
+ bier_route_2 = VppBierRoute(self, bti, 2,
+ [VppRoutePath(nh2, 0xffffffff,
+ labels=[VppMplsLabel(102)])])
+ bier_route_1.add_vpp_config()
+ bier_route_2.add_vpp_config()
+
+ #
+ # An imposition object with both bit-positions set
+ #
+ bi = VppBierImp(self, bti, 333, scapy.compat.chb(0x3) * 32)
+ bi.add_vpp_config()
+
+ #
+        # Add a multicast route that will forward into the BIER domain
+ #
+ route_ing_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi.bi_index)])
+ route_ing_232_1_1_1.add_vpp_config()
+
+ #
+ # inject an IP packet. We expect it to be BIER encapped and
+ # replicated.
+ #
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234))
+
+ self.pg0.add_stream([p])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(2)
+
+ #
+        # Encap stack is: eth, MPLS, MPLS, BIER
+ #
+ igp_mpls = rx[0][MPLS]
+ self.assertEqual(igp_mpls.label, 2001)
+ self.assertEqual(igp_mpls.ttl, 64)
+ self.assertEqual(igp_mpls.s, 0)
+ bier_mpls = igp_mpls[MPLS].payload
+ self.assertEqual(bier_mpls.label, 101)
+ self.assertEqual(bier_mpls.ttl, 64)
+ self.assertEqual(bier_mpls.s, 1)
+ self.assertEqual(rx[0][BIER].length, 2)
+
+ igp_mpls = rx[1][MPLS]
+ self.assertEqual(igp_mpls.label, 2002)
+ self.assertEqual(igp_mpls.ttl, 64)
+ self.assertEqual(igp_mpls.s, 0)
+ bier_mpls = igp_mpls[MPLS].payload
+ self.assertEqual(bier_mpls.label, 102)
+ self.assertEqual(bier_mpls.ttl, 64)
+ self.assertEqual(bier_mpls.s, 1)
+        self.assertEqual(rx[1][BIER].length, 2)
+
+ def test_bier_tail(self):
+ """BIER Tail"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+ # Add a BIER table for sub-domain 0, set 0, and BSL 256
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # disposition table
+ #
+ bdt = VppBierDispTable(self, 8)
+ bdt.add_vpp_config()
+
+ #
+ # BIER route in table that's for-us
+ #
+ bier_route_1 = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ nh_table_id=8)])
+ bier_route_1.add_vpp_config()
+
+ #
+ # An entry in the disposition table
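+        # keyed on the sender's BFR-ID (99), which the matching test
+        # packet carries in its BIER header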
+ #
+ bier_de_1 = VppBierDispEntry(self, bdt.id, 99,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ "0.0.0.0", 0, rpf_id=8192)
+ bier_de_1.add_vpp_config()
+
+ #
+ # A multicast route to forward post BIER disposition
+ #
+ route_eg_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_1.add_vpp_config()
+ route_eg_232_1_1_1.update_rpf_id(8192)
+
+ #
+ # A packet with all bits set gets spat out to BP:1
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256,
+ BitString=scapy.compat.chb(255)*32,
+ BFRID=99) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+
+ self.send_and_expect(self.pg0, [p], self.pg1)
+
+ #
+ # A packet that does not match the Disposition entry gets dropped
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256,
+ BitString=scapy.compat.chb(255)*32,
+ BFRID=77) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+ self.send_and_assert_no_replies(self.pg0, p*2,
+ "no matching disposition entry")
+
+ #
+ # Add the default route to the disposition table
+ #
+ bier_de_2 = VppBierDispEntry(self, bdt.id, 0,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ "0.0.0.0", 0, rpf_id=8192)
+ bier_de_2.add_vpp_config()
+
+ #
+ # now the previous packet is forwarded
+ #
+ self.send_and_expect(self.pg0, [p], self.pg1)
+
+ #
+ # A multicast route to forward post BIER disposition that needs
+ # a check against sending back into the BIER core
+ #
+ bi = VppBierImp(self, bti, 333, scapy.compat.chb(0x3) * 32)
+ bi.add_vpp_config()
+
+ route_eg_232_1_1_2 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.2", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi.bi_index),
+ VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_2.add_vpp_config()
+ route_eg_232_1_1_2.update_rpf_id(8192)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256,
+ BitString=scapy.compat.chb(255)*32,
+ BFRID=77) /
+ IP(src="1.1.1.1", dst="232.1.1.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+ self.send_and_expect(self.pg0, [p], self.pg1)
+
+ def bier_e2e(self, hdr_len_id, n_bytes, max_bp):
+ """ BIER end-to-end"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+        # Add a BIER table for sub-domain 0, set 0, and the given BSL
+ #
+ bti = VppBierTableID(0, 0, hdr_len_id)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
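+        # bit-strings with only the lowest (BP 1) and only the highest
+        # (BP max_bp) bit-positions set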
+ lowest = [b'\0'] * (n_bytes)
+ lowest[-1] = scapy.compat.chb(1)
+ highest = [b'\0'] * (n_bytes)
+ highest[0] = scapy.compat.chb(128)
+
+ #
+        # Imposition objects using those bit-strings
+ #
+ bi_low = VppBierImp(self, bti, 333, lowest)
+ bi_low.add_vpp_config()
+ bi_high = VppBierImp(self, bti, 334, highest)
+ bi_high.add_vpp_config()
+
+ #
+        # Add a multicast route that will forward into the BIER domain
+ #
+ route_ing_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi_low.bi_index)])
+ route_ing_232_1_1_1.add_vpp_config()
+ route_ing_232_1_1_2 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.2", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi_high.bi_index)])
+ route_ing_232_1_1_2.add_vpp_config()
+
+ #
+ # disposition table 8
+ #
+ bdt = VppBierDispTable(self, 8)
+ bdt.add_vpp_config()
+
+ #
+ # BIER routes in table that are for-us, resolving through
+ # disp table 8.
+ #
+ bier_route_1 = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ nh_table_id=8)])
+ bier_route_1.add_vpp_config()
+ bier_route_max = VppBierRoute(
+ self, bti, max_bp,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ nh_table_id=8)])
+ bier_route_max.add_vpp_config()
+
+ #
+        # Entries in the disposition table for senders 333 and 334,
+        # looked up in VRF 10
+ #
+ bier_de_1 = VppBierDispEntry(self, bdt.id, 333,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ "0.0.0.0", 10, rpf_id=8192)
+ bier_de_1.add_vpp_config()
+        bier_de_2 = VppBierDispEntry(self, bdt.id, 334,
+                                     BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+                                     FibPathProto.FIB_PATH_NH_PROTO_BIER,
+                                     "0.0.0.0", 10, rpf_id=8193)
+        bier_de_2.add_vpp_config()
+
+ #
+        # Add multicast routes that will forward the traffic
+ # post-disposition
+ #
+ route_eg_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ table_id=10,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_1.add_vpp_config()
+ route_eg_232_1_1_1.update_rpf_id(8192)
+ route_eg_232_1_1_2 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.2", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ table_id=10,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_2.add_vpp_config()
+ route_eg_232_1_1_2.update_rpf_id(8193)
+
+ #
+ # inject a packet in VRF-0. We expect it to be BIER encapped,
+ # replicated, then hit the disposition and be forwarded
+ # out of VRF 10, i.e. on pg1
+ #
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(scapy.compat.chb(5) * 32))
+
+ rx = self.send_and_expect(self.pg0, p*NUM_PKTS, self.pg1)
+
+ self.assertEqual(rx[0][IP].src, "1.1.1.1")
+ self.assertEqual(rx[0][IP].dst, "232.1.1.1")
+
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(scapy.compat.chb(5) * 512))
+
+ rx = self.send_and_expect(self.pg0, p*NUM_PKTS, self.pg1)
+ self.assertEqual(rx[0][IP].src, "1.1.1.1")
+ self.assertEqual(rx[0][IP].dst, "232.1.1.2")
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_e2e_1024(self):
+ """ BIER end-to-end BSL:1024"""
+ self.bier_e2e(BIERLength.BIER_LEN_1024, 128, 1024)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_e2e_512(self):
+ """ BIER end-to-end BSL:512"""
+ self.bier_e2e(BIERLength.BIER_LEN_512, 64, 512)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_e2e_256(self):
+ """ BIER end-to-end BSL:256"""
+ self.bier_e2e(BIERLength.BIER_LEN_256, 32, 256)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_e2e_128(self):
+ """ BIER end-to-end BSL:128"""
+ self.bier_e2e(BIERLength.BIER_LEN_128, 16, 128)
+
+ def test_bier_e2e_64(self):
+ """ BIER end-to-end BSL:64"""
+ self.bier_e2e(BIERLength.BIER_LEN_64, 8, 64)
+
+ def test_bier_head_o_udp(self):
+ """BIER head over UDP"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+ # Add a BIER table for sub-domain 1, set 0, and BSL 256
+ #
+ bti = VppBierTableID(1, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+        # 1 bit-position via 1 next-hop
+ #
+ nh1 = "10.0.0.1"
+ ip_route = VppIpRoute(self, nh1, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(2001)])])
+ ip_route.add_vpp_config()
+
+ udp_encap = VppUdpEncap(self,
+ self.pg0.local_ip4,
+ nh1,
+ 330, 8138)
+ udp_encap.add_vpp_config()
+
+ bier_route = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
+ next_hop_id=udp_encap.id)])
+ bier_route.add_vpp_config()
+
+ #
+        # Two imposition objects with all bit-positions set.
+        # Only the second is used; creating two ensures the route add
+        # is exercised with a non-zero imposition index.
+ #
+ bi = VppBierImp(self, bti, 333, scapy.compat.chb(0xff) * 32)
+ bi.add_vpp_config()
+ bi2 = VppBierImp(self, bti, 334, scapy.compat.chb(0xff) * 32)
+ bi2.add_vpp_config()
+
+ #
+        # Add a multicast route that will forward into the BIER domain
+ #
+ route_ing_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi2.bi_index)])
+ route_ing_232_1_1_1.add_vpp_config()
+
+ #
+        # inject an IP packet. We expect it to be BIER and UDP encapped.
+ #
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234))
+
+ self.pg0.add_stream([p])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+
+ #
+        # Encap stack is: eth, IP, UDP, BIFT, BIER
+ #
+ self.assertEqual(rx[0][IP].src, self.pg0.local_ip4)
+ self.assertEqual(rx[0][IP].dst, nh1)
+ self.assertEqual(rx[0][UDP].sport, 330)
+ self.assertEqual(rx[0][UDP].dport, 8138)
+ self.assertEqual(rx[0][BIFT].bsl, BIERLength.BIER_LEN_256)
+ self.assertEqual(rx[0][BIFT].sd, 1)
+ self.assertEqual(rx[0][BIFT].set, 0)
+ self.assertEqual(rx[0][BIFT].ttl, 64)
+ self.assertEqual(rx[0][BIER].length, 2)
+
+ def test_bier_tail_o_udp(self):
+ """BIER Tail over UDP"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+        # Add a BIER table for sub-domain 1, set 0, and BSL 256
+ #
+ bti = VppBierTableID(1, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, MPLS_LABEL_INVALID)
+ bt.add_vpp_config()
+
+ #
+ # disposition table
+ #
+ bdt = VppBierDispTable(self, 8)
+ bdt.add_vpp_config()
+
+ #
+ # BIER route in table that's for-us
+ #
+ bier_route_1 = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ nh_table_id=8)])
+ bier_route_1.add_vpp_config()
+
+ #
+ # An entry in the disposition table
+ #
+ bier_de_1 = VppBierDispEntry(self, bdt.id, 99,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ "0.0.0.0", 0, rpf_id=8192)
+ bier_de_1.add_vpp_config()
+
+ #
+ # A multicast route to forward post BIER disposition
+ #
+ route_eg_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_1.add_vpp_config()
+ route_eg_232_1_1_1.update_rpf_id(8192)
+
+ #
+ # A packet with all bits set gets spat out to BP:1
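+        # The BIER table has no MPLS label (MPLS_LABEL_INVALID above), so
+        # the packet reaches it via the BIER-over-UDP/BIFT encap (dport 8138)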
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=333, dport=8138) /
+ BIFT(sd=1, set=0, bsl=2, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256,
+ BitString=scapy.compat.chb(255)*32,
+ BFRID=99) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+
+ rx = self.send_and_expect(self.pg0, [p], self.pg1)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/src/vnet/bier/test/vpp_bier.py b/src/vnet/bier/test/vpp_bier.py
new file mode 100644
index 00000000000..6e087a8ee0b
--- /dev/null
+++ b/src/vnet/bier/test/vpp_bier.py
@@ -0,0 +1,293 @@
+"""
+ BIER Tables and Routes
+"""
+
+import socket
+from vpp_object import VppObject
+from vpp_ip_route import MPLS_LABEL_INVALID, VppRoutePath, VppMplsLabel
+
+
+class BIER_HDR_PAYLOAD:
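+    """
+    BIER header next-protocol (payload) values used by the VPP BIER API
+    """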
+ BIER_HDR_PROTO_MPLS_DOWN_STREAM = 1
+ BIER_HDR_PROTO_MPLS_UP_STREAM = 2
+ BIER_HDR_PROTO_ETHERNET = 3
+ BIER_HDR_PROTO_IPV4 = 4
+ BIER_HDR_PROTO_IPV6 = 5
+ BIER_HDR_PROTO_VXLAN = 6
+ BIER_HDR_PROTO_CTRL = 7
+ BIER_HDR_PROTO_OAM = 8
+
+
+class VppBierTableID():
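+    """
+    BIER table identifier: sub-domain, set, and header-length (BSL) id
+    """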
+ def __init__(self, sub_domain_id, set_id, hdr_len_id):
+ self.set_id = set_id
+ self.sub_domain_id = sub_domain_id
+ self.hdr_len_id = hdr_len_id
+
+
+def find_bier_table(test, bti):
+ tables = test.vapi.bier_table_dump()
+ for t in tables:
+ if bti.set_id == t.bt_tbl_id.bt_set \
+ and bti.sub_domain_id == t.bt_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == t.bt_tbl_id.bt_hdr_len_id:
+ return True
+ return False
+
+
+def find_bier_route(test, bti, bp):
+ routes = test.vapi.bier_route_dump(bti)
+ for r in routes:
+ if bti.set_id == r.br_route.br_tbl_id.bt_set \
+ and bti.sub_domain_id == r.br_route.br_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == r.br_route.br_tbl_id.bt_hdr_len_id \
+ and bp == r.br_route.br_bp:
+ return True
+ return False
+
+
+def find_bier_disp_table(test, bdti):
+ tables = test.vapi.bier_disp_table_dump()
+ for t in tables:
+ if bdti == t.bdt_tbl_id:
+ return True
+ return False
+
+
+def find_bier_disp_entry(test, bdti, bp):
+ entries = test.vapi.bier_disp_entry_dump(bdti)
+ for e in entries:
+ if bp == e.bde_bp \
+ and bdti == e.bde_tbl_id:
+ return True
+ return False
+
+
+def find_bier_imp(test, bti, bp):
+ imps = test.vapi.bier_imp_dump()
+ for i in imps:
+ if bti.set_id == i.bi_tbl_id.bt_set \
+ and bti.sub_domain_id == i.bi_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == i.bi_tbl_id.bt_hdr_len_id \
+ and bp == i.bi_src:
+ return True
+ return False
+
+
+class VppBierTable(VppObject):
+ """
+ BIER Table
+ """
+
+ def __init__(self, test, id, mpls_label):
+ self._test = test
+ self.id = id
+ self.mpls_label = mpls_label
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_table_add_del(
+ self.id,
+ self.mpls_label,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_table_add_del(
+ self.id,
+ self.mpls_label,
+ is_add=0)
+
+ def object_id(self):
+ return "bier-table;[%d:%d:%d]" % (self.id.set_id,
+ self.id.sub_domain_id,
+ self.id.hdr_len_id)
+
+ def query_vpp_config(self):
+ return find_bier_table(self._test, self.id)
+
+
+class VppBierRoute(VppObject):
+ """
+ BIER route
+ """
+
+ def __init__(self, test, tbl_id, bp, paths):
+ self._test = test
+ self.tbl_id = tbl_id
+ self.bp = bp
+ self.paths = paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ self.encoded_paths,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ self.encoded_paths,
+ is_add=0)
+
+ def update_paths(self, paths):
+ self.paths = paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ self.encoded_paths,
+ is_replace=1)
+
+ def add_path(self, path):
+ self.encoded_paths.append(path.encode())
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ [path.encode()],
+ is_add=1,
+ is_replace=0)
+ self.paths.append(path)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_path(self, path):
+ self.encoded_paths.remove(path.encode())
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ [path.encode()],
+ is_add=0,
+ is_replace=0)
+ self.paths.remove(path)
+
+ def remove_all_paths(self):
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ [],
+ is_add=0,
+ is_replace=1)
+ self.paths = []
+
+ def object_id(self):
+ return "bier-route;[%d:%d:%d:%d]" % (self.tbl_id.set_id,
+ self.tbl_id.sub_domain_id,
+ self.tbl_id.hdr_len_id,
+ self.bp)
+
+ def query_vpp_config(self):
+ return find_bier_route(self._test, self.tbl_id, self.bp)
+
+
+class VppBierImp(VppObject):
+ """
+    BIER Imposition
+ """
+
+ def __init__(self, test, tbl_id, src, ibytes):
+ self._test = test
+ self.tbl_id = tbl_id
+ self.ibytes = ibytes
+ self.src = src
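+        # src is the sender BFR-ID carried in the BIER header of imposed
+        # packets; disposition entries at the tail match on it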
+
+ def add_vpp_config(self):
+ res = self._test.vapi.bier_imp_add(
+ self.tbl_id,
+ self.src,
+ self.ibytes)
+ self.bi_index = res.bi_index
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_imp_del(
+ self.bi_index)
+
+ def object_id(self):
+ return "bier-imp;[%d:%d:%d:%d]" % (self.tbl_id.set_id,
+ self.tbl_id.sub_domain_id,
+ self.tbl_id.hdr_len_id,
+ self.src)
+
+ def query_vpp_config(self):
+ return find_bier_imp(self._test, self.tbl_id, self.src)
+
+
+class VppBierDispTable(VppObject):
+ """
+ BIER Disposition Table
+ """
+
+ def __init__(self, test, id):
+ self._test = test
+ self.id = id
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_disp_table_add_del(
+ self.id,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_disp_table_add_del(
+ self.id,
+ is_add=0)
+
+ def object_id(self):
+ return "bier-disp-table;[%d]" % (self.id)
+
+ def query_vpp_config(self):
+ return find_bier_disp_table(self._test, self.id)
+
+
+class VppBierDispEntry(VppObject):
+ """
+ BIER Disposition Entry
+ """
+
+ def __init__(self, test, tbl_id, bp, payload_proto, nh_proto,
+ nh, nh_tbl, rpf_id=~0):
+ self._test = test
+ self.tbl_id = tbl_id
+ self.nh_tbl = nh_tbl
+ self.nh_proto = nh_proto
+ self.bp = bp
+ self.payload_proto = payload_proto
+ self.rpf_id = rpf_id
+ self.nh = socket.inet_pton(socket.AF_INET, nh)
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_disp_entry_add_del(
+ self.tbl_id,
+ self.bp,
+ self.payload_proto,
+ self.nh_proto,
+ self.nh,
+ self.nh_tbl,
+ self.rpf_id,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_disp_entry_add_del(
+ self.tbl_id,
+ self.bp,
+ self.payload_proto,
+ self.nh_proto,
+ self.nh,
+ self.nh_tbl,
+ self.rpf_id,
+ is_add=0)
+
+ def object_id(self):
+ return "bier-disp-entry;[%d:%d]" % (self.tbl_id,
+ self.bp)
+
+ def query_vpp_config(self):
+ return find_bier_disp_entry(self._test, self.tbl_id, self.bp)