path: root/test/test_ip4.py
Age | Commit message | Author | Files | Lines

2018-12-17 | Fix TestIPv4FibCrud interdependencies. | Paul Vinciguerra | 1 | -0/+14

The tests fail if run independently or out of order. This change breaks the dependency between the tests.

    ERROR: Add 1k routes
    ------------------------------------------------------------------------------
    Traceback (most recent call last):
      File "/vpp/test/test_ip4.py", line 514, in test_3_add_new_routes
        self.deleted_routes.remove(x)
    ValueError: list.remove(x): x not in list

Change-Id: I344ceba6bd8b86556f92e50080be6c43092b9faf
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>

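The failure mode is the classic ordering bug: one test mutates class-level state that a later test consumes, so the suite only passes in one order. A minimal sketch of the order-independent shape (hypothetical helper name; the real fix is in test_ip4.py):

    import unittest

    class TestFibCrud(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            super(TestFibCrud, cls).setUpClass()
            cls.configured_routes = []
            cls.deleted_routes = []

        def add_routes(self, start, count):
            # hypothetical helper standing in for the test's route config code
            routes = ["10.0.%d.%d/32" % (i // 255, i % 255)
                      for i in range(start, start + count)]
            self.configured_routes.extend(routes)
            return routes

        def test_add_new_routes(self):
            # build and verify this test's own routes; do not assume an
            # earlier test already populated deleted_routes
            for r in self.add_routes(1, 10):
                self.assertIn(r, self.configured_routes)

    if __name__ == '__main__':
        unittest.main()
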
2018-12-12 | PAPI: Allow ipaddress object as argument and return values from API calls | Ole Troan | 1 | -6/+6

The API calls that use any of vl_api_address_t, vl_api_ip4_address_t, vl_api_ip6_address_t, vl_api_prefix_t, vl_api_ip4_prefix_t or vl_api_ip6_prefix_t now accept either the old-style dictionary, a text string ('2001:db8::/32') or an ipaddress object. Unless a call is made with '_no_type_conversion': True, it will also return an appropriate ipaddress object.

Change-Id: I84e4a1577bd57f6b5ae725f316a523988b6a955b
Signed-off-by: Ole Troan <ot@cisco.com>

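For illustration, the three call styles map onto Python's stdlib ipaddress module as follows (a sketch; the dict field names are illustrative of the old-style shape, and the PAPI plumbing itself lives in vpp_papi):

    import ipaddress

    # Equivalent ways to express the same prefix; after this change a
    # PAPI call taking a vl_api_prefix_t accepts any of them.
    as_text = '2001:db8::/32'
    as_object = ipaddress.ip_network(as_text)
    as_dict_style = {'address': '2001:db8::', 'len': 32}  # illustrative

    assert isinstance(as_object, ipaddress.IPv6Network)
    assert str(as_object) == as_text

    # Addresses convert cleanly to and from wire format too.
    addr = ipaddress.ip_address('192.0.2.1')
    assert addr.packed == b'\xc0\x00\x02\x01'
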
2018-12-10 | IP-local: any IP can appear as the source (VPP-1522) | Neale Ranns | 1 | -0/+19

Change-Id: Ib0d9b533d72c899b77c9a7bd1daa9b4a55b7221c
Signed-off-by: Neale Ranns <nranns@cisco.com>

2018-12-06 | API: Change ip4_address and ip6_address to use type alias. | Ole Troan | 1 | -1/+1

Change-Id: Id8669bbadd1d6b2054865a310a654e9b38d1667d
Signed-off-by: Ole Troan <ot@cisco.com>

2018-12-05 | VPP-1508 python3 tests: xrange | Paul Vinciguerra | 1 | -11/+13

xrange is not supported under Python 3. Use six.moves.range instead.

    py27 runtests: commands[5] | stestr --test-path ./test run --slowest test_ip4 test_ip6
    ==============================================================================
    IPv4 disabled
    ==============================================================================
    ==============================================================================
    ICMP Echo Test Case
    ==============================================================================
    {0} test.test_ip4.TestIPDisabled.test_ip_disabled [5.256819s] ... ok
    07:24:41,902 Couldn't stat : /tmp/vpp-unittest-TestICMPEcho-hU4IsB/stats.sock
    {1} test.test_ip4.TestICMPEcho.test_icmp_echo [0.367035s] ... ok
    ==============================================================================
    IPv4 Deaggregate Routes
    ==============================================================================
    ==============================================================================
    IPv4 Input Exceptions
    ==============================================================================
    07:24:47,314 Couldn't stat : /tmp/vpp-unittest-TestIPDeag-eE1VgC/stats.sock
    {1} test.test_ip4.TestIPDeag.test_ip_deag [5.895646s] ... ok
    {0} test.test_ip4.TestIPInput.test_ip_input [5.819001s] ... ok
    ==============================================================================
    IPv4 longest Prefix Match
    ... output truncated ...

    ==============================
    Failed 4 tests - output below:
    ==============================
    test.test_ip4.TestIPv4FibCrud.test_3_add_new_routes
    ---------------------------------------------------
    Captured traceback:
    ~~~~~~~~~~~~~~~~~~~
    Traceback (most recent call last):
      File "/vpp/test/test_ip4.py", line 509, in test_3_add_new_routes
        self.deleted_routes.remove(x)
    ValueError: list.remove(x): x not in list

    test.test_ip4.TestIPv4FibCrud.test_2_del_routes
    -----------------------------------------------
    Captured traceback:
    ~~~~~~~~~~~~~~~~~~~
    Traceback (most recent call last):
      File "/vpp/test/test_ip4.py", line 478, in test_2_del_routes
        self.configured_routes.remove(x)
    ValueError: list.remove(x): x not in list

    test.test_ip4_vrf_multi_instance.TestIp4VrfMultiInst.test_ip4_vrf_03
    --------------------------------------------------------------------
    Captured traceback:
    ~~~~~~~~~~~~~~~~~~~
    Traceback (most recent call last):
      File "/vpp/test/test_ip4_vrf_multi_instance.py", line 465, in test_ip4_vrf_03
        self.create_vrf_and_assign_interfaces(1)
      File "/vpp/test/test_ip4_vrf_multi_instance.py", line 189, in create_vrf_and_assign_interfaces
        pg_if.set_table_ip4(vrf_id)
      File "/vpp/test/vpp_interface.py", line 322, in set_table_ip4
        self.sw_if_index, 0, self.ip4_table_id)
      File "/vpp/test/vpp_papi_provider.py", line 264, in sw_interface_set_table
        'vrf_id': table_id})
      File "/vpp/test/vpp_papi_provider.py", line 196, in api
        raise UnexpectedApiReturnValueError(msg)
    test.vpp_papi_provider.UnexpectedApiReturnValueError: API call failed, expected 0 return value instead of -114 in sw_interface_set_table_reply(_0=91, context=1007, retval=-114)

    test.test_ip4_vrf_multi_instance.TestIp4VrfMultiInst.test_ip4_vrf_02
    --------------------------------------------------------------------
    Captured traceback:
    ~~~~~~~~~~~~~~~~~~~
    Traceback (most recent call last):
      File "/vpp/test/test_ip4_vrf_multi_instance.py", line 445, in test_ip4_vrf_02
        self.reset_vrf_and_remove_from_vrf_list(1)
      File "/vpp/test/test_ip4_vrf_multi_instance.py", line 208, in reset_vrf_and_remove_from_vrf_list
        self.vapi.reset_fib(vrf_id, is_ipv6=0)
      File "/vpp/test/vpp_papi_provider.py", line 1137, in reset_fib
        'is_ipv6': is_ipv6,
      File "/vpp/test/vpp_papi_provider.py", line 196, in api
        raise UnexpectedApiReturnValueError(msg)
    test.vpp_papi_provider.UnexpectedApiReturnValueError: API call failed, expected 0 return value instead of -3 in reset_fib_reply(_0=259, context=1198, retval=-3)

    ======
    Totals
    ======
    Ran: 57 tests in 266.0000 sec.
     - Passed: 53
     - Skipped: 0
     - Expected Fail: 0
     - Unexpected Success: 0
     - Failed: 4
    Sum of execute time for each test: 157.3925 sec.

    ==============
    Worker Balance
    ==============
     - Worker 0 (29 tests) => 0:03:52.608995
     - Worker 1 (28 tests) => 0:04:08.615473

    Test id                                        Runtime (s)
    ---------------------------------------------  -----------
    test.test_ip_mcast.TestIPMcast.test_ip6_mcast  8.535
    test.test_ip4.TestIPPunt.test_ip_punt          8.082
    test.test_ip6.TestIP6Punt.test_ip_punt         6.582
    test.test_ip6.TestIPDeag.test_ip_deag          6.175
    test.test_ip6.TestIPv6.test_ns                 6.171
    test.test_ip4.TestIPDeag.test_ip_deag          5.896
    test.test_ip6.TestIPv6.test_fib                5.846
    test.test_ip4.TestIPInput.test_ip_input        5.819
    test.test_ip6.TestIPv6.test_rs                 5.737
    test.test_ip4.TestIPv4.test_fib                5.267
    ERROR: InvocationError for command '/vpp/.tox/py27/bin/stestr --test-path ./test run --slowest test_ip4 test_ip6' (exited with code 1)
    ______________________________________________________ summary ______________________________________________________
    ERROR: py27: commands failed

Change-Id: Id9f6ecb4897386f790d82ab908963e4971a3aac8
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>

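The portable pattern is a one-line import, so call sites stay unchanged; six.moves.range aliases the lazy xrange on Python 2 and the builtin range on Python 3:

    from six.moves import range  # xrange on Python 2, builtin range on Python 3

    total = 0
    for i in range(1000):        # lazy on both interpreters
        total += i
    assert total == 499500
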
2018-11-29 | VPP-1507: Added binary api to dump configured ip_punt_redirect | Pavel Kotucek | 1 | -4/+37

Change-Id: I790f7785e183cc9aaffd5b593617c4e12a32e20d
Signed-off-by: Pavel Kotucek <pavel.kotucek@pantheon.tech>

2018-11-27 | VPP-1508 python3 tests: .encode('hex') | Paul Vinciguerra | 1 | -6/+5

Change to binascii.hexlify() for consistent behavior.

Change-Id: Ie430cdd1ffeb6510db4aa037546e42d85992093b
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>

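str.encode('hex') only exists on Python 2, whereas binascii.hexlify() behaves the same on both interpreters; a quick illustration:

    import binascii

    data = b'\xde\xad\xbe\xef'
    hexed = binascii.hexlify(data)

    assert hexed == b'deadbeef'               # bytes on both Python 2 and 3
    assert binascii.unhexlify(hexed) == data  # round-trips
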
2018-11-26 | Python3 tests: Fix asserts. | Paul Vinciguerra | 1 | -2/+2

Use assert(Not)Equal().
Use assert{Greater,Less}[Equal].

Change-Id: I7c14570b8dce463ee13a67e9c1f10beb1a0308a8
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>

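The point of the richer assert methods is the failure message: a bare assertTrue(a == b) only reports False, while the specific methods report both operands. A minimal sketch (invented values for illustration):

    import unittest

    class AssertStyle(unittest.TestCase):
        def test_counts(self):
            rx_count, tx_count = 5, 5
            # assertTrue(rx_count == tx_count) reports only 'False' on failure;
            # assertEqual reports both operands, e.g. '5 != 7'.
            self.assertEqual(rx_count, tx_count)
            self.assertGreaterEqual(rx_count, 0)
            self.assertLessEqual(rx_count, tx_count)

    if __name__ == '__main__':
        unittest.main()
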
2018-10-22 | Fix buffer overflow when fragmenting packets (VPP-1383) | Juraj Sloboda | 1 | -0/+50

Change-Id: Idcda9ae55fa2efb0b2e928bac3e8e86ff8d19eba
Signed-off-by: Juraj Sloboda <jsloboda@cisco.com>

2018-10-15 | VPP-1448: Fix error when recursing down the trie. | mu.duojiao | 1 | -0/+49

Change-Id: Idfed8243643780d3f52dfe6e6ec621c440daa6ae
Signed-off-by: mu.duojiao <mu.duojiao@zte.com.cn>

2018-09-07 | IP route update fix when multipath and drop set | Neale Ranns | 1 | -1/+27

Change-Id: I9cec7486cb6e3c5261d74d2b15a4d19469285a30
Signed-off-by: Neale Ranns <nranns@cisco.com>

2018-08-03 | Loop counter to prevent an infinite number of lookups per-packet | Neale Ranns | 1 | -0/+18

Change-Id: I59235d11baac18785a4c90cdaf14e8f3ddf06dab
Signed-off-by: Neale Ranns <neale.ranns@cisco.com>

2018-07-20 | IP directed broadcast | Neale Ranns | 1 | -0/+69

With IP directed broadcast enabled, a packet addressed to the interface's subnet broadcast address will be sent as an L2 broadcast on the interface. With it disabled, the packet is dropped. The feature is disabled by default, which preserves the current behaviour.

Change-Id: If154cb92e64834e97a541b32624354348a0eafb3
Signed-off-by: Neale Ranns <nranns@cisco.com>

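In test terms, a hedged sketch in the style of this file (the interface names, MACs and the 172.16.1.0/24 subnet are assumptions, not the commit's actual test):

    from scapy.layers.l2 import Ether
    from scapy.layers.inet import IP, UDP

    p = (Ether(src='02:01:00:00:00:01', dst='02:01:00:00:00:02') /
         IP(src='1.1.1.1', dst='172.16.1.255') /  # subnet broadcast address
         UDP(sport=1234, dport=1234))

    # enabled:  the forwarded packet should carry the all-ones L2 destination,
    #           i.e. rx[Ether].dst == 'ff:ff:ff:ff:ff:ff'
    # disabled: expect no replies (the packet is dropped)
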
2018-07-02 | make_test: add icmp packet size sweep and icmp echo tests | Jan Gelety | 1 | -24/+100

Jira: CSIT-1141

Change-Id: I162bb4e718bff188abefc7b2f33501de9c55bb03
Signed-off-by: Jan Gelety <jgelety@cisco.com>

2018-06-11 | MTU: Software interface / Per-protocol MTU support | Ole Troan | 1 | -2/+4

This patch separates the setting of hardware interface and software interface MTU. Software MTU is the L2 payload MTU (i.e. not including the L2 header). Per-protocol MTUs for IPv4, IPv6 and MPLS can also be set. Currently only IP4 and IP6 are enabled in the adjacency / rewrite code. Documentation in src/vnet/MTU.md.

Change-Id: Iee2fd6f0bbc8210748dd8e073ab9fab87d323690
Signed-off-by: Ole Troan <ot@cisco.com>

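The distinction is easiest to see as arithmetic: hardware MTU counts the L2 header, software MTU is the L2 payload. A quick worked example, assuming plain Ethernet (14-byte header, no FCS) and illustrative per-protocol values:

    ETH_HDR = 14                 # dst MAC + src MAC + ethertype

    hw_mtu = 1514                # hardware (wire) MTU
    sw_mtu = hw_mtu - ETH_HDR    # software MTU = L2 payload
    assert sw_mtu == 1500

    # per-protocol MTUs sit at or below the software MTU (illustrative values)
    proto_mtu = {'ip4': 1480, 'ip6': 1480, 'mpls': 1500}
    assert all(m <= sw_mtu for m in proto_mtu.values())
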
2018-05-23 | VPP-1283: IPv4 PMTU missing MTU value in ICMP4 message. | Ole Troan | 1 | -1/+1

Change-Id: I7a4133c59ff45b0744b48e246a049d9f015026fc
Signed-off-by: Ole Troan <ot@cisco.com>

2018-04-13Revert "MTU: Setting of MTU on software interface (instead of hardware ↵Damjan Marion1-1/+1
interface)" This reverts commit 70083ee74c3141bbefb185525315f1b34497dcaa. Reverting as this patch is causing following crash: 0: /home/damarion/cisco/vpp3/build-data/../src/vnet/devices/devices.h:131 (vnet_get_device_input_thread_index) assertion `queue_id < vec_len (hw->input_node_thread_index_by_queue)' fails Aborted Change-Id: Ie2a365032110b1f67be7a9d832885b9899813d39 Signed-off-by: Damjan Marion <damarion@cisco.com>
2018-04-13 | MTU: Setting of MTU on software interface (instead of hardware interface) | Ole Troan | 1 | -1/+1

Change-Id: I98bd454a761a1032738a21edeb0fe847e801f901
Signed-off-by: Ole Troan <ot@cisco.com>

2018-02-12 | Improve MTU handling | Neale Ranns | 1 | -0/+26

- setting the MTU on an interface updates the L3 max bytes too
- the value cached in the adjacency is also updated
- MTU exceeded generates an ICMP to the sender

Change-Id: I343ec71d8e903b529594c4bd0543f04bc7f370b3
Signed-off-by: Neale Ranns <neale.ranns@cisco.com>

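The "ICMP to the sender" is the classic Fragmentation Needed message (type 3, code 4) carrying the next-hop MTU. A hedged scapy sketch of what a test might assert on the returned packet (addresses are assumptions; field names per scapy's ICMP layer):

    from scapy.layers.inet import ICMP, IP

    too_big = (IP(src='10.0.0.1', dst='10.0.0.2') /
               ICMP(type=3, code=4, nexthopmtu=1500))  # fragmentation needed

    assert too_big[ICMP].type == 3
    assert too_big[ICMP].code == 4
    assert too_big[ICMP].nexthopmtu == 1500
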
2018-01-09 | test: consolidate the multiple versions of send_and_* | Neale Ranns | 1 | -73/+0

Change-Id: I7fa7d0ebf73dab8264a2e5ddbd412600d78ead05
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-11-11 | MPLS disposition actions at the tail of unicast LSPs | Neale Ranns | 1 | -1/+0

Change-Id: I8c42e26152f2ed1246f91b789887bfc923418bdf
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-10-31 | Refactor IP input checks for re-use at MPLS disposition | Neale Ranns | 1 | -0/+135

Change-Id: I7aafdecd6f370411138e6ab67b2ff72cda6e0666
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-10-14 | Source Lookup programmable via API | Neale Ranns | 1 | -1/+109

Change-Id: I5d5d4f22b6369d504455a644f73076d772fbcfb4
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-10-10 | punt and drop features | Neale Ranns | 1 | -1/+111

- new IPv4 and IPv6 feature arcs on the punt and drop nodes
- new features:
  - redirect punted traffic to an interface and nexthop
  - police punted traffic

Change-Id: I53be8bf4e06545add8a3619e462de5ffedd0a95c
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-09-11 | FIB table add/delete API | Neale Ranns | 1 | -3/+8

Part 2:
- this adds the code to create an IP and MPLS table via the API
- the enforcement that the table must be created before it is used is still missing; this is so that CSIT can pass

Change-Id: Id124d884ade6cb7da947225200e3bb193454c555
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-08-01 | Tests for recursive load-balancing with no choices. | Neale Ranns | 1 | -5/+35

Change-Id: I90bb3369576741d03628a818ffa63cc99d6e4c98
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-06-06 | Packets received on VLAN-0 map to the main interface | Neale Ranns | 1 | -0/+46

Change-Id: I21b1ad39275495d4d006023b58f630a213445854
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-05-25 | MPLS hash function improvements | Neale Ranns | 1 | -18/+43

Change-Id: I28e98f445c01493562b6196a4f5b532a51f178af
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-04-26 | IP Flow Hash Config fixes | Neale Ranns | 1 | -0/+137

- the flow hash config is (and was) cached on the load-balance object, so the fib_table_t struct is not used at switch time. Therefore changes to the table's flow hash config need to be propagated to all load-balances and hence to all FIB entries in the table
- enable the API for setting the IPv6 table flow hash config
- use only the hash config in the fib_table_t object and not the one on the ipX_fib_t
- add tests

Change-Id: Ib804c11162c6d4972c764957562c372f663e05d4
Signed-off-by: Neale Ranns <nranns@cisco.com>

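Flow hashing itself is just a deterministic hash over selected header fields, and the "flow hash config" selects which fields participate. A toy sketch of the idea (not VPP's actual hash function):

    def flow_hash(pkt, use_sport=True, use_dport=True):
        """Toy 5-tuple hash; the flow hash config selects which fields count."""
        key = (pkt['src'], pkt['dst'], pkt['proto'],
               pkt['sport'] if use_sport else None,
               pkt['dport'] if use_dport else None)
        return hash(key)

    a = {'src': '10.0.0.1', 'dst': '10.0.0.2', 'proto': 17,
         'sport': 1234, 'dport': 80}
    b = dict(a, sport=4321)

    # with the ports excluded from the config, both flows pick the same bucket
    assert flow_hash(a, use_sport=False) == flow_hash(b, use_sport=False)

Since the result is consumed by the load-balance object at switch time, changing the config must be pushed to every cached copy — hence the propagation fix above.
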
2017-03-29 | Sub-net broadcast addresses for IPv4 | Neale Ranns | 1 | -1/+124

Change-Id: Ib2189d01e8bc61de57404159690fb70f89c47277
Signed-off-by: Neale Ranns <nranns@cisco.com>

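The address in question is computable with the stdlib, e.g. for a /24 (a small aside; the commit's own tests construct these addresses in scapy):

    import ipaddress

    net = ipaddress.ip_network('10.0.0.0/24')
    assert str(net.broadcast_address) == '10.0.0.255'

    # /31s have no subnet broadcast distinct from the two host addresses
    assert ipaddress.ip_network('10.0.0.0/31').broadcast_address == \
        ipaddress.ip_address('10.0.0.1')
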
2017-03-17 | Fix IP feature ordering. | Neale Ranns | 1 | -1/+98

Drop comes before lookup when enabled. is_first_or_last is not required when setting a feature; the anchor is added in find_config_with_features(). Don't make the PG interfaces automatically L3-enabled; this way we can have tests that check the L3-protocol-disabled behaviour.

Change-Id: Icef22a920b27ff9cec6ab2da6b05f05c532cb60f
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-03-09 | Tests to target holes in adjacency and DPO test coverage | Neale Ranns | 1 | -1/+82

Change-Id: Ic6ac7e441a7b75baa02f03c1585d1ae00903a399
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-01-11 | make test: improve documentation and PEP8 compliance | Klement Sekera | 1 | -7/+10

Change-Id: Ib4f0353aab6112fcc3c3d8f0bcbed5bc4b567b9b
Signed-off-by: Klement Sekera <ksekera@cisco.com>

2016-12-23 | make test: improve handling of packet captures | Klement Sekera | 1 | -11/+8

Perform accounting of expected packets based on created packet infos. Use this accounting info to automatically expect (and verify) the correct number of packets to be captured. Automatically retry the read of the capture file if scapy raises an exception while doing so, to handle rare cases when the capture file is read while only partially written during busy wait. Don't fail assert_nothing_captured if only junk packets arrived.

Change-Id: I16ec2e9410ef510d313ec16b7e13c57d0b2a63f5
Signed-off-by: Klement Sekera <ksekera@cisco.com>

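The retry guards against reading a pcap file mid-write. A hedged sketch of that pattern (the real implementation lives in the test framework's pg interface code; the helper name here is hypothetical):

    import time
    from scapy.utils import rdpcap

    def read_capture(path, retries=3, delay=0.1):
        """Retry rdpcap() to tolerate a partially written capture file."""
        for attempt in range(retries):
            try:
                return rdpcap(path)
            except Exception:
                if attempt == retries - 1:
                    raise
                time.sleep(delay)  # the writer may still be busy; try again
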
2016-12-09 | make test: FIB add/update/delete - ip4 routes | Matej Klotton | 1 | -2/+266

- JIRA: CSIT-483

Change-Id: Idb4c5bd7a234bc975f3380ece58c0e8d4bfdafd9
Signed-off-by: Matej Klotton <mklotton@cisco.com>

2016-12-05 | make test: fix missing log/packet messages | Klement Sekera | 1 | -7/+5

Change-Id: Idb3119792943664748c4abc3829ad723f4156dfe
Signed-off-by: Klement Sekera <ksekera@cisco.com>

2016-11-15 | Update test documentation. | Matej Klotton | 1 | -18/+60

- update IRB, IPv4 and IPv6 docs
- revert 778c2765c8ea5c6628f6d668847f0b9ae06dbf3d

Change-Id: I9af5ed9329ce5fe01392cf28d5bf321cfc647e48
Signed-off-by: Matej Klotton <mklotton@cisco.com>

2016-11-09 | Disable non-working checks in load-balancer test and rename ip->ip4 | Klement Sekera | 1 | -0/+165

Change-Id: If62011e29e912bf0c47625b0d3b3624ef6375013
Signed-off-by: Klement Sekera <ksekera@cisco.com>