author | Neale Ranns <neale.ranns@cisco.com> | 2018-05-01 05:17:55 -0700 |
---|---|---|
committer | Ole Trøan <otroan@employees.org> | 2019-06-18 13:31:39 +0000 |
commit | 097fa66b986f06281f603767d321ab13ab6c88c3 (patch) | |
tree | ed052819615d08ee4bd0afbc34de7e64e4598105 /test/test_mpls.py | |
parent | 39baa32186fd3e4b20d9f58afbbfe7b8daebed62 (diff) |
fib: fib api updates
Enhance the route add/del APIs to take a set of paths rather than just one.
Most unicast routing protocols calculate all the available paths in one
run of the algorithm, so updating all the paths at once is beneficial for the client.
Two knobs control the behaviour:
is_multipath - if set, the set of paths passed will be added to those
that already exist; otherwise the set will replace them.
is_add - add or remove the set.
is_add=0, is_multipath=1 and an empty path set results in deleting the route.
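For illustration, a minimal sketch of programming a route with a whole set of paths in one update, using the VppIpRoute/VppRoutePath test helpers exercised in the diff below. The prefix and next-hop addresses mirror the vat example that follows, and a surrounding VppTestCase context (`self`) is assumed:

```python
from vpp_ip_route import VppIpRoute, VppRoutePath

# One route object carries all four paths, so they are programmed in a
# single ip_route_add_del exchange rather than four multipath updates.
route = VppIpRoute(
    self, "1.1.1.1", 32,
    [VppRoutePath("10.10.10.11", 0xffffffff),
     VppRoutePath("10.10.10.12", 0xffffffff),
     VppRoutePath("10.10.10.13", 0xffffffff),
     VppRoutePath("10.10.10.14", 0xffffffff)])
route.add_vpp_config()     # install the full path set (is_add=1)
route.remove_vpp_config()  # delete the route (is_add=0)
```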
It is also considerably faster to add multiple paths at once than one at a time:
vat# ip_add_del_route 1.1.1.1/32 count 100000 multipath via 10.10.10.11
100000 routes in .572240 secs, 174751.80 routes/sec
vat# ip_add_del_route 1.1.1.1/32 count 100000 multipath via 10.10.10.12
100000 routes in .528383 secs, 189256.54 routes/sec
vat# ip_add_del_route 1.1.1.1/32 count 100000 multipath via 10.10.10.13
100000 routes in .757131 secs, 132077.52 routes/sec
vat# ip_add_del_route 1.1.1.1/32 count 100000 multipath via 10.10.10.14
100000 routes in .878317 secs, 113854.12 routes/sec
vat# ip_route_add_del 1.1.1.1/32 count 100000 multipath via 10.10.10.11 via 10.10.10.12 via 10.10.10.13 via 10.10.10.14
100000 routes in .900212 secs, 111084.93 routes/sec
Change-Id: I416b93f7684745099c1adb0b33edac58c9339c1a
Signed-off-by: Neale Ranns <neale.ranns@cisco.com>
Signed-off-by: Ole Troan <ot@cisco.com>
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>
Diffstat (limited to 'test/test_mpls.py')
-rw-r--r-- | test/test_mpls.py | 144 |
1 files changed, 78 insertions, 66 deletions
diff --git a/test/test_mpls.py b/test/test_mpls.py
index 79f3204c53e..d068bc37ee2 100644
--- a/test/test_mpls.py
+++ b/test/test_mpls.py
@@ -8,7 +8,8 @@ from vpp_ip import DpoProto
 from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
     VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
     MRouteItfFlags, MRouteEntryFlags, VppIpTable, VppMplsTable, \
-    VppMplsLabel, MplsLspMode, find_mpls_route
+    VppMplsLabel, MplsLspMode, find_mpls_route, \
+    FibPathProto, FibPathType, FibPathFlags, VppMplsLabel, MplsLspMode
 from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface

 import scapy.compat
@@ -498,8 +499,8 @@ class TestMPLS(VppTestCase):
             self, 333, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_333_eos.add_vpp_config()

         tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)])
@@ -523,8 +524,8 @@ class TestMPLS(VppTestCase):
             self, 334, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[VppMplsLabel(3)],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[VppMplsLabel(3)])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_334_eos.add_vpp_config()

         tx = self.create_stream_labelled_ip6(self.pg0,
@@ -539,8 +540,8 @@ class TestMPLS(VppTestCase):
             self, 335, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_335_eos.add_vpp_config()

         tx = self.create_stream_labelled_ip6(
@@ -586,6 +587,7 @@ class TestMPLS(VppTestCase):
                                          labels=[VppMplsLabel(44),
                                                  VppMplsLabel(45)])])
         route_34_eos.add_vpp_config()
+        self.logger.info(self.vapi.cli("sh mpls fib 34"))

         tx = self.create_stream_labelled_ip4(self.pg0,
                                              [VppMplsLabel(34, ttl=3)])
@@ -775,10 +777,8 @@ class TestMPLS(VppTestCase):
             self, "2001::3", 128,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          proto=DpoProto.DPO_PROTO_IP6,
                           labels=[VppMplsLabel(32,
-                                               mode=MplsLspMode.UNIFORM)])],
-            is_ip6=1)
+                                               mode=MplsLspMode.UNIFORM)])])
         route_2001_3.add_vpp_config()

         tx = self.create_stream_ip6(self.pg0, "2001::3",
@@ -968,7 +968,7 @@ class TestMPLS(VppTestCase):
                                  VppMplsLabel(33, ttl=47)])

     def test_mpls_tunnel_many(self):
-        """ Multiple Tunnels """
+        """ MPLS Multiple Tunnels """

         for ii in range(10):
             mpls_tun = VppMPLSTunnelInterface(
@@ -1111,10 +1111,11 @@ class TestMPLS(VppTestCase):
         # if the packet egresses, then we must have swapped to pg1
         # so as to have matched the route in table 1
         #
-        route_34_eos = VppMplsRoute(self, 34, 1,
-                                    [VppRoutePath("0.0.0.0",
-                                                  self.pg1.sw_if_index,
-                                                  is_interface_rx=1)])
+        route_34_eos = VppMplsRoute(
+            self, 34, 1,
+            [VppRoutePath("0.0.0.0",
+                          self.pg1.sw_if_index,
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)])
         route_34_eos.add_vpp_config()

         #
@@ -1154,7 +1155,7 @@ class TestMPLS(VppTestCase):
                           labels=[VppMplsLabel(3402)]),
              VppRoutePath("0.0.0.0",
                           self.pg1.sw_if_index,
-                          is_interface_rx=1)],
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)],
             is_multicast=1)
         route_3400_eos.add_vpp_config()

@@ -1235,6 +1236,7 @@ class TestMPLS(VppTestCase):
              VppMRoutePath(mpls_tun._sw_if_index,
                            MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
         route_232_1_1_1.add_vpp_config()
+        self.logger.info(self.vapi.cli("sh ip mfib index 0"))

         self.vapi.cli("clear trace")
         tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
@@ -1273,12 +1275,14 @@ class TestMPLS(VppTestCase):
         # if the packet egresses, then we must have matched the route in
         # table 1
         #
-        route_34_eos = VppMplsRoute(self, 34, 1,
-                                    [VppRoutePath("0.0.0.0",
-                                                  self.pg1.sw_if_index,
-                                                  nh_table_id=1,
-                                                  rpf_id=55)],
-                                    is_multicast=1)
+        route_34_eos = VppMplsRoute(
+            self, 34, 1,
+            [VppRoutePath("0.0.0.0",
+                          0xffffffff,
+                          nh_table_id=1,
+                          rpf_id=55)],
+            is_multicast=1,
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)
         route_34_eos.add_vpp_config()
@@ -1294,6 +1298,7 @@ class TestMPLS(VppTestCase):
         # set the RPF-ID of the entry to match the input packet's
         #
         route_232_1_1_1.update_rpf_id(55)
+        self.logger.info(self.vapi.cli("sh ip mfib index 1 232.1.1.1"))

         tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                              dst_ip="232.1.1.1")
@@ -1330,8 +1335,8 @@ class TestMPLS(VppTestCase):
             MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
             table_id=1,
             paths=[VppMRoutePath(self.pg1.sw_if_index,
-                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
-            is_ip6=1)
+                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+                                 proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
         route_ff.add_vpp_config()

         #
@@ -1345,11 +1350,11 @@ class TestMPLS(VppTestCase):
         route_34_eos = VppMplsRoute(
             self, 34, 1,
             [VppRoutePath("::",
-                          self.pg1.sw_if_index,
+                          0xffffffff,
                           nh_table_id=1,
-                          rpf_id=55,
-                          proto=DpoProto.DPO_PROTO_IP6)],
-            is_multicast=1)
+                          rpf_id=55)],
+            is_multicast=1,
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_34_eos.add_vpp_config()
@@ -1572,16 +1577,19 @@ class TestMPLSPIC(VppTestCase):
         pkts = []
         for ii in range(NUM_PKTS):
             dst = "192.168.1.%d" % ii
-            vpn_routes.append(VppIpRoute(self, dst, 32,
-                                         [VppRoutePath("10.0.0.45",
-                                                       0xffffffff,
-                                                       labels=[145],
-                                                       is_resolve_host=1),
-                                          VppRoutePath("10.0.0.46",
-                                                       0xffffffff,
-                                                       labels=[146],
-                                                       is_resolve_host=1)],
-                                         table_id=1))
+            vpn_routes.append(VppIpRoute(
+                self, dst, 32,
+                [VppRoutePath(
+                    "10.0.0.45",
+                    0xffffffff,
+                    labels=[145],
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST),
+                 VppRoutePath(
+                     "10.0.0.46",
+                     0xffffffff,
+                     labels=[146],
+                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()

             pkts.append(Ether(dst=self.pg2.local_mac,
@@ -1686,16 +1694,19 @@ class TestMPLSPIC(VppTestCase):
         for ii in range(NUM_PKTS):
             dst = "192.168.1.%d" % ii
             local_label = 1600 + ii
-            vpn_routes.append(VppIpRoute(self, dst, 32,
-                                         [VppRoutePath(self.pg2.remote_ip4,
-                                                       0xffffffff,
-                                                       nh_table_id=1,
-                                                       is_resolve_attached=1),
-                                          VppRoutePath(self.pg3.remote_ip4,
-                                                       0xffffffff,
-                                                       nh_table_id=1,
-                                                       is_resolve_attached=1)],
-                                         table_id=1))
+            vpn_routes.append(VppIpRoute(
+                self, dst, 32,
+                [VppRoutePath(
+                    self.pg2.remote_ip4,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+                 VppRoutePath(
+                     self.pg3.remote_ip4,
+                     0xffffffff,
+                     nh_table_id=1,
+                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()

             vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
@@ -1807,23 +1818,21 @@ class TestMPLSPIC(VppTestCase):
             local_label = 1600 + ii
             vpn_routes.append(VppIpRoute(
                 self, dst, 128,
-                [VppRoutePath(self.pg2.remote_ip6,
-                              0xffffffff,
-                              nh_table_id=1,
-                              is_resolve_attached=1,
-                              proto=DpoProto.DPO_PROTO_IP6),
-                 VppRoutePath(self.pg3.remote_ip6,
-                              0xffffffff,
-                              nh_table_id=1,
-                              proto=DpoProto.DPO_PROTO_IP6,
-                              is_resolve_attached=1)],
-                table_id=1,
-                is_ip6=1))
+                [VppRoutePath(
+                    self.pg2.remote_ip6,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+                 VppRoutePath(
+                     self.pg3.remote_ip6,
+                     0xffffffff,
+                     nh_table_id=1,
+                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()

             vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
-                                              ip_table_id=1,
-                                              is_ip6=1))
+                                              ip_table_id=1))
             vpn_bindings[ii].add_vpp_config()

             pkts.append(Ether(dst=self.pg0.local_mac,
@@ -1832,6 +1841,7 @@ class TestMPLSPIC(VppTestCase):
                         IPv6(src=self.pg0.remote_ip6, dst=dst) /
                         UDP(sport=1234, dport=1234) /
                         Raw('\xa5' * 100))
+            self.logger.info(self.vapi.cli("sh ip6 fib %s" % dst))

         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
@@ -1988,8 +1998,9 @@ class TestMPLSL2(VppTestCase):
             self, 55, 1,
             [VppRoutePath("0.0.0.0",
                           mpls_tun_1.sw_if_index,
-                          is_interface_rx=1,
-                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
         route_55_eos.add_vpp_config()

         #
@@ -2050,8 +2061,9 @@ class TestMPLSL2(VppTestCase):
             self, 55, 1,
             [VppRoutePath("0.0.0.0",
                           mpls_tun.sw_if_index,
-                          is_interface_rx=1,
-                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
         route_55_eos.add_vpp_config()

         #