path: root/tests/vpp/perf/l2/10ge2p1x710-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr.robot
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/shared/default.robot
|
| Force Tags | 3_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | NDRPDR
| ... | NIC_Intel-X710 | ETH | L2BDMACLRN | FEATURE | ACL | ACL_STATELESS
| ... | IACL | ACL50 | 10K_FLOWS | DRV_VFIO_PCI
| ... | RXQ_SIZE_0 | TXQ_SIZE_0
| ... | eth-l2bdbasemaclrn-iacl50sl-10kflows
|
| Suite Setup | Setup suite topology interfaces | performance
| Suite Teardown | Tear down suite | performance
| Test Setup | Setup test | performance
| Test Teardown | Tear down test | performance | acl
|
| Test Template | Local Template
|
| Documentation | *RFC2544: Packet throughput L2BD test cases with ACL*
|
| ... | *[Top] Network Topologies:* TG-DUT1-DUT2-TG 3-node circular topology\
| ... | with single links between nodes.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4-UDP for L2 switching of IPv4.
| ... | *[Cfg] DUT configuration:* DUT1 is configured with L2 bridge domain\
| ... | and MAC learning enabled. DUT2 is configured with L2 cross-connects.\
| ... | Required ACL rules are applied to input paths of both DUT1 interfaces.\
| ... | DUT1 and DUT2 are tested with ${nic_name}.\
| ... | *[Ver] TG verification:* TG finds and reports throughput NDR (Non Drop\
| ... | Rate) with zero packet loss tolerance and throughput PDR (Partial Drop\
| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage\
| ... | of packets transmitted. NDR and PDR are discovered for different\
| ... | Ethernet L2 frame sizes using MLRsearch library.\
| ... | Test packets are generated by TG on\
| ... | links to DUTs. TG traffic profile contains two L3 flow-groups\
| ... | (flow-group per direction, ${flows_per_dir} flows per flow-group) with\
| ... | all packets containing Ethernet header, IPv4 header with UDP header and\
| ... | static payload. MAC addresses match those of the TG node\
| ... | interfaces.
| ... | *[Ref] Applicable standard specifications:* RFC2544.
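# Search behaviour sketch (general MLRsearch description, not specific to
# this suite): the TG offers trial loads, measures the loss ratio, and keeps
# narrowing a lower/upper bound rate interval per target loss ratio; NDR is
# reported as the interval lower bound for the zero-loss target, PDR as the
# lower bound for the configured non-zero loss tolerance.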

*** Variables ***
| @{plugins_to_enable}= | dpdk_plugin.so | acl_plugin.so
| ${crypto_type}= | ${None}
| ${nic_name}= | Intel-X710
| ${nic_driver}= | vfio-pci
| ${nic_rxq_size}= | 0
| ${nic_txq_size}= | 0
| ${nic_pfs}= | 2
| ${nic_vfs}= | 0
| ${osi_layer}= | L2
| ${overhead}= | ${0}
# ACL test setup
| ${acl_action}= | permit
| ${acl_apply_type}= | input
| ${no_hit_aces_number}= | 50
| ${flows_per_dir}= | 10k
# starting points for non-hitting ACLs
| ${src_ip_start}= | 30.30.30.1
| ${dst_ip_start}= | 40.40.40.1
| ${ip_step}= | ${1}
| ${sport_start}= | ${1000}
| ${dport_start}= | ${1000}
| ${port_step}= | ${1}
| ${trex_stream1_subnet}= | 10.10.10.0/24
| ${trex_stream2_subnet}= | 20.20.20.0/24
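# Illustration only (rule syntax assumed, not defined in this file): the ACL
# keywords are expected to expand the start values above into
# ${no_hit_aces_number} non-hitting input ACEs by stepping IPs and ports, e.g.
#   ipv4 permit src 30.30.30.1/32 dst 40.40.40.1/32 sport 1000 dport 1000
#   ipv4 permit src 30.30.30.2/32 dst 40.40.40.2/32 sport 1001 dport 1001
#   ... (50 entries), terminated by ${acl_action} entries that do match the
#   TG streams in ${trex_stream1_subnet} and ${trex_stream2_subnet}.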
# Traffic profile:
| ${traffic_profile}= | trex-sl-3n-ethip4udp-10u1000p-conc
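# Interpretation (assumed from the profile name, not defined here):
# "10u1000p" emulates 10 client addresses x 1000 UDP ports per direction,
# giving the ${flows_per_dir} = 10k flows per flow-group between the two
# subnets declared above.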

*** Keywords ***
| Local Template
| | [Documentation]
| | ... | [Cfg] DUT runs L2BD config with ACLs with ${phy_cores} phy core(s).
| | ... | [Ver] Measure NDR and PDR values using MLRsearch algorithm.
| |
| | ... | *Arguments:*
| | ... | - frame_size - Frame size in Bytes, as integer or string (IMIX_v4_1).
| | ... | Type: integer, string
| | ... | - phy_cores - Number of physical cores. Type: integer
| | ... | - rxq - Number of RX queues, default value: ${None}. Type: integer
| |
| | [Arguments] | ${frame_size} | ${phy_cores} | ${rxq}=${None}
| |
| | Set Test Variable | \${frame_size}
| |
| | Given Set Max Rate And Jumbo
| | And Add worker threads to all DUTs | ${phy_cores} | ${rxq}
| | And Pre-initialize layer driver | ${nic_driver}
| | And Apply Startup configuration on all VPP DUTs
| | When Initialize layer driver | ${nic_driver}
| | And Initialize layer interface
| | And Initialize L2 bridge domain with IPv4 ACLs on DUT1 in 3-node circular topology
| | Then Find NDR and PDR intervals using optimized search
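# Illustration only: with "Test Template | Local Template" set in Settings,
# each data row in the test cases below expands to an equivalent explicit
# call, e.g.
#   Local Template | frame_size=${64} | phy_cores=${1}
# with rxq left at its ${None} default.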

*** Test Cases ***
| tc01-64B-1c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | 64B | 1C
| | frame_size=${64} | phy_cores=${1}

| tc02-64B-2c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | 64B | 2C
| | frame_size=${64} | phy_cores=${2}

| tc03-64B-4c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | 64B | 4C
| | frame_size=${64} | phy_cores=${4}

| tc04-1518B-1c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | 1518B | 1C
| | frame_size=${1518} | phy_cores=${1}

| tc05-1518B-2c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | 1518B | 2C
| | frame_size=${1518} | phy_cores=${2}

| tc06-1518B-4c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | 1518B | 4C
| | frame_size=${1518} | phy_cores=${4}

| tc07-9000B-1c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | 9000B | 1C
| | frame_size=${9000} | phy_cores=${1}

| tc08-9000B-2c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | 9000B | 2C
| | frame_size=${9000} | phy_cores=${2}

| tc09-9000B-4c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | 9000B | 4C
| | frame_size=${9000} | phy_cores=${4}

| tc10-IMIX-1c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | IMIX | 1C
| | frame_size=IMIX_v4_1 | phy_cores=${1}

| tc11-IMIX-2c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | IMIX | 2C
| | frame_size=IMIX_v4_1 | phy_cores=${2}

| tc12-IMIX-4c-eth-l2bdbasemaclrn-iacl50sl-10kflows-ndrpdr
| | [Tags] | IMIX | 4C
| | frame_size=IMIX_v4_1 | phy_cores=${4}