# VPP Link Aggregation Control Protocol (LACP) implementation {#lacp_plugin_doc}
This document describes the usage of the VPP LACP implementation.
## LACP
The Link Aggregation Control Protocol (LACP), defined in the IEEE 802.3ad
standard, provides a way for two Partner Systems to exchange information over
a link so that their protocol instances can agree on the Link Aggregation
Group to which the link belongs and enable transmission and reception for the
higher layer. Multiple links may be bundled into the same Aggregation Group to
form a single high-bandwidth, fault-tolerant transmission medium.
### Configuration
1. Create the bond interface
   `create bond mode lacp [hw-addr <mac-address>] [load-balance { l2 | l23 | l34 }]`
2. Enslave a physical interface to the bond
   `enslave interface <interface> to <bond-interface-name> [passive] [long-timeout]`
3. Delete the bond interface
   `delete bond {<interface> | sw_if_index <sw_idx>}`
4. Detach a slave interface from the bond
   `detach interface <interface>`
### Configuration example
```
create bond mode lacp
set interface state BondEthernet0 up
enslave interface TenGigabitEthernet7/0/0 to BondEthernet0
enslave interface TenGigabitEthernet7/0/1 to BondEthernet0
enslave interface TenGigabitEthernet5/0/0 to BondEthernet0
enslave interface TenGigabitEthernet5/0/1 to BondEthernet0
```
```
detach interface TenGigabitEthernet5/0/1
```
```
delete bond BondEthernet0
```
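The same configuration can also be driven over the VPP binary API. The sketch
below uses the Python `vpp_papi` client; the message names (`bond_create`,
`bond_enslave`), the mode/load-balance enum values and the reply fields are
assumptions about the bond plugin API and should be checked against the `.api`
definitions shipped with the VPP release in use.
```python
#!/usr/bin/env python
# Minimal sketch: configure a LACP bond over the VPP binary API with vpp_papi,
# mirroring the CLI example above. Message names, enum values and reply
# fields are assumptions -- verify against your VPP's bond .api definitions.
from vpp_papi import VPPApiClient

vpp = VPPApiClient()
vpp.connect("lacp-config-example")

# mode=5 is assumed to be LACP, lb=0 the l2 load-balance policy.
bond = vpp.api.bond_create(mode=5, lb=0, use_custom_mac=0)
bond_sw_if_index = bond.sw_if_index

# Attach two physical interfaces to the bond (interface names are examples).
for name in ("TenGigabitEthernet7/0/0", "TenGigabitEthernet7/0/1"):
    for intf in vpp.api.sw_interface_dump():
        if intf.interface_name.rstrip("\x00") == name:
            vpp.api.bond_enslave(sw_if_index=intf.sw_if_index,
                                 bond_sw_if_index=bond_sw_if_index,
                                 is_passive=0, is_long_timeout=0)
            break

# Equivalent of "set interface state BondEthernet0 up".
vpp.api.sw_interface_set_flags(sw_if_index=bond_sw_if_index, admin_up_down=1)
vpp.disconnect()
```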
### Operational data
```
show lacp [<interface>] [details]
```
Example:
```
DBGvpp# show lacp
                                                        actor state                      partner state
interface name            sw_if_index  bond interface   exp/def/dis/col/syn/agg/tim/act  exp/def/dis/col/syn/agg/tim/act
GigabitEthernet2/0/1      1            BondEthernet0      0   0   1   1   1   1   1   1    0   0   1   1   1   1   1   1
  LAG ID: [(ffff,e4-c7-22-f3-26-71,0000,00ff,0001), (ffff,fc-99-47-4a-0c-8b,0009,00ff,0001)]
  RX-state: CURRENT, TX-state: TRANSMIT, MUX-state: COLLECTING_DISTRIBUTING, PTX-state: PERIODIC_TX
TenGigabitEthernet4/0/0   2            BondEthernet1      0   0   1   1   1   1   1   1    0   0   1   1   1   1   0   1
  LAG ID: [(ffff,90-e2-ba-76-cf-2d,0001,00ff,0001), (8000,00-2a-6a-e5-50-c1,0140,8000,011d)]
  RX-state: CURRENT, TX-state: TRANSMIT, MUX-state: COLLECTING_DISTRIBUTING, PTX-state: PERIODIC_TX
TenGigabitEthernet4/0/1   3            BondEthernet1      0   0   1   1   1   1   1   1    0   0   1   1   1   1   0   1
  LAG ID: [(ffff,90-e2-ba-76-cf-2d,0001,00ff,0002), (8000,00-2a-6a-e5-50-c1,0140,8000,011e)]
  RX-state: CURRENT, TX-state: TRANSMIT, MUX-state: COLLECTING_DISTRIBUTING, PTX-state: PERIODIC_TX
TenGigabitEthernet8/0/1   7            BondEthernet1      0   0   1   1   1   1   1   1    0   0   1   1   1   1   0   1
  LAG ID: [(ffff,90-e2-ba-76-cf-2d,0001,00ff,0003), (8000,00-2a-6a-e5-50-01,007a,8000,0114)]
  RX-state: CURRENT, TX-state: TRANSMIT, MUX-state: COLLECTING_DISTRIBUTING, PTX-state: PERIODIC_TX
TenGigabitEthernet8/0/0   6            BondEthernet1      0   0   1   1   1   1   1   1    0   0   1   1   1   1   0   1
  LAG ID: [(ffff,90-e2-ba-76-cf-2d,0001,00ff,0004), (8000,00-2a-6a-e5-50-01,007a,8000,0115)]
  RX-state: CURRENT, TX-state: TRANSMIT, MUX-state: COLLECTING_DISTRIBUTING, PTX-state: PERIODIC_TX
TenGigabitEthernet6/0/1   5            BondEthernet2      0   0   1   1   1   1   1   1    0   0   1   1   1   1   1   1
  LAG ID: [(ffff,90-e2-ba-36-31-21,0002,00ff,0001), (ffff,90-e2-ba-29-f5-31,000f,00ff,0002)]
  RX-state: CURRENT, TX-state: TRANSMIT, MUX-state: COLLECTING_DISTRIBUTING, PTX-state: PERIODIC_TX
TenGigabitEthernet6/0/0   4            BondEthernet2      0   0   1   1   1   1   1   1    0   0   1   1   1   1   1   1
  LAG ID: [(ffff,90-e2-ba-36-31-21,0002,00ff,0002), (ffff,90-e2-ba-29-f5-31,000f,00ff,0001)]
  RX-state: CURRENT, TX-state: TRANSMIT, MUX-state: COLLECTING_DISTRIBUTING, PTX-state: PERIODIC_TX
DBGvpp#
```
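The eight flag columns (exp/def/dis/col/syn/agg/tim/act) are the bits of the
Actor_State and Partner_State octets carried in each LACPDU, as defined by
IEEE 802.3ad. A small standalone helper like the sketch below (plain Python,
not part of VPP) decodes a raw state octet into the same flags, which is handy
when correlating this output with captured LACPDUs.
```python
# Decode the 802.3ad actor/partner state octet into the flags that
# "show lacp" prints as exp/def/dis/col/syn/agg/tim/act.
STATE_BITS = (
    ("act", 0),  # LACP_Activity: active (1) or passive (0)
    ("tim", 1),  # LACP_Timeout: short (1) or long (0) timeout
    ("agg", 2),  # Aggregation: link is aggregatable
    ("syn", 3),  # Synchronization: link is in sync with the LAG
    ("col", 4),  # Collecting: reception of frames is enabled
    ("dis", 5),  # Distributing: transmission of frames is enabled
    ("def", 6),  # Defaulted: operating on defaulted partner information
    ("exp", 7),  # Expired: partner information has expired
)

def decode_lacp_state(state_octet):
    """Return a dict of flag name -> 0/1 for a raw LACP state octet."""
    return {name: (state_octet >> bit) & 1 for name, bit in STATE_BITS}

# 0x3f corresponds to exp/def/dis/col/syn/agg/tim/act = 0 0 1 1 1 1 1 1,
# the healthy collecting/distributing state shown in the output above.
print(decode_lacp_state(0x3F))
```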
```
show bond [details]
```
Example:
```
DBGvpp# show bond
interface name   sw_if_index   mode         load balance  active slaves  slaves
BondEthernet0    10            lacp         l2            1              1
BondEthernet1    11            lacp         l34           4              4
BondEthernet2    12            lacp         l23           2              2
DBGvpp#
```
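The bond summary is also available programmatically. A minimal sketch using
`vpp_papi` is shown below; the dump message name `sw_interface_bond_dump` and
its reply fields are assumptions about the bond plugin API and should be
verified against the `.api` file of the running VPP release.
```python
# Sketch: list bond interfaces over the binary API, mirroring "show bond".
# The dump message name and its reply fields are assumptions -- confirm
# against the bond .api file of the running VPP version.
from vpp_papi import VPPApiClient

vpp = VPPApiClient()
vpp.connect("show-bond-example")

for bond in vpp.api.sw_interface_bond_dump():
    print("sw_if_index=%u mode=%u lb=%u active_slaves=%u slaves=%u"
          % (bond.sw_if_index, bond.mode, bond.lb,
             bond.active_slaves, bond.slaves))

vpp.disconnect()
```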
### Debugging
```
debug lacp [<interface>] <on | off>
```