#!/usr/bin/env python import unittest import binascii from socket import AF_INET6 from framework import VppTestCase, VppTestRunner from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto, VppIpTable from vpp_srv6 import SRv6LocalSIDBehaviors, VppSRv6LocalSID, VppSRv6Policy, \ SRv6PolicyType, VppSRv6Steering, SRv6PolicySteeringTypes from scapy.packet import Raw from scapy.layers.l2 import Ether, Dot1Q from scapy.layers.inet6 import IPv6, UDP, IPv6ExtHdrSegmentRouting from scapy.layers.inet import IP, UDP from scapy.utils import inet_pton, inet_ntop from util import ppp class TestSRv6(VppTestCase): """ SRv6 Test Case """ @classmethod def setUpClass(self): super(TestSRv6, self).setUpClass() def setUp(self): """ Perform test setup before each test case. """ super(TestSRv6, self).setUp() # packet sizes, inclusive L2 overhead self.pg_packet_sizes = [64, 512, 1518, 9018] # reset packet_infos self.reset_packet_infos() def tearDown(self): """ Clean up test setup after each test case. """ self.teardown_interfaces() super(TestSRv6, self).tearDown() def configure_interface(self, interface, ipv6=False, ipv4=False, ipv6_table_id=0, ipv4_table_id=0): """ Configure interface. :param ipv6: configure IPv6 on interface :param ipv4: configure IPv4 on interface :param ipv6_table_id: FIB table_id for IPv6 :param ipv4_table_id: FIB table_id for IPv4 """ self.logger.debug("Configuring interface %s" % (interface.name)) if ipv6: self.logger.debug("Configuring IPv6") interface.set_table_ip6(ipv6_table_id) interface.config_ip6() interface.resolve_ndp(timeout=5) if ipv4: self.logger.debug("Configuring IPv4") interface.set_table_ip4(ipv4_table_id) interface.config_ip4() interface.resolve_arp() interface.admin_up() def setup_interfaces(self, ipv6=[], ipv4=[], ipv6_table_id=[], ipv4_table_id=[]): """ Create and configure interfaces. 
:param ipv6: list of interface IPv6 capabilities :param ipv4: list of interface IPv4 capabilities :param ipv6_table_id: list of intf IPv6 FIB table_ids :param ipv4_table_id: list of intf IPv4 FIB table_ids :returns: List of created interfaces. """ # how many interfaces? if len(ipv6): count = len(ipv6) else: count = len(ipv4) self.logger.debug("Creating and configuring %d interfaces" % (count)) # fill up ipv6 and ipv4 lists if needed # not enabled (False) is the default if len(ipv6) < count: ipv6 += (count - len(ipv6)) * [False] if len(ipv4) < count: ipv4 += (count - len(ipv4)) * [False] # fill up table_id lists if needed # table_id 0 (global) is the default if len(ipv6_table_id) < count: ipv6_table_id += (count - len(ipv6_table_id)) * [0] if len(ipv4_table_id) < count: ipv4_table_id += (count - len(ipv4_table_id)) * [0] # create 'count' pg interfaces self.create_pg_interfaces(range(count)) # setup all interfaces for i in range(count): intf = self.pg_interfaces[i] self.configure_interface(intf, ipv6[i], ipv4[i], ipv6_table_id[i], ipv4_table_id[i]) if any(ipv6): self.logger.debug(self.vapi.cli("show ip6 neighbors")) if any(ipv4): self.logger.debug(self.vapi.cli("show ip arp")) self.logger.debug(self.vapi.cli("show interface")) self.logger.debug(self.vapi.cli("show hardware")) return self.pg_interfaces def teardown_interfaces(self): """ Unconfigure and bring down interface. """ self.logger.debug("Tearing down interfaces") # tear down all interfaces # AFAIK they cannot be deleted for i in self.pg_interfaces: self.logger.debug("Tear down interface %s" % (i.name)) i.admin_down() i.unconfig() i.set_table_ip4(0) i.set_table_ip6(0) @unittest.skipUnless(0, "PC to fix") def test_SRv6_T_Encaps(self): """ Test SRv6 Transit.Encaps behavior for IPv6. 
""" # send traffic to one destination interface # source and destination are IPv6 only self.setup_interfaces(ipv6=[True, True]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure encaps IPv6 source address # needs to be done before SR Policy config # TODO: API? self.vapi.cli("set sr encaps source addr a3::") bsid = 'a3::9999:1' # configure SRv6 Policy # Note: segment list order: first -> last sr_policy = VppSRv6Policy( self, bsid=bsid, is_encap=1, sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, weight=1, fib_table=0, segments=['a4::', 'a5::', 'a6::c7'], source='a3::') sr_policy.add_vpp_config() self.sr_policy = sr_policy # log the sr policies self.logger.info(self.vapi.cli("show sr policies")) # steer IPv6 traffic to a7::/64 into SRv6 Policy # use the bsid of the above self.sr_policy pol_steering = VppSRv6Steering( self, bsid=self.sr_policy.bsid, prefix="a7::", mask_width=64, traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6, sr_policy_index=0, table_id=0, sw_if_index=0) pol_steering.add_vpp_config() # log the sr steering policies self.logger.info(self.vapi.cli("show sr steering policies")) # create packets count = len(self.pg_packet_sizes) dst_inner = 'a7::1234' pkts = [] # create IPv6 packets without SRH packet_header = self.create_packet_header_IPv6(dst_inner) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # create IPv6 packets with SRH # packets with segments-left 1, active segment a7:: packet_header = self.create_packet_header_IPv6_SRH( sidlist=['a8::', 'a7::', 'a6::'], segleft=1) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # create IPv6 packets with SRH and IPv6 # packets with segments-left 1, active segment a7:: packet_header = 
self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a8::', 'a7::', 'a6::'], segleft=1) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_T_Encaps) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SR steering pol_steering.remove_vpp_config() self.logger.info(self.vapi.cli("show sr steering policies")) # remove SR Policies self.sr_policy.remove_vpp_config() self.logger.info(self.vapi.cli("show sr policies")) # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() @unittest.skipUnless(0, "PC to fix") def test_SRv6_T_Insert(self): """ Test SRv6 Transit.Insert behavior (IPv6 only). """ # send traffic to one destination interface # source and destination are IPv6 only self.setup_interfaces(ipv6=[True, True]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure encaps IPv6 source address # needs to be done before SR Policy config # TODO: API? 
self.vapi.cli("set sr encaps source addr a3::") bsid = 'a3::9999:1' # configure SRv6 Policy # Note: segment list order: first -> last sr_policy = VppSRv6Policy( self, bsid=bsid, is_encap=0, sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, weight=1, fib_table=0, segments=['a4::', 'a5::', 'a6::c7'], source='a3::') sr_policy.add_vpp_config() self.sr_policy = sr_policy # log the sr policies self.logger.info(self.vapi.cli("show sr policies")) # steer IPv6 traffic to a7::/64 into SRv6 Policy # use the bsid of the above self.sr_policy pol_steering = VppSRv6Steering( self, bsid=self.sr_policy.bsid, prefix="a7::", mask_width=64, traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6, sr_policy_index=0, table_id=0, sw_if_index=0) pol_steering.add_vpp_config() # log the sr steering policies self.logger.info(self.vapi.cli("show sr steering policies")) # create packets count = len(self.pg_packet_sizes) dst_inner = 'a7::1234' pkts = [] # create IPv6 packets without SRH packet_header = self.create_packet_header_IPv6(dst_inner) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # create IPv6 packets with SRH # packets with segments-left 1, active segment a7:: packet_header = self.create_packet_header_IPv6_SRH( sidlist=['a8::', 'a7::', 'a6::'], segleft=1) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_T_Insert) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SR steering pol_steering.remove_vpp_config() self.logger.info(self.vapi.cli("show sr steering policies")) # remove SR Policies self.sr_policy.remove_vpp_config() self.logger.info(self.vapi.cli("show sr policies")) # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() 
@unittest.skipUnless(0, "PC to fix") def test_SRv6_T_Encaps_IPv4(self): """ Test SRv6 Transit.Encaps behavior for IPv4. """ # send traffic to one destination interface # source interface is IPv4 only # destination interface is IPv6 only self.setup_interfaces(ipv6=[False, True], ipv4=[True, False]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure encaps IPv6 source address # needs to be done before SR Policy config # TODO: API? self.vapi.cli("set sr encaps source addr a3::") bsid = 'a3::9999:1' # configure SRv6 Policy # Note: segment list order: first -> last sr_policy = VppSRv6Policy( self, bsid=bsid, is_encap=1, sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, weight=1, fib_table=0, segments=['a4::', 'a5::', 'a6::c7'], source='a3::') sr_policy.add_vpp_config() self.sr_policy = sr_policy # log the sr policies self.logger.info(self.vapi.cli("show sr policies")) # steer IPv4 traffic to 7.1.1.0/24 into SRv6 Policy # use the bsid of the above self.sr_policy pol_steering = VppSRv6Steering( self, bsid=self.sr_policy.bsid, prefix="7.1.1.0", mask_width=24, traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV4, sr_policy_index=0, table_id=0, sw_if_index=0) pol_steering.add_vpp_config() # log the sr steering policies self.logger.info(self.vapi.cli("show sr steering policies")) # create packets count = len(self.pg_packet_sizes) dst_inner = '7.1.1.123' pkts = [] # create IPv4 packets packet_header = self.create_packet_header_IPv4(dst_inner) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_T_Encaps_IPv4) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SR steering pol_steering.remove_vpp_config() 
self.logger.info(self.vapi.cli("show sr steering policies")) # remove SR Policies self.sr_policy.remove_vpp_config() self.logger.info(self.vapi.cli("show sr policies")) # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() @unittest.skip("VPP crashes after running this test") def test_SRv6_T_Encaps_L2(self): """ Test SRv6 Transit.Encaps behavior for L2. """ # send traffic to one destination interface # source interface is IPv4 only TODO? # destination interface is IPv6 only self.setup_interfaces(ipv6=[False, True], ipv4=[False, False]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure encaps IPv6 source address # needs to be done before SR Policy config # TODO: API? self.vapi.cli("set sr encaps source addr a3::") bsid = 'a3::9999:1' # configure SRv6 Policy # Note: segment list order: first -> last sr_policy = VppSRv6Policy( self, bsid=bsid, is_encap=1, sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, weight=1, fib_table=0, segments=['a4::', 'a5::', 'a6::c7'], source='a3::') sr_policy.add_vpp_config() self.sr_policy = sr_policy # log the sr policies self.logger.info(self.vapi.cli("show sr policies")) # steer L2 traffic into SRv6 Policy # use the bsid of the above self.sr_policy pol_steering = VppSRv6Steering( self, bsid=self.sr_policy.bsid, prefix="::", mask_width=0, traffic_type=SRv6PolicySteeringTypes.SR_STEER_L2, sr_policy_index=0, table_id=0, sw_if_index=self.pg0.sw_if_index) pol_steering.add_vpp_config() # log the sr steering policies self.logger.info(self.vapi.cli("show sr steering policies")) # create packets count = len(self.pg_packet_sizes) pkts = [] # create L2 packets without dot1q header packet_header = self.create_packet_header_L2() # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # create L2 packets 
with dot1q header packet_header = self.create_packet_header_L2(vlan=123) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_T_Encaps_L2) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SR steering pol_steering.remove_vpp_config() self.logger.info(self.vapi.cli("show sr steering policies")) # remove SR Policies self.sr_policy.remove_vpp_config() self.logger.info(self.vapi.cli("show sr policies")) # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() def test_SRv6_End(self): """ Test SRv6 End (without PSP) behavior. """ # send traffic to one destination interface # source and destination interfaces are IPv6 only self.setup_interfaces(ipv6=[True, True]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure SRv6 localSID End without PSP behavior localsid = VppSRv6LocalSID( self, localsid={'addr': 'A3::0'}, behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_END, nh_addr4='0.0.0.0', nh_addr6='::', end_psp=0, sw_if_index=0, vlan_index=0, fib_table=0) localsid.add_vpp_config() # log the localsids self.logger.debug(self.vapi.cli("show sr localsid")) # create IPv6 packets with SRH (SL=2, SL=1, SL=0) # send one packet per SL value per packet size # SL=0 packet with localSID End with USP needs 2nd SRH count = len(self.pg_packet_sizes) dst_inner = 'a4::1234' pkts = [] # packets with segments-left 2, active segment a3:: packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a5::', 'a4::', 'a3::'], segleft=2) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # packets with 
segments-left 1, active segment a3:: packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a4::', 'a3::', 'a2::'], segleft=1) # add to traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # TODO: test behavior with SL=0 packet (needs 2*SRH?) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_End) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SRv6 localSIDs localsid.remove_vpp_config() # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() def test_SRv6_End_with_PSP(self): """ Test SRv6 End with PSP behavior. """ # send traffic to one destination interface # source and destination interfaces are IPv6 only self.setup_interfaces(ipv6=[True, True]) # configure FIB entries route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() # configure SRv6 localSID End with PSP behavior localsid = VppSRv6LocalSID( self, localsid={'addr': 'A3::0'}, behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_END, nh_addr4='0.0.0.0', nh_addr6='::', end_psp=1, sw_if_index=0, vlan_index=0, fib_table=0) localsid.add_vpp_config() # log the localsids self.logger.debug(self.vapi.cli("show sr localsid")) # create IPv6 packets with SRH (SL=2, SL=1) # send one packet per SL value per packet size # SL=0 packet with localSID End with PSP is dropped count = len(self.pg_packet_sizes) dst_inner = 'a4::1234' pkts = [] # packets with segments-left 2, active segment a3:: packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a5::', 'a4::', 'a3::'], segleft=2) # create traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # packets with segments-left 1, active segment a3:: packet_header = 
self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a4::', 'a3::', 'a2::'], segleft=1) # add to traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_End_PSP) # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SRv6 localSIDs localsid.remove_vpp_config() # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() def test_SRv6_End_X(self): """ Test SRv6 End.X (without PSP) behavior. """ # create three interfaces (1 source, 2 destinations) # source and destination interfaces are IPv6 only self.setup_interfaces(ipv6=[True, True, True]) # configure FIB entries # a4::/64 via pg1 and pg2 route = VppIpRoute(self, "a4::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, proto=DpoProto.DPO_PROTO_IP6), VppRoutePath(self.pg2.remote_ip6, self.pg2.sw_if_index, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route.add_vpp_config() self.logger.debug(self.vapi.cli("show ip6 fib")) # configure SRv6 localSID End.X without PSP behavior # End.X points to interface pg1 localsid = VppSRv6LocalSID( self, localsid={'addr': 'A3::C4'}, behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_X, nh_addr4='0.0.0.0', nh_addr6=self.pg1.remote_ip6, end_psp=0, sw_if_index=self.pg1.sw_if_index, vlan_index=0, fib_table=0) localsid.add_vpp_config() # log the localsids self.logger.debug(self.vapi.cli("show sr localsid")) # create IPv6 packets with SRH (SL=2, SL=1) # send one packet per SL value per packet size # SL=0 packet with localSID End with PSP is dropped count = len(self.pg_packet_sizes) dst_inner = 'a4::1234' pkts = [] # packets with segments-left 2, active segment a3::c4 packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a5::', 'a4::', 'a3::c4'], segleft=2) # create traffic stream pg0->pg1 
pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # packets with segments-left 1, active segment a3::c4 packet_header = self.create_packet_header_IPv6_SRH_IPv6( dst_inner, sidlist=['a4::', 'a3::c4', 'a2::'], segleft=1) # add to traffic stream pg0->pg1 pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, self.pg_packet_sizes, count)) # send packets and verify received packets # using same comparison function as End (no PSP) self.send_and_verify_pkts(self.pg0, pkts, self.pg1, self.compare_rx_tx_packet_End) # assert nothing was received on the other interface (pg2) self.pg2.assert_nothing_captured("mis-directed packet(s)") # log the localsid counters self.logger.info(self.vapi.cli("show sr localsid")) # remove SRv6 localSIDs localsid.remove_vpp_config() # remove FIB entries # done by tearDown # cleanup interfaces self.teardown_interfaces() def test_SRv6_End_X_
/*
*------------------------------------------------------------------
* Copyright (c) 2017 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*------------------------------------------------------------------
*/
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <sys/ioctl.h>
#include <linux/virtio_net.h>
#include <linux/vhost.h>
#include <sys/eventfd.h>
#include <sched.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/devices/netlink.h>
#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/tap/tap.h>
/* Global state for the tap driver (e.g. the tap_ids bitmap used below). */
tap_main_t tap_main;
/* Issue an ioctl on 'fd'; on failure, record a unix-style clib error in
 * the local variable 'err' and jump to the enclosing function's 'error'
 * label. NOTE: relies on 'err' and 'error:' being in scope at every
 * expansion site (see tap_create_if). */
#define _IOCTL(fd,a,...) \
if (ioctl (fd, a, __VA_ARGS__) < 0) \
{ \
err = clib_error_return_unix (0, "ioctl(" #a ")"); \
goto error; \
}
/* vnet hardware-interface flag-change callback for virtio/tap devices.
 * No flags are acted upon yet; always reports success (0). */
static u32
virtio_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi,
			u32 flags)
{
  /* Intentionally empty for now. */
  /* TODO: on MTU change, call vnet_netlink_set_if_mtu. */
  u32 rv = 0;

  return rv;
}
/* External RPC helper: schedules fp(data) to run on the VPP main thread
 * (used below by call_tap_error_ready to defer tap deletion). */
void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
/* Read-ready callback registered on the tap file descriptor.
 * There is nothing to consume here, so it is a deliberate no-op. */
static clib_error_t *
call_tap_read_ready (clib_file_t * uf)
{
  clib_error_t *err = 0;

  /* Intentionally no work: success is reported unconditionally. */
  return err;
}
/* RPC target: delete the tap interface whose sw_if_index is pointed to
 * by the argument. Executed on the main thread (scheduled from
 * call_tap_error_ready below). */
static void
tap_delete_if_cp (u32 * sw_if_index)
{
  tap_delete_if (vlib_get_main (), *sw_if_index);
}
/*
 * Tap clean-up routine:
 * The Linux side of a tap interface can be deleted out from under VPP,
 * e.g. when the tap is attached to a container and that container is
 * destroyed, the host-side tap interface is removed with it, while VPP
 * still holds the other side of the tap. This callback RPCs the main
 * thread to call tap_delete_if to clean up VPP's side.
 */
/* Error-ready callback on the tap fd: fires when the host side of the
 * tap has gone away; schedules deletion of the VPP-side interface on
 * the main thread via RPC (uf->private_data holds the sw_if_index). */
static clib_error_t *
call_tap_error_ready (clib_file_t * uf)
{
  u8 *rpc_arg = (u8 *) & uf->private_data;

  vl_api_rpc_call_main_thread (tap_delete_if_cp, rpc_arg,
			       sizeof (uf->private_data));
  return 0;
}
/* Resolve a network-namespace specifier to an open read-only fd.
 * Accepted forms: "pid:<n>" (that process's netns), an absolute path,
 * or a name looked up under /var/run/netns/.
 * Returns the fd from open(2), i.e. -1 on failure. */
static int
open_netns_fd (char *netns)
{
  u8 *path;
  int fd;

  if (strncmp (netns, "pid:", 4) == 0)
    path = format (0, "/proc/%u/ns/net%c", atoi (netns + 4), 0);
  else if (netns[0] == '/')
    path = format (0, "%s%c", netns, 0);
  else
    path = format (0, "/var/run/netns/%s%c", netns, 0);

  /* 'path' is a NUL-terminated clib vector; free it after opening. */
  fd = open ((char *) path, O_RDONLY);
  vec_free (path);
  return fd;
}
/* Upper bound on user-requested tap instance ids (args->id). */
#define TAP_MAX_INSTANCE 1024
void
tap_create_if (vlib_main_t * vm, tap_create_if_args_t * args)
{
vnet_main_t *vnm = vnet_get_main ();
vlib_thread_main_t *thm = vlib_get_thread_main ();
virtio_main_t *vim = &virtio_main;
tap_main_t *tm = &tap_main;
vnet_sw_interface_t *sw;
vnet_hw_interface_t *hw;
int i;
int old_netns_fd = -1;
struct ifreq ifr;
size_t hdrsz;
struct vhost_memory *vhost_mem = 0;
virtio_if_t *vif = 0;
clib_file_t t = { 0 };
clib_error_t *err = 0;
int fd = -1;
if (args->id != ~0)
{
if (clib_bitmap_get (tm->tap_ids, args->id))
{
args->rv = VNET_API_ERROR_INVALID_INTERFACE;
args->error = clib_error_return (0, "interface already exists");
return;
}
}
else
{
args->id = clib_bitmap_first_clear (tm->tap_ids);
}
if (args->id > TAP_MAX_INSTANCE)
{
args->rv = VNET_API_ERROR_UNSPECIFIED;
args->error = clib_error_return (0, "cannot find free interface id");
return;
}
clib_memset (&ifr, 0, sizeof (ifr));
pool_get (vim->interfaces, vif);
vif->dev_instance = vif - vim->interfaces;
vif->tap_fd = -1;
vif->id = args->id;
if ((vif->fd = open ("/dev/vhost-net", O_RDWR | O_NONBLOCK)) < 0)
{
args->rv = VNET_API_ERROR_SYSCALL_ERROR_1;
args->error = clib_error_return_unix (0, "open '/dev/vhost-net'");
goto error;
}
_IOCTL (vif->fd, VHOST_GET_FEATURES, &vif->remote_features);
if ((vif->remote_features & VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF)) == 0)
{
args->rv = VNET_API_ERROR_UNSUPPORTED;
args->error = clib_error_return (0, "vhost-net backend doesn't support "
"VIRTIO_NET_F_MRG_RXBUF feature");
goto error;
}
if ((vif->remote_features & VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC)) ==
0)
{
args->rv = VNET_API_ERROR_UNSUPPORTED;
args->error = clib_error_return (0, "vhost-net backend doesn't support "
"VIRTIO_RING_F_INDIRECT_DESC feature");
goto error;
}
if ((vif->remote_features & VIRTIO_FEATURE (VIRTIO_F_VERSION_1)) == 0)
{
args->rv = VNET_API_ERROR_UNSUPPORTED;
args->error = clib_error_return (0, "vhost-net backend doesn't support "
"VIRTIO_F_VERSION_1 features");
goto error;
}
vif->features |= VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF);
vif->features |= VIRTIO_FEATURE (VIRTIO_F_VERSION_1);
vif->features |= VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC);
virtio_set_net_hdr_size (vif);
_IOCTL (vif->fd, VHOST_SET_FEATURES, &vif->features);
if ((vif->tap_fd = open ("/dev/net/tun", O_RDWR | O_NONBLOCK)) < 0)
{
args->rv = VNET_API_ERROR_SYSCALL_ERROR_2;
args->error = clib_error_return_unix (0, "open '/dev/net/tun'");
goto error;
}
ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR;
_IOCTL (vif->tap_fd, TUNSETIFF, (void *) &ifr);
vif->ifindex = if_nametoindex (ifr.ifr_ifrn.ifrn_name);
if (!args->host_if_name)
args->host_if_name = (u8 *) ifr.ifr_ifrn.ifrn_name;
unsigned int offload = 0;
hdrsz = sizeof (struct virtio_net_hdr_v1);
if (args->tap_flags & TAP_FLAG_GSO)
{
offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
vif->gso_enabled = 1;
}
else
{
vif->gso_enabled = 0;
}
_IOCTL (vif->tap_fd, TUNSETOFFLOAD, offload);
_IOCTL (vif->tap_fd, TUNSETVNETHDRSZ, &hdrsz);
_IOCTL (vif->fd, VHOST_SET_OWNER, 0);
/* if namespace is specified, all further netlink messages should be excuted
after we change our net namespace */
if (args->host_namespace)
{
old_netns_fd = open ("/proc/self/ns/net", O_RDONLY);
if ((fd = open_netns_fd ((char *) args->host_namespace)) == -1)
{
args->rv = VNET_API_ERROR_SYSCALL_ERROR_2;
args->error = clib_error_return_unix (0, "open_netns_fd '%s'",
args->host_namespace);
goto error;
}
args->error = vnet_netlink_set_link_netns (vif->ifindex, fd,
(char *) args->host_if_name);
if (args->error)
{
args->rv = VNET_API_ERROR_NETLINK_ERROR;
goto error;
}
if (setns (fd, CLONE_NEWNET) == -1)
{
args->rv = VNET_API_ERROR_SYSCALL_ERROR_3;
args->error = clib_error_return_unix (0, "setns '%s'",
args->host_namespace);
goto error;
}
if ((vif->ifindex = if_nametoindex ((char *) args->host_if_name)) == 0)
{
args->rv = VNET_API_ERROR_SYSCALL_ERROR_3;
args->error = clib_error_return_unix (0, "if_nametoindex '%s'",
args->host_if_name);
goto error;
}
}
else
{
if (args->host_if_name)
{
args->error = vnet_netlink_set_link_name (vif->ifindex,
(char *)
args->host_if_name);
if (args->error)
{
args->rv = VNET_API_ERROR_NETLINK_ERROR;
goto error;
}
}
}
if (!ethernet_mac_address_is_zero (args->host_mac_addr))
{
args->error = vnet_netlink_set_link_addr (vif->ifindex,
args->host_mac_addr);
if (args->error)
{
args->rv = VNET_API_ERROR_NETLINK_ERROR;
goto error;
}
}
if (args->host_bridge)
{
args->error = vnet_netlink_set_link_master (vif->ifindex,
(char *) args->host_bridge);
if (args->error)
{
args->rv = VNET_API_ERROR_NETLINK_ERROR;
goto error;
}
}
if (args->host_ip4_prefix_len)
{
args->error = vnet_netlink_add_ip4_addr (vif->ifindex,
&args->host_ip4_addr,
args->host_ip4_prefix_len);
if (args->error)
{
args->rv = VNET_API_ERROR_NETLINK_ERROR;
goto error;
}
}
if (args->host_ip6_prefix_len)
{
args->error = vnet_netlink_add_ip6_addr (vif->ifindex,
&args->host_ip6_addr,
args->host_ip6_prefix_len);
if (args->error)
{
args->rv = VNET_API_ERROR_NETLINK_ERROR;
goto error;
}
}
args->error = vnet_netlink_set_link_state (vif->ifindex, 1 /* UP */ );
if (args->error)
{
args->rv = VNET_API_ERROR_NETLINK_ERROR;
goto error;
}
if (args->host_ip4_gw_set)
{
args->error = vnet_netlink_add_ip4_route (0, 0, &args->host_ip4_gw);
if (args->error)
{
args->rv = VNET_API_ERROR_NETLINK_ERROR;
goto error;
}
}
if (args->host_ip6_gw_set)
{
args->error = vnet_netlink_add_ip6_route (0, 0, &args->host_ip6_gw);
if (args->error)
{
args->rv = VNET_API_ERROR_NETLINK_ERROR;
goto error;
}
}
/* switch back to old net namespace */
if (args->host_namespace)
{
if (setns (old_netns_fd, CLONE_NEWNET) == -1)
{
args->rv = VNET_API_ERROR_SYSCALL_ERROR_2;
args->error = clib_error_return_unix (0, "setns '%s'",
args->host_namespace);
goto error;
}
}
/* Set vhost memory table */
i = sizeof (struct vhost_memory) + sizeof (struct vhost_memory_region);
vhost_mem = clib_mem_alloc (i);
clib_memset (vhost_mem, 0, i);
vhost_mem->nregions = 1;
vhost_mem->regions[0].memory_size = (1ULL << 47) - 4096;
_IOCTL (vif->fd, VHOST_SET_MEM_TABLE, vhost_mem);
if ((args->error =
virtio_vring_init (vm, vif, RX_QUEUE (0), args->rx_ring_sz)))
{
args->rv = VNET_API_ERROR_INIT_FAILED;
goto error;
}
vif->num_rxqs = 1;
if ((args->error =
virtio_vring_init (vm, vif, TX_QUEUE (0), args->tx_ring_sz)))
{
args->rv = VNET_API_ERROR_INIT_FAILED;
goto error;
}
vif->num_txqs = 1;
if (!args->mac_addr_set)
ethernet_mac_address_generate (args->mac_addr);
clib_memcpy (vif->mac_addr, args->