/* *------------------------------------------------------------------ * ip_api.c - vnet ip api * * Copyright (c) 2016 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *------------------------------------------------------------------ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define vl_typedefs /* define message structures */ #include #undef vl_typedefs #define vl_endianfun /* define message structures */ #include #undef vl_endianfun /* instantiate all the print functions we know about */ #define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) #define vl_printfun #include #undef vl_printfun #include #define foreach_ip_api_msg \ _(IP_FIB_DUMP, ip_fib_dump) \ _(IP6_FIB_DUMP, ip6_fib_dump) \ _(IP_MFIB_DUMP, ip_mfib_dump) \ _(IP6_MFIB_DUMP, ip6_mfib_dump) \ _(IP_NEIGHBOR_DUMP, ip_neighbor_dump) \ _(IP_MROUTE_ADD_DEL, ip_mroute_add_del) \ _(MFIB_SIGNAL_DUMP, mfib_signal_dump) \ _(IP_ADDRESS_DUMP, ip_address_dump) \ _(IP_UNNUMBERED_DUMP, ip_unnumbered_dump) \ _(IP_DUMP, ip_dump) \ _(IP_NEIGHBOR_ADD_DEL, ip_neighbor_add_del) \ _(SET_ARP_NEIGHBOR_LIMIT, set_arp_neighbor_limit) \ _(IP_PROBE_NEIGHBOR, ip_probe_neighbor) \ _(IP_SCAN_NEIGHBOR_ENABLE_DISABLE, ip_scan_neighbor_enable_disable) \ _(WANT_IP4_ARP_EVENTS, want_ip4_arp_events) \ _(WANT_IP6_ND_EVENTS, want_ip6_nd_events) \ _(WANT_IP6_RA_EVENTS, want_ip6_ra_events) \ _(PROXY_ARP_ADD_DEL, proxy_arp_add_del) \ _(PROXY_ARP_DUMP, proxy_arp_dump) \ _(PROXY_ARP_INTFC_ENABLE_DISABLE, proxy_arp_intfc_enable_disable) \ _(PROXY_ARP_INTFC_DUMP, proxy_arp_intfc_dump) \ _(RESET_FIB, reset_fib) \ _(IP_ADD_DEL_ROUTE, ip_add_del_route) \ _(IP_TABLE_ADD_DEL, ip_table_add_del) \ _(IP_PUNT_POLICE, ip_punt_police) \ _(IP_PUNT_REDIRECT, ip_punt_redirect) \ _(SET_IP_FLOW_HASH,set_ip_flow_hash) \ _(SW_INTERFACE_IP6ND_RA_CONFIG, sw_interface_ip6nd_ra_config) \ _(SW_INTERFACE_IP6ND_RA_PREFIX, sw_interface_ip6nd_ra_prefix) \ _(IP6ND_PROXY_ADD_DEL, ip6nd_proxy_add_del) \ _(IP6ND_PROXY_DUMP, ip6nd_proxy_dump) \ _(IP6ND_SEND_ROUTER_SOLICITATION, ip6nd_send_router_solicitation) \ _(SW_INTERFACE_IP6_ENABLE_DISABLE, sw_interface_ip6_enable_disable ) \ _(IP_CONTAINER_PROXY_ADD_DEL, ip_container_proxy_add_del) \ _(IP_CONTAINER_PROXY_DUMP, ip_container_proxy_dump) \ _(IOAM_ENABLE, ioam_enable) \ _(IOAM_DISABLE, ioam_disable) \ _(IP_SOURCE_AND_PORT_RANGE_CHECK_ADD_DEL, \ ip_source_and_port_range_check_add_del) \ _(IP_SOURCE_AND_PORT_RANGE_CHECK_INTERFACE_ADD_DEL, \ ip_source_and_port_range_check_interface_add_del) \ _(IP_SOURCE_CHECK_INTERFACE_ADD_DEL, \ 
ip_source_check_interface_add_del) \ _(IP_REASSEMBLY_SET, ip_reassembly_set) \ _(IP_REASSEMBLY_GET, ip_reassembly_get) \ _(IP_REASSEMBLY_ENABLE_DISABLE, ip_reassembly_enable_disable) \ _(IP_PUNT_REDIRECT_DUMP, ip_punt_redirect_dump) extern void stats_dslock_with_hint (int hint, int tag); extern void stats_dsunlock (void); static vl_api_ip_neighbor_flags_t ip_neighbor_flags_encode (ip_neighbor_flags_t f) { vl_api_ip_neighbor_flags_t v = IP_API_NEIGHBOR_FLAG_NONE; if (f & IP_NEIGHBOR_FLAG_STATIC) v |= IP_API_NEIGHBOR_FLAG_STATIC; if (f & IP_NEIGHBOR_FLAG_NO_FIB_ENTRY) v |= IP_API_NEIGHBOR_FLAG_NO_FIB_ENTRY; return (clib_host_to_net_u32 (v)); } static void send_ip_neighbor_details (u32 sw_if_index, const ip46_address_t * ip_address, const mac_address_t * mac, ip_neighbor_flags_t flags, vl_api_registration_t * reg, u32 context) { vl_api_ip_neighbor_details_t *mp; mp = vl_msg_api_alloc (sizeof (*mp)); clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_NEIGHBOR_DETAILS); mp->context = context; mp->neighbor.sw_if_index = htonl (sw_if_index); mp->neighbor.flags = ip_neighbor_flags_encode (flags); ip_address_encode (ip_address, IP46_TYPE_ANY, &mp->neighbor.ip_address); mac_address_encode (mac, mp->neighbor.mac_address); vl_api_send_msg (reg, (u8 *) mp); } static void vl_api_ip_neighbor_dump_t_handler (vl_api_ip_neighbor_dump_t * mp) { vl_api_registration_t *reg; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; u32 sw_if_index = ntohl (mp->sw_if_index); if (mp->is_ipv6) { ip6_neighbor_t *n, *ns; ns = ip6_neighbors_entries (sw_if_index); /* *INDENT-OFF* */ vec_foreach (n, ns) { ip46_address_t nh = { .ip6 = { .as_u64[0] = n->key.ip6_address.as_u64[0], .as_u64[1] = n->key.ip6_address.as_u64[1], }, }; send_ip_neighbor_details (n->key.sw_if_index, &nh, &n->mac, n->flags, reg, mp->context); } /* *INDENT-ON* */ vec_free (ns); } else { ethernet_arp_ip4_entry_t *n, *ns; ns = ip4_neighbor_entries (sw_if_index); /* *INDENT-OFF* */ 
vec_foreach (n, ns) { ip46_address_t nh = { .ip4 = { .as_u32 = n->ip4_address.as_u32, }, }; send_ip_neighbor_details (n->sw_if_index, &nh, &n->mac, n->flags, reg, mp->context); } /* *INDENT-ON* */ vec_free (ns); } } static void send_ip_fib_details (vpe_api_main_t * am, vl_api_registration_t * reg, const fib_table_t * table, const fib_prefix_t * pfx, fib_route_path_encode_t * api_rpaths, u32 context) { vl_api_ip_fib_details_t *mp; fib_route_path_encode_t *api_rpath; vl_api_fib_path_t *fp; int path_count; path_count = vec_len (api_rpaths); mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); if (!mp) return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_FIB_DETAILS); mp->context = context; mp->table_id = htonl (table->ft_table_id); memcpy (mp->table_name, table->ft_desc, clib_min (vec_len (table->ft_desc), sizeof (mp->table_name))); mp->address_length = pfx->fp_len; memcpy (mp->address, &pfx->fp_addr.ip4, sizeof (pfx->fp_addr.ip4)); mp->stats_index = htonl (fib_table_entry_get_stats_index (table->ft_index, pfx)); mp->count = htonl (path_count); fp = mp->path; vec_foreach (api_rpath, api_rpaths) { fib_api_path_encode (api_rpath, fp); fp++; } vl_api_send_msg (reg, (u8 *) mp); } typedef struct vl_api_ip_fib_dump_walk_ctx_t_ { fib_node_index_t *feis; } vl_api_ip_fib_dump_walk_ctx_t; static fib_table_walk_rc_t vl_api_ip_fib_dump_walk (fib_node_index_t fei, void *arg) { vl_api_ip_fib_dump_walk_ctx_t *ctx = arg; vec_add1 (ctx->feis, fei); return (FIB_TABLE_WALK_CONTINUE); } static void vl_api_ip_fib_dump_t_handler (vl_api_ip_fib_dump_t * mp) { vpe_api_main_t *am = &vpe_api_main; vl_api_registration_t *reg; ip4_main_t *im = &ip4_main; fib_table_t *fib_table; fib_node_index_t *lfeip; const fib_prefix_t *pfx; u32 fib_index; fib_route_path_encode_t *api_rpaths; vl_api_ip_fib_dump_walk_ctx_t ctx = { .feis = NULL, }; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_foreach (fib_table, 
im->fibs, ({ fib_table_walk(fib_table->ft_index, FIB_PROTOCOL_IP4, vl_api_ip_fib_dump_walk, &ctx); })); /* *INDENT-ON* */ vec_sort_with_function (ctx.feis, fib_entry_cmp_for_sort); vec_foreach (lfeip, ctx.feis) { pfx = fib_entry_get_prefix (*lfeip); fib_index = fib_entry_get_fib_index (*lfeip); fib_table = fib_table_get (fib_index, pfx->fp_proto); api_rpaths = NULL; fib_entry_encode (*lfeip, &api_rpaths); send_ip_fib_details (am, reg, fib_table, pfx, api_rpaths, mp->context); vec_free (api_rpaths); } vec_free (ctx.feis); } static void send_ip6_fib_details (vpe_api_main_t * am, vl_api_registration_t * reg, const fib_table_t * table, const fib_prefix_t * pfx, fib_route_path_encode_t * api_rpaths, u32 context) { vl_api_ip6_fib_details_t *mp; fib_route_path_encode_t *api_rpath; vl_api_fib_path_t *fp; int path_count; path_count = vec_len (api_rpaths); mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); if (!mp) return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP6_FIB_DETAILS); mp->context = context; mp->table_id = htonl (table->ft_table_id); mp->address_length = pfx->fp_len; memcpy (mp->address, &pfx->fp_addr.ip6, sizeof (pfx->fp_addr.ip6)); memcpy (mp->table_name, table->ft_desc, clib_min (vec_len (table->ft_desc), sizeof (mp->table_name))); mp->stats_index = htonl (fib_table_entry_get_stats_index (table->ft_index, pfx)); mp->count = htonl (path_count); fp = mp->path; vec_foreach (api_rpath, api_rpaths) { fib_api_path_encode (api_rpath, fp); fp++; } vl_api_send_msg (reg, (u8 *) mp); } typedef struct apt_ip6_fib_show_ctx_t_ { fib_node_index_t *entries; } api_ip6_fib_show_ctx_t; static fib_table_walk_rc_t api_ip6_fib_table_put_entries (fib_node_index_t fei, void *arg) { api_ip6_fib_show_ctx_t *ctx = arg; vec_add1 (ctx->entries, fei); return (FIB_TABLE_WALK_CONTINUE); } static void api_ip6_fib_table_get_all (vl_api_registration_t * reg, vl_api_ip6_fib_dump_t * mp, fib_table_t * fib_table) { vpe_api_main_t *am = &vpe_api_main; 
fib_node_index_t *fib_entry_index; api_ip6_fib_show_ctx_t ctx = { .entries = NULL, }; fib_route_path_encode_t *api_rpaths; const fib_prefix_t *pfx; ip6_fib_table_walk (fib_table->ft_index, api_ip6_fib_table_put_entries, &ctx); vec_sort_with_function (ctx.entries, fib_entry_cmp_for_sort); vec_foreach (fib_entry_index, ctx.entries) { pfx = fib_entry_get_prefix (*fib_entry_index); api_rpaths = NULL; fib_entry_encode (*fib_entry_index, &api_rpaths); send_ip6_fib_details (am, reg, fib_table, pfx, api_rpaths, mp->context); vec_free (api_rpaths); } vec_free (ctx.entries); } static void vl_api_ip6_fib_dump_t_handler (vl_api_ip6_fib_dump_t * mp) { vl_api_registration_t *reg; ip6_main_t *im6 = &ip6_main; fib_table_t *fib_table; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_foreach (fib_table, im6->fibs, ({ /* don't send link locals */ if (fib_table->ft_flags & FIB_TABLE_FLAG_IP6_LL) continue; api_ip6_fib_table_get_all(reg, mp, fib_table); })); /* *INDENT-ON* */ } static void send_ip_mfib_details (vl_api_registration_t * reg, u32 context, u32 table_id, fib_node_index_t mfei) { fib_route_path_encode_t *api_rpath, *api_rpaths = NULL; vl_api_ip_mfib_details_t *mp; const mfib_prefix_t *pfx; mfib_entry_t *mfib_entry; vl_api_mfib_path_t *fp; int path_count; mfib_entry = mfib_entry_get (mfei); pfx = mfib_entry_get_prefix (mfei); mfib_entry_encode (mfei, &api_rpaths); path_count = vec_len (api_rpaths); mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); if (!mp) return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP_MFIB_DETAILS); mp->context = context; mp->rpf_id = mfib_entry->mfe_rpf_id; mp->entry_flags = mfib_entry->mfe_flags; mp->table_id = htonl (table_id); mp->address_length = pfx->fp_len; memcpy (mp->grp_address, &pfx->fp_grp_addr.ip4, sizeof (pfx->fp_grp_addr.ip4)); memcpy (mp->src_address, &pfx->fp_src_addr.ip4, sizeof (pfx->fp_src_addr.ip4)); mp->count = htonl (path_count); fp = 
mp->path; vec_foreach (api_rpath, api_rpaths) { fib_api_path_encode (api_rpath, &fp->path); fp->itf_flags = ntohl (api_rpath->rpath.frp_mitf_flags); fp++; } vec_free (api_rpaths); vl_api_send_msg (reg, (u8 *) mp); } typedef struct vl_api_ip_mfib_dump_ctc_t_ { fib_node_index_t *entries; } vl_api_ip_mfib_dump_ctc_t; static int vl_api_ip_mfib_table_dump_walk (fib_node_index_t fei, void *arg) { vl_api_ip_mfib_dump_ctc_t *ctx = arg; vec_add1 (ctx->entries, fei); return (0); } static void vl_api_ip_mfib_dump_t_handler (vl_api_ip_mfib_dump_t * mp) { vl_api_registration_t *reg; ip4_main_t *im = &ip4_main; mfib_table_t *mfib_table; fib_node_index_t *mfeip; vl_api_ip_mfib_dump_ctc_t ctx = { .entries = NULL, }; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_foreach (mfib_table, im->mfibs, ({ ip4_mfib_table_walk(&mfib_table->v4, vl_api_ip_mfib_table_dump_walk, &ctx); vec_sort_with_function (ctx.entries, mfib_entry_cmp_for_sort); vec_foreach (mfeip, ctx.entries) { send_ip_mfib_details (reg, mp->context, mfib_table->mft_table_id, *mfeip); } vec_reset_length (ctx.entries); })); /* *INDENT-ON* */ vec_free (ctx.entries); } static void send_ip6_mfib_details (vpe_api_main_t * am, vl_api_registration_t * reg, u32 table_id, const mfib_prefix_t * pfx, fib_route_path_encode_t * api_rpaths, u32 context) { vl_api_ip6_mfib_details_t *mp; fib_route_path_encode_t *api_rpath; vl_api_mfib_path_t *fp; int path_count; path_count = vec_len (api_rpaths); mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp)); if (!mp) return; clib_memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = ntohs (VL_API_IP6_MFIB_DETAILS); mp->context = context; mp->table_id = htonl (table_id); mp->address_length = pfx->fp_len; memcpy (mp->grp_address, &pfx->fp_grp_addr.ip6, sizeof (pfx->fp_grp_addr.ip6)); memcpy (mp->src_address, &pfx->fp_src_addr.ip6, sizeof (pfx->fp_src_addr.ip6)); mp->count = htonl (path_count); fp = mp->path; vec_foreach (api_rpath, 
api_rpaths) { fib_api_path_encode (api_rpath, &fp->path); fp->itf_flags = ntohl (api_rpath->rpath.frp_mitf_flags); fp++; } vl_api_send_msg (reg, (u8 *) mp); } typedef struct vl_api_ip6_mfib_dump_ctc_t_ { fib_node_index_t *entries; } vl_api_ip6_mfib_dump_ctc_t; static int vl_api_ip6_mfib_table_dump_walk (fib_node_index_t fei, void *arg) { vl_api_ip6_mfib_dump_ctc_t *ctx = arg; vec_add1 (ctx->entries, fei); return (0); } static void vl_api_ip6_mfib_dump_t_handler (vl_api_ip6_mfib_dump_t * mp) { vpe_api_main_t *am = &vpe_api_main; vl_api_registration_t *reg; ip6_main_t *im = &ip6_main; mfib_table_t *mfib_table; const mfib_prefix_t *pfx; fib_node_index_t *mfeip; fib_route_path_encode_t *api_rpaths = NULL; vl_api_ip6_mfib_dump_ctc_t ctx = { .entries = NULL, }; reg = vl_api_client_index_to_registration (mp->client_index); if (!reg) return; /* *INDENT-OFF* */ pool_foreach (mfib_table, im->mfibs, ({ ip6_mfib_table_walk(&mfib_table->v6, vl_api_ip6_mfib_table_dump_walk, &ctx); vec_sort_with_function (ctx.entries, mfib_entry_cmp_for_sort); vec_foreach(mfeip, ctx.entries) { pfx = mfib_entry_get_prefix (*mfeip); mfib_entry_encode (*mfeip, &api_rpaths); send_ip6_mfib_details (am, reg, mfib_table->mft_table_id, pfx, api_rpaths, mp->context); } vec_reset_length (api_rpaths); vec_reset_length (ctx.ent
#!/usr/bin/env python
"""IP4 VRF Multi-instance Test Case HLD:

**NOTES:**
    - higher number of pg-ip4 interfaces causes problems => only 15 pg-ip4 \
    interfaces in 5 VRFs are tested
    - jumbo packets in configuration with 15 pg-ip4 interfaces leads to \
    problems too
    - Reset of FIB table / VRF does not remove routes from IP FIB (see Jira \
    ticket https://jira.fd.io/browse/VPP-560) so checks of reset VRF tables \
    are skipped in tests 2, 3 and 4

**config 1**
    - add 15 pg-ip4 interfaces
    - configure 5 hosts per pg-ip4 interface
    - configure 4 VRFs
    - add 3 pg-ip4 interfaces per VRF

**test 1**
    - send IP4 packets between all pg-ip4 interfaces in all VRF groups

**verify 1**
    - check VRF data by parsing output of ip_fib_dump API command
    - all packets received correctly in case of pg-ip4 interfaces in VRF
    - no packet received in case of pg-ip4 interfaces not in VRF

**config 2**
    - delete 2 VRFs

**test 2**
    - send IP4 packets between all pg-ip4 interfaces in all VRF groups

**verify 2**
    - check VRF data by parsing output of ip_fib_dump API command
    - all packets received correctly in case of pg-ip4 interfaces in VRF
    - no packet received in case of pg-ip4 interfaces not in VRF

**config 3**
    - add 1 of deleted VRFs and 1 new VRF

**test 3**
    - send IP4 packets between all pg-ip4 interfaces in all VRF groups

**verify 3**
    - check VRF data by parsing output of ip_fib_dump API command
    - all packets received correctly in case of pg-ip4 interfaces in VRF
    - no packet received in case of pg-ip4 interfaces not in VRF

**config 4**
    - delete all VRFs (i.e. no VRF except VRF=0 created)

**test 4**
    - send IP4 packets between all pg-ip4 interfaces in all VRF groups

**verify 4**
    - check VRF data by parsing output of ip_fib_dump API command
    - all packets received correctly in case of pg-ip4 interfaces in VRF
    - no packet received in case of pg-ip4 interfaces not in VRF
"""

import unittest
import random

from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ARP

from framework import VppTestCase, VppTestRunner
from util import ppp


def is_ipv4_misc(p):
    """ Is packet one of uninteresting IPv4 broadcasts? """
    # ARP traffic is background noise for these tests; treat it as
    # "misc" so silence checks can filter it out.
    return True if p.haslayer(ARP) else False


class TestIp4VrfMultiInst(VppTestCase):
    """ IP4 VRF  Multi-instance Test Case """

    @classmethod
    def setUpClass(cls):
        """
        Perform standard class setup (defined by class method setUpClass in
        class VppTestCase) before running the test case, set test case related
        variables and configure VPP.
        """
        super(TestIp4VrfMultiInst, cls).setUpClass()

        # Test variables
        cls.hosts_per_pg = 5
        cls.nr_of_vrfs = 5
        cls.pg_ifs_per_vrf = 3

        try:
            # Create pg interfaces
            cls.create_pg_interfaces(
                range(cls.nr_of_vrfs * cls.pg_ifs_per_vrf))

            # Packet flows mapping pg0 -> pg1, pg2 etc.
            cls.flows = dict()
            for i in range(len(cls.pg_interfaces)):
                # Index of the VRF group this interface belongs to. Use
                # explicit floor division so the result stays an int on
                # Python 3 as well ('/' would yield a float there and break
                # the indexing below).
                multiplicand = i // cls.pg_ifs_per_vrf
                pg_list = [
                    cls.pg_interfaces[multiplicand * cls.pg_ifs_per_vrf + j]
                    for j in range(cls.pg_ifs_per_vrf)
                    if (multiplicand * cls.pg_ifs_per_vrf + j) != i]
                cls.flows[cls.pg_interfaces[i]] = pg_list

            # Packet sizes - jumbo packet (9018 bytes) skipped
            cls.pg_if_packet_sizes = [64, 512, 1518]

            # Set up all interfaces
            for pg_if in cls.pg_interfaces:
                pg_if.admin_up()
                pg_if.generate_remote_hosts(cls.hosts_per_pg)

            # Create list of VRFs
            cls.vrf_list = list()

            # Create list of deleted VRFs
            cls.vrf_deleted_list = list()

            # Create list of pg_interfaces in VRFs
            cls.pg_in_vrf = list()

            # Create list of pg_interfaces not in VRFs
            cls.pg_not_in_vrf = [pg_if for pg_if in cls.pg_interfaces]

            # Create mapping of pg_interfaces to VRF IDs
            cls.pg_if_by_vrf_id = dict()
            for i in range(cls.nr_of_vrfs):
                vrf_id = i + 1
                pg_list = [
                    cls.pg_interfaces[i * cls.pg_ifs_per_vrf + j]
                    for j in range(cls.pg_ifs_per_vrf)]
                cls.pg_if_by_vrf_id[vrf_id] = pg_list

        except Exception:
            super(TestIp4VrfMultiInst, cls).tearDownClass()
            raise

    def setUp(self):
        """
        Clear trace and packet infos before running each test.
        """
        super(TestIp4VrfMultiInst, self).setUp()
        # Drop packet-info bookkeeping left over from the previous test so
        # payload indices start fresh for each test case.
        self.reset_packet_infos()

    def tearDown(self):
        """
        Show various debug prints after each test.
        """
        super(TestIp4VrfMultiInst, self).tearDown()
        # Only query VPP for diagnostics if it is still alive; CLI calls
        # against a dead instance would fail.
        if not self.vpp_dead:
            self.logger.info(self.vapi.ppcli("show ip fib"))
            self.logger.info(self.vapi.ppcli("show ip arp"))

    def create_vrf_and_assign_interfaces(self, count, start=1):
        """
        Create required number of FIB tables / VRFs, put 3 pg-ip4 interfaces
        into every FIB table / VRF.

        :param int count: Number of FIB tables / VRFs to be created.
        :param int start: Starting number of the FIB table / VRF ID. \
        (Default value = 1)
        """

        for i in range(count):
            vrf_id = i + start
            pg_if = self.pg_if_by_vrf_id[vrf_id][0]
            dest_addr = pg_if.remote_hosts[0].ip4n
            dest_addr_len = 24
            # Adding a route with create_vrf_if_needed=1 implicitly creates
            # the FIB table / VRF on the VPP side.
            self.vapi.ip_add_del_route(
                dest_addr, dest_addr_len, pg_if.local_ip4n,
                table_id=vrf_id, create_vrf_if_needed=1, is_multipath=1)
            self.logger.info("IPv4 VRF ID %d created" % vrf_id)
            # Keep the test's bookkeeping lists in sync with VPP state.
            if vrf_id not in self.vrf_list:
                self.vrf_list.append(vrf_id)
            if vrf_id in self.vrf_deleted_list:
                self.vrf_deleted_list.remove(vrf_id)
            for j in range(self.pg_ifs_per_vrf):
                pg_if = self.pg_if_by_vrf_id[vrf_id][j]
                pg_if.set_table_ip4(vrf_id)
                self.logger.info("pg-interface %s added to IPv4 VRF ID %d"
                                 % (pg_if.name, vrf_id))
                if pg_if not in self.pg_in_vrf:
                    self.pg_in_vrf.append(pg_if)
                if pg_if in self.pg_not_in_vrf:
                    self.pg_not_in_vrf.remove(pg_if)
                # (Re)configure IP address and static ARP neighbors now that
                # the interface is bound to the VRF.
                pg_if.config_ip4()
                pg_if.configure_ipv4_neighbors(vrf_id)
        self.logger.debug(self.vapi.ppcli("show ip fib"))
        self.logger.debug(self.vapi.ppcli("show ip arp"))

    def delete_vrf(self, vrf_id):
        """
        Reset (delete) the given FIB table / VRF and update the test's
        bookkeeping lists accordingly.

        :param int vrf_id: The FIB table / VRF ID to be deleted.
        """
        self.vapi.reset_fib(vrf_id, is_ipv6=0)
        if vrf_id in self.vrf_list:
            self.vrf_list.remove(vrf_id)
        if vrf_id not in self.vrf_deleted_list:
            self.vrf_deleted_list.append(vrf_id)
        # Move every interface of this VRF to the "not in VRF" list.
        for idx in range(self.pg_ifs_per_vrf):
            iface = self.pg_if_by_vrf_id[vrf_id][idx]
            if iface in self.pg_in_vrf:
                self.pg_in_vrf.remove(iface)
            if iface not in self.pg_not_in_vrf:
                self.pg_not_in_vrf.append(iface)
        self.logger.info("IPv4 VRF ID %d reset" % vrf_id)
        self.logger.debug(self.vapi.ppcli("show ip fib"))
        self.logger.debug(self.vapi.ppcli("show ip arp"))

    def create_stream(self, src_if, packet_sizes):
        """
        Create input packet stream for defined interface using hosts list.

        :param object src_if: Interface to create packet stream for.
        :param list packet_sizes: List of required packet sizes.
        :return: Stream of packets.
        """
        stream = []
        for dst_if in self.flows[src_if]:
            for dst_host in dst_if.remote_hosts:
                # Pick a random source host on the sending interface.
                src_host = random.choice(src_if.remote_hosts)
                info = self.create_packet_info(src_if, dst_if)
                pkt = (Ether(dst=src_if.local_mac, src=src_host.mac) /
                       IP(src=src_host.ip4, dst=dst_host.ip4) /
                       UDP(sport=1234, dport=1234) /
                       Raw(self.info_to_payload(info)))
                # Save a copy before padding so verification compares
                # against the unpadded original.
                info.data = pkt.copy()
                self.extend_packet(pkt, random.choice(packet_sizes))
                stream.append(pkt)
        self.logger.debug("Input stream created for port %s. Length: %u pkt(s)"
                          % (src_if.name, len(stream)))
        return stream

    def verify_capture(self, pg_if, capture):
        """
        Verify captured input packet stream for defined interface.

        :param object pg_if: Interface to verify captured packet stream for.
        :param list capture: Captured packet stream.
        """
        last_info = dict()
        for i in self.pg_interfaces:
            last_info[i.sw_if_index] = None
        dst_sw_if_index = pg_if.sw_if_index
        for packet in capture:
            try:
                ip = packet[IP]
                udp = packet[UDP]
                payload_info = self.payload_to_info(str(packet[Raw]))
                packet_index = payload_info.index
                self.assertEqual(payload_info.dst, dst_sw_if_index)
                self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
                                  (pg_if.name, payload_info.src, packet_index))
                next_info = self.get_next_packet_info_for_interface2(
                    payload_info.src, dst_sw_if_index,
                    last_info[payload_info.src])
                last_info[payload_info.src] = next_info
                self.assertIsNotNone(next_info)
                self.assertEqual(packet_index, next_info.index)
                saved_packet = next_info.data
                # Check standard fields
                self.assertEqual(ip.src, saved_packet[IP].src)
                self.assertEqual(ip.dst, saved_packet[IP].dst)
                self.assertEqual(udp.sport, saved_packet[UDP].sport)
                self.assertEqual(udp.dport, saved_packet[UDP].dport)
            except Exception:
                self.logger.error(ppp("Unexpected or invalid packet:", packet))
                raise
        for i in self.pg_interfaces:
            # BUGFIX: pass the interface's sw_if_index (an int), matching the
            # payload_info.src lookups above; passing the interface object
            # itself made this leftover-packet check a silent no-op.
            remaining_packet = self.get_next_packet_info_for_interface2(
                i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
            self.assertIsNone(
                remaining_packet,
                "Port %u: Packet expected from source %u didn't arrive" %
                (dst_sw_if_index, i.sw_if_index))

    def verify_vrf(self, vrf_id):
        """
        Check if the FIB table / VRF ID is configured.

        :param int vrf_id: The FIB table / VRF ID to be verified.
        :return: 1 if the FIB table / VRF ID is configured, otherwise return 0.
        """
        # Count FIB entries reported for this table ID; any match means
        # the VRF exists on the VPP side.
        matches = sum(1 for fib_details in self.vapi.ip_fib_dump()
                      if fib_details[2] == vrf_id)
        if matches:
            self.logger.info("IPv4 VRF ID %d is configured" % vrf_id)
            return 1
        self.logger.info("IPv4 VRF ID %d is not configured" % vrf_id)
        return 0

    def run_verify_test(self):
        """
        Create packet streams for all configured pg-ip4 interfaces, send all \
        prepared packet streams and verify that:
            - all packets are received correctly on pg-ip4 interfaces assigned
              to a VRF
            - no packet is received on pg-ip4 interfaces not assigned to a VRF

        :raise Exception: If a pg-ip4 interface is in neither the in-VRF nor
            the not-in-VRF bookkeeping list.
        """
        # Test
        # Create incoming packet streams for packet-generator interfaces
        for pg_if in self.pg_interfaces:
            pkts = self.create_stream(pg_if, self.pg_if_packet_sizes)
            pg_if.add_stream(pkts)

        # Enable packet capture and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify
        # Verify outgoing packet streams per packet-generator interface
        for pg_if in self.pg_interfaces:
            if pg_if in self.pg_in_vrf:
                capture = pg_if.get_capture(remark="interface is in VRF")
                self.verify_capture(pg_if, capture)
            elif pg_if in self.pg_not_in_vrf:
                # ARP background traffic is filtered out before asserting
                # that nothing was captured.
                pg_if.assert_nothing_captured(remark="interface is not in VRF",
                                              filter_out_fn=is_ipv4_misc)
                self.logger.debug("No capture for interface %s" % pg_if.name)
            else:
                raise Exception("Unknown interface: %s" % pg_if.name)

    def test_ip4_vrf_01(self):
        """ IP4 VRF  Multi-instance test 1 - create 4 VRFs
        """
        # Config 1
        # Create 4 VRFs
        self.create_vrf_and_assign_interfaces(4)

        # Verify 1
        for vrf_id in self.vrf_list:
            self.assertEqual(self.verify_vrf(vrf_id), 1)

        # Test 1
        self.run_verify_test()

    def test_ip4_vrf_02(self):
        """ IP4 VRF  Multi-instance test 2 - delete 2 VRFs
        """
        # Config 2
        # Delete 2 VRFs
        self.delete_vrf(1)
        self.delete_vrf(2)

        # Verify 2
        # Checks of the reset VRF tables are skipped because of VPP-560
        # (reset does not remove routes from the IP FIB), see module docstring:
        # for vrf_id in self.vrf_deleted_list:
        #     self.assertEqual(self.verify_vrf(vrf_id), 0)
        for vrf_id in self.vrf_list:
            self.assertEqual(self.verify_vrf(vrf_id), 1)

        # Test 2
        self.run_verify_test()

    def test_ip4_vrf_03(self):
        """ IP4 VRF  Multi-instance test 3 - add 2 VRFs
        """
        # Config 3
        # Add 1 of deleted VRFs and 1 new VRF
        self.create_vrf_and_assign_interfaces(1)
        self.create_vrf_and_assign_interfaces(1, start=5)

        # Verify 3
        # Checks of the reset VRF tables are skipped because of VPP-560
        # (reset does not remove routes from the IP FIB), see module docstring:
        # for vrf_id in self.vrf_deleted_list:
        #     self.assertEqual(self.verify_vrf(vrf_id), 0)
        for vrf_id in self.vrf_list:
            self.assertEqual(self.verify_vrf(vrf_id), 1)

        # Test 3
        self.run_verify_test()

    def test_ip4_vrf_04(self):
        """ IP4 VRF  Multi-instance test 4 - delete 4 VRFs
        """
        # Config 4
        # Delete all VRFs (i.e. no VRF except VRF=0 created); iterate over a
        # snapshot because delete_vrf() mutates self.vrf_list as it goes.
        for vrf_id in list(self.vrf_list):
            self.delete_vrf(vrf_id)

        # Verify 4
        # for vrf_id in self.vrf_deleted_list:
        #     self.assertEqual(self.verify_vrf(vrf_id), 0)
        for vrf_id in self.vrf_list:
            self.assertEqual(self.verify_vrf(vrf_id), 1)

        # Test 4
        self.run_verify_test()


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
ain_t *vm = vlib_get_main (); vl_api_ip_probe_neighbor_reply_t *rmp; clib_error_t *error; ip46_address_t dst; ip46_type_t itype; VALIDATE_SW_IF_INDEX (mp); u32 sw_if_index = ntohl (mp->sw_if_index); itype = ip_address_decode (&mp->dst, &dst); if (IP46_TYPE_IP6 == itype) error = ip6_probe_neighbor (vm, &dst.ip6, sw_if_index, 0); else error = ip4_probe_neighbor (vm, &dst.ip4, sw_if_index, 0); if (error) { clib_error_report (error); rv = clib_error_get_code (error); } BAD_SW_IF_INDEX_LABEL; REPLY_MACRO (VL_API_PROXY_ARP_INTFC_ENABLE_DISABLE_REPLY); } static void vl_api_ip_scan_neighbor_enable_disable_t_handler (vl_api_ip_scan_neighbor_enable_disable_t * mp) { int rv = 0; vl_api_ip_scan_neighbor_enable_disable_reply_t *rmp; ip_neighbor_scan_arg_t arg; arg.mode = mp->mode; arg.scan_interval = mp->scan_interval; arg.max_proc_time = mp->max_proc_time; arg.max_update = mp->max_update; arg.scan_int_delay = mp->scan_int_delay; arg.stale_threshold = mp->stale_threshold; ip_neighbor_scan_enable_disable (&arg); REPLY_MACRO (VL_API_IP_SCAN_NEIGHBOR_ENABLE_DISABLE_REPLY); } static int ip4_reset_fib_t_handler (vl_api_reset_fib_t * mp) { vnet_main_t *vnm = vnet_get_main (); vnet_interface_main_t *im = &vnm->interface_main; ip4_main_t *im4 = &ip4_main; static u32 *sw_if_indices_to_shut; fib_table_t *fib_table; ip4_fib_t *fib; u32 sw_if_index; int i; int rv = VNET_API_ERROR_NO_SUCH_FIB; u32 target_fib_id = ntohl (mp->vrf_id); stats_dslock_with_hint (1 /* release hint */ , 8 /* tag */ ); /* *INDENT-OFF* */ pool_foreach (fib_table, im4->fibs, ({ vnet_sw_interface_t * si; fib = pool_elt_at_index (im4->v4_fibs, fib_table->ft_index); if (fib->table_id != target_fib_id) continue; /* remove any mpls encap/decap labels */ mpls_fib_reset_labels (fib->table_id); /* remove any proxy arps in this fib */ vnet_proxy_arp_fib_reset (fib->table_id); /* Set the flow hash for this fib to the default */ vnet_set_ip4_flow_hash (fib->table_id, IP_FLOW_HASH_DEFAULT); vec_reset_length 
(sw_if_indices_to_shut);

  /* Shut down interfaces in this FIB / clean out intfc routes */
  pool_foreach (si, im->sw_interfaces,
  ({
    u32 sw_if_index = si->sw_if_index;

    /* Bounds-check before indexing: not every sw interface has an
     * entry in fib_index_by_sw_if_index. */
    if (sw_if_index < vec_len (im4->fib_index_by_sw_if_index)
        && (im4->fib_index_by_sw_if_index[si->sw_if_index] ==
            fib->index))
      vec_add1 (sw_if_indices_to_shut, si->sw_if_index);
  }));

  /* Admin-down every interface collected above. */
  for (i = 0; i < vec_len (sw_if_indices_to_shut); i++)
    {
      sw_if_index = sw_if_indices_to_shut[i];

      u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index);
      flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP);
      vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
    }

  /* Drop all API-sourced routes from the matched table. */
  fib_table_flush(fib->index, FIB_PROTOCOL_IP4, FIB_SOURCE_API);

  rv = 0;
  break;
  })); /* pool_foreach (fib) */
  /* *INDENT-ON* */

  stats_dsunlock ();
  return rv;
}

/*
 * IPv6 counterpart of ip4_reset_fib_t_handler: find the v6 FIB whose
 * table_id matches mp->vrf_id, admin-down the interfaces bound to it,
 * and flush its API-sourced routes.  Returns 0 on success or
 * VNET_API_ERROR_NO_SUCH_FIB if no table matches.
 */
static int
ip6_reset_fib_t_handler (vl_api_reset_fib_t * mp)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;
  ip6_main_t *im6 = &ip6_main;
  /* NOTE(review): function-static scratch vector, reused across calls;
   * presumably serialized by the stats datastructure lock taken below —
   * confirm handlers cannot run concurrently. */
  static u32 *sw_if_indices_to_shut;
  fib_table_t *fib_table;
  ip6_fib_t *fib;
  u32 sw_if_index;
  int i;
  int rv = VNET_API_ERROR_NO_SUCH_FIB;
  u32 target_fib_id = ntohl (mp->vrf_id);

  stats_dslock_with_hint (1 /* release hint */ , 9 /* tag */ );

  /* *INDENT-OFF* */
  pool_foreach (fib_table, im6->fibs,
  ({
    vnet_sw_interface_t * si;

    fib = pool_elt_at_index (im6->v6_fibs, fib_table->ft_index);

    if (fib->table_id != target_fib_id)
      continue;

    vec_reset_length (sw_if_indices_to_shut);

    /* Set the flow hash for this fib to the default */
    vnet_set_ip6_flow_hash (fib->table_id, IP_FLOW_HASH_DEFAULT);

    /* Shut down interfaces in this FIB / clean out intfc routes */
    pool_foreach (si, im->sw_interfaces,
    ({
      /* NOTE(review): unlike the ip4 path above, there is no
       * vec_len (im6->fib_index_by_sw_if_index) bounds check here
       * before indexing — looks like a potential out-of-bounds read
       * for interfaces with no v6 FIB binding; confirm and align
       * with the ip4 handler. */
      if (im6->fib_index_by_sw_if_index[si->sw_if_index] ==
          fib->index)
        vec_add1 (sw_if_indices_to_shut, si->sw_if_index);
    }));

    /* Admin-down every interface collected above. */
    for (i = 0; i < vec_len (sw_if_indices_to_shut); i++)
      {
        sw_if_index = sw_if_indices_to_shut[i];

        u32 flags = vnet_sw_interface_get_flags (vnm, sw_if_index);
        flags &= ~(VNET_SW_INTERFACE_FLAG_ADMIN_UP);
        vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
      }

    /* Drop all API-sourced routes from the matched table. */
    fib_table_flush(fib->index, FIB_PROTOCOL_IP6, FIB_SOURCE_API);

    rv = 0;
    break;
  })); /* pool_foreach (fib) */
  /* *INDENT-ON* */

  stats_dsunlock ();
  return rv;
}

/*
 * RESET_FIB message handler: dispatch to the v4 or v6 reset routine
 * based on mp->is_ipv6 and send the reply.  REPLY_MACRO picks up the
 * local `rv` and `rmp` by name.
 */
static void
vl_api_reset_fib_t_handler (vl_api_reset_fib_t * mp)
{
  int rv;
  vl_api_reset_fib_reply_t *rmp;

  if (mp->is_ipv6)
    rv = ip6_reset_fib_t_handler (mp);
  else
    rv = ip4_reset_fib_t_handler (mp);

  REPLY_MACRO (VL_API_RESET_FIB_REPLY);
}

/*
 * SET_ARP_NEIGHBOR_LIMIT handler: apply the requested neighbor-table
 * limit to the ARP (v4) or ND (v6) subsystem.  A clib_error_t from the
 * setter is reported and mapped to VNET_API_ERROR_UNSPECIFIED;
 * otherwise the reply carries whatever api_errno the setter recorded.
 */
static void
vl_api_set_arp_neighbor_limit_t_handler (vl_api_set_arp_neighbor_limit_t * mp)
{
  int rv;
  vl_api_set_arp_neighbor_limit_reply_t *rmp;
  vnet_main_t *vnm = vnet_get_main ();
  clib_error_t *error;

  /* Clear any stale error code before invoking the setter. */
  vnm->api_errno = 0;

  if (mp->is_ipv6)
    error = ip6_set_neighbor_limit (ntohl (mp->arp_neighbor_limit));
  else
    error = ip4_set_arp_limit (ntohl (mp->arp_neighbor_limit));

  if (error)
    {
      clib_error_report (error);
      rv = VNET_API_ERROR_UNSPECIFIED;
    }
  else
    {
      rv = vnm->api_errno;
    }

  REPLY_MACRO (VL_API_SET_ARP_NEIGHBOR_LIMIT_REPLY);
}

/*
 * IP_REASSEMBLY_SET handler: configure reassembly timeout, maximum
 * concurrent reassemblies and expiry-walk interval for the v4 or v6
 * reassembly feature (selected by mp->is_ip6).
 */
void
vl_api_ip_reassembly_set_t_handler (vl_api_ip_reassembly_set_t * mp)
{
  vl_api_ip_reassembly_set_reply_t *rmp;
  int rv = 0;
  if (mp->is_ip6)
    {
      rv = ip6_reass_set (clib_net_to_host_u32 (mp->timeout_ms),
                          clib_net_to_host_u32 (mp->max_reassemblies),
                          clib_net_to_host_u32 (mp->expire_walk_interval_ms));
    }
  else
    {
      rv = ip4_reass_set (clib_net_to_host_u32 (mp->timeout_ms),
                          clib_net_to_host_u32 (mp->max_reassemblies),
                          clib_net_to_host_u32 (mp->expire_walk_interval_ms));
    }

  REPLY_MACRO (VL_API_IP_REASSEMBLY_SET_REPLY);
}

/*
 * IP_REASSEMBLY_GET handler: report the current reassembly settings
 * for v4 or v6.  Builds and sends the reply by hand (no REPLY_MACRO)
 * because the reply carries data fields that must be byte-swapped to
 * network order after the *_reass_get fill-in.
 */
void
vl_api_ip_reassembly_get_t_handler (vl_api_ip_reassembly_get_t * mp)
{
  unix_shared_memory_queue_t *q;
  q = vl_api_client_index_to_input_queue (mp->client_index);

  if (q == 0)
    return;  /* client vanished; nothing to reply to */

  vl_api_ip_reassembly_get_reply_t *rmp = vl_msg_api_alloc (sizeof (*rmp));
  clib_memset (rmp, 0, sizeof (*rmp));
  rmp->_vl_msg_id = ntohs (VL_API_IP_REASSEMBLY_GET_REPLY);
  rmp->context = mp->context;
  rmp->retval = 0;
  if (mp->is_ip6)
    {
      rmp->is_ip6 = 1;
      ip6_reass_get (&rmp->timeout_ms, &rmp->max_reassemblies,
                     &rmp->expire_walk_interval_ms);
    }
  else
    {
      rmp->is_ip6 = 0;
      ip4_reass_get (&rmp->timeout_ms, &rmp->max_reassemblies,
                     &rmp->expire_walk_interval_ms);
    }
  /* Convert the host-order values filled in above to network order. */
  rmp->timeout_ms = clib_host_to_net_u32 (rmp->timeout_ms);
  rmp->max_reassemblies = clib_host_to_net_u32 (rmp->max_reassemblies);
  rmp->expire_walk_interval_ms =
    clib_host_to_net_u32 (rmp->expire_walk_interval_ms);
  vl_msg_api_send_shmem (q, (u8 *) & rmp);
}

/*
 * IP_REASSEMBLY_ENABLE_DISABLE handler: toggle the reassembly feature
 * on an interface, v4 first, then v6 only if the v4 call succeeded
 * (first failure wins and is returned in the reply).
 */
void
  vl_api_ip_reassembly_enable_disable_t_handler
  (vl_api_ip_reassembly_enable_disable_t * mp)
{
  vl_api_ip_reassembly_enable_disable_reply_t *rmp;
  int rv = 0;
  rv = ip4_reass_enable_disable (clib_net_to_host_u32 (mp->sw_if_index),
                                 mp->enable_ip4);
  if (0 == rv)
    {
      rv = ip6_reass_enable_disable (clib_net_to_host_u32 (mp->sw_if_index),
                                     mp->enable_ip6);
    }

  REPLY_MACRO (VL_API_IP_REASSEMBLY_ENABLE_DISABLE_REPLY);
}

/*
 * Build and send one IP_PUNT_REDIRECT_DETAILS message describing the
 * punt-redirect entry `pr` on `sw_if_index` to the registered client
 * `reg`.  `context` is echoed back so the client can pair the detail
 * with its dump request.
 */
void
send_ip_punt_redirect_details (vl_api_registration_t * reg,
                               u32 context, u32 sw_if_index,
                               ip_punt_redirect_rx_t * pr, u8 is_ipv6)
{
  vl_api_ip_punt_redirect_details_t *mp;

  mp = vl_msg_api_alloc (sizeof (*mp));
  if (!mp)
    return;  /* allocation failed: silently drop this detail record */

  clib_memset (mp, 0, sizeof (*mp));
  mp->_vl_msg_id = ntohs (VL_API_IP_PUNT_REDIRECT_DETAILS);
  mp->context = context;
  mp->punt.rx_sw_if_index = htonl (sw_if_index);
  mp->punt.tx_sw_if_index = htonl (pr->tx_sw_if_index);
  if (is_ipv6)
    {
      ip_address_encode (&pr->nh, IP46_TYPE_IP6, &mp->punt.nh);
    }
  else
    {
      ip_address_encode (&pr->nh, IP46_TYPE_IP4, &mp->punt.nh);
    }

  vl_api_send_msg (reg, (u8 *) mp);
}

/*
 * IP_PUNT_REDIRECT_DUMP handler: stream one details message per
 * punt-redirect entry (v4 or v6 per mp->is_ipv6).  sw_if_index == ~0
 * means "all interfaces" and skips interface validation.
 */
static void
vl_api_ip_punt_redirect_dump_t_handler (vl_api_ip_punt_redirect_dump_t * mp)
{
  vl_api_registration_t *reg;
  u32 sw_if_index;
  /* rv is only consumed by the BAD_SW_IF_INDEX_LABEL machinery below */
  int rv __attribute__ ((unused)) = 0;

  sw_if_index = ntohl (mp->sw_if_index);
  reg = vl_api_client_index_to_registration (mp->client_index);
  if (!reg)
    return;

  if (~0 != sw_if_index)
    VALIDATE_SW_IF_INDEX (mp);

  ip_punt_redirect_detail_t *pr, *prs;
  if (mp->is_ipv6)
    {
      prs = ip6_punt_redirect_entries (sw_if_index);
      /* *INDENT-OFF* */
      vec_foreach (pr, prs)
      {
        send_ip_punt_redirect_details (reg, mp->context,
                                       pr->rx_sw_if_index,
                                       &pr->punt_redirect, 1);
      }
      /* *INDENT-ON* */
      vec_free (prs);
    }
  else
    {
      prs = ip4_punt_redirect_entries (sw_if_index);
      /* *INDENT-OFF* */
      vec_foreach (pr, prs)
      {
        send_ip_punt_redirect_details (reg, mp->context,
                                       pr->rx_sw_if_index,
                                       &pr->punt_redirect, 0);
      }
      /* *INDENT-ON* */
      vec_free (prs);
    }

  BAD_SW_IF_INDEX_LABEL;
}

/* NOTE(review): the #include target below (and others in this file) was
 * stripped by the text extraction; presumably it pulls in the generated
 * ip.api CRC list — restore from the original source tree. */
#define vl_msg_name_crc_list
#include
#undef vl_msg_name_crc_list

/*
 * Register the (message name, CRC) -> message-id mappings for every IP
 * API message, so clients can resolve ids by name+CRC at runtime.
 */
static void
setup_message_id_table (api_main_t * am)
{
#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
  foreach_vl_msg_name_crc_ip;
#undef _
}

/*
 * Plugin/API init: register handler, endian and print functions for
 * every message in foreach_ip_api_msg, mark the route add/del messages
 * MP-safe, install the name/CRC table, and hook the wc-arp process up
 * as the router-advertisement report publisher.
 */
static clib_error_t *
ip_api_hookup (vlib_main_t * vm)
{
  api_main_t *am = &api_main;

#define _(N,n)                                                  \
    vl_msg_api_set_handlers(VL_API_##N, #n,                     \
                           vl_api_##n##_t_handler,              \
                           vl_noop_handler,                     \
                           vl_api_##n##_t_endian,               \
                           vl_api_##n##_t_print,                \
                           sizeof(vl_api_##n##_t), 1);
  foreach_ip_api_msg;
#undef _

  /*
   * Mark the route add/del API as MP safe
   */
  am->is_mp_safe[VL_API_IP_ADD_DEL_ROUTE] = 1;
  am->is_mp_safe[VL_API_IP_ADD_DEL_ROUTE_REPLY] = 1;

  /*
   * Set up the (msg_name, crc, message-id) table
   */
  setup_message_id_table (am);

  ra_set_publisher_node (wc_arp_process_node.index, RA_REPORT);

  return 0;
}

VLIB_API_INIT_FUNCTION (ip_api_hookup);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */