/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* NOTE: the header names were missing from this excerpt; the list below is
 * reconstructed from the APIs used in the code and should be treated as an
 * assumption. */
#include <math.h>
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/elog.h>
#include <vnet/session/transport.h>
#include <vnet/session/session.h>
#include <vnet/session/application.h>
#include <vnet/session/application_interface.h>
#include <vnet/session/application_local.h>
#include <vnet/session/session_debug.h>
#include <svm/queue.h>
#include <sys/timerfd.h>

static inline void
session_wrk_send_evt_to_main (session_worker_t *wrk, session_evt_elt_t *elt)
{
  session_evt_elt_t *he;
  uword thread_index;
  u8 is_empty;

  thread_index = wrk->vm->thread_index;
  he = clib_llist_elt (wrk->event_elts, wrk->evts_pending_main);
  is_empty = clib_llist_is_empty (wrk->event_elts, evt_list, he);
  clib_llist_add_tail (wrk->event_elts, evt_list, elt, he);
  if (is_empty)
    session_send_rpc_evt_to_thread (0, session_wrk_handle_evts_main_rpc,
				    uword_to_pointer (thread_index, void *));
}

#define app_check_thread_and_barrier(_wrk, _elt)                              \
  if (!vlib_thread_is_main_w_barrier ())                                      \
    {                                                                         \
      session_wrk_send_evt_to_main (wrk, elt);                                \
      return;                                                                 \
    }

static void
session_wrk_timerfd_update (session_worker_t *wrk, u64 time_ns)
{
  struct itimerspec its;

  its.it_value.tv_sec = 0;
  its.it_value.tv_nsec = time_ns;
  its.it_interval.tv_sec = 0;
  its.it_interval.tv_nsec = its.it_value.tv_nsec;

  if (timerfd_settime (wrk->timerfd, 0, &its, NULL) == -1)
    clib_warning ("timerfd_settime");
}

always_inline u64
session_wrk_tfd_timeout (session_wrk_state_t state, u32 thread_index)
{
  if (state == SESSION_WRK_INTERRUPT)
    return thread_index ? 1e6 : vlib_num_workers () ? 5e8 : 1e6;
  else if (state == SESSION_WRK_IDLE)
    return thread_index ? 1e8 : vlib_num_workers () ? 5e8 : 1e8;
  else
    return 0;
}

static inline void
session_wrk_set_state (session_worker_t *wrk, session_wrk_state_t state)
{
  u64 time_ns;

  wrk->state = state;
  if (wrk->timerfd == -1)
    return;
  time_ns = session_wrk_tfd_timeout (state, wrk->vm->thread_index);
  session_wrk_timerfd_update (wrk, time_ns);
}

static transport_endpt_ext_cfg_t *
session_mq_get_ext_config (application_t *app, uword offset)
{
  svm_fifo_chunk_t *c;
  fifo_segment_t *fs;

  fs = application_get_rx_mqs_segment (app);
  c = fs_chunk_ptr (fs->h, offset);
  return (transport_endpt_ext_cfg_t *) c->data;
}

static void
session_mq_free_ext_config (application_t *app, uword offset)
{
  svm_fifo_chunk_t *c;
  fifo_segment_t *fs;

  fs = application_get_rx_mqs_segment (app);
  c = fs_chunk_ptr (fs->h, offset);
  fifo_segment_collect_chunk (fs, 0 /* only one slice */, c);
}

static void
session_mq_listen_handler (session_worker_t *wrk, session_evt_elt_t *elt)
{
  vnet_listen_args_t _a, *a = &_a;
  session_listen_msg_t *mp;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (wrk, elt);

  mp = session_evt_ctrl_data (wrk, elt);
  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  ip_copy (&a->sep.ip, &mp->ip, mp->is_ip4);
  a->sep.port = mp->port;
  a->sep.fib_index = mp->vrf;
  a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep.transport_proto = mp->proto;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;
  a->sep_ext.transport_flags = mp->flags;

  if (mp->ext_config)
    a->sep_ext.ext_cfg = session_mq_get_ext_config (app, mp->ext_config);

  if ((rv = vnet_listen (a)))
    session_worker_stat_error_inc (wrk, rv, 1);

  app_wrk = application_get_worker (app, mp->wrk_index);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);

  if (mp->ext_config)
    session_mq_free_ext_config (app, mp->ext_config);
}

static void
session_mq_listen_uri_handler (session_worker_t *wrk, session_evt_elt_t *elt)
{
  vnet_listen_args_t _a, *a = &_a;
  session_listen_uri_msg_t *mp;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (wrk, elt);

  mp = session_evt_ctrl_data (wrk, elt);
  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->app_index = app->app_index;
  rv = vnet_bind_uri (a);

  app_wrk = application_get_worker (app, 0);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);
}

static void
session_mq_connect_one (session_connect_msg_t *mp)
{
  vnet_connect_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  session_worker_t *wrk;
  application_t *app;
  int rv;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip));
  a->sep.port = mp->port;
  a->sep.transport_proto = mp->proto;
  a->sep.peer.fib_index = mp->vrf;
  a->sep.dscp = mp->dscp;
  clib_memcpy_fast (&a->sep.peer.ip, &mp->lcl_ip, sizeof (mp->lcl_ip));
  if (mp->is_ip4)
    {
      ip46_address_mask_ip4 (&a->sep.ip);
      ip46_address_mask_ip4 (&a->sep.peer.ip);
    }
  a->sep.peer.port = mp->lcl_port;
  a->sep.peer.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep_ext.parent_handle = mp->parent_handle;
  a->sep_ext.transport_flags = mp->flags;
  a->api_context = mp->context;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;

  if (mp->ext_config)
    a->sep_ext.ext_cfg = session_mq_get_ext_config (app, mp->ext_config);

  if ((rv = vnet_connect (a)))
    {
      wrk = session_main_get_worker (vlib_get_thread_index ());
      session_worker_stat_error_inc (wrk, rv, 1);
      app_wrk = application_get_worker (app, mp->wrk_index);
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }

  if (mp->ext_config)
    session_mq_free_ext_config (app, mp->ext_config);
}

static void
session_mq_handle_connects_rpc (void *arg)
{
  u32 max_connects = 32, n_connects = 0;
  session_evt_elt_t *he, *elt, *next;
  session_worker_t *fwrk;

  ASSERT (session_vlib_thread_is_cl_thread ());

  /* Pending connects on linked list pertaining to first worker */
  fwrk = session_main_get_worker (transport_cl_thread ());
  if (!fwrk->n_pending_connects)
    return;

  he = clib_llist_elt (fwrk->event_elts, fwrk->pending_connects);
  elt = clib_llist_next (fwrk->event_elts, evt_list, he);

  /* Avoid holding the worker for too long */
  while (n_connects < max_connects && elt != he)
    {
      next = clib_llist_next (fwrk->event_elts, evt_list, elt);
      clib_llist_remove (fwrk->event_elts, evt_list, elt);
      session_mq_connect_one (session_evt_ctrl_data (fwrk, elt));
      session_evt_ctrl_data_free (fwrk, elt);
      clib_llist_put (fwrk->event_elts, elt);
      elt = next;
      n_connects += 1;
    }

  /* Decrement with worker barrier */
  fwrk->n_pending_connects -= n_connects;
  if (fwrk->n_pending_connects > 0)
    {
      session_send_rpc_evt_to_thread_force (fwrk->vm->thread_index,
					    session_mq_handle_connects_rpc,
					    0);
    }
}

static void
session_mq_connect_handler (session_worker_t *wrk, session_evt_elt_t *elt)
{
  u32 thread_index = wrk - session_main.wrk;
  session_evt_elt_t *he;

  if (PREDICT_FALSE (thread_index > transport_cl_thread ()))
    {
      clib_warning ("Connect on wrong thread. Dropping");
      return;
    }

  /* If on worker, check if main has any pending messages. Avoids reordering
   * with other control messages that need to be handled by main */
  if (thread_index)
    {
      he = clib_llist_elt (wrk->event_elts, wrk->evts_pending_main);

      /* Events pending on main, postpone to avoid reordering */
      if (!clib_llist_is_empty (wrk->event_elts, evt_list, he))
	{
	  clib_llist_add_tail (wrk->event_elts, evt_list, elt, he);
	  return;
	}
    }

  /* Add to pending list to be handled by first worker */
  he = clib_llist_elt (wrk->event_elts, wrk->pending_connects);
  clib_llist_add_tail (wrk->event_elts, evt_list, elt, he);

  /* Decremented with worker barrier */
  wrk->n_pending_connects += 1;
  if (wrk->n_pending_connects == 1)
    {
      session_send_rpc_evt_to_thread_force (thread_index,
					    session_mq_handle_connects_rpc,
					    0);
    }
}

static void
session_mq_connect_uri_handler (session_worker_t *wrk, session_evt_elt_t *elt)
{
  vnet_connect_args_t _a, *a = &_a;
  session_connect_uri_msg_t *mp;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (wrk, elt);

  mp = session_evt_ctrl_data (wrk, elt);
  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->api_context = mp->context;
  a->app_index = app->app_index;
  if ((rv = vnet_connect_uri (a)))
    {
      session_worker_stat_error_inc (wrk, rv, 1);
      app_wrk = application_get_worker (app, 0 /* default wrk only */);
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }
}

static void
session_mq_shutdown_handler (void *data)
{
  session_shutdown_msg_t *mp = (session_shutdown_msg_t *) data;
  vnet_shutdown_args_t _a, *a = &_a;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->handle = mp->handle;
  vnet_shutdown_session (a);
}

static void
session_mq_disconnect_handler (void *data)
{
  session_disconnect_msg_t *mp = (session_disconnect_msg_t *) data;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->handle = mp->handle;
  vnet_disconnect_session (a);
}

static void
app_mq_detach_handler (session_worker_t *wrk, session_evt_elt_t *elt)
{
  vnet_app_detach_args_t _a, *a = &_a;
  session_app_detach_msg_t *mp;
  application_t *app;

  app_check_thread_and_barrier (wrk, elt);

  mp = session_evt_ctrl_data (wrk, elt);
  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->api_client_index = mp->client_index;
  vnet_application_detach (a);
}

static void
session_mq_unlisten_handler (session_worker_t *wrk, session_evt_elt_t *elt)
{
  vnet_unlisten_args_t _a, *a = &_a;
  session_unlisten_msg_t *mp;
  app_worker_t *app_wrk;
  session_handle_t sh;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (wrk, elt);

  mp = session_evt_ctrl_data (wrk, elt);
  sh = mp->handle;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->app_index = app->app_index;
  a->handle = sh;
  a->wrk_map_index = mp->wrk_index;

  if ((rv = vnet_unlisten (a)))
    session_worker_stat_error_inc (wrk, rv, 1);

  app_wrk = application_get_worker (app, a->wrk_map_index);
  if (!app_wrk)
    return;

  mq_send_unlisten_reply (app_wrk, sh, mp->context, rv);
}

static void
session_mq_accepted_reply_handler (session_worker_t *wrk,
				   session_evt_elt_t *elt)
{
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_accepted_reply_msg_t *mp;
  session_state_t old_state;
  app_worker_t *app_wrk;
  session_t *s;

  mp = session_evt_ctrl_data (wrk, elt);

  /* Mail this back from the main thread. We're not polling in main
   * thread so we're using other workers for notifications. */
  if (session_thread_from_handle (mp->handle) == 0 && vlib_num_workers () &&
      vlib_get_thread_index () != 0)
    {
      session_wrk_send_evt_to_main (wrk, elt);
      return;
    }

  s = session_get_from_handle_if_valid (mp->handle);
  if (!s)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != mp->context)
    {
      clib_warning ("app doesn't own session");
      return;
    }

  /* Server isn't interested, disconnect the session */
  if (mp->retval)
    {
      a->app_index = mp->context;
      a->handle = mp->handle;
      vnet_disconnect_session (a);
      return;
    }

  /* Special handling for cut-through sessions */
  if (!session_has_transport (s))
    {
      session_set_state (s, SESSION_STATE_READY);
      ct_session_connect_notify (s, SESSION_E_NONE);
      return;
    }

  old_state = s->session_state;
  session_set_state (s, SESSION_STATE_READY);

  if (!svm_fifo_is_empty_prod (s->rx_fifo))
    app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);

  /* Closed while waiting for app to reply. Resend disconnect */
  if (old_state >= SESSION_STATE_TRANSPORT_CLOSING)
    {
      app_worker_close_notify (app_wrk, s);
      session_set_state (s, old_state);
      return;
    }
}

static void
session_mq_reset_reply_handler (void *data)
{
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_reset_reply_msg_t *mp;
  app_worker_t *app_wrk;
  session_t *s;
  application_t *app;
  u32 index, thread_index;

  mp = (session_reset_reply_msg_t *) data;
  app = application_lookup (mp->context);
  if (!app)
    return;

  session_parse_handle (mp->handle, &index, &thread_index);
  s = session_get_if_valid (index, thread_index);

  /* No session or not the right session */
  if (!s || s->session_state < SESSION_STATE_TRANSPORT_CLOSING)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (!app_wrk || app_wrk->app_index != app->app_index)
    {
      clib_warning ("App %u does not own handle 0x%lx!", app->app_index,
		    mp->handle);
      return;
    }

  /* Client objected to resetting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* This comes as a response to a reset, transport only waiting for
   * confirmation to remove connection state, no need to disconnect */
  a->handle = mp->handle;
  a->app_index = app->app_index;
  vnet_disconnect_session (a);
}

static void
session_mq_disconnected_handler (void *data)
{
  session_disconnected_reply_msg_t *rmp;
  vnet_disconnect_args_t _a, *a = &_a;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  session_disconnected_msg_t *mp;
  app_worker_t *app_wrk;
  session_event_t *evt;
  session_t *s;
  application_t *app;
  int rv = 0;

  mp = (session_disconnected_msg_t *) data;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("could not disconnect handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  app = application_lookup (mp->client_index);
  if (!(app_wrk && app && app->app_index == app_wrk->app_index))
    {
      clib_warning ("could not disconnect session: %llu app: %u",
		    mp->handle, mp->client_index);
      return;
    }

  a->handle = mp->handle;
  a->app_index = app_wrk->wrk_index;
  rv = vnet_disconnect_session (a);

  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING,
				       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_DISCONNECTED_REPLY;
  rmp = (session_disconnected_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  rmp->context = mp->context;
  rmp->retval = rv;
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}

static void
session_mq_disconnected_reply_handler (void *data)
{
  session_disconnected_reply_msg_t *mp;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  mp = (session_disconnected_reply_msg_t *) data;

  /* Client objected to disconnecting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* Disconnect has been confirmed. Confirm close to transport */
  app = application_lookup (mp->context);
  if (app)
    {
      a->handle = mp->handle;
      a->app_index = app->app_index;
      vnet_disconnect_session (a);
    }
}

static void
session_mq_worker_update_handler (void *data)
{
  session_worker_update_msg_t *mp = (session_worker_update_msg_t *) data;
  session_worker_update_reply_msg_t *rmp;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  app_worker_t *app_wrk;
  u32 owner_app_wrk_map;
  session_event_t *evt;
  session_t *s;
  application_t *app;
  int rv;

  app = application_lookup (mp->client_index);
  if (!app)
    return;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("invalid handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != app->app_index)
    {
      clib_warning ("app %u does not own session %llu", app->app_index,
		    mp->handle);
      return;
    }
  owner_app_wrk_map = app_wrk->wrk_map_index;
  app_wrk = application_get_worker (app, mp->wrk_index);

  /* This needs to come from the new owner */
  if (mp->req_wrk_index == owner_app_wrk_map)
    {
      session_req_worker_update_msg_t *wump;

      svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
					   SESSION_MQ_CTRL_EVT_RING,
					   SVM_Q_WAIT, msg);
      evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
      clib_memset (evt, 0, sizeof (*evt));
      evt->event_type = SESSION_CTRL_EVT_REQ_WORKER_UPDATE;
      wump = (session_req_worker_update_msg_t *) evt->data;
      wump->session_handle = mp->handle;
      svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
      return;
    }

  rv = app_worker_own_session (app_wrk, s);
  if (rv)
    session_stat_error_inc (rv, 1);

  /*
   * Send reply
   */
  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING,
				       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_WORKER_UPDATE_REPLY;
  rmp = (session_worker_update_reply_msg_t *) evt->data;
  /* NOTE: the source excerpt is truncated here, mid-handler; the cast above
   * is completed from the declaration of rmp at the top of this function. */
#!/usr/bin/env python
"""IP4 VRF Multi-instance Test Case HLD:

**NOTES:**
    - a higher number of pg-ip4 interfaces causes problems => only 15 pg-ip4 \
    interfaces in 5 VRFs are tested
    - jumbo packets in the configuration with 15 pg-ip4 interfaces lead to \
    problems too
    - Reset of FIB table / VRF does not remove routes from the IP FIB (see \
    Jira ticket https://jira.fd.io/browse/VPP-560), so checks of reset VRF \
    tables are skipped in tests 2, 3 and 4

**config 1**
    - add 15 pg-ip4 interfaces
    - configure 5 hosts per pg-ip4 interface
    - configure 4 VRFs
    - add 3 pg-ip4 interfaces per VRF

**test 1**
    - send IP4 packets between all pg-ip4 interfaces in all VRF groups

**verify 1**
    - check VRF data by parsing output of ip_fib_dump API command
    - all packets received correctly in case of pg-ip4 interfaces in VRF
    - no packet received in case of pg-ip4 interfaces not in VRF

**config 2**
    - delete 2 VRFs

**test 2**
    - send IP4 packets between all pg-ip4 interfaces in all VRF groups

**verify 2**
    - check VRF data by parsing output of ip_fib_dump API command
    - all packets received correctly in case of pg-ip4 interfaces in VRF
    - no packet received in case of pg-ip4 interfaces not in VRF

**config 3**
    - add 1 of deleted VRFs and 1 new VRF

**test 3**
    - send IP4 packets between all pg-ip4 interfaces in all VRF groups

**verify 3**
    - check VRF data by parsing output of ip_fib_dump API command
    - all packets received correctly in case of pg-ip4 interfaces in VRF
    - no packet received in case of pg-ip4 interfaces not in VRF

**config 4**
    - delete all VRFs (i.e. no VRF except the default VRF=0 remains)

**test 4**
    - send IP4 packets between all pg-ip4 interfaces in all VRF groups

**verify 4**
    - check VRF data by parsing output of ip_fib_dump API command
    - all packets received correctly in case of pg-ip4 interfaces in VRF
    - no packet received in case of pg-ip4 interfaces not in VRF
"""

import unittest
import random

from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP, UDP

from framework import VppTestCase, VppTestRunner
from util import ppp


def is_ipv4_misc(p):
    """ Is packet one of uninteresting IPv4 broadcasts? """
    if p.haslayer(ARP):
        return True
    return False
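
# Usage sketch (not part of the original file): the test framework's
# VppPGInterface capture helpers accept a filter_out_fn callback, so tests
# can drop ARP noise before verifying packet counts, e.g.:
#
#   capture = pg_if.get_capture(filter_out_fn=is_ipv4_misc)
#   pg_if.assert_nothing_captured(filter_out_fn=is_ipv4_misc)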


class TestIp4VrfMultiInst(VppTestCase):
    """ IP4 VRF  Multi-instance Test Case """

    @classmethod
    def setUpClass(cls):
        """
        Perform standard class setup (defined by class method setUpClass in
        class VppTestCase) before running the test case, set test case related
        variables and configure VPP.
        """
        super(TestIp4VrfMultiInst, cls).setUpClass()

        # Test variables
        cls.hosts_per_pg = 5
        cls.nr_of_vrfs = 5
        cls.pg_ifs_per_vrf = 3

        try:
            # Create pg interfaces
            cls.create_pg_interfaces(
                range(cls.nr_of_vrfs * cls.pg_ifs_per_vrf))

            # Packet flows mapping pg0 -> pg1, pg2 etc.
            cls.flows = dict()
            for i in range(len(cls.pg_interfaces)):
                multiplicand = i // cls.pg_ifs_per_vrf
                pg_list = [
                    cls.pg_interfaces[multiplicand * cls.pg_ifs_per_vrf + j]
                    for j in range(cls.pg_ifs_per_vrf)
                    if (multiplicand * cls.pg_ifs_per_vrf + j) != i]
                cls.flows[cls.pg_interfaces[i]] = pg_list

            # Packet sizes - jumbo packet (9018 bytes) skipped
            cls.pg_if_packet_sizes = [64, 512, 1518]

            # Set up all interfaces
            for pg_if in cls.pg_interfaces:
                pg_if.admin_up()
                pg_if.generate_remote_hosts(cls.hosts_per_pg)

            # Create list of VRFs
            cls.vrf_list = list()

            # Create list of deleted VRFs
            cls.vrf_deleted_list = list()

            # Create list of pg_interfaces in VRFs
            cls.pg_in_vrf = list()

            # Create list of pg_interfaces not in VRFs
            cls.pg_not_in_vrf = [pg_if for pg_if in cls.pg_interfaces]

            # Create mapping of pg_interfaces to VRF IDs
            cls.pg_if_by_vrf_id = dict()
            for i in range(cls.nr_of_vrfs):
                vrf_id = i + 1
                pg_list = [
                    cls.pg_interfaces[i * cls.pg_ifs_per_vrf + j]
                    for j in range(cls.pg_ifs_per_vrf)]
                cls.pg_if_by_vrf_id[vrf_id] = pg_list

        except Exception:
            super(TestIp4VrfMultiInst, cls).tearDownClass()
            raise

    def setUp(self):
        """
        Clear trace and packet infos before running each test.
        """
        super(TestIp4VrfMultiInst, self).setUp()
        self.reset_packet_infos()

    def tearDown(self):
        """
        Show various debug prints after each test.
        """
        super(TestIp4VrfMultiInst, self).tearDown()
        if not self.vpp_dead:
            self.logger.info(self.vapi.ppcli("show ip fib"))
            self.logger.info(self.vapi.ppcli("show ip arp"))

    def create_vrf_and_assign_interfaces(self, count, start=1):
        """
        Create the required number of FIB tables / VRFs and assign 3 pg-ip4
        interfaces to every FIB table / VRF.

        :param int count: Number of FIB tables / VRFs to be created.
        :param int start: Starting number of the FIB table / VRF ID. \
        (Default value = 1)
        """

        for i in range(count):
            vrf_id = i + start
            pg_if = self.pg_if_by_vrf_id[vrf_id][0]
            dest_addr = pg_if.remote_hosts[0].ip4n
            dest_addr_len = 24
            self.vapi.ip_table_add_del(vrf_id, is_add=1)
            self.vapi.ip_add_del_route(
                dest_addr, dest_addr_len, pg_if.local_ip4n,
                table_id=vrf_id, is_multipath=1)
            self.logger.info("IPv4 VRF ID %d created" % vrf_id)
            if vrf_id not in self.vrf_list:
                self.vrf_list.append(vrf_id)
            if vrf_id in self.vrf_deleted_list:
                self.vrf_deleted_list.remove(vrf_id)
            for j in range(self.pg_ifs_per_vrf):
                pg_if = self.pg_if_by_vrf_id[vrf_id][j]
                pg_if.set_table_ip4(vrf_id)
                self.logger.info("pg-interface %s added to IPv4 VRF ID %d"
                                 % (pg_if.name, vrf_id))
                if pg_if not in self.pg_in_vrf:
                    self.pg_in_vrf.append(pg_if)
                if pg_if in self.pg_not_in_vrf:
                    self.pg_not_in_vrf.remove(pg_if)
                pg_if.config_ip4()
                pg_if.configure_ipv4_neighbors()
        self.logger.debug(self.vapi.ppcli("show ip fib"))
        self.logger.debug(self.vapi.ppcli("show ip arp"))

    def delete_vrf(self, vrf_id):
        """
        Delete required FIB table / VRF.

        :param int vrf_id: The FIB table / VRF ID to be deleted.
        """
        # self.vapi.reset_vrf(vrf_id, is_ipv6=0)
        self.vapi.reset_fib(vrf_id, is_ipv6=0)
        if vrf_id in self.vrf_list:
            self.vrf_list.remove(vrf_id)
        if vrf_id not in self.vrf_deleted_list:
            self.vrf_deleted_list.append(vrf_id)
        for j in range(self.pg_ifs_per_vrf):
            pg_if = self.pg_if_by_vrf_id[vrf_id][j]
            pg_if.unconfig_ip4()
            if pg_if in self.pg_in_vrf:
                self.pg_in_vrf.remove(pg_if)
            if pg_if not in self.pg_not_in_vrf:
                self.pg_not_in_vrf.append(pg_if)
        self.logger.info("IPv4 VRF ID %d reset" % vrf_id)
        self.logger.debug(self.vapi.ppcli("show ip fib"))
        self.logger.debug(self.vapi.ppcli("show ip arp"))
        self.vapi.ip_table_add_del(vrf_id, is_add=0)
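
    # Usage sketch (hypothetical, mirroring "config 2"): deleting two VRFs
    # moves their pg interfaces back to the not-in-VRF list used by the
    # verification step:
    #
    #   self.delete_vrf(1)
    #   self.delete_vrf(2)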

    def create_stream(self, src_if, packet_sizes):
        """
        Create input packet stream for defined interface using hosts list.

        :param object src_if: Interface to create packet stream for.
        :param list packet_sizes: List of required packet sizes.
        :return: Stream of packets.
        """
        pkts = []
        src_hosts = src_if.remote_hosts
        for dst_if in self.flows[src_if]:
            for dst_host in dst_if.remote_hosts:
                src_host = random.choice(src_hosts)
                pkt_info = self.create_packet_info(src_if, dst_if)
                payload = self.info_to_payload(pkt_info)
                p = (Ether(dst=src_if.local_mac, src=src_host.mac) /
                     IP(src=src_host.ip4, dst=dst_host.ip4) /
                     UDP(sport=1234, dport=1234) /
                     Raw(payload))
                pkt_info.data = p.copy()
                size = random.choice(packet_sizes)
                self.extend_packet(p, size)
                pkts.append(p)
        self.logger.debug("Input stream created for port %s. Length: %u pkt(s)"
                          % (src_if.name, len(pkts)))
        return pkts
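
    # Usage sketch (hypothetical test body): the standard VppTestCase pattern
    # for driving the generated stream through VPP:
    #
    #   pkts = self.create_stream(pg_if, self.pg_if_packet_sizes)
    #   pg_if.add_stream(pkts)
    #   self.pg_enable_capture(self.pg_interfaces)
    #   self.pg_start()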

    def verify_capture(self, pg_if, capture):
        """
        Verify captured input packet stream for defined interface.

        :param object pg_if: Interface to verify captured packet stream for.
        :param list capture: Captured packet stream.
        """
        last_info = dict()
        for i in self.pg_interfaces:
            last_info[i.sw_if_index] = None
        dst_sw_if_index = pg_if.sw_if_index
        for packet in capture:
            try:
                ip = packet[IP]
                udp = packet[UDP]
                payload_info = self.payload_to_info(str(packet[Raw]))
                packet_index = payload_info.index
                self.assertEqual(payload_info.dst, dst_sw_if_index)
                self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
                                  (pg_if.name, payload_info.src, packet_index))
                next_info