/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * main.c: main vector processing loop
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/* Note: the header names were stripped during extraction; this is the
   usual include set for vlib/main.c. */
#include <math.h>
#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vppinfra/heap.h>
#include <vppinfra/os.h>

CJ_GLOBAL_LOG_PROTOTYPE;

/* Actually allocate a few extra slots of vector data to support
   speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_ALLOC (VLIB_FRAME_SIZE + 4)

u32 wraps;

always_inline u32
vlib_frame_bytes (u32 n_scalar_bytes, u32 n_vector_bytes)
{
  u32 n_bytes;

  /* Make room for vlib_frame_t plus scalar arguments. */
  n_bytes = vlib_frame_vector_byte_offset (n_scalar_bytes);

  /* Make room for vector arguments.
     Allocate a few extra slots of vector data to support
     speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_EXTRA 4
  n_bytes += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * n_vector_bytes;

  /* Magic number is first 32bit number after vector data.
     Used to make sure that vector data is never overrun. */
#define VLIB_FRAME_MAGIC (0xabadc0ed)
  n_bytes += sizeof (u32);

  /* Pad to cache line. */
  n_bytes = round_pow2 (n_bytes, CLIB_CACHE_LINE_BYTES);

  return n_bytes;
}
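/*
 * Resulting frame layout, as computed by vlib_frame_bytes () above:
 *
 *   vlib_frame_t + scalar args   vlib_frame_vector_byte_offset (scalar_size)
 *   vector args                  (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA)
 *                                  * vector_size
 *   u32 magic                    VLIB_FRAME_MAGIC, overrun canary
 *   padding                      up to a CLIB_CACHE_LINE_BYTES boundary
 */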
always_inline u32 *
vlib_frame_find_magic (vlib_frame_t * f, vlib_node_t * node)
{
  void *p = f;

  p += vlib_frame_vector_byte_offset (node->scalar_size);

  p += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * node->vector_size;

  return p;
}

static vlib_frame_size_t *
get_frame_size_info (vlib_node_main_t * nm,
		     u32 n_scalar_bytes, u32 n_vector_bytes)
{
  uword key = (n_scalar_bytes << 16) | n_vector_bytes;
  uword *p, i;

  p = hash_get (nm->frame_size_hash, key);
  if (p)
    i = p[0];
  else
    {
      i = vec_len (nm->frame_sizes);
      vec_validate (nm->frame_sizes, i);
      hash_set (nm->frame_size_hash, key, i);
    }

  return vec_elt_at_index (nm->frame_sizes, i);
}

static u32
vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index,
			  u32 frame_flags)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;
  vlib_node_t *to_node;
  vlib_frame_t *f;
  u32 fi, l, n, scalar_size, vector_size;

  to_node = vlib_get_node (vm, to_node_index);

  scalar_size = to_node->scalar_size;
  vector_size = to_node->vector_size;

  fs = get_frame_size_info (nm, scalar_size, vector_size);
  n = vlib_frame_bytes (scalar_size, vector_size);
  if ((l = vec_len (fs->free_frame_indices)) > 0)
    {
      /* Allocate from end of free list. */
      fi = fs->free_frame_indices[l - 1];
      f = vlib_get_frame_no_check (vm, fi);
      _vec_len (fs->free_frame_indices) = l - 1;
    }
  else
    {
      f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN);
      fi = vlib_frame_index_no_check (vm, f);
    }

  /* Poison frame when debugging. */
  if (CLIB_DEBUG > 0)
    memset (f, 0xfe, n);

  /* Insert magic number. */
  {
    u32 *magic;

    magic = vlib_frame_find_magic (f, to_node);
    *magic = VLIB_FRAME_MAGIC;
  }

  f->flags = VLIB_FRAME_IS_ALLOCATED | frame_flags;
  f->n_vectors = 0;
  f->scalar_size = scalar_size;
  f->vector_size = vector_size;

  fs->n_alloc_frames += 1;

  return fi;
}

/* Allocate a frame from FROM_NODE to TO_NODE via TO_NEXT_INDEX.
   Returns frame index. */
static u32
vlib_frame_alloc (vlib_main_t * vm, vlib_node_runtime_t * from_node_runtime,
		  u32 to_next_index)
{
  vlib_node_t *from_node;

  from_node = vlib_get_node (vm, from_node_runtime->node_index);
  ASSERT (to_next_index < vec_len (from_node->next_nodes));

  return vlib_frame_alloc_to_node (vm, from_node->next_nodes[to_next_index],
				   /* frame_flags */ 0);
}

vlib_frame_t *
vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index)
{
  u32 fi = vlib_frame_alloc_to_node (vm, to_node_index,
				     /* frame_flags */
				     VLIB_FRAME_FREE_AFTER_DISPATCH);
  return vlib_get_frame (vm, fi);
}

void
vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index, vlib_frame_t * f)
{
  vlib_pending_frame_t *p;
  vlib_node_t *to_node;

  if (f->n_vectors == 0)
    return;

  to_node = vlib_get_node (vm, to_node_index);

  vec_add2 (vm->node_main.pending_frames, p, 1);

  f->flags |= VLIB_FRAME_PENDING;
  p->frame_index = vlib_frame_index (vm, f);
  p->node_runtime_index = to_node->runtime_index;
  p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
}
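/*
 * Typical usage of the get/put pair above when enqueueing to a node
 * from outside the graph dispatch loop (a minimal sketch; `bi', a
 * buffer index obtained elsewhere, is hypothetical):
 *
 *   vlib_frame_t *f = vlib_get_frame_to_node (vm, to_node_index);
 *   u32 *to_next = vlib_frame_vector_args (f);
 *   to_next[0] = bi;
 *   f->n_vectors = 1;
 *   vlib_put_frame_to_node (vm, to_node_index, f);
 *
 * Since the frame was allocated with VLIB_FRAME_FREE_AFTER_DISPATCH,
 * it is returned to the free list once the target node has run.
 */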
/* Free given frame. */
void
vlib_frame_free (vlib_main_t * vm, vlib_node_runtime_t * r, vlib_frame_t * f)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_t *node;
  vlib_frame_size_t *fs;
  u32 frame_index;

  ASSERT (f->flags & VLIB_FRAME_IS_ALLOCATED);

  node = vlib_get_node (vm, r->node_index);
  fs = get_frame_size_info (nm, node->scalar_size, node->vector_size);

  frame_index = vlib_frame_index (vm, f);

  ASSERT (f->flags & VLIB_FRAME_IS_ALLOCATED);

  /* No next frames may point to freed frame. */
  if (CLIB_DEBUG > 0)
    {
      vlib_next_frame_t *nf;
      vec_foreach (nf, vm->node_main.next_frames)
	ASSERT (nf->frame_index != frame_index);
    }

  f->flags &= ~VLIB_FRAME_IS_ALLOCATED;

  vec_add1 (fs->free_frame_indices, frame_index);
  ASSERT (fs->n_alloc_frames > 0);
  fs->n_alloc_frames -= 1;
}

static clib_error_t *
show_frame_stats (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;

  vlib_cli_output (vm, "%=6s%=12s%=12s", "Size", "# Alloc", "# Free");
  vec_foreach (fs, nm->frame_sizes)
  {
    u32 n_alloc = fs->n_alloc_frames;
    u32 n_free = vec_len (fs->free_frame_indices);

    if (n_alloc + n_free > 0)
      vlib_cli_output (vm, "%=6d%=12d%=12d",
		       fs - nm->frame_sizes, n_alloc, n_free);
  }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_frame_stats_cli, static) = {
  .path = "show vlib frame-allocation",
  .short_help = "Show node dispatch frame statistics",
  .function = show_frame_stats,
};
/* *INDENT-ON* */

/* Change ownership of enqueue rights to given next node. */
static void
vlib_next_frame_change_ownership (vlib_main_t * vm,
				  vlib_node_runtime_t * node_runtime,
				  u32 next_index)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *next_frame;
  vlib_node_t *node, *next_node;

  node = vec_elt (nm->nodes, node_runtime->node_index);

  /* Only internal & input nodes are allowed to call other nodes. */
  ASSERT (node->type == VLIB_NODE_TYPE_INTERNAL
	  || node->type == VLIB_NODE_TYPE_INPUT
	  || node->type == VLIB_NODE_TYPE_PROCESS);

  ASSERT (vec_len (node->next_nodes) == node_runtime->n_next_nodes);

  next_frame =
    vlib_node_runtime_get_next_frame (vm, node_runtime, next_index);
  next_node = vec_elt (nm->nodes, node->next_nodes[next_index]);

  if (next_node->owner_node_index != VLIB_INVALID_NODE_INDEX)
    {
      /* Get frame from previous owner. */
      vlib_next_frame_t *owner_next_frame;
      vlib_next_frame_t tmp;

      owner_next_frame =
	vlib_node_get_next_frame (vm,
				  next_node->owner_node_index,
				  next_node->owner_next_index);

      /* Swap target next frame with owner's. */
      tmp = owner_next_frame[0];
      owner_next_frame[0] = next_frame[0];
      next_frame[0] = tmp;

      /*
       * If next_frame is already pending, we have to track down
       * all pending frames and fix their next_frame_index fields.
       */
      if (next_frame->flags & VLIB_FRAME_PENDING)
	{
	  vlib_pending_frame_t *p;
	  if (next_frame->frame_index != ~0)
	    {
	      vec_foreach (p, nm->pending_frames)
	      {
		if (p->frame_index == next_frame->frame_index)
		  {
		    p->next_frame_index =
		      next_frame - vm->node_main.next_frames;
		  }
	      }
	    }
	}
    }
  else
    {
      /* No previous owner. Take ownership. */
      next_frame->flags |= VLIB_FRAME_OWNER;
    }

  /* Record new owner. */
  next_node->owner_node_index = node->index;
  next_node->owner_next_index = next_index;

  /* Now we should be owner. */
  ASSERT (next_frame->flags & VLIB_FRAME_OWNER);
}

/* Make sure that magic number is still there.
   Otherwise, it is likely that caller has overrun frame arguments. */
always_inline void
validate_frame_magic (vlib_main_t * vm,
		      vlib_frame_t * f, vlib_node_t * n, uword next_index)
{
  vlib_node_t *next_node = vlib_get_node (vm, n->next_nodes[next_index]);
  u32 *magic = vlib_frame_find_magic (f, next_node);
  ASSERT (VLIB_FRAME_MAGIC == magic[0]);
}
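/*
 * vlib_get_next_frame_internal () below backs the vlib_get_next_frame ()
 * macro used inside node dispatch functions. A minimal sketch of the
 * canonical enqueue loop (buffer-index bookkeeping elided):
 *
 *   while (n_left_from > 0)
 *     {
 *       u32 *to_next, n_left_to_next;
 *       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 *       while (n_left_from > 0 && n_left_to_next > 0)
 *         {
 *           to_next[0] = from[0];
 *           to_next += 1; from += 1;
 *           n_left_to_next -= 1; n_left_from -= 1;
 *         }
 *       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 *     }
 */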
vlib_frame_t *
vlib_get_next_frame_internal (vlib_main_t * vm,
			      vlib_node_runtime_t * node,
			      u32 next_index, u32 allocate_new_next_frame)
{
  vlib_frame_t *f;
  vlib_next_frame_t *nf;
  u32 n_used;

  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);

  /* Make sure this next frame owns right to enqueue to destination frame. */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_OWNER)))
    vlib_next_frame_change_ownership (vm, node, next_index);

  /* ??? Don't need valid flag: can use frame_index == ~0 */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_IS_ALLOCATED)))
    {
      nf->frame_index = vlib_frame_alloc (vm, node, next_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;
    }

  f = vlib_get_frame (vm, nf->frame_index);

  /* Has frame been removed from pending vector (e.g. finished dispatching)?
     If so we can reuse frame. */
  if ((nf->flags & VLIB_FRAME_PENDING) && !(f->flags & VLIB_FRAME_PENDING))
    {
      nf->flags &= ~VLIB_FRAME_PENDING;
      f->n_vectors = 0;
    }

  /* Allocate new frame if current one is already full. */
  n_used = f->n_vectors;
  if (n_used >= VLIB_FRAME_SIZE || (allocate_new_next_frame && n_used > 0))
    {
      /* Old frame may need to be freed after dispatch, since we'll have
         two redundant frames from node -> next node. */
      if (!(nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH))
	{
	  vlib_frame_t *f_old = vlib_get_frame (vm, nf->frame_index);
	  f_old->flags |= VLIB_FRAME_FREE_AFTER_DISPATCH;
	}

      /* Allocate new frame to replace full one. */
      nf->frame_index = vlib_frame_alloc (vm, node, next_index);
      f = vlib_get_frame (vm, nf->frame_index);
      n_used = f->n_vectors;
    }

  /* Should have free vectors in frame now. */
  ASSERT (n_used < VLIB_FRAME_SIZE);

  if (CLIB_DEBUG > 0)
    {
      validate_frame_magic (vm, f,
			    vlib_get_node (vm, node->node_index), next_index);
    }

  return f;
}

static void
vlib_put_next_frame_validate (vlib_main_t * vm,
			      vlib_node_runtime_t * rt,
			      u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  vlib_node_runtime_t *next_rt;
  vlib_node_t *next_node;
  u32 n_before, n_after;

  nf = vlib_node_runtime_get_next_frame (vm, rt, next_index);
  f = vlib_get_frame (vm, nf->frame_index);

  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_after = VLIB_FRAME_SIZE - n_vectors_left;
  n_before = f->n_vectors;

  ASSERT (n_after >= n_before);

  next_rt = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
			      nf->node_runtime_index);
  next_node = vlib_get_node (vm, next_rt->node_index);
  if (n_after > 0 && next_node->validate_frame)
    {
      u8 *msg = next_node->validate_frame (vm, rt, f);
      if (msg)
	{
	  clib_warning ("%v", msg);
	  ASSERT (0);
	}
      vec_free (msg);
    }
}
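/*
 * Note: vlib_put_next_frame () is the mandatory companion of
 * vlib_get_next_frame (). n_vectors_left counts the *unused* slots
 * remaining in the frame, so the frame's fill level becomes
 * VLIB_FRAME_SIZE - n_vectors_left, as computed below.
 */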
void
vlib_put_next_frame (vlib_main_t * vm,
		     vlib_node_runtime_t * r,
		     u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  u32 n_vectors_in_frame;

  if (buffer_main.callbacks_registered == 0 && CLIB_DEBUG > 0)
    vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);

  nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
  f = vlib_get_frame (vm, nf->frame_index);

  /* Make sure that magic number is still there.
     Otherwise, caller has overrun frame meta data. */
  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *node = vlib_get_node (vm, r->node_index);
      validate_frame_magic (vm, f, node, next_index);
    }

  /* Convert # of vectors left -> number of vectors there. */
  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_vectors_in_frame = VLIB_FRAME_SIZE - n_vectors_left;

  f->n_vectors = n_vectors_in_frame;

  /* If vectors were added to frame, add to pending vector. */
  if (PREDICT_TRUE (n_vectors_in_frame > 0))
    {
      vlib_pending_frame_t *p;
      u32 v0, v1;

      r->cached_next_index = next_index;

      if (!(f->flags & VLIB_FRAME_PENDING))
	{
	  __attribute__ ((unused)) vlib_node_t *node;
	  vlib_node_t *next_node;
	  vlib_node_runtime_t *next_runtime;

	  node = vlib_get_node (vm, r->node_index);
	  next_node = vlib_get_next_node (vm, r->node_index, next_index);
	  next_runtime = vlib_node_get_runtime (vm, next_node->index);

	  vec_add2 (nm->pending_frames, p, 1);

	  p->frame_index = nf->frame_index;
	  p->node_runtime_index = nf->node_runtime_index;
	  p->next_frame_index = nf - nm->next_frames;
	  nf->flags |= VLIB_FRAME_PENDING;
	  f->flags |= VLIB_FRAME_PENDING;

	  /*
	   * If we're going to dispatch this frame on another thread,
	   * force allocation of a new frame. Otherwise, we create
	   * a dangling frame reference. Each thread has its own copy of
	   * the next_frames vector.
	   */
#!/usr/bin/env python

import unittest
import socket

from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_l2 import L2_PORT_TYPE, BRIDGE_FLAGS

from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP


class TestL2Flood(VppTestCase):
    """ L2-flood """

    def setUp(self):
        super(TestL2Flood, self).setUp()

        # 12 L2 interfaces and one L3
        self.create_pg_interfaces(range(13))
        self.create_bvi_interfaces(1)

        for i in self.pg_interfaces:
            i.admin_up()
        for i in self.bvi_interfaces:
            i.admin_up()

        self.pg12.config_ip4()
        self.pg12.resolve_arp()
        self.bvi0.config_ip4()

    def tearDown(self):
        self.pg12.unconfig_ip4()
        self.bvi0.unconfig_ip4()

        for i in self.pg_interfaces:
            i.admin_down()
        for i in self.bvi_interfaces:
            i.admin_down()
        super(TestL2Flood, self).tearDown()

    def test_flood(self):
        """ L2 Flood Tests """

        #
        # Create a single bridge domain
        #
        self.vapi.bridge_domain_add_del(bd_id=1)

        #
        # add each interface to the BD: 4 interfaces per split horizon group
        #
        for i in self.pg_interfaces[0:4]:
            self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=i.sw_if_index,
                                                 bd_id=1, shg=0)
        for i in self.pg_interfaces[4:8]:
            self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=i.sw_if_index,
                                                 bd_id=1, shg=1)
        for i in self.pg_interfaces[8:12]:
            self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=i.sw_if_index,
                                                 bd_id=1, shg=2)
        for i in self.bvi_interfaces:
            self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=i.sw_if_index,
                                                 bd_id=1, shg=2,
                                                 port_type=L2_PORT_TYPE.BVI)
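
        #
        # Split-horizon rule exercised by this test: a flood is never
        # replicated to members of the same non-zero SHG as the ingress
        # port, while SHG 0 ports receive floods from every group.
        #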

        p = (Ether(dst="ff:ff:ff:ff:ff:ff",
                   src="00:00:de:ad:be:ef") /
             IP(src="10.10.10.10", dst="1.1.1.1") /
             UDP(sport=1234, dport=1234) /
             Raw('\xa5' * 100))

        #
        # input on pg0 expect copies on pg1->11
        # this is in SHG=0 so it's flooded to all, except pg0 since that's
        # the ingress link
        #
        self.pg0.add_stream(p*65)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        for i in self.pg_interfaces[1:12]:
            rx0 = i.get_capture(65, timeout=1)

        #
        # input on pg4 (SHG=1) expect copies on pg0->3 (SHG=0)
        # and pg8->11 (SHG=2)
        #
        self.pg4.add_stream(p*65)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        for i in self.pg_interfaces[:4]:
            rx0 = i.get_capture(65, timeout=1)
        for i in self.pg_interfaces[8:12]:
            rx0 = i.get_capture(65, timeout=1)
        for i in self.pg_interfaces[4:8]:
            i.assert_nothing_captured(remark="Different SH group")

        #
        # Add an IP route so that a packet hitting the BVI is sent out of pg12
        #
        ip_route = VppIpRoute(self, "1.1.1.1", 32,
                              [VppRoutePath(self.pg12.remote_ip4,
                                            self.pg12.sw_if_index)])
        ip_route.add_vpp_config()

        self.logger.info(self.vapi.cli("sh bridge 1 detail"))

        #
        # input on pg0 expect copies on pg1->12
        # this is in SHG=0 so it's flooded to all, except pg0 since that's
        # the ingress link
        #
        self.pg0.add_stream(p*65)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        for i in self.pg_interfaces[1:]:
            rx0 = i.get_capture(65, timeout=1)

        #
        # input on pg4 (SHG=1) expect copies on pg0->3 (SHG=0)
        # and pg8->12 (SHG=2)
        #
        self.pg4.add_stream(p*65)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        for i in self.pg_interfaces[:4]:
            rx0 = i.get_capture(65, timeout=1)
        for i in self.pg_interfaces[8:13]:
            rx0 = i.get_capture(65, timeout=1)
        for i in self.pg_interfaces[4:8]:
            i.assert_nothing_captured(remark="Different SH group")

        #
        # cleanup
        #
        for i in self.pg_interfaces[:12]:
            self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=i.sw_if_index,
                                                 bd_id=1, enable=0)
        for i in self.bvi_interfaces:
            self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=i.sw_if_index,
                                                 bd_id=1, shg=2,
                                                 port_type=L2_PORT_TYPE.BVI,
                                                 enable=0)

        self.vapi.bridge_domain_add_del(bd_id=1, is_add=0)

    def test_flood_one(self):
        """ L2 no-Flood Test """

        #
        # Create a single bridge domain
        #
        self.vapi.bridge_domain_add_del(bd_id=1)

        #
        # add 2 interfaces to the BD. this means a flood from one member
        # goes to exactly one other member
        #
        for i in self.pg_interfaces[:2]:
            self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=i.sw_if_index,
                                                 bd_id=1, shg=0)

        p = (Ether(dst="ff:ff:ff:ff:ff:ff",
                   src="00:00:de:ad:be:ef") /
             IP(src="10.10.10.10", dst="1.1.1.1") /
             UDP(sport=1234, dport=1234) /
             Raw('\xa5' * 100))

        #
        # input on pg0 expect copies on pg1
        #
        self.send_and_expect(self.pg0, p*65, self.pg1)

        #
        # cleanup
        #
        for i in self.pg_interfaces[:2]:
            self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=i.sw_if_index,
                                                 bd_id=1, enable=0)

        self.vapi.bridge_domain_add_del(bd_id=1, is_add=0)


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)