/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>
#include <vppinfra/fifo.h>
#include <vlib/buffer.h>
#include <vlib/physmem_funcs.h>
#include <vlib/main.h>
#include <vlib/node.h>

/** \file
    vlib buffer access methods.
*/
always_inline void
vlib_buffer_validate (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;

  /* reference count in allocated buffer always must be 1 or higher */
  ASSERT (b->ref_count > 0);

  /* verify that buffer pool index is valid */
  bp = vec_elt_at_index (bm->buffer_pools, b->buffer_pool_index);
  ASSERT (pointer_to_uword (b) >= bp->start);
  ASSERT (pointer_to_uword (b) < bp->start + bp->size -
	  (bp->data_size + sizeof (vlib_buffer_t)));
}

always_inline void *
vlib_buffer_ptr_from_index (uword buffer_mem_start, u32 buffer_index,
			    uword offset)
{
  offset += ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  return uword_to_pointer (buffer_mem_start + offset, vlib_buffer_t *);
}

/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_t *b;

  b = vlib_buffer_ptr_from_index (bm->buffer_mem_start, buffer_index, 0);
  vlib_buffer_validate (vm, b);
  return b;
}

static_always_inline u32
vlib_buffer_get_default_data_size (vlib_main_t * vm)
{
  return vm->buffer_main->default_data_size;
}

static_always_inline void
vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
{
#if defined(CLIB_HAVE_VEC512)
  while (n_indices >= 16)
    {
      u32x16_store_unaligned (u32x16_load_unaligned (src), dst);
      dst += 16;
      src += 16;
      n_indices -= 16;
    }
#endif

#if defined(CLIB_HAVE_VEC256)
  while (n_indices >= 8)
    {
      u32x8_store_unaligned (u32x8_load_unaligned (src), dst);
      dst += 8;
      src += 8;
      n_indices -= 8;
    }
#endif

#if defined(CLIB_HAVE_VEC128)
  while (n_indices >= 4)
    {
      u32x4_store_unaligned (u32x4_load_unaligned (src), dst);
      dst += 4;
      src += 4;
      n_indices -= 4;
    }
#endif

  while (n_indices)
    {
      dst[0] = src[0];
      dst += 1;
      src += 1;
      n_indices -= 1;
    }
}

STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
static_always_inline void
vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
{
#if defined CLIB_HAVE_VEC512
  b->as_u8x64[0] = bt->as_u8x64[0];
#elif defined (CLIB_HAVE_VEC256)
  b->as_u8x32[0] = bt->as_u8x32[0];
  b->as_u8x32[1] = bt->as_u8x32[1];
#elif defined (CLIB_HAVE_VEC128)
  b->as_u8x16[0] = bt->as_u8x16[0];
  b->as_u8x16[1] = bt->as_u8x16[1];
  b->as_u8x16[2] = bt->as_u8x16[2];
  b->as_u8x16[3] = bt->as_u8x16[3];
#else
  clib_memcpy_fast (b, bt, 64);
#endif
}

always_inline u8
vlib_buffer_pool_get_default_for_numa (vlib_main_t * vm, u32 numa_node)
{
  ASSERT (numa_node < VLIB_BUFFER_MAX_NUMA_NODES);
  return vm->buffer_main->default_buffer_pool_index_for_numa[numa_node];
}
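/* Illustrative usage (a sketch, not part of the upstream API surface):
 * index-to-pointer translation plus metadata templating, assuming the
 * caller owns a valid buffer index `bi` and a pre-initialized template
 * buffer `bt` (both hypothetical names):
 *
 *   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
 *   vlib_buffer_copy_template (b, &bt);    stamps the first 64B of metadata
 *   b->current_length = 0;
 */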
/** \brief Translate array of buffer indices into buffer pointers with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (void **) array to store buffer pointers
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
			      i32 offset)
{
  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
#ifdef CLIB_HAVE_VEC256
  u64x4 off = u64x4_splat (buffer_mem_start + offset);
  /* if count is not const, compiler will not unroll while loop so we
     maintain two-in-parallel variant */
  while (count >= 8)
    {
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
      b += 8;
      bi += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128)
      u64x2 off = u64x2_splat (buffer_mem_start + offset);
      u32x4 bi4 = u32x4_load_unaligned (bi);
      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#if defined (__aarch64__)
      u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
#else
      bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#endif
      u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
      b[1] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[1], offset);
      b[2] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[2], offset);
      b[3] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[3], offset);
#endif
      b += 4;
      bi += 4;
      count -= 4;
    }
  while (count)
    {
      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
      b += 1;
      bi += 1;
      count -= 1;
    }
}

/** \brief Translate array of buffer indices into buffer pointers

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32 *) array of buffer indices
    @param b - (vlib_buffer_t **) array to store buffer pointers
    @param count - (uword) number of elements
*/
static_always_inline void
vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
{
  vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);
}

/** \brief Translate buffer pointer into buffer index

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param p - (void *) buffer pointer
    @return - (u32) buffer index
*/
always_inline u32
vlib_get_buffer_index (vlib_main_t * vm, void *p)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword offset = pointer_to_uword (p) - bm->buffer_mem_start;
  ASSERT (pointer_to_uword (p) >= bm->buffer_mem_start);
  ASSERT (offset < bm->buffer_mem_size);
  ASSERT ((offset % (1 << CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
  return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
}
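/* Illustrative usage: the common graph-node dispatch pattern (a sketch
 * assuming `vm` and `frame` are in scope, as in a node function) resolves
 * a whole frame of indices in one call rather than calling
 * vlib_get_buffer() per packet:
 *
 *   u32 *from = vlib_frame_vector_args (frame);
 *   vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
 *   vlib_get_buffers (vm, from, bufs, frame->n_vectors);
 */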
/** \brief Translate array of buffer pointers into buffer indices with offset

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
    @param offset - (i32) offset applied to each pointer
*/
static_always_inline void
vlib_get_buffer_indices_with_offset (vlib_main_t * vm, void **b, u32 * bi,
				     uword count, i32 offset)
{
#ifdef CLIB_HAVE_VEC256
  u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  u64x4 off4 = u64x4_splat (vm->buffer_main->buffer_mem_start - offset);

  while (count >= 8)
    {
      /* load 4 pointers into 256-bit register */
      u64x4 v0 = u64x4_load_unaligned (b);
      u64x4 v1 = u64x4_load_unaligned (b + 4);
      u32x8 v2, v3;

      v0 -= off4;
      v1 -= off4;

      v0 >>= CLIB_LOG2_CACHE_LINE_BYTES;
      v1 >>= CLIB_LOG2_CACHE_LINE_BYTES;

      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      v2 = u32x8_permute ((u32x8) v0, mask);
      v3 = u32x8_permute ((u32x8) v1, mask);

      /* extract lower 128-bits and save them to the array of buffer
         indices */
      u32x4_store_unaligned (u32x8_extract_lo (v2), bi);
      u32x4_store_unaligned (u32x8_extract_lo (v3), bi + 4);
      bi += 8;
      b += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
      /* equivalent non-vector implementation */
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi[1] = vlib_get_buffer_index (vm, ((u8 *) b[1]) + offset);
      bi[2] = vlib_get_buffer_index (vm, ((u8 *) b[2]) + offset);
      bi[3] = vlib_get_buffer_index (vm, ((u8 *) b[3]) + offset);
      bi += 4;
      b += 4;
      count -= 4;
    }
  while (count)
    {
      bi[0] = vlib_get_buffer_index (vm, ((u8 *) b[0]) + offset);
      bi += 1;
      b += 1;
      count -= 1;
    }
}

/** \brief Translate array of buffer pointers into buffer indices

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (vlib_buffer_t **) array of buffer pointers
    @param bi - (u32 *) array to store buffer indices
    @param count - (uword) number of elements
*/
static_always_inline void
vlib_get_buffer_indices (vlib_main_t * vm, vlib_buffer_t ** b, u32 * bi,
			 uword count)
{
  vlib_get_buffer_indices_with_offset (vm, (void **) b, bi, count, 0);
}

/** \brief Get next buffer in buffer linked list, or zero for end of list.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (vlib_buffer_t *) next buffer, or NULL
*/
always_inline vlib_buffer_t *
vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  return (b->flags & VLIB_BUFFER_NEXT_PRESENT
	  ? vlib_get_buffer (vm, b->next_buffer) : 0);
}

uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
					     vlib_buffer_t * b_first);

/** \brief Get length in bytes of the buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param b - (void *) buffer pointer
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
{
  uword len = b->current_length;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return len;

  if (PREDICT_TRUE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    return len + b->total_length_not_including_first_buffer;

  return vlib_buffer_length_in_chain_slow_path (vm, b);
}

/** \brief Get length in bytes of the buffer index buffer chain

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  return vlib_buffer_length_in_chain (vm, b);
}

/** \brief Copy buffer contents to memory

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @param contents - (u8 *) memory, must be large enough
    @return - (uword) length of buffer chain
*/
always_inline uword
vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
{
  uword content_len = 0;
  uword l;
  vlib_buffer_t *b;

  while (1)
    {
      b = vlib_get_buffer (vm, buffer_index);
      l = b->current_length;
      clib_memcpy_fast (contents + content_len, b->data + b->current_data, l);
      content_len += l;
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;
      buffer_index = b->next_buffer;
    }

  return content_len;
}

always_inline uword
vlib_buffer_get_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_physmem_get_pa (vm, b->data);
}

always_inline uword
vlib_buffer_get_current_pa (vlib_main_t * vm, vlib_buffer_t * b)
{
  return vlib_buffer_get_pa (vm, b) + b->current_data;
}
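/* Illustrative usage: total byte count of a possibly chained packet given
 * only its first buffer index `bi` (hypothetical variable):
 *
 *   uword bytes = vlib_buffer_index_length_in_chain (vm, bi);
 *
 * The fast path costs a single flags test; next_buffer links are walked
 * only when VLIB_BUFFER_TOTAL_LENGTH_VALID is not set.
 */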
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)

typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
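/* Illustrative usage: the usual prefetch-ahead loop shape (a sketch
 * assuming `from` and `n_left` in a dispatch function); the header of a
 * buffer due in a later iteration is prefetched while the current one is
 * handled, hiding the metadata cache miss:
 *
 *   while (n_left > 0)
 *     {
 *       if (n_left > 2)
 *         vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
 *       vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
 *       ... process b0 ...
 *       from += 1;
 *       n_left -= 1;
 *     }
 */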
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/default.robot
| Resource | resources/libraries/robot/bridge_domain.robot
| Resource | resources/libraries/robot/testing_path.robot
| Resource | resources/libraries/robot/qemu.robot
| Library  | resources.libraries.python.Trace
| Force Tags | HW_ENV | VM_ENV
| Test Setup | Func Test Setup
| Test Teardown | Func Test Teardown
| Documentation | *L2 bridge-domain test cases*
| ...
| ... | *[Top] Network Topologies:* TG=DUT1=DUT2=TG 3-node circular topology
| ... | with double parallel links.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4-ICMPv4 for L2 switching of
| ... | IPv4; Eth-IPv6-ICMPv6 for L2 switching of IPv6. Both apply
| ... | to all links.
| ... | *[Cfg] DUT configuration:* DUT1 and DUT2 are configured with L2
| ... | bridge-domain (L2BD) switching combined with MAC learning enabled
| ... | and Split Horizon Groups (SHG).
| ... | *[Ver] TG verification:* Test ICMPv4 (or ICMPv6) Echo Request packets
| ... | are sent in both directions by TG on links to DUT1 and DUT2; on
| ... | receive TG verifies packets for correctness and their IPv4 (IPv6)
| ... | src-addr, dst-addr and MAC addresses.
| ... | *[Ref] Applicable standard specifications:*

*** Variables ***
| ${bd_id1}= | 1
| ${bd_id2}= | 2

| ${shg1}= | 3
| ${shg2}= | 4

*** Test Cases ***
| TC01: DUT1 and DUT2 with L2BD (MAC learn) and SHG switch between four TG links
| | [Documentation]
| | ... | [Top] TG=DUT1=DUT2=TG. [Enc] Eth-IPv4-ICMPv4. [Cfg] On DUT1 and \
| | ... | DUT2 configure four i/fs into L2BD with MAC learning and the
| | ... | same SHG on i/fs towards TG. [Ver] Make TG verify ICMPv4 Echo
| | ... | Req pkts are switched thru DUT1 and DUT2 in both directions and
| | ... | are correct on receive; verify no pkts are switched thru SHG
| | ... | isolated interfaces. [Ref]
| | [Tags] | 3_NODE_DOUBLE_LINK_TOPO
| | Given Path for 3-node BD-SHG testing is set | ${nodes['TG']}
| | ...                                         | ${nodes['DUT1']}
| | ...                                         | ${nodes['DUT2']}
| | And Interfaces in 3-node BD-SHG testing are up
| | When Bridge domain on DUT node is created | ${dut1_node} | ${bd_id1}
| | And Interface is added to bridge domain | ${dut1_node} | ${dut1_to_tg_if1}
| | ...                                     | ${bd_id1} | ${shg1}
| | And Interface is added to bridge domain | ${dut1_node} | ${dut1_to_tg_if2}
| | ...                                     | ${bd_id1} | ${shg1}
| | And Interface is added to bridge domain | ${dut1_node} | ${dut1_to_dut2}
| | ...                                     | ${bd_id1}
| | And Bridge domain on DUT node is created | ${dut2_node} | ${bd_id2}
| | And Interface is added to bridge domain | ${dut2_node} | ${dut2_to_tg_if1}
| | ...                                     | ${bd_id2} | ${shg2}
| | And Interface is added to bridge domain | ${dut2_node} | ${dut2_to_tg_if2}
| | ...                                     | ${bd_id2} | ${shg2}
| | And Interface is added to bridge domain | ${dut2_node} | ${dut2_to_dut1}
| | ...                                     | ${bd_id2}
| | Then Send and receive ICMPv4 bidirectionally | ${tg_node}
| | ...                                          | ${tg_to_dut1_if1}
| | ...                                          | ${tg_to_dut2_if1}
| | And Send and receive ICMPv4 bidirectionally | ${tg_node}
| | ...                                         | ${tg_to_dut1_if1}
| | ...                                         | ${tg_to_dut2_if2}
| | And Send and receive ICMPv4 bidirectionally | ${tg_node}
| | ...                                         | ${tg_to_dut1_if2}
| | ...                                         | ${tg_to_dut2_if1}
| | And Send and receive ICMPv4 bidirectionally | ${tg_node}
| | ...                                         | ${tg_to_dut1_if2}
| | ...                                         | ${tg_to_dut2_if2}
| | And Run Keyword And Expect Error | ICMP echo Rx timeout
| | ...                              | Send and receive ICMPv4 bidirectionally
| | | ...                            | ${tg_node} | ${tg_to_dut1_if1}
| | | ...                            | ${tg_to_dut1_if2}
| | And Run Keyword And Expect Error | ICMP echo Rx timeout
| | ...                              | Send and receive ICMPv4 bidirectionally
| | | ...                            | ${tg_node} | ${tg_to_dut2_if1}
| | | ...                            | ${tg_to_dut2_if2}
/** \brief Create multiple clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 *) buffer index array
    @param n_buffers - (u16) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
           where packet head ends
    @return - (u16) number of buffers actually cloned, which may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		   u16 n_buffers, u16 head_end_offset)
{
  return vlib_buffer_clone_at_offset (vm, src_buffer, buffers, n_buffers,
				      head_end_offset, 0);
}

/** \brief Attach cloned tail to the buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
*/
always_inline void
vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
			  vlib_buffer_t * tail)
{
  ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (head->buffer_pool_index == tail->buffer_pool_index);

  head->flags |= VLIB_BUFFER_NEXT_PRESENT;
  head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
  head->next_buffer = vlib_get_buffer_index (vm, tail);
  head->total_length_not_including_first_buffer = tail->current_length +
    tail->total_length_not_including_first_buffer;

next_segment:
  clib_atomic_add_fetch (&tail->ref_count, 1);

  if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      tail = vlib_get_buffer (vm, tail->next_buffer);
      goto next_segment;
    }
}

/* Initializes the buffer as an empty packet with no chained buffers. */
always_inline void
vlib_buffer_chain_init (vlib_buffer_t * first)
{
  first->total_length_not_including_first_buffer = 0;
  first->current_length = 0;
  first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

/* The provided next_bi buffer index is appended to the end of the packet. */
always_inline vlib_buffer_t *
vlib_buffer_chain_buffer (vlib_main_t * vm, vlib_buffer_t * last, u32 next_bi)
{
  vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
  last->next_buffer = next_bi;
  last->flags |= VLIB_BUFFER_NEXT_PRESENT;
  next_buffer->current_length = 0;
  next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return next_buffer;
}

/* Increases or decreases the packet length.
 * It does not allocate or deallocate new buffers.
 * Therefore, the added length must be compatible
 * with the last buffer. */
always_inline void
vlib_buffer_chain_increase_length (vlib_buffer_t * first,
				   vlib_buffer_t * last, i32 len)
{
  last->current_length += len;
  if (first != last)
    first->total_length_not_including_first_buffer += len;
}

/* Copy data to the end of the packet and increases its length.
 * It does not allocate new buffers.
 * Returns the number of copied bytes. */
always_inline u16
vlib_buffer_chain_append_data (vlib_main_t * vm,
			       vlib_buffer_t * first,
			       vlib_buffer_t * last, void *data, u16 data_len)
{
  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
  ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
  u16 len = clib_min (data_len,
		      n_buffer_bytes - last->current_length -
		      last->current_data);
  clib_memcpy_fast (vlib_buffer_get_current (last) + last->current_length,
		    data, len);
  vlib_buffer_chain_increase_length (first, last, len);
  return len;
}
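/* Illustrative usage: building a chain from a flat blob, a sketch assuming
 * the head buffer index `bi` and the `data`/`data_len` source are caller
 * supplied (hypothetical names); tail allocation is left to the caller, or
 * use the ..._with_alloc variant declared below:
 *
 *   vlib_buffer_t *first = vlib_get_buffer (vm, bi);
 *   vlib_buffer_t *last = first;
 *   vlib_buffer_chain_init (first);
 *   u16 copied = vlib_buffer_chain_append_data (vm, first, last,
 *                                               data, data_len);
 *   if (copied < data_len)
 *     allocate a tail index, last = vlib_buffer_chain_buffer (vm, last,
 *     tail_bi), then append the remainder.
 */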
/* Copy data to the end of the packet and increases its length.
 * Allocates additional buffers from the free list if necessary.
 * Returns the number of copied bytes.
 * 'last' value is modified whenever new buffers are allocated and
 * chained and points to the last buffer in the chain. */
u16 vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
					      vlib_buffer_t * first,
					      vlib_buffer_t ** last,
					      void *data, u16 data_len);

void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);

format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
  format_vlib_buffer_contents;

typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to allocator. */
  u32 min_n_buffers_each_alloc;

  u8 *name;
} vlib_packet_template_t;

void vlib_packet_template_init (vlib_main_t * vm,
				vlib_packet_template_t * t,
				void *packet_data,
				uword n_packet_data_bytes,
				uword min_n_buffers_each_alloc,
				char *fmt, ...);

void *vlib_packet_template_get_packet (vlib_main_t * vm,
				       vlib_packet_template_t * t,
				       u32 * bi_result);

always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}

always_inline u32
vlib_buffer_space_left_at_end (vlib_main_t * vm, vlib_buffer_t * b)
{
  return b->data + vlib_buffer_get_default_data_size (vm) -
    ((u8 *) vlib_buffer_get_current (b) + b->current_length);
}
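/* Illustrative usage: packet templates pre-build identical packets from a
 * stored byte vector. A sketch assuming `hdr` holds the wire-format bytes
 * to replicate and that 16 buffers per allocator call is acceptable
 * (hypothetical names/values):
 *
 *   vlib_packet_template_t t;
 *   u32 bi;
 *   vlib_packet_template_init (vm, &t, hdr, sizeof (hdr),
 *                              16, "example-template");
 *   void *p = vlib_packet_template_get_packet (vm, &t, &bi);
 */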
/* Rewrite a (possibly cloned) buffer chain into a minimal linear chain,
 * copying payload forward so each buffer is filled to the default data
 * size. Returns the resulting number of buffers in the chain, or 0 on
 * allocation failure. */
always_inline u32
vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *db = b, *sb, *first = b;
  int is_cloned = 0;
  u32 bytes_left = 0, data_size;
  u16 src_left, dst_left, n_buffers = 1;
  u8 *dp, *sp;
  u32 to_free = 0;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return 1;

  data_size = vlib_buffer_get_default_data_size (vm);

  dst_left = vlib_buffer_space_left_at_end (vm, b);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      if (b->ref_count > 1)
	is_cloned = 1;
      bytes_left += b->current_length;
      n_buffers++;
    }

  /* if buffer is cloned, create completely new chain - unless everything
     fits into one buffer */
  if (is_cloned && bytes_left >= dst_left)
    {
      u32 len = 0;
      u32 space_needed = bytes_left - dst_left;
      u32 tail;

      if (vlib_buffer_alloc (vm, &tail, 1) == 0)
	return 0;

      ++n_buffers;
      len += data_size;
      b = vlib_get_buffer (vm, tail);

      while (len < space_needed)
	{
	  u32 bi;
	  if (vlib_buffer_alloc (vm, &bi, 1) == 0)
	    {
	      vlib_buffer_free_one (vm, tail);
	      return 0;
	    }
	  b->flags = VLIB_BUFFER_NEXT_PRESENT;
	  b->next_buffer = bi;
	  b = vlib_get_buffer (vm, bi);
	  len += data_size;
	  n_buffers++;
	}
      sb = vlib_get_buffer (vm, first->next_buffer);
      to_free = first->next_buffer;
      first->next_buffer = tail;
    }
  else
    sb = vlib_get_buffer (vm, first->next_buffer);

  src_left = sb->current_length;
  sp = vlib_buffer_get_current (sb);
  dp = vlib_buffer_get_tail (db);

  while (bytes_left)
    {
      u16 bytes_to_copy;

      if (dst_left == 0)
	{
	  db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
	  ASSERT (db->flags & VLIB_BUFFER_NEXT_PRESENT);
	  db = vlib_get_buffer (vm, db->next_buffer);
	  dst_left = data_size;
	  if (db->current_data > 0)
	    {
	      db->current_data = 0;
	    }
	  else
	    {
	      dst_left += -db->current_data;
	    }
	  dp = vlib_buffer_get_current (db);
	}

      while (src_left == 0)
	{
	  ASSERT (sb->flags & VLIB_BUFFER_NEXT_PRESENT);
	  sb = vlib_get_buffer (vm, sb->next_buffer);
	  src_left = sb->current_length;
	  sp = vlib_buffer_get_current (sb);
	}

      bytes_to_copy = clib_min (dst_left, src_left);

      if (dp != sp)
	{
	  if (sb == db)
	    bytes_to_copy = clib_min (bytes_to_copy, sp - dp);

	  clib_memcpy_fast (dp, sp, bytes_to_copy);
	}

      src_left -= bytes_to_copy;
      dst_left -= bytes_to_copy;
      dp += bytes_to_copy;
      sp += bytes_to_copy;
      bytes_left -= bytes_to_copy;
    }
  if (db != first)
    db->current_data = 0;
  db->current_length = dp - (u8 *) vlib_buffer_get_current (db);

  if (is_cloned && to_free)
    vlib_buffer_free_one (vm, to_free);
  else
    {
      if (db->flags & VLIB_BUFFER_NEXT_PRESENT)
	vlib_buffer_free_one (vm, db->next_buffer);
      db->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
      b = first;
      n_buffers = 1;
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  b = vlib_get_buffer (vm, b->next_buffer);
	  ++n_buffers;
	}
    }

  first->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;

  return n_buffers;
}

#endif /* included_vlib_buffer_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */