/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_funcs.h: VLIB buffer related functions/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_funcs_h
#define included_vlib_buffer_funcs_h

#include <vppinfra/hash.h>
#include <vppinfra/fifo.h>
#include <vppinfra/vector/index_to_ptr.h>
#include <vlib/buffer.h>
#include <vlib/physmem_funcs.h>
#include <vlib/main.h>
#include <vlib/node.h>

/** \file
    vlib buffer access methods.
*/
typedef void (vlib_buffer_enqueue_to_next_fn_t) (vlib_main_t *vm,
						 vlib_node_runtime_t *node,
						 u32 *buffers, u16 *nexts,
						 uword count);
typedef void (vlib_buffer_enqueue_to_single_next_fn_t) (
  vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 next_index,
  u32 count);

typedef u32 (vlib_buffer_enqueue_to_thread_fn_t) (
  vlib_main_t *vm, vlib_node_runtime_t *node, u32 frame_queue_index,
  u32 *buffer_indices, u16 *thread_indices, u32 n_packets,
  int drop_on_congestion);

typedef u32 (vlib_frame_queue_dequeue_fn_t) (vlib_main_t *vm,
					     vlib_frame_queue_main_t *fqm);

typedef struct
{
  vlib_buffer_enqueue_to_next_fn_t *buffer_enqueue_to_next_fn;
  vlib_buffer_enqueue_to_single_next_fn_t *buffer_enqueue_to_single_next_fn;
  vlib_buffer_enqueue_to_thread_fn_t *buffer_enqueue_to_thread_fn;
  vlib_frame_queue_dequeue_fn_t *frame_queue_dequeue_fn;
} vlib_buffer_func_main_t;

extern vlib_buffer_func_main_t vlib_buffer_func_main;

always_inline void
vlib_buffer_validate (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;

  /* reference count in allocated buffer always must be 1 or higher */
  ASSERT (b->ref_count > 0);

  /* verify that buffer pool index is valid */
  bp = vec_elt_at_index (bm->buffer_pools, b->buffer_pool_index);
  ASSERT (pointer_to_uword (b) >= bp->start);
  ASSERT (pointer_to_uword (b) < bp->start + bp->size -
	  (bp->data_size + sizeof (vlib_buffer_t)));
}

always_inline void *
vlib_buffer_ptr_from_index (uword buffer_mem_start, u32 buffer_index,
			    uword offset)
{
  offset += ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
  return uword_to_pointer (buffer_mem_start + offset, vlib_buffer_t *);
}

/** \brief Translate buffer index into buffer pointer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index
    @return - (vlib_buffer_t *) buffer pointer
*/
always_inline vlib_buffer_t *
vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_t *b;

  b = vlib_buffer_ptr_from_index (bm->buffer_mem_start, buffer_index, 0);
  vlib_buffer_validate (vm, b);
  return b;
}

static_always_inline u32
vlib_buffer_get_default_data_size (vlib_main_t * vm)
{
  return vm->buffer_main->default_data_size;
}

static_always_inline void
vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
{
  clib_memcpy_u32 (dst, src, n_indices);
}

always_inline void
vlib_buffer_copy_indices_from_ring (u32 * dst, u32 * ring, u32 start,
				    u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_copy_indices (dst, ring + start, n_buffers);
    }
  else
    {
      u32 n = ring_size - start;
      vlib_buffer_copy_indices (dst, ring + start, n);
      vlib_buffer_copy_indices (dst + n, ring, n_buffers - n);
    }
}

always_inline void
vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
				  u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_copy_indices (ring + start, src, n_buffers);
    }
  else
    {
      u32 n = ring_size - start;
      vlib_buffer_copy_indices (ring + start, src, n);
      vlib_buffer_copy_indices (ring, src + n, n_buffers - n);
    }
}
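/*
 * Usage sketch (illustrative only, not part of the upstream header):
 * a typical node loop translating frame buffer indices into header
 * pointers with vlib_get_buffer (). All names besides the vlib API
 * are hypothetical.
 *
 *   u32 *from = vlib_frame_vector_args (frame);
 *   u32 n_left = frame->n_vectors;
 *
 *   while (n_left > 0)
 *     {
 *       vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
 *       // ... inspect or rewrite packet data via
 *       // vlib_buffer_get_current (b0)
 *       from += 1;
 *       n_left -= 1;
 *     }
 */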
/** \brief Allocate buffers from specific pool into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param buffer_pool_index - (u8) index of buffer pool to allocate from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline __clib_warn_unused_result u32
vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
			     u8 buffer_pool_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;
  vlib_buffer_pool_thread_t *bpt;
  u32 *src, *dst, len, n_left;

  /* If buffer allocation fault injection is configured */
  if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
    {
      u32 vlib_buffer_alloc_may_fail (vlib_main_t *, u32);

      /* See how many buffers we're willing to allocate */
      n_buffers = vlib_buffer_alloc_may_fail (vm, n_buffers);
      if (n_buffers == 0)
	return (n_buffers);
    }

  bp = vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
  bpt = vec_elt_at_index (bp->threads, vm->thread_index);

  dst = buffers;
  n_left = n_buffers;
  len = bpt->n_cached;

  /* per-thread cache contains enough buffers */
  if (len >= n_buffers)
    {
      src = bpt->cached_buffers + len - n_buffers;
      vlib_buffer_copy_indices (dst, src, n_buffers);
      bpt->n_cached -= n_buffers;
      goto done;
    }

  /* alloc bigger than cache - take buffers directly from main pool */
  if (n_buffers >= VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ)
    {
      n_buffers = vlib_buffer_pool_get (vm, buffer_pool_index, buffers,
					n_buffers);
      goto done;
    }

  /* take everything available in the cache */
  if (len)
    {
      vlib_buffer_copy_indices (dst, bpt->cached_buffers, len);
      bpt->n_cached = 0;
      dst += len;
      n_left -= len;
    }

  len = round_pow2 (n_left, 32);
  len = vlib_buffer_pool_get (vm, buffer_pool_index, bpt->cached_buffers,
			      len);
  bpt->n_cached = len;

  if (len)
    {
      u32 n_copy = clib_min (len, n_left);
      src = bpt->cached_buffers + len - n_copy;
      vlib_buffer_copy_indices (dst, src, n_copy);
      bpt->n_cached -= n_copy;
      n_left -= n_copy;
    }

  n_buffers -= n_left;

done:
  /* Verify that buffers are known free. */
  if (CLIB_DEBUG > 0)
    vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				     VLIB_BUFFER_KNOWN_FREE);
  if (PREDICT_FALSE (bm->alloc_callback_fn != 0))
    bm->alloc_callback_fn (vm, buffer_pool_index, buffers, n_buffers);

  return n_buffers;
}

/** \brief Allocate buffers from specific numa node into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param numa_node - (u32) numa node
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline __clib_warn_unused_result u32
vlib_buffer_alloc_on_numa (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
			   u32 numa_node)
{
  u8 index = vlib_buffer_pool_get_default_for_numa (vm, numa_node);
  return vlib_buffer_alloc_from_pool (vm, buffers, n_buffers, index);
}

/** \brief Allocate buffers into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline __clib_warn_unused_result u32
vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  return vlib_buffer_alloc_on_numa (vm, buffers, n_buffers, vm->numa_node);
}

/** \brief Allocate buffers into ring

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline __clib_warn_unused_result u32
vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
			   u32 ring_size, u32 n_buffers)
{
  u32 n_alloc;

  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    return vlib_buffer_alloc (vm, ring + start, n_buffers);

  n_alloc = vlib_buffer_alloc (vm, ring + start, ring_size - start);

  if (PREDICT_TRUE (n_alloc == ring_size - start))
    n_alloc += vlib_buffer_alloc (vm, ring, n_buffers - n_alloc);

  return n_alloc;
}
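/*
 * Usage sketch (illustrative, not from the upstream header): allocate a
 * burst of buffers and roll back on partial allocation, since the
 * allocator may deliver fewer buffers than requested.
 *
 *   u32 bi[VLIB_FRAME_SIZE];
 *   u32 n = vlib_buffer_alloc (vm, bi, VLIB_FRAME_SIZE);
 *   if (PREDICT_FALSE (n < VLIB_FRAME_SIZE))
 *     {
 *       vlib_buffer_free (vm, bi, n);  // return the partial allocation
 *       // ... count a drop or retry later
 *     }
 */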
/** \brief Allocate buffers into ring from specific buffer pool

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers requested
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline __clib_warn_unused_result u32
vlib_buffer_alloc_to_ring_from_pool (vlib_main_t * vm, u32 * ring, u32 start,
				     u32 ring_size, u32 n_buffers,
				     u8 buffer_pool_index)
{
  u32 n_alloc;

  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    return vlib_buffer_alloc_from_pool (vm, ring + start, n_buffers,
					buffer_pool_index);

  n_alloc = vlib_buffer_alloc_from_pool (vm, ring + start, ring_size - start,
					 buffer_pool_index);

  if (PREDICT_TRUE (n_alloc == ring_size - start))
    n_alloc += vlib_buffer_alloc_from_pool (vm, ring, n_buffers - n_alloc,
					    buffer_pool_index);

  return n_alloc;
}

static_always_inline void
vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
		      u32 * buffers, u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  vlib_buffer_pool_thread_t *bpt = vec_elt_at_index (bp->threads,
						     vm->thread_index);
  u32 n_cached, n_empty;

  if (CLIB_DEBUG > 0)
    vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				     VLIB_BUFFER_KNOWN_ALLOCATED);
  if (PREDICT_FALSE (bm->free_callback_fn != 0))
    bm->free_callback_fn (vm, buffer_pool_index, buffers, n_buffers);

  n_cached = bpt->n_cached;
  n_empty = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ - n_cached;
  if (n_buffers <= n_empty)
    {
      vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
				buffers, n_buffers);
      bpt->n_cached = n_cached + n_buffers;
      return;
    }

  vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
			    buffers + n_buffers - n_empty, n_empty);
  bpt->n_cached = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ;

  clib_spinlock_lock (&bp->lock);
  vlib_buffer_copy_indices (bp->buffers + bp->n_avail, buffers,
			    n_buffers - n_empty);
  bp->n_avail += n_buffers - n_empty;
  clib_spinlock_unlock (&bp->lock);
}
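/*
 * Ring usage sketch (illustrative): refill a device RX ring that wraps,
 * i.e. slots [start, ring_size) followed by [0, ...). `rxq` is a
 * hypothetical queue structure, not part of this header.
 *
 *   u32 n_refill = rxq->size - rxq->n_enqueued;
 *   u32 n = vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, rxq->next,
 *                                                rxq->size, n_refill,
 *                                                rxq->buffer_pool_index);
 *   rxq->n_enqueued += n;
 */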
static_always_inline void
vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
			 int maybe_next)
{
  const int queue_size = 128;
  vlib_buffer_pool_t *bp = 0;
  u8 buffer_pool_index = ~0;
  u32 n_queue = 0, queue[queue_size + 4];
  vlib_buffer_t bt = { };
#if defined(CLIB_HAVE_VEC128)
  vlib_buffer_t bpi_mask = {.buffer_pool_index = ~0 };
  vlib_buffer_t bpi_vec = { };
  vlib_buffer_t flags_refs_mask = {
    .flags = VLIB_BUFFER_NEXT_PRESENT,
    .ref_count = ~1
  };
#endif

  if (PREDICT_FALSE (n_buffers == 0))
    return;

  vlib_buffer_t *b = vlib_get_buffer (vm, buffers[0]);
  buffer_pool_index = b->buffer_pool_index;
  bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  vlib_buffer_copy_template (&bt, &bp->buffer_template);
#if defined(CLIB_HAVE_VEC128)
  bpi_vec.buffer_pool_index = buffer_pool_index;
#endif

  while (n_buffers)
    {
      vlib_buffer_t *b[8];
      u32 bi, sum = 0, flags, next;

      if (n_buffers < 4)
	goto one_by_one;

      vlib_get_buffers (vm, buffers, b, 4);

      if (n_buffers >= 12)
	{
	  vlib_get_buffers (vm, buffers + 8, b + 4, 4);
	  vlib_prefetch_buffer_header (b[4], LOAD);
	  vlib_prefetch_buffer_header (b[5], LOAD);
	  vlib_prefetch_buffer_header (b[6], LOAD);
	  vlib_prefetch_buffer_header (b[7], LOAD);
	}

#if defined(CLIB_HAVE_VEC128)
      u8x16 p0, p1, p2, p3, r;
      p0 = u8x16_load_unaligned (b[0]);
      p1 = u8x16_load_unaligned (b[1]);
      p2 = u8x16_load_unaligned (b[2]);
      p3 = u8x16_load_unaligned (b[3]);

      r = p0 ^ bpi_vec.as_u8x16[0];
      r |= p1 ^ bpi_vec.as_u8x16[0];
      r |= p2 ^ bpi_vec.as_u8x16[0];
      r |= p3 ^ bpi_vec.as_u8x16[0];
      r &= bpi_mask.as_u8x16[0];
      r |= (p0 | p1 | p2 | p3) & flags_refs_mask.as_u8x16[0];

      sum = !u8x16_is_all_zero (r);
#else
      sum |= b[0]->flags;
      sum |= b[1]->flags;
      sum |= b[2]->flags;
      sum |= b[3]->flags;
      sum &= VLIB_BUFFER_NEXT_PRESENT;
      sum += b[0]->ref_count - 1;
      sum += b[1]->ref_count - 1;
      sum += b[2]->ref_count - 1;
      sum += b[3]->ref_count - 1;
      sum |= b[0]->buffer_pool_index ^ buffer_pool_index;
      sum |= b[1]->buffer_pool_index ^ buffer_pool_index;
      sum |= b[2]->buffer_pool_index ^ buffer_pool_index;
      sum |= b[3]->buffer_pool_index ^ buffer_pool_index;
#endif

      if (sum)
	goto one_by_one;

      vlib_buffer_copy_indices (queue + n_queue, buffers, 4);
      vlib_buffer_copy_template (b[0], &bt);
      vlib_buffer_copy_template (b[1], &bt);
      vlib_buffer_copy_template (b[2], &bt);
      vlib_buffer_copy_template (b[3], &bt);
      n_queue += 4;

      vlib_buffer_validate (vm, b[0]);
      vlib_buffer_validate (vm, b[1]);
      vlib_buffer_validate (vm, b[2]);
      vlib_buffer_validate (vm, b[3]);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      if (n_queue >= queue_size)
	{
	  vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
	  n_queue = 0;
	}
      buffers += 4;
      n_buffers -= 4;
      continue;

    one_by_one:
      bi = buffers[0];

    next_in_chain:
      b[0] = vlib_get_buffer (vm, bi);
      flags = b[0]->flags;
      next = b[0]->next_buffer;

      if (PREDICT_FALSE (buffer_pool_index != b[0]->buffer_pool_index))
	{
	  if (n_queue)
	    {
	      vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
	      n_queue = 0;
	    }

	  buffer_pool_index = b[0]->buffer_pool_index;
#if defined(CLIB_HAVE_VEC128)
	  bpi_vec.buffer_pool_index = buffer_pool_index;
#endif
	  bp = vlib_get_buffer_pool (vm, buffer_pool_index);
	  vlib_buffer_copy_template (&bt, &bp->buffer_template);
	}

      vlib_buffer_validate (vm, b[0]);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      if (clib_atomic_sub_fetch (&b[0]->ref_count, 1) == 0)
	{
	  vlib_buffer_copy_template (b[0], &bt);
	  queue[n_queue++] = bi;
	}

      if (n_queue == queue_size)
	{
	  vlib_buffer_pool_put (vm, buffer_pool_index, queue, queue_size);
	  n_queue = 0;
	}

      if (maybe_next && (flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  bi = next;
	  goto next_in_chain;
	}

      buffers++;
      n_buffers--;
    }

  if (n_queue)
    vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
}

/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free
*/
always_inline void
vlib_buffer_free (vlib_main_t * vm,
		  /* pointer to first buffer */
		  u32 * buffers,
		  /* number of buffers to free */
		  u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers, /* maybe next */ 1);
}

/** \brief Free buffers, does not free the buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free
*/
always_inline void
vlib_buffer_free_no_next (vlib_main_t * vm,
			  /* pointer to first buffer */
			  u32 * buffers,
			  /* number of buffers to free */
			  u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers, /* maybe next */ 0);
}
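/*
 * Sketch (illustrative): vlib_buffer_free () walks VLIB_BUFFER_NEXT_PRESENT
 * chains and frees every segment, while vlib_buffer_free_no_next () frees
 * only the listed indices. The latter fits when chain tails are owned
 * elsewhere, e.g. still referenced by clones.
 *
 *   vlib_buffer_free (vm, bi, n);          // frees full chains
 *   vlib_buffer_free_no_next (vm, bi, n);  // frees heads only
 */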
/** \brief Free one buffer
    Shorthand to free a single buffer chain.

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffer_index - (u32) buffer index to free
*/
always_inline void
vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_free_inline (vm, &buffer_index, 1, /* maybe next */ 1);
}

/** \brief Free buffers from ring

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers
*/
always_inline void
vlib_buffer_free_from_ring (vlib_main_t * vm, u32 * ring, u32 start,
			    u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_free (vm, ring + start, n_buffers);
    }
  else
    {
      vlib_buffer_free (vm, ring + start, ring_size - start);
      vlib_buffer_free (vm, ring, n_buffers - (ring_size - start));
    }
}

/** \brief Free buffers from ring without freeing tail buffers

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index ring
    @param start - (u32) first slot in the ring
    @param ring_size - (u32) ring size
    @param n_buffers - (u32) number of buffers
*/
always_inline void
vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
				    u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      vlib_buffer_free_no_next (vm, ring + start, n_buffers);
    }
  else
    {
      vlib_buffer_free_no_next (vm, ring + start, ring_size - start);
      vlib_buffer_free_no_next (vm, ring, n_buffers - (ring_size - start));
    }
}

/* Append given data to end of buffer, possibly allocating new buffers. */
int vlib_buffer_add_data (vlib_main_t * vm, u32 * buffer_index, void *data,
			  u32 n_data_bytes);

/* Define vlib_buffer and vnet_buffer flags bits preserved for copy/clone */
#define VLIB_BUFFER_COPY_CLONE_FLAGS_MASK			\
  (VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID |	\
   VLIB_BUFFER_IS_TRACED | ~VLIB_BUFFER_FLAGS_ALL)

/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *s, *d, *fd;
  uword n_alloc, n_buffers = 1;
  u32 flag_mask = VLIB_BUFFER_COPY_CLONE_FLAGS_MASK;
  int i;

  s = b;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      n_buffers++;
      s = vlib_get_buffer (vm, s->next_buffer);
    }
  u32 new_buffers[n_buffers];

  n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);

  /* No guarantee that we'll get all the buffers we asked for */
  if (PREDICT_FALSE (n_alloc < n_buffers))
    {
      if (n_alloc > 0)
	vlib_buffer_free (vm, new_buffers, n_alloc);
      return 0;
    }

  /* 1st segment */
  s = b;
  fd = d = vlib_get_buffer (vm, new_buffers[0]);
  d->current_data = s->current_data;
  d->current_length = s->current_length;
  d->flags = s->flags & flag_mask;
  d->trace_handle = s->trace_handle;
  d->total_length_not_including_first_buffer =
    s->total_length_not_including_first_buffer;
  clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
  clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
  clib_memcpy_fast (vlib_buffer_get_current (d),
		    vlib_buffer_get_current (s), s->current_length);

  /* next segments */
  for (i = 1; i < n_buffers; i++)
    {
      /* previous */
      d->next_buffer = new_buffers[i];
      /* current */
      s = vlib_get_buffer (vm, s->next_buffer);
      d = vlib_get_buffer (vm, new_buffers[i]);
      d->current_data = s->current_data;
      d->current_length = s->current_length;
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), s->current_length);
      d->flags = s->flags & flag_mask;
    }

  return fd;
}
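/*
 * Sketch (illustrative): deep-copy a packet before modifying it in place,
 * e.g. to punt the original while rewriting the copy. vlib_buffer_copy ()
 * returns 0 when buffer allocation fails, so the result must be checked.
 *
 *   vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
 *   vlib_buffer_t *c0 = vlib_buffer_copy (vm, b0);
 *   if (c0)
 *     {
 *       u32 ci0 = vlib_get_buffer_index (vm, c0);
 *       // ... enqueue ci0 to the punt path
 *     }
 */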
/* duplicate first buffer in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy_no_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * di)
{
  vlib_buffer_t *d;

  if ((vlib_buffer_alloc (vm, di, 1)) != 1)
    return 0;

  d = vlib_get_buffer (vm, *di);
  /* 1st segment */
  d->current_data = b->current_data;
  d->current_length = b->current_length;
  clib_memcpy_fast (d->opaque, b->opaque, sizeof (b->opaque));
  clib_memcpy_fast (d->opaque2, b->opaque2, sizeof (b->opaque2));
  clib_memcpy_fast (vlib_buffer_get_current (d),
		    vlib_buffer_get_current (b), b->current_length);

  return d;
}

/* \brief Move packet from current position to offset position in buffer.
   Only works for packets using a single buffer with enough room to fit
   the move.

   @param vm - (vlib_main_t *) vlib main data structure pointer
   @param b -  (vlib_buffer_t *) pointer to buffer
   @param offset - (i16) position to move the packet in buffer
 */
always_inline void
vlib_buffer_move (vlib_main_t * vm, vlib_buffer_t * b, i16 offset)
{
  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
  ASSERT (offset + b->current_length <
	  vlib_buffer_get_default_data_size (vm));

  u8 *source = vlib_buffer_get_current (b);
  b->current_data = offset;
  u8 *destination = vlib_buffer_get_current (b);
  u16 length = b->current_length;

  if (source + length <= destination)	/* no overlap */
    clib_memcpy_fast (destination, source, length);
  else
    memmove (destination, source, length);
}

/** \brief Create a maximum of 256 clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested (<=256)
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @param offset - (i16) copy packet head at current position if 0,
    else at offset position to change headroom space as specified
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		       u16 n_buffers, u16 head_end_offset, i16 offset)
{
  u16 i;
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  ASSERT (s->ref_count == 1);
  ASSERT (n_buffers);
  ASSERT (n_buffers <= 256);
  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
  ASSERT ((offset + head_end_offset) <
	  vlib_buffer_get_default_data_size (vm));

  if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
    {
      buffers[0] = src_buffer;
      if (offset)
	vlib_buffer_move (vm, s, offset);

      for (i = 1; i < n_buffers; i++)
	{
	  vlib_buffer_t *d;
	  d = vlib_buffer_copy (vm, s);
	  if (d == 0)
	    return i;
	  buffers[i] = vlib_get_buffer_index (vm, d);
	}
      return n_buffers;
    }

  if (PREDICT_FALSE ((n_buffers == 1) && (offset == 0)))
    {
      buffers[0] = src_buffer;
      return 1;
    }

  n_buffers = vlib_buffer_alloc_from_pool (vm, buffers, n_buffers,
					   s->buffer_pool_index);

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
      if (offset)
	d->current_data = offset;
      else
	d->current_data = s->current_data;

      d->current_length = head_end_offset;
      ASSERT (d->buffer_pool_index == s->buffer_pool_index);

      d->total_length_not_including_first_buffer = s->current_length -
	head_end_offset;
      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  d->total_length_not_including_first_buffer +=
	    s->total_length_not_including_first_buffer;
	}
      d->flags = (s->flags & VLIB_BUFFER_COPY_CLONE_FLAGS_MASK) |
	VLIB_BUFFER_NEXT_PRESENT;
      d->trace_handle = s->trace_handle;
      clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
      clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
      clib_memcpy_fast (vlib_buffer_get_current (d),
			vlib_buffer_get_current (s), head_end_offset);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
  s->ref_count = n_buffers ? n_buffers : s->ref_count;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
      s->ref_count = n_buffers ? n_buffers : s->ref_count;
    }

  return n_buffers;
}
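/*
 * Sketch (illustrative, assumptions flagged): use vlib_buffer_move () to
 * adjust headroom before prepending an encap header, for packets that fit
 * in a single buffer. The offset is relative to b->data, so moving to a
 * larger offset grows the space in front of the packet.
 *
 *   if (!(b0->flags & VLIB_BUFFER_NEXT_PRESENT))
 *     vlib_buffer_move (vm, b0, 64);   // hypothetical 64-byte offset
 */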
/** \brief Create multiple clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested; requests
    larger than 256 are split into chunks of 256 internally
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @param offset - (i16) copy packet head at current position if 0,
    else at offset position to change headroom space as specified
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_at_offset (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
			     u16 n_buffers, u16 head_end_offset, i16 offset)
{
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
  u16 n_cloned = 0;

  while (n_buffers > 256)
    {
      vlib_buffer_t *copy;
      copy = vlib_buffer_copy (vm, s);
      n_cloned += vlib_buffer_clone_256 (vm,
					 vlib_get_buffer_index (vm, copy),
					 (buffers + n_cloned),
					 256, head_end_offset, offset);
      n_buffers -= 256;
    }
  n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
				     buffers + n_cloned,
				     n_buffers, head_end_offset, offset);

  return n_cloned;
}

/** \brief Create multiple clones of buffer and store them
    in the supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param src_buffer - (u32) source buffer index
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u16) number of buffer clones requested
    @param head_end_offset - (u16) offset relative to current position
    where packet head ends
    @return - (u16) number of buffers actually cloned, may be
    less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		   u16 n_buffers, u16 head_end_offset)
{
  return vlib_buffer_clone_at_offset (vm, src_buffer, buffers, n_buffers,
				      head_end_offset, 0);
}

/** \brief Attach cloned tail to the buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param head - (vlib_buffer_t *) head buffer
    @param tail - (vlib_buffer_t *) tail buffer to clone and attach to head
*/
always_inline void
vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
			  vlib_buffer_t * tail)
{
  ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  ASSERT (head->buffer_pool_index == tail->buffer_pool_index);

  head->flags |= VLIB_BUFFER_NEXT_PRESENT;
  head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
  head->next_buffer = vlib_get_buffer_index (vm, tail);
  head->total_length_not_including_first_buffer = tail->current_length +
    tail->total_length_not_including_first_buffer;

next_segment:
  clib_atomic_add_fetch (&tail->ref_count, 1);

  if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      tail = vlib_get_buffer (vm, tail->next_buffer);
      goto next_segment;
    }
}
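/*
 * Replication sketch (illustrative): clone one packet toward several
 * outputs. Each clone gets a private copy of the first `head_end_offset`
 * bytes (the rewritable header) and shares the payload tail through ref
 * counts, so clones must not write past the head.
 *
 *   u32 clones[8];
 *   u16 n = vlib_buffer_clone (vm, bi0, clones, 8, head_end_offset);
 *   // ... enqueue clones[0..n-1]; n may be < 8 on allocation failure
 */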
/* Initializes the buffer as an empty packet with no chained buffers. */
always_inline void
vlib_buffer_chain_init (vlib_buffer_t * first)
{
  first->total_length_not_including_first_buffer = 0;
  first->current_length = 0;
  first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

/* The provided next_bi buffer index is appended to the end of the packet. */
always_inline vlib_buffer_t *
vlib_buffer_chain_buffer (vlib_main_t * vm, vlib_buffer_t * last, u32 next_bi)
{
  vlib_buffer_t *next_buffer = vlib_get_buffer (vm, next_bi);
  last->next_buffer = next_bi;
  last->flags |= VLIB_BUFFER_NEXT_PRESENT;
  next_buffer->current_length = 0;
  next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return next_buffer;
}

/* Increases or decreases the packet length.
 * It does not allocate or deallocate new buffers.
 * Therefore, the added length must be compatible
 * with the last buffer. */
always_inline void
vlib_buffer_chain_increase_length (vlib_buffer_t * first,
				   vlib_buffer_t * last, i32 len)
{
  last->current_length += len;
  if (first != last)
    first->total_length_not_including_first_buffer += len;
}

/* Copy data to the end of the packet and increases its length.
 * It does not allocate new buffers.
 * Returns the number of copied bytes. */
always_inline u16
vlib_buffer_chain_append_data (vlib_main_t * vm,
			       vlib_buffer_t * first,
			       vlib_buffer_t * last, void *data, u16 data_len)
{
  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
  ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
  u16 len = clib_min (data_len,
		      n_buffer_bytes - last->current_length -
		      last->current_data);
  clib_memcpy_fast (vlib_buffer_get_current (last) + last->current_length,
		    data, len);
  vlib_buffer_chain_increase_length (first, last, len);
  return len;
}

/* Copy data to the end of the packet and increases its length.
 * Allocates additional buffers from the free list if necessary.
 * Returns the number of copied bytes.
 * 'last' value is modified whenever new buffers are allocated and
 * chained and points to the last buffer in the chain. */
u16 vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
					      vlib_buffer_t * first,
					      vlib_buffer_t ** last,
					      void *data, u16 data_len);

void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);

format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
  format_vlib_buffer_contents, format_vlib_buffer_no_chain;
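/*
 * Chain-building sketch (illustrative): assemble a multi-segment packet.
 * `bi_head`, `bi_tail`, `data` and `len` are hypothetical, with the two
 * buffer indices freshly allocated by the caller.
 *
 *   vlib_buffer_t *first = vlib_get_buffer (vm, bi_head);
 *   vlib_buffer_chain_init (first);
 *   u16 n = vlib_buffer_chain_append_data (vm, first, first, data, len);
 *   if (n < len)   // first buffer full: chain another and keep copying
 *     {
 *       vlib_buffer_t *last = vlib_buffer_chain_buffer (vm, first, bi_tail);
 *       vlib_buffer_chain_append_data (vm, first, last, data + n, len - n);
 *     }
 */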
typedef struct
{
  /* Vector of packet data. */
  u8 *packet_data;

  /* Number of buffers to allocate in each call to allocator. */
  u32 min_n_buffers_each_alloc;

  u8 *name;
} vlib_packet_template_t;

void vlib_packet_template_init (vlib_main_t * vm,
				vlib_packet_template_t * t,
				void *packet_data,
				uword n_packet_data_bytes,
				uword min_n_buffers_each_alloc,
				char *fmt, ...);

void *vlib_packet_template_get_packet (vlib_main_t * vm,
				       vlib_packet_template_t * t,
				       u32 * bi_result);

always_inline void
vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
{
  vec_free (t->packet_data);
}

always_inline u32
vlib_buffer_space_left_at_end (vlib_main_t * vm, vlib_buffer_t * b)
{
  return b->data + vlib_buffer_get_default_data_size (vm) -
    ((u8 *) vlib_buffer_get_current (b) + b->current_length);
}

#define VLIB_BUFFER_LINEARIZE_MAX 64

always_inline u32
vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *dst_b;
  u32 n_buffers = 1, to_free = 0;
  u16 rem_len, dst_len, data_size, src_len = 0;
  u8 *dst, *src = 0;

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    return 1;

  ASSERT (1 == b->ref_count);
  if (PREDICT_FALSE (1 != b->ref_count))
    return 0;

  data_size = vlib_buffer_get_default_data_size (vm);
  rem_len = vlib_buffer_length_in_chain (vm, b) - b->current_length;

  dst_b = b;
  dst = vlib_buffer_get_tail (dst_b);
  dst_len = vlib_buffer_space_left_at_end (vm, dst_b);

  b->total_length_not_including_first_buffer -= dst_len;

  while (rem_len > 0)
    {
      u16 copy_len;

      while (0 == src_len)
	{
	  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
	  if (PREDICT_FALSE (!(b->flags & VLIB_BUFFER_NEXT_PRESENT)))
	    break;		/* malformed chained buffer */

	  b = vlib_get_buffer (vm, b->next_buffer);
	  src = vlib_buffer_get_current (b);
	  src_len = b->current_length;
	}

      if (0 == dst_len)
	{
	  ASSERT (dst_b->flags & VLIB_BUFFER_NEXT_PRESENT);
	  if (PREDICT_FALSE (!(dst_b->flags & VLIB_BUFFER_NEXT_PRESENT)))
	    break;		/* malformed chained buffer */

	  vlib_buffer_t *next_dst_b =
	    vlib_get_buffer (vm, dst_b->next_buffer);

	  if (PREDICT_TRUE (1 == next_dst_b->ref_count))
	    {
	      /* normal case: buffer is not cloned, just use it */
	      dst_b = next_dst_b;
	    }
	  else
	    {
	      /* cloned buffer, build a new dest chain from there */
	      vlib_buffer_t *bufs[VLIB_BUFFER_LINEARIZE_MAX];
	      u32 bis[VLIB_BUFFER_LINEARIZE_MAX + 1];
	      const int n = (rem_len + data_size - 1) / data_size;
	      int n_alloc;
	      int i;

	      ASSERT (n <= VLIB_BUFFER_LINEARIZE_MAX);
	      if (PREDICT_FALSE (n > VLIB_BUFFER_LINEARIZE_MAX))
		return 0;

	      n_alloc = vlib_buffer_alloc (vm, bis, n);
	      if (PREDICT_FALSE (n_alloc != n))
		{
		  vlib_buffer_free (vm, bis, n_alloc);
		  return 0;
		}

	      vlib_get_buffers (vm, bis, bufs, n);

	      for (i = 0; i < n - 1; i++)
		{
		  bufs[i]->flags |= VLIB_BUFFER_NEXT_PRESENT;
		  bufs[i]->next_buffer = bis[i + 1];
		}

	      to_free = dst_b->next_buffer;
	      dst_b->next_buffer = bis[0];
	      dst_b = bufs[0];
	    }

	  n_buffers++;

	  dst_b->current_data = clib_min (0, dst_b->current_data);
	  dst_b->current_length = 0;

	  dst = dst_b->data + dst_b->current_data;
	  dst_len = data_size - dst_b->current_data;
	}

      copy_len = clib_min (src_len, dst_len);

      if (PREDICT_TRUE (src == dst))
	{
	  /* nothing to do */
	}
      else if (src + copy_len > dst && dst + copy_len > src)
	{
	  /* src and dst overlap */
	  ASSERT (b == dst_b);
	  memmove (dst, src, copy_len);
	}
      else
	{
	  clib_memcpy_fast (dst, src, copy_len);
	}

      dst_b->current_length += copy_len;

      dst += copy_len;
      src += copy_len;
      dst_len -= copy_len;
      src_len -= copy_len;
      rem_len -= copy_len;
    }
  /* in case of a malformed chain buffer, we'll exit early from the loop. */
  ASSERT (0 == rem_len);
  b->total_length_not_including_first_buffer -= rem_len;

  if (to_free)
    vlib_buffer_free_one (vm, to_free);

  if (dst_b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      /* the resulting chain is smaller than the original, cut it there */
      dst_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
      vlib_buffer_free_one (vm, dst_b->next_buffer);
      if (1 == n_buffers)
	{
	  /* no longer a chained buffer */
	  dst_b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  dst_b->total_length_not_including_first_buffer = 0;
	}
    }

  return n_buffers;
}

#endif /* included_vlib_buffer_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */