/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer.c: allocate/free network buffers.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vlib/vlib.h>

uword
vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
                                       vlib_buffer_t * b_first)
{
  vlib_buffer_t *b = b_first;
  uword l_first = b_first->current_length;
  uword l = 0;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      l += b->current_length;
    }
  b_first->total_length_not_including_first_buffer = l;
  b_first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return l + l_first;
}

u8 *
format_vlib_buffer (u8 * s, va_list * args)
{
  vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *);

  s = format (s, "current data %d, length %d, free-list %d",
              b->current_data, b->current_length, b->free_list_index);

  if (b->flags & VLIB_BUFFER_IS_TRACED)
    s = format (s, ", trace 0x%x", b->trace_index);

  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    s = format (s, ", next-buffer 0x%x", b->next_buffer);

  return s;
}

u8 *
format_vlib_buffer_and_data (u8 * s, va_list * args)
{
  vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *);

  s = format (s, "%U, %U",
              format_vlib_buffer, b,
              format_hex_bytes, vlib_buffer_get_current (b), 64);

  return s;
}

static u8 *
format_vlib_buffer_known_state (u8 * s, va_list * args)
{
  vlib_buffer_known_state_t state =
    va_arg (*args, vlib_buffer_known_state_t);
  char *t;

  switch (state)
    {
    case VLIB_BUFFER_UNKNOWN:
      t = "unknown";
      break;

    case VLIB_BUFFER_KNOWN_ALLOCATED:
      t = "known-allocated";
      break;

    case VLIB_BUFFER_KNOWN_FREE:
      t = "known-free";
      break;

    default:
      t = "invalid";
      break;
    }

  return format (s, "%s", t);
}

u8 *
format_vlib_buffer_contents (u8 * s, va_list * va)
{
  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
  vlib_buffer_t *b = va_arg (*va, vlib_buffer_t *);

  while (1)
    {
      vec_add (s, vlib_buffer_get_current (b), b->current_length);
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
      b = vlib_get_buffer (vm, b->next_buffer);
    }

  return s;
}
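/* Usage sketch (illustrative, not part of the original file): the
   format_vlib_buffer* routines above plug into format() via the %U
   conversion, e.g.

     u8 *s = format (0, "%U", format_vlib_buffer_and_data, b);
     vlib_cli_output (vm, "%v", s);
     vec_free (s);
*/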
static u8 *
vlib_validate_buffer_helper (vlib_main_t * vm,
                             u32 bi,
                             uword follow_buffer_next, uword ** unique_hash)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *fl;

  if (pool_is_free_index (bm->buffer_free_list_pool, b->free_list_index))
    return format (0, "unknown free list 0x%x", b->free_list_index);

  fl = pool_elt_at_index (bm->buffer_free_list_pool, b->free_list_index);

  if ((signed) b->current_data < (signed) -VLIB_BUFFER_PRE_DATA_SIZE)
    return format (0, "current data %d before pre-data", b->current_data);

#if DPDK == 0
  if (b->current_data + b->current_length > fl->n_data_bytes)
    return format (0, "%d-%d beyond end of buffer %d",
                   b->current_data, b->current_length, fl->n_data_bytes);
#endif

  if (follow_buffer_next && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      vlib_buffer_known_state_t k;
      u8 *msg, *result;

      k = vlib_buffer_is_known (vm, b->next_buffer);
      if (k != VLIB_BUFFER_KNOWN_ALLOCATED)
        return format (0, "next 0x%x: %U",
                       b->next_buffer, format_vlib_buffer_known_state, k);

      if (unique_hash)
        {
          if (hash_get (*unique_hash, b->next_buffer))
            return format (0, "duplicate buffer 0x%x", b->next_buffer);

          hash_set1 (*unique_hash, b->next_buffer);
        }

      msg = vlib_validate_buffer (vm, b->next_buffer, follow_buffer_next);
      if (msg)
        {
          result = format (0, "next 0x%x: %v", b->next_buffer, msg);
          vec_free (msg);
          return result;
        }
    }

  return 0;
}

u8 *
vlib_validate_buffer (vlib_main_t * vm, u32 bi, uword follow_buffer_next)
{
  return vlib_validate_buffer_helper (vm, bi, follow_buffer_next,
                                      /* unique_hash */ 0);
}

u8 *
vlib_validate_buffers (vlib_main_t * vm,
                       u32 * buffers,
                       uword next_buffer_stride,
                       uword n_buffers,
                       vlib_buffer_known_state_t known_state,
                       uword follow_buffer_next)
{
  uword i, *hash;
  u32 bi, *b = buffers;
  vlib_buffer_known_state_t k;
  u8 *msg = 0, *result = 0;

  hash = hash_create (0, 0);
  for (i = 0; i < n_buffers; i++)
    {
      bi = b[0];
      b += next_buffer_stride;

      /* Buffer is not unique. */
      if (hash_get (hash, bi))
        {
          msg = format (0, "not unique");
          goto done;
        }

      k = vlib_buffer_is_known (vm, bi);
      if (k != known_state)
        {
          msg = format (0, "is %U; expected %U",
                        format_vlib_buffer_known_state, k,
                        format_vlib_buffer_known_state, known_state);
          goto done;
        }

      msg = vlib_validate_buffer_helper (vm, bi, follow_buffer_next, &hash);
      if (msg)
        goto done;

      hash_set1 (hash, bi);
    }

done:
  if (msg)
    {
      result = format (0, "0x%x: %v", bi, msg);
      vec_free (msg);
    }
  hash_free (hash);
  return result;
}

vlib_main_t **vlib_mains;

/* When debugging, validate that given buffers are either known allocated
   or known free. */
static void
vlib_buffer_validate_alloc_free (vlib_main_t * vm,
                                 u32 * buffers,
                                 uword n_buffers,
                                 vlib_buffer_known_state_t expected_state)
{
  u32 *b;
  uword i, bi, is_free;

  if (CLIB_DEBUG == 0)
    return;

  ASSERT (os_get_cpu_number () == 0);

  /* smp disaster check */
  if (vlib_mains)
    ASSERT (vm == vlib_mains[0]);

  is_free = expected_state == VLIB_BUFFER_KNOWN_ALLOCATED;
  b = buffers;
  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_known_state_t known;

      bi = b[0];
      b += 1;
      known = vlib_buffer_is_known (vm, bi);
      if (known != expected_state)
        {
          ASSERT (0);
          vlib_panic_with_msg
            (vm, "%s %U buffer 0x%x",
             is_free ? "freeing" : "allocating",
             format_vlib_buffer_known_state, known, bi);
        }

      vlib_buffer_set_known_state
        (vm, bi,
         is_free ? VLIB_BUFFER_KNOWN_FREE : VLIB_BUFFER_KNOWN_ALLOCATED);
    }
}
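/* Usage sketch (illustrative): debug images can sanity-check a vector of
   buffer indices before freeing them.  Stride 1 walks the index vector
   densely, and the final argument asks for chained buffers to be
   followed as well:

     u8 *err = vlib_validate_buffers (vm, buffers, 1, n_buffers,
                                      VLIB_BUFFER_KNOWN_ALLOCATED, 1);
     if (err)
       clib_warning ("%v", err);
*/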
/* Aligned copy routine. */
void
vlib_aligned_memcpy (void *_dst, void *_src, int n_bytes)
{
  vlib_copy_unit_t *dst = _dst;
  vlib_copy_unit_t *src = _src;

  /* Arguments must be naturally aligned. */
  ASSERT (pointer_to_uword (dst) % sizeof (dst[0]) == 0);
  ASSERT (pointer_to_uword (src) % sizeof (src[0]) == 0);
  ASSERT (n_bytes % sizeof (dst[0]) == 0);

  if (4 * sizeof (dst[0]) == CLIB_CACHE_LINE_BYTES)
    {
      CLIB_PREFETCH (dst + 0, 4 * sizeof (dst[0]), WRITE);
      CLIB_PREFETCH (src + 0, 4 * sizeof (src[0]), READ);

      while (n_bytes >= 4 * sizeof (dst[0]))
        {
          dst += 4;
          src += 4;
          n_bytes -= 4 * sizeof (dst[0]);
          CLIB_PREFETCH (dst, 4 * sizeof (dst[0]), WRITE);
          CLIB_PREFETCH (src, 4 * sizeof (src[0]), READ);
          dst[-4] = src[-4];
          dst[-3] = src[-3];
          dst[-2] = src[-2];
          dst[-1] = src[-1];
        }
    }
  else if (8 * sizeof (dst[0]) == CLIB_CACHE_LINE_BYTES)
    {
      CLIB_PREFETCH (dst + 0, 8 * sizeof (dst[0]), WRITE);
      CLIB_PREFETCH (src + 0, 8 * sizeof (src[0]), READ);

      while (n_bytes >= 8 * sizeof (dst[0]))
        {
          dst += 8;
          src += 8;
          n_bytes -= 8 * sizeof (dst[0]);
          CLIB_PREFETCH (dst, 8 * sizeof (dst[0]), WRITE);
          CLIB_PREFETCH (src, 8 * sizeof (src[0]), READ);
          dst[-8] = src[-8];
          dst[-7] = src[-7];
          dst[-6] = src[-6];
          dst[-5] = src[-5];
          dst[-4] = src[-4];
          dst[-3] = src[-3];
          dst[-2] = src[-2];
          dst[-1] = src[-1];
        }
    }
  else
    /* Cache line size unknown: fall back to slow version. */ ;

  while (n_bytes > 0)
    {
      *dst++ = *src++;
      n_bytes -= 1 * sizeof (dst[0]);
    }
}

#define BUFFERS_PER_COPY (sizeof (vlib_copy_unit_t) / sizeof (u32))
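/* Worked example: vlib_copy_unit_t is the machine's natural copy word;
   with a 64-bit copy unit BUFFERS_PER_COPY is 8 / 4 = 2, i.e. each
   aligned copy moves two u32 buffer indices at once. */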
/* Make sure we have at least the given number of unaligned buffers. */
static void
fill_unaligned (vlib_main_t * vm,
                vlib_buffer_free_list_t * free_list,
                uword n_unaligned_buffers)
{
  word la = vec_len (free_list->aligned_buffers);
  word lu = vec_len (free_list->unaligned_buffers);

  /* Aligned come in aligned copy-sized chunks. */
  ASSERT (la % BUFFERS_PER_COPY == 0);

  ASSERT (la >= n_unaligned_buffers);

  while (lu < n_unaligned_buffers)
    {
      /* Copy BUFFERS_PER_COPY buffers from end of aligned vector to
         unaligned vector. */
      vec_add (free_list->unaligned_buffers,
               free_list->aligned_buffers + la - BUFFERS_PER_COPY,
               BUFFERS_PER_COPY);
      la -= BUFFERS_PER_COPY;
      lu += BUFFERS_PER_COPY;
    }
  _vec_len (free_list->aligned_buffers) = la;
}

/* After freeing, the aligned vector may no longer hold an even number of
   copy-sized chunks. */
static void
trim_aligned (vlib_buffer_free_list_t * f)
{
  uword l, n_trim;

  /* Add unaligned to aligned before trim. */
  l = vec_len (f->unaligned_buffers);
  if (l > 0)
    {
      vec_add_aligned (f->aligned_buffers, f->unaligned_buffers, l,
                       /* align */ sizeof (vlib_copy_unit_t));

      _vec_len (f->unaligned_buffers) = 0;
    }

  /* Remove unaligned buffers from end of aligned vector and save for
     next trim. */
  l = vec_len (f->aligned_buffers);
  n_trim = l % BUFFERS_PER_COPY;
  if (n_trim)
    {
      /* Trim aligned -> unaligned. */
      vec_add (f->unaligned_buffers,
               f->aligned_buffers + l - n_trim, n_trim);

      /* Remove from aligned. */
      _vec_len (f->aligned_buffers) = l - n_trim;
    }
}

static void
merge_free_lists (vlib_buffer_free_list_t * dst,
                  vlib_buffer_free_list_t * src)
{
  uword l;
  u32 *d;

  trim_aligned (src);
  trim_aligned (dst);

  l = vec_len (src->aligned_buffers);
  if (l > 0)
    {
      vec_add2_aligned (dst->aligned_buffers, d, l,
                        /* align */ sizeof (vlib_copy_unit_t));
      vlib_aligned_memcpy (d, src->aligned_buffers, l * sizeof (d[0]));
      vec_free (src->aligned_buffers);
    }

  l = vec_len (src->unaligned_buffers);
  if (l > 0)
    {
      vec_add (dst->unaligned_buffers, src->unaligned_buffers, l);
      vec_free (src->unaligned_buffers);
    }
}

always_inline u32
vlib_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword *p;

  size = vlib_buffer_round_size (size);
  p = hash_get (bm->free_list_by_size, size);
  return p ? p[0] : ~0;
}

/* Add buffer free list. */
static u32
vlib_buffer_create_free_list_helper (vlib_main_t * vm,
                                     u32 n_data_bytes,
                                     u32 is_public, u32 is_default,
                                     u8 * name)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;

  if (!is_default && pool_elts (bm->buffer_free_list_pool) == 0)
    {
      u32 default_free_free_list_index;

      default_free_free_list_index =
        vlib_buffer_create_free_list_helper
        (vm,
         /* default buffer size */ VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES,
         /* is_public */ 1,
         /* is_default */ 1,
         (u8 *) "default");
      ASSERT (default_free_free_list_index ==
              VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

      if (n_data_bytes == VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES && is_public)
        return default_free_free_list_index;
    }

  pool_get_aligned (bm->buffer_free_list_pool, f, CLIB_CACHE_LINE_BYTES);

  memset (f, 0, sizeof (f[0]));
  f->index = f - bm->buffer_free_list_pool;
  f->n_data_bytes = vlib_buffer_round_size (n_data_bytes);
  f->min_n_buffers_each_physmem_alloc = 256;
  f->name = clib_mem_is_heap_object (name) ? name : format (0, "%s", name);

  /* Setup free buffer template. */
  f->buffer_init_template.free_list_index = f->index;

  if (is_public)
    {
      uword *p = hash_get (bm->free_list_by_size, f->n_data_bytes);
      if (!p)
        hash_set (bm->free_list_by_size, f->n_data_bytes, f->index);
    }

  return f->index;
}

u32
vlib_buffer_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
                              char *fmt, ...)
{
  va_list va;
  u8 *name;

  va_start (va, fmt);
  name = va_format (0, fmt, &va);
  va_end (va);

  return vlib_buffer_create_free_list_helper (vm, n_data_bytes,
                                              /* is_public */ 0,
                                              /* is_default */ 0,
                                              name);
}

u32
vlib_buffer_get_or_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
                                     char *fmt, ...)
{
  u32 i = vlib_buffer_get_free_list_with_size (vm, n_data_bytes);

  if (i == ~0)
    {
      va_list va;
      u8 *name;

      va_start (va, fmt);
      name = va_format (0, fmt, &va);
      va_end (va);

      i = vlib_buffer_create_free_list_helper (vm, n_data_bytes,
                                               /* is_public */ 1,
                                               /* is_default */ 0,
                                               name);
    }

  return i;
}

static void
del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
{
  u32 i;

  for (i = 0; i < vec_len (f->buffer_memory_allocated); i++)
    vm->os_physmem_free (f->buffer_memory_allocated[i]);
  vec_free (f->name);
  vec_free (f->buffer_memory_allocated);
  vec_free (f->unaligned_buffers);
  vec_free (f->aligned_buffers);
}
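/* Usage sketch (illustrative): a feature that wants buffers with 2kB of
   data can share an existing public free list of that size, or create
   one on demand (the name shown is hypothetical):

     u32 fl_index = vlib_buffer_get_or_create_free_list
       (vm, 2048, "example-2048-byte-buffers");

   n_data_bytes is rounded up by vlib_buffer_round_size() before the size
   lookup, so requests that round to the same size share one list. */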
/* Delete buffer free list. */
void
vlib_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_free_list_t *f;
  u32 merge_index;

  f = vlib_buffer_get_free_list (vm, free_list_index);

  ASSERT (vec_len (f->unaligned_buffers) + vec_len (f->aligned_buffers) ==
          f->n_alloc);

  merge_index = vlib_buffer_get_free_list_with_size (vm, f->n_data_bytes);
  if (merge_index != ~0 && merge_index != free_list_index)
    {
      merge_free_lists (pool_elt_at_index (bm->buffer_free_list_pool,
                                           merge_index), f);
    }

  del_free_list (vm, f);

  /* Poison it. */
  memset (f, 0xab, sizeof (f[0]));

  pool_put (bm->buffer_free_list_pool, f);
}
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * vnet/buffer.h: vnet buffer flags
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 *  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 *  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vnet_buffer_h
#define included_vnet_buffer_h

#include <vlib/vlib.h>

/* VLIB buffer flags for ip4/ip6 packets.  Set by input interfaces for
   ip4/ip6 tcp/udp packets with hardware-computed checksums. */
#define LOG2_IP_BUFFER_L4_CHECKSUM_COMPUTED LOG2_VLIB_BUFFER_FLAG_USER(1)
#define LOG2_IP_BUFFER_L4_CHECKSUM_CORRECT  LOG2_VLIB_BUFFER_FLAG_USER(2)
#define IP_BUFFER_L4_CHECKSUM_COMPUTED (1 << LOG2_IP_BUFFER_L4_CHECKSUM_COMPUTED)
#define IP_BUFFER_L4_CHECKSUM_CORRECT  (1 << LOG2_IP_BUFFER_L4_CHECKSUM_CORRECT)
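
/* Illustrative sketch (not part of the original header): an input driver
   whose NIC verified a TCP/UDP checksum would mark the buffer so the
   ip4/ip6 local nodes can skip software verification.  hw_csum_ok is a
   hypothetical driver-provided flag:

     b->flags |= IP_BUFFER_L4_CHECKSUM_COMPUTED;
     if (hw_csum_ok)
       b->flags |= IP_BUFFER_L4_CHECKSUM_CORRECT;
*/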

/* VLAN header flags.
 * These bits are zeroed in vlib_buffer_init_for_free_list(), so they
 * hold a sane value regardless of where the buffer came from (e.g.
 * when ip4/ip6 generates the packet).
 */
#define LOG2_ETH_BUFFER_VLAN_2_DEEP LOG2_VLIB_BUFFER_FLAG_USER(3)
#define LOG2_ETH_BUFFER_VLAN_1_DEEP LOG2_VLIB_BUFFER_FLAG_USER(4)
#define ETH_BUFFER_VLAN_2_DEEP (1 << LOG2_ETH_BUFFER_VLAN_2_DEEP)
#define ETH_BUFFER_VLAN_1_DEEP (1 << LOG2_ETH_BUFFER_VLAN_1_DEEP)
#define ETH_BUFFER_VLAN_BITS (ETH_BUFFER_VLAN_1_DEEP | \
                              ETH_BUFFER_VLAN_2_DEEP)
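
/* Illustrative sketch: ethernet-input records the observed tag depth, so
   later nodes can branch without re-parsing the header:

     int n_vlan_tags = 0;
     if (b->flags & ETH_BUFFER_VLAN_2_DEEP)
       n_vlan_tags = 2;
     else if (b->flags & ETH_BUFFER_VLAN_1_DEEP)
       n_vlan_tags = 1;
*/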

#define LOG2_BUFFER_HANDOFF_NEXT_VALID LOG2_VLIB_BUFFER_FLAG_USER(6)
#define BUFFER_HANDOFF_NEXT_VALID (1 << LOG2_BUFFER_HANDOFF_NEXT_VALID)

#define LOG2_VNET_BUFFER_LOCALLY_ORIGINATED LOG2_VLIB_BUFFER_FLAG_USER(7)
#define VNET_BUFFER_LOCALLY_ORIGINATED (1 << LOG2_VNET_BUFFER_LOCALLY_ORIGINATED)

#define LOG2_VNET_BUFFER_SPAN_CLONE LOG2_VLIB_BUFFER_FLAG_USER(8)
#define VNET_BUFFER_SPAN_CLONE (1 << LOG2_VNET_BUFFER_SPAN_CLONE)
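
/* The pattern above is how a subsystem claims a buffer flag: take an
   unused LOG2_VLIB_BUFFER_FLAG_USER(n) bit and derive the mask from it.
   A hypothetical new feature would follow suit:

     #define LOG2_VNET_BUFFER_MY_FEATURE LOG2_VLIB_BUFFER_FLAG_USER(9)
     #define VNET_BUFFER_MY_FEATURE (1 << LOG2_VNET_BUFFER_MY_FEATURE)
*/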

#define foreach_buffer_opaque_union_subtype     \
_(ethernet)                                     \
_(ip)                                           \
_(swt)                                          \
_(l2)                                           \
_(l2t)                                          \
_(gre)                                          \
_(l2_classify)                                  \
_(handoff)                                      \
_(policer)                                      \
_(ipsec)                                        \
_(map)                                          \
_(map_t)                                        \
_(ip_frag)
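
/* X-macro usage sketch (hypothetical): clients define _() and expand the
   list above, e.g. to emit one size check per subtype inside an init
   function:

     #define _(a) ASSERT (sizeof (((vnet_buffer_opaque_t *) 0)->a) \
                          <= 32 - sizeof (u32) * VLIB_N_RX_TX);
     foreach_buffer_opaque_union_subtype;
     #undef _
*/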

/*
 * vnet stack buffer opaque array overlay structure.
 * The vnet_buffer_opaque_t *must* be the same size as the
 * vlib_buffer_t "opaque" structure member, 32 bytes.
 *
 * When adding a union type, please add a stanza to
 * foreach_buffer_opaque_union_subtype (directly above).
 * Code in vnet_interface_init(...) verifies the size
 * of the union, and will announce any deviations in an
 * impossible-to-miss manner.
 */
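/* A compile-time guard for this constraint could look like the following
   sketch (illustrative only; the stock check in vnet_interface_init()
   happens at runtime):

     typedef int vnet_buffer_opaque_size_check
       [sizeof (vnet_buffer_opaque_t) <= 32 ? 1 : -1];
*/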
typedef struct
{
  u32 sw_if_index[VLIB_N_RX_TX];

  union
  {
    /* Ethernet. */
    struct
    {
      /* Saved value of current header by ethernet-input. */
      i32 start_of_ethernet_header;
    } ethernet;

    /* IP4/6 buffer opaque. */
    struct
    {
      /* Adjacency from destination IP address lookup [VLIB_TX].
         Adjacency from source IP address lookup [VLIB_RX];
         stays ~0 until the source lookup is performed. */
      u32 adj_index[VLIB_N_RX_TX];

      union
      {
	struct
	{
	  /* Flow hash value for this packet, computed from the IP
	     src/dst addresses, protocol and ports. */
	  u32 flow_hash;

	  /* next protocol */
	  u32 save_protocol;

	  /* Rewrite length */
	  u32 save_rewrite_length;

	  /* MFIB RPF ID */
	  u32 rpf_id;
	};

	/* ICMP */
	struct
	{
	  u8 type;
	  u8 code;
	  u32 data;
	} icmp;

	/* IP header offset from vlib_buffer.data - saved by ip*_local nodes */
	i32 start_of_ip_header;
      };

    } ip;
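    /* Illustrative use (vnet_buffer() is the usual opaque accessor macro;
       the right-hand names are placeholders): ip4-lookup stores its result
       roughly as
         vnet_buffer (b)->ip.adj_index[VLIB_TX] = lookup_result;
         vnet_buffer (b)->ip.flow_hash = computed_hash;
     */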

    /*
     * MPLS:
     * data copied from the MPLS header that was popped from the packet
     * during the look-up.
     */
    struct
    {
      u8 ttl;
      u8 exp;
      u8 first;
    } mpls;

    /* ip4-in-ip6 softwire termination, only valid there */
    struct
    {
      u8 swt_disable;
      u32 mapping_index;
    } swt;

    /* l2 bridging path, only valid there */
    struct
    {
      u32 feature_bitmap;
      u16 bd_index;		/* bridge-domain index */
      u8 l2_len;		/* ethernet header length */
      u8 shg;			/* split-horizon group */
      u8 bd_sn;			/* bridge domain seq# */
      u8 int_sn;		/* interface seq# */
    } l2;
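    /* Illustrative use: the l2 input path clears each feature's bit in
       feature_bitmap as that feature runs, e.g. (feature constant shown
       only as an example):
         vnet_buffer (b)->l2.feature_bitmap &= ~L2INPUT_FEAT_LEARN;
       so the remaining bits select the next feature node. */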

    /* l2tpv3 softwire encap, only valid there */
    struct
    {
      u32 pad[4];		/* do not overlay w/ ip.adj_index[0,1] */
      u8 next_index;
      u32 session_index;
    } l2t;

    struct
    {
      u32 src, dst;
    } gre;

    /* L2 classify */
    struct
    {
      u64 pad;
      u32 table_index;
      u32 opaque_index;
      u64 hash;
    } l2_classify;

    /* IO - worker thread handoff */
    struct
    {
      u32 next_index;
    } handoff;

    /* vnet policer */
    struct
    {
      u32 pad[8 - VLIB_N_RX_TX - 1];	/* to end of opaque */
      u32 index;
    } policer;
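    /* Pad arithmetic: the opaque holds 8 u32s (32 bytes); sw_if_index[]
       uses VLIB_N_RX_TX of them, so 8 - VLIB_N_RX_TX - 1 pad words place
       'index' in the final u32 of the opaque. */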

    /* interface output features */
    struct
    {
      u32 flags;
      u32 sad_index;
    } ipsec;

    /* MAP */
    struct
    {
      u16 mtu;
    } map;

    /* MAP-T */
    struct
    {
      u32 map_domain_index;
      struct
      {
	u32 saddr, daddr;
	u16 frag_offset;	/* Fragmentation header offset */
	u16 l4_offset;		/* L4 header overall offset */
	u8 l4_protocol;		/* The final protocol number */
      } v6;