/* * Copyright (c) 2015 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Copyright (c) 2001-2005 Eliot Dresselhaus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
/* NOTE(review): the four #include directives below lost their header names
   (angle-bracket contents appear to have been stripped during extraction).
   Restore them from the original source before building; one of them
   supplies clib_arch_is_big_endian.  TODO confirm the intended headers. */
#include #include #include #include /* for clib_arch_is_big_endian */

/* Zero every byte of the pair (key and value) pointed to by p. */
always_inline void
zero_pair (hash_t * h, hash_pair_t * p)
{
  clib_memset (p, 0, hash_pair_bytes (h));
}

/* Fill the value bytes of pair p with all-ones; the key is not touched. */
always_inline void
init_pair (hash_t * h, hash_pair_t * p)
{
  clib_memset (p->value, ~0, hash_value_bytes (h));
}

/* Return bucket i of hash vector v as a pair union (interpreted as either
   a direct pair or an indirect pointer, per the bucket's is_user bit). */
always_inline hash_pair_union_t *
get_pair (void *v, uword i)
{
  hash_t *h = hash_header (v);
  hash_pair_t *p;
  ASSERT (i < vec_len (v));
  p = v;
  p += i << h->log2_pair_size;
  return (hash_pair_union_t *) p;
}

/* Set (is_user != 0) or clear bucket i's bit in the header's is_user
   bitmap. */
always_inline void
set_is_user (void *v, uword i, uword is_user)
{
  hash_t *h = hash_header (v);
  uword i0 = i / BITS (h->is_user[0]);
  uword i1 = (uword) 1 << (i % BITS (h->is_user[0]));
  if (is_user)
    h->is_user[i0] |= i1;
  else
    h->is_user[i0] &= ~i1;
}

static u8 *hash_format_pair_default (u8 * s, va_list * args);

#if uword_bits == 64

/* Keep only the low n bytes of x (byte-order aware); n in [0, 7]. */
static inline u64
zap64 (u64 x, word n)
{
#define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
  static u64 masks_little_endian[] = {
    0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
  };
  static u64 masks_big_endian[] = {
    0, ~_(7), ~_(6), ~_(5), ~_(4), ~_(3), ~_(2), ~_(1),
  };
#undef _
  if (clib_arch_is_big_endian)
    return x & masks_big_endian[n];
  else
    return x & masks_little_endian[n];
}

/**
 * make address-sanitizer skip this:
 * clib_mem_unaligned + zap64 casts its input as u64, computes a mask
 * according to the input length, and returns the casted masked value.
 * Therefore all the 8 Bytes of the u64 are systematically read, which
 * rightfully causes address-sanitizer to raise an error on smaller inputs.
 *
 * However the invalid Bytes are discarded within zap64(), which is why
 * this can be silenced safely.
 *
 * The above is true *unless* the extra bytes cross a page boundary
 * into unmapped or no-access space, hence the boundary crossing check.
 */
static inline u64
hash_memory64 (void *p, word n_bytes, u64 state)
{
  u64 *q = p;
  u64 a, b, c, n;
  int page_boundary_crossing;
  u64 start_addr, end_addr;
  union
  {
    u8 as_u8[8];
    u64 as_u64;
  } tmp;

  /*
   * If the request crosses a 4k boundary, it's not OK to assume
   * that the zap64 game is safe. 4k is the minimum known page size.
   */
  start_addr = (u64) p;
  end_addr = start_addr + n_bytes + 7;
  page_boundary_crossing = (start_addr >> 12) != (end_addr >> 12);

  a = b = 0x9e3779b97f4a7c13LL;
  c = state;
  n = n_bytes;

  /* Mix three 64-bit words per iteration while enough input remains. */
  while (n >= 3 * sizeof (u64))
    {
      a += clib_mem_unaligned (q + 0, u64);
      b += clib_mem_unaligned (q + 1, u64);
      c += clib_mem_unaligned (q + 2, u64);
      hash_mix64 (a, b, c);
      n -= 3 * sizeof (u64);
      q += 3;
    }

  c += n_bytes;
  /* Fold in the 0..23 remaining bytes; a trailing partial word is read
     with the zap64 over-read trick unless that read could cross a page,
     in which case the bytes are copied into tmp first. */
  switch (n / sizeof (u64))
    {
    case 2:
      a += clib_mem_unaligned (q + 0, u64);
      b += clib_mem_unaligned (q + 1, u64);
      if (n % sizeof (u64))
	{
	  if (PREDICT_TRUE (page_boundary_crossing == 0))
	    c += zap64 (CLIB_MEM_OVERFLOW
			(clib_mem_unaligned (q + 2, u64), q + 2,
			 sizeof (u64)), n % sizeof (u64)) << 8;
	  else
	    {
	      clib_memcpy_fast (tmp.as_u8, q + 2, n % sizeof (u64));
	      c += zap64 (tmp.as_u64, n % sizeof (u64)) << 8;
	    }
	}
      break;

    case 1:
      a += clib_mem_unaligned (q + 0, u64);
      if (n % sizeof (u64))
	{
	  if (PREDICT_TRUE (page_boundary_crossing == 0))
	    b += zap64 (CLIB_MEM_OVERFLOW
			(clib_mem_unaligned (q + 1, u64), q + 1,
			 sizeof (u64)), n % sizeof (u64));
	  else
	    {
	      clib_memcpy_fast (tmp.as_u8, q + 1, n % sizeof (u64));
	      b += zap64 (tmp.as_u64, n % sizeof (u64));
	    }
	}
      break;

    case 0:
      if (n % sizeof (u64))
	{
	  if (PREDICT_TRUE (page_boundary_crossing == 0))
	    a += zap64 (CLIB_MEM_OVERFLOW
			(clib_mem_unaligned (q + 0, u64), q + 0,
			 sizeof (u64)), n % sizeof (u64));
	  else
	    {
	      clib_memcpy_fast (tmp.as_u8, q, n % sizeof (u64));
	      a += zap64 (tmp.as_u64, n % sizeof (u64));
	    }
	}
      break;
    }

  hash_mix64 (a, b, c);

  return c;
}

#else /* if uword_bits == 64 */

/* Keep only the low n bytes of x (byte-order aware); n in [0, 3]. */
static inline u32
zap32 (u32 x, word n)
{
#define _(n) (((u32) 1 << (u32) (8*(n))) - (u32) 1)
  static u32 masks_little_endian[] = {
    0, _(1), _(2), _(3),
  };
  static u32 masks_big_endian[] = {
    0, ~_(3), ~_(2), ~_(1),
  };
#undef _
  if (clib_arch_is_big_endian)
    return x & masks_big_endian[n];
  else
    return x & masks_little_endian[n];
}

/* 32-bit variant of hash_memory64; note it has no page-boundary
   handling -- the trailing partial word is always over-read. */
static inline u32
hash_memory32 (void *p, word n_bytes, u32 state)
{
  u32 *q = p;
  u32 a, b, c, n;

  a = b = 0x9e3779b9;
  c = state;
  n = n_bytes;

  while (n >= 3 * sizeof (u32))
    {
      a += clib_mem_unaligned (q + 0, u32);
      b += clib_mem_unaligned (q + 1, u32);
      c += clib_mem_unaligned (q + 2, u32);
      hash_mix32 (a, b, c);
      n -= 3 * sizeof (u32);
      q += 3;
    }

  c += n_bytes;
  switch (n / sizeof (u32))
    {
    case 2:
      a += clib_mem_unaligned (q + 0, u32);
      b += clib_mem_unaligned (q + 1, u32);
      if (n % sizeof (u32))
	c += zap32 (clib_mem_unaligned (q + 2, u32), n % sizeof (u32)) << 8;
      break;

    case 1:
      a += clib_mem_unaligned (q + 0, u32);
      if (n % sizeof (u32))
	b += zap32 (clib_mem_unaligned (q + 1, u32), n % sizeof (u32));
      break;

    case 0:
      if (n % sizeof (u32))
	a += zap32 (clib_mem_unaligned (q + 0, u32), n % sizeof (u32));
      break;
    }

  hash_mix32 (a, b, c);

  return c;
}
#endif

/* Hash n_bytes of memory starting at p, seeded with state. */
__clib_export uword
hash_memory (void *p, word n_bytes, uword state)
{
  uword *q = p;

#if uword_bits == 64
  return hash_memory64 (q, n_bytes, state);
#else
  return hash_memory32 (q, n_bytes, state);
#endif
}

#if uword_bits == 64
/* Hash a single machine word. */
always_inline uword
hash_uword (uword x)
{
  u64 a, b, c;

  a = b = 0x9e3779b97f4a7c13LL;
  c = 0;
  a += x;
  hash_mix64 (a, b, c);
  return c;
}
#else
/* Hash a single machine word. */
always_inline uword
hash_uword (uword x)
{
  u32 a, b, c;

  a = b = 0x9e3779b9;
  c = 0;
  a += x;
  hash_mix32 (a, b, c);
  return c;
}
#endif

/* Call sum function.  Hash code will be sum function value
   modulo the prime length of the hash table. */
always_inline uword
key_sum (hash_t * h, uword key)
{
  uword sum;
  /* h->key_sum doubles as either a real function pointer or a small
     KEY_FUNC_* opcode selecting a built-in key hashing scheme. */
  switch (pointer_to_uword ((void *) h->key_sum))
    {
    case KEY_FUNC_NONE:
      sum = hash_uword (key);
      break;

    case KEY_FUNC_POINTER_UWORD:
      sum = hash_uword (*uword_to_pointer (key, uword *));
      break;

    case KEY_FUNC_POINTER_U32:
      sum = hash_uword (*uword_to_pointer (key, u32 *));
      break;

    case KEY_FUNC_STRING:
      sum = string_key_sum (h, key);
      break;

    case KEY_FUNC_MEM:
      sum = mem_key_sum (h, key);
      break;

    default:
      /* A genuine function pointer: call it. */
      sum = h->key_sum (h, key);
      break;
    }

  return sum;
}

/* Full key comparison per h->key_equal's opcode / function pointer.
   e carries the caller's default result (the direct key1 == key2 test)
   and is returned unchanged for KEY_FUNC_NONE. */
always_inline uword
key_equal1 (hash_t * h, uword key1, uword key2, uword e)
{
  switch (pointer_to_uword ((void *) h->key_equal))
    {
    case KEY_FUNC_NONE:
      break;

    case KEY_FUNC_POINTER_UWORD:
      e = *uword_to_pointer (key1, uword *) == *uword_to_pointer (key2,
								  uword *);
      break;

    case KEY_FUNC_POINTER_U32:
      e = *uword_to_pointer (key1, u32 *) == *uword_to_pointer (key2, u32 *);
      break;

    case KEY_FUNC_STRING:
      e = string_key_equal (h, key1, key2);
      break;

    case KEY_FUNC_MEM:
      e = mem_key_equal (h, key1, key2);
      break;

    default:
      e = h->key_equal (h, key1, key2);
      break;
    }
  return e;
}

/* Compares two keys: returns 1 if equal, 0 if not. */
always_inline uword
key_equal (hash_t * h, uword key1, uword key2)
{
  uword e = key1 == key2;
  /* In debug images, double-check that bit-identical keys also compare
     equal under the configured key_equal function. */
  if (CLIB_DEBUG > 0 && key1 == key2)
    ASSERT (key_equal1 (h, key1, key2, e));
  if (!e)
    e = key_equal1 (h, key1, key2, e);
  return e;
}

/* Linearly scan an indirect pair group for key; returns 0 if absent. */
static hash_pair_union_t *
get_indirect (void *v, hash_pair_indirect_t * pi, uword key)
{
  hash_t *h = hash_header (v);
  hash_pair_t *p0, *p1;

  p0 = p1 = pi->pairs;
  if (h->log2_pair_size > 0)
    p1 = hash_forward (h, p0, indirect_pair_get_len (pi));
  else
    p1 += vec_len (p0);

  while (p0 < p1)
    {
      if (key_equal (h, p0->key, key))
	return (hash_pair_union_t *) p0;
      p0 = hash_forward1 (h, p0);
    }

  return (hash_pair_union_t *) 0;
}

/* Convert bucket i from a direct (is_user) pair into an indirect group
   of two pairs: the pre-existing pair plus a fresh one for key.
   Returns the fresh pair for the caller to fill in. */
static hash_pair_union_t *
set_indirect_is_user (void *v, uword i, hash_pair_union_t * p, uword key)
{
  hash_t *h = hash_header (v);
  hash_pair_t *q;
  hash_pair_indirect_t *pi = &p->indirect;
  uword log2_bytes = 0;

  if (h->log2_pair_size == 0)
    q = vec_new (hash_pair_t, 2);
  else
    {
      /* Allocate room for two pairs. */
      log2_bytes = 1 + hash_pair_log2_bytes (h);
      q = clib_mem_alloc (1ULL << log2_bytes);
    }
  clib_memcpy_fast (q, &p->direct, hash_pair_bytes (h));

  pi->pairs = q;
  if (h->log2_pair_size > 0)
    indirect_pair_set (pi, log2_bytes, 2);

  set_is_user (v, i, 0);

  /* First element is used by existing pair, second will be used by caller. */
  q = hash_forward1 (h, q);
  q->key = key;
  init_pair (h, q);

  return (hash_pair_union_t *) q;
}

/* Find or append key within an existing indirect group.
   Sets *found_key to 1 when key was already present, else 0. */
static hash_pair_union_t *
set_indirect (void *v, hash_pair_indirect_t * pi, uword key,
	      uword * found_key)
{
  hash_t *h = hash_header (v);
  hash_pair_t *new_pair;
  hash_pair_union_t *q;

  q = get_indirect (v, pi, key);
  if (q)
    {
      *found_key = 1;
      return q;
    }

  if (h->log2_pair_size == 0)
    vec_add2 (pi->pairs, new_pair, 1);
  else
    {
      uword len, new_len, log2_bytes;

      len = indirect_pair_get_len (pi);
      log2_bytes = indirect_pair_get_log2_bytes (pi);
      new_len = len + 1;
      /* Double the allocation when the group outgrows it. */
      if (new_len * hash_pair_bytes (h) > (1ULL << log2_bytes))
	{
	  pi->pairs = clib_mem_realloc (pi->pairs,
					1ULL << (log2_bytes + 1),
					1ULL << log2_bytes);
	  log2_bytes++;
	}

      indirect_pair_set (pi, log2_bytes, new_len);
      new_pair = pi->pairs + (len << h->log2_pair_size);
    }
  new_pair->key = key;
  init_pair (h, new_pair);
  *found_key = 0;
  return (hash_pair_union_t *) new_pair;
}

/* Remove pair q from bucket i's indirect group, collapsing the group
   back into a direct pair (or an empty bucket) when two or fewer
   pairs remain. */
static void
unset_indirect (void *v, uword i, hash_pair_t * q)
{
  hash_t *h = hash_header (v);
  hash_pair_union_t *p = get_pair (v, i);
  hash_pair_t *e;
  hash_pair_indirect_t *pi = &p->indirect;
  uword len, is_vec;

  is_vec = h->log2_pair_size == 0;

  ASSERT (!hash_is_user (v, i));
  len = is_vec ? vec_len (pi->pairs) : indirect_pair_get_len (pi);
  e = hash_forward (h, pi->pairs, len - 1);
  ASSERT (q >= pi->pairs && q <= e);

  /* We have two or fewer pairs and we are delete one pair.
     Make indirect pointer direct and free indirect memory. */
  if (len <= 2)
    {
      hash_pair_t *r = pi->pairs;

      if (len == 2)
	{
	  /* Keep the survivor by copying it into the bucket directly. */
	  clib_memcpy_fast (p, q == r ? hash_forward1 (h, r) : r,
			    hash_pair_bytes (h));
	  set_is_user (v, i, 1);
	}
      else
	zero_pair (h, &p->direct);

      if (is_vec)
	vec_free (r);
      else if (r)
	clib_mem_free (r);
    }
  else
    {
      /* If deleting a pair we need to keep non-null pairs together. */
      if (q < e)
	clib_memcpy_fast (q, e, hash_pair_bytes (h));
      else
	zero_pair (h, q);
      if (is_vec)
	_vec_len (pi->pairs) -= 1;
      else
	indirect_pair_set (pi, indirect_pair_get_log2_bytes (pi), len - 1);
    }
}

enum lookup_opcode
{
  GET = 1,
  SET = 2,
  UNSET = 3,
};

/* Core get/set/unset engine: returns the pair for key, or 0 if absent.
   NOTE(review): this function is truncated in this chunk -- the text
   below ends mid-comment and its remainder lies outside this view. */
static hash_pair_t *
lookup (void *v, uword key, enum lookup_opcode op,
	void *new_value, void *old_value)
{
  hash_t *h = hash_header (v);
  hash_pair_union_t *p = 0;
  uword found_key = 0;
  uword value_bytes;
  uword i;

  if (!v)
    return 0;

  /* Table length is a power of two, so mask selects the bucket. */
  i = key_sum (h, key) & (_vec_len (v) - 1);
  p = get_pair (v, i);
  value_bytes = hash_value_bytes (h);

  if (hash_is_user (v, i))
    {
      found_key = key_equal (h, p->direct.key, key);
      if (found_key)
	{
	  if (op == UNSET)
	    {
	      set_is_user (v, i, 0);
	      if (old_value && value_bytes)
		clib_memcpy_fast (old_value, p->direct.value, value_bytes);
	      zero_pair (h, &p->direct);
	    }
	}
      else
	{
	  if (op == SET)
	    p = set_indirect_is_user (v, i, p, key);
	  else
	    p = 0;
	}
    }
  else
    {
      hash_pair_indirect_t *pi = &p->indirect;

      if (op == SET)
	{
	  if (!pi->pairs)
	    {
	      /* Empty bucket: store directly. */
	      p->direct.key = key;
	      set_is_user (v, i, 1);
	    }
	  else
	    p = set_indirect (v, pi, key, &found_key);
	}
      else
	{
	  p = get_indirect (v, pi, key);
	  found_key = p != 0;
	  if (found_key && op == UNSET)
	    {
	      if (old_value && value_bytes)
		clib_memcpy_fast (old_value, &p->direct.value, value_bytes);
	      unset_indirect (v, i, &p->direct);
	      /* Nullify p (since it's just been deleted).  Otherwise we
/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/udp/udp_encap.h>
#include <vnet/udp/udp.h>

/**
 * Per-packet trace record for the IPv4 UDP encap node: the UDP and
 * IPv4 headers recorded for this packet (presumably copied from the
 * applied encap rewrite -- TODO confirm against the node's trace hook).
 */
typedef struct udp4_encap_trace_t_
{
  udp_header_t udp;	/**< UDP header captured for tracing */
  ip4_header_t ip;	/**< IPv4 header captured for tracing */
} udp4_encap_trace_t;

/**
 * Per-packet trace record for the IPv6 UDP encap node: the UDP and
 * IPv6 headers recorded for this packet (presumably copied from the
 * applied encap rewrite -- TODO confirm against the node's trace hook).
 */
typedef struct udp6_encap_trace_t_
{
  udp_header_t udp;	/**< UDP header captured for tracing */
  ip6_header_t ip;	/**< IPv6 header captured for tracing */
} udp6_encap_trace_t;

extern vlib_combined_counter_main_t udp_encap_counters;

static u8 *
format_udp4_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t