summaryrefslogtreecommitdiffstats
path: root/src/vppinfra/bihash_40_8.h
blob: 1fb344fdeeb24b62571841ad57eb6fc7d560c908 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * The bihash template (bihash_template.h) is multiply-included, once per
 * key/value size variant.  Clear every template parameter first so stale
 * settings from a previously-included variant cannot leak into this one.
 */
#undef BIHASH_TYPE
#undef BIHASH_KVP_PER_PAGE
#undef BIHASH_32_64_SVM
#undef BIHASH_ENABLE_STATS
#undef BIHASH_KVP_AT_BUCKET_LEVEL
#undef BIHASH_LAZY_INSTANTIATE
#undef BIHASH_BUCKET_PREFETCH_CACHE_LINES
#undef BIHASH_USE_HEAP

/* Suffix appended to every template symbol: clib_bihash_*_40_8 */
#define BIHASH_TYPE _40_8
/* Key/value pairs stored per bucket page */
#define BIHASH_KVP_PER_PAGE 4
/* 0 = kvps live in separately-allocated pages, not inline in buckets */
#define BIHASH_KVP_AT_BUCKET_LEVEL 0
/* Defer table memory allocation until first use */
#define BIHASH_LAZY_INSTANTIATE 1
/* Prefetch one cache line per bucket on lookup */
#define BIHASH_BUCKET_PREFETCH_CACHE_LINES 1
/* Allocate backing store from the clib heap rather than shared memory */
#define BIHASH_USE_HEAP 1

#ifndef __included_bihash_40_8_h__
#define __included_bihash_40_8_h__

#include <vppinfra/crc32.h>
#include <vppinfra/heap.h>
#include <vppinfra/format.h>
#include <vppinfra/pool.h>
#include <vppinfra/xxhash.h>

/** Key/value pair for the 40-byte-key, 8-byte-value bihash variant.
 *  Layout is fixed: the template code treats this as 48 contiguous bytes. */
typedef struct
{
  u64 key[5];	/**< 40-byte key, accessed as five 64-bit words */
  u64 value;	/**< 8-byte opaque value */
} clib_bihash_kv_40_8_t;

/** Predicate: is this kv slot free (i.e. never written or deleted)?
 *
 *  Free slots are clib_memset to 0xff by the template code, so it is
 *  sufficient to probe the first key word and the value word rather
 *  than all 48 bytes.
 *
 *  @param v  candidate kv pair
 *  @return   1 if the slot is free, 0 otherwise
 */
static inline int
clib_bihash_is_free_40_8 (const clib_bihash_kv_40_8_t * v)
{
  return (v->key[0] == ~0ULL && v->value == ~0ULL) ? 1 : 0;
}

/** Hash a 40-byte key to a 64-bit bucket selector.
 *
 *  With hardware CRC32C support the full key bytes feed the CRC unit;
 *  otherwise the five key words are folded by xor and mixed through
 *  xxhash.  (The xor fold loses per-word position information, which is
 *  acceptable for bucket selection.)
 *
 *  @param v  kv pair whose key is hashed
 *  @return   64-bit hash of v->key
 */
static inline u64
clib_bihash_hash_40_8 (const clib_bihash_kv_40_8_t * v)
{
#ifdef clib_crc32c_uses_intrinsics
  /* Hardware-accelerated CRC32C over all 40 key bytes. */
  return clib_crc32c ((u8 *) v->key, 40);
#else
  /* Portable fallback: xor-fold the key words, then scramble. */
  u64 folded = v->key[0];
  folded ^= v->key[1];
  folded ^= v->key[2];
  folded ^= v->key[3];
  folded ^= v->key[4];
  return clib_xxhash (folded);
#endif
}

/** vppinfra format callback: render a kv pair as human-readable text.
 *
 *  @param s     vector being appended to
 *  @param args  single clib_bihash_kv_40_8_t * argument
 *  @return      s with "key ... value ..." appended
 */
static inline u8 *
format_bihash_kvp_40_8 (u8 * s, va_list * args)
{
  clib_bihash_kv_40_8_t *kv = va_arg (*args, clib_bihash_kv_40_8_t *);

  return format (s, "key %llu %llu %llu %llu %llu value %llu",
		 kv->key[0], kv->key[1], kv->key[2], kv->key[3],
		 kv->key[4], kv->value);
}

/** Compare two 40-byte keys for equality.
 *
 *  Selects the widest SIMD path the build supports; all paths compute
 *  the xor of the two keys and test whether every lane is zero.
 *
 *  @param a  first key, five u64 words, not necessarily aligned
 *  @param b  second key, five u64 words, not necessarily aligned
 *  @return   non-zero if the keys are equal, 0 otherwise
 *
 *  NOTE(review): the 512-bit path loads 64 bytes, i.e. 3 words past the
 *  40-byte key; masking with 0x1f confines the test to the 5 key lanes,
 *  but the loads assume the surrounding kv layout makes the extra bytes
 *  readable — confirm against the template's kv storage.
 */
static inline int
clib_bihash_key_compare_40_8 (u64 * a, u64 * b)
{
#if defined (CLIB_HAVE_VEC512)
  /* One 8-lane xor; keep only the low 5 lanes of the zero-mask. */
  u64x8 v;
  v = u64x8_load_unaligned (a) ^ u64x8_load_unaligned (b);
  return (u64x8_is_zero_mask (v) & 0x1f) == 0;
#elif defined (CLIB_HAVE_VEC256)
  /* Word 4 handled scalar in lane 0; words 0-3 via one 4-lane xor. */
  u64x4 v = { a[4] ^ b[4], 0, 0, 0 };
  v |= u64x4_load_unaligned (a) ^ u64x4_load_unaligned (b);
  return u64x4_is_all_zero (v);
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  /* Word 4 scalar, words 0-3 as two 2-lane xors, ORed together. */
  u64x2 v = { a[4] ^ b[4], 0 };
  v |= u64x2_load_unaligned (a) ^ u64x2_load_unaligned (b);
  v |= u64x2_load_unaligned (a + 2) ^ u64x2_load_unaligned (b + 2);
  return u64x2_is_all_zero (v);
#else
  /* Scalar fallback: xor every word pair and OR the differences. */
  return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]) | (a[3] ^ b[3])
	  | (a[4] ^ b[4])) == 0;
#endif
}

#undef __included_bihash_template_h__
#include <vppinfra/bihash_template.h>

#endif /* __included_bihash_40_8_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
ass="p">); i = sparse_index / BITS (h->is_member_bitmap[0]); b = sparse_index % BITS (h->is_member_bitmap[0]); ASSERT (i < vec_len (h->is_member_bitmap)); ASSERT (i < vec_len (h->member_counts)); w = h->is_member_bitmap[i]; /* count_trailing_zeros(0) == 0, take care of that case */ if (PREDICT_FALSE (maybe_range == 0 && insert == 0 && w == 0)) return 0; if (PREDICT_TRUE (maybe_range == 0 && insert == 0 && count_trailing_zeros (w) == b)) return h->member_counts[i] + 1; d = h->member_counts[i] + count_set_bits (w & ((1ULL << b) - 1)); is_member = (w & (1ULL << b)) != 0; if (maybe_range) { u8 r = h->range_flags[d]; u8 is_range, is_valid_range; is_range = maybe_range & (r & SPARSE_VEC_IS_RANGE); is_valid_range = (r & SPARSE_VEC_IS_VALID_RANGE) != 0; is_member = is_range ? is_valid_range : is_member; } if (insert) { *insert = !is_member; if (!is_member) { uword j; w |= 1ULL << b; h->is_member_bitmap[i] = w; for (j = i + 1; j < vec_len (h->member_counts); j++) h->member_counts[j] += 1; } return 1 + d; } d = is_member ? d : 0; return is_member + d; } always_inline uword sparse_vec_index (void *v, uword sparse_index) { return sparse_vec_index_internal (v, sparse_index, /* maybe range */ 0, /* insert? 
*/ 0); } always_inline void sparse_vec_index2 (void *v, u32 si0, u32 si1, u32 * i0_return, u32 * i1_return) { sparse_vec_header_t *h; uword b0, b1, w0, w1, v0, v1; u32 i0, i1, d0, d1; u8 is_member0, is_member1; h = sparse_vec_header (v); i0 = si0 / BITS (h->is_member_bitmap[0]); i1 = si1 / BITS (h->is_member_bitmap[0]); b0 = si0 % BITS (h->is_member_bitmap[0]); b1 = si1 % BITS (h->is_member_bitmap[0]); ASSERT (i0 < vec_len (h->is_member_bitmap)); ASSERT (i1 < vec_len (h->is_member_bitmap)); ASSERT (i0 < vec_len (h->member_counts)); ASSERT (i1 < vec_len (h->member_counts)); w0 = h->is_member_bitmap[i0]; w1 = h->is_member_bitmap[i1]; if (PREDICT_TRUE ((count_trailing_zeros (w0) == b0) + (count_trailing_zeros (w1) == b1) == 2)) { *i0_return = h->member_counts[i0] + 1; *i1_return = h->member_counts[i1] + 1; return; } v0 = w0 & ((1ULL << b0) - 1); v1 = w1 & ((1ULL << b1) - 1); /* Speculate that masks will have zero or one bits set. */ d0 = h->member_counts[i0] + (v0 != 0); d1 = h->member_counts[i1] + (v1 != 0); /* Validate speculation. */ if (PREDICT_FALSE (!is_pow2 (v0) || !is_pow2 (v1))) { d0 += count_set_bits (v0) - (v0 != 0); d1 += count_set_bits (v1) - (v1 != 0); } is_member0 = (w0 & (1ULL << b0)) != 0; is_member1 = (w1 & (1ULL << b1)) != 0; d0 = is_member0 ? d0 : 0; d1 = is_member1 ? d1 : 0; *i0_return = is_member0 + d0; *i1_return = is_member1 + d1; } #define sparse_vec_free(v) vec_free(v) #define sparse_vec_elt_at_index(v,i) \ vec_elt_at_index ((v), sparse_vec_index ((v), (i))) #define sparse_vec_validate(v,i) \ ({ \ uword _i; \ u32 _insert; \ \ if (! (v)) \ (v) = sparse_vec_new (sizeof ((v)[0]), BITS (u16)); \ \ _i = sparse_vec_index_internal ((v), (i), \ /* maybe range */ 0, \ /* insert? */ &_insert); \ if (_insert) \ vec_insert_ha ((v), 1, _i, \ /* header size */ sizeof (sparse_vec_header_t), \ /* align */ 0); \ \ /* Invalid index is 0. 
*/ \ ASSERT (_i > 0); \ \ (v) + _i; \ }) #endif /* included_sparse_vec_h */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */