| author | Benoît Ganne <bganne@cisco.com> | 2022-06-08 10:49:17 +0200 |
|---|---|---|
| committer | Damjan Marion <dmarion@0xa5.net> | 2022-06-29 21:23:38 +0000 |
| commit | b03eec969f3db186fc354c3e885e51c0b24803f0 (patch) | |
| tree | 54871a6dc00bad0644f75755d616f10ad67de6c2 /src/vnet/classify/vnet_classify.h | |
| parent | b0057282d64a4f9ac5966fceb427057b0665b772 (diff) | |
classify: use 32 bits hash
The classify hash used to be stored as a u64 in the buffer metadata; use
32 bits instead:
- on almost all of our supported architectures (x86 and arm64) we use
  crc32c intrinsics to compute the final hash, so we really get a
  32-bit hash
- the hash itself is only used to compute a 32-bit bucket index by
  masking off the upper bits, so the higher 32 bits are always
  discarded (see the sketch after this list)
- this allows increasing the l2 classify buffer metadata padding so
  that it no longer overlaps with the ip fib_index metadata. This
  overlap is an issue when using the 'set metadata' action in the ip
  ACL node, which updates both fields
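The bucket selection referenced above only ever consults the low bits of the hash, which is why nothing is lost by shrinking it. Below is a minimal sketch of that masking in plain C; it is illustrative only (not the actual VPP code) and assumes the table's bucket count is a power of two, as in vnet_classify:

```c
#include <stdint.h>

/* Sketch only: with a power-of-two bucket count, the bucket index is the
 * hash masked down to its low bits, so the upper 32 bits of a u64 hash
 * never influence the lookup. */
static inline uint32_t
bucket_index_from_hash (uint32_t hash, uint32_t nbuckets)
{
  /* nbuckets is assumed to be a power of two */
  return hash & (nbuckets - 1);
}
```

With, say, 1024 buckets, only the low 10 bits of the hash are ever consulted, so a 32-bit hash is more than sufficient.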
Type: fix
Change-Id: I5d35bdae97b96c3cae534e859b63950fb500ff50
Signed-off-by: Benoît Ganne <bganne@cisco.com>
Diffstat (limited to 'src/vnet/classify/vnet_classify.h')
-rw-r--r-- | src/vnet/classify/vnet_classify.h | 21 |
1 file changed, 11 insertions(+), 10 deletions(-)
```diff
diff --git a/src/vnet/classify/vnet_classify.h b/src/vnet/classify/vnet_classify.h
index c61ec8ff055..143833dfb20 100644
--- a/src/vnet/classify/vnet_classify.h
+++ b/src/vnet/classify/vnet_classify.h
@@ -243,7 +243,7 @@ u8 *format_classify_entry (u8 *s, va_list *args);
 u8 *format_classify_table (u8 * s, va_list * args);
 u8 *format_vnet_classify_table (u8 *s, va_list *args);
 
-u64 vnet_classify_hash_packet (vnet_classify_table_t * t, u8 * h);
+u32 vnet_classify_hash_packet (const vnet_classify_table_t *t, u8 *h);
 
 static_always_inline vnet_classify_table_t *
 vnet_classify_table_get (u32 table_index)
@@ -253,8 +253,8 @@ vnet_classify_table_get (u32 table_index)
   return (pool_elt_at_index (vcm->tables, table_index));
 }
 
-static inline u64
-vnet_classify_hash_packet_inline (vnet_classify_table_t *t, const u8 *h)
+static inline u32
+vnet_classify_hash_packet_inline (const vnet_classify_table_t *t, const u8 *h)
 {
   u64 xor_sum;
   ASSERT (t);
@@ -361,7 +361,7 @@ vnet_classify_prefetch_bucket (vnet_classify_table_t * t, u64 hash)
 }
 
 static inline vnet_classify_entry_t *
-vnet_classify_get_entry (vnet_classify_table_t * t, uword offset)
+vnet_classify_get_entry (const vnet_classify_table_t *t, uword offset)
 {
   u8 *hp = clib_mem_get_heap_base (t->mheap);
   u8 *vp = hp + offset;
@@ -383,8 +383,8 @@ vnet_classify_get_offset (vnet_classify_table_t * t,
 }
 
 static inline vnet_classify_entry_t *
-vnet_classify_entry_at_index (vnet_classify_table_t * t,
-                              vnet_classify_entry_t * e, u32 index)
+vnet_classify_entry_at_index (const vnet_classify_table_t *t,
+                              vnet_classify_entry_t *e, u32 index)
 {
   u8 *eu8;
 
@@ -421,8 +421,9 @@ vnet_classify_prefetch_entry (vnet_classify_table_t * t, u64 hash)
   clib_prefetch_load (e);
 }
 
-vnet_classify_entry_t *vnet_classify_find_entry (vnet_classify_table_t * t,
-                                                 u8 * h, u64 hash, f64 now);
+vnet_classify_entry_t *
+vnet_classify_find_entry (const vnet_classify_table_t *t, u8 *h, u32 hash,
+                          f64 now);
 
 static_always_inline int
 vnet_classify_entry_is_equal (vnet_classify_entry_t *v, const u8 *d, u8 *m,
@@ -529,8 +530,8 @@ vnet_classify_entry_is_equal (vnet_classify_entry_t *v, const u8 *d, u8 *m,
 }
 
 static inline vnet_classify_entry_t *
-vnet_classify_find_entry_inline (vnet_classify_table_t *t, const u8 *h,
-                                 u64 hash, f64 now)
+vnet_classify_find_entry_inline (const vnet_classify_table_t *t, const u8 *h,
+                                 u32 hash, f64 now)
 {
   vnet_classify_entry_t *v;
   vnet_classify_bucket_t *b;
```
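For context, here is a hedged sketch of how a caller fits the updated signatures together after this change. The helper name and flow are illustrative only (not code from this patch), and it assumes `table_index` refers to an existing classify table and `h` points at the packet data being matched:

```c
#include <vnet/classify/vnet_classify.h>

/* Illustrative lookup path only: the hash is now carried as a u32 from
 * hashing through entry lookup. */
static vnet_classify_entry_t *
classify_lookup_sketch (u32 table_index, u8 *h, f64 now)
{
  const vnet_classify_table_t *t = vnet_classify_table_get (table_index);
  u32 hash = vnet_classify_hash_packet_inline (t, h); /* previously u64 */

  return vnet_classify_find_entry_inline (t, h, hash, now);
}
```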