From 4dc098f961064d0de1db9ba7245540e3b94a9912 Mon Sep 17 00:00:00 2001
From: Damjan Marion
Date: Wed, 22 Sep 2021 15:28:29 +0200
Subject: classify: use AVX-512 to calculate hash on x86

Type: improvement
Change-Id: I9f9f16eabf64203db11cd4338948d76ca5e0ef12
Signed-off-by: Damjan Marion
---
 src/vnet/classify/vnet_classify.c |  1 +
 src/vnet/classify/vnet_classify.h | 29 ++++++++++++++++++++++++++++-
 src/vppinfra/vector_avx2.h        |  3 +++
 src/vppinfra/vector_avx512.h      |  3 +++
 4 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c
index 796250735e4..0b819db3573 100644
--- a/src/vnet/classify/vnet_classify.c
+++ b/src/vnet/classify/vnet_classify.c
@@ -148,6 +148,7 @@ vnet_classify_new_table (vnet_classify_main_t *cm, const u8 *mask,
   t->match_n_vectors = match_n_vectors;
   t->skip_n_vectors = skip_n_vectors;
   t->entries_per_page = 2;
+  t->load_mask = pow2_mask (match_n_vectors * 2);
 
   t->mheap = clib_mem_create_heap (0, memory_size, 1 /* locked */ ,
 				   "classify");
diff --git a/src/vnet/classify/vnet_classify.h b/src/vnet/classify/vnet_classify.h
index baeaeaf18e9..65bcc3f137b 100644
--- a/src/vnet/classify/vnet_classify.h
+++ b/src/vnet/classify/vnet_classify.h
@@ -162,6 +162,7 @@ typedef struct
   u32 entries_per_page;
   u32 skip_n_vectors;
   u32 match_n_vectors;
+  u16 load_mask;
 
   /* Index of next table to try */
   u32 next_table_index;
@@ -254,7 +255,33 @@ vnet_classify_hash_packet_inline (vnet_classify_table_t *t, const u8 *h)
   ASSERT (t);
   h += t->skip_n_vectors * 16;
 
-#if defined(CLIB_HAVE_VEC128)
+#if defined(CLIB_HAVE_VEC512) && defined(CLIB_HAVE_VEC512_MASK_LOAD_STORE)
+  u64x8 xor_sum_x8, *mask = (u64x8 *) t->mask;
+  u16 load_mask = t->load_mask;
+  u64x8u *data = (u64x8u *) h;
+
+  xor_sum_x8 = u64x8_mask_load_zero (data, load_mask) & mask[0];
+
+  if (PREDICT_FALSE (load_mask >> 8))
+    xor_sum_x8 ^= u64x8_mask_load_zero (data + 1, load_mask >> 8) & mask[1];
+
+  xor_sum_x8 ^= u64x8_align_right (xor_sum_x8, xor_sum_x8, 4);
+  xor_sum_x8 ^= u64x8_align_right (xor_sum_x8, xor_sum_x8, 2);
+  xor_sum = xor_sum_x8[0] ^ xor_sum_x8[1];
+#elif defined(CLIB_HAVE_VEC256) && defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
+  u64x4 xor_sum_x4, *mask = (u64x4 *) t->mask;
+  u16 load_mask = t->load_mask;
+  u64x4u *data = (u64x4u *) h;
+
+  xor_sum_x4 = u64x4_mask_load_zero (data, load_mask) & mask[0];
+  xor_sum_x4 ^= u64x4_mask_load_zero (data + 1, load_mask >> 4) & mask[1];
+
+  if (PREDICT_FALSE (load_mask >> 8))
+    xor_sum_x4 ^= u64x4_mask_load_zero (data + 2, load_mask >> 8) & mask[2];
+
+  xor_sum_x4 ^= u64x4_align_right (xor_sum_x4, xor_sum_x4, 2);
+  xor_sum = xor_sum_x4[0] ^ xor_sum_x4[1];
+#elif defined(CLIB_HAVE_VEC128)
   u64x2 *mask = (u64x2 *) t->mask;
   u64x2u *data = (u64x2u *) h;
   u64x2 xor_sum_x2;
diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index f38a3bdae73..7226c230e68 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -192,6 +192,9 @@ u8x32_shuffle (u8x32 v, u8x32 m)
 #define u8x32_align_right(a, b, imm) \
   (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)
 
+#define u64x4_align_right(a, b, imm) \
+  (u64x4) _mm256_alignr_epi64 ((__m256i) a, (__m256i) b, imm)
+
 static_always_inline u32
 u32x8_sum_elts (u32x8 sum8)
 {
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
index 5da490162d0..a82231ac025 100644
--- a/src/vppinfra/vector_avx512.h
+++ b/src/vppinfra/vector_avx512.h
@@ -205,6 +205,9 @@ u8x64_shuffle (u8x64 v, u8x64 m)
 #define u8x64_align_right(a, b, imm) \
   (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)
 
+#define u64x8_align_right(a, b, imm) \
+  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)
+
 static_always_inline u32
 u32x16_sum_elts (u32x16 sum16)
 {
-- 
cgit 1.2.3-korg