author     Damjan Marion <damarion@cisco.com>      2021-04-20 21:28:45 +0200
committer  Florin Coras <florin.coras@gmail.com>   2021-04-21 15:24:05 +0000
commit     09aeee64da0609d027fd7d55525c8f080cbede8e (patch)
tree       87681574ab4d6b7ee34e93ac65614c92bcd63acc /src
parent     832926bd9666717ff2310f93981f3383e1de5ab6 (diff)
vppinfra: more avx512 inlines (compress, expand, from, is_equal_mask)
Type: improvement
Change-Id: I4cb86cafba92ae70cea160b9bf45f28a916ab6db
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src')
-rw-r--r--   src/vnet/ip/vtep.h             2
-rw-r--r--   src/vppinfra/vector.h          5
-rw-r--r--   src/vppinfra/vector_avx512.h   105
3 files changed, 81 insertions, 31 deletions
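
The patch replaces the single-purpose u64x8_mask_is_equal helper with macro-generated per-type wrappers: <type>_is_equal_mask, <type>_compress, <type>_expand and the <to>_from_<from> width conversions, covering the 128-, 256- and 512-bit unsigned vector types. A minimal usage sketch built only from names visible in this patch, assuming a VPP tree where <vppinfra/vector.h> is compiled with AVX-512F enabled (keep_matching is an illustrative name, not part of the patch):

#include <vppinfra/vector.h>

/* Pack the lanes of `keys' that equal `needle' to the front of the result;
   the remaining lanes are zeroed.  Illustrative sketch only. */
static_always_inline u64x8
keep_matching (u64x8 keys, u64 needle)
{
  u8 m = u64x8_is_equal_mask (keys, u64x8_splat (needle)); /* 1 bit per equal lane */
  return u64x8_compress (keys, m);
}
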
diff --git a/src/vnet/ip/vtep.h b/src/vnet/ip/vtep.h
index 418d8439744..92e8002e55a 100644
--- a/src/vnet/ip/vtep.h
+++ b/src/vnet/ip/vtep.h
@@ -131,7 +131,7 @@ vtep4_check_vector (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
u64x8 k4_u64x8 = u64x8_splat (k4.as_u64);
u64x8 cache = u64x8_load_unaligned (vtep4_u512->vtep4_cache);
- u8 result = u64x8_mask_is_equal (cache, k4_u64x8);
+ u8 result = u64x8_is_equal_mask (cache, k4_u64x8);
if (PREDICT_TRUE (result != 0))
{
last_k4->as_u64 =
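
The vtep4 cache probe above is the only in-tree caller touched by the rename; the wrapper itself is a thin cast around the AVX-512F compare-to-mask intrinsic. A standalone illustration of the mask semantics using the raw intrinsic rather than the vppinfra types (compile with e.g. gcc -mavx512f; all names here are illustrative, not part of the patch):

#include <immintrin.h>
#include <stdio.h>

int
main (void)
{
  __m512i cache = _mm512_setr_epi64 (1, 2, 3, 4, 5, 6, 7, 8);
  __m512i key = _mm512_set1_epi64 (3);
  /* bit i of the result is set when lane i of the two vectors compares equal */
  __mmask8 m = _mm512_cmpeq_epu64_mask (cache, key);
  printf ("mask = 0x%02x\n", (unsigned) m); /* prints 0x04: only lane 2 matches */
  return 0;
}
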
diff --git a/src/vppinfra/vector.h b/src/vppinfra/vector.h
index 33e2b6a8773..d5bc955a2e5 100644
--- a/src/vppinfra/vector.h
+++ b/src/vppinfra/vector.h
@@ -177,10 +177,7 @@ foreach_vec
#include <vppinfra/vector_avx2.h>
#endif
-#if defined (__AVX512BITALG__)
-/* Due to power level transition issues, we don't preffer AVX-512 on
- Skylake X and CascadeLake CPUs, AVX512BITALG is introduced on
- icelake CPUs */
+#if defined(__AVX512F__)
#include <vppinfra/vector_avx512.h>
#endif
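
With this change vector_avx512.h is included whenever the target supports AVX-512F at all (including Skylake-X and Cascade Lake); the pieces that need newer extensions are gated separately inside the header, as the next file shows. A quick, illustrative way to see which feature macros a given -march target defines (for example, -march=skylake-avx512 defines only __AVX512F__ of the three, while -march=icelake-server defines all of them; sketch, not part of the patch):

#include <stdio.h>

int
main (void)
{
#ifdef __AVX512F__
  puts ("__AVX512F__      -> vector_avx512.h is now included");
#endif
#ifdef __AVX512BITALG__
  puts ("__AVX512BITALG__ -> the pre-patch gate would also have passed");
#endif
#ifdef __AVX512VBMI2__
  puts ("__AVX512VBMI2__  -> 8/16-bit compress/expand wrappers are available");
#endif
  return 0;
}
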
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
index a51644be1db..3406a7ddb91 100644
--- a/src/vppinfra/vector_avx512.h
+++ b/src/vppinfra/vector_avx512.h
@@ -109,29 +109,21 @@ u16x32_byte_swap (u16x32 v)
return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
-static_always_inline u32x8
-u32x16_extract_lo (u32x16 v)
-{
- return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
-}
-
-static_always_inline u32x8
-u32x16_extract_hi (u32x16 v)
-{
- return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
-}
-
-static_always_inline u8x32
-u8x64_extract_lo (u8x64 v)
-{
- return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
-}
-
-static_always_inline u8x32
-u8x64_extract_hi (u8x64 v)
-{
- return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
-}
+#define _(f, t) \
+ static_always_inline t f##_extract_lo (f v) \
+ { \
+ return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
+ } \
+ static_always_inline t f##_extract_hi (f v) \
+ { \
+ return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
+ }
+
+_ (u64x8, u64x4)
+_ (u32x16, u32x8)
+_ (u16x32, u16x16)
+_ (u8x64, u8x32)
+#undef _
static_always_inline u32
u32x16_min_scalar (u32x16 v)
@@ -246,11 +238,72 @@ u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}
-static_always_inline u8
-u64x8_mask_is_equal (u64x8 a, u64x8 b)
+#define _(t, m, e, p, it) \
+ static_always_inline m t##_is_equal_mask (t a, t b) \
+ { \
+ return p##_cmpeq_##e##_mask ((it) a, (it) b); \
+ }
+_ (u8x16, u16, epu8, _mm, __m128i)
+_ (u16x8, u8, epu16, _mm, __m128i)
+_ (u32x4, u8, epu32, _mm, __m128i)
+_ (u64x2, u8, epu64, _mm, __m128i)
+
+_ (u8x32, u32, epu8, _mm256, __m256i)
+_ (u16x16, u16, epu16, _mm256, __m256i)
+_ (u32x8, u8, epu32, _mm256, __m256i)
+_ (u64x4, u8, epu64, _mm256, __m256i)
+
+_ (u8x64, u64, epu8, _mm512, __m512i)
+_ (u16x32, u32, epu16, _mm512, __m512i)
+_ (u32x16, u16, epu32, _mm512, __m512i)
+_ (u64x8, u8, epu64, _mm512, __m512i)
+#undef _
+
+#define _(f, t, fn, it) \
+ static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
+_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
+_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
+_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
+#undef _
+
+#define _(vt, mt, bits, epi) \
+ static_always_inline vt vt##_compress (vt a, mt mask) \
+ { \
+ return (vt) _mm##bits##_maskz_compress_##epi (mask, (__m##bits##i) a); \
+ } \
+ static_always_inline vt vt##_expand (vt a, mt mask) \
+ { \
+ return (vt) _mm##bits##_maskz_expand_##epi (mask, (__m##bits##i) a); \
+ }
+
+_ (u64x8, u8, 512, epi64)
+_ (u32x16, u16, 512, epi32)
+_ (u64x4, u8, 256, epi64)
+_ (u32x8, u8, 256, epi32)
+#ifdef __AVX512VBMI2__
+_ (u16x32, u32, 512, epi16)
+_ (u8x64, u64, 512, epi8)
+_ (u16x16, u16, 256, epi16)
+_ (u8x32, u32, 256, epi8)
+#endif
+#undef _
+
+#define CLIB_HAVE_VEC256_COMPRESS
+#define CLIB_HAVE_VEC512_COMPRESS
+
+#ifndef __AVX512VBMI2__
+static_always_inline u16x16
+u16x16_compress (u16x16 v, u16 mask)
+{
+ return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
+}
+
+static_always_inline u16x8
+u16x8_compress (u16x8 v, u8 mask)
{
- return _mm512_cmpeq_epu64_mask ((__m512i) a, (__m512i) b);
+ return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
+#endif
static_always_inline void
u32x16_transpose (u32x16 m[16])
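
For reference, a standalone sketch of the compress/expand semantics the new wrappers expose, written against the raw AVX-512F intrinsics (compile with e.g. gcc -mavx512f; all names are illustrative, not part of the patch):

#include <immintrin.h>
#include <stdio.h>

int
main (void)
{
  __m512i v = _mm512_setr_epi32 (0, 1, 2, 3, 4, 5, 6, 7,
                                 8, 9, 10, 11, 12, 13, 14, 15);
  __mmask16 m = 0x00f0; /* select lanes 4..7 */
  unsigned out[16];

  /* compress: the selected lanes are packed to the low end, the rest zeroed */
  __m512i c = _mm512_maskz_compress_epi32 (m, v);
  _mm512_storeu_si512 ((void *) out, c);
  printf ("compress low lanes: %u %u %u %u\n", out[0], out[1], out[2], out[3]);

  /* expand: low source lanes are scattered back to the selected positions */
  __m512i e = _mm512_maskz_expand_epi32 (m, c);
  _mm512_storeu_si512 ((void *) out, e);
  printf ("expand lanes 4..7:  %u %u %u %u\n", out[4], out[5], out[6], out[7]);
  return 0;
}

On parts without AVX512_VBMI2 the 8- and 16-bit variants are not generated; the u16x16_compress/u16x8_compress fallbacks above instead widen to 32 bits, compress there, and narrow back with unsigned saturation.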