author      Damjan Marion <damarion@cisco.com>      2021-04-25 10:51:51 +0200
committer   Florin Coras <florin.coras@gmail.com>   2021-04-25 17:43:34 +0000
commit      e984831e079342865215d5511b535c8900095722 (patch)
tree        509a43b99e5a6de047a91c163c51fcfd01a2993f /src/vppinfra/vector_avx512.h
parent      e21a0b26042f615fdbe8d2c6b29612c212079d52 (diff)
vppinfra: AVX512 mask load/stores and compress store
Type: improvement
Change-Id: Id6be598aade072653e408cca465e62931d060233
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vppinfra/vector_avx512.h')
-rw-r--r--  src/vppinfra/vector_avx512.h  76
1 file changed, 55 insertions(+), 21 deletions(-)
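
The patch replaces the two hand-written u8x64 mask load/store wrappers with a macro that generates _mask_load, _mask_load_zero and _mask_store variants for the 512-, 256- and 128-bit unsigned vector types, and advertises them through CLIB_HAVE_VEC512/256/128_MASK_LOAD_STORE. A hypothetical caller of the generated helpers (not part of the patch) might handle a sub-vector tail like the sketch below; pow2_mask() and static_always_inline are existing vppinfra facilities, while copy_tail() and the include set are assumptions for illustration:

#include <vppinfra/clib.h>
#include <vppinfra/vector.h>

/* Copy n_bytes (< 64) without touching memory past the end of src/dst:
   load only the selected byte lanes (zeroing the rest), then store only
   the selected byte lanes. Assumes CLIB_HAVE_VEC512_MASK_LOAD_STORE. */
static_always_inline void
copy_tail (u8 *dst, u8 *src, u32 n_bytes)
{
  u64 mask = pow2_mask (n_bytes);	/* lowest n_bytes bits set */
  u8x64 v = u8x64_mask_load_zero (src, mask);
  u8x64_mask_store (v, dst, mask);
}
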
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
index 87999eee947..a688baec7fb 100644
--- a/src/vppinfra/vector_avx512.h
+++ b/src/vppinfra/vector_avx512.h
@@ -202,17 +202,43 @@ u32x16_sum_elts (u32x16 sum16)
return sum8[0] + sum8[4];
}
-static_always_inline u8x64
-u8x64_mask_load (u8x64 a, void *p, u64 mask)
-{
- return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
-}
+#define _(t, m, p, i, e) \
+ static_always_inline t t##_mask_load (t a, void *p, m mask) \
+ { \
+ return (t) p##_mask_loadu_##e ((i) a, mask, p); \
+ } \
+ static_always_inline t t##_mask_load_zero (void *p, m mask) \
+ { \
+ return (t) p##_maskz_loadu_##e (mask, p); \
+ } \
+ static_always_inline void t##_mask_store (t a, void *p, m mask) \
+ { \
+ p##_mask_storeu_##e (p, mask, (i) a); \
+ }
-static_always_inline void
-u8x64_mask_store (u8x64 a, void *p, u64 mask)
-{
- _mm512_mask_storeu_epi8 (p, mask, (__m512i) a);
-}
+_ (u8x64, u64, _mm512, __m512i, epi8)
+_ (u8x32, u32, _mm256, __m256i, epi8)
+_ (u8x16, u16, _mm, __m128i, epi8)
+_ (u16x32, u32, _mm512, __m512i, epi16)
+_ (u16x16, u16, _mm256, __m256i, epi16)
+_ (u16x8, u8, _mm, __m128i, epi16)
+_ (u32x16, u16, _mm512, __m512i, epi32)
+_ (u32x8, u8, _mm256, __m256i, epi32)
+_ (u32x4, u8, _mm, __m128i, epi32)
+_ (u64x8, u8, _mm512, __m512i, epi64)
+_ (u64x4, u8, _mm256, __m256i, epi64)
+_ (u64x2, u8, _mm, __m128i, epi64)
+#undef _
+
+#ifdef CLIB_HAVE_VEC512
+#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
+#endif
+#ifdef CLIB_HAVE_VEC256
+#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
+#endif
+#ifdef CLIB_HAVE_VEC128
+#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
+#endif
static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
@@ -267,25 +293,33 @@ _ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _
-#define _(vt, mt, bits, epi) \
+#define _(vt, mt, p, it, epi) \
static_always_inline vt vt##_compress (vt a, mt mask) \
{ \
- return (vt) _mm##bits##_maskz_compress_##epi (mask, (__m##bits##i) a); \
+ return (vt) p##_maskz_compress_##epi (mask, (it) a); \
} \
static_always_inline vt vt##_expand (vt a, mt mask) \
{ \
- return (vt) _mm##bits##_maskz_expand_##epi (mask, (__m##bits##i) a); \
+ return (vt) p##_maskz_expand_##epi (mask, (it) a); \
+ } \
+ static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
+ { \
+ p##_mask_compressstoreu_##epi (p, mask, (it) v); \
}
-_ (u64x8, u8, 512, epi64)
-_ (u32x16, u16, 512, epi32)
-_ (u64x4, u8, 256, epi64)
-_ (u32x8, u8, 256, epi32)
+_ (u64x8, u8, _mm512, __m512i, epi64)
+_ (u32x16, u16, _mm512, __m512i, epi32)
+_ (u64x4, u8, _mm256, __m256i, epi64)
+_ (u32x8, u8, _mm256, __m256i, epi32)
+_ (u64x2, u8, _mm, __m128i, epi64)
+_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
-_ (u16x32, u32, 512, epi16)
-_ (u8x64, u64, 512, epi8)
-_ (u16x16, u16, 256, epi16)
-_ (u8x32, u32, 256, epi8)
+_ (u16x32, u32, _mm512, __m512i, epi16)
+_ (u8x64, u64, _mm512, __m512i, epi8)
+_ (u16x16, u16, _mm256, __m256i, epi16)
+_ (u8x32, u32, _mm256, __m256i, epi8)
+_ (u16x8, u8, _mm, __m128i, epi16)
+_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
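
The second hunk extends the existing compress/expand macro to also emit a _compress_store variant and adds the 128-bit vector types. A hypothetical use (not part of the patch) is packing the selected lanes of a vector contiguously into memory; count_set_bits() is an existing vppinfra helper, while collect_selected() is an illustrative name:

/* Store only the u32 lanes of v selected by mask, packed at dst,
   and return how many lanes were written. */
static_always_inline u32
collect_selected (u32 *dst, u32x16 v, u16 mask)
{
  u32x16_compress_store (v, mask, dst);
  return count_set_bits (mask);
}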