aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDamjan Marion <damarion@cisco.com>2020-07-15 20:18:39 +0200
committerDamjan Marion <damarion@cisco.com>2020-07-15 20:26:42 +0200
commit94dbf952204e811c5ec7a39600c25f7b40387f45 (patch)
treeafc3b85a7e9ac94e05f4290f49ac2c93db207d30
parent4d119a6f15333e107bd9766112410eaf4ce1a57f (diff)
vppinfra: more vector inlines
Type: improvement

Change-Id: Ie0de374b89ec3a17befecf3f08e94951597609ec
Signed-off-by: Damjan Marion <damarion@cisco.com>
-rw-r--r--src/vppinfra/vector_avx2.h25
-rw-r--r--src/vppinfra/vector_avx512.h19
-rw-r--r--src/vppinfra/vector_sse42.h16
3 files changed, 60 insertions, 0 deletions
diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index 8cc1d77d63c..0511ec7e4e2 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -162,6 +162,23 @@ u16x16_byte_swap (u16x16 v)
return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
/* Per-lane byte shuffle (vpshufb): each result byte is selected from v by
 * the low 4 bits of the corresponding byte of m, within the same 128-bit
 * lane; a set high bit in m zeroes that result byte. */
static_always_inline u8x32
u8x32_shuffle (u8x32 v, u8x32 m)
{
  return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
}
+
/* Concatenate b:a and extract 32 bytes shifted right by imm bytes,
 * independently within each 128-bit lane (vpalignr semantics).
 * Kept as a macro because imm must be a compile-time constant.
 * Arguments and the whole expansion are parenthesized to avoid
 * precedence surprises when called with expressions. */
#define u8x32_align_right(a, b, imm) \
  ((u8x32) _mm256_alignr_epi8 ((__m256i) (a), (__m256i) (b), (imm)))
+
/* Horizontal sum of the eight u32 elements of sum8.
 * u8x32_align_right (vpalignr) rotates within each 128-bit lane, so the
 * two shifted adds leave every element of a lane holding that lane's
 * total; element 0 then carries the low-lane total and element 4 the
 * high-lane total. */
static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  return sum8[0] + sum8[4];
}
+
static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
@@ -196,6 +213,14 @@ u16x16_mask_last (u16x16 v, u8 n_last)
return v & masks[16 - n_last];
}
/* Masked unaligned 32-byte load: byte i of the result comes from p when
 * bit i of mask is set, from a otherwise.
 * _mm256_mask_loadu_epi8 requires AVX512BW + AVX512VL (not plain
 * AVX512F), so guard on the specific feature macros the compiler
 * defines for those extensions. */
#if defined(__AVX512BW__) && defined(__AVX512VL__)
static_always_inline u8x32
u8x32_mask_load (u8x32 a, void *p, u32 mask)
{
  return (u8x32) _mm256_mask_loadu_epi8 ((__m256i) a, mask, p);
}
#endif
+
static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
index 8a82650e1e8..6eb7c5eaa4d 100644
--- a/src/vppinfra/vector_avx512.h
+++ b/src/vppinfra/vector_avx512.h
@@ -192,6 +192,25 @@ u8x64_reflect_u8x16 (u8x64 x)
}
/* Per-lane byte shuffle (AVX512BW vpshufb) across the four 128-bit lanes
 * of v: each result byte is selected by the low bits of the matching byte
 * of m within its lane; a set high bit in m zeroes that result byte. */
static_always_inline u8x64
u8x64_shuffle (u8x64 v, u8x64 m)
{
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}
+
/* Per-128-bit-lane byte-wise right shift of the concatenation b:a by imm
 * bytes (vpalignr). Kept as a macro because imm must be a compile-time
 * constant; arguments and the expansion are parenthesized for macro
 * hygiene. */
#define u8x64_align_right(a, b, imm) \
  ((u8x64) _mm512_alignr_epi8 ((__m512i) (a), (__m512i) (b), (imm)))
+
/* Horizontal sum of the sixteen u32 elements of sum16.
 * The alignr rotations operate within each 128-bit lane, leaving every
 * element of a lane equal to that lane's total; adding the high and low
 * 256-bit halves folds four lane totals into two, and elements 0 and 4
 * of the resulting u32x8 hold those two partial sums. */
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
+
+static_always_inline u8x64
u8x64_mask_load (u8x64 a, void *p, u64 mask)
{
return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index 918ded3cd52..8c28dd7783c 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -650,6 +650,14 @@ u32x4_hadd (u32x4 v1, u32x4 v2)
return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
}
/* Horizontal sum of the four u32 elements of sum4: a 128-bit vector is a
 * single lane, so two byte-rotate-and-add steps leave every element
 * holding the grand total, and element 0 is the answer. */
static_always_inline u32 __clib_unused
u32x4_sum_elts (u32x4 sum4)
{
  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 8);
  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 4);
  return sum4[0];
}
+
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
@@ -756,6 +764,14 @@ u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
return a ^ b ^ c;
}
/* Masked unaligned 16-byte load: byte i of the result comes from p when
 * bit i of mask is set, from a otherwise.
 * _mm_mask_loadu_epi8 requires AVX512BW + AVX512VL (not plain AVX512F),
 * so test the specific feature macros rather than __AVX512F__ alone. */
#if defined(__AVX512BW__) && defined(__AVX512VL__)
static_always_inline u8x16
u8x16_mask_load (u8x16 a, void *p, u16 mask)
{
  return (u8x16) _mm_mask_loadu_epi8 ((__m128i) a, mask, p);
}
#endif
+
#endif /* included_vector_sse2_h */
/*