Diffstat (limited to 'src/vppinfra/vector_neon.h')
-rw-r--r--  src/vppinfra/vector_neon.h | 81
1 file changed, 67 insertions(+), 14 deletions(-)
diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
index 70b05c60884..48644ddbd98 100644
--- a/src/vppinfra/vector_neon.h
+++ b/src/vppinfra/vector_neon.h
@@ -43,7 +43,6 @@ u8x16_compare_byte_mask (u8x16 v)
return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}

-/* *INDENT-OFF* */
#define foreach_neon_vec128i \
_(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u \
@@ -88,12 +87,6 @@ u8x16_compare_byte_mask (u8x16 v)
return u8x16_compare_byte_mask (v); \
} \
\
- static_always_inline u##s##x##c t##s##x##c##_is_greater (t##s##x##c a, \
- t##s##x##c b) \
- { \
- return (u##s##x##c) vcgtq_##i (a, b); \
- } \
- \
static_always_inline t##s##x##c t##s##x##c##_add_saturate (t##s##x##c a, \
t##s##x##c b) \
{ \
@@ -115,7 +108,6 @@ u8x16_compare_byte_mask (u8x16 v)
foreach_neon_vec128i foreach_neon_vec128u
#undef _

-/* *INDENT-ON* */

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
@@ -129,12 +121,6 @@ u32x4_byte_swap (u32x4 v)
return (u32x4) vrev32q_u8 ((u8x16) v);
}

-static_always_inline u8x16
-u8x16_shuffle (u8x16 v, u8x16 m)
-{
-  return (u8x16) vqtbl1q_u8 (v, m);
-}
-
static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
@@ -211,6 +197,18 @@ u32x4_min_scalar (u32x4 v)
#define u8x16_word_shift_left(x,n) vextq_u8(u8x16_splat (0), x, 16 - n)
#define u8x16_word_shift_right(x,n) vextq_u8(x, u8x16_splat (0), n)

+always_inline u32x4
+u32x4_interleave_hi (u32x4 a, u32x4 b)
+{
+  return (u32x4) vzip2q_u32 (a, b);
+}
+
+always_inline u32x4
+u32x4_interleave_lo (u32x4 a, u32x4 b)
+{
+  return (u32x4) vzip1q_u32 (a, b);
+}
+
static_always_inline u8x16
u8x16_reflect (u8x16 v)
{
@@ -231,6 +229,61 @@ __asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b": "=w" (r): "0" (a), "w" (b), "w" (c)
return a ^ b ^ c;
}

+static_always_inline u8x16
+u8x16_load_partial (u8 *data, uword n)
+{
+  u8x16 r = {};
+  if (n > 7)
+    {
+      u64x2 r;
+      r[1] = *(u64u *) (data + n - 8);
+      r >>= (16 - n) * 8;
+      r[0] = *(u64u *) data;
+      return (u8x16) r;
+    }
+  else if (n > 3)
+    {
+      u32x4 r = {};
+      r[1] = *(u32u *) (data + n - 4);
+      r >>= (8 - n) * 8;
+      r[0] = *(u32u *) data;
+      return (u8x16) r;
+    }
+  else if (n > 1)
+    {
+      u16x8 r = {};
+      r[1] = *(u16u *) (data + n - 2);
+      r >>= (4 - n) * 8;
+      r[0] = *(u16u *) data;
+      return (u8x16) r;
+    }
+  else if (n > 0)
+    r[0] = *data;
+  return r;
+}
+
+static_always_inline void
+u8x16_store_partial (u8x16 r, u8 *data, uword n)
+{
+  if (n > 7)
+    {
+      *(u64u *) (data + n - 8) = ((u64x2) r)[1] << ((16 - n) * 8);
+      *(u64u *) data = ((u64x2) r)[0];
+    }
+  else if (n > 3)
+    {
+      *(u32u *) (data + n - 4) = ((u32x4) r)[1] << ((8 - n) * 8);
+      *(u32u *) data = ((u32x4) r)[0];
+    }
+  else if (n > 1)
+    {
+      *(u16u *) (data + n - 2) = ((u16x8) r)[1] << ((4 - n) * 8);
+      *(u16u *) data = ((u16x8) r)[0];
+    }
+  else if (n > 0)
+    data[0] = r[0];
+}
+
#define CLIB_HAVE_VEC128_MSB_MASK
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
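
Not part of the patch: a minimal usage sketch of the new helpers, assuming an
aarch64 build with the vppinfra headers on the include path (where
<vppinfra/vector.h> pulls in vector_neon.h). The main() harness and test
values below are illustrative only. It checks that u8x16_load_partial and
u8x16_store_partial move exactly n bytes without touching memory at or past
data + n, and shows the vzip1q_u32/vzip2q_u32 lane order behind
u32x4_interleave_lo/hi.

/* Hypothetical standalone test harness; not part of the VPP source tree. */
#include <stdio.h>
#include <string.h>
#include <vppinfra/clib.h>
#include <vppinfra/vector.h>

int
main (void)
{
  u8 src[16], dst[16];

  for (int i = 0; i < 16; i++)
    src[i] = (u8) (i + 1);

  /* Copy exactly n bytes through a vector register; bytes at or past
     offset n must be left untouched. */
  for (uword n = 0; n <= 16; n++)
    {
      memset (dst, 0xaa, sizeof (dst));
      u8x16 v = u8x16_load_partial (src, n);
      u8x16_store_partial (v, dst, n);
      for (uword i = 0; i < 16; i++)
	if (dst[i] != (i < n ? src[i] : 0xaa))
	  printf ("mismatch: n=%lu i=%lu\n", (unsigned long) n,
		  (unsigned long) i);
    }

  /* lo interleaves the low halves of a and b, hi the high halves. */
  u32x4 a = { 0, 1, 2, 3 }, b = { 10, 11, 12, 13 };
  u32x4 lo = u32x4_interleave_lo (a, b);	/* { 0, 10, 1, 11 } */
  u32x4 hi = u32x4_interleave_hi (a, b);	/* { 2, 12, 3, 13 } */
  printf ("lo[1]=%u hi[1]=%u\n", lo[1], hi[1]);

  return 0;
}

Note that for 7 < n < 16 the two halves of a partial load/store overlap
within the buffer; the shift by (16 - n) * 8 bits discards (on load) or
zero-fills (on store) the overlapping bytes so the result is still exact.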