path: root/src/vppinfra
author    Damjan Marion <damjan.marion@gmail.com>  2020-02-13 18:14:06 +0100
committer Damjan Marion <damjan.marion@gmail.com>  2020-02-13 18:17:54 +0100
commit    f75defa7676759fa81ae75e7edd492572c6b8fd6 (patch)
tree      da5eecdcdec868d14d81db8c59e2d1271899d49b /src/vppinfra
parent    adcfb15fa0b08403c5b5b170149f7d3662e65761 (diff)
vppinfra: add 128-bit and 512-bit a ^ b ^ c shortcut
This allows us to combine two XOR operations into a single instruction,
which makes a difference in crypto operations:
- on x86, by using a ternary logic instruction
- on ARM, by using the EOR3 instruction (available with the sha3 feature)

Type: refactor

Change-Id: Ibdf9001840399d2f838d491ca81b57cbd8430433
Signed-off-by: Damjan Marion <damjan.marion@gmail.com>
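As a quick illustration of what the new helpers compute, here is a minimal sketch (not part of the patch); it assumes a vppinfra build on an x86 or ARM target where vppinfra/vector.h pulls in one of the headers touched below, and the xor3_reference_check name is made up:

    #include <vppinfra/vector.h>

    static_always_inline u8x16
    xor3_reference_check (u8x16 a, u8x16 b, u8x16 c)
    {
      /* two dependent XOR instructions on most targets */
      u8x16 reference = a ^ b ^ c;
      /* single ternary-logic / EOR3 instruction where the patch applies */
      u8x16 shortcut = u8x16_xor3 (a, b, c);
      return reference ^ shortcut;  /* all-zero when the shortcut matches */
    }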
Diffstat (limited to 'src/vppinfra')
-rw-r--r--  src/vppinfra/vector_avx512.h   7
-rw-r--r--  src/vppinfra/vector_neon.h    11
-rw-r--r--  src/vppinfra/vector_sse42.h    9
3 files changed, 27 insertions, 0 deletions
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
index c54d8cd2499..29d96f85ce9 100644
--- a/src/vppinfra/vector_avx512.h
+++ b/src/vppinfra/vector_avx512.h
@@ -143,6 +143,13 @@ u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
+static_always_inline u8x64
+u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
+{
+  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
+                                            (__m512i) c, 0x96);
+}
+
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
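The 0x96 immediate passed to _mm512_ternarylogic_epi32 is the 8-bit truth table of the boolean function evaluated bitwise over (a, b, c); for a ^ b ^ c that table is 10010110b = 0x96. A small standalone sketch (plain C, not part of the patch) that derives the constant, assuming Intel's (a << 2) | (b << 1) | c bit indexing (XOR3 is symmetric, so operand order does not change the result):

    #include <stdio.h>

    int
    main (void)
    {
      unsigned imm = 0;
      /* bit i of the immediate is f(a, b, c) with i = (a << 2) | (b << 1) | c */
      for (int i = 0; i < 8; i++)
        {
          int a = (i >> 2) & 1, b = (i >> 1) & 1, c = i & 1;
          imm |= (unsigned) (a ^ b ^ c) << i;
        }
      printf ("0x%02x\n", imm);  /* prints 0x96 */
      return 0;
    }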
diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
index 81d99a64f05..3855f55ad41 100644
--- a/src/vppinfra/vector_neon.h
+++ b/src/vppinfra/vector_neon.h
@@ -203,6 +203,17 @@ u8x16_reflect (u8x16 v)
  return (u8x16) vqtbl1q_u8 (v, mask);
}
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
+  u8x16 r;
+  __asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b" : "=w" (r) : "0" (a), "w" (b), "w" (c) :);
+  return r;
+#endif
+  return a ^ b ^ c;
+}
+
#define CLIB_HAVE_VEC128_MSB_MASK
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
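The inline assembly emits EOR3, the three-way XOR added by the ARMv8.2 SHA3 extension, and is only taken for GCC 8 with that feature enabled; everything else falls back to two EORs. For comparison, a sketch using the ACLE intrinsic instead of inline assembly, assuming the toolchain provides veor3q_u8 when __ARM_FEATURE_SHA3 is defined:

    #include <arm_neon.h>

    static inline uint8x16_t
    xor3_acle (uint8x16_t a, uint8x16_t b, uint8x16_t c)
    {
    #if defined (__ARM_FEATURE_SHA3)
      return veor3q_u8 (a, b, c);            /* single EOR3 instruction */
    #else
      return veorq_u8 (veorq_u8 (a, b), c);  /* two EOR instructions */
    #endif
    }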
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index c22e86e7437..e75580e6026 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -746,6 +746,15 @@ u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
  return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __AVX512F__
+  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
+                                         (__m128i) c, 0x96);
+#endif
+  return a ^ b ^ c;
+}
#endif /* included_vector_sse2_h */
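Finally, a hypothetical caller sketching how crypto code might pick the widest helper the build provides. The xor3_64_bytes name and buffer layout are illustrative only; it assumes vppinfra's CLIB_HAVE_VEC512 macro and the unaligned u8x64u/u8x16u vector types on a target with 128-bit vectors:

    #include <vppinfra/vector.h>

    /* XOR three 64-byte blocks (e.g. state, key block, counter block) into dst */
    static_always_inline void
    xor3_64_bytes (u8 *dst, u8 *x, u8 *y, u8 *z)
    {
    #if defined (CLIB_HAVE_VEC512)
      *(u8x64u *) dst =
        u8x64_xor3 (*(u8x64u *) x, *(u8x64u *) y, *(u8x64u *) z);
    #else
      for (int i = 0; i < 64; i += 16)
        *(u8x16u *) (dst + i) = u8x16_xor3 (*(u8x16u *) (x + i),
                                            *(u8x16u *) (y + i),
                                            *(u8x16u *) (z + i));
    #endif
    }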