aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--src/plugins/crypto_native/ghash.h24
-rw-r--r--src/vppinfra/vector_avx512.h7
-rw-r--r--src/vppinfra/vector_neon.h11
-rw-r--r--src/vppinfra/vector_sse42.h9
4 files changed, 33 insertions, 18 deletions
diff --git a/src/plugins/crypto_native/ghash.h b/src/plugins/crypto_native/ghash.h
index 1ee1a997997..a2886a468e9 100644
--- a/src/plugins/crypto_native/ghash.h
+++ b/src/plugins/crypto_native/ghash.h
@@ -105,18 +105,6 @@
#ifndef __ghash_h__
#define __ghash_h__
-/* on AVX-512 systems we can save a clock cycle by using ternary logic
- instruction to calculate a XOR b XOR c */
-static_always_inline u8x16
-ghash_xor3 (u8x16 a, u8x16 b, u8x16 c)
-{
-#if defined (__AVX512F__)
- return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
- (__m128i) c, 0x96);
-#endif
- return a ^ b ^ c;
-}
-
static_always_inline u8x16
gmul_lo_lo (u8x16 a, u8x16 b)
{
@@ -204,8 +192,8 @@ ghash_mul_next (ghash_data_t * gd, u8x16 a, u8x16 b)
if (gd->pending)
{
      /* there is pending data from previous invocation so we can XOR */
- gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, hi);
- gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, lo);
+ gd->hi = u8x16_xor3 (gd->hi, gd->tmp_hi, hi);
+ gd->lo = u8x16_xor3 (gd->lo, gd->tmp_lo, lo);
gd->pending = 0;
}
else
@@ -217,7 +205,7 @@ ghash_mul_next (ghash_data_t * gd, u8x16 a, u8x16 b)
}
/* gd->mid ^= a0 * b1 ^ a1 * b0 */
- gd->mid = ghash_xor3 (gd->mid, gmul_hi_lo (a, b), gmul_lo_hi (a, b));
+ gd->mid = u8x16_xor3 (gd->mid, gmul_hi_lo (a, b), gmul_lo_hi (a, b));
}
static_always_inline void
@@ -233,8 +221,8 @@ ghash_reduce (ghash_data_t * gd)
if (gd->pending)
{
- gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, midl);
- gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, midr);
+ gd->lo = u8x16_xor3 (gd->lo, gd->tmp_lo, midl);
+ gd->hi = u8x16_xor3 (gd->hi, gd->tmp_hi, midr);
}
else
{
@@ -255,7 +243,7 @@ ghash_reduce2 (ghash_data_t * gd)
static_always_inline u8x16
ghash_final (ghash_data_t * gd)
{
- return ghash_xor3 (gd->hi, u8x16_word_shift_right (gd->tmp_lo, 4),
+ return u8x16_xor3 (gd->hi, u8x16_word_shift_right (gd->tmp_lo, 4),
u8x16_word_shift_left (gd->tmp_hi, 4));
}
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
index c54d8cd2499..29d96f85ce9 100644
--- a/src/vppinfra/vector_avx512.h
+++ b/src/vppinfra/vector_avx512.h
@@ -143,6 +143,13 @@ u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
#define u32x16_ternary_logic(a, b, c, d) \
(u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
+static_always_inline u8x64
+u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
+{
+ return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
+ (__m512i) c, 0x96);
+}
+
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
index 81d99a64f05..3855f55ad41 100644
--- a/src/vppinfra/vector_neon.h
+++ b/src/vppinfra/vector_neon.h
@@ -203,6 +203,17 @@ u8x16_reflect (u8x16 v)
return (u8x16) vqtbl1q_u8 (v, mask);
}
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
+ u8x16 r;
+__asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b": "=w" (r): "0" (a), "w" (b), "w" (c):);
+ return r;
+#endif
+ return a ^ b ^ c;
+}
+
#define CLIB_HAVE_VEC128_MSB_MASK
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index c22e86e7437..e75580e6026 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -746,6 +746,15 @@ u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __AVX512F__
+ return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
+ (__m128i) c, 0x96);
+#endif
+ return a ^ b ^ c;
+}
#endif /* included_vector_sse2_h */