author | Damjan Marion <damarion@cisco.com> | 2020-02-11 17:04:38 +0100
---|---|---
committer | Florin Coras <florin.coras@gmail.com> | 2020-02-12 08:48:27 +0000
commit | 415b4b0bbae661cbcbb93c3cb9d016dfae2f5081 (patch) |
tree | a77ebdb088c23b5c7de9da03c78dd52c940b11f6 |
parent | aba4983ad48374a50cd93ba91f66be241f210279 (diff) |
crypto-native: refactor GCM code to use generic types
Type: refactor
Change-Id: I76733a9ed362ec60badd22c0fbc2a9c5749da88d
Signed-off-by: Damjan Marion <damarion@cisco.com>
-rw-r--r-- | src/plugins/crypto_native/aes.h | 236
-rw-r--r-- | src/plugins/crypto_native/aes_gcm.c | 274
-rw-r--r-- | src/plugins/crypto_native/ghash.h | 106
3 files changed, 325 insertions(+), 291 deletions(-)
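Editor's note (not part of the commit): the large hunks below are easiest to read with the refactor pattern in mind. Raw `__m128i` values and explicit `_mm_loadu_si128` / `_mm_storeu_si128` / `_mm_xor_si128` calls are replaced by the generic vector types `u8x16`, `u32x4` and the unaligned `u8x16u`, so loads and stores become plain pointer dereferences and XOR/add become operators; only operations with no generic equivalent (AES rounds, PCLMULQDQ, shuffles, masked loads) keep intrinsics behind small wrappers. The sketch below is a simplified, self-contained illustration of that style, assuming GCC/clang vector extensions: the type names mirror vppinfra and `byte_mask` mimics the portable fallback of the new `aes_byte_mask` / `aes_load_partial` helpers (the patch also has an AVX-512 masked-load path), but none of this code is taken verbatim from the tree.

```c
/*
 * Illustrative sketch only: simplified stand-ins for the generic vector
 * types, built on GCC/clang vector extensions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* 16-byte vector of bytes; the 'u' variant is byte-aligned, so an
   arbitrary pointer can be cast to it for unaligned loads and stores */
typedef u8 u8x16 __attribute__ ((vector_size (16)));
typedef u8 u8x16u __attribute__ ((vector_size (16), aligned (1)));

/* keep only the first n_bytes of x, zero the rest - a portable
   equivalent of the aes_byte_mask () fallback (the patch uses a blend) */
static u8x16
byte_mask (u8x16 x, u8 n_bytes)
{
  const u8x16 scale = { 0, 1, 2, 3, 4, 5, 6, 7,
			8, 9, 10, 11, 12, 13, 14, 15 };
  u8x16 n = { 0 };
  for (int i = 0; i < 16; i++)
    n[i] = n_bytes;
  /* lanes whose index is below n_bytes compare to all-ones */
  return x & (u8x16) (n > scale);
}

int
main (void)
{
  u8 buf[16], out[16];
  for (int i = 0; i < 16; i++)
    buf[i] = i + 1;

  u8x16 r = *(u8x16u *) buf;	/* was: _mm_loadu_si128 ((__m128i *) buf) */
  u8x16 m = byte_mask (r, 5);	/* keep 5 bytes, zero the other 11 */
  *(u8x16u *) out = m;		/* was: _mm_storeu_si128 (...) */

  for (int i = 0; i < 16; i++)
    printf ("%02x ", out[i]);
  printf ("\n");
  return 0;
}
```

With this style in place, functions like `aesni_gcm_load` in the hunks below reduce to `d[i] = inv[i]` plus one partial load for the trailing block.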
diff --git a/src/plugins/crypto_native/aes.h b/src/plugins/crypto_native/aes.h index 85d6f7916f8..371f5c4508c 100644 --- a/src/plugins/crypto_native/aes.h +++ b/src/plugins/crypto_native/aes.h @@ -30,6 +30,10 @@ typedef enum #ifdef __x86_64__ +static const u8x16 byte_mask_scale = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 +}; + static_always_inline u8x16 aes_block_load (u8 * p) { @@ -67,131 +71,177 @@ aes_block_store (u8 * p, u8x16 r) } static_always_inline u8x16 +aes_byte_mask (u8x16 x, u8 n_bytes) +{ + u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale); + __m128i zero = { }; + + return (u8x16) _mm_blendv_epi8 (zero, (__m128i) x, (__m128i) mask); +} + +static_always_inline u8x16 +aes_load_partial (u8x16u * p, int n_bytes) +{ + ASSERT (n_bytes <= 16); +#ifdef __AVX512F__ + __m128i zero = { }; + return (u8x16) _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p); +#else + return aes_byte_mask (CLIB_MEM_OVERFLOW_LOAD (*, p), n_bytes); +#endif +} + +static_always_inline void +aes_store_partial (void *p, u8x16 r, int n_bytes) +{ +#ifdef __AVX512F__ + _mm_mask_storeu_epi8 (p, (1 << n_bytes) - 1, (__m128i) r); +#else + u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale); + _mm_maskmoveu_si128 ((__m128i) r, (__m128i) mask, p); +#endif +} + + +static_always_inline u8x16 +aes_encrypt_block (u8x16 block, const u8x16 * round_keys, aes_key_size_t ks) +{ + int i; + block ^= round_keys[0]; + for (i = 1; i < AES_KEY_ROUNDS (ks); i += 1) + block = aes_enc_round (block, round_keys[i]); + return aes_enc_last_round (block, round_keys[i]); +} + +static_always_inline u8x16 aes_inv_mix_column (u8x16 a) { return (u8x16) _mm_aesimc_si128 ((__m128i) a); } +#define aes_keygen_assist(a, b) \ + (u8x16) _mm_aeskeygenassist_si128((__m128i) a, b) + /* AES-NI based AES key expansion based on code samples from Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper (323641-001) */ static_always_inline void -aes128_key_assist (__m128i * k, __m128i r) +aes128_key_assist (u8x16 * rk, u8x16 r) { - __m128i t = k[-1]; - t ^= _mm_slli_si128 (t, 4); - t ^= _mm_slli_si128 (t, 4); - t ^= _mm_slli_si128 (t, 4); - k[0] = t ^ _mm_shuffle_epi32 (r, 0xff); + u8x16 t = rk[-1]; + t ^= u8x16_word_shift_left (t, 4); + t ^= u8x16_word_shift_left (t, 4); + t ^= u8x16_word_shift_left (t, 4); + rk[0] = t ^ (u8x16) u32x4_shuffle ((u32x4) r, 3, 3, 3, 3); } static_always_inline void -aes128_key_expand (u8x16 * key_schedule, u8 * key) +aes128_key_expand (u8x16 * rk, u8x16 const *k) { - __m128i *k = (__m128i *) key_schedule; - k[0] = _mm_loadu_si128 ((const __m128i *) key); - aes128_key_assist (k + 1, _mm_aeskeygenassist_si128 (k[0], 0x01)); - aes128_key_assist (k + 2, _mm_aeskeygenassist_si128 (k[1], 0x02)); - aes128_key_assist (k + 3, _mm_aeskeygenassist_si128 (k[2], 0x04)); - aes128_key_assist (k + 4, _mm_aeskeygenassist_si128 (k[3], 0x08)); - aes128_key_assist (k + 5, _mm_aeskeygenassist_si128 (k[4], 0x10)); - aes128_key_assist (k + 6, _mm_aeskeygenassist_si128 (k[5], 0x20)); - aes128_key_assist (k + 7, _mm_aeskeygenassist_si128 (k[6], 0x40)); - aes128_key_assist (k + 8, _mm_aeskeygenassist_si128 (k[7], 0x80)); - aes128_key_assist (k + 9, _mm_aeskeygenassist_si128 (k[8], 0x1b)); - aes128_key_assist (k + 10, _mm_aeskeygenassist_si128 (k[9], 0x36)); + rk[0] = k[0]; + aes128_key_assist (rk + 1, aes_keygen_assist (rk[0], 0x01)); + aes128_key_assist (rk + 2, aes_keygen_assist (rk[1], 0x02)); + aes128_key_assist (rk + 3, aes_keygen_assist (rk[2], 0x04)); + aes128_key_assist (rk + 4, 
aes_keygen_assist (rk[3], 0x08)); + aes128_key_assist (rk + 5, aes_keygen_assist (rk[4], 0x10)); + aes128_key_assist (rk + 6, aes_keygen_assist (rk[5], 0x20)); + aes128_key_assist (rk + 7, aes_keygen_assist (rk[6], 0x40)); + aes128_key_assist (rk + 8, aes_keygen_assist (rk[7], 0x80)); + aes128_key_assist (rk + 9, aes_keygen_assist (rk[8], 0x1b)); + aes128_key_assist (rk + 10, aes_keygen_assist (rk[9], 0x36)); } static_always_inline void -aes192_key_assist (__m128i * r1, __m128i * r2, __m128i key_assist) +aes192_key_assist (u8x16 * r1, u8x16 * r2, u8x16 key_assist) { - __m128i t; - *r1 ^= t = _mm_slli_si128 (*r1, 0x4); - *r1 ^= t = _mm_slli_si128 (t, 0x4); - *r1 ^= _mm_slli_si128 (t, 0x4); - *r1 ^= _mm_shuffle_epi32 (key_assist, 0x55); - *r2 ^= _mm_slli_si128 (*r2, 0x4); - *r2 ^= _mm_shuffle_epi32 (*r1, 0xff); + u8x16 t; + r1[0] ^= t = u8x16_word_shift_left (r1[0], 4); + r1[0] ^= t = u8x16_word_shift_left (t, 4); + r1[0] ^= u8x16_word_shift_left (t, 4); + r1[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) key_assist, 0x55); + r2[0] ^= u8x16_word_shift_left (r2[0], 4); + r2[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) r1[0], 0xff); } static_always_inline void -aes192_key_expand (u8x16 * key_schedule, u8 * key) +aes192_key_expand (u8x16 * rk, u8x16u const *k) { - __m128i r1, r2, *k = (__m128i *) key_schedule; + u8x16 r1, r2; - k[0] = r1 = _mm_loadu_si128 ((__m128i *) key); - /* load the 24-bytes key as 2 * 16-bytes (and ignore last 8-bytes) */ - k[1] = r2 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) key + 1); + rk[0] = r1 = k[0]; + /* *INDENT-OFF* */ + rk[1] = r2 = (u8x16) (u64x2) { *(u64 *) (k + 1), 0 }; + /* *INDENT-ON* */ - aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x1)); - k[1] = (__m128i) _mm_shuffle_pd ((__m128d) k[1], (__m128d) r1, 0); - k[2] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1); + aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x1)); + rk[1] = (u8x16) _mm_shuffle_pd ((__m128d) rk[1], (__m128d) r1, 0); + rk[2] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1); - aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x2)); - k[3] = r1; - k[4] = r2; + aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x2)); + rk[3] = r1; + rk[4] = r2; - aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x4)); - k[4] = (__m128i) _mm_shuffle_pd ((__m128d) k[4], (__m128d) r1, 0); - k[5] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1); + aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x4)); + rk[4] = (u8x16) _mm_shuffle_pd ((__m128d) rk[4], (__m128d) r1, 0); + rk[5] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1); - aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x8)); - k[6] = r1; - k[7] = r2; + aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x8)); + rk[6] = r1; + rk[7] = r2; - aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x10)); - k[7] = (__m128i) _mm_shuffle_pd ((__m128d) k[7], (__m128d) r1, 0); - k[8] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1); + aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x10)); + rk[7] = (u8x16) _mm_shuffle_pd ((__m128d) rk[7], (__m128d) r1, 0); + rk[8] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1); - aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x20)); - k[9] = r1; - k[10] = r2; + aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x20)); + rk[9] = r1; + rk[10] = r2; - aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x40)); - k[10] = (__m128i) _mm_shuffle_pd ((__m128d) k[10], (__m128d) r1, 0); 
- k[11] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1); + aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x40)); + rk[10] = (u8x16) _mm_shuffle_pd ((__m128d) rk[10], (__m128d) r1, 0); + rk[11] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1); - aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x80)); - k[12] = r1; + aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x80)); + rk[12] = r1; } static_always_inline void -aes256_key_assist (__m128i * k, int i, __m128i key_assist) +aes256_key_assist (u8x16 * rk, int i, u8x16 key_assist) { - __m128i r, t; - k += i; - r = k[-2]; - r ^= t = _mm_slli_si128 (r, 0x4); - r ^= t = _mm_slli_si128 (t, 0x4); - r ^= _mm_slli_si128 (t, 0x4); - r ^= _mm_shuffle_epi32 (key_assist, 0xff); - k[0] = r; + u8x16 r, t; + rk += i; + r = rk[-2]; + r ^= t = u8x16_word_shift_left (r, 4); + r ^= t = u8x16_word_shift_left (t, 4); + r ^= u8x16_word_shift_left (t, 4); + r ^= (u8x16) u32x4_shuffle ((u32x4) key_assist, 3, 3, 3, 3); + rk[0] = r; if (i >= 14) return; - r = k[-1]; - r ^= t = _mm_slli_si128 (r, 0x4); - r ^= t = _mm_slli_si128 (t, 0x4); - r ^= _mm_slli_si128 (t, 0x4); - r ^= _mm_shuffle_epi32 (_mm_aeskeygenassist_si128 (k[0], 0x0), 0xaa); - k[1] = r; + key_assist = aes_keygen_assist (rk[0], 0x0); + r = rk[-1]; + r ^= t = u8x16_word_shift_left (r, 4); + r ^= t = u8x16_word_shift_left (t, 4); + r ^= u8x16_word_shift_left (t, 4); + r ^= (u8x16) u32x4_shuffle ((u32x4) key_assist, 2, 2, 2, 2); + rk[1] = r; } static_always_inline void -aes256_key_expand (u8x16 * key_schedule, u8 * key) +aes256_key_expand (u8x16 * rk, u8x16u const *k) { - __m128i *k = (__m128i *) key_schedule; - k[0] = _mm_loadu_si128 ((__m128i *) key); - k[1] = _mm_loadu_si128 ((__m128i *) (key + 16)); - aes256_key_assist (k, 2, _mm_aeskeygenassist_si128 (k[1], 0x01)); - aes256_key_assist (k, 4, _mm_aeskeygenassist_si128 (k[3], 0x02)); - aes256_key_assist (k, 6, _mm_aeskeygenassist_si128 (k[5], 0x04)); - aes256_key_assist (k, 8, _mm_aeskeygenassist_si128 (k[7], 0x08)); - aes256_key_assist (k, 10, _mm_aeskeygenassist_si128 (k[9], 0x10)); - aes256_key_assist (k, 12, _mm_aeskeygenassist_si128 (k[11], 0x20)); - aes256_key_assist (k, 14, _mm_aeskeygenassist_si128 (k[13], 0x40)); + rk[0] = k[0]; + rk[1] = k[1]; + aes256_key_assist (rk, 2, aes_keygen_assist (rk[1], 0x01)); + aes256_key_assist (rk, 4, aes_keygen_assist (rk[3], 0x02)); + aes256_key_assist (rk, 6, aes_keygen_assist (rk[5], 0x04)); + aes256_key_assist (rk, 8, aes_keygen_assist (rk[7], 0x08)); + aes256_key_assist (rk, 10, aes_keygen_assist (rk[9], 0x10)); + aes256_key_assist (rk, 12, aes_keygen_assist (rk[11], 0x20)); + aes256_key_assist (rk, 14, aes_keygen_assist (rk[13], 0x40)); } #endif @@ -223,9 +273,9 @@ aes128_key_expand_round_neon (u8x16 * rk, u32 rcon) } void -aes128_key_expand (u8x16 * rk, const u8 * k) +aes128_key_expand (u8x16 * rk, const u8x16 * k) { - rk[0] = vld1q_u8 (k); + rk[0] = k[0]; aes128_key_expand_round_neon (rk + 1, 0x01); aes128_key_expand_round_neon (rk + 2, 0x02); aes128_key_expand_round_neon (rk + 3, 0x04); @@ -267,11 +317,11 @@ aes192_key_expand_round_neon (u8x8 * rk, u32 rcon) } void -aes192_key_expand (u8x16 * ek, const u8 * k) +aes192_key_expand (u8x16 * ek, const u8x16u * k) { u8x8 *rk = (u8x8 *) ek; - ek[0] = vld1q_u8 (k); - rk[2] = vld1_u8 (k + 16); + ek[0] = k[0]; + rk[2] = *(u8x8u *) (k + 1); aes192_key_expand_round_neon (rk + 3, 0x01); aes192_key_expand_round_neon (rk + 6, 0x02); aes192_key_expand_round_neon (rk + 9, 0x04); @@ -300,10 +350,10 @@ aes256_key_expand_round_neon 
(u8x16 * rk, u32 rcon) } void -aes256_key_expand (u8x16 * rk, const u8 * k) +aes256_key_expand (u8x16 * rk, u8x16 const *k) { - rk[0] = vld1q_u8 (k); - rk[1] = vld1q_u8 (k + 16); + rk[0] = k[0]; + rk[1] = k[1]; aes256_key_expand_round_neon (rk + 2, 0x01); aes256_key_expand_round_neon (rk + 3, 0); aes256_key_expand_round_neon (rk + 4, 0x02); @@ -322,18 +372,18 @@ aes256_key_expand (u8x16 * rk, const u8 * k) #endif static_always_inline void -aes_key_expand (u8x16 * key_schedule, u8 * key, aes_key_size_t ks) +aes_key_expand (u8x16 * key_schedule, u8 const *key, aes_key_size_t ks) { switch (ks) { case AES_KEY_128: - aes128_key_expand (key_schedule, key); + aes128_key_expand (key_schedule, (u8x16u const *) key); break; case AES_KEY_192: - aes192_key_expand (key_schedule, key); + aes192_key_expand (key_schedule, (u8x16u const *) key); break; case AES_KEY_256: - aes256_key_expand (key_schedule, key); + aes256_key_expand (key_schedule, (u8x16u const *) key); break; } } diff --git a/src/plugins/crypto_native/aes_gcm.c b/src/plugins/crypto_native/aes_gcm.c index 554fb2b2699..f2dec629359 100644 --- a/src/plugins/crypto_native/aes_gcm.c +++ b/src/plugins/crypto_native/aes_gcm.c @@ -30,113 +30,74 @@ typedef struct { /* pre-calculated hash key values */ - const __m128i Hi[8]; + const u8x16 Hi[8]; /* extracted AES key */ - const __m128i Ke[15]; + const u8x16 Ke[15]; } aes_gcm_key_data_t; -static const __m128i last_byte_one = { 0, 1ULL << 56 }; -static const __m128i zero = { 0, 0 }; +static const u32x4 last_byte_one = { 0, 0, 0, 1 << 24 }; static const u8x16 bswap_mask = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }; -static const u8x16 byte_mask_scale = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 -}; - -static_always_inline __m128i -aesni_gcm_bswap (__m128i x) -{ - return _mm_shuffle_epi8 (x, (__m128i) bswap_mask); -} - -static_always_inline __m128i -aesni_gcm_byte_mask (__m128i x, u8 n_bytes) -{ - u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale); - - return _mm_blendv_epi8 (zero, x, (__m128i) mask); -} - -static_always_inline __m128i -aesni_gcm_load_partial (__m128i * p, int n_bytes) -{ - ASSERT (n_bytes <= 16); -#ifdef __AVX512F__ - return _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p); -#else - return aesni_gcm_byte_mask (CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, p), - n_bytes); -#endif -} - -static_always_inline void -aesni_gcm_store_partial (void *p, __m128i r, int n_bytes) +static_always_inline u8x16 +aesni_gcm_bswap (u8x16 x) { -#ifdef __AVX512F__ - _mm_mask_storeu_epi8 (p, (1 << n_bytes) - 1, r); -#else - u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale); - _mm_maskmoveu_si128 (r, (__m128i) mask, p); -#endif + return (u8x16) _mm_shuffle_epi8 ((__m128i) x, (__m128i) bswap_mask); } static_always_inline void -aesni_gcm_load (__m128i * d, __m128i * inv, int n, int n_bytes) +aesni_gcm_load (u8x16 * d, u8x16u * inv, int n, int n_bytes) { for (int i = 0; i < n - 1; i++) - d[i] = _mm_loadu_si128 (inv + i); - d[n - 1] = n_bytes ? aesni_gcm_load_partial (inv + n - 1, n_bytes) : - _mm_loadu_si128 (inv + n - 1); + d[i] = inv[i]; + d[n - 1] = n_bytes ? 
aes_load_partial (inv + n - 1, n_bytes) : inv[n - 1]; } static_always_inline void -aesni_gcm_store (__m128i * d, __m128i * outv, int n, int n_bytes) +aesni_gcm_store (u8x16 * d, u8x16u * outv, int n, int n_bytes) { for (int i = 0; i < n - 1; i++) - _mm_storeu_si128 (outv + i, d[i]); + outv[i] = d[i]; if (n_bytes & 0xf) - aesni_gcm_store_partial (outv + n - 1, d[n - 1], n_bytes); + aes_store_partial (outv + n - 1, d[n - 1], n_bytes); else - _mm_storeu_si128 (outv + n - 1, d[n - 1]); + outv[n - 1] = d[n - 1]; } static_always_inline void -aesni_gcm_enc_first_round (__m128i * r, __m128i * Y, u32 * ctr, __m128i k, +aesni_gcm_enc_first_round (u8x16 * r, u32x4 * Y, u32 * ctr, u8x16 k, int n_blocks) { - u32 i; - if (PREDICT_TRUE ((u8) ctr[0] < (256 - n_blocks))) { - for (i = 0; i < n_blocks; i++) + for (int i = 0; i < n_blocks; i++) { - Y[0] = _mm_add_epi32 (Y[0], last_byte_one); - r[i] = k ^ Y[0]; + Y[0] += last_byte_one; + r[i] = k ^ (u8x16) Y[0]; } ctr[0] += n_blocks; } else { - for (i = 0; i < n_blocks; i++) + for (int i = 0; i < n_blocks; i++) { - Y[0] = _mm_insert_epi32 (Y[0], clib_host_to_net_u32 (++ctr[0]), 3); - r[i] = k ^ Y[0]; + Y[0][3] = clib_host_to_net_u32 (++ctr[0]); + r[i] = k ^ (u8x16) Y[0]; } } } static_always_inline void -aesni_gcm_enc_round (__m128i * r, __m128i k, int n_blocks) +aesni_gcm_enc_round (u8x16 * r, u8x16 k, int n_blocks) { for (int i = 0; i < n_blocks; i++) - r[i] = _mm_aesenc_si128 (r[i], k); + r[i] = aes_enc_round (r[i], k); } static_always_inline void -aesni_gcm_enc_last_round (__m128i * r, __m128i * d, const __m128i * k, +aesni_gcm_enc_last_round (u8x16 * r, u8x16 * d, u8x16 const *k, int rounds, int n_blocks) { @@ -145,26 +106,25 @@ aesni_gcm_enc_last_round (__m128i * r, __m128i * d, const __m128i * k, aesni_gcm_enc_round (r, k[i], n_blocks); for (int i = 0; i < n_blocks; i++) - d[i] ^= _mm_aesenclast_si128 (r[i], k[rounds]); + d[i] ^= aes_enc_last_round (r[i], k[rounds]); } -static_always_inline __m128i -aesni_gcm_ghash_blocks (__m128i T, aes_gcm_key_data_t * kd, - const __m128i * in, int n_blocks) +static_always_inline u8x16 +aesni_gcm_ghash_blocks (u8x16 T, aes_gcm_key_data_t * kd, + u8x16u * in, int n_blocks) { ghash_data_t _gd, *gd = &_gd; - const __m128i *Hi = kd->Hi + n_blocks - 1; - ghash_mul_first (gd, aesni_gcm_bswap (_mm_loadu_si128 (in)) ^ T, Hi[0]); + const u8x16 *Hi = kd->Hi + n_blocks - 1; + ghash_mul_first (gd, aesni_gcm_bswap (in[0]) ^ T, Hi[0]); for (int i = 1; i < n_blocks; i++) - ghash_mul_next (gd, aesni_gcm_bswap (_mm_loadu_si128 (in + i)), Hi[-i]); + ghash_mul_next (gd, aesni_gcm_bswap ((in[i])), Hi[-i]); ghash_reduce (gd); ghash_reduce2 (gd); return ghash_final (gd); } -static_always_inline __m128i -aesni_gcm_ghash (__m128i T, aes_gcm_key_data_t * kd, const __m128i * in, - u32 n_left) +static_always_inline u8x16 +aesni_gcm_ghash (u8x16 T, aes_gcm_key_data_t * kd, u8x16u * in, u32 n_left) { while (n_left >= 128) @@ -197,28 +157,28 @@ aesni_gcm_ghash (__m128i T, aes_gcm_key_data_t * kd, const __m128i * in, if (n_left) { - __m128i r = aesni_gcm_load_partial ((__m128i *) in, n_left); + u8x16 r = aes_load_partial (in, n_left); T = ghash_mul (aesni_gcm_bswap (r) ^ T, kd->Hi[0]); } return T; } -static_always_inline __m128i -aesni_gcm_calc (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, - __m128i * Y, u32 * ctr, __m128i * inv, __m128i * outv, +static_always_inline u8x16 +aesni_gcm_calc (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d, + u32x4 * Y, u32 * ctr, u8x16u * inv, u8x16u * outv, int rounds, int n, int last_block_bytes, int with_ghash, int 
is_encrypt) { - __m128i r[n]; + u8x16 r[n]; ghash_data_t _gd = { }, *gd = &_gd; - const __m128i *k = kd->Ke; + const u8x16 *rk = (u8x16 *) kd->Ke; int hidx = is_encrypt ? 4 : n, didx = 0; _mm_prefetch (inv + 4, _MM_HINT_T0); /* AES rounds 0 and 1 */ - aesni_gcm_enc_first_round (r, Y, ctr, k[0], n); - aesni_gcm_enc_round (r, k[1], n); + aesni_gcm_enc_first_round (r, Y, ctr, rk[0], n); + aesni_gcm_enc_round (r, rk[1], n); /* load data - decrypt round */ if (is_encrypt == 0) @@ -229,32 +189,32 @@ aesni_gcm_calc (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, ghash_mul_first (gd, aesni_gcm_bswap (d[didx++]) ^ T, kd->Hi[--hidx]); /* AES rounds 2 and 3 */ - aesni_gcm_enc_round (r, k[2], n); - aesni_gcm_enc_round (r, k[3], n); + aesni_gcm_enc_round (r, rk[2], n); + aesni_gcm_enc_round (r, rk[3], n); /* GHASH multiply block 2 */ if (with_ghash && hidx) ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]); /* AES rounds 4 and 5 */ - aesni_gcm_enc_round (r, k[4], n); - aesni_gcm_enc_round (r, k[5], n); + aesni_gcm_enc_round (r, rk[4], n); + aesni_gcm_enc_round (r, rk[5], n); /* GHASH multiply block 3 */ if (with_ghash && hidx) ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]); /* AES rounds 6 and 7 */ - aesni_gcm_enc_round (r, k[6], n); - aesni_gcm_enc_round (r, k[7], n); + aesni_gcm_enc_round (r, rk[6], n); + aesni_gcm_enc_round (r, rk[7], n); /* GHASH multiply block 4 */ if (with_ghash && hidx) ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]); /* AES rounds 8 and 9 */ - aesni_gcm_enc_round (r, k[8], n); - aesni_gcm_enc_round (r, k[9], n); + aesni_gcm_enc_round (r, rk[8], n); + aesni_gcm_enc_round (r, rk[9], n); /* GHASH reduce 1st step */ if (with_ghash) @@ -269,7 +229,7 @@ aesni_gcm_calc (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, ghash_reduce2 (gd); /* AES last round(s) */ - aesni_gcm_enc_last_round (r, d, k, rounds, n); + aesni_gcm_enc_last_round (r, d, rk, rounds, n); /* store data */ aesni_gcm_store (d, outv, n, last_block_bytes); @@ -281,18 +241,18 @@ aesni_gcm_calc (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, return T; } -static_always_inline __m128i -aesni_gcm_calc_double (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, - __m128i * Y, u32 * ctr, __m128i * inv, __m128i * outv, +static_always_inline u8x16 +aesni_gcm_calc_double (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d, + u32x4 * Y, u32 * ctr, u8x16u * inv, u8x16u * outv, int rounds, int is_encrypt) { - __m128i r[4]; + u8x16 r[4]; ghash_data_t _gd, *gd = &_gd; - const __m128i *k = kd->Ke; + const u8x16 *rk = (u8x16 *) kd->Ke; /* AES rounds 0 and 1 */ - aesni_gcm_enc_first_round (r, Y, ctr, k[0], 4); - aesni_gcm_enc_round (r, k[1], 4); + aesni_gcm_enc_first_round (r, Y, ctr, rk[0], 4); + aesni_gcm_enc_round (r, rk[1], 4); /* load 4 blocks of data - decrypt round */ if (is_encrypt == 0) @@ -302,36 +262,36 @@ aesni_gcm_calc_double (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[7]); /* AES rounds 2 and 3 */ - aesni_gcm_enc_round (r, k[2], 4); - aesni_gcm_enc_round (r, k[3], 4); + aesni_gcm_enc_round (r, rk[2], 4); + aesni_gcm_enc_round (r, rk[3], 4); /* GHASH multiply block 1 */ ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[6]); /* AES rounds 4 and 5 */ - aesni_gcm_enc_round (r, k[4], 4); - aesni_gcm_enc_round (r, k[5], 4); + aesni_gcm_enc_round (r, rk[4], 4); + aesni_gcm_enc_round (r, rk[5], 4); /* GHASH multiply block 2 */ ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[5]); /* AES rounds 6 and 7 */ - 
aesni_gcm_enc_round (r, k[6], 4); - aesni_gcm_enc_round (r, k[7], 4); + aesni_gcm_enc_round (r, rk[6], 4); + aesni_gcm_enc_round (r, rk[7], 4); /* GHASH multiply block 3 */ ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[4]); /* AES rounds 8 and 9 */ - aesni_gcm_enc_round (r, k[8], 4); - aesni_gcm_enc_round (r, k[9], 4); + aesni_gcm_enc_round (r, rk[8], 4); + aesni_gcm_enc_round (r, rk[9], 4); /* load 4 blocks of data - encrypt round */ if (is_encrypt) aesni_gcm_load (d, inv, 4, 0); /* AES last round(s) */ - aesni_gcm_enc_last_round (r, d, k, rounds, 4); + aesni_gcm_enc_last_round (r, d, rk, rounds, 4); /* store 4 blocks of data */ aesni_gcm_store (d, outv, 4, 0); @@ -344,36 +304,36 @@ aesni_gcm_calc_double (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, ghash_mul_next (gd, aesni_gcm_bswap (d[0]), kd->Hi[3]); /* AES rounds 0, 1 and 2 */ - aesni_gcm_enc_first_round (r, Y, ctr, k[0], 4); - aesni_gcm_enc_round (r, k[1], 4); - aesni_gcm_enc_round (r, k[2], 4); + aesni_gcm_enc_first_round (r, Y, ctr, rk[0], 4); + aesni_gcm_enc_round (r, rk[1], 4); + aesni_gcm_enc_round (r, rk[2], 4); /* GHASH multiply block 5 */ ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[2]); /* AES rounds 3 and 4 */ - aesni_gcm_enc_round (r, k[3], 4); - aesni_gcm_enc_round (r, k[4], 4); + aesni_gcm_enc_round (r, rk[3], 4); + aesni_gcm_enc_round (r, rk[4], 4); /* GHASH multiply block 6 */ ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[1]); /* AES rounds 5 and 6 */ - aesni_gcm_enc_round (r, k[5], 4); - aesni_gcm_enc_round (r, k[6], 4); + aesni_gcm_enc_round (r, rk[5], 4); + aesni_gcm_enc_round (r, rk[6], 4); /* GHASH multiply block 7 */ ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[0]); /* AES rounds 7 and 8 */ - aesni_gcm_enc_round (r, k[7], 4); - aesni_gcm_enc_round (r, k[8], 4); + aesni_gcm_enc_round (r, rk[7], 4); + aesni_gcm_enc_round (r, rk[8], 4); /* GHASH reduce 1st step */ ghash_reduce (gd); /* AES round 9 */ - aesni_gcm_enc_round (r, k[9], 4); + aesni_gcm_enc_round (r, rk[9], 4); /* load data - encrypt round */ if (is_encrypt) @@ -383,7 +343,7 @@ aesni_gcm_calc_double (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, ghash_reduce2 (gd); /* AES last round(s) */ - aesni_gcm_enc_last_round (r, d, k, rounds, 4); + aesni_gcm_enc_last_round (r, d, rk, rounds, 4); /* store data */ aesni_gcm_store (d, outv + 4, 4, 0); @@ -392,14 +352,14 @@ aesni_gcm_calc_double (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, return ghash_final (gd); } -static_always_inline __m128i -aesni_gcm_ghash_last (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, +static_always_inline u8x16 +aesni_gcm_ghash_last (u8x16 T, aes_gcm_key_data_t * kd, u8x16 * d, int n_blocks, int n_bytes) { ghash_data_t _gd, *gd = &_gd; if (n_bytes) - d[n_blocks - 1] = aesni_gcm_byte_mask (d[n_blocks - 1], n_bytes); + d[n_blocks - 1] = aes_byte_mask (d[n_blocks - 1], n_bytes); ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[n_blocks - 1]); if (n_blocks > 1) @@ -414,12 +374,11 @@ aesni_gcm_ghash_last (__m128i T, aes_gcm_key_data_t * kd, __m128i * d, } -static_always_inline __m128i -aesni_gcm_enc (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 * in, - const u8 * out, u32 n_left, int rounds) +static_always_inline u8x16 +aesni_gcm_enc (u8x16 T, aes_gcm_key_data_t * kd, u32x4 Y, u8x16u * inv, + u8x16u * outv, u32 n_left, int rounds) { - __m128i *inv = (__m128i *) in, *outv = (__m128i *) out; - __m128i d[4]; + u8x16 d[4]; u32 ctr = 1; if (n_left == 0) @@ -520,12 +479,11 @@ aesni_gcm_enc (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 
* in, return aesni_gcm_ghash_last (T, kd, d, 1, n_left); } -static_always_inline __m128i -aesni_gcm_dec (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 * in, - const u8 * out, u32 n_left, int rounds) +static_always_inline u8x16 +aesni_gcm_dec (u8x16 T, aes_gcm_key_data_t * kd, u32x4 Y, u8x16u * inv, + u8x16u * outv, u32 n_left, int rounds) { - __m128i *inv = (__m128i *) in, *outv = (__m128i *) out; - __m128i d[8]; + u8x16 d[8]; u32 ctr = 1; while (n_left >= 128) @@ -572,12 +530,13 @@ aesni_gcm_dec (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 * in, } static_always_inline int -aes_gcm (const u8 * in, u8 * out, const u8 * addt, const u8 * iv, u8 * tag, +aes_gcm (u8x16u * in, u8x16u * out, u8x16u * addt, u8x16u * iv, u8x16u * tag, u32 data_bytes, u32 aad_bytes, u8 tag_len, aes_gcm_key_data_t * kd, int aes_rounds, int is_encrypt) { int i; - __m128i r, Y0, T = { }; + u8x16 r, T = { }; + u32x4 Y0; ghash_data_t _gd, *gd = &_gd; _mm_prefetch (iv, _MM_HINT_T0); @@ -586,15 +545,15 @@ aes_gcm (const u8 * in, u8 * out, const u8 * addt, const u8 * iv, u8 * tag, /* calculate ghash for AAD - optimized for ipsec common cases */ if (aad_bytes == 8) - T = aesni_gcm_ghash (T, kd, (__m128i *) addt, 8); + T = aesni_gcm_ghash (T, kd, addt, 8); else if (aad_bytes == 12) - T = aesni_gcm_ghash (T, kd, (__m128i *) addt, 12); + T = aesni_gcm_ghash (T, kd, addt, 12); else - T = aesni_gcm_ghash (T, kd, (__m128i *) addt, aad_bytes); + T = aesni_gcm_ghash (T, kd, addt, aad_bytes); /* initalize counter */ - Y0 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) iv); - Y0 = _mm_insert_epi32 (Y0, clib_host_to_net_u32 (1), 3); + Y0 = (u32x4) aes_load_partial (iv, 12); + Y0[3] = clib_host_to_net_u32 (1); /* ghash and encrypt/edcrypt */ if (is_encrypt) @@ -604,26 +563,24 @@ aes_gcm (const u8 * in, u8 * out, const u8 * addt, const u8 * iv, u8 * tag, _mm_prefetch (tag, _MM_HINT_T0); - /* Finalize ghash */ - r[0] = data_bytes; - r[1] = aad_bytes; - - /* bytes to bits */ - r <<= 3; + /* Finalize ghash - data bytes and aad bytes converted to bits */ + /* *INDENT-OFF* */ + r = (u8x16) ((u64x2) {data_bytes, aad_bytes} << 3); + /* *INDENT-ON* */ /* interleaved computation of final ghash and E(Y0, k) */ ghash_mul_first (gd, r ^ T, kd->Hi[0]); - r = kd->Ke[0] ^ Y0; + r = kd->Ke[0] ^ (u8x16) Y0; for (i = 1; i < 5; i += 1) - r = _mm_aesenc_si128 (r, kd->Ke[i]); + r = aes_enc_round (r, kd->Ke[i]); ghash_reduce (gd); ghash_reduce2 (gd); for (; i < 9; i += 1) - r = _mm_aesenc_si128 (r, kd->Ke[i]); + r = aes_enc_round (r, kd->Ke[i]); T = ghash_final (gd); for (; i < aes_rounds; i += 1) - r = _mm_aesenc_si128 (r, kd->Ke[i]); - r = _mm_aesenclast_si128 (r, kd->Ke[aes_rounds]); + r = aes_enc_round (r, kd->Ke[i]); + r = aes_enc_last_round (r, kd->Ke[aes_rounds]); T = aesni_gcm_bswap (T) ^ r; /* tag_len 16 -> 0 */ @@ -633,16 +590,15 @@ aes_gcm (const u8 * in, u8 * out, const u8 * addt, const u8 * iv, u8 * tag, { /* store tag */ if (tag_len) - aesni_gcm_store_partial ((__m128i *) tag, T, (1 << tag_len) - 1); + aes_store_partial (tag, T, (1 << tag_len) - 1); else - _mm_storeu_si128 ((__m128i *) tag, T); + tag[0] = T; } else { /* check tag */ u16 tag_mask = tag_len ? 
(1 << tag_len) - 1 : 0xffff; - r = _mm_loadu_si128 ((__m128i *) tag); - if (_mm_movemask_epi8 (r == T) != tag_mask) + if ((u8x16_msb_mask (tag[0] == T) & tag_mask) != tag_mask) return 0; } return 1; @@ -660,7 +616,8 @@ aesni_ops_enc_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], next: kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index]; - aes_gcm (op->src, op->dst, op->aad, op->iv, op->tag, op->len, op->aad_len, + aes_gcm ((u8x16u *) op->src, (u8x16u *) op->dst, (u8x16u *) op->aad, + (u8x16u *) op->iv, (u8x16u *) op->tag, op->len, op->aad_len, op->tag_len, kd, AES_KEY_ROUNDS (ks), /* is_encrypt */ 1); op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; @@ -685,7 +642,8 @@ aesni_ops_dec_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], next: kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index]; - rv = aes_gcm (op->src, op->dst, op->aad, op->iv, op->tag, op->len, + rv = aes_gcm ((u8x16u *) op->src, (u8x16u *) op->dst, (u8x16u *) op->aad, + (u8x16u *) op->iv, (u8x16u *) op->tag, op->len, op->aad_len, op->tag_len, kd, AES_KEY_ROUNDS (ks), /* is_encrypt */ 0); @@ -712,8 +670,7 @@ static_always_inline void * aesni_gcm_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks) { aes_gcm_key_data_t *kd; - __m128i H; - int i; + u8x16 H; kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES); @@ -721,12 +678,9 @@ aesni_gcm_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks) aes_key_expand ((u8x16 *) kd->Ke, key->data, ks); /* pre-calculate H */ - H = kd->Ke[0]; - for (i = 1; i < AES_KEY_ROUNDS (ks); i += 1) - H = _mm_aesenc_si128 (H, kd->Ke[i]); - H = _mm_aesenclast_si128 (H, kd->Ke[i]); + H = aes_encrypt_block (u8x16_splat (0), kd->Ke, ks); H = aesni_gcm_bswap (H); - ghash_precompute (H, (__m128i *) kd->Hi, 8); + ghash_precompute (H, (u8x16 *) kd->Hi, 8); return kd; } diff --git a/src/plugins/crypto_native/ghash.h b/src/plugins/crypto_native/ghash.h index 0b2f629e28a..3f68f80dab4 100644 --- a/src/plugins/crypto_native/ghash.h +++ b/src/plugins/crypto_native/ghash.h @@ -107,34 +107,65 @@ /* on AVX-512 systems we can save a clock cycle by using ternary logic instruction to calculate a XOR b XOR c */ -static_always_inline __m128i -ghash_xor3 (__m128i a, __m128i b, __m128i c) +static_always_inline u8x16 +ghash_xor3 (u8x16 a, u8x16 b, u8x16 c) { #if defined (__AVX512F__) - return _mm_ternarylogic_epi32 (a, b, c, 0x96); + return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b, + (__m128i) c, 0x96); #endif return a ^ b ^ c; } +static_always_inline u8x16 +gmul_lo_lo (u8x16 a, u8x16 b) +{ + return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x00); +} + +static_always_inline u8x16 +gmul_lo_hi (u8x16 a, u8x16 b) +{ + return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x01); +} + +static_always_inline u8x16 +gmul_hi_lo (u8x16 a, u8x16 b) +{ + return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x10); +} + +static_always_inline u8x16 +gmul_hi_hi (u8x16 a, u8x16 b) +{ + return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x11); +} + typedef struct { - __m128i mid, hi, lo, tmp_lo, tmp_hi; + u8x16 mid, hi, lo, tmp_lo, tmp_hi; int pending; } ghash_data_t; -static const __m128i ghash_poly = { 1, 0xC200000000000000 }; -static const __m128i ghash_poly2 = { 0x1C2000000, 0xC200000000000000 }; +static const u8x16 ghash_poly = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2 +}; + +static const u8x16 ghash_poly2 = { + 0x00, 0x00, 0x00, 0xc2, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0xc2 +}; static_always_inline void -ghash_mul_first (ghash_data_t * gd, __m128i a, __m128i b) +ghash_mul_first (ghash_data_t * gd, u8x16 a, u8x16 b) { /* a1 * b1 */ - gd->hi = _mm_clmulepi64_si128 (a, b, 0x11); + gd->hi = gmul_hi_hi (a, b); /* a0 * b0 */ - gd->lo = _mm_clmulepi64_si128 (a, b, 0x00); + gd->lo = gmul_lo_lo (a, b); /* a0 * b1 ^ a1 * b0 */ - gd->mid = (_mm_clmulepi64_si128 (a, b, 0x01) ^ - _mm_clmulepi64_si128 (a, b, 0x10)); + gd->mid = (gmul_lo_hi (a, b) ^ gmul_hi_lo (a, b)); /* set gd->pending to 0 so next invocation of ghash_mul_next(...) knows that there is no pending data in tmp_lo and tmp_hi */ @@ -142,12 +173,12 @@ ghash_mul_first (ghash_data_t * gd, __m128i a, __m128i b) } static_always_inline void -ghash_mul_next (ghash_data_t * gd, __m128i a, __m128i b) +ghash_mul_next (ghash_data_t * gd, u8x16 a, u8x16 b) { /* a1 * b1 */ - __m128i hi = _mm_clmulepi64_si128 (a, b, 0x11); + u8x16 hi = gmul_hi_hi (a, b); /* a0 * b0 */ - __m128i lo = _mm_clmulepi64_si128 (a, b, 0x00); + u8x16 lo = gmul_lo_lo (a, b); /* this branch will be optimized out by the compiler, and it allows us to reduce number of XOR operations by using ternary logic */ @@ -167,21 +198,19 @@ ghash_mul_next (ghash_data_t * gd, __m128i a, __m128i b) } /* gd->mid ^= a0 * b1 ^ a1 * b0 */ - gd->mid = ghash_xor3 (gd->mid, - _mm_clmulepi64_si128 (a, b, 0x01), - _mm_clmulepi64_si128 (a, b, 0x10)); + gd->mid = ghash_xor3 (gd->mid, gmul_lo_hi (a, b), gmul_hi_lo (a, b)); } static_always_inline void ghash_reduce (ghash_data_t * gd) { - __m128i r; + u8x16 r; /* Final combination: gd->lo ^= gd->mid << 64 gd->hi ^= gd->mid >> 64 */ - __m128i midl = _mm_slli_si128 (gd->mid, 8); - __m128i midr = _mm_srli_si128 (gd->mid, 8); + u8x16 midl = u8x16_word_shift_left (gd->mid, 8); + u8x16 midr = u8x16_word_shift_right (gd->mid, 8); if (gd->pending) { @@ -194,26 +223,26 @@ ghash_reduce (ghash_data_t * gd) gd->hi ^= midr; } - r = _mm_clmulepi64_si128 (ghash_poly2, gd->lo, 0x01); - gd->lo ^= _mm_slli_si128 (r, 8); + r = gmul_lo_hi (ghash_poly2, gd->lo); + gd->lo ^= u8x16_word_shift_left (r, 8); } static_always_inline void ghash_reduce2 (ghash_data_t * gd) { - gd->tmp_lo = _mm_clmulepi64_si128 (ghash_poly2, gd->lo, 0x00); - gd->tmp_hi = _mm_clmulepi64_si128 (ghash_poly2, gd->lo, 0x10); + gd->tmp_lo = gmul_lo_lo (ghash_poly2, gd->lo); + gd->tmp_hi = gmul_hi_lo (ghash_poly2, gd->lo); } -static_always_inline __m128i +static_always_inline u8x16 ghash_final (ghash_data_t * gd) { - return ghash_xor3 (gd->hi, _mm_srli_si128 (gd->tmp_lo, 4), - _mm_slli_si128 (gd->tmp_hi, 4)); + return ghash_xor3 (gd->hi, u8x16_word_shift_right (gd->tmp_lo, 4), + u8x16_word_shift_left (gd->tmp_hi, 4)); } -static_always_inline __m128i -ghash_mul (__m128i a, __m128i b) +static_always_inline u8x16 +ghash_mul (u8x16 a, u8x16 b) { ghash_data_t _gd, *gd = &_gd; ghash_mul_first (gd, a, b); @@ -223,19 +252,20 @@ ghash_mul (__m128i a, __m128i b) } static_always_inline void -ghash_precompute (__m128i H, __m128i * Hi, int count) +ghash_precompute (u8x16 H, u8x16 * Hi, int count) { - __m128i r; + u8x16 r8; + u32x4 r32; /* calcullate H<<1 mod poly from the hash key */ - r = _mm_srli_epi64 (H, 63); - H = _mm_slli_epi64 (H, 1); - H |= _mm_slli_si128 (r, 8); - r = _mm_srli_si128 (r, 8); - r = _mm_shuffle_epi32 (r, 0x24); + r8 = (u8x16) ((u64x2) H >> 63); + H = (u8x16) ((u64x2) H << 1); + H |= u8x16_word_shift_left (r8, 8); + r32 = (u32x4) u8x16_word_shift_right (r8, 8); + r32 = u32x4_shuffle (r32, 0, 1, 2, 0); /* *INDENT-OFF* */ - r = _mm_cmpeq_epi32 (r, (__m128i) 
(u32x4) {1, 0, 0, 1}); + r32 = r32 == (u32x4) {1, 0, 0, 1}; /* *INDENT-ON* */ - Hi[0] = H ^ (r & ghash_poly); + Hi[0] = H ^ ((u8x16) r32 & ghash_poly); /* calculate H^(i + 1) */ for (int i = 1; i < count; i++)
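Editor's note (not part of the commit): the `gmul_lo_lo` / `gmul_lo_hi` / `gmul_hi_lo` / `gmul_hi_hi` wrappers added in ghash.h only give names to the four half-selections of the carry-less multiply instruction, so the `0x00`/`0x01`/`0x10`/`0x11` immediates of `_mm_clmulepi64_si128` no longer appear inline in the GHASH arithmetic. Below is a minimal stand-alone sketch of one wrapper, assuming an x86-64 host with PCLMULQDQ (build with `-mpclmul`); only the wrapper shape is taken from the patch.

```c
/* Minimal demo: gmul_lo_lo multiplies the low 64-bit halves of its
   operands carry-lessly (polynomial multiplication over GF(2)).
   Build with: cc -O2 -mpclmul demo.c */
#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

typedef uint8_t u8;
typedef u8 u8x16 __attribute__ ((vector_size (16)));

static inline u8x16
gmul_lo_lo (u8x16 a, u8x16 b)
{
  /* immediate 0x00 selects the low 64-bit half of both a and b */
  return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x00);
}

int
main (void)
{
  /* carry-less multiply of the polynomials x+1 (0b11) and x^2+x (0b110):
     the product is x^3+x (0b1010), i.e. 10 - not 18 as with an ordinary
     multiply, because the two x^2 partial products cancel under XOR */
  u8x16 a = { 3 }, b = { 6 };
  u8x16 r = gmul_lo_lo (a, b);

  printf ("clmul (3, 6) = %u\n", r[0]);	/* prints 10 */
  return 0;
}
```

In the patch these wrappers feed `ghash_mul_first` / `ghash_mul_next`, which accumulate the 256-bit product that the `ghash_reduce` / `ghash_reduce2` / `ghash_final` steps then fold back modulo the GCM polynomial.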