From 415b4b0bbae661cbcbb93c3cb9d016dfae2f5081 Mon Sep 17 00:00:00 2001
From: Damjan Marion
Date: Tue, 11 Feb 2020 17:04:38 +0100
Subject: crypto-native: refactor GCM code to use generic types

Type: refactor

Change-Id: I76733a9ed362ec60badd22c0fbc2a9c5749da88d
Signed-off-by: Damjan Marion
---
 src/plugins/crypto_native/aes.h | 236 ++++++++++++++++++++++++----------------
 1 file changed, 143 insertions(+), 93 deletions(-)

(limited to 'src/plugins/crypto_native/aes.h')

diff --git a/src/plugins/crypto_native/aes.h b/src/plugins/crypto_native/aes.h
index 85d6f7916f8..371f5c4508c 100644
--- a/src/plugins/crypto_native/aes.h
+++ b/src/plugins/crypto_native/aes.h
@@ -30,6 +30,10 @@ typedef enum
 
 #ifdef __x86_64__
 
+static const u8x16 byte_mask_scale = {
+  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+};
+
 static_always_inline u8x16
 aes_block_load (u8 * p)
 {
@@ -66,132 +70,178 @@ aes_block_store (u8 * p, u8x16 r)
   _mm_storeu_si128 ((__m128i *) p, (__m128i) r);
 }
 
+static_always_inline u8x16
+aes_byte_mask (u8x16 x, u8 n_bytes)
+{
+  u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);
+  __m128i zero = { };
+
+  return (u8x16) _mm_blendv_epi8 (zero, (__m128i) x, (__m128i) mask);
+}
+
+static_always_inline u8x16
+aes_load_partial (u8x16u * p, int n_bytes)
+{
+  ASSERT (n_bytes <= 16);
+#ifdef __AVX512F__
+  __m128i zero = { };
+  return (u8x16) _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p);
+#else
+  return aes_byte_mask (CLIB_MEM_OVERFLOW_LOAD (*, p), n_bytes);
+#endif
+}
+
+static_always_inline void
+aes_store_partial (void *p, u8x16 r, int n_bytes)
+{
+#ifdef __AVX512F__
+  _mm_mask_storeu_epi8 (p, (1 << n_bytes) - 1, (__m128i) r);
+#else
+  u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);
+  _mm_maskmoveu_si128 ((__m128i) r, (__m128i) mask, p);
+#endif
+}
+
+
+static_always_inline u8x16
+aes_encrypt_block (u8x16 block, const u8x16 * round_keys, aes_key_size_t ks)
+{
+  int i;
+  block ^= round_keys[0];
+  for (i = 1; i < AES_KEY_ROUNDS (ks); i += 1)
+    block = aes_enc_round (block, round_keys[i]);
+  return aes_enc_last_round (block, round_keys[i]);
+}
+
 static_always_inline u8x16
 aes_inv_mix_column (u8x16 a)
 {
   return (u8x16) _mm_aesimc_si128 ((__m128i) a);
 }
 
+#define aes_keygen_assist(a, b) \
+  (u8x16) _mm_aeskeygenassist_si128((__m128i) a, b)
+
 /* AES-NI based AES key expansion based on code samples from
    Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper
    (323641-001) */
 
 static_always_inline void
-aes128_key_assist (__m128i * k, __m128i r)
+aes128_key_assist (u8x16 * rk, u8x16 r)
 {
-  __m128i t = k[-1];
-  t ^= _mm_slli_si128 (t, 4);
-  t ^= _mm_slli_si128 (t, 4);
-  t ^= _mm_slli_si128 (t, 4);
-  k[0] = t ^ _mm_shuffle_epi32 (r, 0xff);
+  u8x16 t = rk[-1];
+  t ^= u8x16_word_shift_left (t, 4);
+  t ^= u8x16_word_shift_left (t, 4);
+  t ^= u8x16_word_shift_left (t, 4);
+  rk[0] = t ^ (u8x16) u32x4_shuffle ((u32x4) r, 3, 3, 3, 3);
 }
 
 static_always_inline void
-aes128_key_expand (u8x16 * key_schedule, u8 * key)
+aes128_key_expand (u8x16 * rk, u8x16 const *k)
 {
-  __m128i *k = (__m128i *) key_schedule;
-  k[0] = _mm_loadu_si128 ((const __m128i *) key);
-  aes128_key_assist (k + 1, _mm_aeskeygenassist_si128 (k[0], 0x01));
-  aes128_key_assist (k + 2, _mm_aeskeygenassist_si128 (k[1], 0x02));
-  aes128_key_assist (k + 3, _mm_aeskeygenassist_si128 (k[2], 0x04));
-  aes128_key_assist (k + 4, _mm_aeskeygenassist_si128 (k[3], 0x08));
-  aes128_key_assist (k + 5, _mm_aeskeygenassist_si128 (k[4], 0x10));
-  aes128_key_assist (k + 6, _mm_aeskeygenassist_si128 (k[5], 0x20));
-  aes128_key_assist (k + 7, _mm_aeskeygenassist_si128 (k[6], 0x40));
-  aes128_key_assist (k + 8, _mm_aeskeygenassist_si128 (k[7], 0x80));
-  aes128_key_assist (k + 9, _mm_aeskeygenassist_si128 (k[8], 0x1b));
-  aes128_key_assist (k + 10, _mm_aeskeygenassist_si128 (k[9], 0x36));
+  rk[0] = k[0];
+  aes128_key_assist (rk + 1, aes_keygen_assist (rk[0], 0x01));
+  aes128_key_assist (rk + 2, aes_keygen_assist (rk[1], 0x02));
+  aes128_key_assist (rk + 3, aes_keygen_assist (rk[2], 0x04));
+  aes128_key_assist (rk + 4, aes_keygen_assist (rk[3], 0x08));
+  aes128_key_assist (rk + 5, aes_keygen_assist (rk[4], 0x10));
+  aes128_key_assist (rk + 6, aes_keygen_assist (rk[5], 0x20));
+  aes128_key_assist (rk + 7, aes_keygen_assist (rk[6], 0x40));
+  aes128_key_assist (rk + 8, aes_keygen_assist (rk[7], 0x80));
+  aes128_key_assist (rk + 9, aes_keygen_assist (rk[8], 0x1b));
+  aes128_key_assist (rk + 10, aes_keygen_assist (rk[9], 0x36));
 }
 
 static_always_inline void
-aes192_key_assist (__m128i * r1, __m128i * r2, __m128i key_assist)
+aes192_key_assist (u8x16 * r1, u8x16 * r2, u8x16 key_assist)
 {
-  __m128i t;
-  *r1 ^= t = _mm_slli_si128 (*r1, 0x4);
-  *r1 ^= t = _mm_slli_si128 (t, 0x4);
-  *r1 ^= _mm_slli_si128 (t, 0x4);
-  *r1 ^= _mm_shuffle_epi32 (key_assist, 0x55);
-  *r2 ^= _mm_slli_si128 (*r2, 0x4);
-  *r2 ^= _mm_shuffle_epi32 (*r1, 0xff);
+  u8x16 t;
+  r1[0] ^= t = u8x16_word_shift_left (r1[0], 4);
+  r1[0] ^= t = u8x16_word_shift_left (t, 4);
+  r1[0] ^= u8x16_word_shift_left (t, 4);
+  r1[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) key_assist, 0x55);
+  r2[0] ^= u8x16_word_shift_left (r2[0], 4);
+  r2[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) r1[0], 0xff);
 }
 
 static_always_inline void
-aes192_key_expand (u8x16 * key_schedule, u8 * key)
+aes192_key_expand (u8x16 * rk, u8x16u const *k)
 {
-  __m128i r1, r2, *k = (__m128i *) key_schedule;
+  u8x16 r1, r2;
 
-  k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
-  /* load the 24-bytes key as 2 * 16-bytes (and ignore last 8-bytes) */
-  k[1] = r2 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) key + 1);
+  rk[0] = r1 = k[0];
+  /* *INDENT-OFF* */
+  rk[1] = r2 = (u8x16) (u64x2) { *(u64 *) (k + 1), 0 };
+  /* *INDENT-ON* */
 
-  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x1));
-  k[1] = (__m128i) _mm_shuffle_pd ((__m128d) k[1], (__m128d) r1, 0);
-  k[2] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
+  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x1));
+  rk[1] = (u8x16) _mm_shuffle_pd ((__m128d) rk[1], (__m128d) r1, 0);
+  rk[2] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
 
-  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x2));
-  k[3] = r1;
-  k[4] = r2;
+  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x2));
+  rk[3] = r1;
+  rk[4] = r2;
 
-  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x4));
-  k[4] = (__m128i) _mm_shuffle_pd ((__m128d) k[4], (__m128d) r1, 0);
-  k[5] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
+  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x4));
+  rk[4] = (u8x16) _mm_shuffle_pd ((__m128d) rk[4], (__m128d) r1, 0);
+  rk[5] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
 
-  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x8));
-  k[6] = r1;
-  k[7] = r2;
+  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x8));
+  rk[6] = r1;
+  rk[7] = r2;
 
-  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x10));
-  k[7] = (__m128i) _mm_shuffle_pd ((__m128d) k[7], (__m128d) r1, 0);
-  k[8] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
+  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x10));
+  rk[7] = (u8x16) _mm_shuffle_pd ((__m128d) rk[7], (__m128d) r1, 0);
+  rk[8] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
 
-  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x20));
-  k[9] = r1;
-  k[10] = r2;
+  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x20));
+  rk[9] = r1;
+  rk[10] = r2;
 
-  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x40));
-  k[10] = (__m128i) _mm_shuffle_pd ((__m128d) k[10], (__m128d) r1, 0);
-  k[11] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
+  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x40));
+  rk[10] = (u8x16) _mm_shuffle_pd ((__m128d) rk[10], (__m128d) r1, 0);
+  rk[11] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
 
-  aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x80));
-  k[12] = r1;
+  aes192_key_assist (&r1, &r2, aes_keygen_assist (r2, 0x80));
+  rk[12] = r1;
 }
 
 static_always_inline void
-aes256_key_assist (__m128i * k, int i, __m128i key_assist)
+aes256_key_assist (u8x16 * rk, int i, u8x16 key_assist)
 {
-  __m128i r, t;
-  k += i;
-  r = k[-2];
-  r ^= t = _mm_slli_si128 (r, 0x4);
-  r ^= t = _mm_slli_si128 (t, 0x4);
-  r ^= _mm_slli_si128 (t, 0x4);
-  r ^= _mm_shuffle_epi32 (key_assist, 0xff);
-  k[0] = r;
+  u8x16 r, t;
+  rk += i;
+  r = rk[-2];
+  r ^= t = u8x16_word_shift_left (r, 4);
+  r ^= t = u8x16_word_shift_left (t, 4);
+  r ^= u8x16_word_shift_left (t, 4);
+  r ^= (u8x16) u32x4_shuffle ((u32x4) key_assist, 3, 3, 3, 3);
+  rk[0] = r;
 
   if (i >= 14)
     return;
 
-  r = k[-1];
-  r ^= t = _mm_slli_si128 (r, 0x4);
-  r ^= t = _mm_slli_si128 (t, 0x4);
-  r ^= _mm_slli_si128 (t, 0x4);
-  r ^= _mm_shuffle_epi32 (_mm_aeskeygenassist_si128 (k[0], 0x0), 0xaa);
-  k[1] = r;
+  key_assist = aes_keygen_assist (rk[0], 0x0);
+  r = rk[-1];
+  r ^= t = u8x16_word_shift_left (r, 4);
+  r ^= t = u8x16_word_shift_left (t, 4);
+  r ^= u8x16_word_shift_left (t, 4);
+  r ^= (u8x16) u32x4_shuffle ((u32x4) key_assist, 2, 2, 2, 2);
+  rk[1] = r;
 }
 
 static_always_inline void
-aes256_key_expand (u8x16 * key_schedule, u8 * key)
+aes256_key_expand (u8x16 * rk, u8x16u const *k)
 {
-  __m128i *k = (__m128i *) key_schedule;
-  k[0] = _mm_loadu_si128 ((__m128i *) key);
-  k[1] = _mm_loadu_si128 ((__m128i *) (key + 16));
-  aes256_key_assist (k, 2, _mm_aeskeygenassist_si128 (k[1], 0x01));
-  aes256_key_assist (k, 4, _mm_aeskeygenassist_si128 (k[3], 0x02));
-  aes256_key_assist (k, 6, _mm_aeskeygenassist_si128 (k[5], 0x04));
-  aes256_key_assist (k, 8, _mm_aeskeygenassist_si128 (k[7], 0x08));
-  aes256_key_assist (k, 10, _mm_aeskeygenassist_si128 (k[9], 0x10));
-  aes256_key_assist (k, 12, _mm_aeskeygenassist_si128 (k[11], 0x20));
-  aes256_key_assist (k, 14, _mm_aeskeygenassist_si128 (k[13], 0x40));
+  rk[0] = k[0];
+  rk[1] = k[1];
+  aes256_key_assist (rk, 2, aes_keygen_assist (rk[1], 0x01));
+  aes256_key_assist (rk, 4, aes_keygen_assist (rk[3], 0x02));
+  aes256_key_assist (rk, 6, aes_keygen_assist (rk[5], 0x04));
+  aes256_key_assist (rk, 8, aes_keygen_assist (rk[7], 0x08));
+  aes256_key_assist (rk, 10, aes_keygen_assist (rk[9], 0x10));
+  aes256_key_assist (rk, 12, aes_keygen_assist (rk[11], 0x20));
+  aes256_key_assist (rk, 14, aes_keygen_assist (rk[13], 0x40));
 }
 
 #endif
@@ -223,9 +273,9 @@ aes128_key_expand_round_neon (u8x16 * rk, u32 rcon)
 }
 
 void
-aes128_key_expand (u8x16 * rk, const u8 * k)
+aes128_key_expand (u8x16 * rk, const u8x16 * k)
 {
-  rk[0] = vld1q_u8 (k);
+  rk[0] = k[0];
   aes128_key_expand_round_neon (rk + 1, 0x01);
   aes128_key_expand_round_neon (rk + 2, 0x02);
   aes128_key_expand_round_neon (rk + 3, 0x04);
@@ -267,11 +317,11 @@ aes192_key_expand_round_neon (u8x8 * rk, u32 rcon)
 }
 
 void
-aes192_key_expand (u8x16 * ek, const u8 * k)
+aes192_key_expand (u8x16 * ek, const u8x16u * k)
 {
   u8x8 *rk = (u8x8 *) ek;
-  ek[0] = vld1q_u8 (k);
-  rk[2] = vld1_u8 (k + 16);
+  ek[0] = k[0];
+  rk[2] = *(u8x8u *) (k + 1);
   aes192_key_expand_round_neon (rk + 3, 0x01);
   aes192_key_expand_round_neon (rk + 6, 0x02);
   aes192_key_expand_round_neon (rk + 9, 0x04);
@@ -300,10 +350,10 @@ aes256_key_expand_round_neon (u8x16 * rk, u32 rcon)
 }
 
 void
-aes256_key_expand (u8x16 * rk, const u8 * k)
+aes256_key_expand (u8x16 * rk, u8x16 const *k)
 {
-  rk[0] = vld1q_u8 (k);
-  rk[1] = vld1q_u8 (k + 16);
+  rk[0] = k[0];
+  rk[1] = k[1];
   aes256_key_expand_round_neon (rk + 2, 0x01);
   aes256_key_expand_round_neon (rk + 3, 0);
   aes256_key_expand_round_neon (rk + 4, 0x02);
@@ -322,18 +372,18 @@ aes256_key_expand (u8x16 * rk, const u8 * k)
 #endif
 
 static_always_inline void
-aes_key_expand (u8x16 * key_schedule, u8 * key, aes_key_size_t ks)
+aes_key_expand (u8x16 * key_schedule, u8 const *key, aes_key_size_t ks)
 {
   switch (ks)
     {
    case AES_KEY_128:
-      aes128_key_expand (key_schedule, key);
+      aes128_key_expand (key_schedule, (u8x16u const *) key);
      break;
    case AES_KEY_192:
-      aes192_key_expand (key_schedule, key);
+      aes192_key_expand (key_schedule, (u8x16u const *) key);
      break;
    case AES_KEY_256:
-      aes256_key_expand (key_schedule, key);
+      aes256_key_expand (key_schedule, (u8x16u const *) key);
      break;
    }
 }
-- 
cgit 1.2.3-korg
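
The new aes_byte_mask () / aes_load_partial () helpers above build a per-byte mask by comparing a splatted length against the byte_mask_scale index vector and then blending against zero. The snippet below is a minimal, self-contained sketch of that same trick written with raw SSE4.1 intrinsics rather than VPP's clib vector types; the byte_mask () and main () names and the file name in the build comment are illustrative only, and unlike the plugin (which relies on CLIB_MEM_OVERFLOW_LOAD) it uses a full 16-byte buffer so the wide load is always in bounds.

/* Illustrative sketch only -- build (hypothetical file name):
   cc -msse4.1 -o demo byte_mask_demo.c */
#include <stdio.h>
#include <stdint.h>
#include <smmintrin.h>		/* SSE4.1: _mm_blendv_epi8 */

/* Keep the first n_bytes lanes of x, zero the rest (0 <= n_bytes <= 16). */
static __m128i
byte_mask (__m128i x, int n_bytes)
{
  /* per-lane index 0..15, mirroring byte_mask_scale in the patch */
  const __m128i scale = _mm_set_epi8 (15, 14, 13, 12, 11, 10, 9, 8,
				      7, 6, 5, 4, 3, 2, 1, 0);
  /* lanes whose index is below n_bytes become 0xff, the rest 0x00 */
  __m128i mask = _mm_cmpgt_epi8 (_mm_set1_epi8 ((char) n_bytes), scale);
  /* select x where the mask is set, zero elsewhere */
  return _mm_blendv_epi8 (_mm_setzero_si128 (), x, mask);
}

int
main (void)
{
  /* 16-byte buffer keeps the full-width load in bounds */
  uint8_t buf[16] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
  uint8_t out[16];

  __m128i v = byte_mask (_mm_loadu_si128 ((const __m128i *) buf), 5);
  _mm_storeu_si128 ((__m128i *) out, v);

  for (int i = 0; i < 16; i++)
    printf ("%d ", (int) out[i]);	/* prints: 1 2 3 4 5 0 0 ... 0 */
  printf ("\n");
  return 0;
}

With AVX-512 the same partial access collapses into a single masked load or store, which is what the __AVX512F__ branches of aes_load_partial () and aes_store_partial () in the patch do.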