Diffstat (limited to 'src/plugins/crypto_native/aes_cbc.c')
-rw-r--r--  src/plugins/crypto_native/aes_cbc.c | 478
1 file changed, 142 insertions(+), 336 deletions(-)
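In brief: this change drops the file-local AES-CBC primitives (the scalar/NEON decryptor, the AVX-512 vaes_cbc_dec and its helpers, and the local aes_cbc_key_data_t) in favor of the shared routines in vppinfra/crypto/aes_cbc.h, adds a 256-bit code path for CPUs that have VAES but not AVX-512, and replaces the per-microarchitecture init functions with probe-based CRYPTO_NATIVE_OP_HANDLER / CRYPTO_NATIVE_KEY_HANDLER registrations. The engine also stops generating IVs itself, so the /dev/urandom plumbing goes away.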
diff --git a/src/plugins/crypto_native/aes_cbc.c b/src/plugins/crypto_native/aes_cbc.c
index c8ec37d152d..dd7ca3f1cf1 100644
--- a/src/plugins/crypto_native/aes_cbc.c
+++ b/src/plugins/crypto_native/aes_cbc.c
@@ -19,214 +19,30 @@
 #include <vnet/plugin/plugin.h>
 #include <vnet/crypto/crypto.h>
 #include <crypto_native/crypto_native.h>
-#include <crypto_native/aes.h>
+#include <vppinfra/crypto/aes_cbc.h>

 #if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
 #pragma GCC optimize ("O3")
 #endif

-typedef struct
-{
-  u8x16 encrypt_key[15];
-#if __VAES__
-  u8x64 decrypt_key[15];
-#else
-  u8x16 decrypt_key[15];
-#endif
-} aes_cbc_key_data_t;
-
-
-static_always_inline void __clib_unused
-aes_cbc_dec (u8x16 * k, u8x16u * src, u8x16u * dst, u8x16u * iv, int count,
-	     int rounds)
-{
-  u8x16 r[4], c[4], f;
-
-  f = iv[0];
-  while (count >= 64)
-    {
-      clib_prefetch_load (src + 8);
-      clib_prefetch_load (dst + 8);
-
-      c[0] = r[0] = src[0];
-      c[1] = r[1] = src[1];
-      c[2] = r[2] = src[2];
-      c[3] = r[3] = src[3];
-
-#if __x86_64__
-      r[0] ^= k[0];
-      r[1] ^= k[0];
-      r[2] ^= k[0];
-      r[3] ^= k[0];
-
-      for (int i = 1; i < rounds; i++)
-	{
-	  r[0] = aes_dec_round (r[0], k[i]);
-	  r[1] = aes_dec_round (r[1], k[i]);
-	  r[2] = aes_dec_round (r[2], k[i]);
-	  r[3] = aes_dec_round (r[3], k[i]);
-	}
-
-      r[0] = aes_dec_last_round (r[0], k[rounds]);
-      r[1] = aes_dec_last_round (r[1], k[rounds]);
-      r[2] = aes_dec_last_round (r[2], k[rounds]);
-      r[3] = aes_dec_last_round (r[3], k[rounds]);
-#else
-      for (int i = 0; i < rounds - 1; i++)
-	{
-	  r[0] = vaesimcq_u8 (vaesdq_u8 (r[0], k[i]));
-	  r[1] = vaesimcq_u8 (vaesdq_u8 (r[1], k[i]));
-	  r[2] = vaesimcq_u8 (vaesdq_u8 (r[2], k[i]));
-	  r[3] = vaesimcq_u8 (vaesdq_u8 (r[3], k[i]));
-	}
-      r[0] = vaesdq_u8 (r[0], k[rounds - 1]) ^ k[rounds];
-      r[1] = vaesdq_u8 (r[1], k[rounds - 1]) ^ k[rounds];
-      r[2] = vaesdq_u8 (r[2], k[rounds - 1]) ^ k[rounds];
-      r[3] = vaesdq_u8 (r[3], k[rounds - 1]) ^ k[rounds];
-#endif
-      dst[0] = r[0] ^ f;
-      dst[1] = r[1] ^ c[0];
-      dst[2] = r[2] ^ c[1];
-      dst[3] = r[3] ^ c[2];
-      f = c[3];
-
-      count -= 64;
-      src += 4;
-      dst += 4;
-    }
-
-  while (count > 0)
-    {
-      c[0] = r[0] = src[0];
-#if __x86_64__
-      r[0] ^= k[0];
-      for (int i = 1; i < rounds; i++)
-	r[0] = aes_dec_round (r[0], k[i]);
-      r[0] = aes_dec_last_round (r[0], k[rounds]);
-#else
-      c[0] = r[0] = src[0];
-      for (int i = 0; i < rounds - 1; i++)
-	r[0] = vaesimcq_u8 (vaesdq_u8 (r[0], k[i]));
-      r[0] = vaesdq_u8 (r[0], k[rounds - 1]) ^ k[rounds];
-#endif
-      dst[0] = r[0] ^ f;
-      f = c[0];
-
-      count -= 16;
-      src += 1;
-      dst += 1;
-    }
-}
-
-#if __x86_64__
-#ifdef __VAES__
-
-static_always_inline u8x64
-aes_block_load_x4 (u8 * src[], int i)
-{
-  u8x64 r = { };
-  r = u8x64_insert_u8x16 (r, aes_block_load (src[0] + i), 0);
-  r = u8x64_insert_u8x16 (r, aes_block_load (src[1] + i), 1);
-  r = u8x64_insert_u8x16 (r, aes_block_load (src[2] + i), 2);
-  r = u8x64_insert_u8x16 (r, aes_block_load (src[3] + i), 3);
-  return r;
-}
-
-static_always_inline void
-aes_block_store_x4 (u8 * dst[], int i, u8x64 r)
-{
-  aes_block_store (dst[0] + i, u8x64_extract_u8x16 (r, 0));
-  aes_block_store (dst[1] + i, u8x64_extract_u8x16 (r, 1));
-  aes_block_store (dst[2] + i, u8x64_extract_u8x16 (r, 2));
-  aes_block_store (dst[3] + i, u8x64_extract_u8x16 (r, 3));
-}
-
-static_always_inline u8x64
-aes_cbc_dec_permute (u8x64 a, u8x64 b)
-{
-  __m512i perm = { 6, 7, 8, 9, 10, 11, 12, 13 };
-  return (u8x64) _mm512_permutex2var_epi64 ((__m512i) a, perm, (__m512i) b);
-}
-
-static_always_inline void
-vaes_cbc_dec (u8x64 * k, u8x64u * src, u8x64u * dst, u8x16 * iv, int count,
-	      aes_key_size_t rounds)
-{
-  u8x64 f, r[4], c[4] = { };
-  __mmask8 m;
-  int i, n_blocks = count >> 4;
-
-  f = (u8x64) _mm512_mask_loadu_epi64 (_mm512_setzero_si512 (), 0xc0,
-				       (__m512i *) (iv - 3));
-
-  while (n_blocks >= 16)
-    {
-      c[0] = src[0];
-      c[1] = src[1];
-      c[2] = src[2];
-      c[3] = src[3];
-
-      r[0] = c[0] ^ k[0];
-      r[1] = c[1] ^ k[0];
-      r[2] = c[2] ^ k[0];
-      r[3] = c[3] ^ k[0];
-
-      for (i = 1; i < rounds; i++)
-	{
-	  r[0] = aes_dec_round_x4 (r[0], k[i]);
-	  r[1] = aes_dec_round_x4 (r[1], k[i]);
-	  r[2] = aes_dec_round_x4 (r[2], k[i]);
-	  r[3] = aes_dec_round_x4 (r[3], k[i]);
-	}
-
-      r[0] = aes_dec_last_round_x4 (r[0], k[i]);
-      r[1] = aes_dec_last_round_x4 (r[1], k[i]);
-      r[2] = aes_dec_last_round_x4 (r[2], k[i]);
-      r[3] = aes_dec_last_round_x4 (r[3], k[i]);
-
-      dst[0] = r[0] ^= aes_cbc_dec_permute (f, c[0]);
-      dst[1] = r[1] ^= aes_cbc_dec_permute (c[0], c[1]);
-      dst[2] = r[2] ^= aes_cbc_dec_permute (c[1], c[2]);
-      dst[3] = r[3] ^= aes_cbc_dec_permute (c[2], c[3]);
-      f = c[3];
-
-      n_blocks -= 16;
-      src += 4;
-      dst += 4;
-    }
-
-  while (n_blocks > 0)
-    {
-      m = (1 << (n_blocks * 2)) - 1;
-      c[0] = (u8x64) _mm512_mask_loadu_epi64 ((__m512i) c[0], m,
-					      (__m512i *) src);
-      f = aes_cbc_dec_permute (f, c[0]);
-      r[0] = c[0] ^ k[0];
-      for (i = 1; i < rounds; i++)
-	r[0] = aes_dec_round_x4 (r[0], k[i]);
-      r[0] = aes_dec_last_round_x4 (r[0], k[i]);
-      _mm512_mask_storeu_epi64 ((__m512i *) dst, m, (__m512i) (r[0] ^ f));
-      f = c[0];
-      n_blocks -= 4;
-      src += 1;
-      dst += 1;
-    }
-}
-#endif
-#endif
-
-#ifdef __VAES__
-#define N 16
-#define u32xN u32x16
-#define u32xN_min_scalar u32x16_min_scalar
+#if defined(__VAES__) && defined(__AVX512F__)
+#define u8xN		  u8x64
+#define u32xN		  u32x16
+#define u32xN_min_scalar  u32x16_min_scalar
 #define u32xN_is_all_zero u32x16_is_all_zero
-#define u32xN_splat u32x16_splat
+#define u32xN_splat	  u32x16_splat
+#elif defined(__VAES__)
+#define u8xN		  u8x32
+#define u32xN		  u32x8
+#define u32xN_min_scalar  u32x8_min_scalar
+#define u32xN_is_all_zero u32x8_is_all_zero
+#define u32xN_splat	  u32x8_splat
 #else
-#define N 4
-#define u32xN u32x4
-#define u32xN_min_scalar u32x4_min_scalar
+#define u8xN		  u8x16
+#define u32xN		  u32x4
+#define u32xN_min_scalar  u32x4_min_scalar
 #define u32xN_is_all_zero u32x4_is_all_zero
-#define u32xN_splat u32x4_splat
+#define u32xN_splat	  u32x4_splat
 #endif

 static_always_inline u32
@@ -234,30 +50,22 @@ aes_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
 		     u32 n_ops, aes_key_size_t ks)
 {
   crypto_native_main_t *cm = &crypto_native_main;
-  crypto_native_per_thread_data_t *ptd =
-    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
   int rounds = AES_KEY_ROUNDS (ks);
   u8 placeholder[8192];
   u32 i, j, count, n_left = n_ops;
   u32xN placeholder_mask = { };
   u32xN len = { };
-  vnet_crypto_key_index_t key_index[N];
-  u8 *src[N] = { };
-  u8 *dst[N] = { };
-#if __VAES__
-  u8x64 r[N / 4] = { };
-  u8x64 k[15][N / 4] = { };
-  u8x16 *kq, *rq = (u8x16 *) r;
-#else
-  u8x16 r[N] = { };
-  u8x16 k[15][N] = { };
-#endif
+  vnet_crypto_key_index_t key_index[4 * N_AES_LANES];
+  u8 *src[4 * N_AES_LANES] = {};
+  u8 *dst[4 * N_AES_LANES] = {};
+  u8xN r[4] = {};
+  u8xN k[15][4] = {};

-  for (i = 0; i < N; i++)
+  for (i = 0; i < 4 * N_AES_LANES; i++)
     key_index[i] = ~0;

 more:
-  for (i = 0; i < N; i++)
+  for (i = 0; i < 4 * N_AES_LANES; i++)
     if (len[i] == 0)
       {
	if (n_left == 0)
@@ -269,20 +77,8 @@ more:
	  }
	else
	  {
-	    u8x16 t;
-	    if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
-	      {
-		t = ptd->cbc_iv[i];
-		*(u8x16u *) ops[0]->iv = t;
-		ptd->cbc_iv[i] = aes_enc_round (t, t);
-	      }
-	    else
-	      t = aes_block_load (ops[0]->iv);
-#if __VAES__
-	    rq[i] = t;
-#else
-	    r[i] = t;
-#endif
+	    u8x16 t = aes_block_load (ops[0]->iv);
+	    ((u8x16 *) r)[i] = t;

	    src[i] = ops[0]->src;
	    dst[i] = ops[0]->dst;
@@ -294,14 +90,7 @@ more:
		key_index[i] = ops[0]->key_index;
		kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
		for (j = 0; j < rounds + 1; j++)
-		  {
-#if __VAES__
-		    kq = (u8x16 *) k[j];
-		    kq[i] = kd->encrypt_key[j];
-#else
-		    k[j][i] = kd->encrypt_key[j];
-#endif
-		  }
+		  ((u8x16 *) k[j])[i] = kd->encrypt_key[j];
	      }
	    ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
	    n_left--;
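A note on the encrypt path: CBC encryption is serial within a single buffer (ciphertext block n is an input to block n+1), so the lane setup above and the inner loop in the next hunks fill the wide SIMD registers by interleaving up to 4 * N_AES_LANES independent ops, one 16-byte lane per op. N_AES_LANES comes from vppinfra/crypto/aes_cbc.h and is presumably the number of AES blocks per u8xN register (4, 2, or 1, matching the u8x64/u8x32/u8x16 cases). A minimal single-stream reference shows the dependency; toy_encrypt_block() is a made-up stand-in for the AES rounds, not VPP code:

#include <stdint.h>
#include <string.h>

static void
toy_encrypt_block (uint8_t b[16], const uint8_t key[16])
{
  /* placeholder for the AES round sequence, not real AES */
  for (int i = 0; i < 16; i++)
    b[i] ^= key[i];
}

static void
cbc_encrypt_ref (uint8_t *dst, const uint8_t *src, int len,
		 const uint8_t key[16], const uint8_t iv[16])
{
  uint8_t f[16]; /* feedback register, like one lane of 'r' above */
  memcpy (f, iv, 16);
  for (int off = 0; off < len; off += 16)
    {
      for (int i = 0; i < 16; i++)
	f[i] ^= src[off + i];	/* chain previous ciphertext into input */
      toy_encrypt_block (f, key);
      memcpy (dst + off, f, 16); /* block n's output feeds block n+1 */
    }
}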
@@ -315,11 +104,11 @@ more:

   for (i = 0; i < count; i += 16)
     {
-#ifdef __VAES__
+#if defined(__VAES__) && defined(__AVX512F__)
       r[0] = u8x64_xor3 (r[0], aes_block_load_x4 (src, i), k[0][0]);
-      r[1] = u8x64_xor3 (r[1], aes_block_load_x4 (src, i), k[0][1]);
-      r[2] = u8x64_xor3 (r[2], aes_block_load_x4 (src, i), k[0][2]);
-      r[3] = u8x64_xor3 (r[3], aes_block_load_x4 (src, i), k[0][3]);
+      r[1] = u8x64_xor3 (r[1], aes_block_load_x4 (src + 4, i), k[0][1]);
+      r[2] = u8x64_xor3 (r[2], aes_block_load_x4 (src + 8, i), k[0][2]);
+      r[3] = u8x64_xor3 (r[3], aes_block_load_x4 (src + 12, i), k[0][3]);

       for (j = 1; j < rounds; j++)
	{
@@ -337,6 +126,28 @@ more:
       aes_block_store_x4 (dst + 4, i, r[1]);
       aes_block_store_x4 (dst + 8, i, r[2]);
       aes_block_store_x4 (dst + 12, i, r[3]);
+#elif defined(__VAES__)
+      r[0] = u8x32_xor3 (r[0], aes_block_load_x2 (src, i), k[0][0]);
+      r[1] = u8x32_xor3 (r[1], aes_block_load_x2 (src + 2, i), k[0][1]);
+      r[2] = u8x32_xor3 (r[2], aes_block_load_x2 (src + 4, i), k[0][2]);
+      r[3] = u8x32_xor3 (r[3], aes_block_load_x2 (src + 6, i), k[0][3]);
+
+      for (j = 1; j < rounds; j++)
+	{
+	  r[0] = aes_enc_round_x2 (r[0], k[j][0]);
+	  r[1] = aes_enc_round_x2 (r[1], k[j][1]);
+	  r[2] = aes_enc_round_x2 (r[2], k[j][2]);
+	  r[3] = aes_enc_round_x2 (r[3], k[j][3]);
+	}
+      r[0] = aes_enc_last_round_x2 (r[0], k[j][0]);
+      r[1] = aes_enc_last_round_x2 (r[1], k[j][1]);
+      r[2] = aes_enc_last_round_x2 (r[2], k[j][2]);
+      r[3] = aes_enc_last_round_x2 (r[3], k[j][3]);
+
+      aes_block_store_x2 (dst, i, r[0]);
+      aes_block_store_x2 (dst + 2, i, r[1]);
+      aes_block_store_x2 (dst + 4, i, r[2]);
+      aes_block_store_x2 (dst + 6, i, r[3]);
 #else
 #if __x86_64__
       r[0] = u8x16_xor3 (r[0], aes_block_load (src[0] + i), k[0][0]);
@@ -346,16 +157,16 @@ more:

       for (j = 1; j < rounds; j++)
	{
-	  r[0] = aes_enc_round (r[0], k[j][0]);
-	  r[1] = aes_enc_round (r[1], k[j][1]);
-	  r[2] = aes_enc_round (r[2], k[j][2]);
-	  r[3] = aes_enc_round (r[3], k[j][3]);
+	  r[0] = aes_enc_round_x1 (r[0], k[j][0]);
+	  r[1] = aes_enc_round_x1 (r[1], k[j][1]);
+	  r[2] = aes_enc_round_x1 (r[2], k[j][2]);
+	  r[3] = aes_enc_round_x1 (r[3], k[j][3]);
	}

-      r[0] = aes_enc_last_round (r[0], k[j][0]);
-      r[1] = aes_enc_last_round (r[1], k[j][1]);
-      r[2] = aes_enc_last_round (r[2], k[j][2]);
-      r[3] = aes_enc_last_round (r[3], k[j][3]);
+      r[0] = aes_enc_last_round_x1 (r[0], k[j][0]);
+      r[1] = aes_enc_last_round_x1 (r[1], k[j][1]);
+      r[2] = aes_enc_last_round_x1 (r[2], k[j][2]);
+      r[3] = aes_enc_last_round_x1 (r[3], k[j][3]);

       aes_block_store (dst[0] + i, r[0]);
       aes_block_store (dst[1] + i, r[1]);
@@ -387,7 +198,7 @@ more:

   len -= u32xN_splat (count);

-  for (i = 0; i < N; i++)
+  for (i = 0; i < 4 * N_AES_LANES; i++)
     {
       src[i] += count;
       dst[i] += count;
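The decrypt dispatch in the next hunk can use 256- or 512-bit loads on a single op because CBC decryption, unlike encryption, has no serial chain: each plaintext block is decrypt(c[n]) ^ c[n-1], which depends only on ciphertext. A single-stream reference, reusing the toy helper above (the XOR stand-in is its own inverse) and assuming dst and src do not overlap:

static void
cbc_decrypt_ref (uint8_t *dst, const uint8_t *src, int len,
		 const uint8_t key[16], const uint8_t iv[16])
{
  /* every iteration reads only ciphertext, so all blocks could be
     decrypted in parallel before the final XOR with the previous
     ciphertext block */
  for (int off = 0; off < len; off += 16)
    {
      uint8_t b[16];
      const uint8_t *prev = off ? src + off - 16 : iv;
      memcpy (b, src + off, 16);
      toy_encrypt_block (b, key); /* XOR stand-in doubles as its inverse */
      for (int i = 0; i < 16; i++)
	dst[off + i] = b[i] ^ prev[i];
    }
}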
@@ -416,8 +227,11 @@ aes_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
   ASSERT (n_ops >= 1);

 decrypt:
-#ifdef __VAES__
-  vaes_cbc_dec (kd->decrypt_key, (u8x64u *) op->src, (u8x64u *) op->dst,
+#if defined(__VAES__) && defined(__AVX512F__)
+  aes4_cbc_dec (kd->decrypt_key, (u8x64u *) op->src, (u8x64u *) op->dst,
+		(u8x16u *) op->iv, op->len, rounds);
+#elif defined(__VAES__)
+  aes2_cbc_dec (kd->decrypt_key, (u8x32u *) op->src, (u8x32u *) op->dst,
		(u8x16u *) op->iv, op->len, rounds);
 #else
   aes_cbc_dec (kd->decrypt_key, (u8x16u *) op->src, (u8x16u *) op->dst,
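The final hunk replaces the per-ISA init functions (crypto_native_aes_cbc_init_icl/_skx/_neon/_hsw/_slm) with declarative registrations: each op and key handler carries a probe callback that returns a priority, or -1 when the CPU lacks the required features, and the framework keeps the best-scoring variant. A hypothetical sketch of that selection with made-up names (the real plumbing is behind CRYPTO_NATIVE_OP_HANDLER / CRYPTO_NATIVE_KEY_HANDLER in crypto_native.h):

typedef struct
{
  int (*probe) (void); /* priority, or -1 if unusable on this CPU */
  void *fn;		/* handler to install if this variant wins */
} handler_candidate_t;

static void *
select_handler (handler_candidate_t *candidates, int n)
{
  int best = -1;
  void *fn = 0;
  for (int i = 0; i < n; i++)
    {
      int pri = candidates[i].probe ();
      if (pri > best)
	{
	  best = pri;
	  fn = candidates[i].fn;
	}
    }
  return fn; /* 0 if no variant is usable */
}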
@@ -435,99 +249,91 @@ decrypt:
   return n_ops;
 }

-static_always_inline void *
-aes_cbc_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks)
+static int
+aes_cbc_cpu_probe ()
+{
+#if defined(__VAES__) && defined(__AVX512F__)
+  if (clib_cpu_supports_vaes () && clib_cpu_supports_avx512f ())
+    return 50;
+#elif defined(__VAES__)
+  if (clib_cpu_supports_vaes ())
+    return 40;
+#elif defined(__AVX512F__)
+  if (clib_cpu_supports_avx512f ())
+    return 30;
+#elif defined(__AVX2__)
+  if (clib_cpu_supports_avx2 ())
+    return 20;
+#elif __AES__
+  if (clib_cpu_supports_aes ())
+    return 10;
+#elif __aarch64__
+  if (clib_cpu_supports_aarch64_aes ())
+    return 10;
+#endif
+  return -1;
+}
+
+static void *
+aes_cbc_key_exp_128 (vnet_crypto_key_t *key)
 {
-  u8x16 e[15], d[15];
   aes_cbc_key_data_t *kd;
   kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
-  aes_key_expand (e, key->data, ks);
-  aes_key_enc_to_dec (e, d, ks);
-  for (int i = 0; i < AES_KEY_ROUNDS (ks) + 1; i++)
-    {
-#if __VAES__
-      kd->decrypt_key[i] = (u8x64) _mm512_broadcast_i64x2 ((__m128i) d[i]);
-#else
-      kd->decrypt_key[i] = d[i];
-#endif
-      kd->encrypt_key[i] = e[i];
-    }
+  clib_aes128_cbc_key_expand (kd, key->data);
   return kd;
 }

-#define foreach_aes_cbc_handler_type _(128) _(192) _(256)
-
-#define _(x) \
-static u32 aes_ops_dec_aes_cbc_##x \
-(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
-static u32 aes_ops_enc_aes_cbc_##x \
-(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
-static void * aes_cbc_key_exp_##x (vnet_crypto_key_t *key) \
-{ return aes_cbc_key_exp (key, AES_KEY_##x); }
-
-foreach_aes_cbc_handler_type;
-#undef _
-
-#include <fcntl.h>
+static void *
+aes_cbc_key_exp_192 (vnet_crypto_key_t *key)
+{
+  aes_cbc_key_data_t *kd;
+  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
+  clib_aes192_cbc_key_expand (kd, key->data);
+  return kd;
+}

-clib_error_t *
-#ifdef __VAES__
-crypto_native_aes_cbc_init_icl (vlib_main_t * vm)
-#elif __AVX512F__
-crypto_native_aes_cbc_init_skx (vlib_main_t * vm)
-#elif __aarch64__
-crypto_native_aes_cbc_init_neon (vlib_main_t * vm)
-#elif __AVX2__
-crypto_native_aes_cbc_init_hsw (vlib_main_t * vm)
-#else
-crypto_native_aes_cbc_init_slm (vlib_main_t * vm)
-#endif
+static void *
+aes_cbc_key_exp_256 (vnet_crypto_key_t *key)
 {
-  crypto_native_main_t *cm = &crypto_native_main;
-  crypto_native_per_thread_data_t *ptd;
-  clib_error_t *err = 0;
-  int fd;
+  aes_cbc_key_data_t *kd;
+  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
+  clib_aes256_cbc_key_expand (kd, key->data);
+  return kd;
+}

-  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
-    return clib_error_return_unix (0, "failed to open '/dev/urandom'");
+#define foreach_aes_cbc_handler_type _ (128) _ (192) _ (256)
+
+#define _(x)                                                                  \
+  static u32 aes_ops_enc_aes_cbc_##x (vlib_main_t *vm,                        \
+				      vnet_crypto_op_t *ops[], u32 n_ops)     \
+  {                                                                           \
+    return aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##x);                 \
+  }                                                                           \
+                                                                              \
+  CRYPTO_NATIVE_OP_HANDLER (aes_##x##_cbc_enc) = {                            \
+    .op_id = VNET_CRYPTO_OP_AES_##x##_CBC_ENC,                                \
+    .fn = aes_ops_enc_aes_cbc_##x,                                            \
+    .probe = aes_cbc_cpu_probe,                                               \
+  };                                                                          \
+                                                                              \
+  static u32 aes_ops_dec_aes_cbc_##x (vlib_main_t *vm,                        \
+				      vnet_crypto_op_t *ops[], u32 n_ops)     \
+  {                                                                           \
+    return aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##x);                 \
+  }                                                                           \
+                                                                              \
+  CRYPTO_NATIVE_OP_HANDLER (aes_##x##_cbc_dec) = {                            \
+    .op_id = VNET_CRYPTO_OP_AES_##x##_CBC_DEC,                                \
+    .fn = aes_ops_dec_aes_cbc_##x,                                            \
+    .probe = aes_cbc_cpu_probe,                                               \
+  };                                                                          \
+                                                                              \
+  CRYPTO_NATIVE_KEY_HANDLER (aes_##x##_cbc) = {                               \
+    .alg_id = VNET_CRYPTO_ALG_AES_##x##_CBC,                                  \
+    .key_fn = aes_cbc_key_exp_##x,                                            \
+    .probe = aes_cbc_cpu_probe,                                               \
+  };

-  /* *INDENT-OFF* */
-  vec_foreach (ptd, cm->per_thread_data)
-    {
-      for (int i = 0; i < 4; i++)
-	{
-	  if (read (fd, ptd->cbc_iv, sizeof (ptd->cbc_iv)) !=
-	      sizeof (ptd->cbc_iv))
-	    {
-	      err = clib_error_return_unix (0, "'/dev/urandom' read failure");
-	      goto error;
-	    }
-	}
-    }
-  /* *INDENT-ON* */
-
-#define _(x) \
-  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
-				    VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
-				    aes_ops_enc_aes_cbc_##x); \
-  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
-				    VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
-				    aes_ops_dec_aes_cbc_##x); \
-  cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_CBC] = aes_cbc_key_exp_##x;
-  foreach_aes_cbc_handler_type;
+foreach_aes_cbc_handler;
 #undef _
-
-error:
-  close (fd);
-  return err;
-}
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
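For reference, here is roughly what the registration macro above produces for x = 128 (a mechanical expansion with whitespace normalized; CRYPTO_NATIVE_OP_HANDLER and CRYPTO_NATIVE_KEY_HANDLER are presumably constructor-style registration macros from crypto_native.h):

static u32
aes_ops_enc_aes_cbc_128 (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)
{
  return aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_128);
}

CRYPTO_NATIVE_OP_HANDLER (aes_128_cbc_enc) = {
  .op_id = VNET_CRYPTO_OP_AES_128_CBC_ENC,
  .fn = aes_ops_enc_aes_cbc_128,
  .probe = aes_cbc_cpu_probe,
};

static u32
aes_ops_dec_aes_cbc_128 (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)
{
  return aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_128);
}

CRYPTO_NATIVE_OP_HANDLER (aes_128_cbc_dec) = {
  .op_id = VNET_CRYPTO_OP_AES_128_CBC_DEC,
  .fn = aes_ops_dec_aes_cbc_128,
  .probe = aes_cbc_cpu_probe,
};

CRYPTO_NATIVE_KEY_HANDLER (aes_128_cbc) = {
  .alg_id = VNET_CRYPTO_ALG_AES_128_CBC,
  .key_fn = aes_cbc_key_exp_128,
  .probe = aes_cbc_cpu_probe,
};

The same expansion is repeated for the 192- and 256-bit key sizes, so one probe function ranks all six op handlers and all three key handlers at once.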