 src/cmake/cpu.cmake                    |   1
 src/plugins/crypto_ia32/CMakeLists.txt |   3
 src/plugins/crypto_ia32/aes_cbc.c      | 282
 src/plugins/crypto_ia32/aes_gcm.c      |   4
 src/plugins/crypto_ia32/crypto_ia32.h  |   2
 src/plugins/crypto_ia32/main.c         |   8
 6 files changed, 250 insertions(+), 50 deletions(-)
diff --git a/src/cmake/cpu.cmake b/src/cmake/cpu.cmake
index e5061fca4e7..e407978ea37 100644
--- a/src/cmake/cpu.cmake
+++ b/src/cmake/cpu.cmake
@@ -65,6 +65,7 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
     list(APPEND MARCH_VARIANTS "avx2\;-march=core-avx2 -mtune=core-avx2")
   endif()
   check_c_compiler_flag("-march=skylake-avx512" compiler_flag_march_skylake_avx512)
+  check_c_compiler_flag("-march=icelake-client" compiler_flag_march_icelake_client)
   if(compiler_flag_march_skylake_avx512)
     list(APPEND MARCH_VARIANTS "avx512\;-march=skylake-avx512 -mtune=skylake-avx512")
   endif()
diff --git a/src/plugins/crypto_ia32/CMakeLists.txt b/src/plugins/crypto_ia32/CMakeLists.txt
index cab1c35e0f2..0a8b560731a 100644
--- a/src/plugins/crypto_ia32/CMakeLists.txt
+++ b/src/plugins/crypto_ia32/CMakeLists.txt
@@ -22,6 +22,9 @@ list(APPEND VARIANTS "avx2\;-march=core-avx2")
 if(compiler_flag_march_skylake_avx512)
   list(APPEND VARIANTS "avx512\;-march=skylake-avx512")
 endif()
+if(compiler_flag_march_icelake_client)
+  list(APPEND VARIANTS "vaesni\;-march=icelake-client")
+endif()
 
 foreach(VARIANT ${VARIANTS})
   list(GET VARIANT 0 v)
diff --git a/src/plugins/crypto_ia32/aes_cbc.c b/src/plugins/crypto_ia32/aes_cbc.c
index 4ad01b52b29..910f8ca7413 100644
--- a/src/plugins/crypto_ia32/aes_cbc.c
+++ b/src/plugins/crypto_ia32/aes_cbc.c
@@ -29,27 +29,80 @@
 typedef struct
 {
   __m128i encrypt_key[15];
+#if __VAES__
+  __m512i decrypt_key[15];
+#else
   __m128i decrypt_key[15];
+#endif
 } aes_cbc_key_data_t;
 
+static_always_inline __m128i
+aes_block_load (u8 * p)
+{
+  return _mm_loadu_si128 ((__m128i *) p);
+}
+
+static_always_inline void
+aes_block_store (u8 * p, __m128i r)
+{
+  _mm_storeu_si128 ((__m128i *) p, r);
+}
+
+static_always_inline __m128i __clib_unused
+xor3 (__m128i a, __m128i b, __m128i c)
+{
+#if __AVX512F__
+  return _mm_ternarylogic_epi32 (a, b, c, 0x96);
+#endif
+  return a ^ b ^ c;
+}
+
+#if __VAES__
+static_always_inline __m512i
+xor3_x4 (__m512i a, __m512i b, __m512i c)
+{
+  return _mm512_ternarylogic_epi32 (a, b, c, 0x96);
+}
+
+static_always_inline __m512i
+aes_block_load_x4 (u8 * src[], int i)
+{
+  __m512i r = { };
+  r = _mm512_inserti64x2 (r, aes_block_load (src[0] + i), 0);
+  r = _mm512_inserti64x2 (r, aes_block_load (src[1] + i), 1);
+  r = _mm512_inserti64x2 (r, aes_block_load (src[2] + i), 2);
+  r = _mm512_inserti64x2 (r, aes_block_load (src[3] + i), 3);
+  return r;
+}
+
 static_always_inline void
+aes_block_store_x4 (u8 * dst[], int i, __m512i r)
+{
+  aes_block_store (dst[0] + i, _mm512_extracti64x2_epi64 (r, 0));
+  aes_block_store (dst[1] + i, _mm512_extracti64x2_epi64 (r, 1));
+  aes_block_store (dst[2] + i, _mm512_extracti64x2_epi64 (r, 2));
+  aes_block_store (dst[3] + i, _mm512_extracti64x2_epi64 (r, 3));
+}
+#endif
+
+static_always_inline void __clib_unused
 aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
 	     aesni_key_size_t rounds)
 {
   __m128i r0, r1, r2, r3, c0, c1, c2, c3, f;
   int i;
 
-  f = _mm_loadu_si128 ((__m128i *) iv);
+  f = aes_block_load (iv);
 
   while (count >= 64)
     {
      _mm_prefetch (src + 128, _MM_HINT_T0);
      _mm_prefetch (dst + 128, _MM_HINT_T0);
 
-      c0 = _mm_loadu_si128 (((__m128i *) src + 0));
-      c1 = _mm_loadu_si128 (((__m128i *) src + 1));
-      c2 = _mm_loadu_si128 (((__m128i *) src + 2));
-      c3 = _mm_loadu_si128 (((__m128i *) src + 3));
+      c0 = aes_block_load (src);
+      c1 = aes_block_load (src + 16);
+      c2 = aes_block_load (src + 32);
+      c3 = aes_block_load (src + 48);
 
       r0 = c0 ^ k[0];
       r1 = c1 ^ k[0];
@@ -69,10 +122,10 @@ aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
       r2 = _mm_aesdeclast_si128 (r2, k[i]);
       r3 = _mm_aesdeclast_si128 (r3, k[i]);
 
-      _mm_storeu_si128 ((__m128i *) dst + 0, r0 ^ f);
-      _mm_storeu_si128 ((__m128i *) dst + 1, r1 ^ c0);
-      _mm_storeu_si128 ((__m128i *) dst + 2, r2 ^ c1);
-      _mm_storeu_si128 ((__m128i *) dst + 3, r3 ^ c2);
+      aes_block_store (dst, r0 ^ f);
+      aes_block_store (dst + 16, r1 ^ c0);
+      aes_block_store (dst + 32, r2 ^ c1);
+      aes_block_store (dst + 48, r3 ^ c2);
 
       f = c3;
 
@@ -83,12 +136,12 @@ aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
 
   while (count > 0)
     {
-      c0 = _mm_loadu_si128 (((__m128i *) src));
+      c0 = aes_block_load (src);
       r0 = c0 ^ k[0];
       for (i = 1; i < rounds; i++)
	r0 = _mm_aesdec_si128 (r0, k[i]);
       r0 = _mm_aesdeclast_si128 (r0, k[i]);
-      _mm_storeu_si128 ((__m128i *) dst, r0 ^ f);
+      aes_block_store (dst, r0 ^ f);
       f = c0;
       count -= 16;
       src += 16;
@@ -96,6 +149,91 @@ aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
     }
 }
 
+#ifdef __VAES__
+static_always_inline void
+vaes_cbc_dec (__m512i * k, u8 * src, u8 * dst, u8 * iv, int count,
+	      aesni_key_size_t rounds)
+{
+  __m512i permute = { 6, 7, 8, 9, 10, 11, 12, 13 };
+  __m512i r0, r1, r2, r3, c0, c1, c2, c3, f = { };
+  __mmask8 m;
+  int i, n_blocks = count >> 4;
+
+  f = _mm512_mask_loadu_epi64 (f, 0xc0, (__m512i *) (iv - 48));
+
+  while (n_blocks >= 16)
+    {
+      c0 = _mm512_loadu_si512 ((__m512i *) src);
+      c1 = _mm512_loadu_si512 ((__m512i *) (src + 64));
+      c2 = _mm512_loadu_si512 ((__m512i *) (src + 128));
+      c3 = _mm512_loadu_si512 ((__m512i *) (src + 192));
+
+      r0 = c0 ^ k[0];
+      r1 = c1 ^ k[0];
+      r2 = c2 ^ k[0];
+      r3 = c3 ^ k[0];
+
+      for (i = 1; i < rounds; i++)
+	{
+	  r0 = _mm512_aesdec_epi128 (r0, k[i]);
+	  r1 = _mm512_aesdec_epi128 (r1, k[i]);
+	  r2 = _mm512_aesdec_epi128 (r2, k[i]);
+	  r3 = _mm512_aesdec_epi128 (r3, k[i]);
+	}
+
+      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
+      r1 = _mm512_aesdeclast_epi128 (r1, k[i]);
+      r2 = _mm512_aesdeclast_epi128 (r2, k[i]);
+      r3 = _mm512_aesdeclast_epi128 (r3, k[i]);
+
+      r0 ^= _mm512_permutex2var_epi64 (f, permute, c0);
+      _mm512_storeu_si512 ((__m512i *) dst, r0);
+
+      r1 ^= _mm512_permutex2var_epi64 (c0, permute, c1);
+      _mm512_storeu_si512 ((__m512i *) (dst + 64), r1);
+
+      r2 ^= _mm512_permutex2var_epi64 (c1, permute, c2);
+      _mm512_storeu_si512 ((__m512i *) (dst + 128), r2);
+
+      r3 ^= _mm512_permutex2var_epi64 (c2, permute, c3);
+      _mm512_storeu_si512 ((__m512i *) (dst + 192), r3);
+
+      f = c3;
+
+      n_blocks -= 16;
+      src += 256;
+      dst += 256;
+    }
+
+  while (n_blocks > 0)
+    {
+      m = (1 << (n_blocks * 2)) - 1;
+      c0 = _mm512_mask_loadu_epi64 (c0, m, (__m512i *) src);
+      f = _mm512_permutex2var_epi64 (f, permute, c0);
+      r0 = c0 ^ k[0];
+      for (i = 1; i < rounds; i++)
+	r0 = _mm512_aesdec_epi128 (r0, k[i]);
+      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
+      _mm512_mask_storeu_epi64 ((__m512i *) dst, m, r0 ^ f);
+      f = c0;
+      n_blocks -= 4;
+      src += 64;
+      dst += 64;
+    }
+}
+#endif
+
+#ifdef __VAES__
+#define N 16
+#define u32xN u32x16
+#define u32xN_min_scalar u32x16_min_scalar
+#define u32xN_is_all_zero u32x16_is_all_zero
+#else
+#define N 4
+#define u32xN u32x4
+#define u32xN_min_scalar u32x4_min_scalar
+#define u32xN_is_all_zero u32x4_is_all_zero
+#endif
+
 static_always_inline u32
 aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
		       u32 n_ops, aesni_key_size_t ks)
@@ -105,16 +243,25 @@ aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
					     vm->thread_index);
   int rounds = AESNI_KEY_ROUNDS (ks);
   u8 dummy[8192];
-  u8 *src[4] = { };
-  u8 *dst[4] = { };
-  vnet_crypto_key_index_t key_index[4] = { ~0, ~0, ~0, ~0 };
-  u32x4 dummy_mask = { };
-  u32x4 len = { };
   u32 i, j, count, n_left = n_ops;
-  __m128i r[4] = { }, k[4][rounds + 1];
+  u32xN dummy_mask = { };
+  u32xN len = { };
+  vnet_crypto_key_index_t key_index[N];
+  u8 *src[N] = { };
+  u8 *dst[N] = { };
+  /* *INDENT-OFF* */
+  union
+  {
+    __m128i x1[N];
+    __m512i x4[N / 4];
+  } r = { }, k[15] = { };
+  /* *INDENT-ON* */
+
+  for (i = 0; i < N; i++)
+    key_index[i] = ~0;
 
 more:
-  for (i = 0; i < 4; i++)
+  for (i = 0; i < N; i++)
     if (len[i] == 0)
       {
	if (n_left == 0)
@@ -128,12 +275,13 @@ more:
	  {
	    if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
	      {
-		r[i] = ptd->cbc_iv[i];
-		_mm_storeu_si128 ((__m128i *) ops[0]->iv, r[i]);
-		ptd->cbc_iv[i] = _mm_aesenc_si128 (r[i], r[i]);
+		r.x1[i] = ptd->cbc_iv[i];
+		aes_block_store (ops[0]->iv, r.x1[i]);
+		ptd->cbc_iv[i] = _mm_aesenc_si128 (r.x1[i], r.x1[i]);
	      }
	    else
-	      r[i] = _mm_loadu_si128 ((__m128i *) ops[0]->iv);
+	      r.x1[i] = aes_block_load (ops[0]->iv);
+
	    src[i] = ops[0]->src;
	    dst[i] = ops[0]->dst;
	    len[i] = ops[0]->len;
@@ -143,8 +291,8 @@ more:
	      aes_cbc_key_data_t *kd;
	      key_index[i] = ops[0]->key_index;
	      kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
-	      clib_memcpy_fast (k[i], kd->encrypt_key,
-				(rounds + 1) * sizeof (__m128i));
+	      for (j = 0; j < rounds + 1; j++)
+		k[j].x1[i] = kd->encrypt_key[j];
	    }
	  ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
	  n_left--;
@@ -152,37 +300,61 @@ more:
	}
       }
 
-  count = u32x4_min_scalar (len);
+  count = u32xN_min_scalar (len);
 
   ASSERT (count % 16 == 0);
 
   for (i = 0; i < count; i += 16)
     {
-      r[0] ^= _mm_loadu_si128 ((__m128i *) (src[0] + i)) ^ k[0][0];
-      r[1] ^= _mm_loadu_si128 ((__m128i *) (src[1] + i)) ^ k[1][0];
-      r[2] ^= _mm_loadu_si128 ((__m128i *) (src[2] + i)) ^ k[2][0];
-      r[3] ^= _mm_loadu_si128 ((__m128i *) (src[3] + i)) ^ k[3][0];
+#ifdef __VAES__
+      r.x4[0] = xor3_x4 (r.x4[0], aes_block_load_x4 (src, i), k[0].x4[0]);
+      r.x4[1] = xor3_x4 (r.x4[1], aes_block_load_x4 (src + 4, i), k[0].x4[1]);
+      r.x4[2] = xor3_x4 (r.x4[2], aes_block_load_x4 (src + 8, i), k[0].x4[2]);
+      r.x4[3] = xor3_x4 (r.x4[3], aes_block_load_x4 (src + 12, i), k[0].x4[3]);
+
+      for (j = 1; j < rounds; j++)
+	{
+	  r.x4[0] = _mm512_aesenc_epi128 (r.x4[0], k[j].x4[0]);
+	  r.x4[1] = _mm512_aesenc_epi128 (r.x4[1], k[j].x4[1]);
+	  r.x4[2] = _mm512_aesenc_epi128 (r.x4[2], k[j].x4[2]);
+	  r.x4[3] = _mm512_aesenc_epi128 (r.x4[3], k[j].x4[3]);
+	}
+      r.x4[0] = _mm512_aesenclast_epi128 (r.x4[0], k[j].x4[0]);
+      r.x4[1] = _mm512_aesenclast_epi128 (r.x4[1], k[j].x4[1]);
+      r.x4[2] = _mm512_aesenclast_epi128 (r.x4[2], k[j].x4[2]);
+      r.x4[3] = _mm512_aesenclast_epi128 (r.x4[3], k[j].x4[3]);
+
+      aes_block_store_x4 (dst, i, r.x4[0]);
+      aes_block_store_x4 (dst + 4, i, r.x4[1]);
+      aes_block_store_x4 (dst + 8, i, r.x4[2]);
+      aes_block_store_x4 (dst + 12, i, r.x4[3]);
+#else
+      r.x1[0] = xor3 (r.x1[0], aes_block_load (src[0] + i), k[0].x1[0]);
+      r.x1[1] = xor3 (r.x1[1], aes_block_load (src[1] + i), k[0].x1[1]);
+      r.x1[2] = xor3 (r.x1[2], aes_block_load (src[2] + i), k[0].x1[2]);
+      r.x1[3] = xor3 (r.x1[3], aes_block_load (src[3] + i), k[0].x1[3]);
 
       for (j = 1; j < rounds; j++)
	{
-	  r[0] = _mm_aesenc_si128 (r[0], k[0][j]);
-	  r[1] = _mm_aesenc_si128 (r[1], k[1][j]);
-	  r[2] = _mm_aesenc_si128 (r[2], k[2][j]);
-	  r[3] = _mm_aesenc_si128 (r[3], k[3][j]);
+	  r.x1[0] = _mm_aesenc_si128 (r.x1[0], k[j].x1[0]);
+	  r.x1[1] = _mm_aesenc_si128 (r.x1[1], k[j].x1[1]);
+	  r.x1[2] = _mm_aesenc_si128 (r.x1[2], k[j].x1[2]);
+	  r.x1[3] = _mm_aesenc_si128 (r.x1[3], k[j].x1[3]);
	}
 
-      r[0] = _mm_aesenclast_si128 (r[0], k[0][j]);
-      r[1] = _mm_aesenclast_si128 (r[1], k[1][j]);
-      r[2] = _mm_aesenclast_si128 (r[2], k[2][j]);
-      r[3] = _mm_aesenclast_si128 (r[3], k[3][j]);
+      r.x1[0] = _mm_aesenclast_si128 (r.x1[0], k[j].x1[0]);
+      r.x1[1] = _mm_aesenclast_si128 (r.x1[1], k[j].x1[1]);
+      r.x1[2] = _mm_aesenclast_si128 (r.x1[2], k[j].x1[2]);
+      r.x1[3] = _mm_aesenclast_si128 (r.x1[3], k[j].x1[3]);
 
-      _mm_storeu_si128 ((__m128i *) (dst[0] + i), r[0]);
-      _mm_storeu_si128 ((__m128i *) (dst[1] + i), r[1]);
-      _mm_storeu_si128 ((__m128i *) (dst[2] + i), r[2]);
-      _mm_storeu_si128 ((__m128i *) (dst[3] + i), r[3]);
+      aes_block_store (dst[0] + i, r.x1[0]);
+      aes_block_store (dst[1] + i, r.x1[1]);
+      aes_block_store (dst[2] + i, r.x1[2]);
+      aes_block_store (dst[3] + i, r.x1[3]);
+#endif
     }
 
-  for (i = 0; i < 4; i++)
+  for (i = 0; i < N; i++)
     {
       src[i] += count;
       dst[i] += count;
@@ -192,7 +364,7 @@ more:
   if (n_left > 0)
     goto more;
 
-  if (!u32x4_is_all_zero (len & dummy_mask))
+  if (!u32xN_is_all_zero (len & dummy_mask))
     goto more;
 
   return n_ops;
@@ -211,7 +383,11 @@ aesni_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
   ASSERT (n_ops >= 1);
 
 decrypt:
+#ifdef __VAES__
+  vaes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
+#else
   aes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
+#endif
   op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
 
   if (--n_left)
@@ -227,11 +403,21 @@ decrypt:
 static_always_inline void *
 aesni_cbc_key_exp (vnet_crypto_key_t * key, aesni_key_size_t ks)
 {
+  __m128i e[15], d[15];
   aes_cbc_key_data_t *kd;
   kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
-  aes_key_expand (kd->encrypt_key, key->data, ks);
-  aes_key_expand (kd->decrypt_key, key->data, ks);
-  aes_key_enc_to_dec (kd->decrypt_key, ks);
+  aes_key_expand (e, key->data, ks);
+  aes_key_expand (d, key->data, ks);
+  aes_key_enc_to_dec (d, ks);
+  for (int i = 0; i < AESNI_KEY_ROUNDS (ks) + 1; i++)
+    {
+#if __VAES__
+      kd->decrypt_key[i] = _mm512_broadcast_i64x2 (d[i]);
+#else
+      kd->decrypt_key[i] = d[i];
+#endif
+      kd->encrypt_key[i] = e[i];
+    }
   return kd;
 }
@@ -253,7 +439,9 @@ foreach_aesni_cbc_handler_type;
 #include <fcntl.h>
 
 clib_error_t *
-#ifdef __AVX512F__
+#ifdef __VAES__
+crypto_ia32_aesni_cbc_init_vaes (vlib_main_t * vm)
+#elif __AVX512F__
 crypto_ia32_aesni_cbc_init_avx512 (vlib_main_t * vm)
 #elif __AVX2__
 crypto_ia32_aesni_cbc_init_avx2 (vlib_main_t * vm)
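A standalone illustration (not part of the patch) of the chaining trick in vaes_cbc_dec above: _mm512_permutex2var_epi64 selects 64-bit lanes from two sources, with indices 0-7 addressing the first operand and 8-15 the second, so the index vector { 6, 7, 8, 9, 10, 11, 12, 13 } concatenates the last 16-byte block of f with the first three blocks of c0 — exactly the previous-ciphertext vector that CBC decryption XORs against. That is also why the IV load uses mask 0xc0: the IV only needs to occupy lanes 6-7, the slot the permute reads on the first iteration. The sketch below assumes gcc or clang and an AVX-512F capable machine (build with e.g. cc -O2 -mavx512f demo.c):

/* Standalone sketch, not from the patch: demonstrates the lane
 * selection performed by the { 6, 7, 8, 9, 10, 11, 12, 13 } index
 * vector used in vaes_cbc_dec. */
#include <immintrin.h>
#include <stdio.h>

int
main (void)
{
  __m512i permute = { 6, 7, 8, 9, 10, 11, 12, 13 };
  __m512i f = { 0, 1, 2, 3, 4, 5, 6, 7 };		/* lanes 0-7 */
  __m512i c0 = { 10, 11, 12, 13, 14, 15, 16, 17 };	/* lanes 8-15 */
  long long v[8];

  _mm512_storeu_si512 ((__m512i *) v,
		       _mm512_permutex2var_epi64 (f, permute, c0));
  for (int i = 0; i < 8; i++)
    printf ("%lld ", v[i]);	/* prints: 6 7 10 11 12 13 14 15 */
  printf ("\n");
  return 0;
}

The output is the last block of f (lanes 6-7) followed by the first three blocks of c0, i.e. blocks c[-1], c[0], c[1], c[2] relative to the four blocks being decrypted.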
diff --git a/src/plugins/crypto_ia32/aes_gcm.c b/src/plugins/crypto_ia32/aes_gcm.c
index a21ecf3e6df..e45dda79faf 100644
--- a/src/plugins/crypto_ia32/aes_gcm.c
+++ b/src/plugins/crypto_ia32/aes_gcm.c
@@ -746,7 +746,9 @@ foreach_aesni_gcm_handler_type;
 #undef _
 
 clib_error_t *
-#ifdef __AVX512F__
+#ifdef __VAES__
+crypto_ia32_aesni_gcm_init_vaes (vlib_main_t * vm)
+#elif __AVX512F__
 crypto_ia32_aesni_gcm_init_avx512 (vlib_main_t * vm)
 #elif __AVX2__
 crypto_ia32_aesni_gcm_init_avx2 (vlib_main_t * vm)
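A quick standalone check (not from the patch) of the 0x96 immediate used by the xor3/xor3_x4 helpers added to aes_cbc.c above: the ternary-logic instruction treats the immediate as an 8-entry truth table indexed by the bits of the three operands, and 0x96 = 0b10010110 is set exactly for odd-parity inputs, so one vpternlog replaces two XORs:

/* Standalone check, not from the patch: 0x96 is the truth table of
 * a ^ b ^ c.  Bit (a<<2 | b<<1 | c) of the immediate is the output
 * for that input combination. */
#include <stdio.h>

int
main (void)
{
  unsigned imm = 0x96;
  for (unsigned a = 0; a < 2; a++)
    for (unsigned b = 0; b < 2; b++)
      for (unsigned c = 0; c < 2; c++)
	printf ("a=%u b=%u c=%u -> table=%u xor=%u\n", a, b, c,
		(imm >> ((a << 2) | (b << 1) | c)) & 1, a ^ b ^ c);
  return 0;
}

The two columns agree for all eight input combinations.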
diff --git a/src/plugins/crypto_ia32/crypto_ia32.h b/src/plugins/crypto_ia32/crypto_ia32.h
index f80a0d09a3b..e2b30071854 100644
--- a/src/plugins/crypto_ia32/crypto_ia32.h
+++ b/src/plugins/crypto_ia32/crypto_ia32.h
@@ -38,10 +38,12 @@ extern crypto_ia32_main_t crypto_ia32_main;
 clib_error_t *crypto_ia32_aesni_cbc_init_sse42 (vlib_main_t * vm);
 clib_error_t *crypto_ia32_aesni_cbc_init_avx2 (vlib_main_t * vm);
 clib_error_t *crypto_ia32_aesni_cbc_init_avx512 (vlib_main_t * vm);
+clib_error_t *crypto_ia32_aesni_cbc_init_vaes (vlib_main_t * vm);
 clib_error_t *crypto_ia32_aesni_gcm_init_sse42 (vlib_main_t * vm);
 clib_error_t *crypto_ia32_aesni_gcm_init_avx2 (vlib_main_t * vm);
 clib_error_t *crypto_ia32_aesni_gcm_init_avx512 (vlib_main_t * vm);
+clib_error_t *crypto_ia32_aesni_gcm_init_vaes (vlib_main_t * vm);
 
 #endif /* __crypto_ia32_h__ */
 
 /*
diff --git a/src/plugins/crypto_ia32/main.c b/src/plugins/crypto_ia32/main.c
index 194182fd57a..bcfd7fa6ffd 100644
--- a/src/plugins/crypto_ia32/main.c
+++ b/src/plugins/crypto_ia32/main.c
@@ -72,7 +72,9 @@ crypto_ia32_init (vlib_main_t * vm)
   vnet_crypto_register_engine (vm, "ia32", 100,
			       "Intel IA32 ISA Optimized Crypto");
 
-  if (clib_cpu_supports_avx512f ())
+  if (clib_cpu_supports_vaes ())
+    error = crypto_ia32_aesni_cbc_init_vaes (vm);
+  else if (clib_cpu_supports_avx512f ())
     error = crypto_ia32_aesni_cbc_init_avx512 (vm);
   else if (clib_cpu_supports_avx2 ())
     error = crypto_ia32_aesni_cbc_init_avx2 (vm);
@@ -84,7 +86,9 @@ crypto_ia32_init (vlib_main_t * vm)
 
   if (clib_cpu_supports_pclmulqdq ())
     {
-      if (clib_cpu_supports_avx512f ())
+      if (clib_cpu_supports_vaes ())
+	error = crypto_ia32_aesni_gcm_init_vaes (vm);
+      else if (clib_cpu_supports_avx512f ())
	error = crypto_ia32_aesni_gcm_init_avx512 (vm);
       else if (clib_cpu_supports_avx2 ())
	error = crypto_ia32_aesni_gcm_init_avx2 (vm);
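One last detail worth spelling out, from the tail loop of vaes_cbc_dec in aes_cbc.c above: a 16-byte AES block spans two 64-bit lanes of a __m512i, so the load/store mask for the remaining blocks is built as (1 << (n_blocks * 2)) - 1, and because __mmask8 keeps only the low 8 bits, any n_blocks >= 4 yields a full-register mask. A standalone check of the values it produces (not from the patch):

/* Standalone check, not from the patch: qword masks used by the
 * masked loads/stores in the vaes_cbc_dec tail loop.  Two 64-bit
 * lanes per 16-byte AES block, truncated to 8 bits by __mmask8. */
#include <stdio.h>

int
main (void)
{
  for (int n_blocks = 1; n_blocks <= 5; n_blocks++)
    printf ("n_blocks=%d mask=0x%02x\n", n_blocks,
	    (unsigned) (((1u << (n_blocks * 2)) - 1) & 0xff));
  /* prints 0x03, 0x0f, 0x3f, 0xff, 0xff */
  return 0;
}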