From 776644efe78f427a75fc5e122014b44b39d470c3 Mon Sep 17 00:00:00 2001
From: Damjan Marion
Date: Fri, 31 Jan 2020 10:24:07 +0100
Subject: crypto-native: add ARMv8 AES-CBC implementation

Type: feature
Change-Id: I32256061b9509880eec843db2f918879cdafbe47
Signed-off-by: Damjan Marion
---
 src/plugins/crypto_native/aes_cbc_aesni.h | 381 ++++++++++++++++++++++++++++++
 1 file changed, 381 insertions(+)
 create mode 100644 src/plugins/crypto_native/aes_cbc_aesni.h

(limited to 'src/plugins/crypto_native/aes_cbc_aesni.h')

diff --git a/src/plugins/crypto_native/aes_cbc_aesni.h b/src/plugins/crypto_native/aes_cbc_aesni.h
new file mode 100644
index 00000000000..206dace5d56
--- /dev/null
+++ b/src/plugins/crypto_native/aes_cbc_aesni.h
@@ -0,0 +1,381 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifdef __x86_64__
+
+static_always_inline u8x16 __clib_unused
+xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __AVX512F__
+  return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
+                                         (__m128i) c, 0x96);
+#endif
+  return a ^ b ^ c;
+}
+
+#if __VAES__
+static_always_inline __m512i
+xor3_x4 (__m512i a, __m512i b, __m512i c)
+{
+  return _mm512_ternarylogic_epi32 (a, b, c, 0x96);
+}
+
+static_always_inline __m512i
+aes_block_load_x4 (u8 * src[], int i)
+{
+  __m512i r = { };
+  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[0] + i), 0);
+  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[1] + i), 1);
+  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[2] + i), 2);
+  r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[3] + i), 3);
+  return r;
+}
+
+static_always_inline void
+aes_block_store_x4 (u8 * dst[], int i, __m512i r)
+{
+  aes_block_store (dst[0] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 0));
+  aes_block_store (dst[1] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 1));
+  aes_block_store (dst[2] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 2));
+  aes_block_store (dst[3] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 3));
+}
+#endif
+
+static_always_inline void __clib_unused
+aes_cbc_dec (u8x16 * k, u8 * src, u8 * dst, u8 * iv, int count,
+             aes_key_size_t rounds)
+{
+  u8x16 r0, r1, r2, r3, c0, c1, c2, c3, f;
+  int i;
+
+  f = aes_block_load (iv);
+
+  while (count >= 64)
+    {
+      _mm_prefetch (src + 128, _MM_HINT_T0);
+      _mm_prefetch (dst + 128, _MM_HINT_T0);
+
+      c0 = aes_block_load (src);
+      c1 = aes_block_load (src + 16);
+      c2 = aes_block_load (src + 32);
+      c3 = aes_block_load (src + 48);
+
+      r0 = c0 ^ k[0];
+      r1 = c1 ^ k[0];
+      r2 = c2 ^ k[0];
+      r3 = c3 ^ k[0];
+
+      for (i = 1; i < rounds; i++)
+        {
+          r0 = aes_dec_round (r0, k[i]);
+          r1 = aes_dec_round (r1, k[i]);
+          r2 = aes_dec_round (r2, k[i]);
+          r3 = aes_dec_round (r3, k[i]);
+        }
+
+      r0 = aes_dec_last_round (r0, k[i]);
+      r1 = aes_dec_last_round (r1, k[i]);
+      r2 = aes_dec_last_round (r2, k[i]);
+      r3 = aes_dec_last_round (r3, k[i]);
+
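+      /* CBC decrypt: plaintext[n] = decrypt (ciphertext[n]) ^ ciphertext[n-1],
+         so each decrypted block below is whitened with the preceding
+         ciphertext block; f holds the previous block (the IV on the first
+         iteration) */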
+      aes_block_store (dst, r0 ^ f);
+      aes_block_store (dst + 16, r1 ^ c0);
+      aes_block_store (dst + 32, r2 ^ c1);
+      aes_block_store (dst + 48, r3 ^ c2);
+
+      f = c3;
+
+      count -= 64;
+      src += 64;
+      dst += 64;
+    }
+
+  while (count > 0)
+    {
+      c0 = aes_block_load (src);
+      r0 = c0 ^ k[0];
+      for (i = 1; i < rounds; i++)
+        r0 = aes_dec_round (r0, k[i]);
+      r0 = aes_dec_last_round (r0, k[i]);
+      aes_block_store (dst, r0 ^ f);
+      f = c0;
+      count -= 16;
+      src += 16;
+      dst += 16;
+    }
+}
+
+#ifdef __VAES__
+static_always_inline void
+vaes_cbc_dec (__m512i * k, u8 * src, u8 * dst, u8 * iv, int count,
+              aes_key_size_t rounds)
+{
+  __m512i permute = { 6, 7, 8, 9, 10, 11, 12, 13 };
+  __m512i r0, r1, r2, r3, c0, c1, c2, c3, f = { };
+  __mmask8 m;
+  int i, n_blocks = count >> 4;
+
+  f = _mm512_mask_loadu_epi64 (f, 0xc0, (__m512i *) (iv - 48));
+
+  while (n_blocks >= 16)
+    {
+      c0 = _mm512_loadu_si512 ((__m512i *) src);
+      c1 = _mm512_loadu_si512 ((__m512i *) (src + 64));
+      c2 = _mm512_loadu_si512 ((__m512i *) (src + 128));
+      c3 = _mm512_loadu_si512 ((__m512i *) (src + 192));
+
+      r0 = c0 ^ k[0];
+      r1 = c1 ^ k[0];
+      r2 = c2 ^ k[0];
+      r3 = c3 ^ k[0];
+
+      for (i = 1; i < rounds; i++)
+        {
+          r0 = _mm512_aesdec_epi128 (r0, k[i]);
+          r1 = _mm512_aesdec_epi128 (r1, k[i]);
+          r2 = _mm512_aesdec_epi128 (r2, k[i]);
+          r3 = _mm512_aesdec_epi128 (r3, k[i]);
+        }
+
+      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
+      r1 = _mm512_aesdeclast_epi128 (r1, k[i]);
+      r2 = _mm512_aesdeclast_epi128 (r2, k[i]);
+      r3 = _mm512_aesdeclast_epi128 (r3, k[i]);
+
+      r0 ^= _mm512_permutex2var_epi64 (f, permute, c0);
+      _mm512_storeu_si512 ((__m512i *) dst, r0);
+
+      r1 ^= _mm512_permutex2var_epi64 (c0, permute, c1);
+      _mm512_storeu_si512 ((__m512i *) (dst + 64), r1);
+
+      r2 ^= _mm512_permutex2var_epi64 (c1, permute, c2);
+      _mm512_storeu_si512 ((__m512i *) (dst + 128), r2);
+
+      r3 ^= _mm512_permutex2var_epi64 (c2, permute, c3);
+      _mm512_storeu_si512 ((__m512i *) (dst + 192), r3);
+      f = c3;
+
+      n_blocks -= 16;
+      src += 256;
+      dst += 256;
+    }
+
+  while (n_blocks > 0)
+    {
+      m = (1 << (n_blocks * 2)) - 1;
+      c0 = _mm512_mask_loadu_epi64 (c0, m, (__m512i *) src);
+      f = _mm512_permutex2var_epi64 (f, permute, c0);
+      r0 = c0 ^ k[0];
+      for (i = 1; i < rounds; i++)
+        r0 = _mm512_aesdec_epi128 (r0, k[i]);
+      r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
+      _mm512_mask_storeu_epi64 ((__m512i *) dst, m, r0 ^ f);
+      f = c0;
+      n_blocks -= 4;
+      src += 64;
+      dst += 64;
+    }
+}
+#endif
+
+#ifdef __VAES__
+#define N 16
+#define u32xN u32x16
+#define u32xN_min_scalar u32x16_min_scalar
+#define u32xN_is_all_zero u32x16_is_all_zero
+#else
+#define N 4
+#define u32xN u32x4
+#define u32xN_min_scalar u32x4_min_scalar
+#define u32xN_is_all_zero u32x4_is_all_zero
+#endif
+
+static_always_inline u32
+aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+                       u32 n_ops, aes_key_size_t ks)
+{
+  crypto_native_main_t *cm = &crypto_native_main;
+  crypto_native_per_thread_data_t *ptd =
+    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
+  int rounds = AES_KEY_ROUNDS (ks);
+  u8 dummy[8192];
+  u32 i, j, count, n_left = n_ops;
+  u32xN dummy_mask = { };
+  u32xN len = { };
+  vnet_crypto_key_index_t key_index[N];
+  u8 *src[N] = { };
+  u8 *dst[N] = { };
+  /* *INDENT-OFF* */
+  union
+  {
+    u8x16 x1[N];
+    __m512i x4[N / 4];
+  } r = { }, k[15] = { };
+  /* *INDENT-ON* */
+
+  for (i = 0; i < N; i++)
+    key_index[i] = ~0;
+
+more:
+  for (i = 0; i < N; i++)
+    if (len[i] == 0)
+      {
+        if (n_left == 0)
+          {
+            /* no more work to enqueue, so we are enqueueing dummy buffer */
+            src[i] = dst[i] = dummy;
+            len[i] = sizeof (dummy);
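+            /* a zeroed mask excludes this dummy lane from the
+               len & dummy_mask completion check at the bottom of the
+               function, so dummy work never delays completion */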
+            dummy_mask[i] = 0;
+          }
+        else
+          {
+            if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
+              {
+                r.x1[i] = ptd->cbc_iv[i];
+                aes_block_store (ops[0]->iv, r.x1[i]);
+                ptd->cbc_iv[i] = aes_enc_round (r.x1[i], r.x1[i]);
+              }
+            else
+              r.x1[i] = aes_block_load (ops[0]->iv);
+
+            src[i] = ops[0]->src;
+            dst[i] = ops[0]->dst;
+            len[i] = ops[0]->len;
+            dummy_mask[i] = ~0;
+            if (key_index[i] != ops[0]->key_index)
+              {
+                aes_cbc_key_data_t *kd;
+                key_index[i] = ops[0]->key_index;
+                kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
+                for (j = 0; j < rounds + 1; j++)
+                  k[j].x1[i] = kd->encrypt_key[j];
+              }
+            ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+            n_left--;
+            ops++;
+          }
+      }
+
+  count = u32xN_min_scalar (len);
+
+  ASSERT (count % 16 == 0);
+
+  for (i = 0; i < count; i += 16)
+    {
+#ifdef __VAES__
+      r.x4[0] = xor3_x4 (r.x4[0], aes_block_load_x4 (src, i), k[0].x4[0]);
+      r.x4[1] = xor3_x4 (r.x4[1], aes_block_load_x4 (src + 4, i), k[0].x4[1]);
+      r.x4[2] = xor3_x4 (r.x4[2], aes_block_load_x4 (src + 8, i), k[0].x4[2]);
+      r.x4[3] = xor3_x4 (r.x4[3], aes_block_load_x4 (src + 12, i), k[0].x4[3]);
+
+      for (j = 1; j < rounds; j++)
+        {
+          r.x4[0] = _mm512_aesenc_epi128 (r.x4[0], k[j].x4[0]);
+          r.x4[1] = _mm512_aesenc_epi128 (r.x4[1], k[j].x4[1]);
+          r.x4[2] = _mm512_aesenc_epi128 (r.x4[2], k[j].x4[2]);
+          r.x4[3] = _mm512_aesenc_epi128 (r.x4[3], k[j].x4[3]);
+        }
+      r.x4[0] = _mm512_aesenclast_epi128 (r.x4[0], k[j].x4[0]);
+      r.x4[1] = _mm512_aesenclast_epi128 (r.x4[1], k[j].x4[1]);
+      r.x4[2] = _mm512_aesenclast_epi128 (r.x4[2], k[j].x4[2]);
+      r.x4[3] = _mm512_aesenclast_epi128 (r.x4[3], k[j].x4[3]);
+
+      aes_block_store_x4 (dst, i, r.x4[0]);
+      aes_block_store_x4 (dst + 4, i, r.x4[1]);
+      aes_block_store_x4 (dst + 8, i, r.x4[2]);
+      aes_block_store_x4 (dst + 12, i, r.x4[3]);
+#else
+      r.x1[0] = xor3 (r.x1[0], aes_block_load (src[0] + i), k[0].x1[0]);
+      r.x1[1] = xor3 (r.x1[1], aes_block_load (src[1] + i), k[0].x1[1]);
+      r.x1[2] = xor3 (r.x1[2], aes_block_load (src[2] + i), k[0].x1[2]);
+      r.x1[3] = xor3 (r.x1[3], aes_block_load (src[3] + i), k[0].x1[3]);
+
+      for (j = 1; j < rounds; j++)
+        {
+          r.x1[0] = aes_enc_round (r.x1[0], k[j].x1[0]);
+          r.x1[1] = aes_enc_round (r.x1[1], k[j].x1[1]);
+          r.x1[2] = aes_enc_round (r.x1[2], k[j].x1[2]);
+          r.x1[3] = aes_enc_round (r.x1[3], k[j].x1[3]);
+        }
+
+      r.x1[0] = aes_enc_last_round (r.x1[0], k[j].x1[0]);
+      r.x1[1] = aes_enc_last_round (r.x1[1], k[j].x1[1]);
+      r.x1[2] = aes_enc_last_round (r.x1[2], k[j].x1[2]);
+      r.x1[3] = aes_enc_last_round (r.x1[3], k[j].x1[3]);
+
+      aes_block_store (dst[0] + i, r.x1[0]);
+      aes_block_store (dst[1] + i, r.x1[1]);
+      aes_block_store (dst[2] + i, r.x1[2]);
+      aes_block_store (dst[3] + i, r.x1[3]);
+#endif
+    }
+
+  for (i = 0; i < N; i++)
+    {
+      src[i] += count;
+      dst[i] += count;
+      len[i] -= count;
+    }
+
+  if (n_left > 0)
+    goto more;
+
+  if (!u32xN_is_all_zero (len & dummy_mask))
+    goto more;
+
+  return n_ops;
+}
+
+static_always_inline u32
+aesni_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+                       u32 n_ops, aes_key_size_t ks)
+{
+  crypto_native_main_t *cm = &crypto_native_main;
+  int rounds = AES_KEY_ROUNDS (ks);
+  vnet_crypto_op_t *op = ops[0];
+  aes_cbc_key_data_t *kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
+  u32 n_left = n_ops;
+
+  ASSERT (n_ops >= 1);
+
+decrypt:
+#ifdef __VAES__
+  vaes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
+#else
+  aes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
+#endif
+  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
+  if (--n_left)
+    {
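+      /* each op may reference a different key, so reload the expanded
+         key schedule before decrypting the next op */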
+      op += 1;
+      kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
+      goto decrypt;
+    }
+
+  return n_ops;
+}
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
--
cgit 1.2.3-korg