path: root/src/plugins/crypto_ia32
author    Damjan Marion <damarion@cisco.com>  2020-01-28 09:55:25 +0100
committer Damjan Marion <dmarion@me.com>      2020-01-28 10:24:18 +0000
commit    7d08e39a87f5805d1ef764aa0fd986490fb4f7bb (patch)
tree      27d838a8f5681dea82d2661c2d70526af9d0fef0 /src/plugins/crypto_ia32
parent    0d4a61216c2329eec5167d0411481431037ac5c1 (diff)
crypto-native: rename crypto_ia32 to crypto_native
Type: refactor
Change-Id: I9f21b3bf669ff913ff50afe5459cf52ff987e701
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/plugins/crypto_ia32')
-rw-r--r--  src/plugins/crypto_ia32/CMakeLists.txt  |  37
-rw-r--r--  src/plugins/crypto_ia32/FEATURE.yaml    |  10
-rw-r--r--  src/plugins/crypto_ia32/aes_cbc.c       | 497
-rw-r--r--  src/plugins/crypto_ia32/aes_gcm.c       | 780
-rw-r--r--  src/plugins/crypto_ia32/aesni.h         | 226
-rw-r--r--  src/plugins/crypto_ia32/crypto_ia32.h   |  55
-rw-r--r--  src/plugins/crypto_ia32/ghash.h         | 253
-rw-r--r--  src/plugins/crypto_ia32/main.c          | 135
8 files changed, 0 insertions, 1993 deletions
diff --git a/src/plugins/crypto_ia32/CMakeLists.txt b/src/plugins/crypto_ia32/CMakeLists.txt
deleted file mode 100644
index 0a8b560731a..00000000000
--- a/src/plugins/crypto_ia32/CMakeLists.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
- return()
-endif()
-
-add_vpp_plugin(crypto_ia32 SOURCES main.c)
-
-list(APPEND VARIANTS "sse42\;-march=silvermont")
-list(APPEND VARIANTS "avx2\;-march=core-avx2")
-if(compiler_flag_march_skylake_avx512)
- list(APPEND VARIANTS "avx512\;-march=skylake-avx512")
-endif()
-if(compiler_flag_march_icelake_client)
- list(APPEND VARIANTS "vaesni\;-march=icelake-client")
-endif()
-
-foreach(VARIANT ${VARIANTS})
- list(GET VARIANT 0 v)
- list(GET VARIANT 1 f)
- set(l crypto_ia32_${v})
- add_library(${l} OBJECT aes_cbc.c aes_gcm.c)
- set_target_properties(${l} PROPERTIES POSITION_INDEPENDENT_CODE ON)
- target_compile_options(${l} PUBLIC ${f} -Wall -fno-common -maes)
- target_sources(crypto_ia32_plugin PRIVATE $<TARGET_OBJECTS:${l}>)
-endforeach()
diff --git a/src/plugins/crypto_ia32/FEATURE.yaml b/src/plugins/crypto_ia32/FEATURE.yaml
deleted file mode 100644
index 206caceb2d4..00000000000
--- a/src/plugins/crypto_ia32/FEATURE.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-name: IPSec crypto engine provided by native implementation
-maintainer: Damjan Marion <damarion@cisco.com>
-features:
- - CBC(128, 192, 256)
- - GCM(128, 192, 256)
-
-description: "An implementation of a native crypto-engine"
-state: production
-properties: [API, CLI, MULTITHREAD]
diff --git a/src/plugins/crypto_ia32/aes_cbc.c b/src/plugins/crypto_ia32/aes_cbc.c
deleted file mode 100644
index 910f8ca7413..00000000000
--- a/src/plugins/crypto_ia32/aes_cbc.c
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- *------------------------------------------------------------------
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *------------------------------------------------------------------
- */
-
-#include <vlib/vlib.h>
-#include <vnet/plugin/plugin.h>
-#include <vnet/crypto/crypto.h>
-#include <x86intrin.h>
-#include <crypto_ia32/crypto_ia32.h>
-#include <crypto_ia32/aesni.h>
-
-#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
-#pragma GCC optimize ("O3")
-#endif
-
-typedef struct
-{
- __m128i encrypt_key[15];
-#if __VAES__
- __m512i decrypt_key[15];
-#else
- __m128i decrypt_key[15];
-#endif
-} aes_cbc_key_data_t;
-
-static_always_inline __m128i
-aes_block_load (u8 * p)
-{
- return _mm_loadu_si128 ((__m128i *) p);
-}
-
-static_always_inline void
-aes_block_store (u8 * p, __m128i r)
-{
- _mm_storeu_si128 ((__m128i *) p, r);
-}
-
-static_always_inline __m128i __clib_unused
-xor3 (__m128i a, __m128i b, __m128i c)
-{
-#if __AVX512F__
- return _mm_ternarylogic_epi32 (a, b, c, 0x96);
-#endif
- return a ^ b ^ c;
-}
-
-#if __VAES__
-static_always_inline __m512i
-xor3_x4 (__m512i a, __m512i b, __m512i c)
-{
- return _mm512_ternarylogic_epi32 (a, b, c, 0x96);
-}
-
-static_always_inline __m512i
-aes_block_load_x4 (u8 * src[], int i)
-{
- __m512i r = { };
- r = _mm512_inserti64x2 (r, aes_block_load (src[0] + i), 0);
- r = _mm512_inserti64x2 (r, aes_block_load (src[1] + i), 1);
- r = _mm512_inserti64x2 (r, aes_block_load (src[2] + i), 2);
- r = _mm512_inserti64x2 (r, aes_block_load (src[3] + i), 3);
- return r;
-}
-
-static_always_inline void
-aes_block_store_x4 (u8 * dst[], int i, __m512i r)
-{
- aes_block_store (dst[0] + i, _mm512_extracti64x2_epi64 (r, 0));
- aes_block_store (dst[1] + i, _mm512_extracti64x2_epi64 (r, 1));
- aes_block_store (dst[2] + i, _mm512_extracti64x2_epi64 (r, 2));
- aes_block_store (dst[3] + i, _mm512_extracti64x2_epi64 (r, 3));
-}
-#endif
-
-static_always_inline void __clib_unused
-aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
- aesni_key_size_t rounds)
-{
- __m128i r0, r1, r2, r3, c0, c1, c2, c3, f;
- int i;
-
- f = aes_block_load (iv);
-
- while (count >= 64)
- {
- _mm_prefetch (src + 128, _MM_HINT_T0);
- _mm_prefetch (dst + 128, _MM_HINT_T0);
-
- c0 = aes_block_load (src);
- c1 = aes_block_load (src + 16);
- c2 = aes_block_load (src + 32);
- c3 = aes_block_load (src + 48);
-
- r0 = c0 ^ k[0];
- r1 = c1 ^ k[0];
- r2 = c2 ^ k[0];
- r3 = c3 ^ k[0];
-
- for (i = 1; i < rounds; i++)
- {
- r0 = _mm_aesdec_si128 (r0, k[i]);
- r1 = _mm_aesdec_si128 (r1, k[i]);
- r2 = _mm_aesdec_si128 (r2, k[i]);
- r3 = _mm_aesdec_si128 (r3, k[i]);
- }
-
- r0 = _mm_aesdeclast_si128 (r0, k[i]);
- r1 = _mm_aesdeclast_si128 (r1, k[i]);
- r2 = _mm_aesdeclast_si128 (r2, k[i]);
- r3 = _mm_aesdeclast_si128 (r3, k[i]);
-
- aes_block_store (dst, r0 ^ f);
- aes_block_store (dst + 16, r1 ^ c0);
- aes_block_store (dst + 32, r2 ^ c1);
- aes_block_store (dst + 48, r3 ^ c2);
-
- f = c3;
-
- count -= 64;
- src += 64;
- dst += 64;
- }
-
- while (count > 0)
- {
- c0 = aes_block_load (src);
- r0 = c0 ^ k[0];
- for (i = 1; i < rounds; i++)
- r0 = _mm_aesdec_si128 (r0, k[i]);
- r0 = _mm_aesdeclast_si128 (r0, k[i]);
- aes_block_store (dst, r0 ^ f);
- f = c0;
- count -= 16;
- src += 16;
- dst += 16;
- }
-}
-
-#ifdef __VAES__
-static_always_inline void
-vaes_cbc_dec (__m512i * k, u8 * src, u8 * dst, u8 * iv, int count,
- aesni_key_size_t rounds)
-{
- __m512i permute = { 6, 7, 8, 9, 10, 11, 12, 13 };
- __m512i r0, r1, r2, r3, c0, c1, c2, c3, f = { };
- __mmask8 m;
- int i, n_blocks = count >> 4;
-
- f = _mm512_mask_loadu_epi64 (f, 0xc0, (__m512i *) (iv - 48));
-
- while (n_blocks >= 16)
- {
- c0 = _mm512_loadu_si512 ((__m512i *) src);
- c1 = _mm512_loadu_si512 ((__m512i *) (src + 64));
- c2 = _mm512_loadu_si512 ((__m512i *) (src + 128));
- c3 = _mm512_loadu_si512 ((__m512i *) (src + 192));
-
- r0 = c0 ^ k[0];
- r1 = c1 ^ k[0];
- r2 = c2 ^ k[0];
- r3 = c3 ^ k[0];
-
- for (i = 1; i < rounds; i++)
- {
- r0 = _mm512_aesdec_epi128 (r0, k[i]);
- r1 = _mm512_aesdec_epi128 (r1, k[i]);
- r2 = _mm512_aesdec_epi128 (r2, k[i]);
- r3 = _mm512_aesdec_epi128 (r3, k[i]);
- }
-
- r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
- r1 = _mm512_aesdeclast_epi128 (r1, k[i]);
- r2 = _mm512_aesdeclast_epi128 (r2, k[i]);
- r3 = _mm512_aesdeclast_epi128 (r3, k[i]);
-
- r0 ^= _mm512_permutex2var_epi64 (f, permute, c0);
- _mm512_storeu_si512 ((__m512i *) dst, r0);
-
- r1 ^= _mm512_permutex2var_epi64 (c0, permute, c1);
- _mm512_storeu_si512 ((__m512i *) (dst + 64), r1);
-
- r2 ^= _mm512_permutex2var_epi64 (c1, permute, c2);
- _mm512_storeu_si512 ((__m512i *) (dst + 128), r2);
-
- r3 ^= _mm512_permutex2var_epi64 (c2, permute, c3);
- _mm512_storeu_si512 ((__m512i *) (dst + 192), r3);
- f = c3;
-
- n_blocks -= 16;
- src += 256;
- dst += 256;
- }
-
- while (n_blocks > 0)
- {
- m = (1 << (n_blocks * 2)) - 1;
- c0 = _mm512_mask_loadu_epi64 (c0, m, (__m512i *) src);
- f = _mm512_permutex2var_epi64 (f, permute, c0);
- r0 = c0 ^ k[0];
- for (i = 1; i < rounds; i++)
- r0 = _mm512_aesdec_epi128 (r0, k[i]);
- r0 = _mm512_aesdeclast_epi128 (r0, k[i]);
- _mm512_mask_storeu_epi64 ((__m512i *) dst, m, r0 ^ f);
- f = c0;
- n_blocks -= 4;
- src += 64;
- dst += 64;
- }
-}
-#endif
-
-#ifdef __VAES__
-#define N 16
-#define u32xN u32x16
-#define u32xN_min_scalar u32x16_min_scalar
-#define u32xN_is_all_zero u32x16_is_all_zero
-#else
-#define N 4
-#define u32xN u32x4
-#define u32xN_min_scalar u32x4_min_scalar
-#define u32xN_is_all_zero u32x4_is_all_zero
-#endif
-
-static_always_inline u32
-aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
- u32 n_ops, aesni_key_size_t ks)
-{
- crypto_ia32_main_t *cm = &crypto_ia32_main;
- crypto_ia32_per_thread_data_t *ptd = vec_elt_at_index (cm->per_thread_data,
- vm->thread_index);
- int rounds = AESNI_KEY_ROUNDS (ks);
- u8 dummy[8192];
- u32 i, j, count, n_left = n_ops;
- u32xN dummy_mask = { };
- u32xN len = { };
- vnet_crypto_key_index_t key_index[N];
- u8 *src[N] = { };
- u8 *dst[N] = { };
- /* *INDENT-OFF* */
- union
- {
- __m128i x1[N];
- __m512i x4[N / 4];
- } r = { }, k[15] = { };
- /* *INDENT-ON* */
-
- for (i = 0; i < N; i++)
- key_index[i] = ~0;
-
-more:
- for (i = 0; i < N; i++)
- if (len[i] == 0)
- {
- if (n_left == 0)
- {
-	    /* no more work to enqueue, so we enqueue a dummy buffer */
- src[i] = dst[i] = dummy;
- len[i] = sizeof (dummy);
- dummy_mask[i] = 0;
- }
- else
- {
- if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
- {
- r.x1[i] = ptd->cbc_iv[i];
- aes_block_store (ops[0]->iv, r.x1[i]);
- ptd->cbc_iv[i] = _mm_aesenc_si128 (r.x1[i], r.x1[i]);
- }
- else
- r.x1[i] = aes_block_load (ops[0]->iv);
-
- src[i] = ops[0]->src;
- dst[i] = ops[0]->dst;
- len[i] = ops[0]->len;
- dummy_mask[i] = ~0;
- if (key_index[i] != ops[0]->key_index)
- {
- aes_cbc_key_data_t *kd;
- key_index[i] = ops[0]->key_index;
- kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
- for (j = 0; j < rounds + 1; j++)
- k[j].x1[i] = kd->encrypt_key[j];
- }
- ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
- n_left--;
- ops++;
- }
- }
-
- count = u32xN_min_scalar (len);
-
- ASSERT (count % 16 == 0);
-
- for (i = 0; i < count; i += 16)
- {
-#ifdef __VAES__
- r.x4[0] = xor3_x4 (r.x4[0], aes_block_load_x4 (src, i), k[0].x4[0]);
- r.x4[1] = xor3_x4 (r.x4[1], aes_block_load_x4 (src, i), k[0].x4[1]);
- r.x4[2] = xor3_x4 (r.x4[2], aes_block_load_x4 (src, i), k[0].x4[2]);
- r.x4[3] = xor3_x4 (r.x4[3], aes_block_load_x4 (src, i), k[0].x4[3]);
-
- for (j = 1; j < rounds; j++)
- {
- r.x4[0] = _mm512_aesenc_epi128 (r.x4[0], k[j].x4[0]);
- r.x4[1] = _mm512_aesenc_epi128 (r.x4[1], k[j].x4[1]);
- r.x4[2] = _mm512_aesenc_epi128 (r.x4[2], k[j].x4[2]);
- r.x4[3] = _mm512_aesenc_epi128 (r.x4[3], k[j].x4[3]);
- }
- r.x4[0] = _mm512_aesenclast_epi128 (r.x4[0], k[j].x4[0]);
- r.x4[1] = _mm512_aesenclast_epi128 (r.x4[1], k[j].x4[1]);
- r.x4[2] = _mm512_aesenclast_epi128 (r.x4[2], k[j].x4[2]);
- r.x4[3] = _mm512_aesenclast_epi128 (r.x4[3], k[j].x4[3]);
-
- aes_block_store_x4 (dst, i, r.x4[0]);
- aes_block_store_x4 (dst + 4, i, r.x4[1]);
- aes_block_store_x4 (dst + 8, i, r.x4[2]);
- aes_block_store_x4 (dst + 12, i, r.x4[3]);
-#else
- r.x1[0] = xor3 (r.x1[0], aes_block_load (src[0] + i), k[0].x1[0]);
- r.x1[1] = xor3 (r.x1[1], aes_block_load (src[1] + i), k[0].x1[1]);
- r.x1[2] = xor3 (r.x1[2], aes_block_load (src[2] + i), k[0].x1[2]);
- r.x1[3] = xor3 (r.x1[3], aes_block_load (src[3] + i), k[0].x1[3]);
-
- for (j = 1; j < rounds; j++)
- {
- r.x1[0] = _mm_aesenc_si128 (r.x1[0], k[j].x1[0]);
- r.x1[1] = _mm_aesenc_si128 (r.x1[1], k[j].x1[1]);
- r.x1[2] = _mm_aesenc_si128 (r.x1[2], k[j].x1[2]);
- r.x1[3] = _mm_aesenc_si128 (r.x1[3], k[j].x1[3]);
- }
-
- r.x1[0] = _mm_aesenclast_si128 (r.x1[0], k[j].x1[0]);
- r.x1[1] = _mm_aesenclast_si128 (r.x1[1], k[j].x1[1]);
- r.x1[2] = _mm_aesenclast_si128 (r.x1[2], k[j].x1[2]);
- r.x1[3] = _mm_aesenclast_si128 (r.x1[3], k[j].x1[3]);
-
- aes_block_store (dst[0] + i, r.x1[0]);
- aes_block_store (dst[1] + i, r.x1[1]);
- aes_block_store (dst[2] + i, r.x1[2]);
- aes_block_store (dst[3] + i, r.x1[3]);
-#endif
- }
-
- for (i = 0; i < N; i++)
- {
- src[i] += count;
- dst[i] += count;
- len[i] -= count;
- }
-
- if (n_left > 0)
- goto more;
-
- if (!u32xN_is_all_zero (len & dummy_mask))
- goto more;
-
- return n_ops;
-}
-
-static_always_inline u32
-aesni_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
- u32 n_ops, aesni_key_size_t ks)
-{
- crypto_ia32_main_t *cm = &crypto_ia32_main;
- int rounds = AESNI_KEY_ROUNDS (ks);
- vnet_crypto_op_t *op = ops[0];
- aes_cbc_key_data_t *kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
- u32 n_left = n_ops;
-
- ASSERT (n_ops >= 1);
-
-decrypt:
-#ifdef __VAES__
- vaes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
-#else
- aes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
-#endif
- op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
-
- if (--n_left)
- {
- op += 1;
- kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
- goto decrypt;
- }
-
- return n_ops;
-}
-
-static_always_inline void *
-aesni_cbc_key_exp (vnet_crypto_key_t * key, aesni_key_size_t ks)
-{
- __m128i e[15], d[15];
- aes_cbc_key_data_t *kd;
- kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
- aes_key_expand (e, key->data, ks);
- aes_key_expand (d, key->data, ks);
- aes_key_enc_to_dec (d, ks);
- for (int i = 0; i < AESNI_KEY_ROUNDS (ks) + 1; i++)
- {
-#if __VAES__
- kd->decrypt_key[i] = _mm512_broadcast_i64x2 (d[i]);
-#else
- kd->decrypt_key[i] = d[i];
-#endif
- kd->encrypt_key[i] = e[i];
- }
- return kd;
-}
-
-#define foreach_aesni_cbc_handler_type _(128) _(192) _(256)
-
-#define _(x) \
-static u32 aesni_ops_dec_aes_cbc_##x \
-(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return aesni_ops_dec_aes_cbc (vm, ops, n_ops, AESNI_KEY_##x); } \
-static u32 aesni_ops_enc_aes_cbc_##x \
-(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return aesni_ops_enc_aes_cbc (vm, ops, n_ops, AESNI_KEY_##x); } \
-static void * aesni_cbc_key_exp_##x (vnet_crypto_key_t *key) \
-{ return aesni_cbc_key_exp (key, AESNI_KEY_##x); }
-
-foreach_aesni_cbc_handler_type;
-#undef _
-
-#include <fcntl.h>
-
-clib_error_t *
-#ifdef __VAES__
-crypto_ia32_aesni_cbc_init_vaes (vlib_main_t * vm)
-#elif __AVX512F__
-crypto_ia32_aesni_cbc_init_avx512 (vlib_main_t * vm)
-#elif __AVX2__
-crypto_ia32_aesni_cbc_init_avx2 (vlib_main_t * vm)
-#else
-crypto_ia32_aesni_cbc_init_sse42 (vlib_main_t * vm)
-#endif
-{
- crypto_ia32_main_t *cm = &crypto_ia32_main;
- crypto_ia32_per_thread_data_t *ptd;
- clib_error_t *err = 0;
- int fd;
-
- if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
- return clib_error_return_unix (0, "failed to open '/dev/urandom'");
-
- /* *INDENT-OFF* */
- vec_foreach (ptd, cm->per_thread_data)
- {
- for (int i = 0; i < 4; i++)
- {
- if (read(fd, ptd->cbc_iv, sizeof (ptd->cbc_iv)) !=
- sizeof (ptd->cbc_iv))
- {
- err = clib_error_return_unix (0, "'/dev/urandom' read failure");
- goto error;
- }
- }
- }
- /* *INDENT-ON* */
-
-#define _(x) \
- vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
- VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
- aesni_ops_enc_aes_cbc_##x); \
- vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
- VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
- aesni_ops_dec_aes_cbc_##x); \
- cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_CBC] = aesni_cbc_key_exp_##x;
- foreach_aesni_cbc_handler_type;
-#undef _
-
-error:
- close (fd);
- return err;
-}
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
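
A note on the structure the deleted aes_cbc_dec() exploits: in CBC, plaintext
block i depends only on ciphertext blocks i and i-1 (p[i] = D(c[i]) ^ c[i-1]),
so the AES work for several blocks is mutually independent and can be kept in
flight at once, while only the trailing XORs are chained. CBC encryption has no
such freedom, which is why the encrypt path above multiplexes N independent
buffers instead. A minimal scalar sketch of the decrypt dependency, assuming a
hypothetical one-block primitive aes_dec_block():

#include <string.h>

/* hypothetical one-block AES-decrypt primitive, standing in for the
   _mm_aesdec_si128() round loop in aes_cbc_dec() above */
void aes_dec_block (unsigned char out[16], const unsigned char in[16],
                    const void *key);

static void
cbc_dec_sketch (const void *key, const unsigned char *src, unsigned char *dst,
                const unsigned char iv[16], int n_blocks)
{
  unsigned char prev[16], c[16], tmp[16];
  memcpy (prev, iv, 16);
  for (int i = 0; i < n_blocks; i++, src += 16, dst += 16)
    {
      memcpy (c, src, 16);		/* save ciphertext (allows in-place use) */
      aes_dec_block (tmp, c, key);	/* independent across blocks */
      for (int j = 0; j < 16; j++)
	dst[j] = tmp[j] ^ prev[j];	/* the only chained step */
      memcpy (prev, c, 16);
    }
}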
diff --git a/src/plugins/crypto_ia32/aes_gcm.c b/src/plugins/crypto_ia32/aes_gcm.c
deleted file mode 100644
index e45dda79faf..00000000000
--- a/src/plugins/crypto_ia32/aes_gcm.c
+++ /dev/null
@@ -1,780 +0,0 @@
-/*
- *------------------------------------------------------------------
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *------------------------------------------------------------------
- */
-
-#include <vlib/vlib.h>
-#include <vnet/plugin/plugin.h>
-#include <vnet/crypto/crypto.h>
-#include <x86intrin.h>
-#include <crypto_ia32/crypto_ia32.h>
-#include <crypto_ia32/aesni.h>
-#include <crypto_ia32/ghash.h>
-
-#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
-#pragma GCC optimize ("O3")
-#endif
-
-typedef struct
-{
- /* pre-calculated hash key values */
- const __m128i Hi[8];
- /* extracted AES key */
- const __m128i Ke[15];
-} aes_gcm_key_data_t;
-
-static const __m128i last_byte_one = { 0, 1ULL << 56 };
-static const __m128i zero = { 0, 0 };
-
-static const u8x16 bswap_mask = {
- 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
-};
-
-static const u8x16 byte_mask_scale = {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
-};
-
-static_always_inline __m128i
-aesni_gcm_bswap (__m128i x)
-{
- return _mm_shuffle_epi8 (x, (__m128i) bswap_mask);
-}
-
-static_always_inline __m128i
-aesni_gcm_byte_mask (__m128i x, u8 n_bytes)
-{
- u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);
-
- return _mm_blendv_epi8 (zero, x, (__m128i) mask);
-}
-
-static_always_inline __m128i
-aesni_gcm_load_partial (__m128i * p, int n_bytes)
-{
- ASSERT (n_bytes <= 16);
-#ifdef __AVX512F__
- return _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p);
-#else
- return aesni_gcm_byte_mask (CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, p),
- n_bytes);
-#endif
-}
-
-static_always_inline void
-aesni_gcm_store_partial (void *p, __m128i r, int n_bytes)
-{
-#ifdef __AVX512F__
- _mm_mask_storeu_epi8 (p, (1 << n_bytes) - 1, r);
-#else
- u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);
- _mm_maskmoveu_si128 (r, (__m128i) mask, p);
-#endif
-}
-
-static_always_inline void
-aesni_gcm_load (__m128i * d, __m128i * inv, int n, int n_bytes)
-{
- for (int i = 0; i < n - 1; i++)
- d[i] = _mm_loadu_si128 (inv + i);
- d[n - 1] = n_bytes ? aesni_gcm_load_partial (inv + n - 1, n_bytes) :
- _mm_loadu_si128 (inv + n - 1);
-}
-
-static_always_inline void
-aesni_gcm_store (__m128i * d, __m128i * outv, int n, int n_bytes)
-{
- for (int i = 0; i < n - 1; i++)
- _mm_storeu_si128 (outv + i, d[i]);
- if (n_bytes & 0xf)
- aesni_gcm_store_partial (outv + n - 1, d[n - 1], n_bytes);
- else
- _mm_storeu_si128 (outv + n - 1, d[n - 1]);
-}
-
-static_always_inline void
-aesni_gcm_enc_first_round (__m128i * r, __m128i * Y, u32 * ctr, __m128i k,
- int n_blocks)
-{
- u32 i;
-
- if (PREDICT_TRUE ((u8) ctr[0] < (256 - n_blocks)))
- {
- for (i = 0; i < n_blocks; i++)
- {
- Y[0] = _mm_add_epi32 (Y[0], last_byte_one);
- r[i] = k ^ Y[0];
- }
- ctr[0] += n_blocks;
- }
- else
- {
- for (i = 0; i < n_blocks; i++)
- {
- Y[0] = _mm_insert_epi32 (Y[0], clib_host_to_net_u32 (++ctr[0]), 3);
- r[i] = k ^ Y[0];
- }
- }
-}
-
-static_always_inline void
-aesni_gcm_enc_round (__m128i * r, __m128i k, int n_blocks)
-{
- for (int i = 0; i < n_blocks; i++)
- r[i] = _mm_aesenc_si128 (r[i], k);
-}
-
-static_always_inline void
-aesni_gcm_enc_last_round (__m128i * r, __m128i * d, const __m128i * k,
- int rounds, int n_blocks)
-{
-
-  /* additional rounds for AES-192 and AES-256 */
- for (int i = 10; i < rounds; i++)
- aesni_gcm_enc_round (r, k[i], n_blocks);
-
- for (int i = 0; i < n_blocks; i++)
- d[i] ^= _mm_aesenclast_si128 (r[i], k[rounds]);
-}
-
-static_always_inline __m128i
-aesni_gcm_ghash_blocks (__m128i T, aes_gcm_key_data_t * kd,
- const __m128i * in, int n_blocks)
-{
- ghash_data_t _gd, *gd = &_gd;
- const __m128i *Hi = kd->Hi + n_blocks - 1;
- ghash_mul_first (gd, aesni_gcm_bswap (_mm_loadu_si128 (in)) ^ T, Hi[0]);
- for (int i = 1; i < n_blocks; i++)
- ghash_mul_next (gd, aesni_gcm_bswap (_mm_loadu_si128 (in + i)), Hi[-i]);
- ghash_reduce (gd);
- ghash_reduce2 (gd);
- return ghash_final (gd);
-}
-
-static_always_inline __m128i
-aesni_gcm_ghash (__m128i T, aes_gcm_key_data_t * kd, const __m128i * in,
- u32 n_left)
-{
-
- while (n_left >= 128)
- {
- T = aesni_gcm_ghash_blocks (T, kd, in, 8);
- n_left -= 128;
- in += 8;
- }
-
- if (n_left >= 64)
- {
- T = aesni_gcm_ghash_blocks (T, kd, in, 4);
- n_left -= 64;
- in += 4;
- }
-
- if (n_left >= 32)
- {
- T = aesni_gcm_ghash_blocks (T, kd, in, 2);
- n_left -= 32;
- in += 2;
- }
-
- if (n_left >= 16)
- {
- T = aesni_gcm_ghash_blocks (T, kd, in, 1);
- n_left -= 16;
- in += 1;
- }
-
- if (n_left)
- {
- __m128i r = aesni_gcm_load_partial ((__m128i *) in, n_left);
- T = ghash_mul (aesni_gcm_bswap (r) ^ T, kd->Hi[0]);
- }
- return T;
-}
-
-static_always_inline __m128i
-aesni_gcm_calc (__m128i T, aes_gcm_key_data_t * kd, __m128i * d,
- __m128i * Y, u32 * ctr, __m128i * inv, __m128i * outv,
- int rounds, int n, int last_block_bytes, int with_ghash,
- int is_encrypt)
-{
- __m128i r[n];
- ghash_data_t _gd = { }, *gd = &_gd;
- const __m128i *k = kd->Ke;
- int hidx = is_encrypt ? 4 : n, didx = 0;
-
- _mm_prefetch (inv + 4, _MM_HINT_T0);
-
- /* AES rounds 0 and 1 */
- aesni_gcm_enc_first_round (r, Y, ctr, k[0], n);
- aesni_gcm_enc_round (r, k[1], n);
-
- /* load data - decrypt round */
- if (is_encrypt == 0)
- aesni_gcm_load (d, inv, n, last_block_bytes);
-
- /* GHASH multiply block 1 */
- if (with_ghash)
- ghash_mul_first (gd, aesni_gcm_bswap (d[didx++]) ^ T, kd->Hi[--hidx]);
-
- /* AES rounds 2 and 3 */
- aesni_gcm_enc_round (r, k[2], n);
- aesni_gcm_enc_round (r, k[3], n);
-
- /* GHASH multiply block 2 */
- if (with_ghash && hidx)
- ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);
-
- /* AES rounds 4 and 5 */
- aesni_gcm_enc_round (r, k[4], n);
- aesni_gcm_enc_round (r, k[5], n);
-
- /* GHASH multiply block 3 */
- if (with_ghash && hidx)
- ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);
-
- /* AES rounds 6 and 7 */
- aesni_gcm_enc_round (r, k[6], n);
- aesni_gcm_enc_round (r, k[7], n);
-
- /* GHASH multiply block 4 */
- if (with_ghash && hidx)
- ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);
-
- /* AES rounds 8 and 9 */
- aesni_gcm_enc_round (r, k[8], n);
- aesni_gcm_enc_round (r, k[9], n);
-
- /* GHASH reduce 1st step */
- if (with_ghash)
- ghash_reduce (gd);
-
- /* load data - encrypt round */
- if (is_encrypt)
- aesni_gcm_load (d, inv, n, last_block_bytes);
-
- /* GHASH reduce 2nd step */
- if (with_ghash)
- ghash_reduce2 (gd);
-
- /* AES last round(s) */
- aesni_gcm_enc_last_round (r, d, k, rounds, n);
-
- /* store data */
- aesni_gcm_store (d, outv, n, last_block_bytes);
-
- /* GHASH final step */
- if (with_ghash)
- T = ghash_final (gd);
-
- return T;
-}
-
-static_always_inline __m128i
-aesni_gcm_calc_double (__m128i T, aes_gcm_key_data_t * kd, __m128i * d,
- __m128i * Y, u32 * ctr, __m128i * inv, __m128i * outv,
- int rounds, int is_encrypt)
-{
- __m128i r[4];
- ghash_data_t _gd, *gd = &_gd;
- const __m128i *k = kd->Ke;
-
- /* AES rounds 0 and 1 */
- aesni_gcm_enc_first_round (r, Y, ctr, k[0], 4);
- aesni_gcm_enc_round (r, k[1], 4);
-
- /* load 4 blocks of data - decrypt round */
- if (is_encrypt == 0)
- aesni_gcm_load (d, inv, 4, 0);
-
- /* GHASH multiply block 0 */
- ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[7]);
-
- /* AES rounds 2 and 3 */
- aesni_gcm_enc_round (r, k[2], 4);
- aesni_gcm_enc_round (r, k[3], 4);
-
- /* GHASH multiply block 1 */
- ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[6]);
-
- /* AES rounds 4 and 5 */
- aesni_gcm_enc_round (r, k[4], 4);
- aesni_gcm_enc_round (r, k[5], 4);
-
- /* GHASH multiply block 2 */
- ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[5]);
-
- /* AES rounds 6 and 7 */
- aesni_gcm_enc_round (r, k[6], 4);
- aesni_gcm_enc_round (r, k[7], 4);
-
- /* GHASH multiply block 3 */
- ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[4]);
-
- /* AES rounds 8 and 9 */
- aesni_gcm_enc_round (r, k[8], 4);
- aesni_gcm_enc_round (r, k[9], 4);
-
- /* load 4 blocks of data - encrypt round */
- if (is_encrypt)
- aesni_gcm_load (d, inv, 4, 0);
-
- /* AES last round(s) */
- aesni_gcm_enc_last_round (r, d, k, rounds, 4);
-
- /* store 4 blocks of data */
- aesni_gcm_store (d, outv, 4, 0);
-
-  /* load next 4 blocks of data - decrypt round */
- if (is_encrypt == 0)
- aesni_gcm_load (d, inv + 4, 4, 0);
-
- /* GHASH multiply block 4 */
- ghash_mul_next (gd, aesni_gcm_bswap (d[0]), kd->Hi[3]);
-
- /* AES rounds 0, 1 and 2 */
- aesni_gcm_enc_first_round (r, Y, ctr, k[0], 4);
- aesni_gcm_enc_round (r, k[1], 4);
- aesni_gcm_enc_round (r, k[2], 4);
-
- /* GHASH multiply block 5 */
- ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[2]);
-
- /* AES rounds 3 and 4 */
- aesni_gcm_enc_round (r, k[3], 4);
- aesni_gcm_enc_round (r, k[4], 4);
-
- /* GHASH multiply block 6 */
- ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[1]);
-
- /* AES rounds 5 and 6 */
- aesni_gcm_enc_round (r, k[5], 4);
- aesni_gcm_enc_round (r, k[6], 4);
-
- /* GHASH multiply block 7 */
- ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[0]);
-
- /* AES rounds 7 and 8 */
- aesni_gcm_enc_round (r, k[7], 4);
- aesni_gcm_enc_round (r, k[8], 4);
-
- /* GHASH reduce 1st step */
- ghash_reduce (gd);
-
- /* AES round 9 */
- aesni_gcm_enc_round (r, k[9], 4);
-
- /* load data - encrypt round */
- if (is_encrypt)
- aesni_gcm_load (d, inv + 4, 4, 0);
-
- /* GHASH reduce 2nd step */
- ghash_reduce2 (gd);
-
- /* AES last round(s) */
- aesni_gcm_enc_last_round (r, d, k, rounds, 4);
-
- /* store data */
- aesni_gcm_store (d, outv + 4, 4, 0);
-
- /* GHASH final step */
- return ghash_final (gd);
-}
-
-static_always_inline __m128i
-aesni_gcm_ghash_last (__m128i T, aes_gcm_key_data_t * kd, __m128i * d,
- int n_blocks, int n_bytes)
-{
- ghash_data_t _gd, *gd = &_gd;
-
- if (n_bytes)
- d[n_blocks - 1] = aesni_gcm_byte_mask (d[n_blocks - 1], n_bytes);
-
- ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[n_blocks - 1]);
- if (n_blocks > 1)
- ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[n_blocks - 2]);
- if (n_blocks > 2)
- ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[n_blocks - 3]);
- if (n_blocks > 3)
- ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[n_blocks - 4]);
- ghash_reduce (gd);
- ghash_reduce2 (gd);
- return ghash_final (gd);
-}
-
-
-static_always_inline __m128i
-aesni_gcm_enc (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 * in,
- const u8 * out, u32 n_left, int rounds)
-{
- __m128i *inv = (__m128i *) in, *outv = (__m128i *) out;
- __m128i d[4];
- u32 ctr = 1;
-
- if (n_left == 0)
- return T;
-
- if (n_left < 64)
- {
- if (n_left > 48)
- {
- n_left &= 0x0f;
- aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
- /* with_ghash */ 0, /* is_encrypt */ 1);
- return aesni_gcm_ghash_last (T, kd, d, 4, n_left);
- }
- else if (n_left > 32)
- {
- n_left &= 0x0f;
- aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
- /* with_ghash */ 0, /* is_encrypt */ 1);
- return aesni_gcm_ghash_last (T, kd, d, 3, n_left);
- }
- else if (n_left > 16)
- {
- n_left &= 0x0f;
- aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
- /* with_ghash */ 0, /* is_encrypt */ 1);
- return aesni_gcm_ghash_last (T, kd, d, 2, n_left);
- }
- else
- {
- n_left &= 0x0f;
- aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
- /* with_ghash */ 0, /* is_encrypt */ 1);
- return aesni_gcm_ghash_last (T, kd, d, 1, n_left);
- }
- }
-
- aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
- /* with_ghash */ 0, /* is_encrypt */ 1);
-
- /* next */
- n_left -= 64;
- outv += 4;
- inv += 4;
-
- while (n_left >= 128)
- {
- T = aesni_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
- /* is_encrypt */ 1);
-
- /* next */
- n_left -= 128;
- outv += 8;
- inv += 8;
- }
-
- if (n_left >= 64)
- {
- T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
- /* with_ghash */ 1, /* is_encrypt */ 1);
-
- /* next */
- n_left -= 64;
- outv += 4;
- inv += 4;
- }
-
- if (n_left == 0)
- return aesni_gcm_ghash_last (T, kd, d, 4, 0);
-
- if (n_left > 48)
- {
- n_left &= 0x0f;
- T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
- /* with_ghash */ 1, /* is_encrypt */ 1);
- return aesni_gcm_ghash_last (T, kd, d, 4, n_left);
- }
-
- if (n_left > 32)
- {
- n_left &= 0x0f;
- T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
- /* with_ghash */ 1, /* is_encrypt */ 1);
- return aesni_gcm_ghash_last (T, kd, d, 3, n_left);
- }
-
- if (n_left > 16)
- {
- n_left &= 0x0f;
- T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
- /* with_ghash */ 1, /* is_encrypt */ 1);
- return aesni_gcm_ghash_last (T, kd, d, 2, n_left);
- }
-
- n_left &= 0x0f;
- T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
- /* with_ghash */ 1, /* is_encrypt */ 1);
- return aesni_gcm_ghash_last (T, kd, d, 1, n_left);
-}
-
-static_always_inline __m128i
-aesni_gcm_dec (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 * in,
- const u8 * out, u32 n_left, int rounds)
-{
- __m128i *inv = (__m128i *) in, *outv = (__m128i *) out;
- __m128i d[8];
- u32 ctr = 1;
-
- while (n_left >= 128)
- {
- T = aesni_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
- /* is_encrypt */ 0);
-
- /* next */
- n_left -= 128;
- outv += 8;
- inv += 8;
- }
-
- if (n_left >= 64)
- {
- T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0, 1, 0);
-
- /* next */
- n_left -= 64;
- outv += 4;
- inv += 4;
- }
-
- if (n_left == 0)
- return T;
-
- if (n_left > 48)
- return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4,
- n_left - 48,
- /* with_ghash */ 1, /* is_encrypt */ 0);
-
- if (n_left > 32)
- return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3,
- n_left - 32,
- /* with_ghash */ 1, /* is_encrypt */ 0);
-
- if (n_left > 16)
- return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2,
- n_left - 16,
- /* with_ghash */ 1, /* is_encrypt */ 0);
-
- return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
- /* with_ghash */ 1, /* is_encrypt */ 0);
-}
-
-static_always_inline int
-aes_gcm (const u8 * in, u8 * out, const u8 * addt, const u8 * iv, u8 * tag,
- u32 data_bytes, u32 aad_bytes, u8 tag_len, aes_gcm_key_data_t * kd,
- int aes_rounds, int is_encrypt)
-{
- int i;
- __m128i r, Y0, T = { };
- ghash_data_t _gd, *gd = &_gd;
-
- _mm_prefetch (iv, _MM_HINT_T0);
- _mm_prefetch (in, _MM_HINT_T0);
- _mm_prefetch (in + CLIB_CACHE_LINE_BYTES, _MM_HINT_T0);
-
- /* calculate ghash for AAD - optimized for ipsec common cases */
- if (aad_bytes == 8)
- T = aesni_gcm_ghash (T, kd, (__m128i *) addt, 8);
- else if (aad_bytes == 12)
- T = aesni_gcm_ghash (T, kd, (__m128i *) addt, 12);
- else
- T = aesni_gcm_ghash (T, kd, (__m128i *) addt, aad_bytes);
-
-  /* initialize counter */
- Y0 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) iv);
- Y0 = _mm_insert_epi32 (Y0, clib_host_to_net_u32 (1), 3);
-
-  /* ghash and encrypt/decrypt */
- if (is_encrypt)
- T = aesni_gcm_enc (T, kd, Y0, in, out, data_bytes, aes_rounds);
- else
- T = aesni_gcm_dec (T, kd, Y0, in, out, data_bytes, aes_rounds);
-
- _mm_prefetch (tag, _MM_HINT_T0);
-
- /* Finalize ghash */
- r[0] = data_bytes;
- r[1] = aad_bytes;
-
- /* bytes to bits */
- r <<= 3;
-
- /* interleaved computation of final ghash and E(Y0, k) */
- ghash_mul_first (gd, r ^ T, kd->Hi[0]);
- r = kd->Ke[0] ^ Y0;
- for (i = 1; i < 5; i += 1)
- r = _mm_aesenc_si128 (r, kd->Ke[i]);
- ghash_reduce (gd);
- ghash_reduce2 (gd);
- for (; i < 9; i += 1)
- r = _mm_aesenc_si128 (r, kd->Ke[i]);
- T = ghash_final (gd);
- for (; i < aes_rounds; i += 1)
- r = _mm_aesenc_si128 (r, kd->Ke[i]);
- r = _mm_aesenclast_si128 (r, kd->Ke[aes_rounds]);
- T = aesni_gcm_bswap (T) ^ r;
-
- /* tag_len 16 -> 0 */
- tag_len &= 0xf;
-
- if (is_encrypt)
- {
- /* store tag */
- if (tag_len)
- aesni_gcm_store_partial ((__m128i *) tag, T, (1 << tag_len) - 1);
- else
- _mm_storeu_si128 ((__m128i *) tag, T);
- }
- else
- {
- /* check tag */
- u16 tag_mask = tag_len ? (1 << tag_len) - 1 : 0xffff;
- r = _mm_loadu_si128 ((__m128i *) tag);
- if (_mm_movemask_epi8 (r == T) != tag_mask)
- return 0;
- }
- return 1;
-}
-
-static_always_inline u32
-aesni_ops_enc_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
- u32 n_ops, aesni_key_size_t ks)
-{
- crypto_ia32_main_t *cm = &crypto_ia32_main;
- vnet_crypto_op_t *op = ops[0];
- aes_gcm_key_data_t *kd;
- u32 n_left = n_ops;
-
-
-next:
- kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
- aes_gcm (op->src, op->dst, op->aad, op->iv, op->tag, op->len, op->aad_len,
- op->tag_len, kd, AESNI_KEY_ROUNDS (ks), /* is_encrypt */ 1);
- op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
-
- if (--n_left)
- {
- op += 1;
- goto next;
- }
-
- return n_ops;
-}
-
-static_always_inline u32
-aesni_ops_dec_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
- u32 n_ops, aesni_key_size_t ks)
-{
- crypto_ia32_main_t *cm = &crypto_ia32_main;
- vnet_crypto_op_t *op = ops[0];
- aes_gcm_key_data_t *kd;
- u32 n_left = n_ops;
- int rv;
-
-next:
- kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
- rv = aes_gcm (op->src, op->dst, op->aad, op->iv, op->tag, op->len,
- op->aad_len, op->tag_len, kd, AESNI_KEY_ROUNDS (ks),
- /* is_encrypt */ 0);
-
- if (rv)
- {
- op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
- }
- else
- {
- op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
- n_ops--;
- }
-
- if (--n_left)
- {
- op += 1;
- goto next;
- }
-
- return n_ops;
-}
-
-static_always_inline void *
-aesni_gcm_key_exp (vnet_crypto_key_t * key, aesni_key_size_t ks)
-{
- aes_gcm_key_data_t *kd;
- __m128i H;
- int i;
-
- kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
-
- /* expand AES key */
- aes_key_expand ((__m128i *) kd->Ke, key->data, ks);
-
- /* pre-calculate H */
- H = kd->Ke[0];
- for (i = 1; i < AESNI_KEY_ROUNDS (ks); i += 1)
- H = _mm_aesenc_si128 (H, kd->Ke[i]);
- H = _mm_aesenclast_si128 (H, kd->Ke[i]);
- H = aesni_gcm_bswap (H);
- ghash_precompute (H, (__m128i *) kd->Hi, 8);
- return kd;
-}
-
-#define foreach_aesni_gcm_handler_type _(128) _(192) _(256)
-
-#define _(x) \
-static u32 aesni_ops_dec_aes_gcm_##x \
-(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return aesni_ops_dec_aes_gcm (vm, ops, n_ops, AESNI_KEY_##x); } \
-static u32 aesni_ops_enc_aes_gcm_##x \
-(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return aesni_ops_enc_aes_gcm (vm, ops, n_ops, AESNI_KEY_##x); } \
-static void * aesni_gcm_key_exp_##x (vnet_crypto_key_t *key) \
-{ return aesni_gcm_key_exp (key, AESNI_KEY_##x); }
-
-foreach_aesni_gcm_handler_type;
-#undef _
-
-clib_error_t *
-#ifdef __VAES__
-crypto_ia32_aesni_gcm_init_vaes (vlib_main_t * vm)
-#elif __AVX512F__
-crypto_ia32_aesni_gcm_init_avx512 (vlib_main_t * vm)
-#elif __AVX2__
-crypto_ia32_aesni_gcm_init_avx2 (vlib_main_t * vm)
-#else
-crypto_ia32_aesni_gcm_init_sse42 (vlib_main_t * vm)
-#endif
-{
- crypto_ia32_main_t *cm = &crypto_ia32_main;
-
-#define _(x) \
- vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
- VNET_CRYPTO_OP_AES_##x##_GCM_ENC, \
- aesni_ops_enc_aes_gcm_##x); \
- vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
- VNET_CRYPTO_OP_AES_##x##_GCM_DEC, \
- aesni_ops_dec_aes_gcm_##x); \
- cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_GCM] = aesni_gcm_key_exp_##x;
- foreach_aesni_gcm_handler_type;
-#undef _
- return 0;
-}
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
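
Two details in the deleted aes_gcm.c deserve spelling out. First, the counter
handling: aesni_gcm_enc_first_round() keeps the counter block Y byte-swapped
(big-endian), so the 32-bit block counter occupies bytes 12..15 and its least
significant byte is byte 15; adding last_byte_one (1ULL << 56 in the high
lane) therefore increments exactly that byte, which is valid only while it
cannot carry -- hence the (u8) ctr[0] < 256 - n_blocks guard. A scalar model
of that fast path (this sketch shows only the final counter state; the real
code increments once per block because every intermediate value keys one AES
block):

static void
gcm_ctr_inc_sketch (unsigned char y[16], unsigned int *ctr, int n_blocks)
{
  if ((unsigned char) *ctr < 256 - n_blocks)
    {
      y[15] += n_blocks;	/* cheap path: one-byte add, no carry possible */
      *ctr += n_blocks;
    }
  else
    {
      *ctr += n_blocks;
      y[12] = (unsigned char) (*ctr >> 24);	/* slow path: re-store the   */
      y[13] = (unsigned char) (*ctr >> 16);	/* whole counter big-endian, */
      y[14] = (unsigned char) (*ctr >> 8);	/* as _mm_insert_epi32 with  */
      y[15] = (unsigned char) *ctr;		/* clib_host_to_net_u32 above */
    }
}

Second, note how aesni_gcm_calc() interleaves the GHASH multiply and reduce
steps between pairs of AES rounds so both kinds of execution units stay busy;
the split of the reduction into ghash_reduce() / ghash_reduce2() /
ghash_final() exists precisely to make that interleaving possible.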
diff --git a/src/plugins/crypto_ia32/aesni.h b/src/plugins/crypto_ia32/aesni.h
deleted file mode 100644
index ece61c13cf3..00000000000
--- a/src/plugins/crypto_ia32/aesni.h
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- *------------------------------------------------------------------
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *------------------------------------------------------------------
- */
-
-#ifndef __aesni_h__
-#define __aesni_h__
-
-typedef enum
-{
- AESNI_KEY_128 = 0,
- AESNI_KEY_192 = 1,
- AESNI_KEY_256 = 2,
-} aesni_key_size_t;
-
-#define AESNI_KEY_ROUNDS(x) (10 + x * 2)
-#define AESNI_KEY_BYTES(x) (16 + x * 8)
-
-
-/* AES-NI based AES key expansion based on code samples from
- Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper
- (323641-001) */
-
-static_always_inline __m128i
-aes128_key_assist (__m128i r1, __m128i r2)
-{
- r1 ^= _mm_slli_si128 (r1, 4);
- r1 ^= _mm_slli_si128 (r1, 4);
- r1 ^= _mm_slli_si128 (r1, 4);
- return r1 ^ _mm_shuffle_epi32 (r2, 0xff);
-}
-
-static_always_inline void
-aes128_key_expand (__m128i * k, u8 * key)
-{
- k[0] = _mm_loadu_si128 ((const __m128i *) key);
- k[1] = aes128_key_assist (k[0], _mm_aeskeygenassist_si128 (k[0], 0x01));
- k[2] = aes128_key_assist (k[1], _mm_aeskeygenassist_si128 (k[1], 0x02));
- k[3] = aes128_key_assist (k[2], _mm_aeskeygenassist_si128 (k[2], 0x04));
- k[4] = aes128_key_assist (k[3], _mm_aeskeygenassist_si128 (k[3], 0x08));
- k[5] = aes128_key_assist (k[4], _mm_aeskeygenassist_si128 (k[4], 0x10));
- k[6] = aes128_key_assist (k[5], _mm_aeskeygenassist_si128 (k[5], 0x20));
- k[7] = aes128_key_assist (k[6], _mm_aeskeygenassist_si128 (k[6], 0x40));
- k[8] = aes128_key_assist (k[7], _mm_aeskeygenassist_si128 (k[7], 0x80));
- k[9] = aes128_key_assist (k[8], _mm_aeskeygenassist_si128 (k[8], 0x1b));
- k[10] = aes128_key_assist (k[9], _mm_aeskeygenassist_si128 (k[9], 0x36));
-}
-
-static_always_inline void
-aes192_key_assist (__m128i * r1, __m128i * r2, __m128i * r3)
-{
- __m128i r;
- *r1 ^= r = _mm_slli_si128 (*r1, 0x4);
- *r1 ^= r = _mm_slli_si128 (r, 0x4);
- *r1 ^= _mm_slli_si128 (r, 0x4);
- *r1 ^= _mm_shuffle_epi32 (*r2, 0x55);
- *r3 ^= _mm_slli_si128 (*r3, 0x4);
- *r3 ^= *r2 = _mm_shuffle_epi32 (*r1, 0xff);
-}
-
-static_always_inline void
-aes192_key_expand (__m128i * k, u8 * key)
-{
- __m128i r1, r2, r3;
-
- k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
-  /* load the 24-byte key as 2 * 16 bytes (and ignore the last 8 bytes) */
- r3 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) (key + 16));
-
- k[1] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x1);
- aes192_key_assist (&r1, &r2, &r3);
- k[1] = (__m128i) _mm_shuffle_pd ((__m128d) k[1], (__m128d) r1, 0);
- k[2] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
- r2 = _mm_aeskeygenassist_si128 (r3, 0x2);
- aes192_key_assist (&r1, &r2, &r3);
- k[3] = r1;
-
- k[4] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x4);
- aes192_key_assist (&r1, &r2, &r3);
- k[4] = (__m128i) _mm_shuffle_pd ((__m128d) k[4], (__m128d) r1, 0);
- k[5] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
- r2 = _mm_aeskeygenassist_si128 (r3, 0x8);
- aes192_key_assist (&r1, &r2, &r3);
- k[6] = r1;
-
- k[7] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x10);
- aes192_key_assist (&r1, &r2, &r3);
- k[7] = (__m128i) _mm_shuffle_pd ((__m128d) k[7], (__m128d) r1, 0);
- k[8] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
- r2 = _mm_aeskeygenassist_si128 (r3, 0x20);
- aes192_key_assist (&r1, &r2, &r3);
- k[9] = r1;
-
- k[10] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x40);
- aes192_key_assist (&r1, &r2, &r3);
- k[10] = (__m128i) _mm_shuffle_pd ((__m128d) k[10], (__m128d) r1, 0);
- k[11] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
- r2 = _mm_aeskeygenassist_si128 (r3, 0x80);
- aes192_key_assist (&r1, &r2, &r3);
- k[12] = r1;
-}
-
-static_always_inline void
-aes256_key_assist1 (__m128i * r1, __m128i * r2)
-{
- __m128i r;
- *r1 ^= r = _mm_slli_si128 (*r1, 0x4);
- *r1 ^= r = _mm_slli_si128 (r, 0x4);
- *r1 ^= _mm_slli_si128 (r, 0x4);
- *r1 ^= *r2 = _mm_shuffle_epi32 (*r2, 0xff);
-}
-
-static_always_inline void
-aes256_key_assist2 (__m128i r1, __m128i * r3)
-{
- __m128i r;
- *r3 ^= r = _mm_slli_si128 (*r3, 0x4);
- *r3 ^= r = _mm_slli_si128 (r, 0x4);
- *r3 ^= _mm_slli_si128 (r, 0x4);
- *r3 ^= _mm_shuffle_epi32 (_mm_aeskeygenassist_si128 (r1, 0x0), 0xaa);
-}
-
-static_always_inline void
-aes256_key_expand (__m128i * k, u8 * key)
-{
- __m128i r1, r2, r3;
- k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
- k[1] = r3 = _mm_loadu_si128 ((__m128i *) (key + 16));
- r2 = _mm_aeskeygenassist_si128 (k[1], 0x01);
- aes256_key_assist1 (&r1, &r2);
- k[2] = r1;
- aes256_key_assist2 (r1, &r3);
- k[3] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x02);
- aes256_key_assist1 (&r1, &r2);
- k[4] = r1;
- aes256_key_assist2 (r1, &r3);
- k[5] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x04);
- aes256_key_assist1 (&r1, &r2);
- k[6] = r1;
- aes256_key_assist2 (r1, &r3);
- k[7] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x08);
- aes256_key_assist1 (&r1, &r2);
- k[8] = r1;
- aes256_key_assist2 (r1, &r3);
- k[9] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x10);
- aes256_key_assist1 (&r1, &r2);
- k[10] = r1;
- aes256_key_assist2 (r1, &r3);
- k[11] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x20);
- aes256_key_assist1 (&r1, &r2);
- k[12] = r1;
- aes256_key_assist2 (r1, &r3);
- k[13] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x40);
- aes256_key_assist1 (&r1, &r2);
- k[14] = r1;
-}
-
-static_always_inline void
-aes_key_expand (__m128i * k, u8 * key, aesni_key_size_t ks)
-{
- switch (ks)
- {
- case AESNI_KEY_128:
- aes128_key_expand (k, key);
- break;
- case AESNI_KEY_192:
- aes192_key_expand (k, key);
- break;
- case AESNI_KEY_256:
- aes256_key_expand (k, key);
- break;
- }
-}
-
-
-static_always_inline void
-aes_key_enc_to_dec (__m128i * k, aesni_key_size_t ks)
-{
- int rounds = AESNI_KEY_ROUNDS (ks);
- __m128i r;
-
- r = k[rounds];
- k[rounds] = k[0];
- k[0] = r;
-
- for (int i = 1; i < (rounds / 2); i++)
- {
- r = k[rounds - i];
- k[rounds - i] = _mm_aesimc_si128 (k[i]);
- k[i] = _mm_aesimc_si128 (r);
- }
-
- k[rounds / 2] = _mm_aesimc_si128 (k[rounds / 2]);
-}
-
-#endif /* __aesni_h__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
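
The key-size enum doubles as the operand of two linear formulas, which is easy
to sanity-check. A quick worked example of the deleted macros (parenthesized
here for macro hygiene; the values are the standard AES parameters):

#define AESNI_KEY_ROUNDS(x) (10 + (x) * 2)
#define AESNI_KEY_BYTES(x)  (16 + (x) * 8)

_Static_assert (AESNI_KEY_ROUNDS (0) == 10, "AES-128: 10 rounds");
_Static_assert (AESNI_KEY_ROUNDS (1) == 12, "AES-192: 12 rounds");
_Static_assert (AESNI_KEY_ROUNDS (2) == 14, "AES-256: 14 rounds");
_Static_assert (AESNI_KEY_BYTES (0) == 16, "AES-128: 16-byte key");
_Static_assert (AESNI_KEY_BYTES (1) == 24, "AES-192: 24-byte key");
_Static_assert (AESNI_KEY_BYTES (2) == 32, "AES-256: 32-byte key");

This is also why aesni_cbc_key_exp() and aes_key_enc_to_dec() iterate over
AESNI_KEY_ROUNDS (ks) + 1 entries: an R-round AES key schedule holds R + 1
128-bit round keys.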
diff --git a/src/plugins/crypto_ia32/crypto_ia32.h b/src/plugins/crypto_ia32/crypto_ia32.h
deleted file mode 100644
index e2b30071854..00000000000
--- a/src/plugins/crypto_ia32/crypto_ia32.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- *------------------------------------------------------------------
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *------------------------------------------------------------------
- */
-
-#ifndef __crypto_ia32_h__
-#define __crypto_ia32_h__
-
-typedef void *(crypto_ia32_key_fn_t) (vnet_crypto_key_t * key);
-
-typedef struct
-{
- __m128i cbc_iv[4];
-} crypto_ia32_per_thread_data_t;
-
-typedef struct
-{
- u32 crypto_engine_index;
- crypto_ia32_per_thread_data_t *per_thread_data;
- crypto_ia32_key_fn_t *key_fn[VNET_CRYPTO_N_ALGS];
- void **key_data;
-} crypto_ia32_main_t;
-
-extern crypto_ia32_main_t crypto_ia32_main;
-
-clib_error_t *crypto_ia32_aesni_cbc_init_sse42 (vlib_main_t * vm);
-clib_error_t *crypto_ia32_aesni_cbc_init_avx2 (vlib_main_t * vm);
-clib_error_t *crypto_ia32_aesni_cbc_init_avx512 (vlib_main_t * vm);
-clib_error_t *crypto_ia32_aesni_cbc_init_vaes (vlib_main_t * vm);
-
-clib_error_t *crypto_ia32_aesni_gcm_init_sse42 (vlib_main_t * vm);
-clib_error_t *crypto_ia32_aesni_gcm_init_avx2 (vlib_main_t * vm);
-clib_error_t *crypto_ia32_aesni_gcm_init_avx512 (vlib_main_t * vm);
-clib_error_t *crypto_ia32_aesni_gcm_init_vaes (vlib_main_t * vm);
-#endif /* __crypto_ia32_h__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/plugins/crypto_ia32/ghash.h b/src/plugins/crypto_ia32/ghash.h
deleted file mode 100644
index 0b2f629e28a..00000000000
--- a/src/plugins/crypto_ia32/ghash.h
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- *------------------------------------------------------------------
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *------------------------------------------------------------------
- */
-
-/*
- *------------------------------------------------------------------
- * Copyright(c) 2018, Intel Corporation All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *------------------------------------------------------------------
- */
-
-/*
- * Based on work by: Shay Gueron, Michael E. Kounavis, Erdinc Ozturk,
- * Vinodh Gopal, James Guilford, Tomasz Kantecki
- *
- * References:
- * [1] Vinodh Gopal et. al. Optimized Galois-Counter-Mode Implementation on
- * Intel Architecture Processors. August, 2010
- * [2] Erdinc Ozturk et. al. Enabling High-Performance Galois-Counter-Mode on
- * Intel Architecture Processors. October, 2012.
- * [3] intel-ipsec-mb library, https://github.com/01org/intel-ipsec-mb.git
- *
- * Definitions:
- * GF Galois Extension Field GF(2^128) - finite field where elements are
- * represented as polynomials with coefficients in GF(2) with the
- * highest degree of 127. Polynomials are represented as 128-bit binary
- * numbers where each bit represents one coefficient.
- * e.g. polynomial x^5 + x^3 + x + 1 is represented in binary 101011.
- * H hash key (128 bit)
- * POLY irreducible polynomial x^127 + x^7 + x^2 + x + 1
- * RPOLY irreducible polynomial x^128 + x^127 + x^126 + x^121 + 1
- * + addition in GF, which equals to XOR operation
- * * multiplication in GF
- *
- * GF multiplication consists of 2 steps:
- * - carry-less multiplication of two 128-bit operands into 256-bit result
- * - reduction of 256-bit result into 128-bit with modulo POLY
- *
- * GHash is calculated on 128-bit blocks of data according to the following
- * formula:
- * GH = (GH + data) * hash_key
- *
- * To avoid bit-reflection of data, this code uses GF multiplication
- * with the reversed polynomial:
- * a * b * x^-127 mod RPOLY
- *
- * To improve computation speed, the table Hi is precomputed with powers of H',
- * where H' is calculated as H<<1 mod RPOLY.
- * This allows us to improve performance by deferring reduction. For example,
- * to calculate the ghash of 4 128-bit blocks of data (b0, b1, b2, b3), we can do:
- *
- * __i128 Hi[4];
- * ghash_precompute (H, Hi, 4);
- *
- * ghash_data_t _gd, *gd = &_gd;
- * ghash_mul_first (gd, GH ^ b0, Hi[3]);
- * ghash_mul_next (gd, b1, Hi[2]);
- * ghash_mul_next (gd, b2, Hi[1]);
- * ghash_mul_next (gd, b3, Hi[0]);
- * ghash_reduce (gd);
- * ghash_reduce2 (gd);
- * GH = ghash_final (gd);
- *
- * Reduction step is split into 3 functions so it can be better interleaved
- * with other code, (i.e. with AES computation).
- */
-
-#ifndef __ghash_h__
-#define __ghash_h__
-
-/* on AVX-512 systems we can save a clock cycle by using ternary logic
- instruction to calculate a XOR b XOR c */
-static_always_inline __m128i
-ghash_xor3 (__m128i a, __m128i b, __m128i c)
-{
-#if defined (__AVX512F__)
- return _mm_ternarylogic_epi32 (a, b, c, 0x96);
-#endif
- return a ^ b ^ c;
-}
-
-typedef struct
-{
- __m128i mid, hi, lo, tmp_lo, tmp_hi;
- int pending;
-} ghash_data_t;
-
-static const __m128i ghash_poly = { 1, 0xC200000000000000 };
-static const __m128i ghash_poly2 = { 0x1C2000000, 0xC200000000000000 };
-
-static_always_inline void
-ghash_mul_first (ghash_data_t * gd, __m128i a, __m128i b)
-{
- /* a1 * b1 */
- gd->hi = _mm_clmulepi64_si128 (a, b, 0x11);
- /* a0 * b0 */
- gd->lo = _mm_clmulepi64_si128 (a, b, 0x00);
- /* a0 * b1 ^ a1 * b0 */
- gd->mid = (_mm_clmulepi64_si128 (a, b, 0x01) ^
- _mm_clmulepi64_si128 (a, b, 0x10));
-
- /* set gd->pending to 0 so next invocation of ghash_mul_next(...) knows that
- there is no pending data in tmp_lo and tmp_hi */
- gd->pending = 0;
-}
-
-static_always_inline void
-ghash_mul_next (ghash_data_t * gd, __m128i a, __m128i b)
-{
- /* a1 * b1 */
- __m128i hi = _mm_clmulepi64_si128 (a, b, 0x11);
- /* a0 * b0 */
- __m128i lo = _mm_clmulepi64_si128 (a, b, 0x00);
-
-  /* this branch will be optimized out by the compiler, and it allows us to
-     reduce the number of XOR operations by using ternary logic */
- if (gd->pending)
- {
-      /* there is pending data from previous invocation so we can XOR */
- gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, hi);
- gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, lo);
- gd->pending = 0;
- }
- else
- {
-      /* there is no pending data from previous invocation so we postpone XOR */
- gd->tmp_hi = hi;
- gd->tmp_lo = lo;
- gd->pending = 1;
- }
-
- /* gd->mid ^= a0 * b1 ^ a1 * b0 */
- gd->mid = ghash_xor3 (gd->mid,
- _mm_clmulepi64_si128 (a, b, 0x01),
- _mm_clmulepi64_si128 (a, b, 0x10));
-}
-
-static_always_inline void
-ghash_reduce (ghash_data_t * gd)
-{
- __m128i r;
-
- /* Final combination:
- gd->lo ^= gd->mid << 64
- gd->hi ^= gd->mid >> 64 */
- __m128i midl = _mm_slli_si128 (gd->mid, 8);
- __m128i midr = _mm_srli_si128 (gd->mid, 8);
-
- if (gd->pending)
- {
- gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, midl);
- gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, midr);
- }
- else
- {
- gd->lo ^= midl;
- gd->hi ^= midr;
- }
-
- r = _mm_clmulepi64_si128 (ghash_poly2, gd->lo, 0x01);
- gd->lo ^= _mm_slli_si128 (r, 8);
-}
-
-static_always_inline void
-ghash_reduce2 (ghash_data_t * gd)
-{
- gd->tmp_lo = _mm_clmulepi64_si128 (ghash_poly2, gd->lo, 0x00);
- gd->tmp_hi = _mm_clmulepi64_si128 (ghash_poly2, gd->lo, 0x10);
-}
-
-static_always_inline __m128i
-ghash_final (ghash_data_t * gd)
-{
- return ghash_xor3 (gd->hi, _mm_srli_si128 (gd->tmp_lo, 4),
- _mm_slli_si128 (gd->tmp_hi, 4));
-}
-
-static_always_inline __m128i
-ghash_mul (__m128i a, __m128i b)
-{
- ghash_data_t _gd, *gd = &_gd;
- ghash_mul_first (gd, a, b);
- ghash_reduce (gd);
- ghash_reduce2 (gd);
- return ghash_final (gd);
-}
-
-static_always_inline void
-ghash_precompute (__m128i H, __m128i * Hi, int count)
-{
- __m128i r;
-  /* calculate H<<1 mod poly from the hash key */
- r = _mm_srli_epi64 (H, 63);
- H = _mm_slli_epi64 (H, 1);
- H |= _mm_slli_si128 (r, 8);
- r = _mm_srli_si128 (r, 8);
- r = _mm_shuffle_epi32 (r, 0x24);
- /* *INDENT-OFF* */
- r = _mm_cmpeq_epi32 (r, (__m128i) (u32x4) {1, 0, 0, 1});
- /* *INDENT-ON* */
- Hi[0] = H ^ (r & ghash_poly);
-
- /* calculate H^(i + 1) */
- for (int i = 1; i < count; i++)
- Hi[i] = ghash_mul (Hi[0], Hi[i - 1]);
-}
-
-#endif /* __ghash_h__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
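
One step the header comment above leaves implicit is why data blocks pair with
descending powers of H'. Unrolling the per-block formula
GH = (GH + data) * hash_key over four blocks, in the comment's own notation:

  GH' = ((((GH + b0) * H + b1) * H + b2) * H + b3) * H
      = (GH + b0) * H^4 + b1 * H^3 + b2 * H^2 + b3 * H

In the expanded form the four carry-less multiplications no longer depend on
one another, so a single modular reduction at the end suffices. That is
exactly the ghash_mul_first() / ghash_mul_next() / ghash_reduce() /
ghash_reduce2() / ghash_final() split, and it is why the first block
multiplies Hi[3] (H'^4) while the last multiplies Hi[0] (H'^1).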
diff --git a/src/plugins/crypto_ia32/main.c b/src/plugins/crypto_ia32/main.c
deleted file mode 100644
index bcfd7fa6ffd..00000000000
--- a/src/plugins/crypto_ia32/main.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- *------------------------------------------------------------------
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *------------------------------------------------------------------
- */
-
-#include <vlib/vlib.h>
-#include <vnet/plugin/plugin.h>
-#include <vnet/crypto/crypto.h>
-#include <crypto_ia32/crypto_ia32.h>
-
-crypto_ia32_main_t crypto_ia32_main;
-
-static void
-crypto_ia32_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
- vnet_crypto_key_index_t idx)
-{
- vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
- crypto_ia32_main_t *cm = &crypto_ia32_main;
-
- if (cm->key_fn[key->alg] == 0)
- return;
-
- if (kop == VNET_CRYPTO_KEY_OP_DEL)
- {
- if (idx >= vec_len (cm->key_data))
- return;
-
- if (cm->key_data[idx] == 0)
- return;
-
- clib_mem_free_s (cm->key_data[idx]);
- cm->key_data[idx] = 0;
- return;
- }
-
- vec_validate_aligned (cm->key_data, idx, CLIB_CACHE_LINE_BYTES);
-
- if (kop == VNET_CRYPTO_KEY_OP_MODIFY && cm->key_data[idx])
- {
- clib_mem_free_s (cm->key_data[idx]);
- }
-
- cm->key_data[idx] = cm->key_fn[key->alg] (key);
-}
-
-clib_error_t *
-crypto_ia32_init (vlib_main_t * vm)
-{
- crypto_ia32_main_t *cm = &crypto_ia32_main;
- vlib_thread_main_t *tm = vlib_get_thread_main ();
- clib_error_t *error = 0;
-
- if (clib_cpu_supports_x86_aes () == 0)
- return 0;
-
- vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
- CLIB_CACHE_LINE_BYTES);
-
- cm->crypto_engine_index =
- vnet_crypto_register_engine (vm, "ia32", 100,
- "Intel IA32 ISA Optimized Crypto");
-
- if (clib_cpu_supports_vaes ())
- error = crypto_ia32_aesni_cbc_init_vaes (vm);
- else if (clib_cpu_supports_avx512f ())
- error = crypto_ia32_aesni_cbc_init_avx512 (vm);
- else if (clib_cpu_supports_avx2 ())
- error = crypto_ia32_aesni_cbc_init_avx2 (vm);
- else
- error = crypto_ia32_aesni_cbc_init_sse42 (vm);
-
- if (error)
- goto error;
-
- if (clib_cpu_supports_pclmulqdq ())
- {
- if (clib_cpu_supports_vaes ())
- error = crypto_ia32_aesni_gcm_init_vaes (vm);
- else if (clib_cpu_supports_avx512f ())
- error = crypto_ia32_aesni_gcm_init_avx512 (vm);
- else if (clib_cpu_supports_avx2 ())
- error = crypto_ia32_aesni_gcm_init_avx2 (vm);
- else
- error = crypto_ia32_aesni_gcm_init_sse42 (vm);
-
- if (error)
- goto error;
- }
-
- vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
- crypto_ia32_key_handler);
-
-
-error:
- if (error)
- vec_free (cm->per_thread_data);
-
- return error;
-}
-
-/* *INDENT-OFF* */
-VLIB_INIT_FUNCTION (crypto_ia32_init) =
-{
- .runs_after = VLIB_INITS ("vnet_crypto_init"),
-};
-/* *INDENT-ON* */
-
-#include <vpp/app/version.h>
-
-/* *INDENT-OFF* */
-VLIB_PLUGIN_REGISTER () = {
- .version = VPP_BUILD_VER,
- .description = "Intel IA32 Software Crypto Engine",
-};
-/* *INDENT-ON* */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
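
The organizing trick across the deleted files is worth a closing note:
aes_cbc.c and aes_gcm.c are each compiled once per -march variant (see the
CMakeLists.txt at the top of this diff), the #ifdef __VAES__ / __AVX512F__ /
__AVX2__ chain gives every object file a uniquely named init symbol, and
crypto_ia32_init() probes the CPU once at startup to register exactly one
variant. A minimal sketch of the pattern, with hypothetical names (cpu_has_*
standing in for VPP's clib_cpu_supports_*() probes):

/* kernel.c -- built once per ISA variant; the -march flag decides which
   branch is compiled, so each object file exports a different symbol
   (hypothetical names for illustration) */
#if defined(__AVX512F__)
int kernel_init_avx512 (void) { /* AVX-512 code path */ return 0; }
#elif defined(__AVX2__)
int kernel_init_avx2 (void) { /* AVX2 code path */ return 0; }
#else
int kernel_init_sse42 (void) { /* baseline code path */ return 0; }
#endif

/* dispatcher.c -- built once; links against all variant objects and picks
   the best one the running CPU supports, as crypto_ia32_init() does */
extern int kernel_init_avx512 (void);
extern int kernel_init_avx2 (void);
extern int kernel_init_sse42 (void);
extern int cpu_has_avx512f (void);	/* hypothetical feature probes */
extern int cpu_has_avx2 (void);

int
kernel_init (void)
{
  if (cpu_has_avx512f ())
    return kernel_init_avx512 ();
  if (cpu_has_avx2 ())
    return kernel_init_avx2 ();
  return kernel_init_sse42 ();
}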