Diffstat (limited to 'src/plugins/crypto_native')
 src/plugins/crypto_native/CMakeLists.txt  |  11
 src/plugins/crypto_native/aes_cbc.c       | 305
 src/plugins/crypto_native/aes_ctr.c       |  64
 src/plugins/crypto_native/aes_gcm.c       |  71
 src/plugins/crypto_native/crypto_native.h |  67
 src/plugins/crypto_native/main.c          | 117
 src/plugins/crypto_native/sha2.c          | 198
 7 files changed, 452 insertions(+), 381 deletions(-)
diff --git a/src/plugins/crypto_native/CMakeLists.txt b/src/plugins/crypto_native/CMakeLists.txt
index 9b6091610d9..5499ed4608a 100644
--- a/src/plugins/crypto_native/CMakeLists.txt
+++ b/src/plugins/crypto_native/CMakeLists.txt
@@ -12,8 +12,8 @@
# limitations under the License.
if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
- list(APPEND VARIANTS "slm\;-march=silvermont")
- list(APPEND VARIANTS "hsw\;-march=haswell")
+ list(APPEND VARIANTS "slm\;-march=silvermont -maes")
+ list(APPEND VARIANTS "hsw\;-march=haswell -maes")
if(compiler_flag_march_skylake_avx512 AND compiler_flag_mprefer_vector_width_256)
list(APPEND VARIANTS "skx\;-march=skylake-avx512 -mprefer-vector-width=256")
endif()
@@ -23,16 +23,15 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
if(compiler_flag_march_alderlake)
list(APPEND VARIANTS "adl\;-march=alderlake -mprefer-vector-width=256")
endif()
- set (COMPILE_FILES aes_cbc.c aes_gcm.c aes_ctr.c)
- set (COMPILE_OPTS -Wall -fno-common -maes)
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
list(APPEND VARIANTS "armv8\;-march=armv8.1-a+crc+crypto")
- set (COMPILE_FILES aes_cbc.c aes_gcm.c aes_ctr.c)
- set (COMPILE_OPTS -Wall -fno-common)
endif()
+set (COMPILE_FILES aes_cbc.c aes_gcm.c aes_ctr.c sha2.c)
+set (COMPILE_OPTS -Wall -fno-common)
+
if (NOT VARIANTS)
return()
endif()
diff --git a/src/plugins/crypto_native/aes_cbc.c b/src/plugins/crypto_native/aes_cbc.c
index c84390c3108..c981897783f 100644
--- a/src/plugins/crypto_native/aes_cbc.c
+++ b/src/plugins/crypto_native/aes_cbc.c
@@ -25,191 +25,40 @@
#pragma GCC optimize ("O3")
#endif
-#if defined(__VAES__) && defined(__AVX512F__)
-#define u8xN u8x64
-#define u32xN u32x16
-#define u32xN_min_scalar u32x16_min_scalar
-#define u32xN_is_all_zero u32x16_is_all_zero
-#define u32xN_splat u32x16_splat
-#elif defined(__VAES__)
-#define u8xN u8x32
-#define u32xN u32x8
-#define u32xN_min_scalar u32x8_min_scalar
-#define u32xN_is_all_zero u32x8_is_all_zero
-#define u32xN_splat u32x8_splat
-#else
-#define u8xN u8x16
-#define u32xN u32x4
-#define u32xN_min_scalar u32x4_min_scalar
-#define u32xN_is_all_zero u32x4_is_all_zero
-#define u32xN_splat u32x4_splat
-#endif
+#define CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE 256
static_always_inline u32
aes_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
u32 n_ops, aes_key_size_t ks)
{
crypto_native_main_t *cm = &crypto_native_main;
- int rounds = AES_KEY_ROUNDS (ks);
- u8 placeholder[8192];
- u32 i, j, count, n_left = n_ops;
- u32xN placeholder_mask = { };
- u32xN len = { };
- vnet_crypto_key_index_t key_index[4 * N_AES_LANES];
- u8 *src[4 * N_AES_LANES] = {};
- u8 *dst[4 * N_AES_LANES] = {};
- u8xN r[4] = {};
- u8xN k[15][4] = {};
-
- for (i = 0; i < 4 * N_AES_LANES; i++)
- key_index[i] = ~0;
-
-more:
- for (i = 0; i < 4 * N_AES_LANES; i++)
- if (len[i] == 0)
- {
- if (n_left == 0)
- {
- /* no more work to enqueue, so we are enqueueing placeholder buffer */
- src[i] = dst[i] = placeholder;
- len[i] = sizeof (placeholder);
- placeholder_mask[i] = 0;
- }
- else
- {
- u8x16 t = aes_block_load (ops[0]->iv);
- ((u8x16 *) r)[i] = t;
-
- src[i] = ops[0]->src;
- dst[i] = ops[0]->dst;
- len[i] = ops[0]->len;
- placeholder_mask[i] = ~0;
- if (key_index[i] != ops[0]->key_index)
- {
- aes_cbc_key_data_t *kd;
- key_index[i] = ops[0]->key_index;
- kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
- for (j = 0; j < rounds + 1; j++)
- ((u8x16 *) k[j])[i] = kd->encrypt_key[j];
- }
- ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
- n_left--;
- ops++;
- }
- }
-
- count = u32xN_min_scalar (len);
-
- ASSERT (count % 16 == 0);
-
- for (i = 0; i < count; i += 16)
+ u32 i, n_left = n_ops;
+ uword key_indices[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+ u8 *plaintext[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+ uword oplen[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+ u8 *iv[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+ u8 *ciphertext[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+
+ while (n_left)
{
-#if defined(__VAES__) && defined(__AVX512F__)
- r[0] = u8x64_xor3 (r[0], aes_block_load_x4 (src, i), k[0][0]);
- r[1] = u8x64_xor3 (r[1], aes_block_load_x4 (src + 4, i), k[0][1]);
- r[2] = u8x64_xor3 (r[2], aes_block_load_x4 (src + 8, i), k[0][2]);
- r[3] = u8x64_xor3 (r[3], aes_block_load_x4 (src + 12, i), k[0][3]);
-
- for (j = 1; j < rounds; j++)
+ i = 0;
+ while (n_left && i < CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE)
{
- r[0] = aes_enc_round_x4 (r[0], k[j][0]);
- r[1] = aes_enc_round_x4 (r[1], k[j][1]);
- r[2] = aes_enc_round_x4 (r[2], k[j][2]);
- r[3] = aes_enc_round_x4 (r[3], k[j][3]);
+ key_indices[i] = ops[0]->key_index;
+ plaintext[i] = ops[0]->src;
+ ciphertext[i] = ops[0]->dst;
+ oplen[i] = ops[0]->len;
+ iv[i] = ops[0]->iv;
+ ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
+ ops++;
+ n_left--;
+ i++;
}
- r[0] = aes_enc_last_round_x4 (r[0], k[j][0]);
- r[1] = aes_enc_last_round_x4 (r[1], k[j][1]);
- r[2] = aes_enc_last_round_x4 (r[2], k[j][2]);
- r[3] = aes_enc_last_round_x4 (r[3], k[j][3]);
-
- aes_block_store_x4 (dst, i, r[0]);
- aes_block_store_x4 (dst + 4, i, r[1]);
- aes_block_store_x4 (dst + 8, i, r[2]);
- aes_block_store_x4 (dst + 12, i, r[3]);
-#elif defined(__VAES__)
- r[0] = u8x32_xor3 (r[0], aes_block_load_x2 (src, i), k[0][0]);
- r[1] = u8x32_xor3 (r[1], aes_block_load_x2 (src + 2, i), k[0][1]);
- r[2] = u8x32_xor3 (r[2], aes_block_load_x2 (src + 4, i), k[0][2]);
- r[3] = u8x32_xor3 (r[3], aes_block_load_x2 (src + 6, i), k[0][3]);
-
- for (j = 1; j < rounds; j++)
- {
- r[0] = aes_enc_round_x2 (r[0], k[j][0]);
- r[1] = aes_enc_round_x2 (r[1], k[j][1]);
- r[2] = aes_enc_round_x2 (r[2], k[j][2]);
- r[3] = aes_enc_round_x2 (r[3], k[j][3]);
- }
- r[0] = aes_enc_last_round_x2 (r[0], k[j][0]);
- r[1] = aes_enc_last_round_x2 (r[1], k[j][1]);
- r[2] = aes_enc_last_round_x2 (r[2], k[j][2]);
- r[3] = aes_enc_last_round_x2 (r[3], k[j][3]);
-
- aes_block_store_x2 (dst, i, r[0]);
- aes_block_store_x2 (dst + 2, i, r[1]);
- aes_block_store_x2 (dst + 4, i, r[2]);
- aes_block_store_x2 (dst + 6, i, r[3]);
-#else
-#if __x86_64__
- r[0] = u8x16_xor3 (r[0], aes_block_load (src[0] + i), k[0][0]);
- r[1] = u8x16_xor3 (r[1], aes_block_load (src[1] + i), k[0][1]);
- r[2] = u8x16_xor3 (r[2], aes_block_load (src[2] + i), k[0][2]);
- r[3] = u8x16_xor3 (r[3], aes_block_load (src[3] + i), k[0][3]);
-
- for (j = 1; j < rounds; j++)
- {
- r[0] = aes_enc_round_x1 (r[0], k[j][0]);
- r[1] = aes_enc_round_x1 (r[1], k[j][1]);
- r[2] = aes_enc_round_x1 (r[2], k[j][2]);
- r[3] = aes_enc_round_x1 (r[3], k[j][3]);
- }
-
- r[0] = aes_enc_last_round_x1 (r[0], k[j][0]);
- r[1] = aes_enc_last_round_x1 (r[1], k[j][1]);
- r[2] = aes_enc_last_round_x1 (r[2], k[j][2]);
- r[3] = aes_enc_last_round_x1 (r[3], k[j][3]);
-
- aes_block_store (dst[0] + i, r[0]);
- aes_block_store (dst[1] + i, r[1]);
- aes_block_store (dst[2] + i, r[2]);
- aes_block_store (dst[3] + i, r[3]);
-#else
- r[0] ^= aes_block_load (src[0] + i);
- r[1] ^= aes_block_load (src[1] + i);
- r[2] ^= aes_block_load (src[2] + i);
- r[3] ^= aes_block_load (src[3] + i);
- for (j = 0; j < rounds - 1; j++)
- {
- r[0] = vaesmcq_u8 (vaeseq_u8 (r[0], k[j][0]));
- r[1] = vaesmcq_u8 (vaeseq_u8 (r[1], k[j][1]));
- r[2] = vaesmcq_u8 (vaeseq_u8 (r[2], k[j][2]));
- r[3] = vaesmcq_u8 (vaeseq_u8 (r[3], k[j][3]));
- }
- r[0] = vaeseq_u8 (r[0], k[j][0]) ^ k[rounds][0];
- r[1] = vaeseq_u8 (r[1], k[j][1]) ^ k[rounds][1];
- r[2] = vaeseq_u8 (r[2], k[j][2]) ^ k[rounds][2];
- r[3] = vaeseq_u8 (r[3], k[j][3]) ^ k[rounds][3];
- aes_block_store (dst[0] + i, r[0]);
- aes_block_store (dst[1] + i, r[1]);
- aes_block_store (dst[2] + i, r[2]);
- aes_block_store (dst[3] + i, r[3]);
-#endif
-#endif
+ clib_aes_cbc_encrypt_multi ((aes_cbc_key_data_t **) cm->key_data,
+ key_indices, plaintext, oplen, iv, ks,
+ ciphertext, i);
}
-
- len -= u32xN_splat (count);
-
- for (i = 0; i < 4 * N_AES_LANES; i++)
- {
- src[i] += count;
- dst[i] += count;
- }
-
- if (n_left > 0)
- goto more;
-
- if (!u32xN_is_all_zero (len & placeholder_mask))
- goto more;
-
return n_ops;
}
@@ -249,18 +98,30 @@ decrypt:
return n_ops;
}
-#define foreach_aes_cbc_handler_type _(128) _(192) _(256)
-
-#define _(x) \
-static u32 aes_ops_dec_aes_cbc_##x \
-(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
-static u32 aes_ops_enc_aes_cbc_##x \
-(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
-
-foreach_aes_cbc_handler_type;
-#undef _
+static int
+aes_cbc_cpu_probe ()
+{
+#if defined(__VAES__) && defined(__AVX512F__)
+ if (clib_cpu_supports_vaes () && clib_cpu_supports_avx512f ())
+ return 50;
+#elif defined(__VAES__)
+ if (clib_cpu_supports_vaes ())
+ return 40;
+#elif defined(__AVX512F__)
+ if (clib_cpu_supports_avx512f ())
+ return 30;
+#elif defined(__AVX2__)
+ if (clib_cpu_supports_avx2 ())
+ return 20;
+#elif __AES__
+ if (clib_cpu_supports_aes ())
+ return 10;
+#elif __aarch64__
+ if (clib_cpu_supports_aarch64_aes ())
+ return 10;
+#endif
+ return -1;
+}
static void *
aes_cbc_key_exp_128 (vnet_crypto_key_t *key)
@@ -289,43 +150,39 @@ aes_cbc_key_exp_256 (vnet_crypto_key_t *key)
return kd;
}
-#include <fcntl.h>
+#define foreach_aes_cbc_handler_type _ (128) _ (192) _ (256)
+
+#define _(x) \
+ static u32 aes_ops_enc_aes_cbc_##x (vlib_main_t *vm, \
+ vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##x); \
+ } \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##x##_cbc_enc) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
+ .fn = aes_ops_enc_aes_cbc_##x, \
+ .probe = aes_cbc_cpu_probe, \
+ }; \
+ \
+ static u32 aes_ops_dec_aes_cbc_##x (vlib_main_t *vm, \
+ vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##x); \
+ } \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##x##_cbc_dec) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
+ .fn = aes_ops_dec_aes_cbc_##x, \
+ .probe = aes_cbc_cpu_probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_KEY_HANDLER (aes_##x##_cbc) = { \
+ .alg_id = VNET_CRYPTO_ALG_AES_##x##_CBC, \
+ .key_fn = aes_cbc_key_exp_##x, \
+ .probe = aes_cbc_cpu_probe, \
+ };
-clib_error_t *
-#if defined(__VAES__) && defined(__AVX512F__)
-crypto_native_aes_cbc_init_icl (vlib_main_t *vm)
-#elif defined(__VAES__)
-crypto_native_aes_cbc_init_adl (vlib_main_t *vm)
-#elif __AVX512F__
-crypto_native_aes_cbc_init_skx (vlib_main_t * vm)
-#elif __aarch64__
-crypto_native_aes_cbc_init_neon (vlib_main_t * vm)
-#elif __AVX2__
-crypto_native_aes_cbc_init_hsw (vlib_main_t * vm)
-#else
-crypto_native_aes_cbc_init_slm (vlib_main_t * vm)
-#endif
-{
- crypto_native_main_t *cm = &crypto_native_main;
-
-#define _(x) \
- vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
- VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
- aes_ops_enc_aes_cbc_##x); \
- vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
- VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
- aes_ops_dec_aes_cbc_##x); \
- cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_CBC] = aes_cbc_key_exp_##x;
- foreach_aes_cbc_handler_type;
+foreach_aes_cbc_handler_type;
#undef _
- return 0;
-}
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/plugins/crypto_native/aes_ctr.c b/src/plugins/crypto_native/aes_ctr.c
index 3a219510419..d02a7b69b9d 100644
--- a/src/plugins/crypto_native/aes_ctr.c
+++ b/src/plugins/crypto_native/aes_ctr.c
@@ -81,32 +81,50 @@ aes_ctr_key_exp (vnet_crypto_key_t *key, aes_key_size_t ks)
foreach_aes_ctr_handler_type;
#undef _
-clib_error_t *
+static int
+probe ()
+{
#if defined(__VAES__) && defined(__AVX512F__)
-crypto_native_aes_ctr_init_icl (vlib_main_t *vm)
+ if (clib_cpu_supports_vaes () && clib_cpu_supports_avx512f ())
+ return 50;
#elif defined(__VAES__)
-crypto_native_aes_ctr_init_adl (vlib_main_t *vm)
-#elif __AVX512F__
-crypto_native_aes_ctr_init_skx (vlib_main_t *vm)
-#elif __AVX2__
-crypto_native_aes_ctr_init_hsw (vlib_main_t *vm)
+ if (clib_cpu_supports_vaes ())
+ return 40;
+#elif defined(__AVX512F__)
+ if (clib_cpu_supports_avx512f ())
+ return 30;
+#elif defined(__AVX2__)
+ if (clib_cpu_supports_avx2 ())
+ return 20;
+#elif __AES__
+ if (clib_cpu_supports_aes ())
+ return 10;
#elif __aarch64__
-crypto_native_aes_ctr_init_neon (vlib_main_t *vm)
-#else
-crypto_native_aes_ctr_init_slm (vlib_main_t *vm)
+ if (clib_cpu_supports_aarch64_aes ())
+ return 10;
#endif
-{
- crypto_native_main_t *cm = &crypto_native_main;
+ return -1;
+}
-#define _(x) \
- vnet_crypto_register_ops_handlers ( \
- vm, cm->crypto_engine_index, VNET_CRYPTO_OP_AES_##x##_CTR_ENC, \
- aes_ops_aes_ctr_##x, aes_ops_aes_ctr_##x##_chained); \
- vnet_crypto_register_ops_handlers ( \
- vm, cm->crypto_engine_index, VNET_CRYPTO_OP_AES_##x##_CTR_DEC, \
- aes_ops_aes_ctr_##x, aes_ops_aes_ctr_##x##_chained); \
- cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_CTR] = aes_ctr_key_exp_##x;
- foreach_aes_ctr_handler_type;
+#define _(b) \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##b##_ctr_enc) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##b##_CTR_ENC, \
+ .fn = aes_ops_aes_ctr_##b, \
+ .cfn = aes_ops_aes_ctr_##b##_chained, \
+ .probe = probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##b##_ctr_dec) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##b##_CTR_DEC, \
+ .fn = aes_ops_aes_ctr_##b, \
+ .cfn = aes_ops_aes_ctr_##b##_chained, \
+ .probe = probe, \
+ }; \
+ CRYPTO_NATIVE_KEY_HANDLER (aes_##b##_ctr) = { \
+ .alg_id = VNET_CRYPTO_ALG_AES_##b##_CTR, \
+ .key_fn = aes_ctr_key_exp_##b, \
+ .probe = probe, \
+ };
+
+_ (128) _ (192) _ (256)
#undef _
- return 0;
-}
diff --git a/src/plugins/crypto_native/aes_gcm.c b/src/plugins/crypto_native/aes_gcm.c
index 6589d411975..220788d4e97 100644
--- a/src/plugins/crypto_native/aes_gcm.c
+++ b/src/plugins/crypto_native/aes_gcm.c
@@ -118,40 +118,49 @@ aes_gcm_key_exp (vnet_crypto_key_t *key, aes_key_size_t ks)
foreach_aes_gcm_handler_type;
#undef _
-clib_error_t *
+static int
+probe ()
+{
#if defined(__VAES__) && defined(__AVX512F__)
-crypto_native_aes_gcm_init_icl (vlib_main_t *vm)
+ if (clib_cpu_supports_vpclmulqdq () && clib_cpu_supports_vaes () &&
+ clib_cpu_supports_avx512f ())
+ return 50;
#elif defined(__VAES__)
-crypto_native_aes_gcm_init_adl (vlib_main_t *vm)
-#elif __AVX512F__
-crypto_native_aes_gcm_init_skx (vlib_main_t *vm)
-#elif __AVX2__
-crypto_native_aes_gcm_init_hsw (vlib_main_t *vm)
+ if (clib_cpu_supports_vpclmulqdq () && clib_cpu_supports_vaes ())
+ return 40;
+#elif defined(__AVX512F__)
+ if (clib_cpu_supports_pclmulqdq () && clib_cpu_supports_avx512f ())
+ return 30;
+#elif defined(__AVX2__)
+ if (clib_cpu_supports_pclmulqdq () && clib_cpu_supports_avx2 ())
+ return 20;
+#elif __AES__
+ if (clib_cpu_supports_pclmulqdq () && clib_cpu_supports_aes ())
+ return 10;
#elif __aarch64__
-crypto_native_aes_gcm_init_neon (vlib_main_t *vm)
-#else
-crypto_native_aes_gcm_init_slm (vlib_main_t *vm)
+ if (clib_cpu_supports_aarch64_aes ())
+ return 10;
#endif
-{
- crypto_native_main_t *cm = &crypto_native_main;
-
-#define _(x) \
- vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
- VNET_CRYPTO_OP_AES_##x##_GCM_ENC, \
- aes_ops_enc_aes_gcm_##x); \
- vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
- VNET_CRYPTO_OP_AES_##x##_GCM_DEC, \
- aes_ops_dec_aes_gcm_##x); \
- cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_GCM] = aes_gcm_key_exp_##x;
- foreach_aes_gcm_handler_type;
-#undef _
- return 0;
+ return -1;
}
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
+#define _(b) \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##b##_gcm_enc) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##b##_GCM_ENC, \
+ .fn = aes_ops_enc_aes_gcm_##b, \
+ .probe = probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##b##_gcm_dec) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##b##_GCM_DEC, \
+ .fn = aes_ops_dec_aes_gcm_##b, \
+ .probe = probe, \
+ }; \
+ CRYPTO_NATIVE_KEY_HANDLER (aes_##b##_gcm) = { \
+ .alg_id = VNET_CRYPTO_ALG_AES_##b##_GCM, \
+ .key_fn = aes_gcm_key_exp_##b, \
+ .probe = probe, \
+ };
+
+_ (128) _ (192) _ (256)
+#undef _
diff --git a/src/plugins/crypto_native/crypto_native.h b/src/plugins/crypto_native/crypto_native.h
index c15b8cbd1da..3d18e8cabd0 100644
--- a/src/plugins/crypto_native/crypto_native.h
+++ b/src/plugins/crypto_native/crypto_native.h
@@ -19,33 +19,66 @@
#define __crypto_native_h__
typedef void *(crypto_native_key_fn_t) (vnet_crypto_key_t * key);
+typedef int (crypto_native_variant_probe_t) ();
+
+typedef struct crypto_native_op_handler
+{
+ struct crypto_native_op_handler *next;
+ vnet_crypto_op_id_t op_id;
+ vnet_crypto_ops_handler_t *fn;
+ vnet_crypto_chained_ops_handler_t *cfn;
+ crypto_native_variant_probe_t *probe;
+ int priority;
+} crypto_native_op_handler_t;
+
+typedef struct crypto_native_key_handler
+{
+ struct crypto_native_key_handler *next;
+ vnet_crypto_alg_t alg_id;
+ crypto_native_key_fn_t *key_fn;
+ crypto_native_variant_probe_t *probe;
+ int priority;
+} crypto_native_key_handler_t;
typedef struct
{
u32 crypto_engine_index;
crypto_native_key_fn_t *key_fn[VNET_CRYPTO_N_ALGS];
void **key_data;
+ crypto_native_op_handler_t *op_handlers;
+ crypto_native_key_handler_t *key_handlers;
} crypto_native_main_t;
extern crypto_native_main_t crypto_native_main;
-#define foreach_crypto_native_march_variant \
- _ (slm) _ (hsw) _ (skx) _ (icl) _ (adl) _ (neon)
-
-#define _(v) \
- clib_error_t __clib_weak *crypto_native_aes_cbc_init_##v (vlib_main_t *vm); \
- clib_error_t __clib_weak *crypto_native_aes_ctr_init_##v (vlib_main_t *vm); \
- clib_error_t __clib_weak *crypto_native_aes_gcm_init_##v (vlib_main_t *vm);
-
-foreach_crypto_native_march_variant;
-#undef _
+#define CRYPTO_NATIVE_OP_HANDLER(x) \
+ static crypto_native_op_handler_t __crypto_native_op_handler_##x; \
+ static void __clib_constructor __crypto_native_op_handler_cb_##x (void) \
+ { \
+ crypto_native_main_t *cm = &crypto_native_main; \
+ int priority = __crypto_native_op_handler_##x.probe (); \
+ if (priority >= 0) \
+ { \
+ __crypto_native_op_handler_##x.priority = priority; \
+ __crypto_native_op_handler_##x.next = cm->op_handlers; \
+ cm->op_handlers = &__crypto_native_op_handler_##x; \
+ } \
+ } \
+ static crypto_native_op_handler_t __crypto_native_op_handler_##x
+#define CRYPTO_NATIVE_KEY_HANDLER(x) \
+ static crypto_native_key_handler_t __crypto_native_key_handler_##x; \
+ static void __clib_constructor __crypto_native_key_handler_cb_##x (void) \
+ { \
+ crypto_native_main_t *cm = &crypto_native_main; \
+ int priority = __crypto_native_key_handler_##x.probe (); \
+ if (priority >= 0) \
+ { \
+ __crypto_native_key_handler_##x.priority = priority; \
+ __crypto_native_key_handler_##x.next = cm->key_handlers; \
+ cm->key_handlers = &__crypto_native_key_handler_##x; \
+ } \
+ } \
+ static crypto_native_key_handler_t __crypto_native_key_handler_##x
#endif /* __crypto_native_h__ */
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
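Note: the two registration macros rely on a forward declaration, a load-time constructor, and a trailing declaration that is completed by the initializer written at the invocation site. Roughly what one instantiation from aes_cbc.c expands to (simplified and reformatted; not part of the patch):

static crypto_native_op_handler_t __crypto_native_op_handler_aes_128_cbc_enc;

static void __clib_constructor
__crypto_native_op_handler_cb_aes_128_cbc_enc (void)
{
  /* runs when the march-variant object is loaded, before crypto_native_init */
  crypto_native_main_t *cm = &crypto_native_main;
  int priority = __crypto_native_op_handler_aes_128_cbc_enc.probe ();
  if (priority >= 0)
    {
      __crypto_native_op_handler_aes_128_cbc_enc.priority = priority;
      __crypto_native_op_handler_aes_128_cbc_enc.next = cm->op_handlers;
      cm->op_handlers = &__crypto_native_op_handler_aes_128_cbc_enc;
    }
}

/* the macro ends with "static crypto_native_op_handler_t ..." so that the
 * "= { ... };" written after CRYPTO_NATIVE_OP_HANDLER () becomes its definition */
static crypto_native_op_handler_t __crypto_native_op_handler_aes_128_cbc_enc = {
  .op_id = VNET_CRYPTO_OP_AES_128_CBC_ENC,
  .fn = aes_ops_enc_aes_cbc_128,
  .probe = aes_cbc_cpu_probe,
};
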
diff --git a/src/plugins/crypto_native/main.c b/src/plugins/crypto_native/main.c
index 8a59be319b9..2bc0d98f196 100644
--- a/src/plugins/crypto_native/main.c
+++ b/src/plugins/crypto_native/main.c
@@ -63,95 +63,52 @@ clib_error_t *
crypto_native_init (vlib_main_t * vm)
{
crypto_native_main_t *cm = &crypto_native_main;
- clib_error_t *error = 0;
- if (clib_cpu_supports_x86_aes () == 0 &&
- clib_cpu_supports_aarch64_aes () == 0)
+ if (cm->op_handlers == 0)
return 0;
cm->crypto_engine_index =
vnet_crypto_register_engine (vm, "native", 100,
"Native ISA Optimized Crypto");
- if (0);
-#if __x86_64__
- else if (crypto_native_aes_cbc_init_icl && clib_cpu_supports_vaes () &&
- clib_cpu_supports_avx512f ())
- error = crypto_native_aes_cbc_init_icl (vm);
- else if (crypto_native_aes_cbc_init_adl && clib_cpu_supports_vaes ())
- error = crypto_native_aes_cbc_init_adl (vm);
- else if (crypto_native_aes_cbc_init_skx && clib_cpu_supports_avx512f ())
- error = crypto_native_aes_cbc_init_skx (vm);
- else if (crypto_native_aes_cbc_init_hsw && clib_cpu_supports_avx2 ())
- error = crypto_native_aes_cbc_init_hsw (vm);
- else if (crypto_native_aes_cbc_init_slm)
- error = crypto_native_aes_cbc_init_slm (vm);
-#endif
-#if __aarch64__
- else if (crypto_native_aes_cbc_init_neon)
- error = crypto_native_aes_cbc_init_neon (vm);
-#endif
- else
- error = clib_error_return (0, "No AES CBC implemenation available");
-
- if (error)
- return error;
-
- if (0)
- ;
-#if __x86_64__
- else if (crypto_native_aes_ctr_init_icl && clib_cpu_supports_vaes () &&
- clib_cpu_supports_avx512f ())
- error = crypto_native_aes_ctr_init_icl (vm);
- else if (crypto_native_aes_ctr_init_adl && clib_cpu_supports_vaes ())
- error = crypto_native_aes_ctr_init_adl (vm);
- else if (crypto_native_aes_ctr_init_skx && clib_cpu_supports_avx512f ())
- error = crypto_native_aes_ctr_init_skx (vm);
- else if (crypto_native_aes_ctr_init_hsw && clib_cpu_supports_avx2 ())
- error = crypto_native_aes_ctr_init_hsw (vm);
- else if (crypto_native_aes_ctr_init_slm)
- error = crypto_native_aes_ctr_init_slm (vm);
-#endif
-#if __aarch64__
- else if (crypto_native_aes_ctr_init_neon)
- error = crypto_native_aes_ctr_init_neon (vm);
-#endif
- else
- error = clib_error_return (0, "No AES CTR implemenation available");
-
- if (error)
- return error;
-
-#if __x86_64__
- if (clib_cpu_supports_pclmulqdq ())
+ crypto_native_op_handler_t *oh = cm->op_handlers;
+ crypto_native_key_handler_t *kh = cm->key_handlers;
+ crypto_native_op_handler_t **best_by_op_id = 0;
+ crypto_native_key_handler_t **best_by_alg_id = 0;
+
+ while (oh)
{
- if (crypto_native_aes_gcm_init_icl && clib_cpu_supports_vaes () &&
- clib_cpu_supports_avx512f ())
- error = crypto_native_aes_gcm_init_icl (vm);
- else if (crypto_native_aes_gcm_init_adl && clib_cpu_supports_vaes ())
- error = crypto_native_aes_gcm_init_adl (vm);
- else if (crypto_native_aes_gcm_init_skx && clib_cpu_supports_avx512f ())
- error = crypto_native_aes_gcm_init_skx (vm);
- else if (crypto_native_aes_gcm_init_hsw && clib_cpu_supports_avx2 ())
- error = crypto_native_aes_gcm_init_hsw (vm);
- else if (crypto_native_aes_gcm_init_slm)
- error = crypto_native_aes_gcm_init_slm (vm);
- else
- error = clib_error_return (0, "No AES GCM implemenation available");
-
- if (error)
- return error;
+ vec_validate (best_by_op_id, oh->op_id);
+
+ if (best_by_op_id[oh->op_id] == 0 ||
+ best_by_op_id[oh->op_id]->priority < oh->priority)
+ best_by_op_id[oh->op_id] = oh;
+
+ oh = oh->next;
}
-#endif
-#if __aarch64__
- if (crypto_native_aes_gcm_init_neon)
- error = crypto_native_aes_gcm_init_neon (vm);
- else
- error = clib_error_return (0, "No AES GCM implemenation available");
-
- if (error)
- return error;
-#endif
+
+ while (kh)
+ {
+ vec_validate (best_by_alg_id, kh->alg_id);
+
+ if (best_by_alg_id[kh->alg_id] == 0 ||
+ best_by_alg_id[kh->alg_id]->priority < kh->priority)
+ best_by_alg_id[kh->alg_id] = kh;
+
+ kh = kh->next;
+ }
+
+ vec_foreach_pointer (oh, best_by_op_id)
+ if (oh)
+ vnet_crypto_register_ops_handlers (vm, cm->crypto_engine_index,
+ oh->op_id, oh->fn, oh->cfn);
+
+ vec_foreach_pointer (kh, best_by_alg_id)
+ if (kh)
+ cm->key_fn[kh->alg_id] = kh->key_fn;
+
+ vec_free (best_by_op_id);
+ vec_free (best_by_alg_id);
vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
crypto_native_key_handler);
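Note: every march variant of the plugin runs the constructors above, so crypto_native_init() may see several handlers for the same op id and keeps the one whose probe reported the highest priority. A standalone toy sketch of that selection over the constructor-built list (hypothetical names and priorities, not part of the patch):

#include <stdio.h>

typedef struct handler
{
  struct handler *next;
  int op_id;
  int priority;
  const char *name;
} handler_t;

int
main (void)
{
  /* two variants registered for the same op id; "slm" was pushed last,
     so it sits at the list head, like cm->op_handlers above */
  handler_t icl = { .next = 0, .op_id = 1, .priority = 50, .name = "icl" };
  handler_t slm = { .next = &icl, .op_id = 1, .priority = 10, .name = "slm" };
  handler_t *best = 0;

  for (handler_t *h = &slm; h; h = h->next)
    if (best == 0 || h->priority > best->priority)
      best = h;

  printf ("selected variant: %s\n", best->name); /* prints "icl" */
  return 0;
}
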
diff --git a/src/plugins/crypto_native/sha2.c b/src/plugins/crypto_native/sha2.c
new file mode 100644
index 00000000000..6787f629104
--- /dev/null
+++ b/src/plugins/crypto_native/sha2.c
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2024 Cisco Systems, Inc.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/crypto/crypto.h>
+#include <crypto_native/crypto_native.h>
+#include <vppinfra/crypto/sha2.h>
+
+static_always_inline u32
+crypto_native_ops_hash_sha2 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops, vnet_crypto_op_chunk_t *chunks,
+ clib_sha2_type_t type, int maybe_chained)
+{
+ vnet_crypto_op_t *op = ops[0];
+ clib_sha2_ctx_t ctx;
+ u32 n_left = n_ops;
+
+next:
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
+ clib_sha2_init (&ctx, type);
+ for (int j = 0; j < op->n_chunks; j++, chp++)
+ clib_sha2_update (&ctx, chp->src, chp->len);
+ clib_sha2_final (&ctx, op->digest);
+ }
+ else
+ clib_sha2 (type, op->src, op->len, op->digest);
+
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
+ if (--n_left)
+ {
+ op += 1;
+ goto next;
+ }
+
+ return n_ops;
+}
+
+static_always_inline u32
+crypto_native_ops_hmac_sha2 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops, vnet_crypto_op_chunk_t *chunks,
+ clib_sha2_type_t type)
+{
+ crypto_native_main_t *cm = &crypto_native_main;
+ vnet_crypto_op_t *op = ops[0];
+ u32 n_left = n_ops;
+ clib_sha2_hmac_ctx_t ctx;
+ u8 buffer[64];
+ u32 sz, n_fail = 0;
+
+ for (; n_left; n_left--, op++)
+ {
+ clib_sha2_hmac_init (
+ &ctx, type, (clib_sha2_hmac_key_data_t *) cm->key_data[op->key_index]);
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
+ for (int j = 0; j < op->n_chunks; j++, chp++)
+ clib_sha2_hmac_update (&ctx, chp->src, chp->len);
+ }
+ else
+ clib_sha2_hmac_update (&ctx, op->src, op->len);
+
+ clib_sha2_hmac_final (&ctx, buffer);
+
+ if (op->digest_len)
+ {
+ sz = op->digest_len;
+ if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+ {
+ if ((memcmp (op->digest, buffer, sz)))
+ {
+ n_fail++;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ continue;
+ }
+ }
+ else
+ clib_memcpy_fast (op->digest, buffer, sz);
+ }
+ else
+ {
+ sz = clib_sha2_variants[type].digest_size;
+ if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+ {
+ if ((memcmp (op->digest, buffer, sz)))
+ {
+ n_fail++;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ continue;
+ }
+ }
+ else
+ clib_memcpy_fast (op->digest, buffer, sz);
+ }
+
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+
+ return n_ops - n_fail;
+}
+
+static void *
+sha2_key_add (vnet_crypto_key_t *key, clib_sha2_type_t type)
+{
+ clib_sha2_hmac_key_data_t *kd;
+
+ kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
+ clib_sha2_hmac_key_data (type, key->data, vec_len (key->data), kd);
+
+ return kd;
+}
+
+static int
+probe ()
+{
+#if defined(__x86_64__)
+
+#if defined(__SHA__) && defined(__AVX512F__)
+ if (clib_cpu_supports_sha () && clib_cpu_supports_avx512f ())
+ return 30;
+#elif defined(__SHA__) && defined(__AVX2__)
+ if (clib_cpu_supports_sha () && clib_cpu_supports_avx2 ())
+ return 20;
+#elif defined(__SHA__)
+ if (clib_cpu_supports_sha ())
+ return 10;
+#endif
+
+#elif defined(__aarch64__)
+#if defined(__ARM_FEATURE_SHA2)
+ if (clib_cpu_supports_sha2 ())
+ return 10;
+#endif
+#endif
+ return -1;
+}
+
+#define _(b) \
+ static u32 crypto_native_ops_hash_sha##b ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return crypto_native_ops_hash_sha2 (vm, ops, n_ops, 0, CLIB_SHA2_##b, 0); \
+ } \
+ \
+ static u32 crypto_native_ops_chained_hash_sha##b ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
+ u32 n_ops) \
+ { \
+ return crypto_native_ops_hash_sha2 (vm, ops, n_ops, chunks, \
+ CLIB_SHA2_##b, 1); \
+ } \
+ \
+ static u32 crypto_native_ops_hmac_sha##b ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return crypto_native_ops_hmac_sha2 (vm, ops, n_ops, 0, CLIB_SHA2_##b); \
+ } \
+ \
+ static u32 crypto_native_ops_chained_hmac_sha##b ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
+ u32 n_ops) \
+ { \
+ return crypto_native_ops_hmac_sha2 (vm, ops, n_ops, chunks, \
+ CLIB_SHA2_##b); \
+ } \
+ \
+ static void *sha2_##b##_key_add (vnet_crypto_key_t *k) \
+ { \
+ return sha2_key_add (k, CLIB_SHA2_##b); \
+ } \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (crypto_native_hash_sha##b) = { \
+ .op_id = VNET_CRYPTO_OP_SHA##b##_HASH, \
+ .fn = crypto_native_ops_hash_sha##b, \
+ .cfn = crypto_native_ops_chained_hash_sha##b, \
+ .probe = probe, \
+ }; \
+ CRYPTO_NATIVE_OP_HANDLER (crypto_native_hmac_sha##b) = { \
+ .op_id = VNET_CRYPTO_OP_SHA##b##_HMAC, \
+ .fn = crypto_native_ops_hmac_sha##b, \
+ .cfn = crypto_native_ops_chained_hmac_sha##b, \
+ .probe = probe, \
+ }; \
+ CRYPTO_NATIVE_KEY_HANDLER (crypto_native_hmac_sha##b) = { \
+ .alg_id = VNET_CRYPTO_ALG_HMAC_SHA##b, \
+ .key_fn = sha2_##b##_key_add, \
+ .probe = probe, \
+ };
+
+_ (224)
+_ (256)
+
+#undef _
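
Note: the handlers above drive the vppinfra SHA-2 code directly. A minimal usage sketch of the streaming HMAC API they use, computing HMAC-SHA-256 over two fragments (the wrapper function is hypothetical and not part of the patch; call signatures follow the hunk above):

#include <vppinfra/crypto/sha2.h>

static void
hmac_sha256_two_chunks (u8 *key, u32 key_len, u8 *c0, u32 l0, u8 *c1, u32 l1,
                        u8 digest[32])
{
  clib_sha2_hmac_key_data_t kd;
  clib_sha2_hmac_ctx_t ctx;

  /* pre-expand the key once, as sha2_key_add () does above */
  clib_sha2_hmac_key_data (CLIB_SHA2_256, key, key_len, &kd);

  clib_sha2_hmac_init (&ctx, CLIB_SHA2_256, &kd);
  clib_sha2_hmac_update (&ctx, c0, l0);
  clib_sha2_hmac_update (&ctx, c1, l1);
  clib_sha2_hmac_final (&ctx, digest); /* full 32-byte tag; callers may truncate */
}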