path: root/src/crypto_engines
author     Damjan Marion <dmarion@me.com>            2024-12-16 09:06:42 +0000
committer  Damjan Marion <dmarion@me.com>            2024-12-18 12:34:55 +0000
commit     0cf4eef73a4c1bd2831a4618af50939a2aab01c6
tree       809caae588fa1e12556cb180bc4b253120627134  /src/crypto_engines
parent     4358a18dea319b590da5b64e263439136bd8f806
crypto: move crypto engines outside of plugins
This is the first step in the process of making crypto engine binaries less dependent on a specific VPP version.

Type: improvement
Change-Id: Ib08135688be409049b660e2b2ac435578b63be65
Signed-off-by: Damjan Marion <dmarion@me.com>
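For orientation, every engine added by this change registers itself with VPP through the VNET_CRYPTO_ENGINE_REGISTRATION interface used in the new sources below. A minimal sketch of an out-of-tree engine built against that interface could look like the following; the "example" name and the trivial handler body are illustrative only and are not part of this commit:

#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>
#include <vnet/crypto/engine.h>

/* hypothetical handler: mark every op completed and report all as processed */
static u32
example_ops_aes_128_gcm_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)
{
  for (u32 i = 0; i < n_ops; i++)
    ops[i]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
  return n_ops;
}

static vnet_crypto_engine_op_handlers_t op_handlers[] = {
  { .opt = VNET_CRYPTO_OP_AES_128_GCM_ENC, .fn = example_ops_aes_128_gcm_enc },
  {}, /* zero terminator, same convention as the ipsecmb and native engines */
};

VNET_CRYPTO_ENGINE_REGISTRATION () = {
  .name = "example",
  .desc = "Illustrative out-of-tree crypto engine",
  .prio = 1,
  .op_handlers = op_handlers,
};

Loading and prioritisation of such an engine then follow the same path as the ipsecmb, native and openssl engines added in this tree.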
Diffstat (limited to 'src/crypto_engines')
-rw-r--r--   src/crypto_engines/CMakeLists.txt             30
-rw-r--r--   src/crypto_engines/ipsecmb/CMakeLists.txt     50
-rw-r--r--   src/crypto_engines/ipsecmb/FEATURE.yaml       11
-rw-r--r--   src/crypto_engines/ipsecmb/ipsecmb.c         933
-rw-r--r--   src/crypto_engines/native/CMakeLists.txt      50
-rw-r--r--   src/crypto_engines/native/FEATURE.yaml        13
-rw-r--r--   src/crypto_engines/native/aes_cbc.c          188
-rw-r--r--   src/crypto_engines/native/aes_ctr.c          130
-rw-r--r--   src/crypto_engines/native/aes_gcm.c          166
-rw-r--r--   src/crypto_engines/native/crypto_native.h     83
-rw-r--r--   src/crypto_engines/native/main.c             113
-rw-r--r--   src/crypto_engines/native/sha2.c             198
-rw-r--r--   src/crypto_engines/openssl/CMakeLists.txt     27
-rw-r--r--   src/crypto_engines/openssl/FEATURE.yaml       14
-rw-r--r--   src/crypto_engines/openssl/crypto_openssl.h   19
-rw-r--r--   src/crypto_engines/openssl/main.c            685
16 files changed, 2710 insertions, 0 deletions
diff --git a/src/crypto_engines/CMakeLists.txt b/src/crypto_engines/CMakeLists.txt
new file mode 100644
index 00000000000..8c4d930c08c
--- /dev/null
+++ b/src/crypto_engines/CMakeLists.txt
@@ -0,0 +1,30 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include_directories (
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ ${CMAKE_CURRENT_BINARY_DIR}
+)
+
+##############################################################################
+# find and add all crypto engine subdirs
+##############################################################################
+FILE(GLOB files RELATIVE
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/*/CMakeLists.txt
+)
+
+foreach (f ${files})
+ get_filename_component(dir ${f} DIRECTORY)
+ add_subdirectory(${dir})
+endforeach()
diff --git a/src/crypto_engines/ipsecmb/CMakeLists.txt b/src/crypto_engines/ipsecmb/CMakeLists.txt
new file mode 100644
index 00000000000..738bc05b0f5
--- /dev/null
+++ b/src/crypto_engines/ipsecmb/CMakeLists.txt
@@ -0,0 +1,50 @@
+
+# Copyright (c) 2019 Cisco Systems
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
+ return()
+endif()
+
+vpp_find_path(IPSECMB_INCLUDE_DIR NAMES intel-ipsec-mb.h HINTS ${IPSECMB_INCLUDE_DIR_HINT})
+vpp_find_library(IPSECMB_LIB NAMES libIPSec_MB.a HINTS ${IPSECMB_LIB_DIR_HINT})
+
+if(IPSECMB_INCLUDE_DIR AND IPSECMB_LIB)
+
+ get_filename_component(IPSECMB_LIB_DIR ${IPSECMB_LIB} DIRECTORY)
+ set(IPSECMB_LINK_FLAGS "${IPSECMB_LINK_FLAGS} -L${IPSECMB_LIB_DIR} -Wl,--whole-archive ${IPSECMB_LIB} -Wl,--no-whole-archive")
+ set(IPSECMB_LINK_FLAGS "${IPSECMB_LINK_FLAGS} -Wl,--exclude-libs,libIPSec_MB.a,-l:libIPSec_MB.a")
+ include_directories(${IPSECMB_INCLUDE_DIR})
+ add_vpp_crypto_engine(ipsecmb
+ SOURCES
+ ipsecmb.c
+
+ LINK_FLAGS
+ ${IPSECMB_LINK_FLAGS}
+ )
+
+ file(READ "${IPSECMB_INCLUDE_DIR}/intel-ipsec-mb.h" ipsecmb_header)
+ string(REGEX MATCH "IMB_VERSION_STR (\"+[0-9]+\\.[0-9]+\\.[0-9]+\")" _ ${ipsecmb_header})
+ string(REPLACE "\"" "" IPSECMB_VERSION ${CMAKE_MATCH_1})
+
+ if (${IPSECMB_VERSION} VERSION_GREATER "0.54.0")
+ add_definitions(-DHAVE_IPSECMB_CHACHA_POLY)
+ else()
+ message(STATUS "Intel IPSecMB ${IPSECMB_VERSION} does not support chacha20-poly1305. Disabled")
+ endif()
+
+ target_compile_options(ipsecmb_crypto_engine PRIVATE "-march=silvermont" "-maes")
+ message(STATUS "Intel IPSecMB found: ${IPSECMB_INCLUDE_DIR}")
+else()
+ message(STATUS "Intel IPSecMB not found")
+endif()
diff --git a/src/crypto_engines/ipsecmb/FEATURE.yaml b/src/crypto_engines/ipsecmb/FEATURE.yaml
new file mode 100644
index 00000000000..3ca03bf5515
--- /dev/null
+++ b/src/crypto_engines/ipsecmb/FEATURE.yaml
@@ -0,0 +1,11 @@
+---
+name: IPSec crypto engine provided by Intel IPSecMB library
+maintainer: Neale Ranns <nranns@cisco.com>
+features:
+ - SHA(1, 224, 256, 384, 512)
+ - CBC(128, 192, 256)
+ - GCM(128, 192, 256)
+
+description: ""
+state: production
+properties: [API, CLI, MULTITHREAD]
diff --git a/src/crypto_engines/ipsecmb/ipsecmb.c b/src/crypto_engines/ipsecmb/ipsecmb.c
new file mode 100644
index 00000000000..256856bed8c
--- /dev/null
+++ b/src/crypto_engines/ipsecmb/ipsecmb.c
@@ -0,0 +1,933 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2024 Cisco Systems, Inc.
+ */
+
+#include <fcntl.h>
+
+#include <intel-ipsec-mb.h>
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+#include <vnet/crypto/crypto.h>
+#include <vnet/crypto/engine.h>
+#include <vppinfra/cpu.h>
+
+#define HMAC_MAX_BLOCK_SIZE IMB_SHA_512_BLOCK_SIZE
+#define EXPANDED_KEY_N_BYTES (16 * 15)
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ IMB_MGR *mgr;
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+ IMB_JOB burst_jobs[IMB_MAX_BURST_SIZE];
+#endif
+} ipsecmb_per_thread_data_t;
+
+typedef struct
+{
+ u16 data_size;
+ u8 block_size;
+ aes_gcm_pre_t aes_gcm_pre;
+ keyexp_t keyexp;
+ hash_one_block_t hash_one_block;
+ hash_fn_t hash_fn;
+} ipsecmb_alg_data_t;
+
+typedef struct ipsecmb_main_t_
+{
+ ipsecmb_per_thread_data_t *per_thread_data;
+ ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
+ void **key_data;
+} ipsecmb_main_t;
+
+typedef struct
+{
+ u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
+ u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
+} ipsecmb_aes_key_data_t;
+
+static ipsecmb_main_t ipsecmb_main = { };
+
+/* clang-format off */
+/*
+ * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
+ */
+#define foreach_ipsecmb_hmac_op \
+ _(SHA1, SHA_1, sha1, 64, 20, 20) \
+ _(SHA224, SHA_224, sha224, 64, 32, 28) \
+ _(SHA256, SHA_256, sha256, 64, 32, 32) \
+ _(SHA384, SHA_384, sha384, 128, 64, 48) \
+ _(SHA512, SHA_512, sha512, 128, 64, 64)
+
+/*
+ * (Alg, key-len-bits, JOB_CIPHER_MODE)
+ */
+#define foreach_ipsecmb_cipher_op \
+ _ (AES_128_CBC, 128, CBC) \
+ _ (AES_192_CBC, 192, CBC) \
+ _ (AES_256_CBC, 256, CBC) \
+ _ (AES_128_CTR, 128, CNTR) \
+ _ (AES_192_CTR, 192, CNTR) \
+ _ (AES_256_CTR, 256, CNTR)
+
+/*
+ * (Alg, key-len-bits)
+ */
+#define foreach_ipsecmb_gcm_cipher_op \
+ _(AES_128_GCM, 128) \
+ _(AES_192_GCM, 192) \
+ _(AES_256_GCM, 256)
+/* clang-format on */
+static_always_inline vnet_crypto_op_status_t
+ipsecmb_status_job (IMB_STATUS status)
+{
+ switch (status)
+ {
+ case IMB_STATUS_COMPLETED:
+ return VNET_CRYPTO_OP_STATUS_COMPLETED;
+ case IMB_STATUS_BEING_PROCESSED:
+ case IMB_STATUS_COMPLETED_CIPHER:
+ case IMB_STATUS_COMPLETED_AUTH:
+ return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
+ case IMB_STATUS_INVALID_ARGS:
+ case IMB_STATUS_INTERNAL_ERROR:
+ case IMB_STATUS_ERROR:
+ return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
+ }
+ ASSERT (0);
+ return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
+}
+
+always_inline void
+ipsecmb_retire_hmac_job (IMB_JOB *job, u32 *n_fail, u32 digest_size)
+{
+ vnet_crypto_op_t *op = job->user_data;
+ u32 len = op->digest_len ? op->digest_len : digest_size;
+
+ if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
+ {
+ op->status = ipsecmb_status_job (job->status);
+ *n_fail = *n_fail + 1;
+ return;
+ }
+
+ if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+ {
+ if ((memcmp (op->digest, job->auth_tag_output, len)))
+ {
+ *n_fail = *n_fail + 1;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ return;
+ }
+ }
+ else if (len == digest_size)
+ clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
+ else
+ clib_memcpy_fast (op->digest, job->auth_tag_output, len);
+
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+}
+
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+static_always_inline u32
+ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+ u32 block_size, u32 hash_size, u32 digest_size,
+ IMB_HASH_ALG alg)
+{
+ ipsecmb_main_t *imbm = &ipsecmb_main;
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + vm->thread_index;
+ IMB_JOB *job;
+ u32 i, n_fail = 0, ops_index = 0;
+ u8 scratch[n_ops][digest_size];
+ const u32 burst_sz =
+ (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
+
+ while (n_ops)
+ {
+ const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
+ /*
+ * configure all the jobs first ...
+ */
+ for (i = 0; i < n; i++, ops_index++)
+ {
+ vnet_crypto_op_t *op = ops[ops_index];
+ const u8 *kd = (u8 *) imbm->key_data[op->key_index];
+
+ job = &ptd->burst_jobs[i];
+
+ job->src = op->src;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->msg_len_to_hash_in_bytes = op->len;
+ job->auth_tag_output_len_in_bytes = digest_size;
+ job->auth_tag_output = scratch[ops_index];
+
+ job->u.HMAC._hashed_auth_key_xor_ipad = kd;
+ job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
+ job->user_data = op;
+ }
+
+ /*
+ * submit all jobs to be processed and retire completed jobs
+ */
+ IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);
+
+ for (i = 0; i < n; i++)
+ {
+ job = &ptd->burst_jobs[i];
+ ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
+ }
+
+ n_ops -= n;
+ }
+
+ return ops_index - n_fail;
+}
+#else
+static_always_inline u32
+ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+ u32 block_size, u32 hash_size, u32 digest_size,
+ JOB_HASH_ALG alg)
+{
+ ipsecmb_main_t *imbm = &ipsecmb_main;
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + vm->thread_index;
+ IMB_JOB *job;
+ u32 i, n_fail = 0;
+ u8 scratch[n_ops][digest_size];
+
+ /*
+ * queue all the jobs first ...
+ */
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+ u8 *kd = (u8 *) imbm->key_data[op->key_index];
+
+ job = IMB_GET_NEXT_JOB (ptd->mgr);
+
+ job->src = op->src;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->msg_len_to_hash_in_bytes = op->len;
+ job->hash_alg = alg;
+ job->auth_tag_output_len_in_bytes = digest_size;
+ job->auth_tag_output = scratch[i];
+
+ job->cipher_mode = IMB_CIPHER_NULL;
+ job->cipher_direction = IMB_DIR_DECRYPT;
+ job->chain_order = IMB_ORDER_HASH_CIPHER;
+
+ job->u.HMAC._hashed_auth_key_xor_ipad = kd;
+ job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
+ job->user_data = op;
+
+ job = IMB_SUBMIT_JOB (ptd->mgr);
+
+ if (job)
+ ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
+ }
+
+ while ((job = IMB_FLUSH_JOB (ptd->mgr)))
+ ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
+
+ return n_ops - n_fail;
+}
+#endif
+
+/* clang-format off */
+#define _(a, b, c, d, e, f) \
+static_always_inline u32 \
+ipsecmb_ops_hmac_##a (vlib_main_t * vm, \
+ vnet_crypto_op_t * ops[], \
+ u32 n_ops) \
+{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, \
+ IMB_AUTH_HMAC_##b); } \
+
+foreach_ipsecmb_hmac_op;
+#undef _
+/* clang-format on */
+
+always_inline void
+ipsecmb_retire_cipher_job (IMB_JOB *job, u32 *n_fail)
+{
+ vnet_crypto_op_t *op = job->user_data;
+
+ if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
+ {
+ op->status = ipsecmb_status_job (job->status);
+ *n_fail = *n_fail + 1;
+ }
+ else
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+}
+
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+static_always_inline u32
+ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops, u32 key_len,
+ IMB_CIPHER_DIRECTION direction,
+ IMB_CIPHER_MODE cipher_mode)
+{
+ ipsecmb_main_t *imbm = &ipsecmb_main;
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + vm->thread_index;
+ IMB_JOB *job;
+ u32 i, n_fail = 0, ops_index = 0;
+ const u32 burst_sz =
+ (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
+
+ while (n_ops)
+ {
+ const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
+
+ for (i = 0; i < n; i++)
+ {
+ ipsecmb_aes_key_data_t *kd;
+ vnet_crypto_op_t *op = ops[ops_index++];
+ kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
+
+ job = &ptd->burst_jobs[i];
+
+ job->src = op->src;
+ job->dst = op->dst;
+ job->msg_len_to_cipher_in_bytes = op->len;
+ job->cipher_start_src_offset_in_bytes = 0;
+
+ job->hash_alg = IMB_AUTH_NULL;
+
+ job->enc_keys = kd->enc_key_exp;
+ job->dec_keys = kd->dec_key_exp;
+ job->iv = op->iv;
+ job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;
+
+ job->user_data = op;
+ }
+
+ IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
+ cipher_mode, direction, key_len / 8);
+ for (i = 0; i < n; i++)
+ {
+ job = &ptd->burst_jobs[i];
+ ipsecmb_retire_cipher_job (job, &n_fail);
+ }
+
+ n_ops -= n;
+ }
+
+ return ops_index - n_fail;
+}
+#else
+static_always_inline u32
+ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops, u32 key_len,
+ JOB_CIPHER_DIRECTION direction,
+ JOB_CIPHER_MODE cipher_mode)
+{
+ ipsecmb_main_t *imbm = &ipsecmb_main;
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + vm->thread_index;
+ IMB_JOB *job;
+ u32 i, n_fail = 0;
+
+ for (i = 0; i < n_ops; i++)
+ {
+ ipsecmb_aes_key_data_t *kd;
+ vnet_crypto_op_t *op = ops[i];
+ kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
+
+ job = IMB_GET_NEXT_JOB (ptd->mgr);
+
+ job->src = op->src;
+ job->dst = op->dst;
+ job->msg_len_to_cipher_in_bytes = op->len;
+ job->cipher_start_src_offset_in_bytes = 0;
+
+ job->hash_alg = IMB_AUTH_NULL;
+ job->cipher_mode = cipher_mode;
+ job->cipher_direction = direction;
+ job->chain_order =
+ (direction == IMB_DIR_ENCRYPT ? IMB_ORDER_CIPHER_HASH :
+ IMB_ORDER_HASH_CIPHER);
+
+ job->aes_key_len_in_bytes = key_len / 8;
+ job->enc_keys = kd->enc_key_exp;
+ job->dec_keys = kd->dec_key_exp;
+ job->iv = op->iv;
+ job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;
+
+ job->user_data = op;
+
+ job = IMB_SUBMIT_JOB (ptd->mgr);
+
+ if (job)
+ ipsecmb_retire_cipher_job (job, &n_fail);
+ }
+
+ while ((job = IMB_FLUSH_JOB (ptd->mgr)))
+ ipsecmb_retire_cipher_job (job, &n_fail);
+
+ return n_ops - n_fail;
+}
+#endif
+
+/* clang-format off */
+#define _(a, b, c) \
+ static_always_inline u32 ipsecmb_ops_cipher_enc_##a ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return ipsecmb_ops_aes_cipher_inline ( \
+ vm, ops, n_ops, b, IMB_DIR_ENCRYPT, IMB_CIPHER_##c); \
+ } \
+ \
+ static_always_inline u32 ipsecmb_ops_cipher_dec_##a ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return ipsecmb_ops_aes_cipher_inline ( \
+ vm, ops, n_ops, b, IMB_DIR_DECRYPT, IMB_CIPHER_##c); \
+ }
+
+foreach_ipsecmb_cipher_op;
+#undef _
+
+#define _(a, b) \
+static_always_inline u32 \
+ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm, \
+ vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops) \
+{ \
+ ipsecmb_main_t *imbm = &ipsecmb_main; \
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + \
+ vm->thread_index; \
+ IMB_MGR *m = ptd->mgr; \
+ vnet_crypto_op_chunk_t *chp; \
+ u32 i, j; \
+ \
+ for (i = 0; i < n_ops; i++) \
+ { \
+ struct gcm_key_data *kd; \
+ struct gcm_context_data ctx; \
+ vnet_crypto_op_t *op = ops[i]; \
+ \
+ kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
+ ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \
+ IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len); \
+ chp = chunks + op->chunk_index; \
+ for (j = 0; j < op->n_chunks; j++) \
+ { \
+ IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \
+ chp->len); \
+ chp += 1; \
+ } \
+ IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len); \
+ \
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
+ } \
+ \
+ return n_ops; \
+} \
+ \
+static_always_inline u32 \
+ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
+ u32 n_ops) \
+{ \
+ ipsecmb_main_t *imbm = &ipsecmb_main; \
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + \
+ vm->thread_index; \
+ IMB_MGR *m = ptd->mgr; \
+ u32 i; \
+ \
+ for (i = 0; i < n_ops; i++) \
+ { \
+ struct gcm_key_data *kd; \
+ struct gcm_context_data ctx; \
+ vnet_crypto_op_t *op = ops[i]; \
+ \
+ kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
+ IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \
+ op->aad, op->aad_len, op->tag, op->tag_len); \
+ \
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
+ } \
+ \
+ return n_ops; \
+} \
+ \
+static_always_inline u32 \
+ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm, \
+ vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops) \
+{ \
+ ipsecmb_main_t *imbm = &ipsecmb_main; \
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + \
+ vm->thread_index; \
+ IMB_MGR *m = ptd->mgr; \
+ vnet_crypto_op_chunk_t *chp; \
+ u32 i, j, n_failed = 0; \
+ \
+ for (i = 0; i < n_ops; i++) \
+ { \
+ struct gcm_key_data *kd; \
+ struct gcm_context_data ctx; \
+ vnet_crypto_op_t *op = ops[i]; \
+ u8 scratch[64]; \
+ \
+ kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
+ ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS); \
+ IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len); \
+ chp = chunks + op->chunk_index; \
+ for (j = 0; j < op->n_chunks; j++) \
+ { \
+ IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src, \
+ chp->len); \
+ chp += 1; \
+ } \
+ IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len); \
+ \
+ if ((memcmp (op->tag, scratch, op->tag_len))) \
+ { \
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \
+ n_failed++; \
+ } \
+ else \
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
+ } \
+ \
+ return n_ops - n_failed; \
+} \
+ \
+static_always_inline u32 \
+ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
+ u32 n_ops) \
+{ \
+ ipsecmb_main_t *imbm = &ipsecmb_main; \
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + \
+ vm->thread_index; \
+ IMB_MGR *m = ptd->mgr; \
+ u32 i, n_failed = 0; \
+ \
+ for (i = 0; i < n_ops; i++) \
+ { \
+ struct gcm_key_data *kd; \
+ struct gcm_context_data ctx; \
+ vnet_crypto_op_t *op = ops[i]; \
+ u8 scratch[64]; \
+ \
+ kd = (struct gcm_key_data *) imbm->key_data[op->key_index]; \
+ IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv, \
+ op->aad, op->aad_len, scratch, op->tag_len); \
+ \
+ if ((memcmp (op->tag, scratch, op->tag_len))) \
+ { \
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC; \
+ n_failed++; \
+ } \
+ else \
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED; \
+ } \
+ \
+ return n_ops - n_failed; \
+}
+/* clang-format on */
+foreach_ipsecmb_gcm_cipher_op;
+#undef _
+
+#ifdef HAVE_IPSECMB_CHACHA_POLY
+always_inline void
+ipsecmb_retire_aead_job (IMB_JOB *job, u32 *n_fail)
+{
+ vnet_crypto_op_t *op = job->user_data;
+ u32 len = op->tag_len;
+
+ if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
+ {
+ op->status = ipsecmb_status_job (job->status);
+ *n_fail = *n_fail + 1;
+ return;
+ }
+
+ if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+ {
+ if (memcmp (op->tag, job->auth_tag_output, len))
+ {
+ *n_fail = *n_fail + 1;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ return;
+ }
+ }
+
+ clib_memcpy_fast (op->tag, job->auth_tag_output, len);
+
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+}
+
+static_always_inline u32
+ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+ IMB_CIPHER_DIRECTION dir)
+{
+ ipsecmb_main_t *imbm = &ipsecmb_main;
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + vm->thread_index;
+ struct IMB_JOB *job;
+ IMB_MGR *m = ptd->mgr;
+ u32 i, n_fail = 0, last_key_index = ~0;
+ u8 scratch[VLIB_FRAME_SIZE][16];
+ u8 *key = 0;
+
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+
+ job = IMB_GET_NEXT_JOB (m);
+ if (last_key_index != op->key_index)
+ {
+ vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
+
+ key = kd->data;
+ last_key_index = op->key_index;
+ }
+
+ job->cipher_direction = dir;
+ job->chain_order = IMB_ORDER_HASH_CIPHER;
+ job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
+ job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
+ job->enc_keys = job->dec_keys = key;
+ job->key_len_in_bytes = 32;
+
+ job->u.CHACHA20_POLY1305.aad = op->aad;
+ job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
+ job->src = op->src;
+ job->dst = op->dst;
+
+ job->iv = op->iv;
+ job->iv_len_in_bytes = 12;
+ job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
+ op->len;
+ job->cipher_start_src_offset_in_bytes =
+ job->hash_start_src_offset_in_bytes = 0;
+
+ job->auth_tag_output = scratch[i];
+ job->auth_tag_output_len_in_bytes = 16;
+
+ job->user_data = op;
+
+ job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
+ if (job)
+ ipsecmb_retire_aead_job (job, &n_fail);
+
+ op++;
+ }
+
+ while ((job = IMB_FLUSH_JOB (ptd->mgr)))
+ ipsecmb_retire_aead_job (job, &n_fail);
+
+ return n_ops - n_fail;
+}
+
+static_always_inline u32
+ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops)
+{
+ return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
+}
+
+static_always_inline u32
+ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops)
+{
+ return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
+}
+
+static_always_inline u32
+ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ IMB_CIPHER_DIRECTION dir)
+{
+ ipsecmb_main_t *imbm = &ipsecmb_main;
+ ipsecmb_per_thread_data_t *ptd = imbm->per_thread_data + vm->thread_index;
+ IMB_MGR *m = ptd->mgr;
+ u32 i, n_fail = 0, last_key_index = ~0;
+ u8 *key = 0;
+
+ if (dir == IMB_DIR_ENCRYPT)
+ {
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+ struct chacha20_poly1305_context_data ctx;
+ vnet_crypto_op_chunk_t *chp;
+ u32 j;
+
+ ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);
+
+ if (last_key_index != op->key_index)
+ {
+ vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
+
+ key = kd->data;
+ last_key_index = op->key_index;
+ }
+
+ IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
+ op->aad_len);
+
+ chp = chunks + op->chunk_index;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
+ chp->src, chp->len);
+ chp += 1;
+ }
+
+ IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);
+
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+ }
+ else /* dir == IMB_DIR_DECRYPT */
+ {
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+ struct chacha20_poly1305_context_data ctx;
+ vnet_crypto_op_chunk_t *chp;
+ u8 scratch[16];
+ u32 j;
+
+ ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);
+
+ if (last_key_index != op->key_index)
+ {
+ vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);
+
+ key = kd->data;
+ last_key_index = op->key_index;
+ }
+
+ IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
+ op->aad_len);
+
+ chp = chunks + op->chunk_index;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
+ chp->src, chp->len);
+ chp += 1;
+ }
+
+ IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);
+
+ if (memcmp (op->tag, scratch, op->tag_len))
+ {
+ n_fail = n_fail + 1;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ }
+ else
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+ }
+
+ return n_ops - n_fail;
+}
+
+static_always_inline u32
+ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks,
+ u32 n_ops)
+{
+ return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
+ IMB_DIR_ENCRYPT);
+}
+
+static_always_inline u32
+ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks,
+ u32 n_ops)
+{
+ return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
+ IMB_DIR_DECRYPT);
+}
+#endif
+
+static void
+crypto_ipsecmb_key_handler (vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx)
+{
+ ipsecmb_main_t *imbm = &ipsecmb_main;
+ vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
+ ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
+ u32 i;
+ void *kd;
+
+ /** TODO: add linked alg support **/
+ if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ return;
+
+ if (kop == VNET_CRYPTO_KEY_OP_DEL)
+ {
+ if (idx >= vec_len (imbm->key_data))
+ return;
+
+ if (imbm->key_data[idx] == 0)
+ return;
+
+ clib_mem_free_s (imbm->key_data[idx]);
+ imbm->key_data[idx] = 0;
+ return;
+ }
+
+ if (ad->data_size == 0)
+ return;
+
+ vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);
+
+ if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
+ {
+ clib_mem_free_s (imbm->key_data[idx]);
+ }
+
+ kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
+ CLIB_CACHE_LINE_BYTES);
+
+ /* AES CBC key expansion */
+ if (ad->keyexp)
+ {
+ ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
+ ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
+ return;
+ }
+
+ /* AES GCM */
+ if (ad->aes_gcm_pre)
+ {
+ ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
+ return;
+ }
+
+ /* HMAC */
+ if (ad->hash_one_block)
+ {
+ const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
+ u64 pad[block_qw], key_hash[block_qw];
+
+ clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
+ if (vec_len (key->data) <= ad->block_size)
+ clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
+ else
+ ad->hash_fn (key->data, vec_len (key->data), key_hash);
+
+ for (i = 0; i < block_qw; i++)
+ pad[i] = key_hash[i] ^ 0x3636363636363636;
+ ad->hash_one_block (pad, kd);
+
+ for (i = 0; i < block_qw; i++)
+ pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
+ ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));
+
+ return;
+ }
+}
+
+static char *
+crypto_ipsecmb_init (vnet_crypto_engine_registration_t *r)
+{
+ ipsecmb_main_t *imbm = &ipsecmb_main;
+ ipsecmb_alg_data_t *ad;
+ ipsecmb_per_thread_data_t *ptd;
+ IMB_MGR *m = 0;
+
+ if (!clib_cpu_supports_aes ())
+ return "AES ISA not available on this CPU";
+
+ imbm->per_thread_data = r->per_thread_data;
+
+ for (u32 i = 0; i < r->num_threads; i++)
+ {
+ ptd = imbm->per_thread_data + i;
+ ptd->mgr = alloc_mb_mgr (0);
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+ clib_memset_u8 (ptd->burst_jobs, 0,
+ sizeof (IMB_JOB) * IMB_MAX_BURST_SIZE);
+#endif
+ if (clib_cpu_supports_avx512f ())
+ init_mb_mgr_avx512 (ptd->mgr);
+ else if (clib_cpu_supports_avx2 () && clib_cpu_supports_bmi2 ())
+ init_mb_mgr_avx2 (ptd->mgr);
+ else
+ init_mb_mgr_sse (ptd->mgr);
+
+ if (ptd == imbm->per_thread_data)
+ m = ptd->mgr;
+ }
+
+#define _(a, b, c, d, e, f) \
+ ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a; \
+ ad->block_size = d; \
+ ad->data_size = e * 2; \
+ ad->hash_one_block = m->c##_one_block; \
+ ad->hash_fn = m->c;
+
+ foreach_ipsecmb_hmac_op;
+#undef _
+#define _(a, b, c) \
+ ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \
+ ad->data_size = sizeof (ipsecmb_aes_key_data_t); \
+ ad->keyexp = m->keyexp_##b;
+
+ foreach_ipsecmb_cipher_op;
+#undef _
+#define _(a, b) \
+ ad = imbm->alg_data + VNET_CRYPTO_ALG_##a; \
+ ad->data_size = sizeof (struct gcm_key_data); \
+ ad->aes_gcm_pre = m->gcm##b##_pre;
+
+ foreach_ipsecmb_gcm_cipher_op;
+#undef _
+
+#ifdef HAVE_IPSECMB_CHACHA_POLY
+ ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
+ ad->data_size = 0;
+#endif
+
+ return 0;
+}
+
+vnet_crypto_engine_op_handlers_t op_handlers[] = {
+#define _(a, b) \
+ { \
+ .opt = VNET_CRYPTO_OP_##a##_ENC, \
+ .fn = ipsecmb_ops_gcm_cipher_enc_##a, \
+ .cfn = ipsecmb_ops_gcm_cipher_enc_##a##_chained, \
+ }, \
+ { \
+ .opt = VNET_CRYPTO_OP_##a##_DEC, \
+ .fn = ipsecmb_ops_gcm_cipher_dec_##a, \
+ .cfn = ipsecmb_ops_gcm_cipher_dec_##a##_chained, \
+ },
+ foreach_ipsecmb_gcm_cipher_op
+#undef _
+#define _(a, b, c, d, e, f) \
+ { .opt = VNET_CRYPTO_OP_##a##_HMAC, .fn = ipsecmb_ops_hmac_##a },
+
+ foreach_ipsecmb_hmac_op
+#undef _
+#define _(a, b, c) \
+ { .opt = VNET_CRYPTO_OP_##a##_ENC, .fn = ipsecmb_ops_cipher_enc_##a }, \
+ { .opt = VNET_CRYPTO_OP_##a##_DEC, .fn = ipsecmb_ops_cipher_dec_##a },
+
+ foreach_ipsecmb_cipher_op
+#undef _
+#ifdef HAVE_IPSECMB_CHACHA_POLY
+ { .opt = VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
+ .fn = ipsecmb_ops_chacha_poly_enc,
+ .cfn = ipsec_mb_ops_chacha_poly_enc_chained },
+ { .opt = VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
+ .fn = ipsecmb_ops_chacha_poly_dec,
+ .cfn = ipsec_mb_ops_chacha_poly_dec_chained },
+#endif
+ {}
+};
+
+VNET_CRYPTO_ENGINE_REGISTRATION () = {
+ .name = "ipsecmb",
+ .desc = "Intel(R) Multi-Buffer Crypto for IPsec Library" IMB_VERSION_STR,
+ .prio = 80,
+ .per_thread_data_sz = sizeof (ipsecmb_per_thread_data_t),
+ .init_fn = crypto_ipsecmb_init,
+ .key_handler = crypto_ipsecmb_key_handler,
+ .op_handlers = op_handlers,
+};
diff --git a/src/crypto_engines/native/CMakeLists.txt b/src/crypto_engines/native/CMakeLists.txt
new file mode 100644
index 00000000000..d9d72aff58e
--- /dev/null
+++ b/src/crypto_engines/native/CMakeLists.txt
@@ -0,0 +1,50 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
+ list(APPEND VARIANTS "slm\;-march=silvermont -maes")
+ list(APPEND VARIANTS "hsw\;-march=haswell -maes")
+ if(compiler_flag_march_skylake_avx512 AND compiler_flag_mprefer_vector_width_256)
+ list(APPEND VARIANTS "skx\;-march=skylake-avx512 -mprefer-vector-width=256")
+ endif()
+ if(compiler_flag_march_icelake_client AND compiler_flag_mprefer_vector_width_512)
+ list(APPEND VARIANTS "icl\;-march=icelake-client -mprefer-vector-width=512")
+ endif()
+ if(compiler_flag_march_alderlake)
+ list(APPEND VARIANTS "adl\;-march=alderlake -mprefer-vector-width=256")
+ endif()
+endif()
+
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
+ list(APPEND VARIANTS "armv8\;-march=armv8.1-a+crc+crypto")
+endif()
+
+set (COMPILE_FILES aes_cbc.c aes_gcm.c aes_ctr.c sha2.c)
+set (COMPILE_OPTS -Wall -fno-common)
+
+if (NOT VARIANTS)
+ return()
+endif()
+
+add_vpp_crypto_engine(native SOURCES main.c)
+
+foreach(VARIANT ${VARIANTS})
+ list(GET VARIANT 0 v)
+ list(GET VARIANT 1 f)
+ set(l native_crypto_engine_${v})
+ add_library(${l} OBJECT ${COMPILE_FILES})
+ set_target_properties(${l} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+ separate_arguments(f)
+ target_compile_options(${l} PUBLIC ${f} ${COMPILE_OPTS})
+ target_sources(native_crypto_engine PRIVATE $<TARGET_OBJECTS:${l}>)
+endforeach()
diff --git a/src/crypto_engines/native/FEATURE.yaml b/src/crypto_engines/native/FEATURE.yaml
new file mode 100644
index 00000000000..d54816d673f
--- /dev/null
+++ b/src/crypto_engines/native/FEATURE.yaml
@@ -0,0 +1,13 @@
+---
+name: IPSec crypto engine provided by native implementation
+maintainer: Damjan Marion <damarion@cisco.com>
+features:
+ - CBC(128, 192, 256)
+ - GCM(128, 192, 256)
+ - CTR(128, 192, 256)
+ - SHA(224, 256)
+ - HMAC-SHA(224, 256)
+
+description: "An implementation of a native crypto-engine"
+state: production
+properties: [API, CLI, MULTITHREAD]
diff --git a/src/crypto_engines/native/aes_cbc.c b/src/crypto_engines/native/aes_cbc.c
new file mode 100644
index 00000000000..b4ed2b3493d
--- /dev/null
+++ b/src/crypto_engines/native/aes_cbc.c
@@ -0,0 +1,188 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/crypto/crypto.h>
+#include <native/crypto_native.h>
+#include <vppinfra/crypto/aes_cbc.h>
+
+#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
+#pragma GCC optimize ("O3")
+#endif
+
+#define CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE 256
+
+static_always_inline u32
+aes_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ u32 n_ops, aes_key_size_t ks)
+{
+ crypto_native_main_t *cm = &crypto_native_main;
+ u32 i, n_left = n_ops;
+ uword key_indices[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+ u8 *plaintext[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+ uword oplen[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+ u8 *iv[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+ u8 *ciphertext[CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE] = {};
+
+ while (n_left)
+ {
+ i = 0;
+ while (n_left && i < CRYPTO_NATIVE_AES_CBC_ENC_VEC_SIZE)
+ {
+ key_indices[i] = ops[0]->key_index;
+ plaintext[i] = ops[0]->src;
+ ciphertext[i] = ops[0]->dst;
+ oplen[i] = ops[0]->len;
+ iv[i] = ops[0]->iv;
+ ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
+ ops++;
+ n_left--;
+ i++;
+ }
+ clib_aes_cbc_encrypt_multi ((aes_cbc_key_data_t **) cm->key_data,
+ key_indices, plaintext, oplen, iv, ks,
+ ciphertext, i);
+ }
+ return n_ops;
+}
+
+
+static_always_inline u32
+aes_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ u32 n_ops, aes_key_size_t ks)
+{
+ crypto_native_main_t *cm = &crypto_native_main;
+ int rounds = AES_KEY_ROUNDS (ks);
+ vnet_crypto_op_t *op = ops[0];
+ aes_cbc_key_data_t *kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
+ u32 n_left = n_ops;
+
+ ASSERT (n_ops >= 1);
+
+decrypt:
+#if defined(__VAES__) && defined(__AVX512F__)
+ aes4_cbc_dec (kd->decrypt_key, (u8x64u *) op->src, (u8x64u *) op->dst,
+ (u8x16u *) op->iv, op->len, rounds);
+#elif defined(__VAES__)
+ aes2_cbc_dec (kd->decrypt_key, (u8x32u *) op->src, (u8x32u *) op->dst,
+ (u8x16u *) op->iv, op->len, rounds);
+#else
+ aes_cbc_dec (kd->decrypt_key, (u8x16u *) op->src, (u8x16u *) op->dst,
+ (u8x16u *) op->iv, op->len, rounds);
+#endif
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
+ if (--n_left)
+ {
+ op += 1;
+ kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
+ goto decrypt;
+ }
+
+ return n_ops;
+}
+
+static int
+aes_cbc_cpu_probe ()
+{
+#if defined(__VAES__) && defined(__AVX512F__)
+ if (clib_cpu_supports_vaes () && clib_cpu_supports_avx512f ())
+ return 50;
+#elif defined(__VAES__)
+ if (clib_cpu_supports_vaes ())
+ return 40;
+#elif defined(__AVX512F__)
+ if (clib_cpu_supports_avx512f ())
+ return 30;
+#elif defined(__AVX2__)
+ if (clib_cpu_supports_avx2 ())
+ return 20;
+#elif __AES__
+ if (clib_cpu_supports_aes ())
+ return 10;
+#elif __aarch64__
+ if (clib_cpu_supports_aarch64_aes ())
+ return 10;
+#endif
+ return -1;
+}
+
+static void *
+aes_cbc_key_exp_128 (vnet_crypto_key_t *key)
+{
+ aes_cbc_key_data_t *kd;
+ kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
+ clib_aes128_cbc_key_expand (kd, key->data);
+ return kd;
+}
+
+static void *
+aes_cbc_key_exp_192 (vnet_crypto_key_t *key)
+{
+ aes_cbc_key_data_t *kd;
+ kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
+ clib_aes192_cbc_key_expand (kd, key->data);
+ return kd;
+}
+
+static void *
+aes_cbc_key_exp_256 (vnet_crypto_key_t *key)
+{
+ aes_cbc_key_data_t *kd;
+ kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
+ clib_aes256_cbc_key_expand (kd, key->data);
+ return kd;
+}
+
+#define foreach_aes_cbc_handler_type _ (128) _ (192) _ (256)
+
+#define _(x) \
+ static u32 aes_ops_enc_aes_cbc_##x (vlib_main_t *vm, \
+ vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##x); \
+ } \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##x##_cbc_enc) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
+ .fn = aes_ops_enc_aes_cbc_##x, \
+ .probe = aes_cbc_cpu_probe, \
+ }; \
+ \
+ static u32 aes_ops_dec_aes_cbc_##x (vlib_main_t *vm, \
+ vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##x); \
+ } \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##x##_cbc_dec) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
+ .fn = aes_ops_dec_aes_cbc_##x, \
+ .probe = aes_cbc_cpu_probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_KEY_HANDLER (aes_##x##_cbc) = { \
+ .alg_id = VNET_CRYPTO_ALG_AES_##x##_CBC, \
+ .key_fn = aes_cbc_key_exp_##x, \
+ .probe = aes_cbc_cpu_probe, \
+ };
+
+foreach_aes_cbc_handler_type;
+#undef _
+
diff --git a/src/crypto_engines/native/aes_ctr.c b/src/crypto_engines/native/aes_ctr.c
new file mode 100644
index 00000000000..d39b1c83842
--- /dev/null
+++ b/src/crypto_engines/native/aes_ctr.c
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2024 Cisco Systems, Inc.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/crypto/crypto.h>
+#include <native/crypto_native.h>
+#include <vppinfra/crypto/aes_ctr.h>
+
+#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
+#pragma GCC optimize("O3")
+#endif
+
+static_always_inline u32
+aes_ops_aes_ctr (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+ vnet_crypto_op_chunk_t *chunks, aes_key_size_t ks,
+ int maybe_chained)
+{
+ crypto_native_main_t *cm = &crypto_native_main;
+ vnet_crypto_op_t *op = ops[0];
+ aes_ctr_key_data_t *kd;
+ aes_ctr_ctx_t ctx;
+ u32 n_left = n_ops;
+
+next:
+ kd = (aes_ctr_key_data_t *) cm->key_data[op->key_index];
+
+ clib_aes_ctr_init (&ctx, kd, op->iv, ks);
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
+ for (int j = 0; j < op->n_chunks; j++, chp++)
+ clib_aes_ctr_transform (&ctx, chp->src, chp->dst, chp->len, ks);
+ }
+ else
+ clib_aes_ctr_transform (&ctx, op->src, op->dst, op->len, ks);
+
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
+ if (--n_left)
+ {
+ op += 1;
+ goto next;
+ }
+
+ return n_ops;
+}
+
+static_always_inline void *
+aes_ctr_key_exp (vnet_crypto_key_t *key, aes_key_size_t ks)
+{
+ aes_ctr_key_data_t *kd;
+
+ kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
+
+ clib_aes_ctr_key_expand (kd, key->data, ks);
+
+ return kd;
+}
+
+#define foreach_aes_ctr_handler_type _ (128) _ (192) _ (256)
+
+#define _(x) \
+ static u32 aes_ops_aes_ctr_##x (vlib_main_t *vm, vnet_crypto_op_t *ops[], \
+ u32 n_ops) \
+ { \
+ return aes_ops_aes_ctr (vm, ops, n_ops, 0, AES_KEY_##x, 0); \
+ } \
+ static u32 aes_ops_aes_ctr_##x##_chained ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
+ u32 n_ops) \
+ { \
+ return aes_ops_aes_ctr (vm, ops, n_ops, chunks, AES_KEY_##x, 1); \
+ } \
+ static void *aes_ctr_key_exp_##x (vnet_crypto_key_t *key) \
+ { \
+ return aes_ctr_key_exp (key, AES_KEY_##x); \
+ }
+
+foreach_aes_ctr_handler_type;
+#undef _
+
+static int
+probe ()
+{
+#if defined(__VAES__) && defined(__AVX512F__)
+ if (clib_cpu_supports_vaes () && clib_cpu_supports_avx512f ())
+ return 50;
+#elif defined(__VAES__)
+ if (clib_cpu_supports_vaes ())
+ return 40;
+#elif defined(__AVX512F__)
+ if (clib_cpu_supports_avx512f ())
+ return 30;
+#elif defined(__AVX2__)
+ if (clib_cpu_supports_avx2 ())
+ return 20;
+#elif __AES__
+ if (clib_cpu_supports_aes ())
+ return 10;
+#elif __aarch64__
+ if (clib_cpu_supports_aarch64_aes ())
+ return 10;
+#endif
+ return -1;
+}
+
+#define _(b) \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##b##_ctr_enc) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##b##_CTR_ENC, \
+ .fn = aes_ops_aes_ctr_##b, \
+ .cfn = aes_ops_aes_ctr_##b##_chained, \
+ .probe = probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##b##_ctr_dec) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##b##_CTR_DEC, \
+ .fn = aes_ops_aes_ctr_##b, \
+ .cfn = aes_ops_aes_ctr_##b##_chained, \
+ .probe = probe, \
+ }; \
+ CRYPTO_NATIVE_KEY_HANDLER (aes_##b##_ctr) = { \
+ .alg_id = VNET_CRYPTO_ALG_AES_##b##_CTR, \
+ .key_fn = aes_ctr_key_exp_##b, \
+ .probe = probe, \
+ };
+
+_ (128) _ (192) _ (256)
+#undef _
diff --git a/src/crypto_engines/native/aes_gcm.c b/src/crypto_engines/native/aes_gcm.c
new file mode 100644
index 00000000000..57eee17f3d0
--- /dev/null
+++ b/src/crypto_engines/native/aes_gcm.c
@@ -0,0 +1,166 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/crypto/crypto.h>
+#include <native/crypto_native.h>
+#include <vppinfra/crypto/aes_gcm.h>
+
+#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
+#pragma GCC optimize("O3")
+#endif
+
+static_always_inline u32
+aes_ops_enc_aes_gcm (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+ aes_key_size_t ks)
+{
+ crypto_native_main_t *cm = &crypto_native_main;
+ vnet_crypto_op_t *op = ops[0];
+ aes_gcm_key_data_t *kd;
+ u32 n_left = n_ops;
+
+next:
+ kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
+ aes_gcm (op->src, op->dst, op->aad, (u8 *) op->iv, op->tag, op->len,
+ op->aad_len, op->tag_len, kd, AES_KEY_ROUNDS (ks),
+ AES_GCM_OP_ENCRYPT);
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
+ if (--n_left)
+ {
+ op += 1;
+ goto next;
+ }
+
+ return n_ops;
+}
+
+static_always_inline u32
+aes_ops_dec_aes_gcm (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+ aes_key_size_t ks)
+{
+ crypto_native_main_t *cm = &crypto_native_main;
+ vnet_crypto_op_t *op = ops[0];
+ aes_gcm_key_data_t *kd;
+ u32 n_left = n_ops;
+ int rv;
+
+next:
+ kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
+ rv = aes_gcm (op->src, op->dst, op->aad, (u8 *) op->iv, op->tag, op->len,
+ op->aad_len, op->tag_len, kd, AES_KEY_ROUNDS (ks),
+ AES_GCM_OP_DECRYPT);
+
+ if (rv)
+ {
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+ else
+ {
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ n_ops--;
+ }
+
+ if (--n_left)
+ {
+ op += 1;
+ goto next;
+ }
+
+ return n_ops;
+}
+
+static_always_inline void *
+aes_gcm_key_exp (vnet_crypto_key_t *key, aes_key_size_t ks)
+{
+ aes_gcm_key_data_t *kd;
+
+ kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
+
+ clib_aes_gcm_key_expand (kd, key->data, ks);
+
+ return kd;
+}
+
+#define foreach_aes_gcm_handler_type _ (128) _ (192) _ (256)
+
+#define _(x) \
+ static u32 aes_ops_dec_aes_gcm_##x (vlib_main_t *vm, \
+ vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return aes_ops_dec_aes_gcm (vm, ops, n_ops, AES_KEY_##x); \
+ } \
+ static u32 aes_ops_enc_aes_gcm_##x (vlib_main_t *vm, \
+ vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return aes_ops_enc_aes_gcm (vm, ops, n_ops, AES_KEY_##x); \
+ } \
+ static void *aes_gcm_key_exp_##x (vnet_crypto_key_t *key) \
+ { \
+ return aes_gcm_key_exp (key, AES_KEY_##x); \
+ }
+
+foreach_aes_gcm_handler_type;
+#undef _
+
+static int
+probe ()
+{
+#if defined(__VAES__) && defined(__AVX512F__)
+ if (clib_cpu_supports_vpclmulqdq () && clib_cpu_supports_vaes () &&
+ clib_cpu_supports_avx512f ())
+ return 50;
+#elif defined(__VAES__)
+ if (clib_cpu_supports_vpclmulqdq () && clib_cpu_supports_vaes ())
+ return 40;
+#elif defined(__AVX512F__)
+ if (clib_cpu_supports_pclmulqdq () && clib_cpu_supports_avx512f ())
+ return 30;
+#elif defined(__AVX2__)
+ if (clib_cpu_supports_pclmulqdq () && clib_cpu_supports_avx2 ())
+ return 20;
+#elif __AES__
+ if (clib_cpu_supports_pclmulqdq () && clib_cpu_supports_aes ())
+ return 10;
+#elif __aarch64__
+ if (clib_cpu_supports_aarch64_aes ())
+ return 10;
+#endif
+ return -1;
+}
+
+#define _(b) \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##b##_gcm_enc) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##b##_GCM_ENC, \
+ .fn = aes_ops_enc_aes_gcm_##b, \
+ .probe = probe, \
+ }; \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (aes_##b##_gcm_dec) = { \
+ .op_id = VNET_CRYPTO_OP_AES_##b##_GCM_DEC, \
+ .fn = aes_ops_dec_aes_gcm_##b, \
+ .probe = probe, \
+ }; \
+ CRYPTO_NATIVE_KEY_HANDLER (aes_##b##_gcm) = { \
+ .alg_id = VNET_CRYPTO_ALG_AES_##b##_GCM, \
+ .key_fn = aes_gcm_key_exp_##b, \
+ .probe = probe, \
+ };
+
+_ (128) _ (192) _ (256)
+#undef _
diff --git a/src/crypto_engines/native/crypto_native.h b/src/crypto_engines/native/crypto_native.h
new file mode 100644
index 00000000000..0fcb6a99524
--- /dev/null
+++ b/src/crypto_engines/native/crypto_native.h
@@ -0,0 +1,83 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __crypto_native_h__
+#define __crypto_native_h__
+
+typedef void *(crypto_native_key_fn_t) (vnet_crypto_key_t * key);
+typedef int (crypto_native_variant_probe_t) ();
+
+typedef struct crypto_native_op_handler
+{
+ struct crypto_native_op_handler *next;
+ vnet_crypto_op_id_t op_id;
+ vnet_crypto_ops_handler_t *fn;
+ vnet_crypto_chained_ops_handler_t *cfn;
+ crypto_native_variant_probe_t *probe;
+ int priority;
+} crypto_native_op_handler_t;
+
+typedef struct crypto_native_key_handler
+{
+ struct crypto_native_key_handler *next;
+ vnet_crypto_alg_t alg_id;
+ crypto_native_key_fn_t *key_fn;
+ crypto_native_variant_probe_t *probe;
+ int priority;
+} crypto_native_key_handler_t;
+
+typedef struct
+{
+ crypto_native_key_fn_t *key_fn[VNET_CRYPTO_N_ALGS];
+ void **key_data;
+ crypto_native_op_handler_t *op_handlers;
+ crypto_native_key_handler_t *key_handlers;
+} crypto_native_main_t;
+
+extern crypto_native_main_t crypto_native_main;
+
+#define CRYPTO_NATIVE_OP_HANDLER(x) \
+ static crypto_native_op_handler_t __crypto_native_op_handler_##x; \
+ static void __clib_constructor __crypto_native_op_handler_cb_##x (void) \
+ { \
+ crypto_native_main_t *cm = &crypto_native_main; \
+ int priority = __crypto_native_op_handler_##x.probe (); \
+ if (priority >= 0) \
+ { \
+ __crypto_native_op_handler_##x.priority = priority; \
+ __crypto_native_op_handler_##x.next = cm->op_handlers; \
+ cm->op_handlers = &__crypto_native_op_handler_##x; \
+ } \
+ } \
+ static crypto_native_op_handler_t __crypto_native_op_handler_##x
+
+#define CRYPTO_NATIVE_KEY_HANDLER(x) \
+ static crypto_native_key_handler_t __crypto_native_key_handler_##x; \
+ static void __clib_constructor __crypto_native_key_handler_cb_##x (void) \
+ { \
+ crypto_native_main_t *cm = &crypto_native_main; \
+ int priority = __crypto_native_key_handler_##x.probe (); \
+ if (priority >= 0) \
+ { \
+ __crypto_native_key_handler_##x.priority = priority; \
+ __crypto_native_key_handler_##x.next = cm->key_handlers; \
+ cm->key_handlers = &__crypto_native_key_handler_##x; \
+ } \
+ } \
+ static crypto_native_key_handler_t __crypto_native_key_handler_##x
+#endif /* __crypto_native_h__ */
+
diff --git a/src/crypto_engines/native/main.c b/src/crypto_engines/native/main.c
new file mode 100644
index 00000000000..e9e71b6fb6d
--- /dev/null
+++ b/src/crypto_engines/native/main.c
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2024 Cisco Systems, Inc.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/crypto/crypto.h>
+#include <vnet/crypto/engine.h>
+#include <native/crypto_native.h>
+
+crypto_native_main_t crypto_native_main;
+vnet_crypto_engine_op_handlers_t op_handlers[24], *ophp = op_handlers;
+
+static void
+crypto_native_key_handler (vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx)
+{
+ vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
+ crypto_native_main_t *cm = &crypto_native_main;
+
+ /** TODO: add linked alg support **/
+ if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ return;
+
+ if (cm->key_fn[key->alg] == 0)
+ return;
+
+ if (kop == VNET_CRYPTO_KEY_OP_DEL)
+ {
+ if (idx >= vec_len (cm->key_data))
+ return;
+
+ if (cm->key_data[idx] == 0)
+ return;
+
+ clib_mem_free_s (cm->key_data[idx]);
+ cm->key_data[idx] = 0;
+ return;
+ }
+
+ vec_validate_aligned (cm->key_data, idx, CLIB_CACHE_LINE_BYTES);
+
+ if (kop == VNET_CRYPTO_KEY_OP_MODIFY && cm->key_data[idx])
+ {
+ clib_mem_free_s (cm->key_data[idx]);
+ }
+
+ cm->key_data[idx] = cm->key_fn[key->alg] (key);
+}
+
+static char *
+crypto_native_init (vnet_crypto_engine_registration_t *r)
+{
+ crypto_native_main_t *cm = &crypto_native_main;
+
+ if (cm->op_handlers == 0)
+ return 0;
+
+ crypto_native_op_handler_t *oh = cm->op_handlers;
+ crypto_native_key_handler_t *kh = cm->key_handlers;
+ crypto_native_op_handler_t **best_by_op_id = 0;
+ crypto_native_key_handler_t **best_by_alg_id = 0;
+
+ while (oh)
+ {
+ vec_validate (best_by_op_id, oh->op_id);
+
+ if (best_by_op_id[oh->op_id] == 0 ||
+ best_by_op_id[oh->op_id]->priority < oh->priority)
+ best_by_op_id[oh->op_id] = oh;
+
+ oh = oh->next;
+ }
+
+ while (kh)
+ {
+ vec_validate (best_by_alg_id, kh->alg_id);
+
+ if (best_by_alg_id[kh->alg_id] == 0 ||
+ best_by_alg_id[kh->alg_id]->priority < kh->priority)
+ best_by_alg_id[kh->alg_id] = kh;
+
+ kh = kh->next;
+ }
+
+ vec_foreach_pointer (oh, best_by_op_id)
+ if (oh)
+ {
+ *ophp = (vnet_crypto_engine_op_handlers_t){ .opt = oh->op_id,
+ .fn = oh->fn,
+ .cfn = oh->cfn };
+ ophp++;
+ ASSERT ((ophp - op_handlers) < ARRAY_LEN (op_handlers));
+ }
+
+ vec_foreach_pointer (kh, best_by_alg_id)
+ if (kh)
+ cm->key_fn[kh->alg_id] = kh->key_fn;
+
+ vec_free (best_by_op_id);
+ vec_free (best_by_alg_id);
+
+ return 0;
+}
+
+VNET_CRYPTO_ENGINE_REGISTRATION () = {
+ .name = "native",
+ .desc = "Native ISA Optimized Crypto",
+ .prio = 100,
+ .init_fn = crypto_native_init,
+ .key_handler = crypto_native_key_handler,
+ .op_handlers = op_handlers,
+};
diff --git a/src/crypto_engines/native/sha2.c b/src/crypto_engines/native/sha2.c
new file mode 100644
index 00000000000..b61a5f08060
--- /dev/null
+++ b/src/crypto_engines/native/sha2.c
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2024 Cisco Systems, Inc.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/crypto/crypto.h>
+#include <native/crypto_native.h>
+#include <vppinfra/crypto/sha2.h>
+
+static_always_inline u32
+crypto_native_ops_hash_sha2 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops, vnet_crypto_op_chunk_t *chunks,
+ clib_sha2_type_t type, int maybe_chained)
+{
+ vnet_crypto_op_t *op = ops[0];
+ clib_sha2_ctx_t ctx;
+ u32 n_left = n_ops;
+
+next:
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
+ clib_sha2_init (&ctx, type);
+ for (int j = 0; j < op->n_chunks; j++, chp++)
+ clib_sha2_update (&ctx, chp->src, chp->len);
+ clib_sha2_final (&ctx, op->digest);
+ }
+ else
+ clib_sha2 (type, op->src, op->len, op->digest);
+
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
+ if (--n_left)
+ {
+ op += 1;
+ goto next;
+ }
+
+ return n_ops;
+}
+
+static_always_inline u32
+crypto_native_ops_hmac_sha2 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ u32 n_ops, vnet_crypto_op_chunk_t *chunks,
+ clib_sha2_type_t type)
+{
+ crypto_native_main_t *cm = &crypto_native_main;
+ vnet_crypto_op_t *op = ops[0];
+ u32 n_left = n_ops;
+ clib_sha2_hmac_ctx_t ctx;
+ u8 buffer[64];
+ u32 sz, n_fail = 0;
+
+ for (; n_left; n_left--, op++)
+ {
+ clib_sha2_hmac_init (
+ &ctx, type, (clib_sha2_hmac_key_data_t *) cm->key_data[op->key_index]);
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
+ for (int j = 0; j < op->n_chunks; j++, chp++)
+ clib_sha2_hmac_update (&ctx, chp->src, chp->len);
+ }
+ else
+ clib_sha2_hmac_update (&ctx, op->src, op->len);
+
+ clib_sha2_hmac_final (&ctx, buffer);
+
+ if (op->digest_len)
+ {
+ sz = op->digest_len;
+ if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+ {
+ if ((memcmp (op->digest, buffer, sz)))
+ {
+ n_fail++;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ continue;
+ }
+ }
+ else
+ clib_memcpy_fast (op->digest, buffer, sz);
+ }
+ else
+ {
+ sz = clib_sha2_variants[type].digest_size;
+ if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+ {
+ if ((memcmp (op->digest, buffer, sz)))
+ {
+ n_fail++;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ continue;
+ }
+ }
+ else
+ clib_memcpy_fast (op->digest, buffer, sz);
+ }
+
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+
+ return n_ops - n_fail;
+}
+
+static void *
+sha2_key_add (vnet_crypto_key_t *key, clib_sha2_type_t type)
+{
+ clib_sha2_hmac_key_data_t *kd;
+
+ kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
+ clib_sha2_hmac_key_data (type, key->data, vec_len (key->data), kd);
+
+ return kd;
+}
+
+static int
+probe ()
+{
+#if defined(__x86_64__)
+
+#if defined(__SHA__) && defined(__AVX512F__)
+ if (clib_cpu_supports_sha () && clib_cpu_supports_avx512f ())
+ return 30;
+#elif defined(__SHA__) && defined(__AVX2__)
+ if (clib_cpu_supports_sha () && clib_cpu_supports_avx2 ())
+ return 20;
+#elif defined(__SHA__)
+ if (clib_cpu_supports_sha ())
+ return 10;
+#endif
+
+#elif defined(__aarch64__)
+#if defined(__ARM_FEATURE_SHA2)
+ if (clib_cpu_supports_sha2 ())
+ return 10;
+#endif
+#endif
+ return -1;
+}
+
+#define _(b) \
+ static u32 crypto_native_ops_hash_sha##b ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return crypto_native_ops_hash_sha2 (vm, ops, n_ops, 0, CLIB_SHA2_##b, 0); \
+ } \
+ \
+ static u32 crypto_native_ops_chained_hash_sha##b ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
+ u32 n_ops) \
+ { \
+ return crypto_native_ops_hash_sha2 (vm, ops, n_ops, chunks, \
+ CLIB_SHA2_##b, 1); \
+ } \
+ \
+ static u32 crypto_native_ops_hmac_sha##b ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
+ { \
+ return crypto_native_ops_hmac_sha2 (vm, ops, n_ops, 0, CLIB_SHA2_##b); \
+ } \
+ \
+ static u32 crypto_native_ops_chained_hmac_sha##b ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
+ u32 n_ops) \
+ { \
+ return crypto_native_ops_hmac_sha2 (vm, ops, n_ops, chunks, \
+ CLIB_SHA2_##b); \
+ } \
+ \
+ static void *sha2_##b##_key_add (vnet_crypto_key_t *k) \
+ { \
+ return sha2_key_add (k, CLIB_SHA2_##b); \
+ } \
+ \
+ CRYPTO_NATIVE_OP_HANDLER (crypto_native_hash_sha##b) = { \
+ .op_id = VNET_CRYPTO_OP_SHA##b##_HASH, \
+ .fn = crypto_native_ops_hash_sha##b, \
+ .cfn = crypto_native_ops_chained_hash_sha##b, \
+ .probe = probe, \
+ }; \
+ CRYPTO_NATIVE_OP_HANDLER (crypto_native_hmac_sha##b) = { \
+ .op_id = VNET_CRYPTO_OP_SHA##b##_HMAC, \
+ .fn = crypto_native_ops_hmac_sha##b, \
+ .cfn = crypto_native_ops_chained_hmac_sha##b, \
+ .probe = probe, \
+ }; \
+ CRYPTO_NATIVE_KEY_HANDLER (crypto_native_hmac_sha##b) = { \
+ .alg_id = VNET_CRYPTO_ALG_HMAC_SHA##b, \
+ .key_fn = sha2_##b##_key_add, \
+ .probe = probe, \
+ };
+
+_ (224)
+_ (256)
+
+#undef _
diff --git a/src/crypto_engines/openssl/CMakeLists.txt b/src/crypto_engines/openssl/CMakeLists.txt
new file mode 100644
index 00000000000..01f00a01a74
--- /dev/null
+++ b/src/crypto_engines/openssl/CMakeLists.txt
@@ -0,0 +1,27 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if(NOT OPENSSL_FOUND)
+ return()
+endif()
+
+include_directories(${OPENSSL_INCLUDE_DIR})
+add_compile_definitions(OPENSSL_SUPPRESS_DEPRECATED)
+
+add_vpp_crypto_engine(openssl
+ SOURCES
+ main.c
+
+ LINK_LIBRARIES
+ ${OPENSSL_CRYPTO_LIBRARIES}
+)
diff --git a/src/crypto_engines/openssl/FEATURE.yaml b/src/crypto_engines/openssl/FEATURE.yaml
new file mode 100644
index 00000000000..da0a0812595
--- /dev/null
+++ b/src/crypto_engines/openssl/FEATURE.yaml
@@ -0,0 +1,14 @@
+---
+name: IPsec crypto engine provided by the OpenSSL library
+maintainer: Damjan Marion <damarion@cisco.com>
+features:
+ - SHA(1, 224, 256, 384, 512)
+ - CBC(128, 192, 256)
+ - GCM(128, 192, 256)
+ - CTR(128, 192, 256)
+ - DES, 3DES
+ - MD5
+
+description: ""
+state: production
+properties: [API, CLI, MULTITHREAD]
diff --git a/src/crypto_engines/openssl/crypto_openssl.h b/src/crypto_engines/openssl/crypto_openssl.h
new file mode 100644
index 00000000000..cff9820fe99
--- /dev/null
+++ b/src/crypto_engines/openssl/crypto_openssl.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 ARM Ltd and/or its affiliates.
+ */
+
+#ifndef __crypto_openssl_h__
+#define __crypto_openssl_h__
+
+typedef void *(crypto_openssl_ctx_fn_t) (vnet_crypto_key_t *key,
+ vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx);
+
+typedef struct
+{
+ crypto_openssl_ctx_fn_t *ctx_fn[VNET_CRYPTO_N_ALGS];
+} crypto_openssl_main_t;
+
+extern crypto_openssl_main_t crypto_openssl_main;
+
+#endif /* __crypto_openssl_h__ */
diff --git a/src/crypto_engines/openssl/main.c b/src/crypto_engines/openssl/main.c
new file mode 100644
index 00000000000..c5636add266
--- /dev/null
+++ b/src/crypto_engines/openssl/main.c
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2024 Cisco Systems, Inc.
+ */
+
+#include <openssl/evp.h>
+#include <openssl/hmac.h>
+#include <openssl/rand.h>
+#include <openssl/sha.h>
+
+#include <vlib/vlib.h>
+#include <vnet/crypto/crypto.h>
+#include <vnet/crypto/engine.h>
+#include <openssl/crypto_openssl.h>
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ EVP_CIPHER_CTX **evp_cipher_enc_ctx;
+ EVP_CIPHER_CTX **evp_cipher_dec_ctx;
+ HMAC_CTX **hmac_ctx;
+ EVP_MD_CTX *hash_ctx;
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+ HMAC_CTX _hmac_ctx;
+#endif
+} openssl_per_thread_data_t;
+
+static openssl_per_thread_data_t *per_thread_data;
+static u32 num_threads;
+
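+/* Map VPP cipher ops to OpenSSL EVP ciphers; the first column selects the
+   handler flavour.  CTR modes reuse the cbc handler since the EVP calls
+   are identical once the cipher is chosen. */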
+#define foreach_openssl_aes_evp_op \
+ _ (cbc, DES_CBC, EVP_des_cbc) \
+ _ (cbc, 3DES_CBC, EVP_des_ede3_cbc) \
+ _ (cbc, AES_128_CBC, EVP_aes_128_cbc) \
+ _ (cbc, AES_192_CBC, EVP_aes_192_cbc) \
+ _ (cbc, AES_256_CBC, EVP_aes_256_cbc) \
+ _ (gcm, AES_128_GCM, EVP_aes_128_gcm) \
+ _ (gcm, AES_192_GCM, EVP_aes_192_gcm) \
+ _ (gcm, AES_256_GCM, EVP_aes_256_gcm) \
+ _ (cbc, AES_128_CTR, EVP_aes_128_ctr) \
+ _ (cbc, AES_192_CTR, EVP_aes_192_ctr) \
+ _ (cbc, AES_256_CTR, EVP_aes_256_ctr) \
+ _ (null_gmac, AES_128_NULL_GMAC, EVP_aes_128_gcm) \
+ _ (null_gmac, AES_192_NULL_GMAC, EVP_aes_192_gcm) \
+ _ (null_gmac, AES_256_NULL_GMAC, EVP_aes_256_gcm)
+
+#define foreach_openssl_chacha20_evp_op \
+ _ (chacha20_poly1305, CHACHA20_POLY1305, EVP_chacha20_poly1305)
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+#define foreach_openssl_evp_op foreach_openssl_aes_evp_op \
+ foreach_openssl_chacha20_evp_op
+#else
+#define foreach_openssl_evp_op foreach_openssl_aes_evp_op
+#endif
+
+#ifndef EVP_CTRL_AEAD_GET_TAG
+#define EVP_CTRL_AEAD_GET_TAG EVP_CTRL_GCM_GET_TAG
+#endif
+
+#ifndef EVP_CTRL_AEAD_SET_TAG
+#define EVP_CTRL_AEAD_SET_TAG EVP_CTRL_GCM_SET_TAG
+#endif
+
+#define foreach_openssl_hash_op \
+ _ (SHA1, EVP_sha1) \
+ _ (SHA224, EVP_sha224) \
+ _ (SHA256, EVP_sha256) \
+ _ (SHA384, EVP_sha384) \
+ _ (SHA512, EVP_sha512)
+
+#define foreach_openssl_hmac_op \
+ _(MD5, EVP_md5) \
+ _(SHA1, EVP_sha1) \
+ _(SHA224, EVP_sha224) \
+ _(SHA256, EVP_sha256) \
+ _(SHA384, EVP_sha384) \
+ _(SHA512, EVP_sha512)
+
+crypto_openssl_main_t crypto_openssl_main;
+
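+/* CBC/CTR encrypt: reuse the per-thread, per-key EVP context set up by the
+   key handler; only the IV changes per op.  Chained buffers are encrypted
+   into a stack bounce buffer and copied back chunk by chunk. */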
+static_always_inline u32
+openssl_ops_enc_cbc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher)
+{
+ openssl_per_thread_data_t *ptd = per_thread_data + vm->thread_index;
+ EVP_CIPHER_CTX *ctx;
+ vnet_crypto_op_chunk_t *chp;
+ u32 i, j, curr_len = 0;
+ u8 out_buf[VLIB_BUFFER_DEFAULT_DATA_SIZE * 5];
+
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+ int out_len = 0;
+
+ ctx = ptd->evp_cipher_enc_ctx[op->key_index];
+ EVP_EncryptInit_ex (ctx, NULL, NULL, NULL, op->iv);
+
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ chp = chunks + op->chunk_index;
+ u32 offset = 0;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ EVP_EncryptUpdate (ctx, out_buf + offset, &out_len, chp->src,
+ chp->len);
+ curr_len = chp->len;
+ offset += out_len;
+ chp += 1;
+ }
+ if (out_len < curr_len)
+ EVP_EncryptFinal_ex (ctx, out_buf + offset, &out_len);
+
+ offset = 0;
+ chp = chunks + op->chunk_index;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ clib_memcpy_fast (chp->dst, out_buf + offset, chp->len);
+ offset += chp->len;
+ chp += 1;
+ }
+ }
+ else
+ {
+ EVP_EncryptUpdate (ctx, op->dst, &out_len, op->src, op->len);
+ if (out_len < op->len)
+ EVP_EncryptFinal_ex (ctx, op->dst + out_len, &out_len);
+ }
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+ return n_ops;
+}
+
+static_always_inline u32
+openssl_ops_dec_cbc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher)
+{
+ openssl_per_thread_data_t *ptd = per_thread_data + vm->thread_index;
+ EVP_CIPHER_CTX *ctx;
+ vnet_crypto_op_chunk_t *chp;
+ u32 i, j, curr_len = 0;
+ u8 out_buf[VLIB_BUFFER_DEFAULT_DATA_SIZE * 5];
+
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+ int out_len = 0;
+
+ ctx = ptd->evp_cipher_dec_ctx[op->key_index];
+ EVP_DecryptInit_ex (ctx, NULL, NULL, NULL, op->iv);
+
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ chp = chunks + op->chunk_index;
+ u32 offset = 0;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ EVP_DecryptUpdate (ctx, out_buf + offset, &out_len, chp->src,
+ chp->len);
+ curr_len = chp->len;
+ offset += out_len;
+ chp += 1;
+ }
+ if (out_len < curr_len)
+ EVP_DecryptFinal_ex (ctx, out_buf + offset, &out_len);
+
+ offset = 0;
+ chp = chunks + op->chunk_index;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ clib_memcpy_fast (chp->dst, out_buf + offset, chp->len);
+ offset += chp->len;
+ chp += 1;
+ }
+ }
+ else
+ {
+ EVP_DecryptUpdate (ctx, op->dst, &out_len, op->src, op->len);
+ if (out_len < op->len)
+ EVP_DecryptFinal_ex (ctx, op->dst + out_len, &out_len);
+ }
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+ return n_ops;
+}
+
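+/* AEAD encrypt (GCM, GMAC, chacha20-poly1305): feed AAD first, discard the
+   ciphertext output for GMAC, and read the tag back with
+   EVP_CTRL_AEAD_GET_TAG.  The next two ops' buffers are prefetched. */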
+static_always_inline u32
+openssl_ops_enc_aead (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher, int is_gcm, int is_gmac)
+{
+ openssl_per_thread_data_t *ptd = per_thread_data + vm->thread_index;
+ EVP_CIPHER_CTX *ctx;
+ vnet_crypto_op_chunk_t *chp;
+ u32 i, j;
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+ int len = 0;
+
+ if (i + 2 < n_ops)
+ {
+ CLIB_PREFETCH (ops[i + 1]->src, 4 * CLIB_CACHE_PREFETCH_BYTES, LOAD);
+ CLIB_PREFETCH (ops[i + 1]->dst, 4 * CLIB_CACHE_PREFETCH_BYTES,
+ STORE);
+
+ CLIB_PREFETCH (ops[i + 2]->src, 4 * CLIB_CACHE_PREFETCH_BYTES, LOAD);
+ CLIB_PREFETCH (ops[i + 2]->dst, 4 * CLIB_CACHE_PREFETCH_BYTES,
+ STORE);
+ }
+
+ ctx = ptd->evp_cipher_enc_ctx[op->key_index];
+ EVP_EncryptInit_ex (ctx, 0, 0, NULL, op->iv);
+ if (op->aad_len)
+ EVP_EncryptUpdate (ctx, NULL, &len, op->aad, op->aad_len);
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ chp = chunks + op->chunk_index;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ EVP_EncryptUpdate (ctx, is_gmac ? 0 : chp->dst, &len, chp->src,
+ chp->len);
+ chp += 1;
+ }
+ }
+ else
+ EVP_EncryptUpdate (ctx, is_gmac ? 0 : op->dst, &len, op->src, op->len);
+ EVP_EncryptFinal_ex (ctx, is_gmac ? 0 : op->dst + len, &len);
+ EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_AEAD_GET_TAG, op->tag_len, op->tag);
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+ return n_ops;
+}
+
+static_always_inline u32
+openssl_ops_enc_null_gmac (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher)
+{
+ return openssl_ops_enc_aead (vm, ops, chunks, n_ops, cipher,
+ /* is_gcm */ 1, /* is_gmac */ 1);
+}
+
+static_always_inline u32
+openssl_ops_enc_gcm (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher)
+{
+ return openssl_ops_enc_aead (vm, ops, chunks, n_ops, cipher,
+ /* is_gcm */ 1, /* is_gmac */ 0);
+}
+
+static_always_inline __clib_unused u32
+openssl_ops_enc_chacha20_poly1305 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher)
+{
+ return openssl_ops_enc_aead (vm, ops, chunks, n_ops, cipher,
+ /* is_gcm */ 0, /* is_gmac */ 0);
+}
+
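+/* AEAD decrypt: the expected tag is installed with EVP_CTRL_AEAD_SET_TAG
+   before the final call; a failed EVP_DecryptFinal_ex marks the op as
+   FAIL_BAD_HMAC and removes it from the returned count. */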
+static_always_inline u32
+openssl_ops_dec_aead (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher, int is_gcm, int is_gmac)
+{
+ openssl_per_thread_data_t *ptd = per_thread_data + vm->thread_index;
+ EVP_CIPHER_CTX *ctx;
+ vnet_crypto_op_chunk_t *chp;
+ u32 i, j, n_fail = 0;
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+ int len = 0;
+
+ ctx = ptd->evp_cipher_dec_ctx[op->key_index];
+ EVP_DecryptInit_ex (ctx, 0, 0, NULL, op->iv);
+ if (op->aad_len)
+ EVP_DecryptUpdate (ctx, 0, &len, op->aad, op->aad_len);
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ chp = chunks + op->chunk_index;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ EVP_DecryptUpdate (ctx, is_gmac ? 0 : chp->dst, &len, chp->src,
+ chp->len);
+ chp += 1;
+ }
+ }
+ else
+ {
+ EVP_DecryptUpdate (ctx, is_gmac ? 0 : op->dst, &len, op->src,
+ op->len);
+ }
+ EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_AEAD_SET_TAG, op->tag_len, op->tag);
+
+ if (EVP_DecryptFinal_ex (ctx, is_gmac ? 0 : op->dst + len, &len) > 0)
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ else
+ {
+ n_fail++;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ }
+ }
+ return n_ops - n_fail;
+}
+
+static_always_inline u32
+openssl_ops_dec_null_gmac (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher)
+{
+ return openssl_ops_dec_aead (vm, ops, chunks, n_ops, cipher,
+ /* is_gcm */ 1, /* is_gmac */ 1);
+}
+
+static_always_inline u32
+openssl_ops_dec_gcm (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher)
+{
+ return openssl_ops_dec_aead (vm, ops, chunks, n_ops, cipher,
+ /* is_gcm */ 1, /* is_gmac */ 0);
+}
+
+static_always_inline __clib_unused u32
+openssl_ops_dec_chacha20_poly1305 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops,
+ const EVP_CIPHER *cipher)
+{
+ return openssl_ops_dec_aead (vm, ops, chunks, n_ops, cipher,
+ /* is_gcm */ 0, /* is_gmac */ 0);
+}
+
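+/* Plain hash over flat or chained input using the per-thread EVP_MD_CTX. */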
+static_always_inline u32
+openssl_ops_hash (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks, u32 n_ops, const EVP_MD *md)
+{
+ openssl_per_thread_data_t *ptd = per_thread_data + vm->thread_index;
+ EVP_MD_CTX *ctx = ptd->hash_ctx;
+ vnet_crypto_op_chunk_t *chp;
+ u32 md_len, i, j, n_fail = 0;
+
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+
+ EVP_DigestInit_ex (ctx, md, NULL);
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ chp = chunks + op->chunk_index;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ EVP_DigestUpdate (ctx, chp->src, chp->len);
+ chp += 1;
+ }
+ }
+ else
+ EVP_DigestUpdate (ctx, op->src, op->len);
+
+ EVP_DigestFinal_ex (ctx, op->digest, &md_len);
+ op->digest_len = md_len;
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+ return n_ops - n_fail;
+}
+
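+/* HMAC using the per-key HMAC_CTX initialized at key-add time; on check
+   ops the computed digest is compared (truncated to op->digest_len when
+   set), otherwise it is copied out. */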
+static_always_inline u32
+openssl_ops_hmac (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ vnet_crypto_op_chunk_t * chunks, u32 n_ops,
+ const EVP_MD * md)
+{
+ u8 buffer[64];
+ openssl_per_thread_data_t *ptd = per_thread_data + vm->thread_index;
+ HMAC_CTX *ctx;
+ vnet_crypto_op_chunk_t *chp;
+ u32 i, j, n_fail = 0;
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+ unsigned int out_len = 0;
+ size_t sz = op->digest_len ? op->digest_len : EVP_MD_size (md);
+
+ ctx = ptd->hmac_ctx[op->key_index];
+ HMAC_Init_ex (ctx, NULL, 0, NULL, NULL);
+ if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ chp = chunks + op->chunk_index;
+ for (j = 0; j < op->n_chunks; j++)
+ {
+ HMAC_Update (ctx, chp->src, chp->len);
+ chp += 1;
+ }
+ }
+ else
+ HMAC_Update (ctx, op->src, op->len);
+ HMAC_Final (ctx, buffer, &out_len);
+
+ if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
+ {
+ if ((memcmp (op->digest, buffer, sz)))
+ {
+ n_fail++;
+ op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ continue;
+ }
+ }
+ else
+ clib_memcpy_fast (op->digest, buffer, sz);
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+ return n_ops - n_fail;
+}
+
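+/* Cipher key handler: ADD allocates per-thread encrypt/decrypt contexts at
+   the key index, MODIFY re-keys them in place, DEL frees them. */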
+static_always_inline void *
+openssl_ctx_cipher (vnet_crypto_key_t *key, vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx, const EVP_CIPHER *cipher,
+ int is_gcm)
+{
+ EVP_CIPHER_CTX *ctx;
+ openssl_per_thread_data_t *ptd;
+
+ if (VNET_CRYPTO_KEY_OP_ADD == kop)
+ {
+ for (ptd = per_thread_data; ptd - per_thread_data < num_threads; ptd++)
+ {
+ vec_validate_aligned (ptd->evp_cipher_enc_ctx, idx,
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (ptd->evp_cipher_dec_ctx, idx,
+ CLIB_CACHE_LINE_BYTES);
+
+ ctx = EVP_CIPHER_CTX_new ();
+ EVP_CIPHER_CTX_set_padding (ctx, 0);
+ EVP_EncryptInit_ex (ctx, cipher, NULL, NULL, NULL);
+ if (is_gcm)
+ EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_GCM_SET_IVLEN, 12, NULL);
+ EVP_EncryptInit_ex (ctx, 0, 0, key->data, 0);
+ ptd->evp_cipher_enc_ctx[idx] = ctx;
+
+ ctx = EVP_CIPHER_CTX_new ();
+ EVP_CIPHER_CTX_set_padding (ctx, 0);
+ EVP_DecryptInit_ex (ctx, cipher, 0, 0, 0);
+ if (is_gcm)
+ EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_GCM_SET_IVLEN, 12, 0);
+ EVP_DecryptInit_ex (ctx, 0, 0, key->data, 0);
+ ptd->evp_cipher_dec_ctx[idx] = ctx;
+ }
+ }
+ else if (VNET_CRYPTO_KEY_OP_MODIFY == kop)
+ {
+ for (ptd = per_thread_data; ptd - per_thread_data < num_threads; ptd++)
+ {
+ ctx = ptd->evp_cipher_enc_ctx[idx];
+ EVP_EncryptInit_ex (ctx, cipher, NULL, NULL, NULL);
+ if (is_gcm)
+ EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_GCM_SET_IVLEN, 12, NULL);
+ EVP_EncryptInit_ex (ctx, 0, 0, key->data, 0);
+
+ ctx = ptd->evp_cipher_dec_ctx[idx];
+ EVP_DecryptInit_ex (ctx, cipher, 0, 0, 0);
+ if (is_gcm)
+ EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_GCM_SET_IVLEN, 12, 0);
+ EVP_DecryptInit_ex (ctx, 0, 0, key->data, 0);
+ }
+ }
+ else if (VNET_CRYPTO_KEY_OP_DEL == kop)
+ {
+ for (ptd = per_thread_data; ptd - per_thread_data < num_threads; ptd++)
+ {
+ ctx = ptd->evp_cipher_enc_ctx[idx];
+ EVP_CIPHER_CTX_free (ctx);
+ ptd->evp_cipher_enc_ctx[idx] = NULL;
+
+ ctx = ptd->evp_cipher_dec_ctx[idx];
+ EVP_CIPHER_CTX_free (ctx);
+ ptd->evp_cipher_dec_ctx[idx] = NULL;
+ }
+ }
+ return NULL;
+}
+
+static_always_inline void *
+openssl_ctx_hmac (vnet_crypto_key_t *key, vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx, const EVP_MD *md)
+{
+ HMAC_CTX *ctx;
+ openssl_per_thread_data_t *ptd;
+ if (VNET_CRYPTO_KEY_OP_ADD == kop)
+ {
+ for (ptd = per_thread_data; ptd - per_thread_data < num_threads; ptd++)
+ {
+ vec_validate_aligned (ptd->hmac_ctx, idx, CLIB_CACHE_LINE_BYTES);
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ ctx = HMAC_CTX_new ();
+ HMAC_Init_ex (ctx, key->data, vec_len (key->data), md, NULL);
+ ptd->hmac_ctx[idx] = ctx;
+#else
+ HMAC_CTX_init (&(ptd->_hmac_ctx));
+ ptd->hmac_ctx[idx] = &ptd->_hmac_ctx;
+#endif
+ }
+ }
+ else if (VNET_CRYPTO_KEY_OP_MODIFY == kop)
+ {
+ for (ptd = per_thread_data; ptd - per_thread_data < num_threads; ptd++)
+ {
+ ctx = ptd->hmac_ctx[idx];
+ HMAC_Init_ex (ctx, key->data, vec_len (key->data), md, NULL);
+ }
+ }
+ else if (VNET_CRYPTO_KEY_OP_DEL == kop)
+ {
+ for (ptd = per_thread_data; ptd - per_thread_data < num_threads; ptd++)
+ {
+ ctx = ptd->hmac_ctx[idx];
+ HMAC_CTX_free (ctx);
+ ptd->hmac_ctx[idx] = NULL;
+ }
+ }
+ return NULL;
+}
+
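+/* Dispatch key add/modify/delete to the per-algorithm context function;
+   linked (crypto + integrity) keys are not handled yet. */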
+static void
+crypto_openssl_key_handler (vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx)
+{
+ vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
+ crypto_openssl_main_t *cm = &crypto_openssl_main;
+
+ /** TODO: add linked alg support **/
+ if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ return;
+
+ if (cm->ctx_fn[key->alg] == 0)
+ return;
+
+ cm->ctx_fn[key->alg](key, kop, idx);
+}
+
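+/* Generate per-algorithm wrappers binding each op to its EVP cipher;
+   is_gcm is derived from the algorithm falling in the GCM/GMAC range. */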
+#define _(m, a, b) \
+ static u32 openssl_ops_enc_##a (vlib_main_t *vm, vnet_crypto_op_t *ops[], \
+ u32 n_ops) \
+ { \
+ return openssl_ops_enc_##m (vm, ops, 0, n_ops, b ()); \
+ } \
+ \
+ u32 openssl_ops_dec_##a (vlib_main_t *vm, vnet_crypto_op_t *ops[], \
+ u32 n_ops) \
+ { \
+ return openssl_ops_dec_##m (vm, ops, 0, n_ops, b ()); \
+ } \
+ \
+ static u32 openssl_ops_enc_chained_##a ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
+ u32 n_ops) \
+ { \
+ return openssl_ops_enc_##m (vm, ops, chunks, n_ops, b ()); \
+ } \
+ \
+ static u32 openssl_ops_dec_chained_##a ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
+ u32 n_ops) \
+ { \
+ return openssl_ops_dec_##m (vm, ops, chunks, n_ops, b ()); \
+ } \
+ static void *openssl_ctx_##a (vnet_crypto_key_t *key, \
+ vnet_crypto_key_op_t kop, \
+ vnet_crypto_key_index_t idx) \
+ { \
+ int is_gcm = ((VNET_CRYPTO_ALG_AES_128_GCM <= key->alg) && \
+ (VNET_CRYPTO_ALG_AES_256_NULL_GMAC >= key->alg)) ? \
+ 1 : \
+ 0; \
+ return openssl_ctx_cipher (key, kop, idx, b (), is_gcm); \
+ }
+
+foreach_openssl_evp_op;
+#undef _
+
+#define _(a, b) \
+ static u32 openssl_ops_hash_##a (vlib_main_t *vm, vnet_crypto_op_t *ops[], \
+ u32 n_ops) \
+ { \
+ return openssl_ops_hash (vm, ops, 0, n_ops, b ()); \
+ } \
+ static u32 openssl_ops_hash_chained_##a ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
+ u32 n_ops) \
+ { \
+ return openssl_ops_hash (vm, ops, chunks, n_ops, b ()); \
+ }
+
+foreach_openssl_hash_op;
+#undef _
+
+#define _(a, b) \
+ static u32 openssl_ops_hmac_##a (vlib_main_t *vm, vnet_crypto_op_t *ops[], \
+ u32 n_ops) \
+ { \
+ return openssl_ops_hmac (vm, ops, 0, n_ops, b ()); \
+ } \
+ static u32 openssl_ops_hmac_chained_##a ( \
+ vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
+ u32 n_ops) \
+ { \
+ return openssl_ops_hmac (vm, ops, chunks, n_ops, b ()); \
+ } \
+ static void *openssl_ctx_hmac_##a (vnet_crypto_key_t *key, \
+ vnet_crypto_key_op_t kop, \
+ vnet_crypto_key_index_t idx) \
+ { \
+ return openssl_ctx_hmac (key, kop, idx, b ()); \
+ }
+
+foreach_openssl_hmac_op;
+#undef _
+
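+/* Engine init: seed the OpenSSL RNG from getrandom(), record the thread
+   count and per-thread data supplied by the registration, wire up the
+   per-algorithm context functions and create per-thread hash contexts. */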
+static char *
+crypto_openssl_init (vnet_crypto_engine_registration_t *r)
+{
+ crypto_openssl_main_t *cm = &crypto_openssl_main;
+ u8 seed[32];
+
+ if (syscall (SYS_getrandom, &seed, sizeof (seed), 0) != sizeof (seed))
+ return "getrandom() failed";
+
+ num_threads = r->num_threads;
+
+ RAND_seed (seed, sizeof (seed));
+
+#define _(m, a, b) cm->ctx_fn[VNET_CRYPTO_ALG_##a] = openssl_ctx_##a;
+ foreach_openssl_evp_op;
+#undef _
+
+#define _(a, b) cm->ctx_fn[VNET_CRYPTO_ALG_HMAC_##a] = openssl_ctx_hmac_##a;
+ foreach_openssl_hmac_op;
+#undef _
+
+ per_thread_data = r->per_thread_data;
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ for (u32 i = 0; i < r->num_threads; i++)
+ per_thread_data[i].hash_ctx = EVP_MD_CTX_create ();
+#endif
+ return 0;
+}
+
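+/* Op handler table consumed by the crypto infra, one entry per op and
+   direction, terminated by an empty entry. */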
+vnet_crypto_engine_op_handlers_t op_handlers[] = {
+#define _(m, a, b) \
+ { \
+ .opt = VNET_CRYPTO_OP_##a##_ENC, \
+ .fn = openssl_ops_enc_##a, \
+ .cfn = openssl_ops_enc_chained_##a, \
+ }, \
+ { .opt = VNET_CRYPTO_OP_##a##_DEC, \
+ .fn = openssl_ops_dec_##a, \
+ .cfn = openssl_ops_dec_chained_##a },
+ foreach_openssl_evp_op
+#undef _
+#define _(a, b) \
+ { .opt = VNET_CRYPTO_OP_##a##_HMAC, \
+ .fn = openssl_ops_hmac_##a, \
+ .cfn = openssl_ops_hmac_chained_##a },
+ foreach_openssl_hmac_op
+#undef _
+#define _(a, b) \
+ { .opt = VNET_CRYPTO_OP_##a##_HASH, \
+ .fn = openssl_ops_hash_##a, \
+ .cfn = openssl_ops_hash_chained_##a },
+ foreach_openssl_hash_op
+#undef _
+ {}
+};
+
+VNET_CRYPTO_ENGINE_REGISTRATION () = {
+ .name = "openssl",
+ .desc = "OpenSSL",
+ .prio = 50,
+ .per_thread_data_sz = sizeof (openssl_per_thread_data_t),
+ .init_fn = crypto_openssl_init,
+ .key_handler = crypto_openssl_key_handler,
+ .op_handlers = op_handlers,
+};