Diffstat (limited to 'src/plugins/crypto_ipsecmb')
-rw-r--r--  src/plugins/crypto_ipsecmb/CMakeLists.txt  |  10
-rw-r--r--  src/plugins/crypto_ipsecmb/ipsecmb.c       | 294
2 files changed, 192 insertions, 112 deletions
diff --git a/src/plugins/crypto_ipsecmb/CMakeLists.txt b/src/plugins/crypto_ipsecmb/CMakeLists.txt
index 981a045262e..429343a9f3b 100644
--- a/src/plugins/crypto_ipsecmb/CMakeLists.txt
+++ b/src/plugins/crypto_ipsecmb/CMakeLists.txt
@@ -33,6 +33,16 @@ if(IPSECMB_INCLUDE_DIR AND IPSECMB_LIB)
     ${IPSECMB_LINK_FLAGS}
   )
 
+  file(READ "${IPSECMB_INCLUDE_DIR}/intel-ipsec-mb.h" ipsecmb_header)
+  string(REGEX MATCH "IMB_VERSION_STR (\"+[0-9]+\\.[0-9]+\\.[0-9]+\")" _ ${ipsecmb_header})
+  string(REPLACE "\"" "" IPSECMB_VERSION ${CMAKE_MATCH_1})
+
+  if (${IPSECMB_VERSION} VERSION_GREATER "0.54.0")
+    add_definitions(-DHAVE_IPSECMB_CHACHA_POLY)
+  else()
+    message(STATUS "Intel IPSecMB ${IPSECMB_VERSION} does not support chacha20-poly1305. Disabled")
+  endif()
+
   target_compile_options(crypto_ipsecmb_plugin PRIVATE "-march=silvermont" "-maes")
   message(STATUS "Intel IPSecMB found: ${IPSECMB_INCLUDE_DIR}")
 else()
diff --git a/src/plugins/crypto_ipsecmb/ipsecmb.c b/src/plugins/crypto_ipsecmb/ipsecmb.c
index ad5f7bfe006..064c129ba12 100644
--- a/src/plugins/crypto_ipsecmb/ipsecmb.c
+++ b/src/plugins/crypto_ipsecmb/ipsecmb.c
@@ -25,14 +25,16 @@
 #include <vnet/crypto/crypto.h>
 #include <vppinfra/cpu.h>
 
-#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
+#define HMAC_MAX_BLOCK_SIZE IMB_SHA_512_BLOCK_SIZE
 #define EXPANDED_KEY_N_BYTES (16 * 15)
 
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  MB_MGR *mgr;
-  __m128i cbc_iv;
+  IMB_MGR *mgr;
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+  IMB_JOB burst_jobs[IMB_MAX_BURST_SIZE];
+#endif
 } ipsecmb_per_thread_data_t;
 
 typedef struct
@@ -60,11 +62,12 @@ typedef struct
 
 static ipsecmb_main_t ipsecmb_main = { };
 
+/* clang-format off */
 /*
  * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
  */
 #define foreach_ipsecmb_hmac_op                                \
-  _(SHA1,   SHA1,    sha1,   64,  20, 20)                      \
+  _(SHA1,   SHA_1,   sha1,   64,  20, 20)                      \
   _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
   _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
   _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
@@ -88,21 +91,21 @@ static ipsecmb_main_t ipsecmb_main = { };
   _(AES_128_GCM, 128) \
   _(AES_192_GCM, 192) \
   _(AES_256_GCM, 256)
-
+/* clang-format on */
 static_always_inline vnet_crypto_op_status_t
-ipsecmb_status_job (JOB_STS status)
+ipsecmb_status_job (IMB_STATUS status)
 {
   switch (status)
     {
-    case STS_COMPLETED:
+    case IMB_STATUS_COMPLETED:
       return VNET_CRYPTO_OP_STATUS_COMPLETED;
-    case STS_BEING_PROCESSED:
-    case STS_COMPLETED_AES:
-    case STS_COMPLETED_HMAC:
+    case IMB_STATUS_BEING_PROCESSED:
+    case IMB_STATUS_COMPLETED_CIPHER:
+    case IMB_STATUS_COMPLETED_AUTH:
       return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
-    case STS_INVALID_ARGS:
-    case STS_INTERNAL_ERROR:
-    case STS_ERROR:
+    case IMB_STATUS_INVALID_ARGS:
+    case IMB_STATUS_INTERNAL_ERROR:
+    case IMB_STATUS_ERROR:
       return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
     }
   ASSERT (0);
@@ -110,12 +113,12 @@ ipsecmb_status_job (JOB_STS status)
 }
 
 always_inline void
-ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
+ipsecmb_retire_hmac_job (IMB_JOB *job, u32 *n_fail, u32 digest_size)
 {
   vnet_crypto_op_t *op = job->user_data;
   u32 len = op->digest_len ? op->digest_len : digest_size;
 
-  if (PREDICT_FALSE (STS_COMPLETED != job->status))
+  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
     {
       op->status = ipsecmb_status_job (job->status);
       *n_fail = *n_fail + 1;
@@ -139,15 +142,71 @@ ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
   op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
 }
 
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+static_always_inline u32
+ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+                         u32 block_size, u32 hash_size, u32 digest_size,
+                         IMB_HASH_ALG alg)
+{
+  ipsecmb_main_t *imbm = &ipsecmb_main;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  IMB_JOB *job;
+  u32 i, n_fail = 0, ops_index = 0;
+  u8 scratch[n_ops][digest_size];
+  const u32 burst_sz =
+    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
+
+  while (n_ops)
+    {
+      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
+      /*
+       * configure all the jobs first ...
+       */
+      for (i = 0; i < n; i++, ops_index++)
+        {
+          vnet_crypto_op_t *op = ops[ops_index];
+          const u8 *kd = (u8 *) imbm->key_data[op->key_index];
+
+          job = &ptd->burst_jobs[i];
+
+          job->src = op->src;
+          job->hash_start_src_offset_in_bytes = 0;
+          job->msg_len_to_hash_in_bytes = op->len;
+          job->auth_tag_output_len_in_bytes = digest_size;
+          job->auth_tag_output = scratch[ops_index];
+
+          job->u.HMAC._hashed_auth_key_xor_ipad = kd;
+          job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
+          job->user_data = op;
+        }
+
+      /*
+       * submit all jobs to be processed and retire completed jobs
+       */
+      IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);
+
+      for (i = 0; i < n; i++)
+        {
+          job = &ptd->burst_jobs[i];
+          ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
+        }
+
+      n_ops -= n;
+    }
+
+  return ops_index - n_fail;
+}
+#else
 static_always_inline u32
-ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
-			 u32 n_ops, u32 block_size, u32 hash_size,
-			 u32 digest_size, JOB_HASH_ALG alg)
+ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
+                         u32 block_size, u32 hash_size, u32 digest_size,
+                         JOB_HASH_ALG alg)
 {
   ipsecmb_main_t *imbm = &ipsecmb_main;
-  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
-						     vm->thread_index);
-  JOB_AES_HMAC *job;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  IMB_JOB *job;
   u32 i, n_fail = 0;
   u8 scratch[n_ops][digest_size];
 
@@ -168,9 +227,9 @@ ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
       job->auth_tag_output_len_in_bytes = digest_size;
       job->auth_tag_output = scratch[i];
 
-      job->cipher_mode = NULL_CIPHER;
-      job->cipher_direction = DECRYPT;
-      job->chain_order = HASH_CIPHER;
+      job->cipher_mode = IMB_CIPHER_NULL;
+      job->cipher_direction = IMB_DIR_DECRYPT;
+      job->chain_order = IMB_ORDER_HASH_CIPHER;
 
       job->u.HMAC._hashed_auth_key_xor_ipad = kd;
       job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
@@ -187,23 +246,27 @@ ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
 
   return n_ops - n_fail;
 }
+#endif
 
+/* clang-format off */
 #define _(a, b, c, d, e, f)                                      \
 static_always_inline u32                                         \
 ipsecmb_ops_hmac_##a (vlib_main_t * vm,                          \
                       vnet_crypto_op_t * ops[],                  \
                       u32 n_ops)                                 \
-{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); } \
+{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f,       \
+                                  IMB_AUTH_HMAC_##b); }          \
 
 foreach_ipsecmb_hmac_op;
 #undef _
+/* clang-format on */
 
 always_inline void
-ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
+ipsecmb_retire_cipher_job (IMB_JOB *job, u32 *n_fail)
 {
   vnet_crypto_op_t *op = job->user_data;
 
-  if (PREDICT_FALSE (STS_COMPLETED != job->status))
+  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
     {
       op->status = ipsecmb_status_job (job->status);
       *n_fail = *n_fail + 1;
@@ -212,6 +275,62 @@ ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
   op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
 }
 
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+static_always_inline u32
+ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
+                               u32 n_ops, u32 key_len,
+                               IMB_CIPHER_DIRECTION direction,
+                               IMB_CIPHER_MODE cipher_mode)
+{
+  ipsecmb_main_t *imbm = &ipsecmb_main;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  IMB_JOB *job;
+  u32 i, n_fail = 0, ops_index = 0;
+  const u32 burst_sz =
+    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;
+
+  while (n_ops)
+    {
+      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
+
+      for (i = 0; i < n; i++)
+        {
+          ipsecmb_aes_key_data_t *kd;
+          vnet_crypto_op_t *op = ops[ops_index++];
+          kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
+
+          job = &ptd->burst_jobs[i];
+
+          job->src = op->src;
+          job->dst = op->dst;
+          job->msg_len_to_cipher_in_bytes = op->len;
+          job->cipher_start_src_offset_in_bytes = 0;
+
+          job->hash_alg = IMB_AUTH_NULL;
+
+          job->enc_keys = kd->enc_key_exp;
+          job->dec_keys = kd->dec_key_exp;
+          job->iv = op->iv;
+          job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;
+
+          job->user_data = op;
+        }
+
+      IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
+                                       cipher_mode, direction, key_len / 8);
+      for (i = 0; i < n; i++)
+        {
+          job = &ptd->burst_jobs[i];
+          ipsecmb_retire_cipher_job (job, &n_fail);
+        }
+
+      n_ops -= n;
+    }
+
+  return ops_index - n_fail;
+}
+#else
 static_always_inline u32
 ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                u32 n_ops, u32 key_len,
@@ -219,9 +338,9 @@ ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
                                JOB_CIPHER_MODE cipher_mode)
 {
   ipsecmb_main_t *imbm = &ipsecmb_main;
-  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
-						     vm->thread_index);
-  JOB_AES_HMAC *job;
+  ipsecmb_per_thread_data_t *ptd =
+    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
+  IMB_JOB *job;
   u32 i, n_fail = 0;
 
   for (i = 0; i < n_ops; i++)
@@ -229,7 +348,6 @@ ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
       ipsecmb_aes_key_data_t *kd;
       vnet_crypto_op_t *op = ops[i];
       kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];
-      __m128i iv;
 
       job = IMB_GET_NEXT_JOB (ptd->mgr);
 
@@ -238,23 +356,18 @@ ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
       job->msg_len_to_cipher_in_bytes = op->len;
       job->cipher_start_src_offset_in_bytes = 0;
 
-      job->hash_alg = NULL_HASH;
+      job->hash_alg = IMB_AUTH_NULL;
       job->cipher_mode = cipher_mode;
       job->cipher_direction = direction;
-      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
-
-      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
-	{
-	  iv = ptd->cbc_iv;
-	  _mm_storeu_si128 ((__m128i *) op->iv, iv);
-	  ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
-	}
+      job->chain_order =
+        (direction == IMB_DIR_ENCRYPT ? IMB_ORDER_CIPHER_HASH :
+                                        IMB_ORDER_HASH_CIPHER);
 
       job->aes_key_len_in_bytes = key_len / 8;
-      job->aes_enc_key_expanded = kd->enc_key_exp;
-      job->aes_dec_key_expanded = kd->dec_key_exp;
+      job->enc_keys = kd->enc_key_exp;
+      job->dec_keys = kd->dec_key_exp;
       job->iv = op->iv;
-      job->iv_len_in_bytes = AES_BLOCK_SIZE;
+      job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;
 
       job->user_data = op;
 
@@ -269,18 +382,22 @@ ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
 
   return n_ops - n_fail;
 }
+#endif
 
+/* clang-format off */
 #define _(a, b, c)                                                           \
   static_always_inline u32 ipsecmb_ops_cipher_enc_##a (                      \
     vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                     \
   {                                                                          \
-    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, ENCRYPT, c);    \
+    return ipsecmb_ops_aes_cipher_inline (                                   \
+      vm, ops, n_ops, b, IMB_DIR_ENCRYPT, IMB_CIPHER_##c);                   \
   }                                                                          \
                                                                              \
   static_always_inline u32 ipsecmb_ops_cipher_dec_##a (                      \
     vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                     \
   {                                                                          \
-    return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, DECRYPT, c);    \
+    return ipsecmb_ops_aes_cipher_inline (                                   \
+      vm, ops, n_ops, b, IMB_DIR_DECRYPT, IMB_CIPHER_##c);                   \
   }
 
 foreach_ipsecmb_cipher_op;
@@ -294,7 +411,7 @@ ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                 \
   ipsecmb_main_t *imbm = &ipsecmb_main;                                     \
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
                                                      vm->thread_index);     \
-  MB_MGR *m = ptd->mgr;                                                     \
+  IMB_MGR *m = ptd->mgr;                                                    \
   vnet_crypto_op_chunk_t *chp;                                              \
   u32 i, j;                                                                 \
                                                                             \
@@ -329,7 +446,7 @@ ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
   ipsecmb_main_t *imbm = &ipsecmb_main;                                     \
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
                                                      vm->thread_index);     \
-  MB_MGR *m = ptd->mgr;                                                     \
+  IMB_MGR *m = ptd->mgr;                                                    \
   u32 i;                                                                    \
                                                                             \
   for (i = 0; i < n_ops; i++)                                               \
@@ -355,7 +472,7 @@ ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                 \
   ipsecmb_main_t *imbm = &ipsecmb_main;                                     \
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
                                                      vm->thread_index);     \
-  MB_MGR *m = ptd->mgr;                                                     \
+  IMB_MGR *m = ptd->mgr;                                                    \
   vnet_crypto_op_chunk_t *chp;                                              \
   u32 i, j, n_failed = 0;                                                   \
                                                                             \
@@ -397,7 +514,7 @@ ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
   ipsecmb_main_t *imbm = &ipsecmb_main;                                     \
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \
                                                      vm->thread_index);     \
-  MB_MGR *m = ptd->mgr;                                                     \
+  IMB_MGR *m = ptd->mgr;                                                    \
   u32 i, n_failed = 0;                                                      \
                                                                             \
   for (i = 0; i < n_ops; i++)                                               \
@@ -422,17 +539,18 @@ ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
                                                                             \
   return n_ops - n_failed;                                                  \
 }
-
+/* clang-format on */
 foreach_ipsecmb_gcm_cipher_op;
 #undef _
 
+#ifdef HAVE_IPSECMB_CHACHA_POLY
 always_inline void
-ipsecmb_retire_aead_job (JOB_AES_HMAC *job, u32 *n_fail)
+ipsecmb_retire_aead_job (IMB_JOB *job, u32 *n_fail)
 {
   vnet_crypto_op_t *op = job->user_data;
   u32 len = op->tag_len;
 
-  if (PREDICT_FALSE (STS_COMPLETED != job->status))
+  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
     {
       op->status = ipsecmb_status_job (job->status);
       *n_fail = *n_fail + 1;
@@ -462,16 +580,14 @@ ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                      vm->thread_index);
   struct IMB_JOB *job;
-  MB_MGR *m = ptd->mgr;
+  IMB_MGR *m = ptd->mgr;
   u32 i, n_fail = 0, last_key_index = ~0;
   u8 scratch[VLIB_FRAME_SIZE][16];
-  u8 iv_data[16];
   u8 *key = 0;
 
   for (i = 0; i < n_ops; i++)
     {
       vnet_crypto_op_t *op = ops[i];
-      __m128i iv;
 
       job = IMB_GET_NEXT_JOB (m);
 
       if (last_key_index != op->key_index)
@@ -494,15 +610,6 @@ ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
       job->src = op->src;
       job->dst = op->dst;
 
-      if ((dir == IMB_DIR_ENCRYPT) &&
-	  (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
-	{
-	  iv = ptd->cbc_iv;
-	  _mm_storeu_si128 ((__m128i *) iv_data, iv);
-	  clib_memcpy_fast (op->iv, iv_data, 12);
-	  ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
-	}
-
       job->iv = op->iv;
       job->iv_len_in_bytes = 12;
       job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
@@ -550,9 +657,8 @@ ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
   ipsecmb_main_t *imbm = &ipsecmb_main;
   ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                      vm->thread_index);
-  MB_MGR *m = ptd->mgr;
+  IMB_MGR *m = ptd->mgr;
   u32 i, n_fail = 0, last_key_index = ~0;
-  u8 iv_data[16];
   u8 *key = 0;
 
   if (dir == IMB_DIR_ENCRYPT)
@@ -562,7 +668,6 @@ ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
 	  vnet_crypto_op_t *op = ops[i];
 	  struct chacha20_poly1305_context_data ctx;
 	  vnet_crypto_op_chunk_t *chp;
-	  __m128i iv;
 	  u32 j;
 
 	  ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);
@@ -575,14 +680,6 @@ ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
 	      last_key_index = op->key_index;
 	    }
 
-	  if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
-	    {
-	      iv = ptd->cbc_iv;
-	      _mm_storeu_si128 ((__m128i *) iv_data, iv);
-	      clib_memcpy_fast (op->iv, iv_data, 12);
-	      ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
-	    }
-
 	  IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
 				      op->aad_len);
 
@@ -662,30 +759,7 @@ ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
   return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
 					  IMB_DIR_DECRYPT);
 }
-
-clib_error_t *
-crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
-{
-  ipsecmb_per_thread_data_t *ptd;
-  clib_error_t *err = 0;
-  int fd;
-
-  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
-    return clib_error_return_unix (0, "failed to open '/dev/urandom'");
-
-  vec_foreach (ptd, imbm->per_thread_data)
-  {
-    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
-      {
-	err = clib_error_return_unix (0, "'/dev/urandom' read failure");
-	close (fd);
-	return (err);
-      }
-  }
-
-  close (fd);
-  return (NULL);
-}
+#endif
 
 static void
 crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
@@ -773,8 +847,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
   ipsecmb_alg_data_t *ad;
   ipsecmb_per_thread_data_t *ptd;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
-  clib_error_t *error;
-  MB_MGR *m = 0;
+  IMB_MGR *m = 0;
   u32 eidx;
   u8 *name;
 
@@ -791,13 +864,16 @@ crypto_ipsecmb_init (vlib_main_t * vm)
   vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
 			CLIB_CACHE_LINE_BYTES);
 
-  /* *INDENT-OFF* */
   vec_foreach (ptd, imbm->per_thread_data)
     {
 	ptd->mgr = alloc_mb_mgr (0);
-	if (clib_cpu_supports_avx512f ())
+#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
+	clib_memset_u8 (ptd->burst_jobs, 0,
+			sizeof (IMB_JOB) * IMB_MAX_BURST_SIZE);
+#endif
+	if (clib_cpu_supports_avx512f ())
 	  init_mb_mgr_avx512 (ptd->mgr);
-	else if (clib_cpu_supports_avx2 ())
+	else if (clib_cpu_supports_avx2 () && clib_cpu_supports_bmi2 ())
 	  init_mb_mgr_avx2 (ptd->mgr);
 	else
 	  init_mb_mgr_sse (ptd->mgr);
@@ -805,10 +881,6 @@ crypto_ipsecmb_init (vlib_main_t * vm)
 	if (ptd == imbm->per_thread_data)
 	  m = ptd->mgr;
     }
-  /* *INDENT-ON* */
-
-  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
-    return (error);
 
 #define _(a, b, c, d, e, f)                                                  \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC,     \
@@ -850,6 +922,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
   foreach_ipsecmb_gcm_cipher_op;
 #undef _
 
+#ifdef HAVE_IPSECMB_CHACHA_POLY
   vnet_crypto_register_ops_handler (vm, eidx,
 				    VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
 				    ipsecmb_ops_chacha_poly_enc);
@@ -864,25 +937,22 @@ crypto_ipsecmb_init (vlib_main_t * vm)
 				      ipsec_mb_ops_chacha_poly_dec_chained);
   ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
   ad->data_size = 0;
+#endif
 
   vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
 
   return (NULL);
 }
 
-/* *INDENT-OFF* */
 VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
 {
   .runs_after = VLIB_INITS ("vnet_crypto_init"),
 };
-/* *INDENT-ON* */
 
-/* *INDENT-OFF* */
 VLIB_PLUGIN_REGISTER () =
 {
   .version = VPP_BUILD_VER,
   .description = "Intel IPSEC Multi-buffer Crypto Engine",
 };
-/* *INDENT-ON* */
 
 /*
  * fd.io coding-style-patch-verification: ON
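
Note for orientation: the core ipsecmb.c change above switches, when intel-ipsec-mb >= 1.3 is detected, from fetching and submitting one job at a time via IMB_GET_NEXT_JOB () to filling a per-thread IMB_JOB array and handing the whole batch to the library with IMB_SUBMIT_HASH_BURST_NOCHECK () / IMB_SUBMIT_CIPHER_BURST_NOCHECK (). The following is a minimal, self-contained C sketch of that burst flow, not part of the patch: the zeroed ipad/opad buffers and fixed message/digest sizes are stand-ins for what the plugin's key handler actually precomputes, and init_mb_mgr_sse () is called directly where the plugin selects SSE/AVX2/AVX-512 from CPU features.

/*
 * Sketch only: HMAC-SHA1 burst submission with intel-ipsec-mb >= 1.3,
 * mirroring the pattern used by the new ipsecmb_ops_hmac_inline().
 */
#include <intel-ipsec-mb.h>
#include <stdio.h>
#include <string.h>

#define N_JOBS 4

int
main (void)
{
  IMB_MGR *mgr = alloc_mb_mgr (0);
  if (!mgr)
    return 1;
  init_mb_mgr_sse (mgr);	/* plugin picks SSE/AVX2/AVX512 at runtime */

  uint8_t msg[N_JOBS][64] = { 0 };
  uint8_t tag[N_JOBS][20];	/* HMAC-SHA1 digest size */
  uint8_t pads[2][64] = { 0 };	/* hashed key XOR ipad/opad, stubbed here */
  IMB_JOB jobs[N_JOBS];
  memset (jobs, 0, sizeof (jobs)); /* same zeroing the plugin does at init */

  for (int i = 0; i < N_JOBS; i++)
    {
      IMB_JOB *job = &jobs[i];
      job->src = msg[i];
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = sizeof (msg[i]);
      job->auth_tag_output = tag[i];
      job->auth_tag_output_len_in_bytes = sizeof (tag[i]);
      job->u.HMAC._hashed_auth_key_xor_ipad = pads[0];
      job->u.HMAC._hashed_auth_key_xor_opad = pads[1];
    }

  /* one call processes the whole batch; no per-job flush loop needed */
  IMB_SUBMIT_HASH_BURST_NOCHECK (mgr, jobs, N_JOBS, IMB_AUTH_HMAC_SHA_1);

  for (int i = 0; i < N_JOBS; i++)
    if (jobs[i].status != IMB_STATUS_COMPLETED)
      fprintf (stderr, "job %d failed\n", i);

  free_mb_mgr (mgr);
  return 0;
}

The _NOCHECK burst variants skip the library's per-job argument validation (hence the patch zeroing ptd->burst_jobs once in crypto_ipsecmb_init and filling only the fields each burst needs), which is presumably why the pre-1.3 per-job submit/flush path is kept behind the IMB_VERSION_NUM guard rather than removed.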