diff options
author | Ranjan Raj <ranjanx.raj@intel.com> | 2023-07-07 06:09:50 +0000 |
---|---|---|
committer | Damjan Marion <dmarion@0xa5.net> | 2023-09-26 15:07:32 +0000 |
commit | 40242b88e9d4548c4e76a69b00c87c0151218d03 (patch) | |
tree | bc427c890d01e86c4954677996981f3aa541de7c | |
parent | d732f34911461907c9c0bdb98c29ff9e7aa82204 (diff) |
crypto-ipsecmb: bump intel-ipsec-mb version to 1.4
Type: feature
This patch updates the Intel IPsec-MB library to v1.4.
Remove v0.54 and v0.55 support, as the compatible IMB APIs
they rely on are deprecated in v1.4.
Signed-off-by: Ranjan Raj <ranjanx.raj@intel.com>
Change-Id: I01f71134c6bd17a68ec20b7bb4b0b0ff43fc644b
-rw-r--r-- | build/external/packages/ipsec-mb.mk | 5 | ||||
-rw-r--r-- | src/plugins/crypto_ipsecmb/ipsecmb.c | 109 |
2 files changed, 61 insertions, 53 deletions
diff --git a/build/external/packages/ipsec-mb.mk b/build/external/packages/ipsec-mb.mk index 02c2e194456..638f3c2de17 100644 --- a/build/external/packages/ipsec-mb.mk +++ b/build/external/packages/ipsec-mb.mk @@ -11,14 +11,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -ipsec-mb_version := 1.3 +ipsec-mb_version := 1.4 ipsec-mb_tarball := v$(ipsec-mb_version).tar.gz -ipsec-mb_tarball_md5sum_0.54 := 258941f7ba90c275fcf9d19c622d2d21 -ipsec-mb_tarball_md5sum_0.55 := deca674bca7ae2282890e1fa7f953609 ipsec-mb_tarball_md5sum_1.0 := 906e701937751e761671dc83a41cff65 ipsec-mb_tarball_md5sum_1.1 := 3916471d3713d27e42473cb6af9c65e5 ipsec-mb_tarball_md5sum_1.2 := f551d9c208893a436c1f5c146a615bd6 ipsec-mb_tarball_md5sum_1.3 := d8692db9efe32a263b61f12ac0dca950 +ipsec-mb_tarball_md5sum_1.4 := fddba2611f822296ddd82d1c31d22b24 ipsec-mb_tarball_md5sum := $(ipsec-mb_tarball_md5sum_$(ipsec-mb_version)) ipsec-mb_tarball_strip_dirs := 1 diff --git a/src/plugins/crypto_ipsecmb/ipsecmb.c b/src/plugins/crypto_ipsecmb/ipsecmb.c index 4ad4fb281a4..9d874101163 100644 --- a/src/plugins/crypto_ipsecmb/ipsecmb.c +++ b/src/plugins/crypto_ipsecmb/ipsecmb.c @@ -25,15 +25,15 @@ #include <vnet/crypto/crypto.h> #include <vppinfra/cpu.h> -#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE +#define HMAC_MAX_BLOCK_SIZE IMB_SHA_512_BLOCK_SIZE #define EXPANDED_KEY_N_BYTES (16 * 15) typedef struct { CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); - MB_MGR *mgr; + IMB_MGR *mgr; #if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0) - JOB_AES_HMAC burst_jobs[IMB_MAX_BURST_SIZE]; + IMB_JOB burst_jobs[IMB_MAX_BURST_SIZE]; #endif } ipsecmb_per_thread_data_t; @@ -62,11 +62,12 @@ typedef struct static ipsecmb_main_t ipsecmb_main = { }; +/* clang-format off */ /* * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes) */ #define foreach_ipsecmb_hmac_op \ - _(SHA1, SHA1, sha1, 64, 20, 20) \ + _(SHA1, SHA_1, sha1, 64, 20, 20) \ _(SHA224, SHA_224, sha224, 
64, 32, 28) \ _(SHA256, SHA_256, sha256, 64, 32, 32) \ _(SHA384, SHA_384, sha384, 128, 64, 48) \ @@ -90,21 +91,21 @@ static ipsecmb_main_t ipsecmb_main = { }; _(AES_128_GCM, 128) \ _(AES_192_GCM, 192) \ _(AES_256_GCM, 256) - +/* clang-format on */ static_always_inline vnet_crypto_op_status_t -ipsecmb_status_job (JOB_STS status) +ipsecmb_status_job (IMB_STATUS status) { switch (status) { - case STS_COMPLETED: + case IMB_STATUS_COMPLETED: return VNET_CRYPTO_OP_STATUS_COMPLETED; - case STS_BEING_PROCESSED: - case STS_COMPLETED_AES: - case STS_COMPLETED_HMAC: + case IMB_STATUS_BEING_PROCESSED: + case IMB_STATUS_COMPLETED_CIPHER: + case IMB_STATUS_COMPLETED_AUTH: return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS; - case STS_INVALID_ARGS: - case STS_INTERNAL_ERROR: - case STS_ERROR: + case IMB_STATUS_INVALID_ARGS: + case IMB_STATUS_INTERNAL_ERROR: + case IMB_STATUS_ERROR: return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR; } ASSERT (0); @@ -112,12 +113,12 @@ ipsecmb_status_job (JOB_STS status) } always_inline void -ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size) +ipsecmb_retire_hmac_job (IMB_JOB *job, u32 *n_fail, u32 digest_size) { vnet_crypto_op_t *op = job->user_data; u32 len = op->digest_len ? 
op->digest_len : digest_size; - if (PREDICT_FALSE (STS_COMPLETED != job->status)) + if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status)) { op->status = ipsecmb_status_job (job->status); *n_fail = *n_fail + 1; @@ -145,12 +146,12 @@ ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size) static_always_inline u32 ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops, u32 block_size, u32 hash_size, u32 digest_size, - JOB_HASH_ALG alg) + IMB_HASH_ALG alg) { ipsecmb_main_t *imbm = &ipsecmb_main; ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); - JOB_AES_HMAC *job; + IMB_JOB *job; u32 i, n_fail = 0, ops_index = 0; u8 scratch[n_ops][digest_size]; const u32 burst_sz = @@ -205,7 +206,7 @@ ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops, ipsecmb_main_t *imbm = &ipsecmb_main; ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); - JOB_AES_HMAC *job; + IMB_JOB *job; u32 i, n_fail = 0; u8 scratch[n_ops][digest_size]; @@ -226,9 +227,9 @@ ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops, job->auth_tag_output_len_in_bytes = digest_size; job->auth_tag_output = scratch[i]; - job->cipher_mode = NULL_CIPHER; - job->cipher_direction = DECRYPT; - job->chain_order = HASH_CIPHER; + job->cipher_mode = IMB_CIPHER_NULL; + job->cipher_direction = IMB_DIR_DECRYPT; + job->chain_order = IMB_ORDER_HASH_CIPHER; job->u.HMAC._hashed_auth_key_xor_ipad = kd; job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size; @@ -247,22 +248,25 @@ ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops, } #endif +/* clang-format off */ #define _(a, b, c, d, e, f) \ static_always_inline u32 \ ipsecmb_ops_hmac_##a (vlib_main_t * vm, \ vnet_crypto_op_t * ops[], \ u32 n_ops) \ -{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); } \ +{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, \ + 
IMB_AUTH_HMAC_##b); } \ foreach_ipsecmb_hmac_op; #undef _ +/* clang-format on */ always_inline void -ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail) +ipsecmb_retire_cipher_job (IMB_JOB *job, u32 *n_fail) { vnet_crypto_op_t *op = job->user_data; - if (PREDICT_FALSE (STS_COMPLETED != job->status)) + if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status)) { op->status = ipsecmb_status_job (job->status); *n_fail = *n_fail + 1; @@ -275,13 +279,13 @@ ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail) static_always_inline u32 ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops, u32 key_len, - JOB_CIPHER_DIRECTION direction, - JOB_CIPHER_MODE cipher_mode) + IMB_CIPHER_DIRECTION direction, + IMB_CIPHER_MODE cipher_mode) { ipsecmb_main_t *imbm = &ipsecmb_main; ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); - JOB_AES_HMAC *job; + IMB_JOB *job; u32 i, n_fail = 0, ops_index = 0; const u32 burst_sz = (n_ops > IMB_MAX_BURST_SIZE) ? 
IMB_MAX_BURST_SIZE : n_ops; @@ -303,12 +307,12 @@ ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], job->msg_len_to_cipher_in_bytes = op->len; job->cipher_start_src_offset_in_bytes = 0; - job->hash_alg = NULL_HASH; + job->hash_alg = IMB_AUTH_NULL; - job->aes_enc_key_expanded = kd->enc_key_exp; - job->aes_dec_key_expanded = kd->dec_key_exp; + job->enc_keys = kd->enc_key_exp; + job->dec_keys = kd->dec_key_exp; job->iv = op->iv; - job->iv_len_in_bytes = AES_BLOCK_SIZE; + job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE; job->user_data = op; } @@ -336,7 +340,7 @@ ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], ipsecmb_main_t *imbm = &ipsecmb_main; ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); - JOB_AES_HMAC *job; + IMB_JOB *job; u32 i, n_fail = 0; for (i = 0; i < n_ops; i++) @@ -352,16 +356,18 @@ ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], job->msg_len_to_cipher_in_bytes = op->len; job->cipher_start_src_offset_in_bytes = 0; - job->hash_alg = NULL_HASH; + job->hash_alg = IMB_AUTH_NULL; job->cipher_mode = cipher_mode; job->cipher_direction = direction; - job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER); + job->chain_order = + (direction == IMB_DIR_ENCRYPT ? 
IMB_ORDER_CIPHER_HASH : + IMB_ORDER_HASH_CIPHER); job->aes_key_len_in_bytes = key_len / 8; - job->aes_enc_key_expanded = kd->enc_key_exp; - job->aes_dec_key_expanded = kd->dec_key_exp; + job->enc_keys = kd->enc_key_exp; + job->dec_keys = kd->dec_key_exp; job->iv = op->iv; - job->iv_len_in_bytes = AES_BLOCK_SIZE; + job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE; job->user_data = op; @@ -378,17 +384,20 @@ ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], } #endif +/* clang-format off */ #define _(a, b, c) \ static_always_inline u32 ipsecmb_ops_cipher_enc_##a ( \ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \ { \ - return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, ENCRYPT, c); \ + return ipsecmb_ops_aes_cipher_inline ( \ + vm, ops, n_ops, b, IMB_DIR_ENCRYPT, IMB_CIPHER_##c); \ } \ \ static_always_inline u32 ipsecmb_ops_cipher_dec_##a ( \ vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \ { \ - return ipsecmb_ops_aes_cipher_inline (vm, ops, n_ops, b, DECRYPT, c); \ + return ipsecmb_ops_aes_cipher_inline ( \ + vm, ops, n_ops, b, IMB_DIR_DECRYPT, IMB_CIPHER_##c); \ } foreach_ipsecmb_cipher_op; @@ -402,7 +411,7 @@ ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm, \ ipsecmb_main_t *imbm = &ipsecmb_main; \ ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \ vm->thread_index); \ - MB_MGR *m = ptd->mgr; \ + IMB_MGR *m = ptd->mgr; \ vnet_crypto_op_chunk_t *chp; \ u32 i, j; \ \ @@ -437,7 +446,7 @@ ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \ ipsecmb_main_t *imbm = &ipsecmb_main; \ ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \ vm->thread_index); \ - MB_MGR *m = ptd->mgr; \ + IMB_MGR *m = ptd->mgr; \ u32 i; \ \ for (i = 0; i < n_ops; i++) \ @@ -463,7 +472,7 @@ ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm, \ ipsecmb_main_t *imbm = &ipsecmb_main; \ ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \ 
vm->thread_index); \ - MB_MGR *m = ptd->mgr; \ + IMB_MGR *m = ptd->mgr; \ vnet_crypto_op_chunk_t *chp; \ u32 i, j, n_failed = 0; \ \ @@ -505,7 +514,7 @@ ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \ ipsecmb_main_t *imbm = &ipsecmb_main; \ ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, \ vm->thread_index); \ - MB_MGR *m = ptd->mgr; \ + IMB_MGR *m = ptd->mgr; \ u32 i, n_failed = 0; \ \ for (i = 0; i < n_ops; i++) \ @@ -530,18 +539,18 @@ ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \ \ return n_ops - n_failed; \ } - +/* clang-format on */ foreach_ipsecmb_gcm_cipher_op; #undef _ #ifdef HAVE_IPSECMB_CHACHA_POLY always_inline void -ipsecmb_retire_aead_job (JOB_AES_HMAC *job, u32 *n_fail) +ipsecmb_retire_aead_job (IMB_JOB *job, u32 *n_fail) { vnet_crypto_op_t *op = job->user_data; u32 len = op->tag_len; - if (PREDICT_FALSE (STS_COMPLETED != job->status)) + if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status)) { op->status = ipsecmb_status_job (job->status); *n_fail = *n_fail + 1; @@ -571,7 +580,7 @@ ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops, ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); struct IMB_JOB *job; - MB_MGR *m = ptd->mgr; + IMB_MGR *m = ptd->mgr; u32 i, n_fail = 0, last_key_index = ~0; u8 scratch[VLIB_FRAME_SIZE][16]; u8 *key = 0; @@ -648,7 +657,7 @@ ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[], ipsecmb_main_t *imbm = &ipsecmb_main; ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); - MB_MGR *m = ptd->mgr; + IMB_MGR *m = ptd->mgr; u32 i, n_fail = 0, last_key_index = ~0; u8 *key = 0; @@ -838,7 +847,7 @@ crypto_ipsecmb_init (vlib_main_t * vm) ipsecmb_alg_data_t *ad; ipsecmb_per_thread_data_t *ptd; vlib_thread_main_t *tm = vlib_get_thread_main (); - MB_MGR *m = 0; + IMB_MGR *m = 0; u32 eidx; u8 *name; @@ -861,7 +870,7 @@ 
crypto_ipsecmb_init (vlib_main_t * vm) ptd->mgr = alloc_mb_mgr (0); #if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0) clib_memset_u8 (ptd->burst_jobs, 0, - sizeof (JOB_AES_HMAC) * IMB_MAX_BURST_SIZE); + sizeof (IMB_JOB) * IMB_MAX_BURST_SIZE); #endif if (clib_cpu_supports_avx512f ()) init_mb_mgr_avx512 (ptd->mgr); |