author    | Neale Ranns <nranns@cisco.com> | 2019-04-16 21:39:37 +0000
committer | Damjan Marion <dmarion@me.com> | 2019-04-18 13:12:55 +0000
commit    | 2eef76b6d81716075c68cafbf3e82d2daadd5ed9 (patch)
tree      | 402ee6b97a64e63b72100efb49ee5f453181f3cc /src/plugins
parent    | ee66c8d3ca38c6e09e33584bee93f80b58cabc2b (diff)
crypto-ipsecmb: enable GCM
Change-Id: I670d7899bcc63a419daf481167dc445a6386cce8
Signed-off-by: Neale Ranns <nranns@cisco.com>
(cherry picked from commit d8cfbebce78e26a6ef7f6693e7c90dc3c6435d51)
Diffstat (limited to 'src/plugins')
-rw-r--r-- | src/plugins/crypto_ipsecmb/ipsecmb.c | 244
1 file changed, 220 insertions(+), 24 deletions(-)
```diff
diff --git a/src/plugins/crypto_ipsecmb/ipsecmb.c b/src/plugins/crypto_ipsecmb/ipsecmb.c
index d579c0d74a6..9b3cf3b7afd 100644
--- a/src/plugins/crypto_ipsecmb/ipsecmb.c
+++ b/src/plugins/crypto_ipsecmb/ipsecmb.c
@@ -36,6 +36,26 @@ typedef struct ipsecmb_main_t_
   ipsecmb_per_thread_data_t *per_thread_data;
 } ipsecmb_main_t;
 
+/**
+ * AES GCM key-expansion VFT
+ */
+typedef void (*ase_gcm_pre_t) (const void *key,
+                               struct gcm_key_data * key_data);
+
+typedef struct ipsecmb_gcm_pre_vft_t_
+{
+  ase_gcm_pre_t ase_gcm_pre_128;
+  ase_gcm_pre_t ase_gcm_pre_192;
+  ase_gcm_pre_t ase_gcm_pre_256;
+} ipsecmb_gcm_pre_vft_t;
+
+static ipsecmb_gcm_pre_vft_t ipsecmb_gcm_pre_vft;
+
+#define INIT_IPSEC_MB_GCM_PRE(_arch)                                    \
+  ipsecmb_gcm_pre_vft.ase_gcm_pre_128 = aes_gcm_pre_128_##_arch;        \
+  ipsecmb_gcm_pre_vft.ase_gcm_pre_192 = aes_gcm_pre_192_##_arch;        \
+  ipsecmb_gcm_pre_vft.ase_gcm_pre_256 = aes_gcm_pre_256_##_arch;
+
 static ipsecmb_main_t ipsecmb_main;
 
 #define foreach_ipsecmb_hmac_op \
@@ -44,11 +64,22 @@ static ipsecmb_main_t ipsecmb_main;
 _(SHA384, SHA_384, sha384) \
 _(SHA512, SHA_512, sha512)
 
-#define foreach_ipsecmb_cipher_op \
+/*
+ * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
+ */
+#define foreach_ipsecmb_cbc_cipher_op \
 _(AES_128_CBC, 128, 16, 16) \
 _(AES_192_CBC, 192, 24, 16) \
 _(AES_256_CBC, 256, 32, 16)
 
+/*
+ * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
+ */
+#define foreach_ipsecmb_gcm_cipher_op \
+_(AES_128_GCM, 128, 16, 12) \
+_(AES_192_GCM, 192, 24, 12) \
+_(AES_256_GCM, 256, 32, 12)
+
 always_inline void
 hash_expand_keys (const MB_MGR * mgr,
                   const u8 * key,
@@ -202,11 +233,11 @@ ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
 }
 
 static_always_inline u32
-ipsecmb_ops_cipher_inline (vlib_main_t * vm,
-                           ipsecmb_per_thread_data_t * ptd,
-                           vnet_crypto_op_t * ops[],
-                           u32 n_ops, u32 key_len, u32 iv_len,
-                           keyexp_t fn, JOB_CIPHER_DIRECTION direction)
+ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
+                               ipsecmb_per_thread_data_t * ptd,
+                               vnet_crypto_op_t * ops[],
+                               u32 n_ops, u32 key_len, u32 iv_len,
+                               keyexp_t fn, JOB_CIPHER_DIRECTION direction)
 {
   JOB_AES_HMAC *job;
   u32 i, n_fail = 0;
@@ -270,9 +301,9 @@ ipsecmb_ops_cipher_inline (vlib_main_t * vm,
 
 #define _(a, b, c, d) \
 static_always_inline u32 \
-ipsecmb_ops_cipher_enc_##a (vlib_main_t * vm, \
-                            vnet_crypto_op_t * ops[], \
-                            u32 n_ops) \
+ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm, \
+                                vnet_crypto_op_t * ops[], \
+                                u32 n_ops) \
 { \
   ipsecmb_per_thread_data_t *ptd; \
   ipsecmb_main_t *imbm; \
@@ -280,18 +311,18 @@ ipsecmb_ops_cipher_enc_##a (vlib_main_t * vm, \
   imbm = &ipsecmb_main; \
   ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \
 \
-  return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops, c, d, \
-                                    ptd->mgr->keyexp_##b, \
-                                    ENCRYPT); \
+  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d, \
+                                        ptd->mgr->keyexp_##b, \
+                                        ENCRYPT); \
 }
-foreach_ipsecmb_cipher_op;
+foreach_ipsecmb_cbc_cipher_op;
 #undef _
 
 #define _(a, b, c, d) \
 static_always_inline u32 \
-ipsecmb_ops_cipher_dec_##a (vlib_main_t * vm, \
-                            vnet_crypto_op_t * ops[], \
-                            u32 n_ops) \
+ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm, \
+                                vnet_crypto_op_t * ops[], \
+                                u32 n_ops) \
 { \
   ipsecmb_per_thread_data_t *ptd; \
   ipsecmb_main_t *imbm; \
@@ -299,11 +330,161 @@ ipsecmb_ops_cipher_dec_##a (vlib_main_t * vm, \
   imbm = &ipsecmb_main; \
   ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \
 \
-  return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops, c, d, \
-                                    ptd->mgr->keyexp_##b, \
-                                    DECRYPT); \
+  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d, \
+                                        ptd->mgr->keyexp_##b, \
+                                        DECRYPT); \
+}
+foreach_ipsecmb_cbc_cipher_op;
+#undef _
```
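For readers unfamiliar with the `_()`/X-macro idiom used throughout this file: each tuple in `foreach_ipsecmb_cbc_cipher_op` stamps out one handler function. Hand-expanding the encrypt template for the first tuple, `_(AES_128_CBC, 128, 16, 16)`, gives roughly the following. This is an illustration only; the real functions are generated by the preprocessor.

```c
/* Illustrative hand-expansion of the _() encrypt template for
 * _(AES_128_CBC, 128, 16, 16): a = AES_128_CBC, b = 128, c = 16, d = 16. */
static_always_inline u32
ipsecmb_ops_cbc_cipher_enc_AES_128_CBC (vlib_main_t * vm,
                                        vnet_crypto_op_t * ops[], u32 n_ops)
{
  ipsecmb_per_thread_data_t *ptd;
  ipsecmb_main_t *imbm;

  imbm = &ipsecmb_main;
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);

  /* c = 16-byte key, d = 16-byte IV; keyexp_128 is the multi-buffer
   * manager's AES-128 key-expansion routine */
  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, 16, 16,
                                        ptd->mgr->keyexp_128, ENCRYPT);
}
```

The patch then adds the new GCM retire and worker routines: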
```diff
+always_inline void
+ipsecmb_retire_gcm_cipher_job (JOB_AES_HMAC * job,
+                               u32 * n_fail, JOB_CIPHER_DIRECTION direction)
+{
+  vnet_crypto_op_t *op = job->user_data;
+
+  if (STS_COMPLETED != job->status)
+    {
+      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+      *n_fail = *n_fail + 1;
+    }
+  else
+    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+
+  if (DECRYPT == direction)
+    {
+      if ((memcmp (op->tag, job->auth_tag_output, op->tag_len)))
+        {
+          *n_fail = *n_fail + 1;
+          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+        }
+    }
+}
+
+static_always_inline u32
+ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
+                               ipsecmb_per_thread_data_t * ptd,
+                               vnet_crypto_op_t * ops[],
+                               u32 n_ops, u32 key_len, u32 iv_len,
+                               ase_gcm_pre_t fn,
+                               JOB_CIPHER_DIRECTION direction)
+{
+  JOB_AES_HMAC *job;
+  u32 i, n_fail = 0;
+  u8 scratch[n_ops][64];
+
+  /*
+   * queue all the jobs first ...
+   */
+  for (i = 0; i < n_ops; i++)
+    {
+      struct gcm_key_data key_data;
+      vnet_crypto_op_t *op = ops[i];
+      u32 nonce[3];
+      __m128i iv;
+
+      fn (op->key, &key_data);
+
+      job = IMB_GET_NEXT_JOB (ptd->mgr);
+
+      job->src = op->src;
+      job->dst = op->dst;
+      job->msg_len_to_cipher_in_bytes = op->len;
+      job->cipher_start_src_offset_in_bytes = 0;
+
+      job->hash_alg = AES_GMAC;
+      job->cipher_mode = GCM;
+      job->cipher_direction = direction;
+      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
+
+      if (direction == ENCRYPT)
+        {
+          if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
+            {
+              iv = ptd->cbc_iv;
+              // only use 8 bytes of the IV
+              clib_memcpy_fast (op->iv, &iv, 8);
+              ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
+            }
+          nonce[0] = op->salt;
+          clib_memcpy_fast (nonce + 1, op->iv, 8);
+          job->iv = (u8 *) nonce;
+        }
+      else
+        {
+          nonce[0] = op->salt;
+          clib_memcpy_fast (nonce + 1, op->iv, 8);
+          job->iv = op->iv;
+        }
+
+      job->aes_key_len_in_bytes = key_len;
+      job->aes_enc_key_expanded = &key_data;
+      job->aes_dec_key_expanded = &key_data;
+      job->iv_len_in_bytes = iv_len;
+
+      job->u.GCM.aad = op->aad;
+      job->u.GCM.aad_len_in_bytes = op->aad_len;
+      job->auth_tag_output_len_in_bytes = op->tag_len;
+      if (DECRYPT == direction)
+        job->auth_tag_output = scratch[i];
+      else
+        job->auth_tag_output = op->tag;
+      job->user_data = op;
+
+      job = IMB_SUBMIT_JOB (ptd->mgr);
+
+      if (job)
+        ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
+    }
+
+  /*
+   * ... then flush (i.e. complete) them
+   * We will have queued enough to satisfy the 'multi' buffer
+   */
+  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
+    {
+      ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
+    }
+
+  return n_ops - n_fail;
+}
```
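Two details of the worker above are worth calling out. First, the 12-byte GCM IV is assembled as a 4-byte salt followed by the 8-byte per-packet IV (`nonce[0] = op->salt; clib_memcpy_fast (nonce + 1, op->iv, 8)`), and on decrypt the computed tag goes to a per-op scratch buffer so it can be compared against `op->tag` when the job retires. Second, the worker keeps intel-ipsec-mb's asynchronous multi-buffer contract: `IMB_SUBMIT_JOB` may return NULL (job merely queued) or a pointer to some earlier job that has now completed, and `IMB_FLUSH_JOB` drains whatever is still in flight. Stripped of the GCM-specific setup, the shape of the loop is the following sketch, where `retire` is a stand-in name for `ipsecmb_retire_gcm_cipher_job`:

```c
/* Sketch of the submit/flush pattern inside ipsecmb_ops_gcm_cipher_inline;
 * job parameter setup elided, 'retire' is a stand-in name. */
for (i = 0; i < n_ops; i++)
  {
    job = IMB_GET_NEXT_JOB (ptd->mgr); /* borrow a job slot from the manager */
    /* ... fill in cipher/auth parameters from ops[i] ... */
    job->user_data = ops[i];

    /* may return a *different*, earlier job that has now completed */
    job = IMB_SUBMIT_JOB (ptd->mgr);
    if (job)
      retire (job, &n_fail, direction);
  }

/* nothing left to submit: force completion of everything still queued */
while ((job = IMB_FLUSH_JOB (ptd->mgr)))
  retire (job, &n_fail, direction);
```

The remainder of the patch stamps out the GCM handlers, selects the key-expansion routines at init time, and registers everything with the crypto engine: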
```diff
+#define _(a, b, c, d) \
+static_always_inline u32 \
+ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, \
+                                vnet_crypto_op_t * ops[], \
+                                u32 n_ops) \
+{ \
+  ipsecmb_per_thread_data_t *ptd; \
+  ipsecmb_main_t *imbm; \
+ \
+  imbm = &ipsecmb_main; \
+  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \
+ \
+  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d, \
+                                        ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
+                                        ENCRYPT); \
+}
+foreach_ipsecmb_gcm_cipher_op;
+#undef _
+
+#define _(a, b, c, d) \
+static_always_inline u32 \
+ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, \
+                                vnet_crypto_op_t * ops[], \
+                                u32 n_ops) \
+{ \
+  ipsecmb_per_thread_data_t *ptd; \
+  ipsecmb_main_t *imbm; \
+ \
+  imbm = &ipsecmb_main; \
+  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \
+ \
+  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d, \
+                                        ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
+                                        DECRYPT); \
 }
-foreach_ipsecmb_cipher_op;
+foreach_ipsecmb_gcm_cipher_op;
 #undef _
 
 clib_error_t *
@@ -356,6 +537,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
     {
       ptd->mgr = alloc_mb_mgr (0);
       init_mb_mgr_avx512 (ptd->mgr);
+      INIT_IPSEC_MB_GCM_PRE (avx_gen4);
     }
   }
   else if (clib_cpu_supports_avx2 ())
@@ -364,6 +546,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
     {
       ptd->mgr = alloc_mb_mgr (0);
       init_mb_mgr_avx2 (ptd->mgr);
+      INIT_IPSEC_MB_GCM_PRE (avx_gen2);
     }
   }
   else
@@ -372,6 +555,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
     {
       ptd->mgr = alloc_mb_mgr (0);
       init_mb_mgr_sse (ptd->mgr);
+      INIT_IPSEC_MB_GCM_PRE (sse);
     }
   }
@@ -387,15 +571,27 @@ crypto_ipsecmb_init (vlib_main_t * vm)
 #undef _
 #define _(a, b, c, d) \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
-                                    ipsecmb_ops_cipher_enc_##a); \
+                                    ipsecmb_ops_cbc_cipher_enc_##a); \
+
+  foreach_ipsecmb_cbc_cipher_op;
+#undef _
+#define _(a, b, c, d) \
+  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
+                                    ipsecmb_ops_cbc_cipher_dec_##a); \
+
+  foreach_ipsecmb_cbc_cipher_op;
+#undef _
+#define _(a, b, c, d) \
+  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
+                                    ipsecmb_ops_gcm_cipher_enc_##a); \
 
-  foreach_ipsecmb_cipher_op;
+  foreach_ipsecmb_gcm_cipher_op;
 #undef _
 #define _(a, b, c, d) \
   vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
-                                    ipsecmb_ops_cipher_dec_##a); \
+                                    ipsecmb_ops_gcm_cipher_dec_##a); \
 
-  foreach_ipsecmb_cipher_op;
+  foreach_ipsecmb_gcm_cipher_op;
 #undef _
 
   return (NULL);
```
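`INIT_IPSEC_MB_GCM_PRE` runs once per CPU tier during init, so the per-architecture choice is made a single time at startup rather than on the data path. Its expansion is mechanical; for the SSE case it produces exactly:

```c
/* Literal expansion of INIT_IPSEC_MB_GCM_PRE (sse): bind the VFT to the
 * SSE builds of the intel-ipsec-mb GCM key-expansion routines. */
ipsecmb_gcm_pre_vft.ase_gcm_pre_128 = aes_gcm_pre_128_sse;
ipsecmb_gcm_pre_vft.ase_gcm_pre_192 = aes_gcm_pre_192_sse;
ipsecmb_gcm_pre_vft.ase_gcm_pre_256 = aes_gcm_pre_256_sse;
```

Each per-op call through `ipsecmb_gcm_pre_vft.ase_gcm_pre_##b` therefore lands on the `avx_gen4`, `avx_gen2`, or `sse` variant chosen at startup, with no CPU-feature check per packet.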