author     Benoît Ganne <bganne@cisco.com>       2022-01-18 15:56:41 +0100
committer  Dave Wallace <dwallacelf@gmail.com>   2023-02-08 01:06:39 +0000
commit     02dfd296342525a0802132d85d0ea51da841e5ce (patch)
tree       a4bdb7dbb25c30e8c6e9a9c379ee5e443de8fbab /src
parent     aaad4f977cd7337b37cc6f00019f601f07abdced (diff)
ipsec: fix AES CBC IV generation (CVE-2022-46397)
For AES-CBC, the IV must be unpredictable (see NIST SP800-38a Appendix
C). Chaining IVs, as the ipsecmb and native backends do for
VNET_CRYPTO_OP_FLAG_INIT_IV, makes the IV fully predictable.
Encrypt a counter as part of the message, making the (predictable)
counter-generated IV unpredictable.
Fixes: VPP-2037
Type: fix
Change-Id: If4f192d62bf97dda553e7573331c75efa11822ae
Signed-off-by: Benoît Ganne <bganne@cisco.com>
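To see why the chained IVs were exploitable: with VNET_CRYPTO_OP_FLAG_INIT_IV chaining, the IV of packet n+1 is the last ciphertext block of packet n, which an attacker can read off the wire. The standalone sketch below is not part of this commit; it uses OpenSSL's EVP API with a made-up key and plaintexts. It shows that encrypting two packets with a chained IV is byte-for-byte identical to encrypting their concatenation in a single CBC pass, i.e. the chained IV contributes no unpredictability:

#include <openssl/evp.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* one-shot AES-128-CBC encrypt, no padding (inputs are block-aligned) */
static void
cbc_enc (const uint8_t *key, const uint8_t *iv, const uint8_t *in, int inl,
         uint8_t *out)
{
  EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new ();
  int len;
  EVP_EncryptInit_ex (ctx, EVP_aes_128_cbc (), NULL, key, iv);
  EVP_CIPHER_CTX_set_padding (ctx, 0);
  EVP_EncryptUpdate (ctx, out, &len, in, inl);
  EVP_EncryptFinal_ex (ctx, out + len, &len);
  EVP_CIPHER_CTX_free (ctx);
}

int
main (void)
{
  uint8_t key[16], iv0[16], m1[16], m2[16];
  uint8_t c1[16], c2[16], joined[32], cj[32];

  memset (key, 0x42, 16); /* made-up key, for illustration only */
  memset (iv0, 0, 16);
  memset (m1, 0x11, 16);  /* "packet 1" plaintext */
  memset (m2, 0x22, 16);  /* "packet 2" plaintext */

  cbc_enc (key, iv0, m1, 16, c1); /* packet 1 */
  cbc_enc (key, c1, m2, 16, c2);  /* packet 2: IV chained from c1 */

  /* same key, same starting IV, both plaintexts in one CBC pass */
  memcpy (joined, m1, 16);
  memcpy (joined + 16, m2, 16);
  cbc_enc (key, iv0, joined, 32, cj);

  /* c2 equals the 2nd block of cj: chaining degenerates into one long
   * CBC stream whose per-packet IVs are all visible on the wire */
  printf ("chained IV is predictable: %s\n",
          memcmp (c2, cj + 16, 16) == 0 ? "yes" : "no");
  return 0;
}

This predictability is exactly what enables chosen-plaintext attacks of the BEAST family against CBC, hence the NIST requirement cited above.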
Diffstat (limited to 'src')
-rw-r--r--  src/vnet/crypto/crypto.h      |  4
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c  | 88
-rw-r--r--  src/vnet/ipsec/ipsec_sa.h     |  2
3 files changed, 65 insertions, 29 deletions
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index e24ad1091f3..8f12a7216b0 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -338,7 +338,7 @@ typedef struct
   i16 crypto_start_offset; /* first buffer offset */
   i16 integ_start_offset;
   /* adj total_length for integ, e.g.4 bytes for IPSec ESN */
-  u16 integ_length_adj;
+  i16 integ_length_adj;
   vnet_crypto_op_status_t status : 8;
   u8 flags; /**< share same VNET_CRYPTO_OP_FLAG_* values */
 } vnet_crypto_async_frame_elt_t;
@@ -628,7 +628,7 @@ static_always_inline void
 vnet_crypto_async_add_to_frame (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
                                 u32 key_index, u32 crypto_len,
                                 i16 integ_len_adj, i16 crypto_start_offset,
-                                u16 integ_start_offset, u32 buffer_index,
+                                i16 integ_start_offset, u32 buffer_index,
                                 u16 next_node, u8 *iv, u8 *tag, u8 *aad,
                                 u8 flags)
 {
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 4d312be1f94..aa0fb0a6906 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -215,6 +215,24 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
   return len;
 }
 
+/* IPsec IV generation: IV requirements differ depending on the
+ * encryption mode: IVs must be unpredictable for AES-CBC whereas they can
+ * be predictable but should never be reused with the same key material
+ * for CTR and GCM.
+ * We use a packet counter as the IV for CTR and GCM, and to ensure the
+ * IV is unpredictable for CBC, it is then encrypted using the same key
+ * as the message. You can refer to NIST SP800-38a and NIST SP800-38d
+ * for more details. */
+static_always_inline void *
+esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
+{
+  ASSERT (iv_sz >= sizeof (u64));
+  u64 *iv = (u64 *) (payload - iv_sz);
+  clib_memset_u8 (iv, 0, iv_sz);
+  *iv = sa->iv_counter++;
+  return iv;
+}
+
 static_always_inline void
 esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vnet_crypto_op_t * ops, vlib_buffer_t * b[],
@@ -368,27 +386,29 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
   vnet_crypto_op_t *op;
   vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
   vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+  u8 *crypto_start = payload;
+  /* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
+   * have enough space for ESP header and footer which includes ICV */
+  ASSERT (payload_len > icv_sz);
+  u16 crypto_len = payload_len - icv_sz;
+
+  /* generate the IV in front of the payload */
+  void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
 
-  op->src = op->dst = payload;
   op->key_index = sa0->crypto_key_index;
-  op->len = payload_len - icv_sz;
   op->user_data = bi;
 
   if (ipsec_sa_is_set_IS_CTR (sa0))
     {
-      ASSERT (sizeof (u64) == iv_sz);
       /* construct nonce in a scratch space in front of the IP header */
       esp_ctr_nonce_t *nonce =
-        (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
-                             sizeof (*nonce));
-      u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
-
+        (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
       if (ipsec_sa_is_set_IS_AEAD (sa0))
         {
          /* construct aad in a scratch space in front of the nonce */
          op->aad = (u8 *) nonce - sizeof (esp_aead_t);
          op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
-         op->tag = payload + op->len;
+         op->tag = payload + crypto_len;
          op->tag_len = 16;
         }
       else
@@ -397,13 +417,17 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
         }
 
       nonce->salt = sa0->salt;
-      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
+      nonce->iv = *(u64 *) pkt_iv;
       op->iv = (u8 *) nonce;
     }
   else
     {
-      op->iv = payload - iv_sz;
-      op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+      /* construct zero iv in front of the IP header */
+      op->iv = pkt_iv - hdr_len - iv_sz;
+      clib_memset_u8 (op->iv, 0, iv_sz);
+      /* include iv field in crypto */
+      crypto_start -= iv_sz;
+      crypto_len += iv_sz;
     }
 
   if (lb != b[0])
@@ -412,8 +436,15 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
       op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
       op->chunk_index = vec_len (ptd->chunks);
       op->tag = vlib_buffer_get_tail (lb) - icv_sz;
-      esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
-                                payload_len, &op->n_chunks);
+      esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
+                                crypto_start, crypto_len + icv_sz,
+                                &op->n_chunks);
+    }
+  else
+    {
+      /* not chained */
+      op->src = op->dst = crypto_start;
+      op->len = crypto_len;
     }
 }
 
@@ -463,26 +494,26 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
   u8 *tag, *iv, *aad = 0;
   u8 flag = 0;
   u32 key_index;
-  i16 crypto_start_offset, integ_start_offset = 0;
+  i16 crypto_start_offset, integ_start_offset;
   u16 crypto_total_len, integ_total_len;
 
   post->next_index = next;
 
   /* crypto */
-  crypto_start_offset = payload - b->data;
+  crypto_start_offset = integ_start_offset = payload - b->data;
   crypto_total_len = integ_total_len = payload_len - icv_sz;
   tag = payload + crypto_total_len;
 
   key_index = sa->linked_key_index;
 
+  /* generate the IV in front of the payload */
+  void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
+
   if (ipsec_sa_is_set_IS_CTR (sa))
     {
-      ASSERT (sizeof (u64) == iv_sz);
       /* construct nonce in a scratch space in front of the IP header */
-      esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
-                                                    hdr_len - sizeof (*nonce));
-      u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
-
+      esp_ctr_nonce_t *nonce =
+        (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
       if (ipsec_sa_is_set_IS_AEAD (sa))
         {
          /* construct aad in a scratch space in front of the nonce */
@@ -496,13 +527,17 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
         }
 
       nonce->salt = sa->salt;
-      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
+      nonce->iv = *(u64 *) pkt_iv;
       iv = (u8 *) nonce;
     }
   else
     {
-      iv = payload - iv_sz;
-      flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
+      /* construct zero iv in front of the IP header */
+      iv = pkt_iv - hdr_len - iv_sz;
+      clib_memset_u8 (iv, 0, iv_sz);
+      /* include iv field in crypto */
+      crypto_start_offset -= iv_sz;
+      crypto_total_len += iv_sz;
     }
 
   if (lb != b)
@@ -510,13 +545,14 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
       /* chain */
       flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
       tag = vlib_buffer_get_tail (lb) - icv_sz;
-      crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
-                                                   payload, payload_len, 0);
+      crypto_total_len = esp_encrypt_chain_crypto (
+        vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+        crypto_total_len + icv_sz, 0);
     }
 
   if (sa->integ_op_id)
     {
-      integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
+      integ_start_offset -= iv_sz + sizeof (esp_header_t);
       integ_total_len += iv_sz + sizeof (esp_header_t);
 
       if (b != lb)
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index df079b13872..88d5c42c31b 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -137,7 +137,7 @@ typedef struct
   u32 seq;
   u32 seq_hi;
   u64 replay_window;
-  u64 ctr_iv_counter;
+  u64 iv_counter;
   dpo_id_t dpo;
   vnet_crypto_key_index_t crypto_key_index;
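
Note how the CBC branch never hands the counter to the cipher as the IV directly: op->iv points at a zeroed scratch block, and the counter written by esp_generate_iv() is folded into the encrypted region (crypto_start -= iv_sz). The first ciphertext block is therefore AES(key, counter ^ 0) = AES(key, counter), and that block is what appears on the wire as the IV, unpredictable without the key. Below is a standalone sketch of this construction (OpenSSL EVP API with a made-up key and payload, not VPP code):

#include <openssl/evp.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  /* per-SA state, mirroring ipsec_sa_t's iv_counter (demo only) */
  static uint64_t iv_counter;
  uint8_t key[16], zero_iv[16];
  uint8_t buf[32]; /* [counter block][one payload block] */
  uint8_t out[32];
  int len;

  memset (key, 0x42, 16); /* made-up key, for illustration only */
  memset (zero_iv, 0, 16);

  /* esp_generate_iv() equivalent: zero-padded counter in the IV field */
  memset (buf, 0, 16);
  uint64_t ctr = iv_counter++;
  memcpy (buf, &ctr, sizeof (ctr));
  memset (buf + 16, 0xab, 16); /* fake payload block */

  /* CBC with a zero IV over (counter || payload): the first output
   * block is AES(key, counter ^ 0) = AES(key, counter), i.e. the
   * unpredictable on-wire IV required by NIST SP800-38a App. C */
  EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new ();
  EVP_EncryptInit_ex (ctx, EVP_aes_128_cbc (), NULL, key, zero_iv);
  EVP_CIPHER_CTX_set_padding (ctx, 0);
  EVP_EncryptUpdate (ctx, out, &len, buf, sizeof (buf));
  EVP_EncryptFinal_ex (ctx, out + len, &len);
  EVP_CIPHER_CTX_free (ctx);

  for (int i = 0; i < 16; i++)
    printf ("%02x", out[i]); /* encrypted counter = wire IV */
  printf ("\n");
  return 0;
}

This is also why the patch can drop the ASSERT (sizeof (u64) == iv_sz) special case: the counter only seeds the IV field, and esp_generate_iv() merely requires the field to be at least eight bytes wide.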