Diffstat (limited to 'src')
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c  30
-rw-r--r--  src/vnet/ipsec/ipsec_sa.h      3
2 files changed, 28 insertions, 5 deletions
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 6c87abe8f75..1023a1a981e 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -201,6 +201,24 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6)
return len;
}
+/* IPsec IV generation: IV requirements differ depending on the
+ * encryption mode: the IV must be unpredictable for AES-CBC, whereas
+ * for CTR and GCM it can be predictable but must never be reused with
+ * the same key material.
+ * We use a packet counter as the IV for CTR and GCM; to make the IV
+ * unpredictable for CBC, the counter is then encrypted using the same
+ * key as the message. See NIST SP800-38A and NIST SP800-38D for
+ * details. */
+static_always_inline void *
+esp_generate_iv (ipsec_sa_t * sa, void *payload, int iv_sz)
+{
+ ASSERT (iv_sz >= sizeof (u64));
+ u64 *iv = (u64 *) (payload - iv_sz);
+ clib_memset_u8 (iv, 0, iv_sz);
+ *iv = sa->iv_counter++;
+ return iv;
+}
+
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
@@ -463,6 +481,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+ void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
op->src = op->dst = payload;
op->key_index = sa0->crypto_key_index;
op->len = payload_len - icv_sz;
@@ -481,16 +500,19 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
op->tag = payload + op->len;
op->tag_len = 16;
- u64 *iv = (u64 *) (payload - iv_sz);
nonce->salt = sa0->salt;
- nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
+ nonce->iv = *(u64 *) pkt_iv;
op->iv = (u8 *) nonce;
nonce++;
}
else
{
- op->iv = payload - iv_sz;
- op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+ /* construct zero iv in front of the IP header */
+ op->iv = pkt_iv - hdr_len - iv_sz;
+ clib_memset_u8 (op->iv, 0, iv_sz);
+ /* include iv field in crypto */
+ op->src -= iv_sz;
+ op->len += iv_sz;
}
}
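
The CBC branch above works in two steps: esp_generate_iv () writes the per-SA packet counter into the IV slot just in front of the payload, but that counter is never handed to the cipher as the CBC IV. Instead a zero IV is constructed in the headroom in front of the IP header and the crypto operation is extended backwards by iv_sz bytes, so the first block the cipher produces is the counter encrypted with the SA key; because encryption is in place, that block overwrites the IV slot and becomes the unpredictable on-wire IV (cf. SP800-38A). The standalone sketch below models the buffer layout only; crypto_op_sketch_t, cbc_counter_iv_sketch and their parameters are illustrative assumptions, not the actual VPP types, and the packet is assumed to have iv_sz bytes of writable headroom in front of the headers.

#include <stdint.h>
#include <string.h>

/* Sketch of the CBC IV trick; illustrative names, not VPP structures. */
typedef struct
{
  uint8_t *src, *dst, *iv;	/* region to encrypt and the IV given to the cipher */
  uint32_t len;
} crypto_op_sketch_t;

static void
cbc_counter_iv_sketch (uint8_t * hdr, uint32_t hdr_len, uint32_t iv_sz,
		       uint32_t enc_len, uint64_t counter,
		       crypto_op_sketch_t * op)
{
  uint8_t *payload = hdr + hdr_len + iv_sz;	/* plaintext starts after hdr + IV slot */
  uint8_t *pkt_iv = payload - iv_sz;		/* IV slot transmitted on the wire */

  /* 1. per-SA packet counter goes into the IV slot (cf. esp_generate_iv) */
  memset (pkt_iv, 0, iv_sz);
  memcpy (pkt_iv, &counter, sizeof (counter));

  /* 2. zero IV built in the headroom in front of the IP header */
  op->iv = hdr - iv_sz;
  memset (op->iv, 0, iv_sz);

  /* 3. extend the in-place encryption to start at the IV slot: the first
   *    CBC output block is E_k(counter), which overwrites the slot and
   *    serves as the unpredictable IV for the rest of the payload */
  op->src = op->dst = pkt_iv;
  op->len = enc_len + iv_sz;	/* enc_len: payload bytes to encrypt, ICV excluded */
}

For GCM the counter is used as-is: in the hunk above nonce->iv is the raw 64-bit counter combined with the per-SA salt, which is sufficient because counter modes only need uniqueness, not unpredictability.
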
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index b3e138bfbea..bc9f3b47683 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -122,6 +122,8 @@ typedef struct
u64 replay_window;
dpo_id_t dpo;
+ u64 iv_counter;
+
vnet_crypto_key_index_t crypto_key_index;
vnet_crypto_key_index_t integ_key_index;
vnet_crypto_op_id_t crypto_enc_op_id:16;
@@ -161,7 +163,6 @@ typedef struct
/* Salt used in GCM modes - stored in network byte order */
u32 salt;
- u64 gcm_iv_counter;
} ipsec_sa_t;
STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline1, CLIB_CACHE_LINE_BYTES);
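
On the ipsec_sa.h side, the GCM-only gcm_iv_counter is replaced by a single per-SA iv_counter shared by all modes. A minimal sketch of the uniqueness argument, assuming the counter starts at zero when the SA (and therefore its key material) is installed; sa_sketch_t and next_iv () are illustrative names, not VPP code:

#include <stdint.h>

typedef struct
{
  uint64_t iv_counter;	/* zero-initialised when the SA is created */
} sa_sketch_t;

static uint64_t
next_iv (sa_sketch_t * sa)
{
  /* a 64-bit counter cannot wrap within a realistic SA lifetime, and
   * rekeying installs a new SA with a fresh counter, so no IV value is
   * reused with the same key material */
  return sa->iv_counter++;
}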