path: root/src/vnet/ipsec/ipsec_sa.c
author    Benoît Ganne <bganne@cisco.com>  2022-01-18 15:56:41 +0100
committer Benoît Ganne <bganne@cisco.com>  2023-03-23 08:59:31 +0000
commit    5527a78ed96043d2c26e3271066c50b44dd7fc0b (patch)
tree      3e4186cb439e10051d013cc71c1f7d8d81ad9613 /src/vnet/ipsec/ipsec_sa.c
parent    ad95b06181c354291f4433c5e550cb89c5122252 (diff)
ipsec: make pre-shared keys harder to misuse
Using pre-shared keys is usually a bad idea; one should use e.g. IKEv2
instead, but one does not always have the choice.

For AES-CBC, the IV must be unpredictable (see NIST SP800-38a Appendix
C), whereas for AES-CTR or AES-GCM, the IV must never be reused with
the same key material (see NIST SP800-38a Appendix B and NIST
SP800-38d section 8). If one uses pre-shared keys and VPP is
restarted, the IV counter restarts at 0 and the same IVs are generated
with the same pre-shared key material.

To fix those issues we follow the recommendations from NIST SP800-38a
and NIST SP800-38d:
 - we use a PRNG (not cryptographically secure) to generate IVs, to
   avoid generating the same IV sequence between VPP restarts. The
   PRNG is chosen so that there is a low chance of generating the same
   sequence
 - for AES-CBC, the generated IV is encrypted as part of the message.
   This makes the (predictable) PRNG-generated IV unpredictable, as it
   is encrypted with the secret key
 - for AES-CTR and AES-GCM, we use the IV as-is, as predictable IVs
   are fine

Most of the changes in this patch are caused by the need to shoehorn
the additional state of 2 u64 for the PRNG into the first cacheline of
the SA object.

Type: improvement
Change-Id: I2af89c21ae4b2c4c33dd21aeffcfb79c13c9d84c
Signed-off-by: Benoît Ganne <bganne@cisco.com>
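[Editor's note] The IV scheme is easy to demonstrate outside of VPP.
Below is a minimal standalone sketch, assuming the 32-bit PCG reference
generator in place of VPP's clib_pcg64i (pcg32_t and pcg32_next are
illustrative names, not part of the patch): seed once from the kernel
via getrandom(), then draw per-packet IVs from the fast,
non-cryptographic PRNG.

/* Standalone sketch: seed a fast non-cryptographic PRNG from the
 * kernel once, then draw per-packet IVs from it. Uses the 32-bit PCG
 * reference generator for brevity; the patch uses VPP's clib_pcg64i. */
#include <stdint.h>
#include <stdio.h>
#include <sys/random.h>

typedef struct { uint64_t state, inc; } pcg32_t;

static uint32_t
pcg32_next (pcg32_t *rng)
{
  uint64_t old = rng->state;
  rng->state = old * 6364136223846793005ULL + rng->inc;
  uint32_t xorshifted = (uint32_t) (((old >> 18u) ^ old) >> 27u);
  uint32_t rot = (uint32_t) (old >> 59u);
  return (xorshifted >> rot) | (xorshifted << ((-rot) & 31u));
}

int
main (void)
{
  uint64_t seed[2];
  pcg32_t prng;

  /* Seed from the kernel so the IV sequence differs across restarts. */
  if (getrandom (seed, sizeof (seed), 0) != sizeof (seed))
    return 1;
  prng.state = seed[0];
  prng.inc = seed[1] | 1; /* PCG requires an odd increment */

  /* Each packet gets a fresh 64-bit IV; for AES-CBC this value is
   * additionally encrypted under the session key before being sent,
   * which is what makes the predictable PRNG output unpredictable. */
  for (int i = 0; i < 4; i++)
    {
      uint64_t iv = ((uint64_t) pcg32_next (&prng) << 32) | pcg32_next (&prng);
      printf ("iv[%d] = %016llx\n", i, (unsigned long long) iv);
    }
  return 0;
}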
Diffstat (limited to 'src/vnet/ipsec/ipsec_sa.c')
-rw-r--r--  src/vnet/ipsec/ipsec_sa.c | 129
1 file changed, 74 insertions(+), 55 deletions(-)
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index eed71a48500..95db17e7fb1 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -13,6 +13,7 @@
* limitations under the License.
*/
+#include <sys/random.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/udp/udp_local.h>
@@ -92,14 +93,35 @@ ipsec_sa_stack (ipsec_sa_t * sa)
}
void
+ipsec_sa_set_async_mode (ipsec_sa_t *sa, int is_enabled)
+{
+ if (is_enabled)
+ {
+ sa->crypto_key_index = sa->crypto_async_key_index;
+ sa->crypto_enc_op_id = sa->crypto_async_enc_op_id;
+ sa->crypto_dec_op_id = sa->crypto_async_dec_op_id;
+ sa->integ_key_index = ~0;
+ sa->integ_op_id = ~0;
+ }
+ else
+ {
+ sa->crypto_key_index = sa->crypto_sync_key_index;
+ sa->crypto_enc_op_id = sa->crypto_sync_enc_op_id;
+ sa->crypto_dec_op_id = sa->crypto_sync_dec_op_id;
+ sa->integ_key_index = sa->integ_sync_key_index;
+ sa->integ_op_id = sa->integ_sync_op_id;
+ }
+}
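[Editor's note] This new helper replaces the old union trick
(sa->crypto_op_data = sa->async_op_data.data) with explicit per-field
assignment, which appears to be part of compacting the SA's first
cacheline to make room for the PRNG state. A hypothetical sketch of a
global toggle built on it (ipsec_toggle_async_all is illustrative; the
real call sites live elsewhere in the IPsec code):

/* Hypothetical sketch (not part of this patch): flip every SA in the
 * pool between the sync and async crypto engines via the new helper. */
static void
ipsec_toggle_async_all (int is_enabled)
{
  ipsec_sa_t *sa;
  pool_foreach (sa, ipsec_sa_pool)
    {
      ipsec_sa_set_async_mode (sa, is_enabled);
    }
}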
+
+void
ipsec_sa_set_crypto_alg (ipsec_sa_t * sa, ipsec_crypto_alg_t crypto_alg)
{
ipsec_main_t *im = &ipsec_main;
sa->crypto_alg = crypto_alg;
sa->crypto_iv_size = im->crypto_algs[crypto_alg].iv_size;
sa->esp_block_align = clib_max (4, im->crypto_algs[crypto_alg].block_align);
- sa->sync_op_data.crypto_enc_op_id = im->crypto_algs[crypto_alg].enc_op_id;
- sa->sync_op_data.crypto_dec_op_id = im->crypto_algs[crypto_alg].dec_op_id;
+ sa->crypto_sync_enc_op_id = im->crypto_algs[crypto_alg].enc_op_id;
+ sa->crypto_sync_dec_op_id = im->crypto_algs[crypto_alg].dec_op_id;
sa->crypto_calg = im->crypto_algs[crypto_alg].alg;
ASSERT (sa->crypto_iv_size <= ESP_MAX_IV_SIZE);
ASSERT (sa->esp_block_align <= ESP_MAX_BLOCK_SIZE);
@@ -122,7 +144,7 @@ ipsec_sa_set_integ_alg (ipsec_sa_t * sa, ipsec_integ_alg_t integ_alg)
ipsec_main_t *im = &ipsec_main;
sa->integ_alg = integ_alg;
sa->integ_icv_size = im->integ_algs[integ_alg].icv_size;
- sa->sync_op_data.integ_op_id = im->integ_algs[integ_alg].op_id;
+ sa->integ_sync_op_id = im->integ_algs[integ_alg].op_id;
sa->integ_calg = im->integ_algs[integ_alg].alg;
ASSERT (sa->integ_icv_size <= ESP_MAX_ICV_SIZE);
}
@@ -133,38 +155,32 @@ ipsec_sa_set_async_op_ids (ipsec_sa_t * sa)
/* *INDENT-OFF* */
if (ipsec_sa_is_set_USE_ESN (sa))
{
-#define _(n, s, k) \
- if( sa->sync_op_data.crypto_enc_op_id == VNET_CRYPTO_OP_##n##_ENC ) \
- sa->async_op_data.crypto_async_enc_op_id = \
- VNET_CRYPTO_OP_##n##_TAG16_AAD12_ENC; \
- if( sa->sync_op_data.crypto_dec_op_id == VNET_CRYPTO_OP_##n##_DEC ) \
- sa->async_op_data.crypto_async_dec_op_id = \
- VNET_CRYPTO_OP_##n##_TAG16_AAD12_DEC;
- foreach_crypto_aead_alg
+#define _(n, s, k) \
+ if (sa->crypto_sync_enc_op_id == VNET_CRYPTO_OP_##n##_ENC) \
+ sa->crypto_async_enc_op_id = VNET_CRYPTO_OP_##n##_TAG16_AAD12_ENC; \
+ if (sa->crypto_sync_dec_op_id == VNET_CRYPTO_OP_##n##_DEC) \
+ sa->crypto_async_dec_op_id = VNET_CRYPTO_OP_##n##_TAG16_AAD12_DEC;
+ foreach_crypto_aead_alg
#undef _
}
else
{
-#define _(n, s, k) \
- if( sa->sync_op_data.crypto_enc_op_id == VNET_CRYPTO_OP_##n##_ENC ) \
- sa->async_op_data.crypto_async_enc_op_id = \
- VNET_CRYPTO_OP_##n##_TAG16_AAD8_ENC; \
- if( sa->sync_op_data.crypto_dec_op_id == VNET_CRYPTO_OP_##n##_DEC ) \
- sa->async_op_data.crypto_async_dec_op_id = \
- VNET_CRYPTO_OP_##n##_TAG16_AAD8_DEC;
- foreach_crypto_aead_alg
+#define _(n, s, k) \
+ if (sa->crypto_sync_enc_op_id == VNET_CRYPTO_OP_##n##_ENC) \
+ sa->crypto_async_enc_op_id = VNET_CRYPTO_OP_##n##_TAG16_AAD8_ENC; \
+ if (sa->crypto_sync_dec_op_id == VNET_CRYPTO_OP_##n##_DEC) \
+ sa->crypto_async_dec_op_id = VNET_CRYPTO_OP_##n##_TAG16_AAD8_DEC;
+ foreach_crypto_aead_alg
#undef _
}
-#define _(c, h, s, k ,d) \
- if( sa->sync_op_data.crypto_enc_op_id == VNET_CRYPTO_OP_##c##_ENC && \
- sa->sync_op_data.integ_op_id == VNET_CRYPTO_OP_##h##_HMAC) \
- sa->async_op_data.crypto_async_enc_op_id = \
- VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC; \
- if( sa->sync_op_data.crypto_dec_op_id == VNET_CRYPTO_OP_##c##_DEC && \
- sa->sync_op_data.integ_op_id == VNET_CRYPTO_OP_##h##_HMAC) \
- sa->async_op_data.crypto_async_dec_op_id = \
- VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC;
+#define _(c, h, s, k, d) \
+ if (sa->crypto_sync_enc_op_id == VNET_CRYPTO_OP_##c##_ENC && \
+ sa->integ_sync_op_id == VNET_CRYPTO_OP_##h##_HMAC) \
+ sa->crypto_async_enc_op_id = VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC; \
+ if (sa->crypto_sync_dec_op_id == VNET_CRYPTO_OP_##c##_DEC && \
+ sa->integ_sync_op_id == VNET_CRYPTO_OP_##h##_HMAC) \
+ sa->crypto_async_dec_op_id = VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC;
foreach_crypto_link_async_alg
#undef _
/* *INDENT-ON* */
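[Editor's note] For readability, here is roughly what one expansion of
the `_' macro above looks like, assuming the AES_128_GCM entry of
foreach_crypto_aead_alg in the ESN branch (TAG16/AAD12 denote a
16-byte ICV and 12-byte AAD):

/* Approximate expansion for one AEAD entry: map the sync op id onto
 * the async op id that carries the tag/AAD sizing in its name. */
if (sa->crypto_sync_enc_op_id == VNET_CRYPTO_OP_AES_128_GCM_ENC)
  sa->crypto_async_enc_op_id = VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD12_ENC;
if (sa->crypto_sync_dec_op_id == VNET_CRYPTO_OP_AES_128_GCM_DEC)
  sa->crypto_async_dec_op_id = VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD12_DEC;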
@@ -313,6 +329,7 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
clib_error_t *err;
ipsec_sa_t *sa;
u32 sa_index;
+ u64 rand[2];
uword *p;
int rv;
@@ -320,8 +337,13 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (p)
return VNET_API_ERROR_ENTRY_ALREADY_EXISTS;
+ if (getrandom (rand, sizeof (rand), 0) != sizeof (rand))
+ return VNET_API_ERROR_INIT_FAILED;
+
pool_get_aligned_zero (ipsec_sa_pool, sa, CLIB_CACHE_LINE_BYTES);
+ clib_pcg64i_srandom_r (&sa->iv_prng, rand[0], rand[1]);
+
fib_node_init (&sa->node, FIB_NODE_TYPE_IPSEC_SA);
fib_node_lock (&sa->node);
sa_index = sa - ipsec_sa_pool;
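[Editor's note] The patch treats any short read from getrandom() as a
hard failure (VNET_API_ERROR_INIT_FAILED), which is reasonable for a
16-byte request. Callers that want to be more defensive could retry,
as in this standalone sketch (fill_random is a hypothetical helper,
not part of the patch):

/* getrandom() can be interrupted by a signal or return a short read
 * for large buffers, so loop until the requested byte count arrives. */
#include <errno.h>
#include <sys/random.h>

static int
fill_random (void *buf, size_t len)
{
  unsigned char *p = buf;
  while (len > 0)
    {
      ssize_t n = getrandom (p, len, 0);
      if (n < 0)
        {
          if (errno == EINTR)
            continue;
          return -1;
        }
      p += n;
      len -= (size_t) n;
    }
  return 0;
}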
@@ -352,10 +374,9 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
clib_memcpy (&sa->crypto_key, ck, sizeof (sa->crypto_key));
- sa->crypto_key_index = vnet_crypto_key_add (vm,
- im->crypto_algs[crypto_alg].alg,
- (u8 *) ck->data, ck->len);
- if (~0 == sa->crypto_key_index)
+ sa->crypto_sync_key_index = vnet_crypto_key_add (
+ vm, im->crypto_algs[crypto_alg].alg, (u8 *) ck->data, ck->len);
+ if (~0 == sa->crypto_sync_key_index)
{
pool_put (ipsec_sa_pool, sa);
return VNET_API_ERROR_KEY_LENGTH;
@@ -363,36 +384,34 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (integ_alg != IPSEC_INTEG_ALG_NONE)
{
- sa->integ_key_index = vnet_crypto_key_add (vm,
- im->
- integ_algs[integ_alg].alg,
- (u8 *) ik->data, ik->len);
- if (~0 == sa->integ_key_index)
+ sa->integ_sync_key_index = vnet_crypto_key_add (
+ vm, im->integ_algs[integ_alg].alg, (u8 *) ik->data, ik->len);
+ if (~0 == sa->integ_sync_key_index)
{
pool_put (ipsec_sa_pool, sa);
return VNET_API_ERROR_KEY_LENGTH;
}
}
- if (sa->async_op_data.crypto_async_enc_op_id &&
- !ipsec_sa_is_set_IS_AEAD (sa))
- { //AES-CBC & HMAC
- sa->async_op_data.linked_key_index =
- vnet_crypto_key_add_linked (vm, sa->crypto_key_index,
- sa->integ_key_index);
- }
+ if (sa->crypto_async_enc_op_id && !ipsec_sa_is_set_IS_AEAD (sa))
+ sa->crypto_async_key_index =
+ vnet_crypto_key_add_linked (vm, sa->crypto_sync_key_index,
+ sa->integ_sync_key_index); // AES-CBC & HMAC
+ else
+ sa->crypto_async_key_index = sa->crypto_sync_key_index;
if (im->async_mode)
- sa->crypto_op_data = sa->async_op_data.data;
+ {
+ ipsec_sa_set_async_mode (sa, 1);
+ }
+ else if (ipsec_sa_is_set_IS_ASYNC (sa))
+ {
+ vnet_crypto_request_async_mode (1);
+ ipsec_sa_set_async_mode (sa, 1 /* is_enabled */);
+ }
else
{
- if (ipsec_sa_is_set_IS_ASYNC (sa))
- {
- vnet_crypto_request_async_mode (1);
- sa->crypto_op_data = sa->async_op_data.data;
- }
- else
- sa->crypto_op_data = sa->sync_op_data.data;
+ ipsec_sa_set_async_mode (sa, 0 /* is_enabled */);
}
err = ipsec_check_support_cb (im, sa);
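[Editor's note] The three-way branch above reduces to: an SA runs
async when the global im->async_mode is on or when the SA itself is
flagged IS_ASYNC, and only the per-SA flag needs to register demand
for the async crypto infrastructure. A condensed, behaviorally
equivalent sketch (not how the patch writes it):

/* Condensed form of the mode selection above. */
int is_async = im->async_mode || ipsec_sa_is_set_IS_ASYNC (sa);
if (!im->async_mode && ipsec_sa_is_set_IS_ASYNC (sa))
  vnet_crypto_request_async_mode (1);
ipsec_sa_set_async_mode (sa, is_async);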
@@ -489,7 +508,7 @@ ipsec_sa_del (ipsec_sa_t * sa)
{
vnet_crypto_request_async_mode (0);
if (!ipsec_sa_is_set_IS_AEAD (sa))
- vnet_crypto_key_del (vm, sa->async_op_data.linked_key_index);
+ vnet_crypto_key_del (vm, sa->crypto_async_key_index);
}
if (ipsec_sa_is_set_UDP_ENCAP (sa) && ipsec_sa_is_set_IS_INBOUND (sa))
@@ -498,9 +517,9 @@ ipsec_sa_del (ipsec_sa_t * sa)
if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
dpo_reset (&sa->dpo);
- vnet_crypto_key_del (vm, sa->crypto_key_index);
+ vnet_crypto_key_del (vm, sa->crypto_sync_key_index);
if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
- vnet_crypto_key_del (vm, sa->integ_key_index);
+ vnet_crypto_key_del (vm, sa->integ_sync_key_index);
pool_put (ipsec_sa_pool, sa);
}