Diffstat (limited to 'src')
-rw-r--r--  src/plugins/crypto_ipsecmb/ipsecmb.c  65
-rw-r--r--  src/plugins/crypto_openssl/main.c      6
-rw-r--r--  src/vnet/crypto/crypto.h               2
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c          23
-rw-r--r--  src/vnet/ipsec/ipsec_sa.h              1
5 files changed, 39 insertions(+), 58 deletions(-)
diff --git a/src/plugins/crypto_ipsecmb/ipsecmb.c b/src/plugins/crypto_ipsecmb/ipsecmb.c
index 60b5bb96704..b826700cf9e 100644
--- a/src/plugins/crypto_ipsecmb/ipsecmb.c
+++ b/src/plugins/crypto_ipsecmb/ipsecmb.c
@@ -81,9 +81,9 @@ static ipsecmb_main_t ipsecmb_main = { };
* (Alg, key-len-bytes, iv-len-bytes)
*/
#define foreach_ipsecmb_gcm_cipher_op \
- _(AES_128_GCM, 128, 12) \
- _(AES_192_GCM, 192, 12) \
- _(AES_256_GCM, 256, 12)
+ _(AES_128_GCM, 128) \
+ _(AES_192_GCM, 192) \
+ _(AES_256_GCM, 256)
always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
@@ -271,6 +271,7 @@ ipsecmb_retire_gcm_cipher_job (JOB_AES_HMAC * job,
{
op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
*n_fail = *n_fail + 1;
+ return;
}
else
op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
@@ -286,9 +287,8 @@ ipsecmb_retire_gcm_cipher_job (JOB_AES_HMAC * job,
}
static_always_inline u32
-ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
- vnet_crypto_op_t * ops[],
- u32 n_ops, u32 key_len, u32 iv_len,
+ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ u32 n_ops, u32 key_len,
JOB_CIPHER_DIRECTION direction)
{
ipsecmb_main_t *imbm = &ipsecmb_main;
@@ -306,8 +306,6 @@ ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
struct gcm_key_data *kd;
vnet_crypto_op_t *op = ops[i];
kd = (struct gcm_key_data *) imbm->key_data[op->key_index];
- u32 nonce[3];
- __m128i iv;
job = IMB_GET_NEXT_JOB (ptd->mgr);
@@ -321,30 +319,11 @@ ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
job->cipher_direction = direction;
job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
- if (direction == ENCRYPT)
- {
- if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
- {
- iv = ptd->cbc_iv;
- // only use 8 bytes of the IV
- clib_memcpy_fast (op->iv, &iv, 8);
- ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
- }
- nonce[0] = op->salt;
- clib_memcpy_fast (nonce + 1, op->iv, 8);
- job->iv = (u8 *) nonce;
- }
- else
- {
- nonce[0] = op->salt;
- clib_memcpy_fast (nonce + 1, op->iv, 8);
- job->iv = op->iv;
- }
-
+ job->iv = op->iv;
job->aes_key_len_in_bytes = key_len / 8;
job->aes_enc_key_expanded = kd;
job->aes_dec_key_expanded = kd;
- job->iv_len_in_bytes = iv_len;
+ job->iv_len_in_bytes = 12;
job->u.GCM.aad = op->aad;
job->u.GCM.aad_len_in_bytes = op->aad_len;
@@ -361,34 +340,22 @@ ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
}
- /*
- * .. then flush (i.e. complete) them
- * We will have queued enough to satisfy the 'multi' buffer
- */
while ((job = IMB_FLUSH_JOB (ptd->mgr)))
- {
- ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
- }
+ ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
return n_ops - n_fail;
}
-#define _(a, b, c) \
+#define _(a, b) \
static_always_inline u32 \
-ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, \
- vnet_crypto_op_t * ops[], \
+ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
u32 n_ops) \
-{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, c, ENCRYPT); } \
-
-foreach_ipsecmb_gcm_cipher_op;
-#undef _
-
-#define _(a, b, c) \
+{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, ENCRYPT); } \
+ \
static_always_inline u32 \
-ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, \
- vnet_crypto_op_t * ops[], \
+ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
u32 n_ops) \
-{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, c, DECRYPT); } \
+{ return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, b, DECRYPT); } \
foreach_ipsecmb_gcm_cipher_op;
#undef _
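
For reference, each entry of the two-argument foreach_ipsecmb_gcm_cipher_op list now expands into an encrypt and a decrypt wrapper, with the IV length no longer threaded through as a parameter. A sketch of what the first entry, _(AES_128_GCM, 128), expands to after this change:

    static_always_inline u32
    ipsecmb_ops_gcm_cipher_enc_AES_128_GCM (vlib_main_t * vm,
                                            vnet_crypto_op_t * ops[], u32 n_ops)
    { return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, 128, ENCRYPT); }

    static_always_inline u32
    ipsecmb_ops_gcm_cipher_dec_AES_128_GCM (vlib_main_t * vm,
                                            vnet_crypto_op_t * ops[], u32 n_ops)
    { return ipsecmb_ops_gcm_cipher_inline (vm, ops, n_ops, 128, DECRYPT); }
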
@@ -561,7 +528,7 @@ crypto_ipsecmb_init (vlib_main_t * vm)
foreach_ipsecmb_cbc_cipher_op;
#undef _
-#define _(a, b, c) \
+#define _(a, b) \
vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
ipsecmb_ops_gcm_cipher_enc_##a); \
vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
diff --git a/src/plugins/crypto_openssl/main.c b/src/plugins/crypto_openssl/main.c
index eaa16ceb2d8..0560f31a00a 100644
--- a/src/plugins/crypto_openssl/main.c
+++ b/src/plugins/crypto_openssl/main.c
@@ -118,18 +118,14 @@ openssl_ops_enc_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
{
vnet_crypto_op_t *op = ops[i];
vnet_crypto_key_t *key = vnet_crypto_get_key (op->key_index);
- u32 nonce[3];
int len;
if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
RAND_bytes (op->iv, 8);
- nonce[0] = op->salt;
- clib_memcpy_fast (nonce + 1, op->iv, 8);
-
EVP_EncryptInit_ex (ctx, cipher, 0, 0, 0);
EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_GCM_SET_IVLEN, 12, NULL);
- EVP_EncryptInit_ex (ctx, 0, 0, key->data, (u8 *) nonce);
+ EVP_EncryptInit_ex (ctx, 0, 0, key->data, op->iv);
if (op->aad_len)
EVP_EncryptUpdate (ctx, NULL, &len, op->aad, op->aad_len);
EVP_EncryptUpdate (ctx, op->dst, &len, op->src, op->len);
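
The OpenSSL engine likewise stops rebuilding the nonce from op->salt and op->iv; it sets a 12-byte IV length and passes op->iv straight to EVP_EncryptInit_ex. A self-contained sketch of the same EVP call sequence outside of VPP (function and buffer names are illustrative; error handling is reduced to a single success flag):

    #include <stdint.h>
    #include <openssl/evp.h>

    /* AES-256-GCM encrypt with an externally supplied 12-byte nonce,
     * mirroring the call order used in openssl_ops_enc_gcm(). */
    static int
    example_gcm_encrypt (const uint8_t key[32], const uint8_t nonce[12],
                         const uint8_t *aad, int aad_len,
                         const uint8_t *src, int len, uint8_t *dst,
                         uint8_t tag[16])
    {
      EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new ();
      int out_len, ok = 0;

      if (!ctx)
        return 0;

      if (EVP_EncryptInit_ex (ctx, EVP_aes_256_gcm (), NULL, NULL, NULL)
          && EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_GCM_SET_IVLEN, 12, NULL)
          && EVP_EncryptInit_ex (ctx, NULL, NULL, key, nonce)
          && (aad_len == 0
              || EVP_EncryptUpdate (ctx, NULL, &out_len, aad, aad_len))
          && EVP_EncryptUpdate (ctx, dst, &out_len, src, len)
          && EVP_EncryptFinal_ex (ctx, dst + out_len, &out_len)
          && EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_GCM_GET_TAG, 16, tag))
        ok = 1;

      EVP_CIPHER_CTX_free (ctx);
      return ok;
    }
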
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index 2d9c524b266..95bc72b2dc0 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -130,7 +130,7 @@ typedef struct
#define VNET_CRYPTO_OP_FLAG_INIT_IV (1 << 0)
#define VNET_CRYPTO_OP_FLAG_HMAC_CHECK (1 << 1)
u32 key_index;
- u32 len, salt;
+ u32 len;
u16 aad_len;
u8 digest_len, tag_len;
u8 *iv;
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index af56302bafb..3aafaffe3b5 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -225,6 +225,14 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
}
}
+typedef struct
+{
+ u32 salt;
+ u64 iv;
+} __clib_packed esp_gcm_nonce_t;
+
+STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12);
+
always_inline uword
esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, int is_ip6, int is_tun)
@@ -235,6 +243,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
u32 n_left = frame->n_vectors;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces;
u32 thread_index = vm->thread_index;
u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
u32 current_sa_index = ~0, current_sa_packets = 0;
@@ -439,13 +448,10 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
- op->iv = payload - iv_sz;
op->src = op->dst = payload;
op->key_index = sa0->crypto_key_index;
op->len = payload_len - icv_sz;
- op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
op->user_data = b - bufs;
- op->salt = sa0->salt;
if (ipsec_sa_is_set_IS_AEAD (sa0))
{
@@ -459,6 +465,17 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
op->tag = payload + op->len;
op->tag_len = 16;
+
+ u64 *iv = (u64 *) (payload - iv_sz);
+ nonce->salt = sa0->salt;
+ nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
+ op->iv = (u8 *) nonce;
+ nonce++;
+ }
+ else
+ {
+ op->iv = payload - iv_sz;
+ op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
}
}
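
The net effect in esp_encrypt_inline() is that AEAD operations no longer rely on the engine for IV generation: each packet takes a nonce slot from the per-frame nonces array, the SA salt fills the first 4 bytes, and a per-SA 64-bit counter supplies the remaining 8 bytes, which are also written into the packet as the on-the-wire IV. A condensed, commented view of that step (variable names follow the patch; the surrounding packet loop is omitted, and the 8-byte wire IV size is the GCM case):

    /* payload points just past the ESP header + IV area; iv_sz is 8 for GCM */
    u64 *iv = (u64 *) (payload - iv_sz);            /* IV bytes sent on the wire  */

    nonce->salt = sa0->salt;                        /* 4-byte per-SA salt         */
    nonce->iv = *iv =                               /* 8-byte deterministic IV    */
      clib_host_to_net_u64 (sa0->gcm_iv_counter++); /* unique per packet, per SA  */

    op->iv = (u8 *) nonce;                          /* engine reads 12 bytes here */
    nonce++;                                        /* next packet, next slot     */

Non-AEAD ciphers keep the previous behaviour: op->iv still points into the packet and VNET_CRYPTO_OP_FLAG_INIT_IV asks the engine to generate the IV.
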
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index bde09589672..661b54a6ce9 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -167,6 +167,7 @@ typedef struct
/* Salt used in GCM modes - stored in network byte order */
u32 salt;
+ u64 gcm_iv_counter;
} ipsec_sa_t;
STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline1, CLIB_CACHE_LINE_BYTES);
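
The new gcm_iv_counter is what makes the deterministic IV safe: GCM requires that a (key, nonce) pair never be reused, and with a dedicated 64-bit counter per SA the nonce cannot repeat before the counter wraps. A back-of-the-envelope sketch of how far away that wrap is (the packet rate is an assumption for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Even at an optimistic sustained 100 Mpps on a single SA, walking
     * through all 2^64 counter values would take thousands of years. */
    int
    main (void)
    {
      double pps = 100e6;
      double seconds = (double) UINT64_MAX / pps;

      printf ("GCM IV counter wraps after ~%.0f years\n",
              seconds / (365.25 * 24 * 3600));
      return 0;
    }
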