author    Radu Nicolau <radu.nicolau@intel.com>    2016-11-29 11:00:30 +0000
committer Damjan Marion <dmarion.lists@gmail.com>  2016-11-30 20:37:45 +0000
commit    6929ea9225cf229ed59a480ceefb972b85971e50 (patch)
tree      9366bec3e281de9cfe20fd3c58cb357c26ab6e43 /vnet
parent    2fee4c8fadd31979bd3e72c51d276773d17798d1 (diff)
Enabling AES-GCM-128 with 16B ICV support
Change-Id: Ib57b6f6b71ba14952ad77477a4df3ab33b36fef4
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
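
For orientation: the patch implements RFC 4106-style AES-GCM for ESP. The last 4 bytes of the configured crypto key become the SA salt, an 8-byte explicit IV travels in each packet after the ESP header, and a 32-bit block counter starting at 1 completes the 16-byte counter block. A minimal sketch of that nonce layout, mirroring the dpdk_gcm_cnt_blk type introduced below (the type and helper names here are illustrative, not part of the patch):

#include <arpa/inet.h>		/* htonl */
#include <stdint.h>
#include <string.h>

/* 16-byte GCM counter block: salt | explicit IV | counter (RFC 4106). */
typedef struct
{
  uint32_t salt;		/* last 4 bytes of the configured key */
  uint32_t iv[2];		/* 8-byte explicit IV carried in the ESP payload */
  uint32_t cnt;			/* block counter; 1 for the first payload block */
} gcm_cnt_blk_t;

static void
gcm_cnt_blk_fill (gcm_cnt_blk_t * cb, uint32_t salt, const uint8_t iv[8])
{
  cb->salt = salt;
  memcpy (cb->iv, iv, 8);
  cb->cnt = htonl (1);
}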
Diffstat (limited to 'vnet')
-rw-r--r--   vnet/vnet/devices/dpdk/ipsec/esp.h           |  31
-rw-r--r--   vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c   | 142
-rw-r--r--   vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c   | 104
-rw-r--r--   vnet/vnet/devices/dpdk/ipsec/ipsec.h         |  15
-rw-r--r--   vnet/vnet/ipsec/ipsec.h                      |  23
-rw-r--r--   vnet/vnet/ipsec/ipsec_cli.c                  |  25
6 files changed, 250 insertions, 90 deletions
diff --git a/vnet/vnet/devices/dpdk/ipsec/esp.h b/vnet/vnet/devices/dpdk/ipsec/esp.h
index 71282ac0cde..7ef90c49816 100644
--- a/vnet/vnet/devices/dpdk/ipsec/esp.h
+++ b/vnet/vnet/devices/dpdk/ipsec/esp.h
@@ -64,6 +64,11 @@ dpdk_esp_init ()
c->key_len = 32;
c->iv_len = 16;
+ c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_GCM_128];
+ c->algo = RTE_CRYPTO_CIPHER_AES_GCM;
+ c->key_len = 16;
+ c->iv_len = 8;
+
vec_validate (em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1);
i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
@@ -85,6 +90,10 @@ dpdk_esp_init ()
i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
i->algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
i->trunc_size = 32;
+
+ i = &em->esp_integ_algs[IPSEC_INTEG_ALG_AES_GCM_128];
+ i->algo = RTE_CRYPTO_AUTH_AES_GCM;
+ i->trunc_size = 16;
}
static_always_inline int
@@ -150,6 +159,9 @@ translate_crypto_algo(ipsec_crypto_alg_t crypto_algo,
case IPSEC_CRYPTO_ALG_AES_CBC_256:
cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
break;
+ case IPSEC_CRYPTO_ALG_AES_GCM_128:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+ break;
default:
return -1;
}
@@ -161,7 +173,7 @@ translate_crypto_algo(ipsec_crypto_alg_t crypto_algo,
static_always_inline int
translate_integ_algo(ipsec_integ_alg_t integ_alg,
- struct rte_crypto_sym_xform *auth_xform)
+ struct rte_crypto_sym_xform *auth_xform, int use_esn)
{
switch (integ_alg) {
case IPSEC_INTEG_ALG_NONE:
@@ -188,6 +200,11 @@ translate_integ_algo(ipsec_integ_alg_t integ_alg,
auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
auth_xform->auth.digest_length = 32;
break;
+ case IPSEC_INTEG_ALG_AES_GCM_128:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
+ auth_xform->auth.digest_length = 16;
+ auth_xform->auth.add_auth_data_length = use_esn? 12 : 8;
+ break;
default:
return -1;
}
@@ -209,6 +226,16 @@ create_sym_sess(ipsec_sa_t *sa, crypto_sa_session_t *sa_sess, u8 is_outbound)
uword key = 0, *data;
crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *)&key;
+ if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ {
+ sa->crypto_key_len -= 4;
+ clib_memcpy(&sa->salt, &sa->crypto_key[sa->crypto_key_len], 4);
+ }
+ else
+ {
+ sa->salt = (u32) rand();
+ }
+
cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
cipher_xform.cipher.key.data = sa->crypto_key;
cipher_xform.cipher.key.length = sa->crypto_key_len;
@@ -221,7 +248,7 @@ create_sym_sess(ipsec_sa_t *sa, crypto_sa_session_t *sa_sess, u8 is_outbound)
return -1;
p_key->cipher_algo = cipher_xform.cipher.algo;
- if (translate_integ_algo(sa->integ_alg, &auth_xform) < 0)
+ if (translate_integ_algo(sa->integ_alg, &auth_xform, sa->use_esn) < 0)
return -1;
p_key->auth_algo = auth_xform.auth.algo;
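
Note how create_sym_sess () above peels the salt off the user-supplied key for GCM: the operator configures 20 bytes, the final 4 of which are stored as sa->salt while the cipher session sees a 16-byte AES key. A hedged standalone sketch of that split (the length check is illustrative; the hunk above does not validate it at this point):

#include <stdint.h>
#include <string.h>

static int
split_gcm_128_key (uint8_t * key, uint32_t * key_len, uint32_t * salt)
{
  if (*key_len != 20)		/* 16B AES-128 key + 4B salt */
    return -1;
  *key_len -= 4;
  memcpy (salt, key + *key_len, 4);
  return 0;
}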
diff --git a/vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c b/vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c
index c898d05e27f..2c43e24b531 100644
--- a/vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c
+++ b/vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c
@@ -127,13 +127,12 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
while (n_left_from > 0 && n_left_to_next > 0)
{
- u32 bi0, sa_index0 = ~0, seq, icv_size;
+ u32 bi0, sa_index0 = ~0, seq, icv_size, iv_size;
vlib_buffer_t * b0;
esp_header_t * esp0;
ipsec_sa_t * sa0;
struct rte_mbuf * mb0 = 0;
const int BLOCK_SIZE = 16;
- const int IV_SIZE = 16;
crypto_sa_session_t * sa_sess;
void * sess;
u16 qp_index;
@@ -209,6 +208,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
rte_crypto_op_attach_sym_session(cop, sess);
icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
/* Convert vlib buffer to mbuf */
mb0 = rte_mbuf_from_vlib_buffer(b0);
@@ -218,7 +218,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
/* Outer IP header has already been stripped */
u16 payload_len = rte_pktmbuf_pkt_len(mb0) - sizeof (esp_header_t) -
- IV_SIZE - icv_size;
+ iv_size - icv_size;
if ((payload_len & (BLOCK_SIZE - 1)) || (payload_len <= 0))
{
@@ -239,46 +239,82 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
struct rte_crypto_sym_op *sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
sym_cop->m_src = mb0;
- sym_cop->cipher.data.offset = sizeof (esp_header_t) + IV_SIZE;
+ sym_cop->cipher.data.offset = sizeof (esp_header_t) + iv_size;
sym_cop->cipher.data.length = payload_len;
- sym_cop->cipher.iv.data =
- rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t));
- sym_cop->cipher.iv.phys_addr =
- rte_pktmbuf_mtophys_offset(mb0, sizeof (esp_header_t));
- sym_cop->cipher.iv.length = IV_SIZE;
-
- if (sa0->use_esn)
- {
- dpdk_cop_priv_t* priv = (dpdk_cop_priv_t*) (sym_cop + 1);
- u8* payload_end =
- rte_pktmbuf_mtod_offset(mb0, u8*, sizeof(esp_header_t) + IV_SIZE +
- payload_len);
-
- memcpy (priv->icv, payload_end, icv_size);
- *((u32*) payload_end) = sa0->seq_hi;
- sym_cop->auth.data.offset = 0;
- sym_cop->auth.data.length =
- sizeof(esp_header_t) + IV_SIZE + payload_len + sizeof(sa0->seq_hi);
- sym_cop->auth.digest.data = priv->icv;
- sym_cop->auth.digest.phys_addr =
- cop->phys_addr + (uintptr_t) priv->icv - (uintptr_t) cop;
- sym_cop->auth.digest.length = icv_size;
- }
- else
- {
- sym_cop->auth.data.offset = 0;
- sym_cop->auth.data.length = sizeof(esp_header_t) +
- IV_SIZE + payload_len;
-
- sym_cop->auth.digest.data =
- rte_pktmbuf_mtod_offset(mb0, void*,
- rte_pktmbuf_pkt_len(mb0) - icv_size);
- sym_cop->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(mb0,
- rte_pktmbuf_pkt_len(mb0) - icv_size);
- sym_cop->auth.digest.length = icv_size;
- }
+ u8 *iv = rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t));
+ dpdk_cop_priv_t * priv = (dpdk_cop_priv_t *)(sym_cop + 1);
+
+ if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ {
+ dpdk_gcm_cnt_blk *icb = &priv->cb;
+ icb->salt = sa0->salt;
+ clib_memcpy(icb->iv, iv, 8);
+ icb->cnt = clib_host_to_net_u32(1);
+ sym_cop->cipher.iv.data = (u8 *)icb;
+ sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
+ (u8 *)icb - rte_pktmbuf_mtod(mb0, u8 *));
+ sym_cop->cipher.iv.length = 16;
+
+ u8 *aad = priv->aad;
+ clib_memcpy(aad, iv - sizeof(esp_header_t), 8);
+ sym_cop->auth.aad.data = aad;
+ sym_cop->auth.aad.phys_addr = cop->phys_addr +
+ (uintptr_t)aad - (uintptr_t)cop;
+ if (sa0->use_esn)
+ {
+ *((u32*)&aad[8]) = sa0->seq_hi;
+ sym_cop->auth.aad.length = 12;
+ }
+ else
+ {
+ sym_cop->auth.aad.length = 8;
+ }
+
+ sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
+ rte_pktmbuf_pkt_len(mb0) - icv_size);
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
+ rte_pktmbuf_pkt_len(mb0) - icv_size);
+ sym_cop->auth.digest.length = icv_size;
+
+ }
+ else
+ {
+ sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(mb0, void*,
+ sizeof (esp_header_t));
+ sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
+ sizeof (esp_header_t));
+ sym_cop->cipher.iv.length = iv_size;
+
+ if (sa0->use_esn)
+ {
+ dpdk_cop_priv_t* priv = (dpdk_cop_priv_t*) (sym_cop + 1);
+ u8* payload_end = rte_pktmbuf_mtod_offset(
+ mb0, u8*, sizeof(esp_header_t) + iv_size + payload_len);
+
+ clib_memcpy (priv->icv, payload_end, icv_size);
+ *((u32*) payload_end) = sa0->seq_hi;
+ sym_cop->auth.data.offset = 0;
+ sym_cop->auth.data.length = sizeof(esp_header_t) + iv_size
+ + payload_len + sizeof(sa0->seq_hi);
+ sym_cop->auth.digest.data = priv->icv;
+ sym_cop->auth.digest.phys_addr = cop->phys_addr
+ + (uintptr_t) priv->icv - (uintptr_t) cop;
+ sym_cop->auth.digest.length = icv_size;
+ }
+ else
+ {
+ sym_cop->auth.data.offset = 0;
+ sym_cop->auth.data.length = sizeof(esp_header_t) +
+ iv_size + payload_len;
+
+ sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
+ rte_pktmbuf_pkt_len(mb0) - icv_size);
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
+ rte_pktmbuf_pkt_len(mb0) - icv_size);
+ sym_cop->auth.digest.length = icv_size;
+ }
+ }
trace:
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
@@ -391,8 +427,7 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
while (n_left_from > 0 && n_left_to_next > 0)
{
esp_footer_t * f0;
- const u32 IV_SIZE = 16;
- u32 bi0, next0, icv_size;
+ u32 bi0, next0, icv_size, iv_size;
vlib_buffer_t * b0 = 0;
ip4_header_t *ih4 = 0, *oh4 = 0;
ip6_header_t *ih6 = 0, *oh6 = 0;
@@ -415,6 +450,7 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
to_next += 1;
icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
if (sa0->use_anti_replay)
{
@@ -428,7 +464,7 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
}
ih4 = (ip4_header_t *) (b0->data + sizeof(ethernet_header_t));
- vlib_buffer_advance (b0, sizeof (esp_header_t) + IV_SIZE);
+ vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size);
b0->current_length -= (icv_size + 2);
b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -475,33 +511,31 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
{
if (PREDICT_FALSE(transport_ip6))
{
- next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
ih6 = (ip6_header_t *) (b0->data + sizeof(ethernet_header_t));
vlib_buffer_advance (b0, -sizeof(ip6_header_t));
oh6 = vlib_buffer_get_current (b0);
memmove(oh6, ih6, sizeof(ip6_header_t));
+ next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
oh6->protocol = f0->next_header;
oh6->payload_length =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0) -
- sizeof (ip6_header_t));
+ clib_host_to_net_u16 (
+ vlib_buffer_length_in_chain(vm, b0) -
+ sizeof (ip6_header_t));
}
else
{
- next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
vlib_buffer_advance (b0, -sizeof(ip4_header_t));
oh4 = vlib_buffer_get_current (b0);
+ memmove(oh4, ih4, sizeof(ip4_header_t));
+ next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
oh4->ip_version_and_header_length = 0x45;
- oh4->tos = ih4->tos;
oh4->fragment_id = 0;
oh4->flags_and_fragment_offset = 0;
- oh4->ttl = ih4->ttl;
oh4->protocol = f0->next_header;
- oh4->src_address.as_u32 = ih4->src_address.as_u32;
- oh4->dst_address.as_u32 = ih4->dst_address.as_u32;
- oh4->length =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ oh4->length = clib_host_to_net_u16 (
+ vlib_buffer_length_in_chain (vm, b0));
oh4->checksum = ip4_header_checksum (oh4);
}
}
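
The decrypt path above also changes what gets authenticated for GCM: instead of digesting the whole ESP header plus payload, only the SPI/sequence words become AAD, 8 bytes normally and 12 with extended sequence numbers, matching the add_auth_data_length of 8/12 set in translate_integ_algo (). A sketch of that AAD construction in the byte order the patch uses (the helper is illustrative; esp_hdr points at the start of the ESP header):

#include <stdint.h>
#include <string.h>

static uint32_t
build_gcm_aad (uint8_t aad[12], const void *esp_hdr,
	       int use_esn, uint32_t seq_hi)
{
  memcpy (aad, esp_hdr, 8);		/* SPI + low 32 bits of seq */
  if (!use_esn)
    return 8;
  *((uint32_t *) & aad[8]) = seq_hi;	/* high 32 bits appended for ESN */
  return 12;
}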
diff --git a/vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c b/vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c
index aef4b90d51a..7e41007c92c 100644
--- a/vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c
+++ b/vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c
@@ -147,7 +147,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
u8 next_hdr_type;
u8 transport_mode = 0;
const int BLOCK_SIZE = 16;
- const int IV_SIZE = 16;
+ u32 iv_size;
u16 orig_sz;
crypto_sa_session_t *sa_sess;
void *sess;
@@ -196,6 +196,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
bi_to_enq[qp_index] += 1;
ssize_t adv;
+ iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
ih0 = vlib_buffer_get_current (b0);
orig_sz = b0->current_length;
is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60;
@@ -223,11 +224,11 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
((u8 *) vlib_buffer_get_current (b0) -
sizeof (ethernet_header_t));
ethernet_header_t *oeh0 =
- (ethernet_header_t *) ((u8 *) ieh0 + (adv - IV_SIZE));
+ (ethernet_header_t *) ((u8 *) ieh0 + (adv - iv_size));
clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
}
- vlib_buffer_advance (b0, adv - IV_SIZE);
+ vlib_buffer_advance (b0, adv - iv_size);
/* XXX IP6/ip4 and IP4/IP6 not supported, only IP4/IP4 and IP6/IP6 */
@@ -258,11 +259,20 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
else
{
ip_hdr_size = sizeof (ip4_header_t);
- next_hdr_type = IP_PROTOCOL_IP_IN_IP;
oh0 = vlib_buffer_get_current (b0);
+ if (PREDICT_TRUE (sa0->is_tunnel))
+ {
+ next_hdr_type = IP_PROTOCOL_IP_IN_IP;
+ oh0->ip4.tos = ih0->ip4.tos;
+ }
+ else
+ {
+ next_hdr_type = ih0->ip4.protocol;
+ memmove (oh0, ih0, sizeof (ip4_header_t));
+ }
+
oh0->ip4.ip_version_and_header_length = 0x45;
- oh0->ip4.tos = ih0->ip4.tos;
oh0->ip4.fragment_id = 0;
oh0->ip4.flags_and_fragment_offset = 0;
oh0->ip4.ttl = 254;
@@ -299,13 +309,6 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
{
next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
transport_mode = 1;
- /*ipv6 already handled */
- if (PREDICT_TRUE (!is_ipv6))
- {
- next_hdr_type = ih0->ip4.protocol;
- oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32;
- oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32;
- }
}
ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
@@ -337,8 +340,6 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *) (sym_cop + 1);
vnet_buffer (b0)->unused[0] = next0;
- priv->iv[0] = sa0->seq;
- priv->iv[1] = sa0->seq_hi;
mb0 = rte_mbuf_from_vlib_buffer (b0);
mb0->data_len = b0->current_length;
@@ -348,21 +349,71 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
rte_crypto_op_attach_sym_session (cop, sess);
sym_cop->m_src = mb0;
- sym_cop->cipher.data.offset = ip_hdr_size + sizeof (esp_header_t);
- sym_cop->cipher.data.length = BLOCK_SIZE * blocks + IV_SIZE;
- sym_cop->cipher.iv.data = (u8 *) priv->iv;
- sym_cop->cipher.iv.phys_addr = cop->phys_addr +
- (uintptr_t) priv->iv - (uintptr_t) cop;
- sym_cop->cipher.iv.length = IV_SIZE;
+ dpdk_gcm_cnt_blk *icb = &priv->cb;
+ icb->salt = sa0->salt;
+ icb->iv[0] = sa0->seq;
+ icb->iv[1] = sa0->seq_hi;
+
+ if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ {
+ icb->cnt = clib_host_to_net_u32 (1);
+ clib_memcpy (vlib_buffer_get_current (b0) + ip_hdr_size +
+ sizeof (esp_header_t), icb->iv, 8);
+ sym_cop->cipher.data.offset =
+ ip_hdr_size + sizeof (esp_header_t) + iv_size;
+ sym_cop->cipher.data.length = BLOCK_SIZE * blocks;
+ sym_cop->cipher.iv.length = 16;
+ }
+ else
+ {
+ sym_cop->cipher.data.offset =
+ ip_hdr_size + sizeof (esp_header_t);
+ sym_cop->cipher.data.length = BLOCK_SIZE * blocks + iv_size;
+ sym_cop->cipher.iv.length = iv_size;
+ }
+
+ sym_cop->cipher.iv.data = (u8 *) icb;
+ sym_cop->cipher.iv.phys_addr = cop->phys_addr + (uintptr_t) icb
+ - (uintptr_t) cop;
+
ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG);
ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE);
- sym_cop->auth.data.offset = ip_hdr_size;
- sym_cop->auth.data.length = b0->current_length - ip_hdr_size -
- em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ if (PREDICT_FALSE (sa0->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128))
+ {
+ u8 *aad = priv->aad;
+ clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size,
+ 8);
+ sym_cop->auth.aad.data = aad;
+ sym_cop->auth.aad.phys_addr = cop->phys_addr +
+ (uintptr_t) aad - (uintptr_t) cop;
+
+ if (PREDICT_FALSE (sa0->use_esn))
+ {
+ *((u32 *) & aad[8]) = sa0->seq_hi;
+ sym_cop->auth.aad.length = 12;
+ }
+ else
+ {
+ sym_cop->auth.aad.length = 8;
+ }
+ }
+ else
+ {
+ sym_cop->auth.data.offset = ip_hdr_size;
+ sym_cop->auth.data.length = b0->current_length - ip_hdr_size
+ - em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ if (PREDICT_FALSE (sa0->use_esn))
+ {
+ u8 *payload_end =
+ vlib_buffer_get_current (b0) + b0->current_length;
+ *((u32 *) payload_end) = sa0->seq_hi;
+ sym_cop->auth.data.length += sizeof (sa0->seq_hi);
+ }
+ }
sym_cop->auth.digest.data = vlib_buffer_get_current (b0) +
b0->current_length -
em->esp_integ_algs[sa0->integ_alg].trunc_size;
@@ -374,13 +425,6 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
sym_cop->auth.digest.length =
em->esp_integ_algs[sa0->integ_alg].trunc_size;
- if (PREDICT_FALSE (sa0->use_esn))
- {
- u8 *payload_end =
- vlib_buffer_get_current (b0) + b0->current_length;
- *((u32 *) payload_end) = sa0->seq_hi;
- sym_cop->auth.data.length += sizeof (sa0->seq_hi);
- }
if (PREDICT_FALSE (is_ipv6))
{
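
On the encrypt side the cipher region differs between the two families: the 8-byte explicit GCM IV (built from seq/seq_hi) is copied into the packet but excluded from encryption, whereas the CBC path ciphers the IV together with the padded payload. A condensed sketch of that offset/length selection, assuming DPDK 16.11's rte_crypto_sym_op and an 8-byte esp_header_t (other names illustrative):

#include <rte_crypto_sym.h>

enum { ESP_BLOCK_SIZE = 16, ESP_HDR_SIZE = 8 };

static void
set_cipher_region (struct rte_crypto_sym_op *op, int is_gcm,
		   uint32_t ip_hdr_size, uint32_t iv_size, uint32_t blocks)
{
  if (is_gcm)
    {
      /* explicit IV sits in the packet but is not ciphered */
      op->cipher.data.offset = ip_hdr_size + ESP_HDR_SIZE + iv_size;
      op->cipher.data.length = ESP_BLOCK_SIZE * blocks;
    }
  else
    {
      /* CBC ciphers IV + payload together */
      op->cipher.data.offset = ip_hdr_size + ESP_HDR_SIZE;
      op->cipher.data.length = ESP_BLOCK_SIZE * blocks + iv_size;
    }
}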
diff --git a/vnet/vnet/devices/dpdk/ipsec/ipsec.h b/vnet/vnet/devices/dpdk/ipsec/ipsec.h
index e103655ca71..e6c7498c0d3 100644
--- a/vnet/vnet/devices/dpdk/ipsec/ipsec.h
+++ b/vnet/vnet/devices/dpdk/ipsec/ipsec.h
@@ -32,8 +32,19 @@
typedef struct
{
- u32 iv[4];
- u8 icv[64];
+ u32 salt;
+ u32 iv[2];
+ u32 cnt;
+} dpdk_gcm_cnt_blk;
+
+typedef struct
+{
+ dpdk_gcm_cnt_blk cb;
+ union
+ {
+ u8 aad[12];
+ u8 icv[64];
+ };
} dpdk_cop_priv_t;
typedef struct
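
The reworked per-op private area stays small because a single crypto op is either GCM (needing the 12-byte AAD scratch) or HMAC-with-ESN (needing the 64-byte ICV save area), never both, so the two buffers can share storage in the union. Compile-time checks one could add next to the struct (illustrative, C11 _Static_assert):

_Static_assert (sizeof (dpdk_gcm_cnt_blk) == 16,
		"GCM counter block must be exactly one AES block");
_Static_assert (sizeof (((dpdk_cop_priv_t *) 0)->aad) == 12,
		"AAD scratch must hold SPI + ESN sequence");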
diff --git a/vnet/vnet/ipsec/ipsec.h b/vnet/vnet/ipsec/ipsec.h
index 65d7bad378d..32c7edfc29d 100644
--- a/vnet/vnet/ipsec/ipsec.h
+++ b/vnet/vnet/ipsec/ipsec.h
@@ -31,11 +31,20 @@ typedef enum
IPSEC_POLICY_N_ACTION,
} ipsec_policy_action_t;
+#if DPDK_CRYPTO==1
+#define foreach_ipsec_crypto_alg \
+ _(0, NONE, "none") \
+ _(1, AES_CBC_128, "aes-cbc-128") \
+ _(2, AES_CBC_192, "aes-cbc-192") \
+ _(3, AES_CBC_256, "aes-cbc-256") \
+ _(4, AES_GCM_128, "aes-gcm-128")
+#else
#define foreach_ipsec_crypto_alg \
_(0, NONE, "none") \
_(1, AES_CBC_128, "aes-cbc-128") \
_(2, AES_CBC_192, "aes-cbc-192") \
_(3, AES_CBC_256, "aes-cbc-256")
+#endif
typedef enum
{
@@ -45,6 +54,17 @@ typedef enum
IPSEC_CRYPTO_N_ALG,
} ipsec_crypto_alg_t;
+#if DPDK_CRYPTO==1
+#define foreach_ipsec_integ_alg \
+ _(0, NONE, "none") \
+ _(1, MD5_96, "md5-96") /* RFC2403 */ \
+ _(2, SHA1_96, "sha1-96") /* RFC2404 */ \
+ _(3, SHA_256_96, "sha-256-96") /* draft-ietf-ipsec-ciph-sha-256-00 */ \
+ _(4, SHA_256_128, "sha-256-128") /* RFC4868 */ \
+ _(5, SHA_384_192, "sha-384-192") /* RFC4868 */ \
+ _(6, SHA_512_256, "sha-512-256") /* RFC4868 */ \
+ _(7, AES_GCM_128, "aes-gcm-128")
+#else
#define foreach_ipsec_integ_alg \
_(0, NONE, "none") \
_(1, MD5_96, "md5-96") /* RFC2403 */ \
@@ -53,6 +73,7 @@ typedef enum
_(4, SHA_256_128, "sha-256-128") /* RFC4868 */ \
_(5, SHA_384_192, "sha-384-192") /* RFC4868 */ \
_(6, SHA_512_256, "sha-512-256") /* RFC4868 */
+#endif
typedef enum
{
@@ -90,6 +111,8 @@ typedef struct
ip46_address_t tunnel_src_addr;
ip46_address_t tunnel_dst_addr;
+ u32 salt;
+
/* runtime */
u32 seq;
u32 seq_hi;
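
The foreach_ipsec_*_alg lists above are X-macros: each consumer redefines _() and expands the list, so adding aes-gcm-128 in one place updates the enum, the format strings, and the unformat parsers together. A sketch of the expansion pattern behind the typedef enum shown in the diff context (identifiers suffixed here to avoid clashing with the real ones):

#define _(v, f, s) IPSEC_CRYPTO_ALG_EX_##f = v,
typedef enum
{
  foreach_ipsec_crypto_alg	/* NONE = 0 ... AES_GCM_128 = 4 */
  IPSEC_CRYPTO_N_ALG_EX,
} ipsec_crypto_alg_example_t;
#undef _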
diff --git a/vnet/vnet/ipsec/ipsec_cli.c b/vnet/vnet/ipsec/ipsec_cli.c
index 8920924d04e..7ab85d4aefb 100644
--- a/vnet/vnet/ipsec/ipsec_cli.c
+++ b/vnet/vnet/ipsec/ipsec_cli.c
@@ -99,7 +99,7 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
&sa.crypto_alg))
{
if (sa.crypto_alg < IPSEC_CRYPTO_ALG_AES_CBC_128 ||
- sa.crypto_alg > IPSEC_CRYPTO_ALG_AES_CBC_256)
+ sa.crypto_alg >= IPSEC_CRYPTO_N_ALG)
return clib_error_return (0, "unsupported crypto-alg: '%U'",
format_ipsec_crypto_alg, sa.crypto_alg);
}
@@ -109,8 +109,12 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
else if (unformat (line_input, "integ-alg %U", unformat_ipsec_integ_alg,
&sa.integ_alg))
{
+#if DPDK_CRYPTO==1
+ if (sa.integ_alg < IPSEC_INTEG_ALG_NONE ||
+#else
if (sa.integ_alg < IPSEC_INTEG_ALG_SHA1_96 ||
- sa.integ_alg > IPSEC_INTEG_ALG_SHA_512_256)
+#endif
+ sa.integ_alg >= IPSEC_INTEG_N_ALG)
return clib_error_return (0, "unsupported integ-alg: '%U'",
format_ipsec_integ_alg, sa.integ_alg);
}
@@ -137,6 +141,23 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
format_unformat_error, line_input);
}
+#if DPDK_CRYPTO==1
+ /*Special cases, aes-gcm-128 encryption */
+ if (sa.crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ {
+ if (sa.integ_alg != IPSEC_INTEG_ALG_NONE
+ && sa.integ_alg != IPSEC_INTEG_ALG_AES_GCM_128)
+ return clib_error_return (0,
+ "unsupported: aes-gcm-128 crypto-alg needs none as integ-alg");
+ else /*set integ-alg internally to aes-gcm-128 */
+ sa.integ_alg = IPSEC_INTEG_ALG_AES_GCM_128;
+ }
+ else if (sa.integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)
+ return clib_error_return (0, "unsupported integ-alg: aes-gcm-128");
+ else if (sa.integ_alg == IPSEC_INTEG_ALG_NONE)
+ return clib_error_return (0, "unsupported integ-alg: none");
+#endif
+
unformat_free (line_input);
if (sa.crypto_key_len > sizeof (sa.crypto_key))
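
Taken together, the CLI changes above enforce one rule for the DPDK backend: aes-gcm-128 must be paired with integ-alg none (GCM supplies its own authentication), after which the code selects the GCM integ-alg internally; a standalone aes-gcm-128 integ-alg, or none with any other cipher, is rejected. A condensed, illustrative restatement of that decision logic (ipsec_sa_t and the enum values come from the surrounding tree):

static const char *
check_gcm_combo (ipsec_sa_t * sa)
{
  if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
    {
      if (sa->integ_alg != IPSEC_INTEG_ALG_NONE
	  && sa->integ_alg != IPSEC_INTEG_ALG_AES_GCM_128)
	return "aes-gcm-128 crypto-alg needs none as integ-alg";
      sa->integ_alg = IPSEC_INTEG_ALG_AES_GCM_128;	/* set internally */
      return 0;
    }
  if (sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)
    return "unsupported integ-alg: aes-gcm-128";
  if (sa->integ_alg == IPSEC_INTEG_ALG_NONE)
    return "unsupported integ-alg: none";
  return 0;
}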