author     Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>   2017-08-24 14:09:17 +0100
committer  Damjan Marion <dmarion.lists@gmail.com>                     2017-08-25 14:24:53 +0000
commit     acdc306093aaea2633cf765307d6cb7c1b80081c (patch)
tree       b65547ba0c940597cde63d5521daa6f7fcd7308f /src/plugins
parent     ffef404ad8b21cbeb09c1f70e9decfe1146d8727 (diff)
dpdk: required changes for 17.08
DPDK 17.08 breaks ethdev and cryptodev APIs. Address those changes while
keeping backwards compatibility for DPDK 17.02 and 17.05.

Change-Id: Idd6ac264d0d047fe586c41d4c4ca74e8fc778a54
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
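For orientation, the whole patch hinges on two compile-time flags defined in
dpdk.am (first hunk below). A condensed sketch of what each flag gates; the
handler prototype here is hypothetical, not code from the patch:

/* DPDK_VOID_CALLBACK: pre-17.08 ethdev link-state callbacks return
 * void; 17.08 callbacks return int and take an extra ret_param.
 * DPDK_NO_AEAD: pre-17.08 cryptodev has no AEAD transform type, so
 * AES-GCM is modeled as a chained cipher + auth transform pair. */
#if DPDK_VOID_CALLBACK
void handler (uint8_t port, enum rte_eth_event_type ev, void *arg);
#else
int handler (uint8_t port, enum rte_eth_event_type ev, void *arg,
             void *ret_param);
#endif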
Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/dpdk.am                   |  16
-rw-r--r--  src/plugins/dpdk/device/common.c      |  28
-rw-r--r--  src/plugins/dpdk/device/dpdk.h        |   7
-rw-r--r--  src/plugins/dpdk/ipsec/cli.c          |  15
-rw-r--r--  src/plugins/dpdk/ipsec/esp.h          | 212
-rw-r--r--  src/plugins/dpdk/ipsec/esp_decrypt.c  | 140
-rw-r--r--  src/plugins/dpdk/ipsec/esp_encrypt.c  | 106
-rw-r--r--  src/plugins/dpdk/ipsec/ipsec.c        | 130
-rw-r--r--  src/plugins/dpdk/ipsec/ipsec.h        |  33
9 files changed, 471 insertions, 216 deletions
diff --git a/src/plugins/dpdk.am b/src/plugins/dpdk.am
index 3a1ffeeb4a2..15195a219ce 100644
--- a/src/plugins/dpdk.am
+++ b/src/plugins/dpdk.am
@@ -19,20 +19,24 @@ dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -ldpdk
else
dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -Wl,--whole-archive,-l:libdpdk.a,--no-whole-archive
endif
-if WITH_DPDK_AESNI_MB_PMD
+if WITH_AESNI_MB_LIB
dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libIPSec_MB.a,-l:libIPSec_MB.a
endif
-if WITH_DPDK_AESNI_GCM_PMD
+if WITH_ISA_L_CRYPTO_LIB
dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libisal_crypto.a,-l:libisal_crypto.a
endif
-dpdk_plugin_la_LDFLAGS += -Wl,-lm,-ldl
-if WITH_DPDK_MLX5_PMD
+if WITH_IBVERBS_LIB
dpdk_plugin_la_LDFLAGS += -Wl,-libverbs
endif
-if WITH_DPDK_MLX4_PMD
-dpdk_plugin_la_LDFLAGS += -Wl,-libverbs
+if DPDK_IS_1702_OR_1705
+dpdk_plugin_la_CFLAGS = $(AM_CFLAGS) -DDPDK_VOID_CALLBACK=1 -DDPDK_NO_AEAD=1
+else
+dpdk_plugin_la_CFLAGS = $(AM_CFLAGS) -DDPDK_VOID_CALLBACK=0 -DDPDK_NO_AEAD=0
+dpdk_plugin_la_LDFLAGS += -Wl,-lnuma
endif
+dpdk_plugin_la_LDFLAGS += -Wl,-lm,-ldl
+
dpdk_plugin_la_SOURCES = \
dpdk/main.c \
dpdk/buffer.c \
diff --git a/src/plugins/dpdk/device/common.c b/src/plugins/dpdk/device/common.c
index df52c58fa18..2707b4d889c 100644
--- a/src/plugins/dpdk/device/common.c
+++ b/src/plugins/dpdk/device/common.c
@@ -181,9 +181,9 @@ dpdk_device_stop (dpdk_device_t * xd)
}
}
-void
-dpdk_port_state_callback (uint8_t port_id,
- enum rte_eth_event_type type, void *param)
+always_inline int
+dpdk_port_state_callback_inline (uint8_t port_id,
+ enum rte_eth_event_type type, void *param)
{
struct rte_eth_link link;
vlib_main_t *vm = vlib_get_main ();
@@ -193,7 +193,7 @@ dpdk_port_state_callback (uint8_t port_id,
if (type != RTE_ETH_EVENT_INTR_LSC)
{
clib_warning ("Unknown event %d received for port %d", type, port_id);
- return;
+ return -1;
}
rte_eth_link_get_nowait (port_id, &link);
@@ -238,8 +238,28 @@ dpdk_port_state_callback (uint8_t port_id,
else
clib_warning ("Port %d Link Down\n\n", port_id);
}
+
+ return 0;
+}
+
+#if DPDK_VOID_CALLBACK
+void
+dpdk_port_state_callback (uint8_t port_id,
+ enum rte_eth_event_type type, void *param)
+{
+ dpdk_port_state_callback_inline (port_id, type, param);
}
+#else
+int
+dpdk_port_state_callback (uint8_t port_id,
+ enum rte_eth_event_type type,
+ void *param,
+ void *ret_param __attribute__ ((unused)))
+{
+ return dpdk_port_state_callback_inline (port_id, type, param);
+}
+#endif
/*
* fd.io coding-style-patch-verification: ON
*
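For context, registering the reworked callback is unchanged across these DPDK
releases; only the callback signature needs gating. A minimal sketch, assuming
a caller-supplied port_id (the plugin's actual registration site is not in
this hunk):

#include <rte_ethdev.h>

static void
register_lsc_handler (uint8_t port_id)
{
  /* dpdk_port_state_callback matches rte_eth_dev_cb_fn for whichever
   * DPDK release is being compiled against, so no cast is needed. */
  rte_eth_dev_callback_register (port_id, RTE_ETH_EVENT_INTR_LSC,
                                 dpdk_port_state_callback, NULL);
}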
diff --git a/src/plugins/dpdk/device/dpdk.h b/src/plugins/dpdk/device/dpdk.h
index 29a2c760e8d..1e34e3fbaf7 100644
--- a/src/plugins/dpdk/device/dpdk.h
+++ b/src/plugins/dpdk/device/dpdk.h
@@ -418,8 +418,15 @@ typedef struct
void dpdk_device_setup (dpdk_device_t * xd);
void dpdk_device_start (dpdk_device_t * xd);
void dpdk_device_stop (dpdk_device_t * xd);
+
+#if DPDK_VOID_CALLBACK
void dpdk_port_state_callback (uint8_t port_id,
enum rte_eth_event_type type, void *param);
+#else
+int dpdk_port_state_callback (uint8_t port_id,
+ enum rte_eth_event_type type,
+ void *param, void *ret_param);
+#endif
#define foreach_dpdk_error \
_(NONE, "no error") \
diff --git a/src/plugins/dpdk/ipsec/cli.c b/src/plugins/dpdk/ipsec/cli.c
index a9314065003..a9cf250298b 100644
--- a/src/plugins/dpdk/ipsec/cli.c
+++ b/src/plugins/dpdk/ipsec/cli.c
@@ -86,13 +86,28 @@ dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display)
hash_foreach (key, data, cwm->algo_qp_map,
({
cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+#if DPDK_NO_AEAD
cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
cap.sym.cipher.algo = p_key->cipher_algo;
+#else
+ if (p_key->is_aead)
+ {
+ cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ cap.sym.aead.algo = p_key->cipher_algo;
+ }
+ else
+ {
+ cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cap.sym.cipher.algo = p_key->cipher_algo;
+ }
+#endif
check_algo_is_supported (&cap, cipher_str);
+
cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
cap.sym.auth.algo = p_key->auth_algo;
check_algo_is_supported (&cap, auth_str);
+
vlib_cli_output (vm, "%u\t%10s\t%15s\t%3s\t%u\t%u\n",
vlib_mains[i]->thread_index, cipher_str, auth_str,
p_key->is_outbound ? "out" : "in",
diff --git a/src/plugins/dpdk/ipsec/esp.h b/src/plugins/dpdk/ipsec/esp.h
index 56f0c756eec..308a66afe74 100644
--- a/src/plugins/dpdk/ipsec/esp.h
+++ b/src/plugins/dpdk/ipsec/esp.h
@@ -22,6 +22,9 @@
typedef struct
{
enum rte_crypto_cipher_algorithm algo;
+#if ! DPDK_NO_AEAD
+ enum rte_crypto_aead_algorithm aead_algo;
+#endif
u8 key_len;
u8 iv_len;
} dpdk_esp_crypto_alg_t;
@@ -65,7 +68,11 @@ dpdk_esp_init ()
c->iv_len = 16;
c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_GCM_128];
+#if DPDK_NO_AEAD
c->algo = RTE_CRYPTO_CIPHER_AES_GCM;
+#else
+ c->aead_algo = RTE_CRYPTO_AEAD_AES_GCM;
+#endif
c->key_len = 16;
c->iv_len = 8;
@@ -90,42 +97,68 @@ dpdk_esp_init ()
i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
i->algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
i->trunc_size = 32;
-
+#if DPDK_NO_AEAD
i = &em->esp_integ_algs[IPSEC_INTEG_ALG_AES_GCM_128];
i->algo = RTE_CRYPTO_AUTH_AES_GCM;
i->trunc_size = 16;
+#endif
}
static_always_inline int
translate_crypto_algo (ipsec_crypto_alg_t crypto_algo,
- struct rte_crypto_sym_xform *cipher_xform)
+ struct rte_crypto_sym_xform *xform, u8 use_esn)
{
+#if ! DPDK_NO_AEAD
+ const u16 iv_off =
+ sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op) +
+ offsetof (dpdk_cop_priv_t, cb);
+#endif
+
+ xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
switch (crypto_algo)
{
case IPSEC_CRYPTO_ALG_NONE:
- cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+#if ! DPDK_NO_AEAD
+ xform->cipher.iv.offset = iv_off;
+ xform->cipher.iv.length = 0;
+#endif
+ xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
break;
case IPSEC_CRYPTO_ALG_AES_CBC_128:
case IPSEC_CRYPTO_ALG_AES_CBC_192:
case IPSEC_CRYPTO_ALG_AES_CBC_256:
- cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+#if ! DPDK_NO_AEAD
+ xform->cipher.iv.offset = iv_off;
+ xform->cipher.iv.length = 16;
+#endif
+ xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
break;
case IPSEC_CRYPTO_ALG_AES_GCM_128:
- cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+#if DPDK_NO_AEAD
+ xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+#else
+ xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ xform->aead.iv.offset = iv_off;
+ xform->aead.iv.length = 12; /* GCM IV, not ESP IV */
+ xform->aead.digest_length = 16;
+ xform->aead.aad_length = use_esn ? 12 : 8;
+#endif
break;
default:
return -1;
}
- cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
-
return 0;
}
static_always_inline int
translate_integ_algo (ipsec_integ_alg_t integ_alg,
- struct rte_crypto_sym_xform *auth_xform, int use_esn)
+ struct rte_crypto_sym_xform *auth_xform, u8 use_esn)
{
+ auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
switch (integ_alg)
{
case IPSEC_INTEG_ALG_NONE:
@@ -152,21 +185,21 @@ translate_integ_algo (ipsec_integ_alg_t integ_alg,
auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
auth_xform->auth.digest_length = 32;
break;
+#if DPDK_NO_AEAD
case IPSEC_INTEG_ALG_AES_GCM_128:
auth_xform->auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
auth_xform->auth.digest_length = 16;
auth_xform->auth.add_auth_data_length = use_esn ? 12 : 8;
break;
+#endif
default:
return -1;
}
- auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
-
return 0;
}
-static_always_inline int
+static_always_inline i32
create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
u8 is_outbound)
{
@@ -178,6 +211,10 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
struct rte_crypto_sym_xform *xfs;
uword key = 0, *data;
crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key;
+#if ! DPDK_NO_AEAD
+ i32 socket_id = rte_socket_id ();
+ i32 ret;
+#endif
if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
{
@@ -190,15 +227,7 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
sa->salt = random_u32 (&seed);
}
- cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- cipher_xform.cipher.key.data = sa->crypto_key;
- cipher_xform.cipher.key.length = sa->crypto_key_len;
-
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.auth.key.data = sa->integ_key;
- auth_xform.auth.key.length = sa->integ_key_len;
-
- if (translate_crypto_algo (sa->crypto_alg, &cipher_xform) < 0)
+ if (translate_crypto_algo (sa->crypto_alg, &cipher_xform, sa->use_esn) < 0)
return -1;
p_key->cipher_algo = cipher_xform.cipher.algo;
@@ -206,19 +235,44 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
return -1;
p_key->auth_algo = auth_xform.auth.algo;
- if (is_outbound)
+#if ! DPDK_NO_AEAD
+ if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
{
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- cipher_xform.next = &auth_xform;
+ cipher_xform.aead.key.data = sa->crypto_key;
+ cipher_xform.aead.key.length = sa->crypto_key_len;
+
+ if (is_outbound)
+ cipher_xform.cipher.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ else
+ cipher_xform.cipher.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
+ cipher_xform.next = NULL;
xfs = &cipher_xform;
+ p_key->is_aead = 1;
}
- else
+ else /* Cipher + Auth */
+#endif
{
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
- auth_xform.next = &cipher_xform;
- xfs = &auth_xform;
+ cipher_xform.cipher.key.data = sa->crypto_key;
+ cipher_xform.cipher.key.length = sa->crypto_key_len;
+
+ auth_xform.auth.key.data = sa->integ_key;
+ auth_xform.auth.key.length = sa->integ_key_len;
+
+ if (is_outbound)
+ {
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ cipher_xform.next = &auth_xform;
+ xfs = &cipher_xform;
+ }
+ else
+ {
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+ auth_xform.next = &cipher_xform;
+ xfs = &auth_xform;
+ }
+ p_key->is_aead = 0;
}
p_key->is_outbound = is_outbound;
@@ -227,17 +281,115 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
if (!data)
return -1;
+#if DPDK_NO_AEAD
sa_sess->sess =
rte_cryptodev_sym_session_create (cwm->qp_data[*data].dev_id, xfs);
-
if (!sa_sess->sess)
return -1;
+#else
+ sa_sess->sess =
+ rte_cryptodev_sym_session_create (dcm->sess_h_pools[socket_id]);
+ if (!sa_sess->sess)
+ return -1;
+
+ ret =
+ rte_cryptodev_sym_session_init (cwm->qp_data[*data].dev_id, sa_sess->sess,
+ xfs, dcm->sess_pools[socket_id]);
+ if (ret)
+ return -1;
+#endif
sa_sess->qp_index = (u8) * data;
return 0;
}
+static_always_inline void
+crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi)
+{
+ icb->salt = salt;
+ icb->iv[0] = seq;
+ icb->iv[1] = seq_hi;
+#if DPDK_NO_AEAD
+ icb->cnt = clib_host_to_net_u32 (1);
+#endif
+}
+
+#define __unused __attribute__((unused))
+static_always_inline void
+crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0,
+ struct rte_crypto_op *cop, void *session,
+ u32 cipher_off, u32 cipher_len,
+ u8 * icb __unused, u32 iv_size __unused,
+ u32 auth_off, u32 auth_len,
+ u8 * aad __unused, u32 aad_size __unused,
+ u8 * digest, u64 digest_paddr, u32 digest_size __unused)
+{
+ struct rte_crypto_sym_op *sym_cop;
+
+ sym_cop = (struct rte_crypto_sym_op *) (cop + 1);
+
+ sym_cop->m_src = mb0;
+ rte_crypto_op_attach_sym_session (cop, session);
+
+ if (!digest_paddr)
+ digest_paddr =
+ rte_pktmbuf_mtophys_offset (mb0, (uintptr_t) digest - (uintptr_t) mb0);
+
+#if DPDK_NO_AEAD
+ sym_cop->cipher.data.offset = cipher_off;
+ sym_cop->cipher.data.length = cipher_len;
+
+ sym_cop->cipher.iv.data = icb;
+ sym_cop->cipher.iv.phys_addr =
+ cop->phys_addr + (uintptr_t) icb - (uintptr_t) cop;
+ sym_cop->cipher.iv.length = iv_size;
+
+ if (is_aead)
+ {
+ sym_cop->auth.aad.data = aad;
+ sym_cop->auth.aad.phys_addr =
+ cop->phys_addr + (uintptr_t) aad - (uintptr_t) cop;
+ sym_cop->auth.aad.length = aad_size;
+ }
+ else
+ {
+ sym_cop->auth.data.offset = auth_off;
+ sym_cop->auth.data.length = auth_len;
+ }
+
+ sym_cop->auth.digest.data = digest;
+ sym_cop->auth.digest.phys_addr = digest_paddr;
+ sym_cop->auth.digest.length = digest_size;
+#else /* ! DPDK_NO_AEAD */
+ if (is_aead)
+ {
+ sym_cop->aead.data.offset = cipher_off;
+ sym_cop->aead.data.length = cipher_len;
+
+ sym_cop->aead.aad.data = aad;
+ sym_cop->aead.aad.phys_addr =
+ cop->phys_addr + (uintptr_t) aad - (uintptr_t) cop;
+
+ sym_cop->aead.digest.data = digest;
+ sym_cop->aead.digest.phys_addr = digest_paddr;
+ }
+ else
+ {
+ sym_cop->cipher.data.offset = cipher_off;
+ sym_cop->cipher.data.length = cipher_len;
+
+ sym_cop->auth.data.offset = auth_off;
+ sym_cop->auth.data.length = auth_len;
+
+ sym_cop->auth.digest.data = digest;
+ sym_cop->auth.digest.phys_addr = digest_paddr;
+ }
+#endif /* DPDK_NO_AEAD */
+}
+
+#undef __unused
+
#endif /* __DPDK_ESP_H__ */
/*
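The crypto_set_icb() helper above fills VPP's dpdk_gcm_cnt_blk. A sketch of
the layout it assumes (illustrative type; the real definition lives in the
plugin's ipsec headers):

typedef struct
{
  u32 salt;   /* per-SA salt, fixed when the SA is created           */
  u32 iv[2];  /* per-packet IV: ESP seq and seq_hi                   */
  u32 cnt;    /* initial block counter; only initialized (to 1, in
               * network byte order) on pre-17.08 builds, where the
               * PMD consumes the full 16-byte J0 counter block
               * instead of the 12-byte salt + IV that AEAD takes    */
} dpdk_gcm_cnt_blk_sketch;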
diff --git a/src/plugins/dpdk/ipsec/esp_decrypt.c b/src/plugins/dpdk/ipsec/esp_decrypt.c
index 9377970a00a..c4f295d393c 100644
--- a/src/plugins/dpdk/ipsec/esp_decrypt.c
+++ b/src/plugins/dpdk/ipsec/esp_decrypt.c
@@ -44,8 +44,7 @@ typedef enum {
_(NOT_IP, "Not IP packet (dropped)") \
_(ENQ_FAIL, "Enqueue failed (buffer full)") \
_(NO_CRYPTODEV, "Cryptodev not configured") \
- _(BAD_LEN, "Invalid ciphertext length") \
- _(UNSUPPORTED, "Cipher/Auth not supported")
+ _(BAD_LEN, "Invalid ciphertext length")
typedef enum {
@@ -122,7 +121,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
while (n_left_from > 0 && n_left_to_next > 0)
{
- u32 bi0, sa_index0 = ~0, seq, icv_size, iv_size;
+ u32 bi0, sa_index0 = ~0, seq, trunc_size, iv_size;
vlib_buffer_t * b0;
esp_header_t * esp0;
ipsec_sa_t * sa0;
@@ -169,18 +168,6 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
sa0->total_data_size += b0->current_length;
- if (PREDICT_FALSE(sa0->integ_alg == IPSEC_INTEG_ALG_NONE) ||
- PREDICT_FALSE(sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE))
- {
- clib_warning ("SPI %u : only cipher + auth supported", sa0->spi);
- vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
- ESP_DECRYPT_ERROR_UNSUPPORTED, 1);
- to_next[0] = bi0;
- to_next += 1;
- n_left_to_next -= 1;
- goto trace;
- }
-
sa_sess = pool_elt_at_index(cwm->sa_sess_d[0], sa_index0);
if (PREDICT_FALSE(!sa_sess->sess))
@@ -211,7 +198,10 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
rte_crypto_op_attach_sym_session(cop, sess);
- icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ trunc_size = 16;
+ else
+ trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
/* Convert vlib buffer to mbuf */
@@ -222,7 +212,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
/* Outer IP header has already been stripped */
u16 payload_len = rte_pktmbuf_pkt_len(mb0) - sizeof (esp_header_t) -
- iv_size - icv_size;
+ iv_size - trunc_size;
if ((payload_len & (BLOCK_SIZE - 1)) || (payload_len <= 0))
{
@@ -242,84 +232,64 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
struct rte_crypto_sym_op *sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
- sym_cop->m_src = mb0;
- sym_cop->cipher.data.offset = sizeof (esp_header_t) + iv_size;
- sym_cop->cipher.data.length = payload_len;
+ u8 is_aead = sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128;
+ u32 cipher_off, cipher_len;
+ u32 auth_off = 0, auth_len = 0, aad_size = 0;
+ u8 *aad = NULL, *digest = NULL;
+ u64 digest_paddr = 0;
u8 *iv = rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t));
- dpdk_cop_priv_t * priv = (dpdk_cop_priv_t *)(sym_cop + 1);
+ dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *)(sym_cop + 1);
+ dpdk_gcm_cnt_blk *icb = &priv->cb;
+
+ cipher_off = sizeof (esp_header_t) + iv_size;
+ cipher_len = payload_len;
- if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ digest =
+ vlib_buffer_get_current (b0) + sizeof(esp_header_t) +
+ iv_size + payload_len;
+
+ if (is_aead)
{
- dpdk_gcm_cnt_blk *icb = &priv->cb;
- icb->salt = sa0->salt;
- clib_memcpy(icb->iv, iv, 8);
- icb->cnt = clib_host_to_net_u32(1);
- sym_cop->cipher.iv.data = (u8 *)icb;
- sym_cop->cipher.iv.phys_addr = cop->phys_addr +
- (uintptr_t)icb - (uintptr_t)cop;
- sym_cop->cipher.iv.length = 16;
-
- u8 *aad = priv->aad;
- clib_memcpy(aad, iv - sizeof(esp_header_t), 8);
- sym_cop->auth.aad.data = aad;
- sym_cop->auth.aad.phys_addr = cop->phys_addr +
- (uintptr_t)aad - (uintptr_t)cop;
- if (sa0->use_esn)
- {
- *((u32*)&aad[8]) = sa0->seq_hi;
- sym_cop->auth.aad.length = 12;
- }
- else
- {
- sym_cop->auth.aad.length = 8;
- }
+ u32 *_iv = (u32 *) iv;
- sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
- rte_pktmbuf_pkt_len(mb0) - icv_size);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
- rte_pktmbuf_pkt_len(mb0) - icv_size);
- sym_cop->auth.digest.length = icv_size;
+ crypto_set_icb (icb, sa0->salt, _iv[0], _iv[1]);
+ iv_size = 16;
+ aad = priv->aad;
+ clib_memcpy(aad, esp0, 8);
+ aad_size = 8;
+ if (sa0->use_esn)
+ {
+ *((u32*)&aad[8]) = sa0->seq_hi;
+ aad_size = 12;
+ }
}
else
{
- sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(mb0, void*,
- sizeof (esp_header_t));
- sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
- sizeof (esp_header_t));
- sym_cop->cipher.iv.length = iv_size;
+ clib_memcpy(icb, iv, 16);
+
+ auth_off = 0;
+ auth_len = sizeof(esp_header_t) + iv_size + payload_len;
if (sa0->use_esn)
{
dpdk_cop_priv_t* priv = (dpdk_cop_priv_t*) (sym_cop + 1);
- u8* payload_end = rte_pktmbuf_mtod_offset(
- mb0, u8*, sizeof(esp_header_t) + iv_size + payload_len);
-
- clib_memcpy (priv->icv, payload_end, icv_size);
- *((u32*) payload_end) = sa0->seq_hi;
- sym_cop->auth.data.offset = 0;
- sym_cop->auth.data.length = sizeof(esp_header_t) + iv_size
- + payload_len + sizeof(sa0->seq_hi);
- sym_cop->auth.digest.data = priv->icv;
- sym_cop->auth.digest.phys_addr = cop->phys_addr
- + (uintptr_t) priv->icv - (uintptr_t) cop;
- sym_cop->auth.digest.length = icv_size;
- }
- else
- {
- sym_cop->auth.data.offset = 0;
- sym_cop->auth.data.length = sizeof(esp_header_t) +
- iv_size + payload_len;
-
- sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
- rte_pktmbuf_pkt_len(mb0) - icv_size);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
- rte_pktmbuf_pkt_len(mb0) - icv_size);
- sym_cop->auth.digest.length = icv_size;
+
+ clib_memcpy (priv->icv, digest, trunc_size);
+ *((u32*) digest) = sa0->seq_hi;
+ auth_len += sizeof(sa0->seq_hi);
+
+ digest = priv->icv;
+ digest_paddr =
+ cop->phys_addr + (uintptr_t) priv->icv - (uintptr_t) cop;
}
}
+ crypto_op_setup (is_aead, mb0, cop, sess,
+ cipher_off, cipher_len, (u8 *) icb, iv_size,
+ auth_off, auth_len, aad, aad_size,
+ digest, digest_paddr, trunc_size);
trace:
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -339,6 +309,9 @@ trace:
{
u32 enq;
+ if (!n_cop_qp[i])
+ continue;
+
qpd = vec_elt_at_index(cwm->qp_data, i);
enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
qpd->cops, n_cop_qp[i]);
@@ -433,7 +406,7 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
while (n_left_from > 0 && n_left_to_next > 0)
{
esp_footer_t * f0;
- u32 bi0, next0, icv_size, iv_size;
+ u32 bi0, next0, trunc_size, iv_size;
vlib_buffer_t * b0 = 0;
ip4_header_t *ih4 = 0, *oh4 = 0;
ip6_header_t *ih6 = 0, *oh6 = 0;
@@ -455,7 +428,10 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
to_next[0] = bi0;
to_next += 1;
- icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ trunc_size = 16;
+ else
+ trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
if (sa0->use_anti_replay)
@@ -472,7 +448,7 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
ih4 = (ip4_header_t *) (b0->data + sizeof(ethernet_header_t));
vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size);
- b0->current_length -= (icv_size + 2);
+ b0->current_length -= (trunc_size + 2);
b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (b0) +
b0->current_length);
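The AAD assembled in the GCM branch above follows RFC 4106: SPI plus the
32-bit sequence number (8 bytes), widened to 12 bytes with the high sequence
half when extended sequence numbers are in use. Condensed from the hunk above:

clib_memcpy (aad, esp0, 8);            /* SPI (4B) + seq lo (4B) */
aad_size = 8;
if (sa0->use_esn)
  {
    *(u32 *) &aad[8] = sa0->seq_hi;    /* seq hi under ESN */
    aad_size = 12;
  }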
diff --git a/src/plugins/dpdk/ipsec/esp_encrypt.c b/src/plugins/dpdk/ipsec/esp_encrypt.c
index ac552f6c0a6..6de444fd3bc 100644
--- a/src/plugins/dpdk/ipsec/esp_encrypt.c
+++ b/src/plugins/dpdk/ipsec/esp_encrypt.c
@@ -43,8 +43,7 @@ typedef enum
_(RX_PKTS, "ESP pkts received") \
_(SEQ_CYCLED, "sequence number cycled") \
_(ENQ_FAIL, "Enqueue failed (buffer full)") \
- _(NO_CRYPTODEV, "Cryptodev not configured") \
- _(UNSUPPORTED, "Cipher/Auth not supported")
+ _(NO_CRYPTODEV, "Cryptodev not configured")
typedef enum
@@ -142,6 +141,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
const int BLOCK_SIZE = 16;
u32 iv_size;
u16 orig_sz;
+ u8 trunc_size;
crypto_sa_session_t *sa_sess;
void *sess;
struct rte_crypto_op *cop = 0;
@@ -199,6 +199,11 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
ssize_t adv;
iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
+ if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ trunc_size = 16;
+ else
+ trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+
ih0 = vlib_buffer_get_current (b0);
orig_sz = b0->current_length;
is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60;
@@ -314,9 +319,6 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
transport_mode = 1;
}
- ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
- ASSERT (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE);
-
int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE;
/* pad packet in input buffer */
@@ -330,8 +332,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes;
f0->pad_length = pad_bytes;
f0->next_header = next_hdr_type;
- b0->current_length += pad_bytes + 2 +
- em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ b0->current_length += pad_bytes + 2 + trunc_size;
vnet_buffer (b0)->sw_if_index[VLIB_RX] =
vnet_buffer (b0)->sw_if_index[VLIB_RX];
@@ -349,88 +350,64 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
mb0->pkt_len = b0->current_length;
mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;
- rte_crypto_op_attach_sym_session (cop, sess);
+ dpdk_gcm_cnt_blk *icb = &priv->cb;
- sym_cop->m_src = mb0;
+ crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi);
- dpdk_gcm_cnt_blk *icb = &priv->cb;
- icb->salt = sa0->salt;
- icb->iv[0] = sa0->seq;
- icb->iv[1] = sa0->seq_hi;
- icb->cnt = clib_host_to_net_u32 (1);
+ u8 is_aead = sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128;
+ u32 cipher_off, cipher_len;
+ u32 auth_off = 0, auth_len = 0, aad_size = 0;
+ u8 *aad = NULL, *digest = NULL;
- if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+ if (is_aead)
{
u32 *esp_iv =
(u32 *) (b0->data + b0->current_data + ip_hdr_size +
sizeof (esp_header_t));
esp_iv[0] = sa0->seq;
esp_iv[1] = sa0->seq_hi;
- sym_cop->cipher.data.offset =
- ip_hdr_size + sizeof (esp_header_t) + iv_size;
- sym_cop->cipher.data.length = BLOCK_SIZE * blocks;
- sym_cop->cipher.iv.length = 16;
- }
- else
- {
- sym_cop->cipher.data.offset =
- ip_hdr_size + sizeof (esp_header_t);
- sym_cop->cipher.data.length = BLOCK_SIZE * blocks + iv_size;
- sym_cop->cipher.iv.length = iv_size;
- }
- sym_cop->cipher.iv.data = (u8 *) icb;
- sym_cop->cipher.iv.phys_addr = cop->phys_addr + (uintptr_t) icb
- - (uintptr_t) cop;
+ cipher_off = ip_hdr_size + sizeof (esp_header_t) + iv_size;
+ cipher_len = BLOCK_SIZE * blocks;
+ iv_size = 16; /* GCM IV size, not ESP IV size */
-
- ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG);
- ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE);
-
- if (PREDICT_FALSE (sa0->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128))
- {
- u8 *aad = priv->aad;
+ aad = priv->aad;
clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size,
8);
- sym_cop->auth.aad.data = aad;
- sym_cop->auth.aad.phys_addr = cop->phys_addr +
- (uintptr_t) aad - (uintptr_t) cop;
-
+ aad_size = 8;
if (PREDICT_FALSE (sa0->use_esn))
{
*((u32 *) & aad[8]) = sa0->seq_hi;
- sym_cop->auth.aad.length = 12;
- }
- else
- {
- sym_cop->auth.aad.length = 8;
+ aad_size = 12;
}
+
+ digest =
+ vlib_buffer_get_current (b0) + b0->current_length -
+ trunc_size;
}
else
{
- sym_cop->auth.data.offset = ip_hdr_size;
- sym_cop->auth.data.length = b0->current_length - ip_hdr_size
- - em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ cipher_off = ip_hdr_size + sizeof (esp_header_t);
+ cipher_len = BLOCK_SIZE * blocks + iv_size;
+
+ auth_off = ip_hdr_size;
+ auth_len = b0->current_length - ip_hdr_size - trunc_size;
+
+ digest =
+ vlib_buffer_get_current (b0) + b0->current_length -
+ trunc_size;
if (PREDICT_FALSE (sa0->use_esn))
{
- u8 *payload_end =
- vlib_buffer_get_current (b0) + b0->current_length;
- *((u32 *) payload_end) = sa0->seq_hi;
- sym_cop->auth.data.length += sizeof (sa0->seq_hi);
+ *((u32 *) digest) = sa0->seq_hi;
+ auth_len += sizeof (sa0->seq_hi);
}
}
- sym_cop->auth.digest.data = vlib_buffer_get_current (b0) +
- b0->current_length -
- em->esp_integ_algs[sa0->integ_alg].trunc_size;
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset (mb0,
- b0->current_length
- -
- em->esp_integ_algs
- [sa0->integ_alg].trunc_size);
- sym_cop->auth.digest.length =
- em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ crypto_op_setup (is_aead, mb0, cop, sess,
+ cipher_off, cipher_len, (u8 *) icb, iv_size,
+ auth_off, auth_len, aad, aad_size,
+ digest, 0, trunc_size);
if (PREDICT_FALSE (is_ipv6))
{
@@ -470,6 +447,9 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
{
u32 enq;
+ if (!n_cop_qp[i])
+ continue;
+
qpd = vec_elt_at_index(cwm->qp_data, i);
enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
qpd->cops, n_cop_qp[i]);
diff --git a/src/plugins/dpdk/ipsec/ipsec.c b/src/plugins/dpdk/ipsec/ipsec.c
index 7066564d41b..c922940ce9a 100644
--- a/src/plugins/dpdk/ipsec/ipsec.c
+++ b/src/plugins/dpdk/ipsec/ipsec.c
@@ -56,18 +56,23 @@ add_del_sa_sess (u32 sa_index, u8 is_add)
else
{
u8 dev_id;
+ i32 ret;
sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index);
dev_id = cwm->qp_data[sa_sess->qp_index].dev_id;
if (!sa_sess->sess)
continue;
-
- if (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess))
- {
- clib_warning("failed to free session");
- return -1;
- }
+#if DPDK_NO_AEAD
+ ret = (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess) == NULL);
+ ASSERT (ret);
+#else
+ ret = rte_cryptodev_sym_session_clear(dev_id, sa_sess->sess);
+ ASSERT (!ret);
+
+ ret = rte_cryptodev_sym_session_free(sa_sess->sess);
+ ASSERT (!ret);
+#endif
memset(sa_sess, 0, sizeof(sa_sess[0]));
}
}
@@ -94,7 +99,7 @@ update_qp_data (crypto_worker_main_t * cwm,
}
/* *INDENT-ON* */
- vec_add2 (cwm->qp_data, qpd, 1);
+ vec_add2_aligned (cwm->qp_data, qpd, 1, CLIB_CACHE_LINE_BYTES);
qpd->dev_id = cdev_id;
qpd->qp_id = qp_id;
@@ -119,6 +124,9 @@ add_mapping (crypto_worker_main_t * cwm,
p_key->cipher_algo = (u8) cipher_cap->sym.cipher.algo;
p_key->auth_algo = (u8) auth_cap->sym.auth.algo;
p_key->is_outbound = is_outbound;
+#if ! DPDK_NO_AEAD
+ p_key->is_aead = cipher_cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD;
+#endif
ret = hash_get (cwm->algo_qp_map, key);
if (ret)
@@ -147,6 +155,20 @@ add_cdev_mapping (crypto_worker_main_t * cwm,
for (i = dev_info->capabilities; i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++)
{
+#if ! DPDK_NO_AEAD
+ if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ {
+ struct rte_cryptodev_capabilities none = { 0 };
+
+ if (check_algo_is_supported (i, NULL) != 0)
+ continue;
+
+ none.sym.auth.algo = RTE_CRYPTO_AUTH_NULL;
+
+ mapped |= add_mapping (cwm, cdev_id, qp, is_outbound, i, &none);
+ continue;
+ }
+#endif
if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
continue;
@@ -205,17 +227,23 @@ dpdk_ipsec_check_support (ipsec_sa_t * sa)
{
if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
return clib_error_return (0, "unsupported integ-alg %U with "
- "crypto-algo aes-gcm-128",
+ "crypto-alg aes-gcm-128",
format_ipsec_integ_alg, sa->integ_alg);
+#if DPDK_NO_AEAD
sa->integ_alg = IPSEC_INTEG_ALG_AES_GCM_128;
+#endif
}
- else
- {
- if (sa->integ_alg == IPSEC_INTEG_ALG_NONE ||
- sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)
- return clib_error_return (0, "unsupported integ-alg %U",
- format_ipsec_integ_alg, sa->integ_alg);
- }
+#if DPDK_NO_AEAD
+ else if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE ||
+ sa->integ_alg == IPSEC_INTEG_ALG_NONE ||
+ sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)
+#else
+ else if (sa->integ_alg == IPSEC_INTEG_ALG_NONE)
+#endif
+ return clib_error_return (0,
+ "unsupported integ-alg %U with crypto-alg %U",
+ format_ipsec_integ_alg, sa->integ_alg,
+ format_ipsec_crypto_alg, sa->crypto_alg);
return 0;
}
@@ -233,6 +261,10 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
struct rte_mempool *rmp;
i32 dev_id, ret;
u32 i, skip_master;
+#if ! DPDK_NO_AEAD
+ u32 max_sess_size = 0, sess_size;
+ i8 socket_id;
+#endif
if (check_cryptodev_queues () < 0)
{
@@ -297,9 +329,10 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
dev_conf.socket_id = rte_cryptodev_socket_id (dev_id);
dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs;
+#if DPDK_NO_AEAD
dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_SESS_OBJS;
dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE;
-
+#endif
ret = rte_cryptodev_configure (dev_id, &dev_conf);
if (ret < 0)
{
@@ -310,16 +343,26 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC;
for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
{
+#if DPDK_NO_AEAD
ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf,
dev_conf.socket_id);
+#else
+ ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf,
+ dev_conf.socket_id, NULL);
+#endif
if (ret < 0)
{
clib_warning ("cryptodev %u qp %u setup error", dev_id, qp);
goto error;
}
}
- vec_validate_aligned (dcm->cop_pools, dev_conf.socket_id,
- CLIB_CACHE_LINE_BYTES);
+ vec_validate (dcm->cop_pools, dev_conf.socket_id);
+
+#if ! DPDK_NO_AEAD
+ sess_size = rte_cryptodev_get_private_session_size (dev_id);
+ if (sess_size > max_sess_size)
+ max_sess_size = sess_size;
+#endif
if (!vec_elt (dcm->cop_pools, dev_conf.socket_id))
{
@@ -333,14 +376,14 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
DPDK_CRYPTO_CACHE_SIZE,
DPDK_CRYPTO_PRIV_SIZE,
dev_conf.socket_id);
- vec_free (pool_name);
if (!rmp)
{
- clib_warning ("failed to allocate mempool on socket %u",
- dev_conf.socket_id);
+ clib_warning ("failed to allocate %s", pool_name);
+ vec_free (pool_name);
goto error;
}
+ vec_free (pool_name);
vec_elt (dcm->cop_pools, dev_conf.socket_id) = rmp;
}
@@ -348,6 +391,51 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
DPDK_CRYPTO_NB_SESS_OBJS, DPDK_CRYPTO_CACHE_SIZE);
}
+#if ! DPDK_NO_AEAD
+ /* *INDENT-OFF* */
+ vec_foreach_index (socket_id, dcm->cop_pools)
+ {
+ u8 *pool_name;
+
+ if (!vec_elt (dcm->cop_pools, socket_id))
+ continue;
+
+ vec_validate (dcm->sess_h_pools, socket_id);
+ pool_name = format (0, "crypto_sess_h_socket%u%c",
+ socket_id, 0);
+ rmp =
+ rte_mempool_create((i8 *)pool_name, DPDK_CRYPTO_NB_SESS_OBJS,
+ rte_cryptodev_get_header_session_size (),
+ 512, 0, NULL, NULL, NULL, NULL,
+ socket_id, 0);
+ if (!rmp)
+ {
+ clib_warning ("failed to allocate %s", pool_name);
+ vec_free (pool_name);
+ goto error;
+ }
+ vec_free (pool_name);
+ vec_elt (dcm->sess_h_pools, socket_id) = rmp;
+
+ vec_validate (dcm->sess_pools, socket_id);
+ pool_name = format (0, "crypto_sess_socket%u%c",
+ socket_id, 0);
+ rmp =
+ rte_mempool_create((i8 *)pool_name, DPDK_CRYPTO_NB_SESS_OBJS,
+ max_sess_size, 512, 0, NULL, NULL, NULL, NULL,
+ socket_id, 0);
+ if (!rmp)
+ {
+ clib_warning ("failed to allocate %s", pool_name);
+ vec_free (pool_name);
+ goto error;
+ }
+ vec_free (pool_name);
+ vec_elt (dcm->sess_pools, socket_id) = rmp;
+ }
+ /* *INDENT-ON* */
+#endif
+
dpdk_esp_init ();
/* Add new next node and set as default */
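Taken together, the 17.08 hunks above replace the old one-shot per-device
session create/free with a two-step lifecycle against the new per-socket
mempools. A sketch of the full sequence, error handling elided:

struct rte_cryptodev_sym_session *sess;

/* header pool holds the generic session object, per socket */
sess = rte_cryptodev_sym_session_create (dcm->sess_h_pools[socket_id]);

/* private-data pool holds the device-specific session state */
rte_cryptodev_sym_session_init (dev_id, sess, xfs,
                                dcm->sess_pools[socket_id]);

/* ... enqueue crypto ops referencing sess ... */

rte_cryptodev_sym_session_clear (dev_id, sess);  /* undo _init   */
rte_cryptodev_sym_session_free (sess);           /* undo _create */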
diff --git a/src/plugins/dpdk/ipsec/ipsec.h b/src/plugins/dpdk/ipsec/ipsec.h
index d7940345bfc..a94dd6821af 100644
--- a/src/plugins/dpdk/ipsec/ipsec.h
+++ b/src/plugins/dpdk/ipsec/ipsec.h
@@ -53,6 +53,7 @@ typedef struct
u8 cipher_algo;
u8 auth_algo;
u8 is_outbound;
+ u8 is_aead;
} crypto_worker_qp_key_t;
typedef struct
@@ -81,6 +82,8 @@ typedef struct
typedef struct
{
+ struct rte_mempool **sess_h_pools;
+ struct rte_mempool **sess_pools;
struct rte_mempool **cop_pools;
crypto_worker_main_t *workers_main;
u8 enabled;
@@ -146,12 +149,14 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
{
struct
{
- uint8_t cipher_algo;
enum rte_crypto_sym_xform_type type;
union
{
enum rte_crypto_auth_algorithm auth;
enum rte_crypto_cipher_algorithm cipher;
+#if ! DPDK_NO_AEAD
+ enum rte_crypto_aead_algorithm aead;
+#endif
};
char *name;
} supported_algo[] =
@@ -162,15 +167,18 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
{
.type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
RTE_CRYPTO_CIPHER_AES_CBC,.name = "AES_CBC"},
+#if DPDK_NO_AEAD
{
.type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
- RTE_CRYPTO_CIPHER_AES_CTR,.name = "AES_CTR"},
+ RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"},
+#else
{
- .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
- RTE_CRYPTO_CIPHER_3DES_CBC,.name = "3DES-CBC"},
+ .type = RTE_CRYPTO_SYM_XFORM_AEAD,.aead =
+ RTE_CRYPTO_AEAD_AES_GCM,.name = "AES-GCM"},
+#endif
{
- .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
- RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"},
+ .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
+ RTE_CRYPTO_AUTH_NULL,.name = "NULL"},
{
.type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
RTE_CRYPTO_AUTH_SHA1_HMAC,.name = "HMAC-SHA1"},
@@ -183,15 +191,16 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
{
.type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
RTE_CRYPTO_AUTH_SHA512_HMAC,.name = "HMAC-SHA512"},
- {
- .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
- RTE_CRYPTO_AUTH_AES_XCBC_MAC,.name = "AES-XCBC-MAC"},
+#if DPDK_NO_AEAD
{
.type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
RTE_CRYPTO_AUTH_AES_GCM,.name = "AES-GCM"},
+#endif
{
/* tail */
- .type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED},};
+ .type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED}
+ };
+
uint32_t i = 0;
if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
@@ -203,6 +212,10 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
{
if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
cap->sym.cipher.algo == supported_algo[i].cipher) ||
+#if ! DPDK_NO_AEAD
+ (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ cap->sym.aead.algo == supported_algo[i].aead) ||
+#endif
(cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
cap->sym.auth.algo == supported_algo[i].auth))
{
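With the AEAD arm added to the match above, a 17.08 device advertising AES-GCM
as an AEAD capability now passes the check. A hypothetical call, based on the
usage visible in cli.c and ipsec.c:

struct rte_cryptodev_capabilities cap = { 0 };
char name[64] = { 0 };

cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
cap.sym.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;

/* returns 0 when the algorithm is in the supported table and, if a
 * buffer is supplied, copies its display name ("AES-GCM") into it */
if (check_algo_is_supported (&cap, name) == 0)
  ;  /* algorithm usable for the IPsec datapath */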