 extras/deprecated/dpdk-ipsec/cli.c (renamed from src/plugins/dpdk/ipsec/cli.c)                         |   0
 extras/deprecated/dpdk-ipsec/crypto_node.c (renamed from src/plugins/dpdk/ipsec/crypto_node.c)         |   0
 extras/deprecated/dpdk-ipsec/dir.dox (renamed from src/plugins/dpdk/ipsec/dir.dox)                     |   0
 extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md (renamed from src/plugins/dpdk/ipsec/dpdk_crypto_ipsec_doc.md) |   0
 extras/deprecated/dpdk-ipsec/esp_decrypt.c (renamed from src/plugins/dpdk/ipsec/esp_decrypt.c)         |   0
 extras/deprecated/dpdk-ipsec/esp_encrypt.c (renamed from src/plugins/dpdk/ipsec/esp_encrypt.c)         |   3
 extras/deprecated/dpdk-ipsec/ipsec.c (renamed from src/plugins/dpdk/ipsec/ipsec.c)                     |   0
 extras/deprecated/dpdk-ipsec/ipsec.h (renamed from src/plugins/dpdk/ipsec/ipsec.h)                     |   1
 src/plugins/dpdk/CMakeLists.txt                                                                        |   9
 src/plugins/dpdk/cryptodev/cryptodev.c                                                                 | 429
 src/plugins/dpdk/cryptodev/cryptodev_dp_api.c                                                          | 616
 11 files changed, 580 insertions(+), 478 deletions(-)
diff --git a/src/plugins/dpdk/ipsec/cli.c b/extras/deprecated/dpdk-ipsec/cli.c
index 8fdda020a77..8fdda020a77 100644
--- a/src/plugins/dpdk/ipsec/cli.c
+++ b/extras/deprecated/dpdk-ipsec/cli.c
diff --git a/src/plugins/dpdk/ipsec/crypto_node.c b/extras/deprecated/dpdk-ipsec/crypto_node.c
index 893848c05b6..893848c05b6 100644
--- a/src/plugins/dpdk/ipsec/crypto_node.c
+++ b/extras/deprecated/dpdk-ipsec/crypto_node.c
diff --git a/src/plugins/dpdk/ipsec/dir.dox b/extras/deprecated/dpdk-ipsec/dir.dox
index 05504541abb..05504541abb 100644
--- a/src/plugins/dpdk/ipsec/dir.dox
+++ b/extras/deprecated/dpdk-ipsec/dir.dox
diff --git a/src/plugins/dpdk/ipsec/dpdk_crypto_ipsec_doc.md b/extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md
index 8cf51f07c03..8cf51f07c03 100644
--- a/src/plugins/dpdk/ipsec/dpdk_crypto_ipsec_doc.md
+++ b/extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md
diff --git a/src/plugins/dpdk/ipsec/esp_decrypt.c b/extras/deprecated/dpdk-ipsec/esp_decrypt.c
index 9a782abeb94..9a782abeb94 100644
--- a/src/plugins/dpdk/ipsec/esp_decrypt.c
+++ b/extras/deprecated/dpdk-ipsec/esp_decrypt.c
diff --git a/src/plugins/dpdk/ipsec/esp_encrypt.c b/extras/deprecated/dpdk-ipsec/esp_encrypt.c
index 157c93f417e..ce1b5795995 100644
--- a/src/plugins/dpdk/ipsec/esp_encrypt.c
+++ b/extras/deprecated/dpdk-ipsec/esp_encrypt.c
@@ -117,8 +117,7 @@ dpdk_esp_encrypt_inline (vlib_main_t * vm,
{
u32 n_left_from, *from, *to_next, next_index, thread_index;
ipsec_main_t *im = &ipsec_main;
- vnet_main_t *vnm = im->vnet_main;
- vnet_interface_main_t *vim = &vnm->interface_main;
+ vnet_main_t *vnm = vnet_get_main ();
u32 thread_idx = vlib_get_thread_index ();
dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
crypto_resource_t *res = 0;
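Note: the only functional change in the relocated esp_encrypt.c is how the node obtains its vnet_main_t handle: the copy cached inside ipsec_main is replaced by a direct call to vnet_get_main (), and the now-unused vnet_interface_main_t alias is dropped. A minimal sketch of that accessor pattern (hypothetical wrapper, not the real node body):

    #include <vnet/vnet.h>

    static void
    example_get_vnet_main (void)
    {
      /* fetch the global vnet_main_t on demand instead of caching a copy */
      vnet_main_t *vnm = vnet_get_main ();
      (void) vnm;
    }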
diff --git a/src/plugins/dpdk/ipsec/ipsec.c b/extras/deprecated/dpdk-ipsec/ipsec.c
index e260ba7dcc4..e260ba7dcc4 100644
--- a/src/plugins/dpdk/ipsec/ipsec.c
+++ b/extras/deprecated/dpdk-ipsec/ipsec.c
diff --git a/src/plugins/dpdk/ipsec/ipsec.h b/extras/deprecated/dpdk-ipsec/ipsec.h
index 741674376e3..368120e18fa 100644
--- a/src/plugins/dpdk/ipsec/ipsec.h
+++ b/extras/deprecated/dpdk-ipsec/ipsec.h
@@ -18,6 +18,7 @@
#include <vnet/vnet.h>
#include <vppinfra/cache.h>
#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec_sa.h>
#undef always_inline
#include <rte_config.h>
diff --git a/src/plugins/dpdk/CMakeLists.txt b/src/plugins/dpdk/CMakeLists.txt
index 7db6b094be7..6ec48578852 100644
--- a/src/plugins/dpdk/CMakeLists.txt
+++ b/src/plugins/dpdk/CMakeLists.txt
@@ -134,24 +134,15 @@ add_vpp_plugin(dpdk
device/format.c
device/init.c
device/node.c
- ipsec/cli.c
- ipsec/crypto_node.c
- ipsec/esp_decrypt.c
- ipsec/esp_encrypt.c
- ipsec/ipsec.c
cryptodev/${DPDK_CRYPTODEV_SOURCE}.c
MULTIARCH_SOURCES
buffer.c
device/device.c
device/node.c
- ipsec/crypto_node.c
- ipsec/esp_decrypt.c
- ipsec/esp_encrypt.c
INSTALL_HEADERS
device/dpdk.h
- ipsec/ipsec.h
LINK_FLAGS
"${DPDK_LINK_FLAGS}"
diff --git a/src/plugins/dpdk/cryptodev/cryptodev.c b/src/plugins/dpdk/cryptodev/cryptodev.c
index f51a5a527dc..d87a16c37a0 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev.c
@@ -18,7 +18,7 @@
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
-#include <vnet/vnet.h>
+#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>
#include <dpdk/buffer.h>
@@ -108,7 +108,7 @@ typedef enum
typedef struct
{
- struct rte_cryptodev_sym_session *keys[CRYPTODEV_N_OP_TYPES];
+ struct rte_cryptodev_sym_session ***keys;
} cryptodev_key_t;
typedef struct
@@ -120,6 +120,7 @@ typedef struct
typedef struct
{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
struct rte_mempool *cop_pool;
struct rte_mempool *sess_pool;
struct rte_mempool *sess_priv_pool;
@@ -148,10 +149,10 @@ typedef struct
cryptodev_main_t cryptodev_main;
-static int
+static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
- cryptodev_op_type_t op_type,
- const vnet_crypto_key_t * key, u32 aad_len)
+ cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
+ u32 aad_len)
{
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
memset (xform, 0, sizeof (*xform));
@@ -176,10 +177,10 @@ prepare_aead_xform (struct rte_crypto_sym_xform *xform,
return 0;
}
-static int
+static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
cryptodev_op_type_t op_type,
- const vnet_crypto_key_t * key)
+ const vnet_crypto_key_t *key)
{
struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
vnet_crypto_key_t *key_cipher, *key_auth;
@@ -240,57 +241,7 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
return 0;
}
-static int
-cryptodev_session_create (vnet_crypto_key_t * const key,
- struct rte_mempool *sess_priv_pool,
- cryptodev_key_t * session_pair, u32 aad_len)
-{
- struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
- struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
- cryptodev_main_t *cmt = &cryptodev_main;
- cryptodev_inst_t *dev_inst;
- struct rte_cryptodev *cdev;
- int ret;
- uint8_t dev_id = 0;
-
- if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
- ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
- else
- ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
- aad_len);
- if (ret)
- return 0;
-
- if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
- prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
- else
- prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
-
- vec_foreach (dev_inst, cmt->cryptodev_inst)
- {
- dev_id = dev_inst->dev_id;
- cdev = rte_cryptodev_pmd_get_dev (dev_id);
-
- /* if the session is already configured for the driver type, avoid
- configuring it again to increase the session data's refcnt */
- if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
- session_pair->keys[1]->sess_data[cdev->driver_id].data)
- continue;
-
- ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
- xforms_enc, sess_priv_pool);
- ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
- xforms_dec, sess_priv_pool);
- if (ret < 0)
- return ret;
- }
- session_pair->keys[0]->opaque_data = aad_len;
- session_pair->keys[1]->opaque_data = aad_len;
-
- return 0;
-}
-
-static void
+static_always_inline void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
u32 n_devs, i;
@@ -306,8 +257,8 @@ cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
rte_cryptodev_sym_session_free (sess);
}
-static int
-cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key)
+static_always_inline int
+cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
vnet_crypto_alg_t alg;
if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
@@ -324,77 +275,137 @@ cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key)
return -1;
}
-static_always_inline void
-cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
- vnet_crypto_key_index_t idx, u32 aad_len)
+static_always_inline int
+cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
+ u32 aad_len)
{
cryptodev_main_t *cmt = &cryptodev_main;
cryptodev_numa_data_t *numa_data;
+ cryptodev_inst_t *dev_inst;
vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
struct rte_mempool *sess_pool, *sess_priv_pool;
- cryptodev_key_t *ckey = 0;
- int ret = 0;
-
- if (kop == VNET_CRYPTO_KEY_OP_DEL)
- {
- if (idx >= vec_len (cmt->keys))
- return;
-
- ckey = pool_elt_at_index (cmt->keys, idx);
- cryptodev_session_del (ckey->keys[0]);
- cryptodev_session_del (ckey->keys[1]);
- ckey->keys[0] = 0;
- ckey->keys[1] = 0;
- pool_put (cmt->keys, ckey);
- return;
- }
- else if (kop == VNET_CRYPTO_KEY_OP_MODIFY)
- {
- if (idx >= vec_len (cmt->keys))
- return;
-
- ckey = pool_elt_at_index (cmt->keys, idx);
-
- cryptodev_session_del (ckey->keys[0]);
- cryptodev_session_del (ckey->keys[1]);
- ckey->keys[0] = 0;
- ckey->keys[1] = 0;
- }
- else /* create key */
- pool_get_zero (cmt->keys, ckey);
-
- /* do not create session for unsupported alg */
- if (cryptodev_check_supported_vnet_alg (key))
- return;
+ cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
+ struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
+ struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
+ struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
+ u32 numa_node = vm->numa_node;
+ int ret;
- numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
+ numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
sess_pool = numa_data->sess_pool;
sess_priv_pool = numa_data->sess_priv_pool;
- ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
- if (!ckey->keys[0])
+ sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
+ rte_cryptodev_sym_session_create (sess_pool);
+ if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
{
ret = -1;
goto clear_key;
}
- ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
- if (!ckey->keys[1])
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
+ rte_cryptodev_sym_session_create (sess_pool);
+ if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
{
ret = -1;
goto clear_key;
}
- ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);
+ if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
+ else
+ ret =
+ prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
+ if (ret)
+ return 0;
+
+ if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
+ else
+ prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
+
+ vec_foreach (dev_inst, cmt->cryptodev_inst)
+ {
+ u32 dev_id = dev_inst->dev_id;
+ struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);
+
+ /* if the session is already configured for the driver type, avoid
+ configuring it again to increase the session data's refcnt */
+ if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]
+ ->sess_data[cdev->driver_id]
+ .data &&
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[cdev->driver_id].data)
+ continue;
+
+ ret = rte_cryptodev_sym_session_init (
+ dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
+ sess_priv_pool);
+ ret = rte_cryptodev_sym_session_init (
+ dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
+ sess_priv_pool);
+ if (ret < 0)
+ return ret;
+ }
+
+ sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
+
+ CLIB_MEMORY_STORE_BARRIER ();
+ ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
+ sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
+ ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT];
clear_key:
if (ret != 0)
{
- cryptodev_session_del (ckey->keys[0]);
- cryptodev_session_del (ckey->keys[1]);
- memset (ckey, 0, sizeof (*ckey));
- pool_put (cmt->keys, ckey);
+ cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
+ cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
+ }
+ return ret;
+}
+
+static_always_inline void
+cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx, u32 aad_len)
+{
+ cryptodev_main_t *cmt = &cryptodev_main;
+ vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
+ cryptodev_key_t *ckey = 0;
+ u32 i;
+
+ vec_validate (cmt->keys, idx);
+ ckey = vec_elt_at_index (cmt->keys, idx);
+
+ if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
+ {
+ if (idx >= vec_len (cmt->keys))
+ return;
+
+ vec_foreach_index (i, cmt->per_numa_data)
+ {
+ if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
+ {
+ cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
+ cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);
+
+ CLIB_MEMORY_STORE_BARRIER ();
+ ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
+ ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
+ }
+ }
+ return;
}
+
+ /* create key */
+
+ /* do not create session for unsupported alg */
+ if (cryptodev_check_supported_vnet_alg (key))
+ return;
+
+ vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
+ vec_foreach_index (i, ckey->keys)
+ vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}
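Note: after this hunk a key no longer owns a single session pair in a pool. cmt->keys is a plain vector indexed by key index, and each entry holds a two-level vector of sessions indexed by NUMA node and then by CRYPTODEV_OP_TYPE_*. On key creation, cryptodev_sess_handler () only sizes those vectors; the sessions themselves are built lazily by cryptodev_session_create () from the calling worker's node-local mempools. A condensed sketch of the lookup/create pattern the enqueue paths below rely on (assumes key_index, op_type and aad_len are in scope; error handling trimmed):

    cryptodev_main_t *cmt = &cryptodev_main;
    cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, key_index);
    u32 numa = vm->numa_node;
    struct rte_cryptodev_sym_session *sess;

    /* first use of this key on this NUMA node: build both sessions */
    if (ckey->keys[numa][op_type] == 0 &&
        cryptodev_session_create (vm, key_index, aad_len) < 0)
      return -1; /* the real code marks the frame FAIL_ENGINE_ERR first */

    sess = ckey->keys[numa][op_type];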
/*static*/ void
@@ -474,11 +485,11 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
vnet_crypto_async_frame_elt_t *fe;
+ struct rte_cryptodev_sym_session *sess = 0;
cryptodev_op_t **cop;
u32 *bi;
u32 n_enqueue, n_elts;
- cryptodev_key_t *key;
- u32 last_key_index;
+ u32 last_key_index = ~0;
if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
return -1;
@@ -505,9 +516,6 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
cop[0]->frame = frame;
cop[0]->n_elts = n_elts;
- key = pool_elt_at_index (cmt->keys, fe->key_index);
- last_key_index = fe->key_index;
-
while (n_elts)
{
vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
@@ -525,8 +533,20 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
}
if (last_key_index != fe->key_index)
{
- key = pool_elt_at_index (cmt->keys, fe->key_index);
+ cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
last_key_index = fe->key_index;
+
+ if (key->keys[vm->numa_node][op_type] == 0)
+ {
+ if (PREDICT_FALSE (
+ cryptodev_session_create (vm, last_key_index, 0) < 0))
+ {
+ cryptodev_mark_frame_err_status (
+ frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ return -1;
+ }
+ }
+ sess = key->keys[vm->numa_node][op_type];
}
sop->m_src = rte_mbuf_from_vlib_buffer (b);
@@ -542,7 +562,7 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
integ_offset = 0;
crypto_offset = offset_diff;
}
- sop->session = key->keys[op_type];
+ sop->session = sess;
sop->cipher.data.offset = crypto_offset;
sop->cipher.data.length = fe->crypto_total_length;
sop->auth.data.offset = integ_offset;
@@ -586,12 +606,11 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
vnet_crypto_async_frame_elt_t *fe;
+ struct rte_cryptodev_sym_session *sess = 0;
cryptodev_op_t **cop;
u32 *bi;
u32 n_enqueue = 0, n_elts;
- cryptodev_key_t *key;
- u32 last_key_index;
- u8 sess_aad_len;
+ u32 last_key_index = ~0;
if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
return -1;
@@ -618,13 +637,6 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
cop[0]->frame = frame;
cop[0]->n_elts = n_elts;
- key = pool_elt_at_index (cmt->keys, fe->key_index);
- last_key_index = fe->key_index;
- sess_aad_len = (u8) key->keys[op_type]->opaque_data;
- if (PREDICT_FALSE (sess_aad_len != aad_len))
- cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
- fe->key_index, aad_len);
-
while (n_elts)
{
vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
@@ -640,14 +652,35 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
}
if (last_key_index != fe->key_index)
{
- key = pool_elt_at_index (cmt->keys, fe->key_index);
- sess_aad_len = (u8) key->keys[op_type]->opaque_data;
- if (PREDICT_FALSE (sess_aad_len != aad_len))
+ cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
+
+ last_key_index = fe->key_index;
+ if (key->keys[vm->numa_node][op_type] == 0)
{
- cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
+ if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
+ aad_len) < 0))
+ {
+ cryptodev_mark_frame_err_status (
+ frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ return -1;
+ }
+ }
+ else if (PREDICT_FALSE (
+ key->keys[vm->numa_node][op_type]->opaque_data !=
+ aad_len))
+ {
+ cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
fe->key_index, aad_len);
+ if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
+ aad_len) < 0))
+ {
+ cryptodev_mark_frame_err_status (
+ frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ return -1;
+ }
}
- last_key_index = fe->key_index;
+
+ sess = key->keys[vm->numa_node][op_type];
}
sop->m_src = rte_mbuf_from_vlib_buffer (b);
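Note: for GCM the session's opaque_data still records the AAD length it was built with, but an AAD-length mismatch is now handled by deleting the per-NUMA sessions (cryptodev_sess_handler () with VNET_CRYPTO_KEY_OP_DEL) and recreating them, instead of issuing a key-modify. A condensed sketch of that branch, assuming the same locals as in the sketch above:

    /* cached session exists but was built for a different AAD length */
    if (ckey->keys[numa][op_type] != 0 &&
        (u8) ckey->keys[numa][op_type]->opaque_data != aad_len)
      {
        cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL, key_index, aad_len);
        if (cryptodev_session_create (vm, key_index, aad_len) < 0)
          return -1; /* the real code marks the frame FAIL_ENGINE_ERR first */
      }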
@@ -662,7 +695,7 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
crypto_offset = 0;
}
- sop->session = key->keys[op_type];
+ sop->session = sess;
sop->aead.aad.data = cop[0]->aad;
sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
sop->aead.data.length = fe->crypto_total_length;
@@ -1072,12 +1105,7 @@ cryptodev_count_queue (u32 numa)
for (i = 0; i < n_cryptodev; i++)
{
rte_cryptodev_info_get (i, &info);
- if (rte_cryptodev_socket_id (i) != numa)
- {
- clib_warning ("DPDK crypto resource %s is in different numa node "
- "as %u, ignored", info.device->name, numa);
- continue;
- }
+
/* only device support symmetric crypto is used */
if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
continue;
@@ -1093,8 +1121,6 @@ cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
struct rte_cryptodev_info info;
struct rte_cryptodev *cdev;
cryptodev_main_t *cmt = &cryptodev_main;
- cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
- vm->numa_node);
u32 i;
int ret;
@@ -1116,7 +1142,7 @@ cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
{
struct rte_cryptodev_config cfg;
- cfg.socket_id = vm->numa_node;
+ cfg.socket_id = info.device->numa_node;
cfg.nb_queue_pairs = info.max_nb_queue_pairs;
rte_cryptodev_configure (cryptodev_id, &cfg);
@@ -1127,12 +1153,12 @@ cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
int ret;
- qp_cfg.mp_session = numa_data->sess_pool;
- qp_cfg.mp_session_private = numa_data->sess_priv_pool;
+ qp_cfg.mp_session = 0;
+ qp_cfg.mp_session_private = 0;
qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
- vm->numa_node);
+ info.device->numa_node);
if (ret)
break;
}
@@ -1275,58 +1301,6 @@ dpdk_cryptodev_init (vlib_main_t * vm)
/* A total of 4 times n_worker threads * frame size as crypto ops */
n_cop_elts = max_pow2 ((u64)n_workers * CRYPTODEV_NB_CRYPTO_OPS);
- vec_validate (cmt->per_numa_data, vm->numa_node);
- numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
-
- /* create session pool for the numa node */
- name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
- mp = rte_cryptodev_sym_session_pool_create ((char *) name,
- CRYPTODEV_NB_SESSION,
- 0, 0, 0, numa);
- if (!mp)
- {
- error = clib_error_return (0, "Not enough memory for mp %s", name);
- goto err_handling;
- }
- vec_free (name);
-
- numa_data->sess_pool = mp;
-
- /* create session private pool for the numa node */
- name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
- mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
- 0, NULL, NULL, NULL, NULL, numa, 0);
- if (!mp)
- {
- error = clib_error_return (0, "Not enough memory for mp %s", name);
- vec_free (name);
- goto err_handling;
- }
-
- vec_free (name);
-
- numa_data->sess_priv_pool = mp;
-
- /* create cryptodev op pool */
- name = format (0, "cryptodev_op_pool_%u%c", numa, 0);
-
- mp = rte_mempool_create ((char *) name, n_cop_elts,
- sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2,
- sizeof (struct rte_crypto_op_pool_private), NULL,
- NULL, crypto_op_init, NULL, numa, 0);
- if (!mp)
- {
- error = clib_error_return (0, "Not enough memory for mp %s", name);
- vec_free (name);
- goto err_handling;
- }
-
- priv = rte_mempool_get_priv (mp);
- priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
- priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
- vec_free (name);
- numa_data->cop_pool = mp;
-
/* probe all cryptodev devices and get queue info */
if (cryptodev_probe (vm, n_workers) < 0)
{
@@ -1342,6 +1316,7 @@ dpdk_cryptodev_init (vlib_main_t * vm)
for (i = skip_master; i < tm->n_vlib_mains; i++)
{
ptd = cmt->per_thread_data + i;
+
cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
name = format (0, "frames_ring_%u%c", i, 0);
ptd->ring = rte_ring_create((char *) name, CRYPTODEV_NB_CRYPTO_OPS,
@@ -1354,11 +1329,67 @@ dpdk_cryptodev_init (vlib_main_t * vm)
}
vec_validate (ptd->cops, VNET_CRYPTO_FRAME_SIZE - 1);
vec_free(name);
+
+ numa = vlib_mains[i]->numa_node;
+
+ vec_validate (cmt->per_numa_data, numa);
+ numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
+
+ if (numa_data->sess_pool)
+ continue;
+
+ /* create session pool for the numa node */
+ name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
+ mp = rte_cryptodev_sym_session_pool_create (
+ (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
+ if (!mp)
+ {
+ error = clib_error_return (0, "Not enough memory for mp %s", name);
+ goto err_handling;
+ }
+ vec_free (name);
+
+ numa_data->sess_pool = mp;
+
+ /* create session private pool for the numa node */
+ name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
+ mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
+ 0, NULL, NULL, NULL, NULL, numa, 0);
+ if (!mp)
+ {
+ error = clib_error_return (0, "Not enough memory for mp %s", name);
+ vec_free (name);
+ goto err_handling;
+ }
+
+ vec_free (name);
+
+ numa_data->sess_priv_pool = mp;
+
+ /* create cryptodev op pool */
+ name = format (0, "cryptodev_op_pool_%u%c", numa, 0);
+
+ mp = rte_mempool_create ((char *) name, n_cop_elts,
+ sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2,
+ sizeof (struct rte_crypto_op_pool_private),
+ NULL, NULL, crypto_op_init, NULL, numa, 0);
+ if (!mp)
+ {
+ error = clib_error_return (0, "Not enough memory for mp %s", name);
+ vec_free (name);
+ goto err_handling;
+ }
+
+ priv = rte_mempool_get_priv (mp);
+ priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
+ priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ vec_free (name);
+ numa_data->cop_pool = mp;
}
/* register handler */
- eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
- "DPDK Cryptodev Engine");
+ eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
+ "DPDK Cryptodev Engine");
#define _(a, b, c, d, e, f) \
vnet_crypto_register_async_handler \
@@ -1388,6 +1419,12 @@ dpdk_cryptodev_init (vlib_main_t * vm)
vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
+ /* this engine is only enabled when cryptodev device(s) are presented in
+ * startup.conf. Assume it is wanted to be used, turn on async mode here.
+ */
+ vnet_crypto_request_async_mode (1);
+ ipsec_set_async_mode (1);
+
return 0;
err_handling:
diff --git a/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c b/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c
index 0a33d25bd61..420bb89dc0a 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c
@@ -18,7 +18,7 @@
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
-#include <vnet/vnet.h>
+#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>
#include <dpdk/buffer.h>
@@ -84,14 +84,13 @@ typedef enum
typedef struct
{
- union rte_cryptodev_session_ctx keys[CRYPTODEV_N_OP_TYPES];
+ union rte_cryptodev_session_ctx **keys;
} cryptodev_key_t;
typedef struct
{
u32 dev_id;
u32 q_id;
- struct rte_crypto_raw_dp_ctx *raw_dp_ctx_buffer;
char *desc;
} cryptodev_inst_t;
@@ -114,6 +113,7 @@ typedef struct
u16 cryptodev_id;
u16 cryptodev_q;
u16 inflight;
+ union rte_cryptodev_session_ctx reset_sess; /* session data for reset ctx */
} cryptodev_engine_thread_t;
typedef struct
@@ -129,10 +129,10 @@ typedef struct
cryptodev_main_t cryptodev_main;
-static int
+static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
- cryptodev_op_type_t op_type,
- const vnet_crypto_key_t * key, u32 aad_len)
+ cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
+ u32 aad_len)
{
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
memset (xform, 0, sizeof (*xform));
@@ -157,10 +157,10 @@ prepare_aead_xform (struct rte_crypto_sym_xform *xform,
return 0;
}
-static int
+static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
cryptodev_op_type_t op_type,
- const vnet_crypto_key_t * key)
+ const vnet_crypto_key_t *key)
{
struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
vnet_crypto_key_t *key_cipher, *key_auth;
@@ -221,18 +221,57 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
return 0;
}
-static int
-cryptodev_session_create (vnet_crypto_key_t * const key,
- struct rte_mempool *sess_priv_pool,
- cryptodev_key_t * session_pair, u32 aad_len)
+static_always_inline void
+cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
+{
+ u32 n_devs, i;
+
+ if (sess == NULL)
+ return;
+
+ n_devs = rte_cryptodev_count ();
+
+ for (i = 0; i < n_devs; i++)
+ rte_cryptodev_sym_session_clear (i, sess);
+
+ rte_cryptodev_sym_session_free (sess);
+}
+
+static_always_inline int
+cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
+ u32 aad_len)
{
- struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
- struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
cryptodev_main_t *cmt = &cryptodev_main;
+ cryptodev_numa_data_t *numa_data;
cryptodev_inst_t *dev_inst;
- struct rte_cryptodev *cdev;
+ vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
+ struct rte_mempool *sess_pool, *sess_priv_pool;
+ cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
+ struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
+ struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
+ struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
+ u32 numa_node = vm->numa_node;
int ret;
- uint8_t dev_id = 0;
+
+ numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
+ sess_pool = numa_data->sess_pool;
+ sess_priv_pool = numa_data->sess_priv_pool;
+
+ sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
+ rte_cryptodev_sym_session_create (sess_pool);
+ if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
+ {
+ ret = -1;
+ goto clear_key;
+ }
+
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
+ rte_cryptodev_sym_session_create (sess_pool);
+ if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
+ {
+ ret = -1;
+ goto clear_key;
+ }
if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
@@ -249,44 +288,39 @@ cryptodev_session_create (vnet_crypto_key_t * const key,
vec_foreach (dev_inst, cmt->cryptodev_inst)
{
- dev_id = dev_inst->dev_id;
- cdev = rte_cryptodev_pmd_get_dev (dev_id);
+ u32 dev_id = dev_inst->dev_id;
+ struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);
/* if the session is already configured for the driver type, avoid
configuring it again to increase the session data's refcnt */
- if (session_pair->keys[0].crypto_sess->sess_data[cdev->driver_id].data &&
- session_pair->keys[1].crypto_sess->sess_data[cdev->driver_id].data)
+ if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[cdev->driver_id].data &&
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[cdev->driver_id].data)
continue;
- ret = rte_cryptodev_sym_session_init (dev_id,
- session_pair->keys[0].crypto_sess,
- xforms_enc, sess_priv_pool);
- ret = rte_cryptodev_sym_session_init (dev_id,
- session_pair->keys[1].crypto_sess,
- xforms_dec, sess_priv_pool);
+ ret = rte_cryptodev_sym_session_init (
+ dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc, sess_priv_pool);
+ ret = rte_cryptodev_sym_session_init (
+ dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec, sess_priv_pool);
if (ret < 0)
return ret;
}
- session_pair->keys[0].crypto_sess->opaque_data = aad_len;
- session_pair->keys[1].crypto_sess->opaque_data = aad_len;
- return 0;
-}
+ sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
-static void
-cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
-{
- u32 n_devs, i;
-
- if (sess == NULL)
- return;
-
- n_devs = rte_cryptodev_count ();
+ CLIB_MEMORY_STORE_BARRIER ();
+ ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess =
+ sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
+ ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess =
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT];
- for (i = 0; i < n_devs; i++)
- rte_cryptodev_sym_session_clear (i, sess);
-
- rte_cryptodev_sym_session_free (sess);
+clear_key:
+ if (ret != 0)
+ {
+ cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
+ cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
+ }
+ return ret;
}
static int
@@ -312,72 +346,44 @@ cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
vnet_crypto_key_index_t idx, u32 aad_len)
{
cryptodev_main_t *cmt = &cryptodev_main;
- cryptodev_numa_data_t *numa_data;
vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
- struct rte_mempool *sess_pool, *sess_priv_pool;
cryptodev_key_t *ckey = 0;
- int ret = 0;
+ u32 i;
+
+ vec_validate (cmt->keys, idx);
+ ckey = vec_elt_at_index (cmt->keys, idx);
- if (kop == VNET_CRYPTO_KEY_OP_DEL)
+ if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
{
if (idx >= vec_len (cmt->keys))
return;
- ckey = pool_elt_at_index (cmt->keys, idx);
- cryptodev_session_del (ckey->keys[0].crypto_sess);
- cryptodev_session_del (ckey->keys[1].crypto_sess);
- ckey->keys[0].crypto_sess = 0;
- ckey->keys[1].crypto_sess = 0;
- pool_put (cmt->keys, ckey);
+ vec_foreach_index (i, cmt->per_numa_data)
+ {
+ if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess)
+ {
+ cryptodev_session_del (
+ ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess);
+ cryptodev_session_del (
+ ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess);
+
+ CLIB_MEMORY_STORE_BARRIER ();
+ ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess = 0;
+ ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess = 0;
+ }
+ }
return;
}
- else if (kop == VNET_CRYPTO_KEY_OP_MODIFY)
- {
- if (idx >= vec_len (cmt->keys))
- return;
-
- ckey = pool_elt_at_index (cmt->keys, idx);
- cryptodev_session_del (ckey->keys[0].crypto_sess);
- cryptodev_session_del (ckey->keys[1].crypto_sess);
- ckey->keys[0].crypto_sess = 0;
- ckey->keys[1].crypto_sess = 0;
- }
- else /* create key */
- pool_get_zero (cmt->keys, ckey);
+ /* create key */
/* do not create session for unsupported alg */
if (cryptodev_check_supported_vnet_alg (key))
return;
- numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
- sess_pool = numa_data->sess_pool;
- sess_priv_pool = numa_data->sess_priv_pool;
-
- ckey->keys[0].crypto_sess = rte_cryptodev_sym_session_create (sess_pool);
- if (!ckey->keys[0].crypto_sess)
- {
- ret = -1;
- goto clear_key;
- }
-
- ckey->keys[1].crypto_sess = rte_cryptodev_sym_session_create (sess_pool);
- if (!ckey->keys[1].crypto_sess)
- {
- ret = -1;
- goto clear_key;
- }
-
- ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);
-
-clear_key:
- if (ret != 0)
- {
- cryptodev_session_del (ckey->keys[0].crypto_sess);
- cryptodev_session_del (ckey->keys[1].crypto_sess);
- memset (ckey, 0, sizeof (*ckey));
- pool_put (cmt->keys, ckey);
- }
+ vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
+ vec_foreach_index (i, ckey->keys)
+ vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}
/*static*/ void
@@ -449,13 +455,12 @@ compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t * fe, i16 * min_ofs,
return ofs.raw;
}
-/* Reset cryptodev dp context to previous queue pair state */
static_always_inline void
-cryptodev_reset_ctx (u16 cdev_id, u16 qid, struct rte_crypto_raw_dp_ctx *ctx)
+cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
{
- union rte_cryptodev_session_ctx session_ctx = {.crypto_sess = NULL };
-
- rte_cryptodev_configure_raw_dp_ctx (cdev_id, qid, ctx, ~0, session_ctx, 0);
+ rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q,
+ cet->ctx, RTE_CRYPTO_OP_WITH_SESSION,
+ cet->reset_sess, 0);
}
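Note: in the raw data-path variant the context is no longer reset with an empty session_ctx. Every engine thread now owns a throwaway AES-GCM session, cet->reset_sess (built by create_reset_sess () later in this diff), and cryptodev_reset_ctx () re-arms the context with it whenever a queue pair is (re)assigned or an enqueue fails. A sketch of the consolidated failure path both enqueue functions share from here on (assumes the enqueue locals frame and cet):

    error_exit:
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      cryptodev_reset_ctx (cet); /* re-arm cet->ctx with cet->reset_sess */
      return -1;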
static_always_inline int
@@ -470,7 +475,6 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
vlib_buffer_t **b;
u32 n_elts;
- cryptodev_key_t *key;
u32 last_key_index = ~0;
i16 min_ofs;
u32 max_end;
@@ -506,20 +510,24 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
if (PREDICT_FALSE (last_key_index != fe->key_index))
{
- key = pool_elt_at_index (cmt->keys, fe->key_index);
- last_key_index = fe->key_index;
+ cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
- if (PREDICT_FALSE
- (rte_cryptodev_configure_raw_dp_ctx
- (cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
- RTE_CRYPTO_OP_WITH_SESSION, key->keys[op_type], 1) < 0))
+ if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
+ 0))
{
- cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
- cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q,
- cet->ctx);
- return -1;
+ status = cryptodev_session_create (vm, fe->key_index, 0);
+ if (PREDICT_FALSE (status < 0))
+ goto error_exit;
}
+
+ status = rte_cryptodev_configure_raw_dp_ctx (
+ cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
+ RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
+ /*is_update */ 1);
+ if (PREDICT_FALSE (status < 0))
+ goto error_exit;
+
+ last_key_index = fe->key_index;
}
cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);
@@ -547,27 +555,15 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
{
vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs;
- if (cryptodev_frame_build_sgl
- (vm, cmt->iova_mode, vec, &n_seg, b[0],
- max_end - min_ofs - vec->len) < 0)
- {
- cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
- cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q,
- cet->ctx);
- return -1;
- }
+ if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
+ max_end - min_ofs - vec->len) < 0)
+ goto error_exit;
}
status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
&digest_vec, 0, (void *) frame);
- if (status < 0)
- {
- cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
- cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx);
- return -1;
- }
+ if (PREDICT_FALSE (status < 0))
+ goto error_exit;
b++;
fe++;
@@ -577,12 +573,18 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
if (PREDICT_FALSE (status < 0))
{
- cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx);
+ cryptodev_reset_ctx (cet);
return -1;
}
cet->inflight += frame->n_elts;
return 0;
+
+error_exit:
+ cryptodev_mark_frame_err_status (frame,
+ VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ cryptodev_reset_ctx (cet);
+ return -1;
}
static_always_inline int
@@ -595,15 +597,12 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
vnet_crypto_async_frame_elt_t *fe;
vlib_buffer_t **b;
u32 n_elts;
- cryptodev_key_t *key;
- u32 last_key_index = ~0;
union rte_crypto_sym_ofs cofs;
struct rte_crypto_vec *vec;
struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
- u8 sess_aad_len = 0;
+ u32 last_key_index = ~0;
int status;
-
n_elts = frame->n_elts;
if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
@@ -631,28 +630,36 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
vlib_prefetch_buffer_header (b[1], LOAD);
}
- if (last_key_index != fe->key_index)
+ if (PREDICT_FALSE (last_key_index != fe->key_index))
{
- key = pool_elt_at_index (cmt->keys, fe->key_index);
- sess_aad_len = (u8) key->keys[op_type].crypto_sess->opaque_data;
- if (PREDICT_FALSE (sess_aad_len != aad_len))
+ cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
+
+ if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess ==
+ 0))
{
- cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
- fe->key_index, aad_len);
+ status = cryptodev_session_create (vm, fe->key_index, aad_len);
+ if (PREDICT_FALSE (status < 0))
+ goto error_exit;
}
- last_key_index = fe->key_index;
- if (PREDICT_FALSE
- (rte_cryptodev_configure_raw_dp_ctx
- (cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
- RTE_CRYPTO_OP_WITH_SESSION, key->keys[op_type], 1) < 0))
+ if (PREDICT_FALSE ((u8) key->keys[vm->numa_node][op_type]
+ .crypto_sess->opaque_data != aad_len))
{
- cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
- cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q,
- cet->ctx);
- return -1;
+ cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
+ fe->key_index, aad_len);
+ status = cryptodev_session_create (vm, fe->key_index, aad_len);
+ if (PREDICT_FALSE (status < 0))
+ goto error_exit;
}
+
+ status = rte_cryptodev_configure_raw_dp_ctx (
+ cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
+ RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type],
+ /*is_update */ 1);
+ if (PREDICT_FALSE (status < 0))
+ goto error_exit;
+
+ last_key_index = fe->key_index;
}
if (cmt->iova_mode == RTE_IOVA_VA)
@@ -692,31 +699,21 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
{
- vec[0].len = b[0]->current_data +
- b[0]->current_length - fe->crypto_start_offset;
- if (cryptodev_frame_build_sgl
- (vm, cmt->iova_mode, vec, &n_seg, b[0],
- fe->crypto_total_length - vec[0].len) < 0)
- {
- cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
- cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q,
- cet->ctx);
- return -1;
- }
+ vec[0].len = b[0]->current_data + b[0]->current_length -
+ fe->crypto_start_offset;
+ status =
+ cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
+ fe->crypto_total_length - vec[0].len);
+ if (status < 0)
+ goto error_exit;
}
status =
- rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs,
- &iv_vec, &digest_vec, &aad_vec,
- (void *) frame);
+ rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
+ &digest_vec, &aad_vec, (void *) frame);
if (PREDICT_FALSE (status < 0))
- {
- cryptodev_mark_frame_err_status (frame,
- VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
- cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx);
- return -1;
- }
+ goto error_exit;
+
fe++;
b++;
n_elts--;
@@ -724,14 +721,17 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
if (PREDICT_FALSE (status < 0))
- {
- cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx);
- return -1;
- }
+ goto error_exit;
cet->inflight += frame->n_elts;
return 0;
+
+error_exit:
+ cryptodev_mark_frame_err_status (frame,
+ VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ cryptodev_reset_ctx (cet);
+ return -1;
}
static u32
@@ -999,7 +999,7 @@ cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
cet->cryptodev_id = cinst->dev_id;
cet->cryptodev_q = cinst->q_id;
- cet->ctx = cinst->raw_dp_ctx_buffer;
+ cryptodev_reset_ctx (cet);
clib_spinlock_unlock (&cmt->tlock);
break;
case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
@@ -1024,7 +1024,7 @@ cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
cinst = cmt->cryptodev_inst + cryptodev_inst_index;
cet->cryptodev_id = cinst->dev_id;
cet->cryptodev_q = cinst->q_id;
- cet->ctx = cinst->raw_dp_ctx_buffer;
+ cryptodev_reset_ctx (cet);
clib_spinlock_unlock (&cmt->tlock);
break;
default:
@@ -1214,12 +1214,6 @@ cryptodev_count_queue (u32 numa)
for (i = 0; i < n_cryptodev; i++)
{
rte_cryptodev_info_get (i, &info);
- if (rte_cryptodev_socket_id (i) != numa)
- {
- clib_warning ("DPDK crypto resource %s is in different numa node "
- "as %u, ignored", info.device->name, numa);
- continue;
- }
q_count += info.max_nb_queue_pairs;
}
@@ -1229,16 +1223,12 @@ cryptodev_count_queue (u32 numa)
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
+ struct rte_cryptodev_config cfg;
struct rte_cryptodev_info info;
- struct rte_cryptodev *cdev;
cryptodev_main_t *cmt = &cryptodev_main;
- cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
- vm->numa_node);
- u32 dp_size = 0;
u32 i;
int ret;
- cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
rte_cryptodev_info_get (cryptodev_id, &info);
if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
@@ -1246,46 +1236,41 @@ cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
ret = check_cryptodev_alg_support (cryptodev_id);
if (ret != 0)
- return ret;
+ {
+ clib_warning (
+ "Cryptodev: device %u does not support required algorithms",
+ cryptodev_id);
+ return ret;
+ }
+ cfg.socket_id = info.device->numa_node;
+ cfg.nb_queue_pairs = info.max_nb_queue_pairs;
+ rte_cryptodev_configure (cryptodev_id, &cfg);
- /** If the device is already started, we reuse it, otherwise configure
- * both the device and queue pair.
- **/
- if (!cdev->data->dev_started)
+ for (i = 0; i < info.max_nb_queue_pairs; i++)
{
- struct rte_cryptodev_config cfg;
+ struct rte_cryptodev_qp_conf qp_cfg;
- cfg.socket_id = vm->numa_node;
- cfg.nb_queue_pairs = info.max_nb_queue_pairs;
+ qp_cfg.mp_session = 0;
+ qp_cfg.mp_session_private = 0;
+ qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
- rte_cryptodev_configure (cryptodev_id, &cfg);
-
- for (i = 0; i < info.max_nb_queue_pairs; i++)
+ ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
+ info.device->numa_node);
+ if (ret)
{
- struct rte_cryptodev_qp_conf qp_cfg;
-
- qp_cfg.mp_session = numa_data->sess_pool;
- qp_cfg.mp_session_private = numa_data->sess_priv_pool;
- qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
-
- ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
- vm->numa_node);
- if (ret)
- break;
+ clib_warning ("Cryptodev: Configure device %u queue %u failed %d",
+ cryptodev_id, i, ret);
+ break;
}
- if (i != info.max_nb_queue_pairs)
- return -1;
-
- /* start the device */
- rte_cryptodev_start (i);
}
- ret = rte_cryptodev_get_raw_dp_ctx_size (cryptodev_id);
- if (ret < 0)
+ if (i != info.max_nb_queue_pairs)
return -1;
- dp_size = ret;
+
+ /* start the device */
+ rte_cryptodev_start (cryptodev_id);
for (i = 0; i < info.max_nb_queue_pairs; i++)
{
@@ -1294,9 +1279,6 @@ cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
cdev_inst->dev_id = cryptodev_id;
cdev_inst->q_id = i;
- vec_validate_aligned (cdev_inst->raw_dp_ctx_buffer, dp_size, 8);
- cryptodev_reset_ctx (cdev_inst->dev_id, cdev_inst->q_id,
- cdev_inst->raw_dp_ctx_buffer);
snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
"%s_q%u", info.device->name, i);
@@ -1345,22 +1327,24 @@ cryptodev_probe (vlib_main_t *vm, u32 n_workers)
return 0;
}
-static int
-cryptodev_get_session_sz (vlib_main_t *vm, u32 n_workers)
+static void
+cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz)
{
- u32 sess_data_sz = 0, i;
-
- if (rte_cryptodev_count () == 0)
- return -1;
+ cryptodev_main_t *cmt = &cryptodev_main;
+ cryptodev_inst_t *cinst;
+ u32 max_sess = 0, max_dp = 0;
- for (i = 0; i < rte_cryptodev_count (); i++)
+ vec_foreach (cinst, cmt->cryptodev_inst)
{
- u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);
+ u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id);
+ u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);
- sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
+ max_sess = clib_max (sess_sz, max_sess);
+ max_dp = clib_max (dp_sz, max_dp);
}
- return sess_data_sz;
+ *max_sess_sz = max_sess;
+ *max_dp_sz = max_dp;
}
static void
@@ -1384,7 +1368,74 @@ dpdk_disable_cryptodev_engine (vlib_main_t * vm)
rte_free (ptd->aad_buf);
if (ptd->cached_frame)
rte_ring_free (ptd->cached_frame);
+ if (ptd->reset_sess.crypto_sess)
+ {
+ struct rte_mempool *mp =
+ rte_mempool_from_obj ((void *) ptd->reset_sess.crypto_sess);
+
+ rte_mempool_free (mp);
+ ptd->reset_sess.crypto_sess = 0;
+ }
+ }
+}
+
+static clib_error_t *
+create_reset_sess (cryptodev_engine_thread_t *ptd, u32 lcore, u32 numa,
+ u32 sess_sz)
+{
+ struct rte_crypto_sym_xform xform = { 0 };
+ struct rte_crypto_aead_xform *aead_xform = &xform.aead;
+ struct rte_cryptodev_sym_session *sess;
+ struct rte_mempool *mp = 0;
+ u8 key[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ u8 *name = 0;
+ clib_error_t *error = 0;
+
+ /* create session pool for the numa node */
+ name = format (0, "vcryptodev_s_reset_%u_%u", numa, lcore);
+ mp = rte_cryptodev_sym_session_pool_create ((char *) name, 2, sess_sz, 0, 0,
+ numa);
+ if (!mp)
+ {
+ error = clib_error_return (0, "Not enough memory for mp %s", name);
+ goto error_exit;
+ }
+ vec_free (name);
+
+ xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
+ aead_xform->op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ aead_xform->aad_length = 8;
+ aead_xform->digest_length = 16;
+ aead_xform->iv.offset = 0;
+ aead_xform->iv.length = 12;
+ aead_xform->key.data = key;
+ aead_xform->key.length = 16;
+
+ sess = rte_cryptodev_sym_session_create (mp);
+ if (!sess)
+ {
+ error = clib_error_return (0, "failed to create session");
+ goto error_exit;
+ }
+
+ if (rte_cryptodev_sym_session_init (ptd->cryptodev_id, sess, &xform, mp) < 0)
+ {
+ error = clib_error_return (0, "failed to create session private");
+ goto error_exit;
}
+
+ ptd->reset_sess.crypto_sess = sess;
+
+ return 0;
+
+error_exit:
+ if (mp)
+ rte_mempool_free (mp);
+ if (name)
+ vec_free (name);
+
+ return error;
}
clib_error_t *
@@ -1398,7 +1449,7 @@ dpdk_cryptodev_init (vlib_main_t * vm)
u32 skip_master = vlib_num_workers () > 0;
u32 n_workers = tm->n_vlib_mains - skip_master;
u32 numa = vm->numa_node;
- i32 sess_sz;
+ u32 sess_sz, dp_sz;
u32 eidx;
u32 i;
u8 *name = 0;
@@ -1406,44 +1457,7 @@ dpdk_cryptodev_init (vlib_main_t * vm)
cmt->iova_mode = rte_eal_iova_mode ();
- sess_sz = cryptodev_get_session_sz(vm, n_workers);
- if (sess_sz < 0)
- {
- error = clib_error_return (0, "Not enough cryptodevs");
- return error;
- }
-
vec_validate (cmt->per_numa_data, vm->numa_node);
- numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
-
- /* create session pool for the numa node */
- name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
- mp = rte_cryptodev_sym_session_pool_create ((char *) name,
- CRYPTODEV_NB_SESSION,
- 0, 0, 0, numa);
- if (!mp)
- {
- error = clib_error_return (0, "Not enough memory for mp %s", name);
- goto err_handling;
- }
- vec_free (name);
-
- numa_data->sess_pool = mp;
-
- /* create session private pool for the numa node */
- name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
- mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
- 0, NULL, NULL, NULL, NULL, numa, 0);
- if (!mp)
- {
- error = clib_error_return (0, "Not enough memory for mp %s", name);
- vec_free (name);
- goto err_handling;
- }
-
- vec_free (name);
-
- numa_data->sess_priv_pool = mp;
/* probe all cryptodev devices and get queue info */
if (cryptodev_probe (vm, n_workers) < 0)
@@ -1452,6 +1466,8 @@ dpdk_cryptodev_init (vlib_main_t * vm)
goto err_handling;
}
+ cryptodev_get_max_sz (&sess_sz, &dp_sz);
+
clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
clib_spinlock_init (&cmt->tlock);
@@ -1460,11 +1476,13 @@ dpdk_cryptodev_init (vlib_main_t * vm)
for (i = skip_master; i < tm->n_vlib_mains; i++)
{
ptd = cmt->per_thread_data + i;
- cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
+ numa = vlib_mains[i]->numa_node;
+
ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS *
CRYPTODEV_MAX_AAD_SIZE,
CLIB_CACHE_LINE_BYTES,
numa);
+
if (ptd->aad_buf == 0)
{
error = clib_error_return (0, "Failed to alloc aad buf");
@@ -1473,6 +1491,13 @@ dpdk_cryptodev_init (vlib_main_t * vm)
ptd->aad_phy_addr = rte_malloc_virt2iova (ptd->aad_buf);
+ ptd->ctx = rte_zmalloc_socket (0, dp_sz, CLIB_CACHE_LINE_BYTES, numa);
+ if (!ptd->ctx)
+ {
+ error = clib_error_return (0, "Failed to alloc raw dp ctx");
+ goto err_handling;
+ }
+
name = format (0, "cache_frame_ring_%u%u", numa, i);
ptd->cached_frame = rte_ring_create ((char *)name,
CRYPTODEV_DEQ_CACHE_SZ, numa,
@@ -1480,15 +1505,58 @@ dpdk_cryptodev_init (vlib_main_t * vm)
if (ptd->cached_frame == 0)
{
- error = clib_error_return (0, "Failed to frame ring");
+ error = clib_error_return (0, "Failed to alloc frame ring");
goto err_handling;
}
vec_free (name);
+
+ vec_validate (cmt->per_numa_data, numa);
+ numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
+
+ if (!numa_data->sess_pool)
+ {
+ /* create session pool for the numa node */
+ name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
+ mp = rte_cryptodev_sym_session_pool_create (
+ (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
+ if (!mp)
+ {
+ error =
+ clib_error_return (0, "Not enough memory for mp %s", name);
+ goto err_handling;
+ }
+ vec_free (name);
+
+ numa_data->sess_pool = mp;
+
+ /* create session private pool for the numa node */
+ name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
+ mp =
+ rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
+ 0, 0, NULL, NULL, NULL, NULL, numa, 0);
+ if (!mp)
+ {
+ error =
+ clib_error_return (0, "Not enough memory for mp %s", name);
+ vec_free (name);
+ goto err_handling;
+ }
+
+ vec_free (name);
+
+ numa_data->sess_priv_pool = mp;
+ }
+
+ error = create_reset_sess (ptd, i, numa, sess_sz);
+ if (error)
+ goto err_handling;
+
+ cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
}
/* register handler */
- eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
- "DPDK Cryptodev Engine");
+ eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
+ "DPDK Cryptodev Engine");
#define _(a, b, c, d, e, f) \
vnet_crypto_register_async_handler \
@@ -1518,6 +1586,12 @@ dpdk_cryptodev_init (vlib_main_t * vm)
vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
+ /* this engine is only enabled when cryptodev device(s) are presented in
+ * startup.conf. Assume it is wanted to be used, turn on async mode here.
+ */
+ vnet_crypto_request_async_mode (1);
+ ipsec_set_async_mode (1);
+
return 0;
err_handling: