Diffstat (limited to 'src/plugins/dpdk/cryptodev/cryptodev.c')
-rw-r--r--  src/plugins/dpdk/cryptodev/cryptodev.c | 432
1 file changed, 314 insertions(+), 118 deletions(-)
diff --git a/src/plugins/dpdk/cryptodev/cryptodev.c b/src/plugins/dpdk/cryptodev/cryptodev.c
index d52fa407ec5..43c2c879aab 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev.c
@@ -29,7 +29,6 @@
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
-#include <rte_cryptodev_pmd.h>
#include <rte_config.h>
#include "cryptodev.h"
@@ -52,12 +51,19 @@ prepare_aead_xform (struct rte_crypto_sym_xform *xform,
xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
xform->next = 0;
- if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
- key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
- key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
+ if (key->alg == VNET_CRYPTO_ALG_AES_128_GCM ||
+ key->alg == VNET_CRYPTO_ALG_AES_192_GCM ||
+ key->alg == VNET_CRYPTO_ALG_AES_256_GCM)
+ {
+ aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
+ }
+ else if (key->alg == VNET_CRYPTO_ALG_CHACHA20_POLY1305)
+ {
+ aead_xform->algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
+ }
+ else
return -1;
- aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
aead_xform->aad_length = aad_len;
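
As context for the new Chacha20-Poly1305 branch above: besides the algorithm selector, a complete AEAD transform also carries key, IV, digest and AAD parameters. The sketch below is not part of the patch; it only illustrates, using DPDK's public struct rte_crypto_sym_xform, how such a transform is typically filled with the RFC 8439 sizes for Chacha20-Poly1305 (the helper name and the key_data/iv_offset/aad_len parameters are placeholders).

```c
#include <stdint.h>
#include <string.h>
#include <rte_crypto_sym.h>

/* Hypothetical illustration: fill an AEAD xform for Chacha20-Poly1305.
 * RFC 8439 uses a 32-byte key, a 12-byte nonce and a 16-byte Poly1305 tag;
 * aad_len depends on the protocol (e.g. the ESP header for IPsec). */
static void
fill_chacha_poly_xform (struct rte_crypto_sym_xform *xform,
			const uint8_t *key_data, uint16_t iv_offset,
			uint16_t aad_len, int is_encrypt)
{
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = NULL;
  xform->aead.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
  xform->aead.op = is_encrypt ? RTE_CRYPTO_AEAD_OP_ENCRYPT :
				RTE_CRYPTO_AEAD_OP_DECRYPT;
  xform->aead.key.data = key_data;
  xform->aead.key.length = 32;	     /* 256-bit key */
  xform->aead.iv.offset = iv_offset; /* IV location inside the crypto op */
  xform->aead.iv.length = 12;	     /* 96-bit nonce */
  xform->aead.digest_length = 16;    /* Poly1305 tag */
  xform->aead.aad_length = aad_len;
}
```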
@@ -135,7 +141,7 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
}
static_always_inline void
-cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
+cryptodev_session_del (cryptodev_session_t *sess)
{
u32 n_devs, i;
@@ -145,9 +151,14 @@ cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
n_devs = rte_cryptodev_count ();
for (i = 0; i < n_devs; i++)
+#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
+ if (rte_cryptodev_sym_session_free (i, sess) == 0)
+ break;
+#else
rte_cryptodev_sym_session_clear (i, sess);
rte_cryptodev_sym_session_free (sess);
+#endif
}
static int
@@ -310,7 +321,7 @@ cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
if (cryptodev_check_supported_vnet_alg (key) == 0)
return;
- vec_validate (ckey->keys, idx);
+ vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
vec_foreach_index (i, ckey->keys)
vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}
@@ -322,6 +333,59 @@ cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
cryptodev_sess_handler (vm, kop, idx, 8);
}
+clib_error_t *
+allocate_session_pools (u32 numa_node,
+ cryptodev_session_pool_t *sess_pools_elt, u32 len)
+{
+ cryptodev_main_t *cmt = &cryptodev_main;
+ u8 *name;
+ clib_error_t *error = NULL;
+
+ name = format (0, "vcrypto_sess_pool_%u_%04x%c", numa_node, len, 0);
+#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
+ sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
+ (char *) name, CRYPTODEV_NB_SESSION, cmt->sess_sz, 0, 0, numa_node);
+#else
+ sess_pools_elt->sess_pool = rte_cryptodev_sym_session_pool_create (
+ (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa_node);
+#endif
+
+ if (!sess_pools_elt->sess_pool)
+ {
+ error = clib_error_return (0, "Not enough memory for mp %s", name);
+ goto clear_mempools;
+ }
+ vec_free (name);
+
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
+ name = format (0, "crypto_sess_pool_%u_%04x%c", numa_node, len, 0);
+ sess_pools_elt->sess_priv_pool = rte_mempool_create (
+ (char *) name, CRYPTODEV_NB_SESSION * (cmt->drivers_cnt), cmt->sess_sz, 0,
+ 0, NULL, NULL, NULL, NULL, numa_node, 0);
+
+ if (!sess_pools_elt->sess_priv_pool)
+ {
+ error = clib_error_return (0, "Not enough memory for mp %s", name);
+ goto clear_mempools;
+ }
+ vec_free (name);
+#endif
+
+clear_mempools:
+ if (error)
+ {
+ vec_free (name);
+ if (sess_pools_elt->sess_pool)
+ rte_mempool_free (sess_pools_elt->sess_pool);
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
+ if (sess_pools_elt->sess_priv_pool)
+ rte_mempool_free (sess_pools_elt->sess_priv_pool);
+#endif
+ return error;
+ }
+ return 0;
+}
+
int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
u32 aad_len)
@@ -330,52 +394,106 @@ cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
cryptodev_numa_data_t *numa_data;
cryptodev_inst_t *dev_inst;
vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
- struct rte_mempool *sess_pool, *sess_priv_pool;
+ struct rte_mempool *sess_pool;
+ cryptodev_session_pool_t *sess_pools_elt;
cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
- struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
+ cryptodev_session_t *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
+ struct rte_mempool *sess_priv_pool;
+ struct rte_cryptodev_info dev_info;
+#endif
u32 numa_node = vm->numa_node;
- int ret;
+ clib_error_t *error;
+ int ret = 0;
+ u8 found = 0;
numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
- sess_pool = numa_data->sess_pool;
- sess_priv_pool = numa_data->sess_priv_pool;
- sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
- rte_cryptodev_sym_session_create (sess_pool);
- if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
+ clib_spinlock_lock (&cmt->tlock);
+ vec_foreach (sess_pools_elt, numa_data->sess_pools)
{
- ret = -1;
- goto clear_key;
+ if (sess_pools_elt->sess_pool == NULL)
+ {
+ error = allocate_session_pools (numa_node, sess_pools_elt,
+ vec_len (numa_data->sess_pools) - 1);
+ if (error)
+ {
+ ret = -1;
+ goto clear_key;
+ }
+ }
+ if (rte_mempool_avail_count (sess_pools_elt->sess_pool) >= 2)
+ {
+ found = 1;
+ break;
+ }
}
- sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
- rte_cryptodev_sym_session_create (sess_pool);
- if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
+ if (found == 0)
{
- ret = -1;
- goto clear_key;
+ vec_add2 (numa_data->sess_pools, sess_pools_elt, 1);
+ error = allocate_session_pools (numa_node, sess_pools_elt,
+ vec_len (numa_data->sess_pools) - 1);
+ if (error)
+ {
+ ret = -1;
+ goto clear_key;
+ }
}
+ sess_pool = sess_pools_elt->sess_pool;
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
+ sess_priv_pool = sess_pools_elt->sess_priv_pool;
+
+ sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
+ rte_cryptodev_sym_session_create (sess_pool);
+
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
+ rte_cryptodev_sym_session_create (sess_pool);
+#endif
+
if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
else
ret =
prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
if (ret)
- return 0;
+ {
+ ret = -1;
+ goto clear_key;
+ }
if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
else
prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
+#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
+ dev_inst = vec_elt_at_index (cmt->cryptodev_inst, 0);
+ u32 dev_id = dev_inst->dev_id;
+ sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
+ rte_cryptodev_sym_session_create (dev_id, xforms_enc, sess_pool);
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
+ rte_cryptodev_sym_session_create (dev_id, xforms_dec, sess_pool);
+ if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT] ||
+ !sessions[CRYPTODEV_OP_TYPE_DECRYPT])
+ {
+ ret = -1;
+ goto clear_key;
+ }
+
+ rte_cryptodev_sym_session_opaque_data_set (
+ sessions[CRYPTODEV_OP_TYPE_ENCRYPT], aad_len);
+ rte_cryptodev_sym_session_opaque_data_set (
+ sessions[CRYPTODEV_OP_TYPE_DECRYPT], aad_len);
+#else
vec_foreach (dev_inst, cmt->cryptodev_inst)
{
u32 dev_id = dev_inst->dev_id;
- struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);
- u32 driver_id = cdev->driver_id;
+ rte_cryptodev_info_get (dev_id, &dev_info);
+ u32 driver_id = dev_info.driver_id;
/* if the session is already configured for the driver type, avoid
configuring it again to increase the session data's refcnt */
@@ -390,11 +508,12 @@ cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
sess_priv_pool);
if (ret < 0)
- return ret;
+ goto clear_key;
}
sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;
+#endif
CLIB_MEMORY_STORE_BARRIER ();
ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
@@ -408,6 +527,7 @@ clear_key:
cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
}
+ clib_spinlock_unlock (&cmt->tlock);
return ret;
}
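
The hunks above capture the main API split this patch handles: before DPDK 22.11 a symmetric session is first allocated from a mempool and then initialised once per device with rte_cryptodev_sym_session_init(), while from 22.11 on rte_cryptodev_sym_session_create() takes the device id, the xform chain and the pool in one call, the session handle is opaque, and user data is stored through the rte_cryptodev_sym_session_opaque_data_set() accessor. A minimal, hedged sketch of the two paths follows; the helper name is hypothetical and the pre-22.11 error handling is simplified compared to the patch.

```c
#include <stdint.h>
#include <stddef.h>
#include <rte_cryptodev.h>
#include <rte_version.h>

/* Hypothetical helper mirroring the two session-creation paths above;
 * returns the session handle or NULL on failure. */
static void *
create_sym_session (uint8_t dev_id, struct rte_crypto_sym_xform *xforms,
		    struct rte_mempool *sess_pool,
		    struct rte_mempool *sess_priv_pool, uint64_t aad_len)
{
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  /* 22.11+: create and initialise in one step; the session is opaque, so
   * user data goes through an accessor and no private pool is needed. */
  void *sess = rte_cryptodev_sym_session_create (dev_id, xforms, sess_pool);
  (void) sess_priv_pool;
  if (sess)
    rte_cryptodev_sym_session_opaque_data_set (sess, aad_len);
  return sess;
#else
  /* pre-22.11: allocate from the session pool, then initialise per device
   * so the per-driver private data lands in the separate private pool. */
  struct rte_cryptodev_sym_session *sess =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sess)
    return NULL;
  if (rte_cryptodev_sym_session_init (dev_id, sess, xforms, sess_priv_pool))
    {
      rte_cryptodev_sym_session_free (sess);
      return NULL;
    }
  sess->opaque_data = aad_len;
  return sess;
#endif
}
```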
@@ -459,14 +579,14 @@ cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
return -EBUSY;
vec_foreach_index (idx, cmt->cryptodev_inst)
- {
- cinst = cmt->cryptodev_inst + idx;
- if (cinst->dev_id == cet->cryptodev_id &&
- cinst->q_id == cet->cryptodev_q)
- break;
- }
+ {
+ cinst = cmt->cryptodev_inst + idx;
+ if (cinst->dev_id == cet->cryptodev_id &&
+ cinst->q_id == cet->cryptodev_q)
+ break;
+ }
/* invalid existing worker resource assignment */
- if (idx == vec_len (cmt->cryptodev_inst))
+ if (idx >= vec_len (cmt->cryptodev_inst))
return -EINVAL;
clib_spinlock_lock (&cmt->tlock);
clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
@@ -547,6 +667,90 @@ VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
};
static clib_error_t *
+cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ cryptodev_main_t *cmt = &cryptodev_main;
+ u32 thread_index = 0;
+ u16 i;
+ vec_foreach_index (thread_index, cmt->per_thread_data)
+ {
+ cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
+ cryptodev_cache_ring_t *ring = &cet->cache_ring;
+ u16 head = ring->head;
+ u16 tail = ring->tail;
+ u16 n_cached = (CRYPTODEV_CACHE_QUEUE_SIZE - tail + head) &
+ CRYPTODEV_CACHE_QUEUE_MASK;
+
+ u16 enq_head = ring->enq_head;
+ u16 deq_tail = ring->deq_tail;
+ u16 n_frames_inflight =
+ (enq_head == deq_tail) ?
+ 0 :
+ ((CRYPTODEV_CACHE_QUEUE_SIZE + enq_head - deq_tail) &
+ CRYPTODEV_CACHE_QUEUE_MASK);
+ /* even if some elements of dequeued frame are still pending for deq
+ * we consider the frame as processed */
+ u16 n_frames_processed =
+ ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ?
+ 0 :
+ ((CRYPTODEV_CACHE_QUEUE_SIZE - tail + deq_tail) &
+ CRYPTODEV_CACHE_QUEUE_MASK) +
+ 1;
+ /* even if some elements of enqueued frame are still pending for enq
+ * we consider the frame as enqueued */
+ u16 n_frames_pending =
+ (head == enq_head) ? 0 :
+ ((CRYPTODEV_CACHE_QUEUE_SIZE - enq_head + head) &
+ CRYPTODEV_CACHE_QUEUE_MASK) -
+ 1;
+
+ u16 elts_to_enq =
+ (ring->frames[enq_head].n_elts - ring->frames[enq_head].enq_elts_head);
+ u16 elts_to_deq =
+ (ring->frames[deq_tail].n_elts - ring->frames[deq_tail].deq_elts_tail);
+
+ u32 elts_total = 0;
+
+ for (i = 0; i < CRYPTODEV_CACHE_QUEUE_SIZE; i++)
+ elts_total += ring->frames[i].n_elts;
+
+ if (vlib_num_workers () > 0 && thread_index == 0)
+ continue;
+
+ vlib_cli_output (vm, "\n\n");
+ vlib_cli_output (vm, "Frames cached in the ring: %u", n_cached);
+ vlib_cli_output (vm, "Frames cached but not processed: %u",
+ n_frames_pending);
+ vlib_cli_output (vm, "Frames inflight: %u", n_frames_inflight);
+ vlib_cli_output (vm, "Frames processed: %u", n_frames_processed);
+ vlib_cli_output (vm, "Elements total: %u", elts_total);
+ vlib_cli_output (vm, "Elements inflight: %u", cet->inflight);
+ vlib_cli_output (vm, "Head index: %u", head);
+ vlib_cli_output (vm, "Tail index: %u", tail);
+      vlib_cli_output (vm, "Current frame index being enqueued: %u",
+ enq_head);
+ vlib_cli_output (vm, "Current frame index being dequeued: %u", deq_tail);
+ vlib_cli_output (vm,
+ "Elements in current frame to be enqueued: %u, waiting "
+ "to be enqueued: %u",
+ ring->frames[enq_head].n_elts, elts_to_enq);
+ vlib_cli_output (vm,
+ "Elements in current frame to be dequeued: %u, waiting "
+ "to be dequeued: %u",
+ ring->frames[deq_tail].n_elts, elts_to_deq);
+ vlib_cli_output (vm, "\n\n");
+ }
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
+ .path = "show cryptodev cache status",
+ .short_help = "show status of all cryptodev cache rings",
+ .function = cryptodev_show_cache_rings_fn,
+};
+
+static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
vlib_cli_command_t * cmd)
{
@@ -643,6 +847,15 @@ cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
rte_cryptodev_info_get (cryptodev_id, &info);
+  /* Starting from DPDK 22.11, VPP no longer supports heterogeneous crypto
+     devices. Only devices that have the same driver type as the first
+     initialized device can be initialized.
+   */
+#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
+ if (cmt->drivers_cnt == 1 && cmt->driver_id != info.driver_id)
+ return -1;
+#endif
+
if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
return -1;
@@ -656,7 +869,9 @@ cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
struct rte_cryptodev_qp_conf qp_cfg;
qp_cfg.mp_session = 0;
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
qp_cfg.mp_session_private = 0;
+#endif
qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
@@ -675,16 +890,30 @@ cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
/* start the device */
rte_cryptodev_start (cryptodev_id);
+#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
+ if (cmt->drivers_cnt == 0)
+ {
+ cmt->drivers_cnt = 1;
+ cmt->driver_id = info.driver_id;
+ cmt->sess_sz = rte_cryptodev_sym_get_private_session_size (cryptodev_id);
+ }
+#endif
+
for (i = 0; i < info.max_nb_queue_pairs; i++)
{
cryptodev_inst_t *cdev_inst;
+#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
+ const char *dev_name = rte_dev_name (info.device);
+#else
+ const char *dev_name = info.device->name;
+#endif
vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
- cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
+ cdev_inst->desc = vec_new (char, strlen (dev_name) + 10);
cdev_inst->dev_id = cryptodev_id;
cdev_inst->q_id = i;
- snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
- "%s_q%u", info.device->name, i);
+ snprintf (cdev_inst->desc, strlen (dev_name) + 9, "%s_q%u",
+		dev_name, i);
}
return 0;
@@ -1016,46 +1245,26 @@ cryptodev_probe (vlib_main_t *vm, u32 n_workers)
return 0;
}
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
static void
-cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz)
+is_drv_unique (u32 driver_id, u32 **unique_drivers)
{
- cryptodev_main_t *cmt = &cryptodev_main;
- cryptodev_inst_t *cinst;
- u32 max_sess = 0, max_dp = 0;
+ u32 *unique_elt;
+ u8 found = 0;
- vec_foreach (cinst, cmt->cryptodev_inst)
+ vec_foreach (unique_elt, *unique_drivers)
{
- u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id);
- u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);
-
- max_sess = clib_max (sess_sz, max_sess);
- max_dp = clib_max (dp_sz, max_dp);
+ if (*unique_elt == driver_id)
+ {
+ found = 1;
+ break;
+ }
}
- *max_sess_sz = max_sess;
- *max_dp_sz = max_dp;
-}
-
-static void
-dpdk_disable_cryptodev_engine (vlib_main_t *vm)
-{
- vlib_thread_main_t *tm = vlib_get_thread_main ();
- cryptodev_main_t *cmt = &cryptodev_main;
- u32 i;
-
- for (i = (vlib_num_workers () > 0); i < tm->n_vlib_mains; i++)
- {
- u32 numa = vlib_get_main_by_index (i)->numa_node;
- cryptodev_numa_data_t *numa_data;
-
- vec_validate (cmt->per_numa_data, numa);
- numa_data = cmt->per_numa_data + numa;
- if (numa_data->sess_pool)
- rte_mempool_free (numa_data->sess_pool);
- if (numa_data->sess_priv_pool)
- rte_mempool_free (numa_data->sess_priv_pool);
- }
+ if (!found)
+ vec_add1 (*unique_drivers, driver_id);
}
+#endif
clib_error_t *
dpdk_cryptodev_init (vlib_main_t * vm)
@@ -1064,30 +1273,53 @@ dpdk_cryptodev_init (vlib_main_t * vm)
vlib_thread_main_t *tm = vlib_get_thread_main ();
cryptodev_engine_thread_t *cet;
cryptodev_numa_data_t *numa_data;
- struct rte_mempool *mp;
+ u32 node;
+ u8 nodes = 0;
u32 skip_master = vlib_num_workers () > 0;
u32 n_workers = tm->n_vlib_mains - skip_master;
- u32 numa = vm->numa_node;
- u32 sess_sz, dp_sz;
u32 eidx;
u32 i;
- u8 *name = 0;
clib_error_t *error;
cmt->iova_mode = rte_eal_iova_mode ();
- vec_validate (cmt->per_numa_data, vm->numa_node);
+ clib_bitmap_foreach (node, tm->cpu_socket_bitmap)
+ {
+ if (node >= nodes)
+ nodes = node;
+ }
+
+ vec_validate (cmt->per_numa_data, nodes);
+ vec_foreach (numa_data, cmt->per_numa_data)
+ {
+ vec_validate (numa_data->sess_pools, 0);
+ }
/* probe all cryptodev devices and get queue info */
if (cryptodev_probe (vm, n_workers) < 0)
+ return 0;
+
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
+ struct rte_cryptodev_info dev_info;
+ cryptodev_inst_t *dev_inst;
+ u32 *unique_drivers = 0;
+ vec_foreach (dev_inst, cmt->cryptodev_inst)
{
- error = clib_error_return (0, "Failed to configure cryptodev");
- goto err_handling;
+ u32 dev_id = dev_inst->dev_id;
+ rte_cryptodev_info_get (dev_id, &dev_info);
+ u32 driver_id = dev_info.driver_id;
+ is_drv_unique (driver_id, &unique_drivers);
+
+ u32 sess_sz =
+ rte_cryptodev_sym_get_private_session_size (dev_inst->dev_id);
+ cmt->sess_sz = clib_max (cmt->sess_sz, sess_sz);
}
- cryptodev_get_max_sz (&sess_sz, &dp_sz);
+ cmt->drivers_cnt = vec_len (unique_drivers);
+ vec_free (unique_drivers);
+#endif
- clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
+ clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
clib_spinlock_init (&cmt->tlock);
vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1,
@@ -1095,46 +1327,13 @@ dpdk_cryptodev_init (vlib_main_t * vm)
for (i = skip_master; i < tm->n_vlib_mains; i++)
{
cet = cmt->per_thread_data + i;
- numa = vlib_get_main_by_index (i)->numa_node;
- vec_validate (cmt->per_numa_data, numa);
- numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
-
- if (!numa_data->sess_pool)
+ if (cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO) <
+ 0)
{
- /* create session pool for the numa node */
- name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
- mp = rte_cryptodev_sym_session_pool_create (
- (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
- if (!mp)
- {
- error =
- clib_error_return (0, "Not enough memory for mp %s", name);
- goto err_handling;
- }
- vec_free (name);
-
- numa_data->sess_pool = mp;
-
- /* create session private pool for the numa node */
- name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
- mp =
- rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
- 0, 0, NULL, NULL, NULL, NULL, numa, 0);
- if (!mp)
- {
- error =
- clib_error_return (0, "Not enough memory for mp %s", name);
- vec_free (name);
- goto err_handling;
- }
-
- vec_free (name);
-
- numa_data->sess_priv_pool = mp;
+ error = clib_error_return (0, "Failed to configure cryptodev");
+ goto err_handling;
}
-
- cryptodev_assign_resource (cet, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
}
/* register handler */
@@ -1154,13 +1353,10 @@ dpdk_cryptodev_init (vlib_main_t * vm)
/* this engine is only enabled when cryptodev device(s) are presented in
* startup.conf. Assume it is wanted to be used, turn on async mode here.
*/
- vnet_crypto_request_async_mode (1);
ipsec_set_async_mode (1);
return 0;
err_handling:
- dpdk_disable_cryptodev_engine (vm);
-
return error;
}