Diffstat (limited to 'drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c')
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 424
1 file changed, 252 insertions(+), 172 deletions(-)
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 7443b47b..45b25c9d 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -40,29 +40,8 @@
#include "rte_aesni_mb_pmd_private.h"
-/**
- * Global static parameter used to create a unique name for each AES-NI multi
- * buffer crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
-
- ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD),
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-typedef void (*hash_one_block_t)(void *data, void *digest);
-typedef void (*aes_keyexp_t)(void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
/**
* Calculate the authentication pre-computes
@@ -107,41 +86,50 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
}
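
The pre-computes referenced above follow the standard HMAC construction (RFC 2104): the authentication key is XORed with the ipad/opad constants and one hash block is run over each padded block to seed the inner and outer digests that the multi-buffer library later resumes from. A minimal sketch of that padding step, assuming a 64-byte block size and a key already no longer than one block (the helper name and buffer handling are illustrative, not taken from the driver):

#include <stdint.h>
#include <string.h>

#define HMAC_BLOCK_SIZE 64	/* SHA-1/SHA-224/SHA-256 block size */

/* Illustrative only: build the HMAC inner/outer pad blocks that the
 * one-block hash functions are then run over to produce the pre-computes. */
static void
hmac_build_pads(const uint8_t *key, size_t key_len,
		uint8_t ipad[HMAC_BLOCK_SIZE], uint8_t opad[HMAC_BLOCK_SIZE])
{
	size_t i;

	memset(ipad, 0x36, HMAC_BLOCK_SIZE);
	memset(opad, 0x5C, HMAC_BLOCK_SIZE);

	for (i = 0; i < key_len; i++) {
		ipad[i] ^= key[i];
		opad[i] ^= key[i];
	}
}
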
/** Get xform chain order */
-static int
+static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
- /*
- * Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
- * operations, all other options are invalid, so we must have exactly
- * 2 xform structs chained together
- */
- if (xform->next == NULL || xform->next->next != NULL)
- return -1;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return HASH_CIPHER;
+ if (xform == NULL)
+ return AESNI_MB_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_CIPHER_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return AESNI_MB_OP_CIPHER_HASH;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return CIPHER_HASH;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_HASH_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return AESNI_MB_OP_HASH_CIPHER;
+ }
- return -1;
+ return AESNI_MB_OP_NOT_SUPPORTED;
}
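
The reworked helper also recognises single-element chains (cipher only, auth only), which the earlier code rejected outright. As an illustration of the mapping, a sketch using the public symmetric xform structures from rte_cryptodev (the variable names are mine; only the fields needed for chain-order detection are set):

struct rte_crypto_sym_xform cipher_xf = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = NULL,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
	},
};
struct rte_crypto_sym_xform auth_xf = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = NULL,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
	},
};

/* Single xform, no chain: AESNI_MB_OP_CIPHER_ONLY */
aesni_mb_get_chain_order(&cipher_xf);

/* Cipher followed by auth: AESNI_MB_OP_CIPHER_HASH */
cipher_xf.next = &auth_xf;
aesni_mb_get_chain_order(&cipher_xf);
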
/** Set session authentication parameters */
static int
-aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
+ if (xform == NULL) {
+ sess->auth.algo = NULL_HASH;
+ return 0;
+ }
+
if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+
/* Set Authentication Parameters */
if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
sess->auth.algo = AES_XCBC;
@@ -193,12 +181,17 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
/** Set session cipher parameters */
static int
-aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
+ if (xform == NULL) {
+ sess->cipher.mode = NULL_CIPHER;
+ return 0;
+ }
+
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
@@ -225,6 +218,9 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
case RTE_CRYPTO_CIPHER_AES_CTR:
sess->cipher.mode = CNTR;
break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_SEC_BPI;
+ break;
default:
MB_LOG_ERR("Unsupported cipher mode parameter");
return -1;
@@ -259,7 +255,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
/** Parse crypto xform chain and set private session parameters */
int
-aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
@@ -268,16 +264,36 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
- case HASH_CIPHER:
+ case AESNI_MB_OP_HASH_CIPHER:
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = xform->next;
break;
- case CIPHER_HASH:
+ case AESNI_MB_OP_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
auth_xform = xform->next;
cipher_xform = xform;
break;
+ case AESNI_MB_OP_HASH_ONLY:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = NULL;
+ break;
+ case AESNI_MB_OP_CIPHER_ONLY:
+ /*
+ * Multi buffer library operates only at two modes,
+ * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
+ * chain order depends on cipher operation: encryption is always
+ * the first operation and decryption the last one.
+ */
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->chain_order = CIPHER_HASH;
+ else
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = NULL;
+ cipher_xform = xform;
+ break;
+ case AESNI_MB_OP_NOT_SUPPORTED:
default:
MB_LOG_ERR("Unsupported operation chain order parameter");
return -1;
@@ -296,16 +312,43 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
return 0;
}
+/**
+ * burst enqueue, place crypto operations on ingress queue for processing.
+ *
+ * @param __qp Queue Pair to process
+ * @param ops Crypto operations for processing
+ * @param nb_ops Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = __qp;
+
+ unsigned int nb_enqueued;
+
+ nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+ (void **)ops, nb_ops, NULL);
+
+ qp->stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
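
With this change the enqueue path no longer touches the multi-buffer manager at all: operations are simply buffered on the queue pair's ingress ring, and session lookup, job submission and completion handling all move to the dequeue path below. The ring itself is created during queue-pair setup, which is not part of this diff; a minimal sketch of how such a single-producer/single-consumer ring could be built (the helper name and sizing are assumptions, not the driver's code):

#include <rte_ring.h>

/* Hypothetical helper: one ingress ring per queue pair.
 * 'count' must be a power of two for rte_ring_create(). */
static struct rte_ring *
ingress_ring_create(const char *qp_name, unsigned int count, int socket_id)
{
	return rte_ring_create(qp_name, count, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
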
+
/** Get multi buffer session */
-static struct aesni_mb_session *
+static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_AESNI_MB_PMD))
+ RTE_CRYPTODEV_AESNI_MB_PMD)) {
return NULL;
+ }
sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
@@ -317,7 +360,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
sess = (struct aesni_mb_session *)
((struct rte_cryptodev_sym_session *)_sess)->_private;
- if (unlikely(aesni_mb_set_session_parameters(qp->ops,
+ if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
@@ -340,18 +383,20 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
* - Completed JOB_AES_HMAC structure pointer on success
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
-static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
- struct aesni_mb_session *session)
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+ struct rte_crypto_op *op)
{
- JOB_AES_HMAC *job;
-
struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ struct aesni_mb_session *session;
uint16_t m_offset = 0;
- job = (*qp->ops->job.get_next)(&qp->mb_mgr);
- if (unlikely(job == NULL))
- return job;
+ session = get_session(qp, op);
+ if (session == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -1;
+ }
+ op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
/* Set crypto operation */
job->chain_order = session->chain_order;
@@ -386,7 +431,8 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
if (odata == NULL) {
MB_LOG_ERR("failed to allocate space in destination "
"mbuf for source data");
- return NULL;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
}
memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
@@ -397,14 +443,16 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
}
/* Set digest output location */
- if (job->cipher_direction == DECRYPT) {
+ if (job->hash_alg != NULL_HASH &&
+ session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
if (job->auth_tag_output == NULL) {
MB_LOG_ERR("failed to allocate space in output mbuf "
"for temp digest");
- return NULL;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
}
memset(job->auth_tag_output, 0,
@@ -439,7 +487,22 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
job->user_data = op;
job->user_data2 = m_dst;
- return job;
+ return 0;
+}
+
+static inline void
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
+ struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
+
+ RTE_ASSERT(m_dst == NULL);
+
+ /* Verify digest if required */
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* trim area used for digest from mbuf */
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/**
@@ -452,32 +515,32 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_crypto_op *
+static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_crypto_op *op =
- (struct rte_crypto_op *)job->user_data;
- struct rte_mbuf *m_dst =
- (struct rte_mbuf *)job->user_data2;
-
- if (op == NULL || m_dst == NULL)
- return NULL;
-
- /* set status as successful by default */
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* check if job has been processed */
- if (unlikely(job->status != STS_COMPLETED)) {
- op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return op;
- } else if (job->chain_order == HASH_CIPHER) {
- /* Verify digest if required */
- if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
- job->auth_tag_output_len_in_bytes) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
+ struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+
+ struct aesni_mb_session *sess;
+
+ RTE_ASSERT(op == NULL);
+
+ if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
+ switch (job->status) {
+ case STS_COMPLETED:
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (job->hash_alg != NULL_HASH) {
+ sess = (struct aesni_mb_session *)
+ op->sym->session->_private;
+
+ if (sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ verify_digest(job, op);
+ }
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
}
/* Free session if a session-less crypto op */
@@ -500,121 +563,131 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
* - Number of processed jobs
*/
static unsigned
-handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
- while (job) {
- processed_jobs++;
+ while (job != NULL && processed_jobs < nb_ops) {
op = post_process_mb_job(qp, job);
- if (op)
- rte_ring_enqueue(qp->processed_ops, (void *)op);
- else
+
+ if (op) {
+ ops[processed_jobs++] = op;
+ qp->stats.dequeued_count++;
+ } else {
qp->stats.dequeue_err_count++;
- job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
+ break;
+ }
+
+ job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
}
return processed_jobs;
}
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int processed_ops = 0;
+
+ /* Flush the remaining jobs */
+ JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+
+ if (job)
+ processed_ops += handle_completed_jobs(qp, job,
+ &ops[processed_ops], nb_ops - processed_ops);
+
+ return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job)
+{
+ job->chain_order = HASH_CIPHER;
+ job->cipher_mode = NULL_CIPHER;
+ job->hash_alg = NULL_HASH;
+ job->cipher_direction = DECRYPT;
+
+ return job;
+}
+
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct aesni_mb_session *sess;
struct aesni_mb_qp *qp = queue_pair;
- JOB_AES_HMAC *job = NULL;
+ struct rte_crypto_op *op;
+ JOB_AES_HMAC *job;
- int i, processed_jobs = 0;
+ int retval, processed_jobs = 0;
- for (i = 0; i < nb_ops; i++) {
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
- if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
- MB_LOG_ERR("PMD only supports symmetric crypto "
- "operation requests, op (%p) is not a "
- "symmetric operation.", op);
- qp->stats.enqueue_err_count++;
- goto flush_jobs;
- }
-#endif
- sess = get_session(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- qp->stats.enqueue_err_count++;
- goto flush_jobs;
- }
+ do {
+ /* Get next operation to process from ingress queue */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
- job = process_crypto_op(qp, ops[i], sess);
+ /* Get next free mb job struct from mb manager */
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL)) {
- qp->stats.enqueue_err_count++;
- goto flush_jobs;
+ /* if no free mb job structs we need to flush mb_mgr */
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ (nb_ops - processed_jobs) - 1);
+
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ }
+
+ retval = set_mb_job_params(job, qp, op);
+ if (unlikely(retval != 0)) {
+ qp->stats.dequeue_err_count++;
+ set_job_null_op(job);
}
- /* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ /* Submit job to multi-buffer for processing */
+ job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
- }
-
- if (processed_jobs == 0)
- goto flush_jobs;
- else
- qp->stats.enqueued_count += processed_jobs;
- return i;
-
-flush_jobs:
- /*
- * If we haven't processed any jobs in submit loop, then flush jobs
- * queue to stop the output stalling
- */
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
- if (job)
- qp->stats.enqueued_count += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(qp, job,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
- return i;
-}
+ } while (processed_jobs < nb_ops);
-static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct aesni_mb_qp *qp = queue_pair;
+ if (processed_jobs < 1)
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
- unsigned nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)ops, nb_ops);
- qp->stats.dequeued_count += nb_dequeued;
-
- return nb_dequeued;
+ return processed_jobs;
}
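
Because the multi-buffer manager only returns jobs once its internal lanes fill (or on an explicit flush), a single dequeue call may hand back fewer completions than were previously enqueued, and callers are expected to keep polling. A hedged application-side sketch using the public cryptodev API (dev_id, qp_id and the op arrays are placeholders):

#include <rte_cryptodev.h>

/* Enqueue a prepared burst and poll until every submitted op is returned. */
static void
run_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t burst_size,
		struct rte_crypto_op **deq)
{
	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
			ops, burst_size);
	uint16_t done = 0;

	while (done < sent)
		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				&deq[done], sent - done);
}
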
-
-static int cryptodev_aesni_mb_remove(const char *name);
+static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
static int
cryptodev_aesni_mb_create(const char *name,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct aesni_mb_private *internals;
enum aesni_mb_vector_mode vector_mode;
- /* Check CPU for support for AES instruction set */
- if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
- MB_LOG_ERR("AES instructions not supported by CPU");
- return -EFAULT;
- }
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
/* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ vector_mode = RTE_AESNI_MB_AVX512;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
vector_mode = RTE_AESNI_MB_AVX2;
else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
vector_mode = RTE_AESNI_MB_AVX;
@@ -625,15 +698,7 @@ cryptodev_aesni_mb_create(const char *name,
return -EFAULT;
}
- /* create a unique device name */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- MB_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
-
-
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct aesni_mb_private), init_params->socket_id);
if (dev == NULL) {
MB_LOG_ERR("failed to create cryptodev vdev");
@@ -661,6 +726,9 @@ cryptodev_aesni_mb_create(const char *name,
case RTE_AESNI_MB_AVX2:
dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
break;
+ case RTE_AESNI_MB_AVX512:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+ break;
default:
break;
}
@@ -674,38 +742,50 @@ cryptodev_aesni_mb_create(const char *name,
return 0;
init_error:
- MB_LOG_ERR("driver %s: cryptodev_aesni_create failed", name);
+ MB_LOG_ERR("driver %s: cryptodev_aesni_create failed",
+ init_params->name);
- cryptodev_aesni_mb_remove(crypto_dev_name);
+ cryptodev_aesni_mb_remove(vdev);
return -EFAULT;
}
-
static int
-cryptodev_aesni_mb_probe(const char *name,
- const char *input_args)
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
- rte_socket_id()
+ rte_socket_id(),
+ ""
};
+ const char *name;
+ const char *input_args;
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return cryptodev_aesni_mb_create(name, &init_params);
+ return cryptodev_aesni_mb_create(name, vdev, &init_params);
}
static int
-cryptodev_aesni_mb_remove(const char *name)
+cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;