Diffstat (limited to 'drivers/crypto/scheduler')
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.c   | 101
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.h   |   3
-rw-r--r--  drivers/crypto/scheduler/scheduler_failover.c        |   4
-rw-r--r--  drivers/crypto/scheduler/scheduler_multicore.c       |  60
-rw-r--r--  drivers/crypto/scheduler/scheduler_pkt_size_distr.c  |  18
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c             | 182
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_ops.c         | 101
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_private.h     |  26
-rw-r--r--  drivers/crypto/scheduler/scheduler_roundrobin.c      |   2
9 files changed, 313 insertions, 184 deletions
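
Among other things, the patch below adds a "mode_param" vdev argument, parsed as "<name>:<value>" with "threshold" currently the only recognised name (used by the packet-size-distr mode). As a hedged usage sketch only, not part of the patch, a scheduler vdev could be created programmatically with the new argument roughly as follows; the device name string "crypto_scheduler", the mode string and the slave names are assumptions:

#include <rte_bus_vdev.h>

/* Hypothetical helper: create a packet-size-distr scheduler with a
 * 128-byte threshold via the new "mode_param" argument. Slave names
 * are placeholders for already-probed crypto vdevs. */
static int
create_sched_vdev_example(void)
{
	return rte_vdev_init("crypto_scheduler",
		"slave=crypto_aesni_mb1,slave=crypto_aesni_mb2,"
		"mode=packet-size-distr,mode_param=threshold:128");
}
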
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 140c8b41..6e4919c4 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -9,6 +9,8 @@
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
+int scheduler_logtype_driver;
+
/** update the scheduler pmd's capability with attaching device's
* capability.
* For each device to be attached, the scheduler's capability should be
@@ -91,8 +93,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
uint32_t nb_caps = 0, i;
- if (sched_ctx->capabilities)
+ if (sched_ctx->capabilities) {
rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
for (i = 0; i < sched_ctx->nb_slaves; i++) {
struct rte_cryptodev_info dev_info;
@@ -166,30 +170,30 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
uint32_t i;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
sched_ctx = dev->data->dev_private;
if (sched_ctx->nb_slaves >=
RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
- CS_LOG_ERR("Too many slaves attached");
+ CR_SCHED_LOG(ERR, "Too many slaves attached");
return -ENOMEM;
}
for (i = 0; i < sched_ctx->nb_slaves; i++)
if (sched_ctx->slaves[i].dev_id == slave_id) {
- CS_LOG_ERR("Slave already added");
+ CR_SCHED_LOG(ERR, "Slave already added");
return -ENOTSUP;
}
@@ -206,7 +210,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
slave->driver_id = 0;
sched_ctx->nb_slaves--;
- CS_LOG_ERR("capabilities update failed");
+ CR_SCHED_LOG(ERR, "capabilities update failed");
return -ENOTSUP;
}
@@ -225,17 +229,17 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
uint32_t i, slave_pos;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -245,12 +249,12 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
break;
if (slave_pos == sched_ctx->nb_slaves) {
- CS_LOG_ERR("Cannot find slave");
+ CR_SCHED_LOG(ERR, "Cannot find slave");
return -ENOTSUP;
}
if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
- CS_LOG_ERR("Failed to detach slave");
+ CR_SCHED_LOG(ERR, "Failed to detach slave");
return -ENOTSUP;
}
@@ -263,7 +267,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
sched_ctx->nb_slaves--;
if (update_scheduler_capability(sched_ctx) < 0) {
- CS_LOG_ERR("capabilities update failed");
+ CR_SCHED_LOG(ERR, "capabilities update failed");
return -ENOTSUP;
}
@@ -282,17 +286,17 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -305,33 +309,33 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
case CDEV_SCHED_MODE_ROUNDROBIN:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
roundrobin_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
pkt_size_based_distr_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_FAILOVER:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
failover_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_MULTICORE:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
multicore_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
default:
- CS_LOG_ERR("Not yet supported");
+ CR_SCHED_LOG(ERR, "Not yet supported");
return -ENOTSUP;
}
@@ -345,12 +349,12 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -367,17 +371,17 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -395,12 +399,12 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -417,25 +421,25 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
sched_ctx = dev->data->dev_private;
if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- CS_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes.\n", scheduler->name,
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", scheduler->name,
RTE_CRYPTODEV_NAME_MAX_LEN);
return -EINVAL;
}
@@ -444,8 +448,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
if (strlen(scheduler->description) >
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
- CS_LOG_ERR("Invalid description %s, should be less than "
- "%u bytes.\n", scheduler->description,
+ CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
+ "%u bytes.", scheduler->description,
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
return -EINVAL;
}
@@ -462,14 +466,16 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
sched_ctx->ops.option_set = scheduler->ops->option_set;
sched_ctx->ops.option_get = scheduler->ops->option_get;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
if (sched_ctx->ops.create_private_ctx) {
int ret = (*sched_ctx->ops.create_private_ctx)(dev);
if (ret < 0) {
- CS_LOG_ERR("Unable to create scheduler private "
+ CR_SCHED_LOG(ERR, "Unable to create scheduler private "
"context");
return ret;
}
@@ -488,12 +494,12 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
uint32_t nb_slaves = 0;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -521,17 +527,17 @@ rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
option_type >= CDEV_SCHED_OPTION_COUNT) {
- CS_LOG_ERR("Invalid option parameter");
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
return -EINVAL;
}
if (!option) {
- CS_LOG_ERR("Invalid option parameter");
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
return -EINVAL;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -551,17 +557,17 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (!option) {
- CS_LOG_ERR("Invalid option parameter");
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
return -EINVAL;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -571,3 +577,8 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
return (*sched_ctx->ops.option_get)(dev, option_type, option);
}
+
+RTE_INIT(scheduler_init_log)
+{
+ scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler");
+}
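
The RTE_INIT constructor above registers the "pmd.crypto.scheduler" dynamic logtype that the new CR_SCHED_LOG() macro (defined in scheduler_pmd_private.h further down) writes to. As a hedged sketch, an application could raise that logtype's level at runtime roughly like this, assuming rte_log_register() returns the already-registered id for a known name; the EAL --log-level option can achieve the same at startup:

#include <rte_log.h>

/* Hypothetical helper: enable DEBUG output from the scheduler PMD.
 * Assumes rte_log_register() hands back the existing id when the name
 * was already registered by the driver's RTE_INIT constructor. */
static void
scheduler_logs_to_debug(void)
{
	int logtype = rte_log_register("pmd.crypto.scheduler");

	if (logtype >= 0)
		rte_log_set_level((uint32_t)logtype, RTE_LOG_DEBUG);
}
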
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 01e7646c..3faea409 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -30,7 +30,7 @@ extern "C" {
#endif
/** Maximum number of multi-core worker cores */
-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64)
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (RTE_MAX_LCORE - 1)
/** Round-robin scheduling mode string */
#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
@@ -76,6 +76,7 @@ enum rte_cryptodev_schedule_option_type {
/**
* Threshold option structure
*/
+#define RTE_CRYPTODEV_SCHEDULER_PARAM_THRES "threshold"
struct rte_cryptodev_scheduler_threshold_option {
uint32_t threshold; /**< Threshold for packet-size mode */
};
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 005b1638..ddfb5b81 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -139,7 +139,7 @@ scheduler_start(struct rte_cryptodev *dev)
uint16_t i;
if (sched_ctx->nb_slaves < 2) {
- CS_LOG_ERR("Number of slaves shall no less than 2");
+ CR_SCHED_LOG(ERR, "Number of slaves shall no less than 2");
return -ENOMEM;
}
@@ -182,7 +182,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
rte_socket_id());
if (!fo_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index b2ce44ce..d410e69d 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -21,8 +21,8 @@ struct mc_scheduler_ctx {
uint32_t num_workers; /**< Number of workers polling */
uint32_t stop_signal;
- struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
- struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+ struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
};
struct mc_scheduler_qp_ctx {
@@ -178,7 +178,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
}
}
if (worker_idx == -1) {
- CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
+ CR_SCHED_LOG(ERR, "worker on core %u:cannot find worker index!",
+ core_id);
return -1;
}
@@ -313,7 +314,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
rte_socket_id());
if (!mc_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
@@ -328,16 +329,18 @@ static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
- struct mc_scheduler_ctx *mc_ctx;
+ struct mc_scheduler_ctx *mc_ctx = NULL;
uint16_t i;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
rte_socket_id());
if (!mc_ctx) {
- CS_LOG_ERR("failed allocate memory");
+ CR_SCHED_LOG(ERR, "failed allocate memory");
return -ENOMEM;
}
@@ -345,25 +348,48 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
for (i = 0; i < sched_ctx->nb_wc; i++) {
char r_name[16];
- snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_enq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
- snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_deq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
}
sched_ctx->private_ctx = (void *)mc_ctx;
return 0;
+
+exit:
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ rte_ring_free(mc_ctx->sched_enq_ring[i]);
+ rte_ring_free(mc_ctx->sched_deq_ring[i]);
+ }
+ rte_free(mc_ctx);
+
+ return -1;
}
struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
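
The ring handling above no longer creates the per-worker rings unconditionally (which fails when a ring of the same name already exists, e.g. when the private context is re-created); it now looks the ring up first, creates it only on a miss, and frees everything on error. A minimal sketch of that lookup-then-create pattern, with a hypothetical helper name:

#include <rte_ring.h>
#include <rte_lcore.h>

/* Hypothetical helper mirroring the pattern used above: reuse an
 * existing ring with this name if present, otherwise create it. */
static struct rte_ring *
sched_ring_get_or_create(const char *name, unsigned int size)
{
	struct rte_ring *r = rte_ring_lookup(name);

	if (r == NULL)
		r = rte_ring_create(name, size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
	return r;
}
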
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 96bf0161..74129b66 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -258,7 +258,7 @@ scheduler_start(struct rte_cryptodev *dev)
/* for packet size based scheduler, nb_slaves have to >= 2 */
if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
- CS_LOG_ERR("not enough slaves to start");
+ CR_SCHED_LOG(ERR, "not enough slaves to start");
return -1;
}
@@ -302,7 +302,7 @@ scheduler_stop(struct rte_cryptodev *dev)
if (ps_qp_ctx->primary_slave.nb_inflight_cops +
ps_qp_ctx->secondary_slave.nb_inflight_cops) {
- CS_LOG_ERR("Some crypto ops left in slave queue");
+ CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue");
return -1;
}
}
@@ -319,7 +319,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
rte_socket_id());
if (!ps_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
@@ -334,13 +334,15 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct psd_scheduler_ctx *psd_ctx;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
rte_socket_id());
if (!psd_ctx) {
- CS_LOG_ERR("failed allocate memory");
+ CR_SCHED_LOG(ERR, "failed allocate memory");
return -ENOMEM;
}
@@ -360,14 +362,14 @@ scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
if ((enum rte_cryptodev_schedule_option_type)option_type !=
CDEV_SCHED_OPTION_THRESHOLD) {
- CS_LOG_ERR("Option not supported");
+ CR_SCHED_LOG(ERR, "Option not supported");
return -EINVAL;
}
threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
option)->threshold;
if (!rte_is_power_of_2(threshold)) {
- CS_LOG_ERR("Threshold is not power of 2");
+ CR_SCHED_LOG(ERR, "Threshold is not power of 2");
return -EINVAL;
}
@@ -386,7 +388,7 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
if ((enum rte_cryptodev_schedule_option_type)option_type !=
CDEV_SCHED_OPTION_THRESHOLD) {
- CS_LOG_ERR("Option not supported");
+ CR_SCHED_LOG(ERR, "Option not supported");
return -EINVAL;
}
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 51a85fa6..a9221a94 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -9,6 +9,7 @@
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_reorder.h>
+#include <rte_string_fns.h>
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
@@ -19,8 +20,10 @@ struct scheduler_init_params {
struct rte_cryptodev_pmd_init_params def_p;
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
+ char mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
uint32_t enable_ordering;
- uint64_t wcmask;
+ uint16_t wc_pool[RTE_MAX_LCORE];
+ uint16_t nb_wc;
char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};
@@ -28,9 +31,9 @@ struct scheduler_init_params {
#define RTE_CRYPTODEV_VDEV_NAME ("name")
#define RTE_CRYPTODEV_VDEV_SLAVE ("slave")
#define RTE_CRYPTODEV_VDEV_MODE ("mode")
+#define RTE_CRYPTODEV_VDEV_MODE_PARAM ("mode_param")
#define RTE_CRYPTODEV_VDEV_ORDERING ("ordering")
#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
-#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
@@ -39,9 +42,9 @@ const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
RTE_CRYPTODEV_VDEV_SLAVE,
RTE_CRYPTODEV_VDEV_MODE,
+ RTE_CRYPTODEV_VDEV_MODE_PARAM,
RTE_CRYPTODEV_VDEV_ORDERING,
RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
RTE_CRYPTODEV_VDEV_SOCKET_ID,
RTE_CRYPTODEV_VDEV_COREMASK,
RTE_CRYPTODEV_VDEV_CORELIST
@@ -68,6 +71,8 @@ const struct scheduler_parse_map scheduler_ordering_map[] = {
{"disable", 0}
};
+#define CDEV_SCHED_MODE_PARAM_SEP_CHAR ':'
+
static int
cryptodev_scheduler_create(const char *name,
struct rte_vdev_device *vdev,
@@ -81,15 +86,11 @@ cryptodev_scheduler_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device,
&init_params->def_p);
if (dev == NULL) {
- CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
+ CR_SCHED_LOG(ERR, "driver %s: failed to create cryptodev vdev",
name);
return -EFAULT;
}
- if (init_params->wcmask != 0)
- RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
- init_params->wcmask);
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
@@ -100,20 +101,26 @@ cryptodev_scheduler_create(const char *name,
if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
uint16_t i;
- sched_ctx->nb_wc = 0;
+ sched_ctx->nb_wc = init_params->nb_wc;
- for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
- if (init_params->wcmask & (1ULL << i)) {
- sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
- RTE_LOG(INFO, PMD,
- " Worker core[%u]=%u added\n",
- sched_ctx->nb_wc-1, i);
- }
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ sched_ctx->wc_pool[i] = init_params->wc_pool[i];
+ CR_SCHED_LOG(INFO, " Worker core[%u]=%u added",
+ i, sched_ctx->wc_pool[i]);
}
}
if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
init_params->mode < CDEV_SCHED_MODE_COUNT) {
+ union {
+ struct rte_cryptodev_scheduler_threshold_option
+ threshold_option;
+ } option;
+ enum rte_cryptodev_schedule_option_type option_type;
+ char param_name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char param_val[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char *s, *end;
+
ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
init_params->mode);
if (ret < 0) {
@@ -125,10 +132,52 @@ cryptodev_scheduler_create(const char *name,
if (scheduler_mode_map[i].val != sched_ctx->mode)
continue;
- RTE_LOG(INFO, PMD, " Scheduling mode = %s\n",
+ CR_SCHED_LOG(INFO, " Scheduling mode = %s",
scheduler_mode_map[i].name);
break;
}
+
+ if (strlen(init_params->mode_param_str) > 0) {
+ s = strchr(init_params->mode_param_str,
+ CDEV_SCHED_MODE_PARAM_SEP_CHAR);
+ if (s == NULL) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ strlcpy(param_name, init_params->mode_param_str,
+ s - init_params->mode_param_str + 1);
+ s++;
+ strlcpy(param_val, s,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ switch (init_params->mode) {
+ case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
+ if (strcmp(param_name,
+ RTE_CRYPTODEV_SCHEDULER_PARAM_THRES)
+ != 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+ option_type = CDEV_SCHED_OPTION_THRESHOLD;
+
+ option.threshold_option.threshold =
+ strtoul(param_val, &end, 0);
+ break;
+ default:
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ if (sched_ctx->ops.option_set(dev, option_type,
+ (void *)&option) < 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ RTE_LOG(INFO, PMD, " Sched mode param (%s = %s)\n",
+ param_name, param_val);
+ }
}
sched_ctx->reordering_enabled = init_params->enable_ordering;
@@ -138,7 +187,7 @@ cryptodev_scheduler_create(const char *name,
sched_ctx->reordering_enabled)
continue;
- RTE_LOG(INFO, PMD, " Packet ordering = %s\n",
+ CR_SCHED_LOG(INFO, " Packet ordering = %s",
scheduler_ordering_map[i].name);
break;
@@ -153,7 +202,7 @@ cryptodev_scheduler_create(const char *name,
if (!sched_ctx->init_slave_names[
sched_ctx->nb_init_slaves]) {
- CS_LOG_ERR("driver %s: Insufficient memory",
+ CR_SCHED_LOG(ERR, "driver %s: Insufficient memory",
name);
return -ENOMEM;
}
@@ -175,8 +224,8 @@ cryptodev_scheduler_create(const char *name,
0, SOCKET_ID_ANY);
if (!sched_ctx->capabilities) {
- RTE_LOG(ERR, PMD, "Not enough memory for capability "
- "information\n");
+ CR_SCHED_LOG(ERR, "Not enough memory for capability "
+ "information");
return -ENOMEM;
}
@@ -220,7 +269,7 @@ parse_integer_arg(const char *key __rte_unused,
*i = atoi(value);
if (*i < 0) {
- CS_LOG_ERR("Argument has to be positive.\n");
+ CR_SCHED_LOG(ERR, "Argument has to be positive.");
return -EINVAL;
}
@@ -232,9 +281,47 @@ static int
parse_coremask_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
+ int i, j, val;
+ uint16_t idx = 0;
+ char c;
struct scheduler_init_params *params = extra_args;
- params->wcmask = strtoull(value, NULL, 16);
+ params->nb_wc = 0;
+
+ if (value == NULL)
+ return -1;
+ /* Remove all blank characters ahead and after .
+ * Remove 0x/0X if exists.
+ */
+ while (isblank(*value))
+ value++;
+ if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
+ value += 2;
+ i = strlen(value);
+ while ((i > 0) && isblank(value[i - 1]))
+ i--;
+
+ if (i == 0)
+ return -1;
+
+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+ c = value[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ if (isdigit(c))
+ val = c - '0';
+ else if (isupper(c))
+ val = c - 'A' + 10;
+ else
+ val = c - 'a' + 10;
+
+ for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
+ if ((1 << j) & val)
+ params->wc_pool[params->nb_wc++] = idx;
+ }
+ }
return 0;
}
@@ -246,7 +333,7 @@ parse_corelist_arg(const char *key __rte_unused,
{
struct scheduler_init_params *params = extra_args;
- params->wcmask = 0ULL;
+ params->nb_wc = 0;
const char *token = value;
@@ -254,7 +341,11 @@ parse_corelist_arg(const char *key __rte_unused,
char *rval;
unsigned int core = strtoul(token, &rval, 10);
- params->wcmask |= 1ULL << core;
+ if (core >= RTE_MAX_LCORE) {
+ CR_SCHED_LOG(ERR, "Invalid worker core %u, should be smaller "
+ "than %u.", core, RTE_MAX_LCORE);
+ }
+ params->wc_pool[params->nb_wc++] = (uint16_t)core;
token = (const char *)rval;
if (token[0] == '\0')
break;
@@ -272,8 +363,8 @@ parse_name_arg(const char *key __rte_unused,
struct rte_cryptodev_pmd_init_params *params = extra_args;
if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- CS_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes.\n", value,
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", value,
RTE_CRYPTODEV_NAME_MAX_LEN - 1);
return -EINVAL;
}
@@ -291,7 +382,7 @@ parse_slave_arg(const char *key __rte_unused,
struct scheduler_init_params *param = extra_args;
if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
- CS_LOG_ERR("Too many slaves.\n");
+ CR_SCHED_LOG(ERR, "Too many slaves.");
return -ENOMEM;
}
@@ -312,12 +403,13 @@ parse_mode_arg(const char *key __rte_unused,
if (strcmp(value, scheduler_mode_map[i].name) == 0) {
param->mode = (enum rte_cryptodev_scheduler_mode)
scheduler_mode_map[i].val;
+
break;
}
}
if (i == RTE_DIM(scheduler_mode_map)) {
- CS_LOG_ERR("Unrecognized input.\n");
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
return -EINVAL;
}
@@ -325,6 +417,18 @@ parse_mode_arg(const char *key __rte_unused,
}
static int
+parse_mode_param_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+
+ strlcpy(param->mode_param_str, value,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int
parse_ordering_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
@@ -340,7 +444,7 @@ parse_ordering_arg(const char *key __rte_unused,
}
if (i == RTE_DIM(scheduler_ordering_map)) {
- CS_LOG_ERR("Unrecognized input.\n");
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
return -EINVAL;
}
@@ -370,13 +474,6 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
- ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- &parse_integer_arg,
- &params->def_p.max_nb_sessions);
- if (ret < 0)
- goto free_kvlist;
-
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
&parse_integer_arg,
&params->def_p.socket_id);
@@ -411,6 +508,11 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE_PARAM,
+ &parse_mode_param_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING,
&parse_ordering_arg, params);
if (ret < 0)
@@ -430,8 +532,7 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct scheduler_ctx),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
},
.nb_slaves = 0,
.mode = CDEV_SCHED_MODE_NOT_SET,
@@ -464,9 +565,8 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
cryptodev_scheduler_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int> "
"slave=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
- cryptodev_scheduler_pmd_drv,
+ cryptodev_scheduler_pmd_drv.driver,
cryptodev_driver_id);
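
parse_coremask_arg() above now expands the hex coremask string nibble by nibble into the wc_pool[] worker-core list instead of keeping a 64-bit mask, so more than 64 lcores can be addressed. A self-contained sketch of the same decoding follows (the helper name and caller-provided output array are assumptions); for example "0xf0" yields worker cores 4, 5, 6 and 7:

#include <ctype.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-alone version of the nibble decoding above. */
static int
coremask_to_cores(const char *mask, uint16_t *cores, uint16_t max_cores)
{
	uint16_t idx = 0, n = 0;
	int i, j, val;

	if (mask[0] == '0' && (mask[1] == 'x' || mask[1] == 'X'))
		mask += 2;

	for (i = (int)strlen(mask) - 1; i >= 0 && idx < max_cores; i--) {
		char c = mask[i];

		if (!isxdigit((unsigned char)c))
			return -1;	/* invalid character */
		val = isdigit((unsigned char)c) ? c - '0' :
				tolower((unsigned char)c) - 'a' + 10;

		/* each hex digit covers four consecutive core ids */
		for (j = 0; j < 4 && idx < max_cores; j++, idx++)
			if (val & (1 << j))
				cores[n++] = idx;
	}
	return n;	/* number of worker cores found */
}
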
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 680c2afb..778071ca 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -27,7 +27,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
int status;
if (!slave_dev) {
- CS_LOG_ERR("Failed to locate slave dev %s",
+ CR_SCHED_LOG(ERR, "Failed to locate slave dev %s",
dev_name);
return -EINVAL;
}
@@ -36,16 +36,17 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
scheduler_id, slave_dev->data->dev_id);
if (status < 0) {
- CS_LOG_ERR("Failed to attach slave cryptodev %u",
+ CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u",
slave_dev->data->dev_id);
return status;
}
- CS_LOG_INFO("Scheduler %s attached slave %s\n",
+ CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s",
dev->data->name,
sched_ctx->init_slave_names[i]);
rte_free(sched_ctx->init_slave_names[i]);
+ sched_ctx->init_slave_names[i] = NULL;
sched_ctx->nb_init_slaves -= 1;
}
@@ -101,7 +102,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
dev->data->dev_id, qp_id) < 0) {
- CS_LOG_ERR("failed to create unique reorder buffer "
+ CR_SCHED_LOG(ERR, "failed to create unique reorder buffer"
"name");
return -ENOMEM;
}
@@ -110,7 +111,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
buff_size, rte_socket_id(),
RING_F_SP_ENQ | RING_F_SC_DEQ);
if (!qp_ctx->order_ring) {
- CS_LOG_ERR("failed to create order ring");
+ CR_SCHED_LOG(ERR, "failed to create order ring");
return -ENOMEM;
}
} else {
@@ -144,18 +145,18 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
ret = update_order_ring(dev, i);
if (ret < 0) {
- CS_LOG_ERR("Failed to update reorder buffer");
+ CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
return ret;
}
}
if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
- CS_LOG_ERR("Scheduler mode is not set");
+ CR_SCHED_LOG(ERR, "Scheduler mode is not set");
return -1;
}
if (!sched_ctx->nb_slaves) {
- CS_LOG_ERR("No slave in the scheduler");
+ CR_SCHED_LOG(ERR, "No slave in the scheduler");
return -1;
}
@@ -165,7 +166,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
- CS_LOG_ERR("Failed to attach slave");
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
return -ENOTSUP;
}
}
@@ -173,7 +174,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
- CS_LOG_ERR("Scheduler start failed");
+ CR_SCHED_LOG(ERR, "Scheduler start failed");
return -1;
}
@@ -185,7 +186,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
if (ret < 0) {
- CS_LOG_ERR("Failed to start slave dev %u",
+ CR_SCHED_LOG(ERR, "Failed to start slave dev %u",
slave_dev_id);
return ret;
}
@@ -261,11 +262,15 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
}
}
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
- if (sched_ctx->capabilities)
+ if (sched_ctx->capabilities) {
rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
return 0;
}
@@ -316,8 +321,9 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info *dev_info)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
- uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
- UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
+ uint32_t max_nb_sess = 0;
+ uint16_t headroom_sz = 0;
+ uint16_t tailroom_sz = 0;
uint32_t i;
if (!dev_info)
@@ -333,17 +339,32 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info slave_info;
rte_cryptodev_info_get(slave_dev_id, &slave_info);
- max_nb_sessions = slave_info.sym.max_nb_sessions <
- max_nb_sessions ?
- slave_info.sym.max_nb_sessions :
- max_nb_sessions;
+ uint32_t dev_max_sess = slave_info.sym.max_nb_sessions;
+ if (dev_max_sess != 0) {
+ if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
+ max_nb_sess = slave_info.sym.max_nb_sessions;
+ }
+
+ /* Get the max headroom requirement among slave PMDs */
+ headroom_sz = slave_info.min_mbuf_headroom_req >
+ headroom_sz ?
+ slave_info.min_mbuf_headroom_req :
+ headroom_sz;
+
+ /* Get the max tailroom requirement among slave PMDs */
+ tailroom_sz = slave_info.min_mbuf_tailroom_req >
+ tailroom_sz ?
+ slave_info.min_mbuf_tailroom_req :
+ tailroom_sz;
}
dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = sched_ctx->capabilities;
dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = max_nb_sessions;
+ dev_info->min_mbuf_headroom_req = headroom_sz;
+ dev_info->min_mbuf_tailroom_req = tailroom_sz;
+ dev_info->sym.max_nb_sessions = max_nb_sess;
}
/** Release queue pair */
@@ -381,7 +402,7 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
"CRYTO_SCHE PMD %u QP %u",
dev->data->dev_id, qp_id) < 0) {
- CS_LOG_ERR("Failed to create unique queue pair name");
+ CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
return -EFAULT;
}
@@ -419,14 +440,14 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
*/
ret = scheduler_attach_init_slave(dev);
if (ret < 0) {
- CS_LOG_ERR("Failed to attach slave");
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
scheduler_pmd_qp_release(dev, qp_id);
return ret;
}
if (*sched_ctx->ops.config_queue_pair) {
if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
- CS_LOG_ERR("Unable to configure queue pair");
+ CR_SCHED_LOG(ERR, "Unable to configure queue pair");
return -1;
}
}
@@ -434,22 +455,6 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return 0;
}
-/** Start queue pair */
-static int
-scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
@@ -458,7 +463,7 @@ scheduler_pmd_qp_count(struct rte_cryptodev *dev)
}
static uint32_t
-scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint8_t i = 0;
@@ -468,7 +473,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
- uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+ uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
if (max_priv_sess_size < priv_sess_size)
max_priv_sess_size = priv_sess_size;
@@ -478,7 +483,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
static int
-scheduler_pmd_session_configure(struct rte_cryptodev *dev,
+scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -493,7 +498,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev,
ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
xform, mempool);
if (ret < 0) {
- CS_LOG_ERR("unabled to config sym session");
+ CR_SCHED_LOG(ERR, "unable to config sym session");
return ret;
}
}
@@ -503,7 +508,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-scheduler_pmd_session_clear(struct rte_cryptodev *dev,
+scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
@@ -530,13 +535,11 @@ struct rte_cryptodev_ops scheduler_pmd_ops = {
.queue_pair_setup = scheduler_pmd_qp_setup,
.queue_pair_release = scheduler_pmd_qp_release,
- .queue_pair_start = scheduler_pmd_qp_start,
- .queue_pair_stop = scheduler_pmd_qp_stop,
.queue_pair_count = scheduler_pmd_qp_count,
- .session_get_size = scheduler_pmd_session_get_size,
- .session_configure = scheduler_pmd_session_configure,
- .session_clear = scheduler_pmd_session_clear,
+ .sym_session_get_size = scheduler_pmd_sym_session_get_size,
+ .sym_session_configure = scheduler_pmd_sym_session_configure,
+ .sym_session_clear = scheduler_pmd_sym_session_clear,
};
struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index dd7ca5a4..d5e602a2 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -12,25 +12,11 @@
#define PER_SLAVE_BUFF_SIZE (256)
-#define CS_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG
-#define CS_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
- __func__, __LINE__, ## args)
-
-#define CS_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define CS_LOG_INFO(fmt, args...)
-#define CS_LOG_DBG(fmt, args...)
-#endif
+extern int scheduler_logtype_driver;
+
+#define CR_SCHED_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, ##args)
struct scheduler_slave {
uint8_t dev_id;
@@ -60,7 +46,7 @@ struct scheduler_ctx {
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
- uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ uint16_t wc_pool[RTE_MAX_LCORE];
uint16_t nb_wc;
char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index c6e03e21..c7082a64 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -175,7 +175,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
rte_socket_id());
if (!rr_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}