Diffstat (limited to 'drivers/crypto/scheduler')
-rw-r--r--  drivers/crypto/scheduler/Makefile                              |   1
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.c             |  56
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.h             |  42
-rw-r--r--  drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map |   2
-rw-r--r--  drivers/crypto/scheduler/scheduler_failover.c                  |  47
-rw-r--r--  drivers/crypto/scheduler/scheduler_multicore.c                 | 415
-rw-r--r--  drivers/crypto/scheduler/scheduler_pkt_size_distr.c            |  18
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c                       | 118
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_ops.c                   |  83
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_private.h               |  19
-rw-r--r--  drivers/crypto/scheduler/scheduler_roundrobin.c                |  41
11 files changed, 604 insertions, 238 deletions
diff --git a/drivers/crypto/scheduler/Makefile b/drivers/crypto/scheduler/Makefile
index c273e784..b045410c 100644
--- a/drivers/crypto/scheduler/Makefile
+++ b/drivers/crypto/scheduler/Makefile
@@ -56,5 +56,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pkt_size_distr.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_multicore.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 319dcf0a..df8634ad 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -198,7 +198,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
         return -ENOTSUP;
     }
 
-    if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+    if (dev->driver_id != cryptodev_driver_id) {
         CS_LOG_ERR("Operation not supported");
         return -ENOTSUP;
     }
@@ -226,12 +226,12 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
     rte_cryptodev_info_get(slave_id, &dev_info);
 
     slave->dev_id = slave_id;
-    slave->dev_type = dev_info.dev_type;
+    slave->driver_id = dev_info.driver_id;
     sched_ctx->nb_slaves++;
 
     if (update_scheduler_capability(sched_ctx) < 0) {
         slave->dev_id = 0;
-        slave->dev_type = 0;
+        slave->driver_id = 0;
         sched_ctx->nb_slaves--;
 
         CS_LOG_ERR("capabilities update failed");
@@ -257,7 +257,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
         return -ENOTSUP;
     }
 
-    if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+    if (dev->driver_id != cryptodev_driver_id) {
         CS_LOG_ERR("Operation not supported");
         return -ENOTSUP;
     }
@@ -314,7 +314,7 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
         return -ENOTSUP;
     }
 
-    if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+    if (dev->driver_id != cryptodev_driver_id) {
         CS_LOG_ERR("Operation not supported");
         return -ENOTSUP;
     }
@@ -351,6 +351,13 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
             return -1;
         }
         break;
+    case CDEV_SCHED_MODE_MULTICORE:
+        if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+                multicore_scheduler) < 0) {
+            CS_LOG_ERR("Failed to load scheduler");
+            return -1;
+        }
+        break;
     default:
         CS_LOG_ERR("Not yet supported");
         return -ENOTSUP;
@@ -359,13 +366,6 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
     return 0;
 }
 
-int
-rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
-        enum rte_cryptodev_scheduler_mode mode)
-{
-    return rte_cryptodev_scheduler_mode_set(scheduler_id, mode);
-}
-
 enum rte_cryptodev_scheduler_mode
 rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
 {
@@ -377,7 +377,7 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
         return -ENOTSUP;
     }
 
-    if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+    if (dev->driver_id != cryptodev_driver_id) {
         CS_LOG_ERR("Operation not supported");
         return -ENOTSUP;
     }
@@ -387,12 +387,6 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
     return sched_ctx->mode;
 }
 
-enum rte_cryptodev_scheduler_mode
-rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id)
-{
-    return rte_cryptodev_scheduler_mode_get(scheduler_id);
-}
-
 int
 rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
         uint32_t enable_reorder)
@@ -405,7 +399,7 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
         return -ENOTSUP;
     }
 
-    if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+    if (dev->driver_id != cryptodev_driver_id) {
         CS_LOG_ERR("Operation not supported");
         return -ENOTSUP;
     }
@@ -433,7 +427,7 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
         return -ENOTSUP;
     }
 
-    if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+    if (dev->driver_id != cryptodev_driver_id) {
         CS_LOG_ERR("Operation not supported");
         return -ENOTSUP;
     }
@@ -455,7 +449,7 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
         return -ENOTSUP;
     }
 
-    if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+    if (dev->driver_id != cryptodev_driver_id) {
         CS_LOG_ERR("Operation not supported");
         return -ENOTSUP;
     }
@@ -467,8 +461,22 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
 
     sched_ctx = dev->data->dev_private;
 
+    if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+        CS_LOG_ERR("Invalid name %s, should be less than "
+                "%u bytes.\n", scheduler->name,
+                RTE_CRYPTODEV_NAME_MAX_LEN);
+        return -EINVAL;
+    }
     strncpy(sched_ctx->name, scheduler->name,
             RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+    if (strlen(scheduler->description) >
+            RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
+        CS_LOG_ERR("Invalid description %s, should be less than "
+                "%u bytes.\n", scheduler->description,
+                RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
+        return -EINVAL;
+    }
     strncpy(sched_ctx->description, scheduler->description,
             RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
 
@@ -512,7 +520,7 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
         return -ENOTSUP;
     }
 
-    if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+    if (dev->driver_id != cryptodev_driver_id) {
         CS_LOG_ERR("Operation not supported");
         return -ENOTSUP;
     }
@@ -580,7 +588,7 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
         return -EINVAL;
     }
 
-    if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+    if (dev->driver_id != cryptodev_driver_id) {
         CS_LOG_ERR("Operation not supported");
         return -ENOTSUP;
     }
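Note: besides the vdev argument string, the scheduling mode can be chosen at runtime through the API touched above. A minimal sketch, assuming the scheduler vdev and its slaves already exist and that the vdev was created with a coremask/corelist argument so that multi-core mode has worker cores to run on; the device ids are placeholders, not part of the patch:

#include <rte_cryptodev_scheduler.h>

/* Attach each slave, then select multi-core mode before the device is
 * started; both calls are the public API shown in this diff. */
static int
setup_multicore_scheduler(uint8_t scheduler_id, const uint8_t *slave_ids,
        unsigned int nb_slaves)
{
    unsigned int i;

    for (i = 0; i < nb_slaves; i++)
        if (rte_cryptodev_scheduler_slave_attach(scheduler_id,
                slave_ids[i]) < 0)
            return -1;

    /* Fails with -ENOTSUP/-EBUSY on the error paths seen above. */
    return rte_cryptodev_scheduler_mode_set(scheduler_id,
            CDEV_SCHED_MODE_MULTICORE);
}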
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 2ba6e470..df22f2a9 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -58,12 +58,17 @@ extern "C" {
 #define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES    (8)
 #endif
 
+/** Maximum number of multi-core worker cores */
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES    (64)
+
 /** Round-robin scheduling mode string */
 #define SCHEDULER_MODE_NAME_ROUND_ROBIN    round-robin
 /** Packet-size based distribution scheduling mode string */
 #define SCHEDULER_MODE_NAME_PKT_SIZE_DISTR    packet-size-distr
 /** Fail-over scheduling mode string */
 #define SCHEDULER_MODE_NAME_FAIL_OVER    fail-over
+/** multi-core scheduling mode string */
+#define SCHEDULER_MODE_NAME_MULTI_CORE    multi-core
 
 /**
  * Crypto scheduler PMD operation modes
@@ -78,6 +83,8 @@ enum rte_cryptodev_scheduler_mode {
     CDEV_SCHED_MODE_PKT_SIZE_DISTR,
     /** Fail-over mode */
     CDEV_SCHED_MODE_FAILOVER,
+    /** multi-core mode */
+    CDEV_SCHED_MODE_MULTICORE,
 
     CDEV_SCHED_MODE_COUNT /**< number of modes */
 };
@@ -116,6 +123,7 @@ struct rte_cryptodev_scheduler;
  *  - 0 if the scheduler is successfully loaded
  *  - -ENOTSUP if the operation is not supported.
  *  - -EBUSY if device is started.
+ *  - -EINVAL if input values are invalid.
  */
 int
 rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
@@ -186,38 +194,6 @@ enum rte_cryptodev_scheduler_mode
 rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id);
 
 /**
- * @deprecated
- * Set the scheduling mode
- *
- * @param scheduler_id
- *   The target scheduler device ID
- * @param mode
- *   The scheduling mode
- *
- * @return
- *   0 if attaching successful, negative integer if otherwise.
- */
-__rte_deprecated
-int
-rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
-        enum rte_cryptodev_scheduler_mode mode);
-
-/**
- * @deprecated
- * Get the current scheduling mode
- *
- * @param scheduler_id
- *   The target scheduler device ID
- *
- * @return
- *   If successful, returns the scheduling mode, negative integer
- *   otherwise
- */
-__rte_deprecated
-enum rte_cryptodev_scheduler_mode
-rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id);
-
-/**
  * Set the crypto ops reordering feature on/off
  *
  * @param scheduler_id
@@ -327,6 +303,8 @@ extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
 extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
 /** Fail-over mode scheduler */
 extern struct rte_cryptodev_scheduler *failover_scheduler;
+/** multi-core mode scheduler */
+extern struct rte_cryptodev_scheduler *multicore_scheduler;
 
 #ifdef __cplusplus
 }
diff --git a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
index 0a8b4716..5c43127c 100644
--- a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
+++ b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
@@ -4,8 +4,6 @@ DPDK_17.02 {
     rte_cryptodev_scheduler_load_user_scheduler;
     rte_cryptodev_scheduler_slave_attach;
     rte_cryptodev_scheduler_slave_detach;
-    rte_crpytodev_scheduler_mode_set;
-    rte_crpytodev_scheduler_mode_get;
     rte_cryptodev_scheduler_ordering_set;
     rte_cryptodev_scheduler_ordering_get;
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 2471a5f1..2aa13f8e 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -48,58 +48,19 @@ struct fo_scheduler_qp_ctx {
     uint8_t deq_idx;
 };
 
-static inline uint16_t __attribute__((always_inline))
-failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
+static __rte_always_inline uint16_t
+failover_slave_enqueue(struct scheduler_slave *slave,
         struct rte_crypto_op **ops, uint16_t nb_ops)
 {
     uint16_t i, processed_ops;
-    struct rte_cryptodev_sym_session *sessions[nb_ops];
-    struct scheduler_session *sess0, *sess1, *sess2, *sess3;
 
     for (i = 0; i < nb_ops && i < 4; i++)
         rte_prefetch0(ops[i]->sym->session);
 
-    for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
-        rte_prefetch0(ops[i + 4]->sym->session);
-        rte_prefetch0(ops[i + 5]->sym->session);
-        rte_prefetch0(ops[i + 6]->sym->session);
-        rte_prefetch0(ops[i + 7]->sym->session);
-
-        sess0 = (struct scheduler_session *)
-                ops[i]->sym->session->_private;
-        sess1 = (struct scheduler_session *)
-                ops[i+1]->sym->session->_private;
-        sess2 = (struct scheduler_session *)
-                ops[i+2]->sym->session->_private;
-        sess3 = (struct scheduler_session *)
-                ops[i+3]->sym->session->_private;
-
-        sessions[i] = ops[i]->sym->session;
-        sessions[i + 1] = ops[i + 1]->sym->session;
-        sessions[i + 2] = ops[i + 2]->sym->session;
-        sessions[i + 3] = ops[i + 3]->sym->session;
-
-        ops[i]->sym->session = sess0->sessions[slave_idx];
-        ops[i + 1]->sym->session = sess1->sessions[slave_idx];
-        ops[i + 2]->sym->session = sess2->sessions[slave_idx];
-        ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-    }
-
-    for (; i < nb_ops; i++) {
-        sess0 = (struct scheduler_session *)
-                ops[i]->sym->session->_private;
-        sessions[i] = ops[i]->sym->session;
-        ops[i]->sym->session = sess0->sessions[slave_idx];
-    }
-
     processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
             slave->qp_id, ops, nb_ops);
     slave->nb_inflight_cops += processed_ops;
 
-    if (unlikely(processed_ops < nb_ops))
-        for (i = processed_ops; i < nb_ops; i++)
-            ops[i]->sym->session = sessions[i];
-
     return processed_ops;
 }
 
@@ -114,11 +75,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
         return 0;
 
     enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
-            PRIMARY_SLAVE_IDX, ops, nb_ops);
+            ops, nb_ops);
 
     if (enqueued_ops < nb_ops)
         enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
-                SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
+                &ops[enqueued_ops],
                 nb_ops - enqueued_ops);
 
     return enqueued_ops;
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
new file mode 100644
index 00000000..0cd5bce5
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -0,0 +1,415 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <unistd.h>
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define MC_SCHED_ENQ_RING_NAME_PREFIX    "MCS_ENQR_"
+#define MC_SCHED_DEQ_RING_NAME_PREFIX    "MCS_DEQR_"
+
+#define MC_SCHED_BUFFER_SIZE    32
+
+#define CRYPTO_OP_STATUS_BIT_COMPLETE    0x80
+
+/** multi-core scheduler context */
+struct mc_scheduler_ctx {
+    uint32_t num_workers;             /**< Number of workers polling */
+    uint32_t stop_signal;
+
+    struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+    struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+};
+
+struct mc_scheduler_qp_ctx {
+    struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+    uint32_t nb_slaves;
+
+    uint32_t last_enq_worker_idx;
+    uint32_t last_deq_worker_idx;
+
+    struct mc_scheduler_ctx *mc_private_ctx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+    struct mc_scheduler_qp_ctx *mc_qp_ctx =
+            ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+    struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+    uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
+    uint16_t i, processed_ops = 0;
+
+    if (unlikely(nb_ops == 0))
+        return 0;
+
+    for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+        struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+        uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
+            (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+        nb_ops -= nb_queue_ops;
+        processed_ops += nb_queue_ops;
+
+        if (++worker_idx == mc_ctx->num_workers)
+            worker_idx = 0;
+    }
+    mc_qp_ctx->last_enq_worker_idx = worker_idx;
+
+    return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+        uint16_t nb_ops)
+{
+    struct rte_ring *order_ring =
+            ((struct scheduler_qp_ctx *)qp)->order_ring;
+    uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+            nb_ops);
+    uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+            nb_ops_to_enq);
+
+    scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+    return nb_ops_enqd;
+}
+
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+    struct mc_scheduler_qp_ctx *mc_qp_ctx =
+            ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+    struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+    uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
+    uint16_t i, processed_ops = 0;
+
+    for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+        struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+        uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
+            (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+        nb_ops -= nb_deq_ops;
+        processed_ops += nb_deq_ops;
+        if (++worker_idx == mc_ctx->num_workers)
+            worker_idx = 0;
+    }
+
+    mc_qp_ctx->last_deq_worker_idx = worker_idx;
+
+    return processed_ops;
+
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+        uint16_t nb_ops)
+{
+    struct rte_ring *order_ring =
+            ((struct scheduler_qp_ctx *)qp)->order_ring;
+    struct rte_crypto_op *op;
+    uint32_t nb_objs = rte_ring_count(order_ring);
+    uint32_t nb_ops_to_deq = 0;
+    uint32_t nb_ops_deqd = 0;
+
+    if (nb_objs > nb_ops)
+        nb_objs = nb_ops;
+
+    while (nb_ops_to_deq < nb_objs) {
+        SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+
+        if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
+            break;
+
+        op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
+        nb_ops_to_deq++;
+    }
+
+    if (nb_ops_to_deq) {
+        nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+                (void **)ops, nb_ops_to_deq, NULL);
+    }
+
+    return nb_ops_deqd;
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+        __rte_unused uint8_t slave_id)
+{
+    return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+        __rte_unused uint8_t slave_id)
+{
+    return 0;
+}
+
+static int
+mc_scheduler_worker(struct rte_cryptodev *dev)
+{
+    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+    struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+    struct rte_ring *enq_ring;
+    struct rte_ring *deq_ring;
+    uint32_t core_id = rte_lcore_id();
+    int i, worker_idx = -1;
+    struct scheduler_slave *slave;
+    struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
+    struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
+    uint16_t processed_ops;
+    uint16_t pending_enq_ops = 0;
+    uint16_t pending_enq_ops_idx = 0;
+    uint16_t pending_deq_ops = 0;
+    uint16_t pending_deq_ops_idx = 0;
+    uint16_t inflight_ops = 0;
+    const uint8_t reordering_enabled = sched_ctx->reordering_enabled;
+
+    for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
+        if (sched_ctx->wc_pool[i] == core_id) {
+            worker_idx = i;
+            break;
+        }
+    }
+    if (worker_idx == -1) {
+        CS_LOG_ERR("worker on core %u:cannot find worker index!\n",
+                core_id);
+        return -1;
+    }
+
+    slave = &sched_ctx->slaves[worker_idx];
+    enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+    deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+
+    while (!mc_ctx->stop_signal) {
+        if (pending_enq_ops) {
+            processed_ops =
+                rte_cryptodev_enqueue_burst(slave->dev_id,
+                    slave->qp_id, &enq_ops[pending_enq_ops_idx],
+                    pending_enq_ops);
+            pending_enq_ops -= processed_ops;
+            pending_enq_ops_idx += processed_ops;
+            inflight_ops += processed_ops;
+        } else {
+            processed_ops = rte_ring_dequeue_burst(enq_ring,
+                    (void *)enq_ops, MC_SCHED_BUFFER_SIZE, NULL);
+            if (processed_ops) {
+                pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
+                        slave->dev_id, slave->qp_id,
+                        enq_ops, processed_ops);
+                pending_enq_ops = processed_ops -
+                        pending_enq_ops_idx;
+                inflight_ops += pending_enq_ops_idx;
+            }
+        }
+
+        if (pending_deq_ops) {
+            processed_ops = rte_ring_enqueue_burst(
+                    deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
+                    pending_deq_ops, NULL);
+            pending_deq_ops -= processed_ops;
+            pending_deq_ops_idx += processed_ops;
+        } else if (inflight_ops) {
+            processed_ops = rte_cryptodev_dequeue_burst(
+                    slave->dev_id, slave->qp_id,
+                    deq_ops, MC_SCHED_BUFFER_SIZE);
+            if (processed_ops) {
+                inflight_ops -= processed_ops;
+                if (reordering_enabled) {
+                    uint16_t j;
+
+                    for (j = 0; j < processed_ops; j++) {
+                        deq_ops[j]->status |=
+                            CRYPTO_OP_STATUS_BIT_COMPLETE;
+                    }
+                } else {
+                    pending_deq_ops_idx = rte_ring_enqueue_burst(
+                            deq_ring, (void *)deq_ops,
+                            processed_ops, NULL);
+                    pending_deq_ops = processed_ops -
+                            pending_deq_ops_idx;
+                }
+            }
+        }
+
+        rte_pause();
+    }
+
+    return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+    struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+    uint16_t i;
+
+    mc_ctx->stop_signal = 0;
+
+    for (i = 0; i < sched_ctx->nb_wc; i++)
+        rte_eal_remote_launch(
+            (lcore_function_t *)mc_scheduler_worker, dev,
+            sched_ctx->wc_pool[i]);
+
+    if (sched_ctx->reordering_enabled) {
+        dev->enqueue_burst = &schedule_enqueue_ordering;
+        dev->dequeue_burst = &schedule_dequeue_ordering;
+    } else {
+        dev->enqueue_burst = &schedule_enqueue;
+        dev->dequeue_burst = &schedule_dequeue;
+    }
+
+    for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+        struct mc_scheduler_qp_ctx *mc_qp_ctx =
+                qp_ctx->private_qp_ctx;
+        uint32_t j;
+
+        memset(mc_qp_ctx->slaves, 0,
+                RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+                sizeof(struct scheduler_slave));
+        for (j = 0; j < sched_ctx->nb_slaves; j++) {
+            mc_qp_ctx->slaves[j].dev_id =
+                    sched_ctx->slaves[j].dev_id;
+            mc_qp_ctx->slaves[j].qp_id = i;
+        }
+
+        mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+        mc_qp_ctx->last_enq_worker_idx = 0;
+        mc_qp_ctx->last_deq_worker_idx = 0;
+    }
+
+    return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+    struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+    uint16_t i;
+
+    mc_ctx->stop_signal = 1;
+
+    for (i = 0; i < sched_ctx->nb_wc; i++)
+        rte_eal_wait_lcore(sched_ctx->wc_pool[i]);
+
+    return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+    struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+    struct mc_scheduler_qp_ctx *mc_qp_ctx;
+    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+    struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+
+    mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
+            rte_socket_id());
+    if (!mc_qp_ctx) {
+        CS_LOG_ERR("failed allocate memory for private queue pair");
+        return -ENOMEM;
+    }
+
+    mc_qp_ctx->mc_private_ctx = mc_ctx;
+    qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;
+
+
+    return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+    struct mc_scheduler_ctx *mc_ctx;
+    uint16_t i;
+
+    if (sched_ctx->private_ctx)
+        rte_free(sched_ctx->private_ctx);
+
+    mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
+            rte_socket_id());
+    if (!mc_ctx) {
+        CS_LOG_ERR("failed allocate memory");
+        return -ENOMEM;
+    }
+
+    mc_ctx->num_workers = sched_ctx->nb_wc;
+    for (i = 0; i < sched_ctx->nb_wc; i++) {
+        char r_name[16];
+
+        snprintf(r_name, sizeof(r_name),
+                MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
+        mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+                PER_SLAVE_BUFF_SIZE, rte_socket_id(),
+                RING_F_SC_DEQ | RING_F_SP_ENQ);
+        if (!mc_ctx->sched_enq_ring[i]) {
+            CS_LOG_ERR("Cannot create ring for worker %u", i);
+            return -1;
+        }
+        snprintf(r_name, sizeof(r_name),
+                MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
+        mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+                PER_SLAVE_BUFF_SIZE, rte_socket_id(),
+                RING_F_SC_DEQ | RING_F_SP_ENQ);
+        if (!mc_ctx->sched_deq_ring[i]) {
+            CS_LOG_ERR("Cannot create ring for worker %u", i);
+            return -1;
+        }
+    }
+
+    sched_ctx->private_ctx = (void *)mc_ctx;
+
+    return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+    slave_attach,
+    slave_detach,
+    scheduler_start,
+    scheduler_stop,
+    scheduler_config_qp,
+    scheduler_create_private_ctx,
+    NULL,    /* option_set */
+    NULL    /* option_get */
+};
+
+struct rte_cryptodev_scheduler mc_scheduler = {
+        .name = "multicore-scheduler",
+        .description = "scheduler which will run burst across "
+                "multiple cpu cores",
+        .mode = CDEV_SCHED_MODE_MULTICORE,
+        .ops = &scheduler_mc_ops
+};
+
+struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
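Note: in the new mode above, each worker lcore owns one enqueue ring, one dequeue ring and one slave queue pair, and the scheduler's data-path burst functions only move ops between the caller and those rings. A minimal usage sketch, assuming the rte_vdev_init() API of this DPDK generation; the slave names are placeholders for already-created slave devices:

#include <rte_vdev.h>

/* Equivalent to the EAL option:
 *   --vdev "crypto_scheduler,slave=crypto_aesni_mb_1,
 *           slave=crypto_aesni_mb_2,mode=multi-core,coremask=0x3c"
 * Cores 2-5 (mask 0x3c) must also be in the EAL core list, since
 * scheduler_start() hands each of them to rte_eal_remote_launch(). */
static int
create_multicore_scheduler(void)
{
    return rte_vdev_init("crypto_scheduler",
            "slave=crypto_aesni_mb_1,slave=crypto_aesni_mb_2,"
            "mode=multi-core,coremask=0x3c");
}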
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 6b628dfa..1dd1bc32 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -67,7 +67,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
     struct scheduler_qp_ctx *qp_ctx = qp;
     struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
     struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
-    struct scheduler_session *sess;
     uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
         psd_qp_ctx->primary_slave.nb_inflight_cops,
         psd_qp_ctx->secondary_slave.nb_inflight_cops
@@ -97,8 +96,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
         rte_prefetch0(ops[i + 7]->sym);
         rte_prefetch0(ops[i + 7]->sym->session);
 
-        sess = (struct scheduler_session *)
-                ops[i]->sym->session->_private;
         /* job_len is initialized as cipher data length, once
          * it is 0, equals to auth data length
          */
@@ -118,11 +115,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
         }
 
         sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
-        ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
         p_enq_op->pos++;
 
-        sess = (struct scheduler_session *)
-                ops[i+1]->sym->session->_private;
         job_len = ops[i+1]->sym->cipher.data.length;
         job_len += (ops[i+1]->sym->cipher.data.length == 0) *
                 ops[i+1]->sym->auth.data.length;
@@ -135,11 +129,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
         }
 
         sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
-        ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
         p_enq_op->pos++;
 
-        sess = (struct scheduler_session *)
-                ops[i+2]->sym->session->_private;
         job_len = ops[i+2]->sym->cipher.data.length;
         job_len += (ops[i+2]->sym->cipher.data.length == 0) *
                 ops[i+2]->sym->auth.data.length;
@@ -152,12 +143,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
         }
 
         sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
-        ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
         p_enq_op->pos++;
 
-        sess = (struct scheduler_session *)
-                ops[i+3]->sym->session->_private;
-
         job_len = ops[i+3]->sym->cipher.data.length;
         job_len += (ops[i+3]->sym->cipher.data.length == 0) *
                 ops[i+3]->sym->auth.data.length;
@@ -170,14 +157,10 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
         }
 
         sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
-        ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
         p_enq_op->pos++;
     }
 
     for (; i < nb_ops; i++) {
-        sess = (struct scheduler_session *)
-                ops[i]->sym->session->_private;
-
         job_len = ops[i]->sym->cipher.data.length;
         job_len += (ops[i]->sym->cipher.data.length == 0) *
                 ops[i]->sym->auth.data.length;
@@ -190,7 +173,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
         }
 
         sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
-        ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
         p_enq_op->pos++;
     }
 
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 0b63c20b..400fc4f1 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -33,6 +33,7 @@
 #include <rte_hexdump.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
 #include <rte_vdev.h>
 #include <rte_malloc.h>
 #include <rte_cpuflags.h>
@@ -41,11 +42,14 @@
 #include "rte_cryptodev_scheduler.h"
 #include "scheduler_pmd_private.h"
 
+uint8_t cryptodev_driver_id;
+
 struct scheduler_init_params {
     struct rte_crypto_vdev_init_params def_p;
     uint32_t nb_slaves;
     enum rte_cryptodev_scheduler_mode mode;
     uint32_t enable_ordering;
+    uint64_t wcmask;
     char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
             [RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 };
@@ -57,6 +61,8 @@ struct scheduler_init_params {
 #define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG    ("max_nb_queue_pairs")
 #define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG    ("max_nb_sessions")
 #define RTE_CRYPTODEV_VDEV_SOCKET_ID        ("socket_id")
+#define RTE_CRYPTODEV_VDEV_COREMASK        ("coremask")
+#define RTE_CRYPTODEV_VDEV_CORELIST        ("corelist")
 
 const char *scheduler_valid_params[] = {
     RTE_CRYPTODEV_VDEV_NAME,
@@ -65,7 +71,9 @@ const char *scheduler_valid_params[] = {
     RTE_CRYPTODEV_VDEV_ORDERING,
     RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
     RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
-    RTE_CRYPTODEV_VDEV_SOCKET_ID
+    RTE_CRYPTODEV_VDEV_SOCKET_ID,
+    RTE_CRYPTODEV_VDEV_COREMASK,
+    RTE_CRYPTODEV_VDEV_CORELIST
 };
 
 struct scheduler_parse_map {
@@ -79,7 +87,9 @@ const struct scheduler_parse_map scheduler_mode_map[] = {
     {RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
             CDEV_SCHED_MODE_PKT_SIZE_DISTR},
     {RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
-            CDEV_SCHED_MODE_FAILOVER}
+            CDEV_SCHED_MODE_FAILOVER},
+    {RTE_STR(SCHEDULER_MODE_NAME_MULTI_CORE),
+            CDEV_SCHED_MODE_MULTICORE}
 };
 
 const struct scheduler_parse_map scheduler_ordering_map[] = {
@@ -89,7 +99,8 @@ const struct scheduler_parse_map scheduler_ordering_map[] = {
 
 static int
 cryptodev_scheduler_create(const char *name,
-    struct scheduler_init_params *init_params)
+        struct rte_vdev_device *vdev,
+        struct scheduler_init_params *init_params)
 {
     struct rte_cryptodev *dev;
     struct scheduler_ctx *sched_ctx;
@@ -101,22 +112,38 @@ cryptodev_scheduler_create(const char *name,
             sizeof(init_params->def_p.name),
             "%s", name);
 
-    dev = rte_cryptodev_pmd_virtual_dev_init(init_params->def_p.name,
+    dev = rte_cryptodev_vdev_pmd_init(init_params->def_p.name,
             sizeof(struct scheduler_ctx),
-            init_params->def_p.socket_id);
+            init_params->def_p.socket_id,
+            vdev);
     if (dev == NULL) {
         CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
             name);
         return -EFAULT;
     }
 
-    dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
+    dev->driver_id = cryptodev_driver_id;
     dev->dev_ops = rte_crypto_scheduler_pmd_ops;
 
     sched_ctx = dev->data->dev_private;
     sched_ctx->max_nb_queue_pairs =
             init_params->def_p.max_nb_queue_pairs;
 
+    if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
+        uint16_t i;
+
+        sched_ctx->nb_wc = 0;
+
+        for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
+            if (init_params->wcmask & (1ULL << i)) {
+                sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
+                RTE_LOG(INFO, PMD,
+                    "  Worker core[%u]=%u added\n",
+                    sched_ctx->nb_wc-1, i);
+            }
+        }
+    }
+
     if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
             init_params->mode < CDEV_SCHED_MODE_COUNT) {
         ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
@@ -219,22 +246,6 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
     return 0;
 }
 
-static uint8_t
-number_of_sockets(void)
-{
-    int sockets = 0;
-    int i;
-    const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-
-    for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
-        if (sockets < ms[i].socket_id)
-            sockets = ms[i].socket_id;
-    }
-
-    /* Number of sockets = maximum socket_id + 1 */
-    return ++sockets;
-}
-
 /** Parse integer from integer argument */
 static int
 parse_integer_arg(const char *key __rte_unused,
@@ -251,6 +262,43 @@ parse_integer_arg(const char *key __rte_unused,
     return 0;
 }
 
+/** Parse integer from hexadecimal integer argument */
+static int
+parse_coremask_arg(const char *key __rte_unused,
+        const char *value, void *extra_args)
+{
+    struct scheduler_init_params *params = extra_args;
+
+    params->wcmask = strtoull(value, NULL, 16);
+
+    return 0;
+}
+
+/** Parse integer from list of integers argument */
+static int
+parse_corelist_arg(const char *key __rte_unused,
+        const char *value, void *extra_args)
+{
+    struct scheduler_init_params *params = extra_args;
+
+    params->wcmask = 0ULL;
+
+    const char *token = value;
+
+    while (isdigit(token[0])) {
+        char *rval;
+        unsigned int core = strtoul(token, &rval, 10);
+
+        params->wcmask |= 1ULL << core;
+        token = (const char *)rval;
+        if (token[0] == '\0')
+            break;
+        token++;
+    }
+
+    return 0;
+}
+
 /** Parse name */
 static int
 parse_name_arg(const char *key __rte_unused,
@@ -277,7 +325,7 @@ parse_slave_arg(const char *key __rte_unused,
 {
     struct scheduler_init_params *param = extra_args;
 
-    if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES - 1) {
+    if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
         CS_LOG_ERR("Too many slaves.\n");
         return -ENOMEM;
     }
@@ -370,6 +418,18 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
         if (ret < 0)
             goto free_kvlist;
 
+        ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_COREMASK,
+                &parse_coremask_arg,
+                params);
+        if (ret < 0)
+            goto free_kvlist;
+
+        ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_CORELIST,
+                &parse_corelist_arg,
+                params);
+        if (ret < 0)
+            goto free_kvlist;
+
         ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
                 &parse_name_arg,
                 &params->def_p);
@@ -390,12 +450,6 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
                 &parse_ordering_arg, params);
         if (ret < 0)
             goto free_kvlist;
-
-        if (params->def_p.socket_id >= number_of_sockets()) {
-            CDEV_LOG_ERR("Invalid socket id specified to create "
-                "the virtual crypto device on");
-            goto free_kvlist;
-        }
     }
 
 free_kvlist:
@@ -437,8 +491,12 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
     if (init_params.def_p.name[0] != '\0')
         RTE_LOG(INFO, PMD, "  User defined name = %s\n",
             init_params.def_p.name);
+    if (init_params.wcmask != 0)
+        RTE_LOG(INFO, PMD, "  workers core mask = %"PRIx64"\n",
+            init_params.wcmask);
 
     return cryptodev_scheduler_create(name,
+                    vdev,
                     &init_params);
 }
 
@@ -454,3 +512,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
     "max_nb_sessions=<int> "
     "socket_id=<int> "
     "slave=<name>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_scheduler_pmd_drv,
+        cryptodev_driver_id);
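Note: coremask and corelist are two spellings of the same wcmask bitmap. A standalone sketch of the parsing loop above (the helper name is hypothetical, and like the patch it assumes core ids below 64):

#include <ctype.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors parse_corelist_arg(): "2,3,4,5" yields the same bitmap as
 * coremask=0x3c. */
static uint64_t
corelist_to_mask(const char *value)
{
    uint64_t mask = 0;
    const char *token = value;

    while (isdigit((unsigned char)token[0])) {
        char *rval;
        unsigned long core = strtoul(token, &rval, 10);

        mask |= 1ULL << core;
        token = rval;
        if (token[0] == '\0')
            break;
        token++; /* skip the ',' separator */
    }

    return mask;
}

int
main(void)
{
    printf("0x%" PRIx64 "\n", corelist_to_mask("2,3,4,5")); /* 0x3c */
    return 0;
}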
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 2b5858df..d3795346 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -37,6 +37,7 @@
 #include <rte_dev.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
 #include <rte_reorder.h>
 
 #include "scheduler_pmd_private.h"
@@ -368,7 +369,7 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
                 max_nb_sessions;
     }
 
-    dev_info->dev_type = dev->dev_type;
+    dev_info->driver_id = dev->driver_id;
     dev_info->feature_flags = dev->feature_flags;
     dev_info->capabilities = sched_ctx->capabilities;
     dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
@@ -398,7 +399,8 @@ scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
 /** Setup a queue pair */
 static int
 scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-    const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
+    const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+    struct rte_mempool *session_pool)
 {
     struct scheduler_ctx *sched_ctx = dev->data->dev_private;
     struct scheduler_qp_ctx *qp_ctx;
@@ -420,8 +422,13 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
     for (i = 0; i < sched_ctx->nb_slaves; i++) {
         uint8_t slave_id = sched_ctx->slaves[i].dev_id;
 
+        /*
+         * All slaves will share the same session mempool
+         * for session-less operations, so the objects
+         * must be big enough for all the drivers used.
+         */
         ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
-                qp_conf, socket_id);
+                qp_conf, socket_id, session_pool);
         if (ret < 0)
             return ret;
     }
@@ -483,37 +490,41 @@ scheduler_pmd_qp_count(struct rte_cryptodev *dev)
 static uint32_t
 scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
-    return sizeof(struct scheduler_session);
+    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+    uint8_t i = 0;
+    uint32_t max_priv_sess_size = 0;
+
+    /* Check what is the maximum private session size for all slaves */
+    for (i = 0; i < sched_ctx->nb_slaves; i++) {
+        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+        struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
+        uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+
+        if (max_priv_sess_size < priv_sess_size)
+            max_priv_sess_size = priv_sess_size;
+    }
+
+    return max_priv_sess_size;
 }
 
 static int
-config_slave_sess(struct scheduler_ctx *sched_ctx,
-        struct rte_crypto_sym_xform *xform,
-        struct scheduler_session *sess,
-        uint32_t create)
+scheduler_pmd_session_configure(struct rte_cryptodev *dev,
+    struct rte_crypto_sym_xform *xform,
+    struct rte_cryptodev_sym_session *sess,
+    struct rte_mempool *mempool)
 {
+    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
     uint32_t i;
+    int ret;
 
     for (i = 0; i < sched_ctx->nb_slaves; i++) {
         struct scheduler_slave *slave = &sched_ctx->slaves[i];
 
-        if (sess->sessions[i]) {
-            if (create)
-                continue;
-            /* !create */
-            sess->sessions[i] = rte_cryptodev_sym_session_free(
-                    slave->dev_id, sess->sessions[i]);
-        } else {
-            if (!create)
-                continue;
-            /* create */
-            sess->sessions[i] =
-                    rte_cryptodev_sym_session_create(
-                            slave->dev_id, xform);
-            if (!sess->sessions[i]) {
-                config_slave_sess(sched_ctx, NULL, sess, 0);
-                return -1;
-            }
+        ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
+                    xform, mempool);
+        if (ret < 0) {
+            CS_LOG_ERR("unabled to config sym session");
+            return ret;
         }
     }
 
@@ -523,27 +534,17 @@ config_slave_sess(struct scheduler_ctx *sched_ctx,
 /** Clear the memory of session so it doesn't leave key material behind */
 static void
 scheduler_pmd_session_clear(struct rte_cryptodev *dev,
-    void *sess)
+        struct rte_cryptodev_sym_session *sess)
 {
     struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+    uint32_t i;
 
-    config_slave_sess(sched_ctx, NULL, sess, 0);
-
-    memset(sess, 0, sizeof(struct scheduler_session));
-}
-
-static void *
-scheduler_pmd_session_configure(struct rte_cryptodev *dev,
-    struct rte_crypto_sym_xform *xform, void *sess)
-{
-    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+    /* Clear private data of slaves */
+    for (i = 0; i < sched_ctx->nb_slaves; i++) {
+        struct scheduler_slave *slave = &sched_ctx->slaves[i];
 
-    if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
-        CS_LOG_ERR("unabled to config sym session");
-        return NULL;
+        rte_cryptodev_sym_session_clear(slave->dev_id, sess);
     }
-
-    return sess;
 }
 
 struct rte_cryptodev_ops scheduler_pmd_ops = {
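Note: with the session rework visible above, a single rte_cryptodev_sym_session serves every slave: session_get_size() reports the largest private-data size among the slaves so that one mempool element fits any of them, and the configure/clear ops fan out to each slave. A caller-side sketch, assuming this release's session API; the pool and xform are placeholders:

#include <rte_cryptodev.h>
#include <rte_mempool.h>

static struct rte_cryptodev_sym_session *
create_scheduler_session(uint8_t scheduler_dev_id,
        struct rte_mempool *sess_pool, struct rte_crypto_sym_xform *xform)
{
    struct rte_cryptodev_sym_session *sess =
            rte_cryptodev_sym_session_create(sess_pool);

    if (sess == NULL)
        return NULL;

    /* Ends up in scheduler_pmd_session_configure(), which runs
     * rte_cryptodev_sym_session_init() on every attached slave. */
    if (rte_cryptodev_sym_session_init(scheduler_dev_id, sess, xform,
            sess_pool) < 0) {
        rte_cryptodev_sym_session_free(sess);
        return NULL;
    }

    return sess;
}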
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 421dae37..e606716a 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -36,6 +36,9 @@
 
 #include "rte_cryptodev_scheduler.h"
 
+#define CRYPTODEV_NAME_SCHEDULER_PMD    crypto_scheduler
+/**< Scheduler Crypto PMD device name */
+
 #define PER_SLAVE_BUFF_SIZE            (256)
 
 #define CS_LOG_ERR(fmt, args...)                    \
@@ -63,7 +66,7 @@ struct scheduler_slave {
     uint16_t qp_id;
     uint32_t nb_inflight_cops;
 
-    enum rte_cryptodev_type dev_type;
+    uint8_t driver_id;
 };
 
 struct scheduler_ctx {
@@ -86,6 +89,8 @@ struct scheduler_ctx {
     char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
     char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
 
+    uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+    uint16_t nb_wc;
 
     char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
     int nb_init_slaves;
@@ -100,12 +105,10 @@ struct scheduler_qp_ctx {
     uint32_t seqn;
 } __rte_cache_aligned;
 
-struct scheduler_session {
-    struct rte_cryptodev_sym_session *sessions[
-            RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-};
 
-static inline uint16_t __attribute__((always_inline))
+extern uint8_t cryptodev_driver_id;
+
+static __rte_always_inline uint16_t
 get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
 {
     uint32_t count = rte_ring_free_count(order_ring);
@@ -113,7 +116,7 @@ get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
     return count > nb_ops ? nb_ops : count;
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 scheduler_order_insert(struct rte_ring *order_ring,
         struct rte_crypto_op **ops, uint16_t nb_ops)
 {
@@ -125,7 +128,7 @@ scheduler_order_insert(struct rte_ring *order_ring,
     op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
 } while (0)
 
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
 scheduler_order_drain(struct rte_ring *order_ring,
         struct rte_crypto_op **ops, uint16_t nb_ops)
 {
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 01162764..4a847281 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -52,8 +52,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
     uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
     struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
     uint16_t i, processed_ops;
-    struct rte_cryptodev_sym_session *sessions[nb_ops];
-    struct scheduler_session *sess0, *sess1, *sess2, *sess3;
 
     if (unlikely(nb_ops == 0))
         return 0;
@@ -61,39 +59,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
     for (i = 0; i < nb_ops && i < 4; i++)
         rte_prefetch0(ops[i]->sym->session);
 
-    for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
-        sess0 = (struct scheduler_session *)
-                ops[i]->sym->session->_private;
-        sess1 = (struct scheduler_session *)
-                ops[i+1]->sym->session->_private;
-        sess2 = (struct scheduler_session *)
-                ops[i+2]->sym->session->_private;
-        sess3 = (struct scheduler_session *)
-                ops[i+3]->sym->session->_private;
-
-        sessions[i] = ops[i]->sym->session;
-        sessions[i + 1] = ops[i + 1]->sym->session;
-        sessions[i + 2] = ops[i + 2]->sym->session;
-        sessions[i + 3] = ops[i + 3]->sym->session;
-
-        ops[i]->sym->session = sess0->sessions[slave_idx];
-        ops[i + 1]->sym->session = sess1->sessions[slave_idx];
-        ops[i + 2]->sym->session = sess2->sessions[slave_idx];
-        ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-
-        rte_prefetch0(ops[i + 4]->sym->session);
-        rte_prefetch0(ops[i + 5]->sym->session);
-        rte_prefetch0(ops[i + 6]->sym->session);
-        rte_prefetch0(ops[i + 7]->sym->session);
-    }
-
-    for (; i < nb_ops; i++) {
-        sess0 = (struct scheduler_session *)
-                ops[i]->sym->session->_private;
-        sessions[i] = ops[i]->sym->session;
-        ops[i]->sym->session = sess0->sessions[slave_idx];
-    }
-
     processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
             slave->qp_id, ops, nb_ops);
 
@@ -102,12 +67,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
     rr_qp_ctx->last_enq_slave_idx += 1;
     rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
 
-    /* recover session if enqueue is failed */
-    if (unlikely(processed_ops < nb_ops)) {
-        for (i = processed_ops; i < nb_ops; i++)
-            ops[i]->sym->session = sessions[i];
-    }
-
     return processed_ops;
 }