Diffstat (limited to 'lib/librte_eventdev')
 -rw-r--r--  lib/librte_eventdev/Makefile                          15
 -rw-r--r--  lib/librte_eventdev/meson.build                       19
 -rw-r--r--  lib/librte_eventdev/rte_event_crypto_adapter.c      1128
 -rw-r--r--  lib/librte_eventdev/rte_event_crypto_adapter.h       575
 -rw-r--r--  lib/librte_eventdev/rte_event_eth_rx_adapter.c      1662
 -rw-r--r--  lib/librte_eventdev/rte_event_eth_rx_adapter.h       129
 -rw-r--r--  lib/librte_eventdev/rte_event_ring.c                  15
 -rw-r--r--  lib/librte_eventdev/rte_event_ring.h                   4
 -rw-r--r--  lib/librte_eventdev/rte_event_timer_adapter.c       1299
 -rw-r--r--  lib/librte_eventdev/rte_event_timer_adapter.h        766
 -rw-r--r--  lib/librte_eventdev/rte_event_timer_adapter_pmd.h    114
 -rw-r--r--  lib/librte_eventdev/rte_eventdev.c                     79
 -rw-r--r--  lib/librte_eventdev/rte_eventdev.h                    167
 -rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd.h                225
 -rw-r--r--  lib/librte_eventdev/rte_eventdev_version.map           39
 15 files changed, 5910 insertions, 326 deletions
diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
index d27dd070..47f599a6 100644
--- a/lib/librte_eventdev/Makefile
+++ b/lib/librte_eventdev/Makefile
@@ -8,18 +8,26 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_eventdev.a
# library version
-LIBABIVER := 3
+LIBABIVER := 5
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-LDLIBS += -lrte_eal -lrte_ring -lrte_ethdev -lrte_hash
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+CFLAGS += -DLINUX
+else
+CFLAGS += -DBSD
+endif
+LDLIBS += -lrte_eal -lrte_ring -lrte_ethdev -lrte_hash -lrte_mempool -lrte_timer
+LDLIBS += -lrte_mbuf -lrte_cryptodev -lpthread
# library source files
SRCS-y += rte_eventdev.c
SRCS-y += rte_event_ring.c
SRCS-y += rte_event_eth_rx_adapter.c
+SRCS-y += rte_event_timer_adapter.c
+SRCS-y += rte_event_crypto_adapter.c
# export include files
SYMLINK-y-include += rte_eventdev.h
@@ -28,6 +36,9 @@ SYMLINK-y-include += rte_eventdev_pmd_pci.h
SYMLINK-y-include += rte_eventdev_pmd_vdev.h
SYMLINK-y-include += rte_event_ring.h
SYMLINK-y-include += rte_event_eth_rx_adapter.h
+SYMLINK-y-include += rte_event_timer_adapter.h
+SYMLINK-y-include += rte_event_timer_adapter_pmd.h
+SYMLINK-y-include += rte_event_crypto_adapter.h
# versioning export map
EXPORT_MAP := rte_eventdev_version.map
diff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build
index d1a99602..3cbaf298 100644
--- a/lib/librte_eventdev/meson.build
+++ b/lib/librte_eventdev/meson.build
@@ -1,14 +1,27 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 5
allow_experimental_apis = true
+
+if host_machine.system() == 'linux'
+ cflags += '-DLINUX'
+else
+ cflags += '-DBSD'
+endif
+
sources = files('rte_eventdev.c',
'rte_event_ring.c',
- 'rte_event_eth_rx_adapter.c')
+ 'rte_event_eth_rx_adapter.c',
+ 'rte_event_timer_adapter.c',
+ 'rte_event_crypto_adapter.c')
headers = files('rte_eventdev.h',
'rte_eventdev_pmd.h',
'rte_eventdev_pmd_pci.h',
'rte_eventdev_pmd_vdev.h',
'rte_event_ring.h',
- 'rte_event_eth_rx_adapter.h')
-deps += ['ring', 'ethdev', 'hash']
+ 'rte_event_eth_rx_adapter.h',
+ 'rte_event_timer_adapter.h',
+ 'rte_event_timer_adapter_pmd.h',
+ 'rte_event_crypto_adapter.h')
+deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/lib/librte_eventdev/rte_event_crypto_adapter.c b/lib/librte_eventdev/rte_event_crypto_adapter.c
new file mode 100644
index 00000000..11b28ca9
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_crypto_adapter.c
@@ -0,0 +1,1128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <stdbool.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_service_component.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+#include "rte_event_crypto_adapter.h"
+
+#define BATCH_SIZE 32
+#define DEFAULT_MAX_NB 128
+#define CRYPTO_ADAPTER_NAME_LEN 32
+#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
+#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
+
+/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
+ * iterations of eca_crypto_adapter_enq_run()
+ */
+#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
+
+struct rte_event_crypto_adapter {
+ /* Event device identifier */
+ uint8_t eventdev_id;
+ /* Event port identifier */
+ uint8_t event_port_id;
+ /* Store event device's implicit release capability */
+ uint8_t implicit_release_disabled;
+ /* Max crypto ops processed in any service function invocation */
+ uint32_t max_nb;
+ /* Lock to serialize config updates with service function */
+ rte_spinlock_t lock;
+ /* Next crypto device to be processed */
+ uint16_t next_cdev_id;
+ /* Per crypto device structure */
+ struct crypto_device_info *cdevs;
+ /* Loop counter to flush crypto ops */
+ uint16_t transmit_loop_count;
+ /* Per instance stats structure */
+ struct rte_event_crypto_adapter_stats crypto_stats;
+ /* Configuration callback for rte_service configuration */
+ rte_event_crypto_adapter_conf_cb conf_cb;
+ /* Configuration callback argument */
+ void *conf_arg;
+ /* Set if default_cb is being used */
+ int default_cb_arg;
+ /* Service initialization state */
+ uint8_t service_inited;
+ /* Memory allocation name */
+ char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
+ /* Socket identifier cached from eventdev */
+ int socket_id;
+ /* Per adapter EAL service */
+ uint32_t service_id;
+ /* No. of queue pairs configured */
+ uint16_t nb_qps;
+ /* Adapter mode */
+ enum rte_event_crypto_adapter_mode mode;
+} __rte_cache_aligned;
+
+/* Per crypto device information */
+struct crypto_device_info {
+ /* Pointer to cryptodev */
+ struct rte_cryptodev *dev;
+ /* Pointer to queue pair info */
+ struct crypto_queue_pair_info *qpairs;
+ /* Next queue pair to be processed */
+ uint16_t next_queue_pair_id;
+ /* Set to indicate cryptodev->eventdev packet
+ * transfer uses a hardware mechanism
+ */
+ uint8_t internal_event_port;
+ /* Set to indicate processing has been started */
+ uint8_t dev_started;
+ /* If num_qpairs > 0, the start callback will
+ * be invoked if not already invoked
+ */
+ uint16_t num_qpairs;
+} __rte_cache_aligned;
+
+/* Per queue pair information */
+struct crypto_queue_pair_info {
+ /* Set to indicate queue pair is enabled */
+ bool qp_enabled;
+ /* Pointer to hold rte_crypto_ops for batching */
+ struct rte_crypto_op **op_buffer;
+ /* No of crypto ops accumulated */
+ uint8_t len;
+} __rte_cache_aligned;
+
+static struct rte_event_crypto_adapter **event_crypto_adapter;
+
+/* Macros to check for valid adapter */
+#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
+ if (!eca_valid_id(id)) { \
+ RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
+ return retval; \
+ } \
+} while (0)
+
+static inline int
+eca_valid_id(uint8_t id)
+{
+ return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
+}
+
+static int
+eca_init(void)
+{
+ const char *name = "crypto_adapter_array";
+ const struct rte_memzone *mz;
+ unsigned int sz;
+
+ sz = sizeof(*event_crypto_adapter) *
+ RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
+ sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
+ RTE_CACHE_LINE_SIZE);
+ if (mz == NULL) {
+ RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
+ PRId32, rte_errno);
+ return -rte_errno;
+ }
+ }
+
+ event_crypto_adapter = mz->addr;
+ return 0;
+}
+
+static inline struct rte_event_crypto_adapter *
+eca_id_to_adapter(uint8_t id)
+{
+ return event_crypto_adapter ?
+ event_crypto_adapter[id] : NULL;
+}
+
+static int
+eca_default_config_cb(uint8_t id, uint8_t dev_id,
+ struct rte_event_crypto_adapter_conf *conf, void *arg)
+{
+ struct rte_event_dev_config dev_conf;
+ struct rte_eventdev *dev;
+ uint8_t port_id;
+ int started;
+ int ret;
+ struct rte_event_port_conf *port_conf = arg;
+ struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ dev_conf = dev->data->dev_conf;
+
+ started = dev->data->dev_started;
+ if (started)
+ rte_event_dev_stop(dev_id);
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports += 1;
+ ret = rte_event_dev_configure(dev_id, &dev_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
+ if (started) {
+ if (rte_event_dev_start(dev_id))
+ return -EIO;
+ }
+ return ret;
+ }
+
+ ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
+ return ret;
+ }
+
+ conf->event_port_id = port_id;
+ conf->max_nb = DEFAULT_MAX_NB;
+ if (started)
+ ret = rte_event_dev_start(dev_id);
+
+ adapter->default_cb_arg = 1;
+ return ret;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_crypto_adapter_conf_cb conf_cb,
+ enum rte_event_crypto_adapter_mode mode,
+ void *conf_arg)
+{
+ struct rte_event_crypto_adapter *adapter;
+ char mem_name[CRYPTO_ADAPTER_NAME_LEN];
+ struct rte_event_dev_info dev_info;
+ int socket_id;
+ uint8_t i;
+ int ret;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ if (conf_cb == NULL)
+ return -EINVAL;
+
+ if (event_crypto_adapter == NULL) {
+ ret = eca_init();
+ if (ret)
+ return ret;
+ }
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter != NULL) {
+ RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
+ return -EEXIST;
+ }
+
+ socket_id = rte_event_dev_socket_id(dev_id);
+ snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
+ "rte_event_crypto_adapter_%d", id);
+
+ adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (adapter == NULL) {
+ RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
+ return -ENOMEM;
+ }
+
+ ret = rte_event_dev_info_get(dev_id, &dev_info);
+ if (ret < 0) {
+ RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
+ dev_id, dev_info.driver_name);
+ rte_free(adapter);
+ return ret;
+ }
+
+ adapter->implicit_release_disabled = (dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+ adapter->eventdev_id = dev_id;
+ adapter->socket_id = socket_id;
+ adapter->conf_cb = conf_cb;
+ adapter->conf_arg = conf_arg;
+ adapter->mode = mode;
+ strcpy(adapter->mem_name, mem_name);
+ adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
+ rte_cryptodev_count() *
+ sizeof(struct crypto_device_info), 0,
+ socket_id);
+ if (adapter->cdevs == NULL) {
+ RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
+ rte_free(adapter);
+ return -ENOMEM;
+ }
+
+ rte_spinlock_init(&adapter->lock);
+ for (i = 0; i < rte_cryptodev_count(); i++)
+ adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
+
+ event_crypto_adapter[id] = adapter;
+
+ return 0;
+}
+
+
+int __rte_experimental
+rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config,
+ enum rte_event_crypto_adapter_mode mode)
+{
+ struct rte_event_port_conf *pc;
+ int ret;
+
+ if (port_config == NULL)
+ return -EINVAL;
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ pc = rte_malloc(NULL, sizeof(*pc), 0);
+ if (pc == NULL)
+ return -ENOMEM;
+ *pc = *port_config;
+ ret = rte_event_crypto_adapter_create_ext(id, dev_id,
+ eca_default_config_cb,
+ mode,
+ pc);
+ if (ret)
+ rte_free(pc);
+
+ return ret;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_free(uint8_t id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ if (adapter->nb_qps) {
+ RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
+ adapter->nb_qps);
+ return -EBUSY;
+ }
+
+ if (adapter->default_cb_arg)
+ rte_free(adapter->conf_arg);
+ rte_free(adapter->cdevs);
+ rte_free(adapter);
+ event_crypto_adapter[id] = NULL;
+
+ return 0;
+}
+
+static inline unsigned int
+eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
+ struct rte_event *ev, unsigned int cnt)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ union rte_event_crypto_metadata *m_data = NULL;
+ struct crypto_queue_pair_info *qp_info = NULL;
+ struct rte_crypto_op *crypto_op;
+ unsigned int i, n;
+ uint16_t qp_id, len, ret;
+ uint8_t cdev_id;
+
+ len = 0;
+ ret = 0;
+ n = 0;
+ stats->event_deq_count += cnt;
+
+ for (i = 0; i < cnt; i++) {
+ crypto_op = ev[i].event_ptr;
+ if (crypto_op == NULL)
+ continue;
+ if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ m_data = rte_cryptodev_sym_session_get_user_data(
+ crypto_op->sym->session);
+ if (m_data == NULL) {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+
+ cdev_id = m_data->request_info.cdev_id;
+ qp_id = m_data->request_info.queue_pair_id;
+ qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
+ if (qp_info == NULL) {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+ len = qp_info->len;
+ qp_info->op_buffer[len] = crypto_op;
+ len++;
+ } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+ crypto_op->private_data_offset) {
+ m_data = (union rte_event_crypto_metadata *)
+ ((uint8_t *)crypto_op +
+ crypto_op->private_data_offset);
+ cdev_id = m_data->request_info.cdev_id;
+ qp_id = m_data->request_info.queue_pair_id;
+ qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
+ if (qp_info == NULL) {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+ len = qp_info->len;
+ qp_info->op_buffer[len] = crypto_op;
+ len++;
+ } else {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+
+ if (len == BATCH_SIZE) {
+ struct rte_crypto_op **op_buffer = qp_info->op_buffer;
+ ret = rte_cryptodev_enqueue_burst(cdev_id,
+ qp_id,
+ op_buffer,
+ BATCH_SIZE);
+
+ stats->crypto_enq_count += ret;
+
+ while (ret < len) {
+ struct rte_crypto_op *op;
+ op = op_buffer[ret++];
+ stats->crypto_enq_fail++;
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ }
+
+ len = 0;
+ }
+
+ if (qp_info)
+ qp_info->len = len;
+ n += ret;
+ }
+
+ return n;
+}
+
+static unsigned int
+eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ struct crypto_device_info *curr_dev;
+ struct crypto_queue_pair_info *curr_queue;
+ struct rte_crypto_op **op_buffer;
+ struct rte_cryptodev *dev;
+ uint8_t cdev_id;
+ uint16_t qp;
+ uint16_t ret;
+ uint16_t num_cdev = rte_cryptodev_count();
+
+ ret = 0;
+ for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
+ curr_dev = &adapter->cdevs[cdev_id];
+ if (curr_dev == NULL)
+ continue;
+ dev = curr_dev->dev;
+
+ for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
+
+ curr_queue = &curr_dev->qpairs[qp];
+ if (!curr_queue->qp_enabled)
+ continue;
+
+ op_buffer = curr_queue->op_buffer;
+ ret = rte_cryptodev_enqueue_burst(cdev_id,
+ qp,
+ op_buffer,
+ curr_queue->len);
+ stats->crypto_enq_count += ret;
+
+ while (ret < curr_queue->len) {
+ struct rte_crypto_op *op;
+ op = op_buffer[ret++];
+ stats->crypto_enq_fail++;
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ }
+ curr_queue->len = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int
+eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
+ unsigned int max_enq)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ struct rte_event ev[BATCH_SIZE];
+ unsigned int nb_enq, nb_enqueued;
+ uint16_t n;
+ uint8_t event_dev_id = adapter->eventdev_id;
+ uint8_t event_port_id = adapter->event_port_id;
+
+ nb_enqueued = 0;
+ if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ return 0;
+
+ for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
+ stats->event_poll_count++;
+ n = rte_event_dequeue_burst(event_dev_id,
+ event_port_id, ev, BATCH_SIZE, 0);
+
+ if (!n)
+ break;
+
+ nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
+ }
+
+ if ((++adapter->transmit_loop_count &
+ (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
+ nb_enqueued += eca_crypto_enq_flush(adapter);
+ }
+
+ return nb_enqueued;
+}
+
+static inline void
+eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
+ struct rte_crypto_op **ops, uint16_t num)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ union rte_event_crypto_metadata *m_data = NULL;
+ uint8_t event_dev_id = adapter->eventdev_id;
+ uint8_t event_port_id = adapter->event_port_id;
+ struct rte_event events[BATCH_SIZE];
+ uint16_t nb_enqueued, nb_ev;
+ uint8_t retry;
+ uint8_t i;
+
+ nb_ev = 0;
+ retry = 0;
+ nb_enqueued = 0;
+ num = RTE_MIN(num, BATCH_SIZE);
+ for (i = 0; i < num; i++) {
+ struct rte_event *ev = &events[nb_ev];
+
+ m_data = NULL;
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ m_data = rte_cryptodev_sym_session_get_user_data(
+ ops[i]->sym->session);
+ } else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+ ops[i]->private_data_offset) {
+ m_data = (union rte_event_crypto_metadata *)
+ ((uint8_t *)ops[i] +
+ ops[i]->private_data_offset);
+ }
+
+ /* Drop ops that carry no event metadata instead of
+ * counting a stale slot in the events[] array.
+ */
+ if (unlikely(m_data == NULL)) {
+ rte_pktmbuf_free(ops[i]->sym->m_src);
+ rte_crypto_op_free(ops[i]);
+ continue;
+ }
+
+ nb_ev++;
+ rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
+ ev->event_ptr = ops[i];
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ if (adapter->implicit_release_disabled)
+ ev->op = RTE_EVENT_OP_FORWARD;
+ else
+ ev->op = RTE_EVENT_OP_NEW;
+ }
+
+ do {
+ nb_enqueued += rte_event_enqueue_burst(event_dev_id,
+ event_port_id,
+ &events[nb_enqueued],
+ nb_ev - nb_enqueued);
+ } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
+ nb_enqueued < nb_ev);
+
+ /* Free mbufs and rte_crypto_ops for failed events */
+ for (i = nb_enqueued; i < nb_ev; i++) {
+ struct rte_crypto_op *op = events[i].event_ptr;
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ }
+
+ stats->event_enq_fail_count += nb_ev - nb_enqueued;
+ stats->event_enq_count += nb_enqueued;
+ stats->event_enq_retry_count += retry - 1;
+}
+
+static inline unsigned int
+eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
+ unsigned int max_deq)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ struct crypto_device_info *curr_dev;
+ struct crypto_queue_pair_info *curr_queue;
+ struct rte_crypto_op *ops[BATCH_SIZE];
+ uint16_t n, nb_deq;
+ struct rte_cryptodev *dev;
+ uint8_t cdev_id;
+ uint16_t qp, dev_qps;
+ bool done;
+ uint16_t num_cdev = rte_cryptodev_count();
+
+ nb_deq = 0;
+ do {
+ uint16_t queues = 0;
+ done = true;
+
+ for (cdev_id = adapter->next_cdev_id;
+ cdev_id < num_cdev; cdev_id++) {
+ curr_dev = &adapter->cdevs[cdev_id];
+ if (curr_dev == NULL)
+ continue;
+ dev = curr_dev->dev;
+ dev_qps = dev->data->nb_queue_pairs;
+
+ for (qp = curr_dev->next_queue_pair_id;
+ queues < dev_qps; qp = (qp + 1) % dev_qps,
+ queues++) {
+
+ curr_queue = &curr_dev->qpairs[qp];
+ if (!curr_queue->qp_enabled)
+ continue;
+
+ n = rte_cryptodev_dequeue_burst(cdev_id, qp,
+ ops, BATCH_SIZE);
+ if (!n)
+ continue;
+
+ done = false;
+ stats->crypto_deq_count += n;
+ eca_ops_enqueue_burst(adapter, ops, n);
+ nb_deq += n;
+
+ if (nb_deq > max_deq) {
+ if ((qp + 1) == dev_qps) {
+ adapter->next_cdev_id =
+ (cdev_id + 1)
+ % num_cdev;
+ }
+ curr_dev->next_queue_pair_id = (qp + 1)
+ % dev->data->nb_queue_pairs;
+
+ return nb_deq;
+ }
+ }
+ }
+ } while (done == false);
+ return nb_deq;
+}
+
+static void
+eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
+ unsigned int max_ops)
+{
+ while (max_ops) {
+ unsigned int e_cnt, d_cnt;
+
+ e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
+ max_ops -= RTE_MIN(max_ops, e_cnt);
+
+ d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
+ max_ops -= RTE_MIN(max_ops, d_cnt);
+
+ if (e_cnt == 0 && d_cnt == 0)
+ break;
+
+ }
+}
+
+static int
+eca_service_func(void *args)
+{
+ struct rte_event_crypto_adapter *adapter = args;
+
+ if (rte_spinlock_trylock(&adapter->lock) == 0)
+ return 0;
+ eca_crypto_adapter_run(adapter, adapter->max_nb);
+ rte_spinlock_unlock(&adapter->lock);
+
+ return 0;
+}
+
+static int
+eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
+{
+ struct rte_event_crypto_adapter_conf adapter_conf;
+ struct rte_service_spec service;
+ int ret;
+
+ if (adapter->service_inited)
+ return 0;
+
+ memset(&service, 0, sizeof(service));
+ snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
+ "rte_event_crypto_adapter_%d", id);
+ service.socket_id = adapter->socket_id;
+ service.callback = eca_service_func;
+ service.callback_userdata = adapter;
+ /* Service function handles locking for queue add/del updates */
+ service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
+ ret = rte_service_component_register(&service, &adapter->service_id);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
+ service.name, ret);
+ return ret;
+ }
+
+ ret = adapter->conf_cb(id, adapter->eventdev_id,
+ &adapter_conf, adapter->conf_arg);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
+ ret);
+ return ret;
+ }
+
+ adapter->max_nb = adapter_conf.max_nb;
+ adapter->event_port_id = adapter_conf.event_port_id;
+ adapter->service_inited = 1;
+
+ return ret;
+}
+
+static void
+eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
+ struct crypto_device_info *dev_info,
+ int32_t queue_pair_id,
+ uint8_t add)
+{
+ struct crypto_queue_pair_info *qp_info;
+ int enabled;
+ uint16_t i;
+
+ if (dev_info->qpairs == NULL)
+ return;
+
+ if (queue_pair_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
+ eca_update_qp_info(adapter, dev_info, i, add);
+ } else {
+ qp_info = &dev_info->qpairs[queue_pair_id];
+ enabled = qp_info->qp_enabled;
+ if (add) {
+ adapter->nb_qps += !enabled;
+ dev_info->num_qpairs += !enabled;
+ } else {
+ adapter->nb_qps -= enabled;
+ dev_info->num_qpairs -= enabled;
+ }
+ qp_info->qp_enabled = !!add;
+ }
+}
+
+static int
+eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
+ uint8_t cdev_id,
+ int queue_pair_id)
+{
+ struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
+ struct crypto_queue_pair_info *qpairs;
+ uint32_t i;
+
+ if (dev_info->qpairs == NULL) {
+ dev_info->qpairs =
+ rte_zmalloc_socket(adapter->mem_name,
+ dev_info->dev->data->nb_queue_pairs *
+ sizeof(struct crypto_queue_pair_info),
+ 0, adapter->socket_id);
+ if (dev_info->qpairs == NULL)
+ return -ENOMEM;
+
+ qpairs = dev_info->qpairs;
+ qpairs->op_buffer = rte_zmalloc_socket(adapter->mem_name,
+ BATCH_SIZE *
+ sizeof(struct rte_crypto_op *),
+ 0, adapter->socket_id);
+ if (!qpairs->op_buffer) {
+ rte_free(qpairs);
+ dev_info->qpairs = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ if (queue_pair_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
+ eca_update_qp_info(adapter, dev_info, i, 1);
+ } else
+ eca_update_qp_info(adapter, dev_info,
+ (uint16_t)queue_pair_id, 1);
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_add(uint8_t id,
+ uint8_t cdev_id,
+ int32_t queue_pair_id,
+ const struct rte_event *event)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct rte_eventdev *dev;
+ struct crypto_device_info *dev_info;
+ uint32_t cap;
+ int ret;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
+ RTE_EDEV_LOG_ERR("Invalid cdev_id=%" PRIu8, cdev_id);
+ return -EINVAL;
+ }
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
+ cdev_id,
+ &cap);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
+ " cdev %" PRIu8, id, cdev_id);
+ return ret;
+ }
+
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
+ (event == NULL)) {
+ RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
+ cdev_id);
+ return -EINVAL;
+ }
+
+ dev_info = &adapter->cdevs[cdev_id];
+
+ if (queue_pair_id != -1 &&
+ (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
+ RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
+ (uint16_t)queue_pair_id);
+ return -EINVAL;
+ }
+
+ /* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
+ * no service core is needed because the HW supports the event forward
+ * capability.
+ */
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
+ RTE_FUNC_PTR_OR_ERR_RET(
+ *dev->dev_ops->crypto_adapter_queue_pair_add,
+ -ENOTSUP);
+ if (dev_info->qpairs == NULL) {
+ dev_info->qpairs =
+ rte_zmalloc_socket(adapter->mem_name,
+ dev_info->dev->data->nb_queue_pairs *
+ sizeof(struct crypto_queue_pair_info),
+ 0, adapter->socket_id);
+ if (dev_info->qpairs == NULL)
+ return -ENOMEM;
+ }
+
+ ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
+ dev_info->dev,
+ queue_pair_id,
+ event);
+ if (ret)
+ return ret;
+
+ eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
+ queue_pair_id, 1);
+ }
+
+ /* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
+ * or this is a SW adapter, initialize the service so the application can
+ * choose whichever way it wants to use the adapter.
+ * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
+ * The application may want to use one of the two modes below:
+ * a. OP_FORWARD mode -> HW dequeue + SW enqueue
+ * b. OP_NEW mode -> HW dequeue
+ * Case 2: No HW caps, use the SW adapter
+ * a. OP_FORWARD mode -> SW enqueue & dequeue
+ * b. OP_NEW mode -> SW dequeue
+ */
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
+ (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
+ rte_spinlock_lock(&adapter->lock);
+ ret = eca_init_service(adapter, id);
+ if (ret == 0)
+ ret = eca_add_queue_pair(adapter, cdev_id,
+ queue_pair_id);
+ rte_spinlock_unlock(&adapter->lock);
+
+ if (ret)
+ return ret;
+
+ rte_service_component_runstate_set(adapter->service_id, 1);
+ }
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
+ int32_t queue_pair_id)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct crypto_device_info *dev_info;
+ struct rte_eventdev *dev;
+ int ret;
+ uint32_t cap;
+ uint16_t i;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
+ RTE_EDEV_LOG_ERR("Invalid cdev_id=%" PRIu8, cdev_id);
+ return -EINVAL;
+ }
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
+ cdev_id,
+ &cap);
+ if (ret)
+ return ret;
+
+ dev_info = &adapter->cdevs[cdev_id];
+
+ if (queue_pair_id != -1 &&
+ (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
+ RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
+ (uint16_t)queue_pair_id);
+ return -EINVAL;
+ }
+
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
+ RTE_FUNC_PTR_OR_ERR_RET(
+ *dev->dev_ops->crypto_adapter_queue_pair_del,
+ -ENOTSUP);
+ ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
+ dev_info->dev,
+ queue_pair_id);
+ if (ret == 0) {
+ eca_update_qp_info(adapter,
+ &adapter->cdevs[cdev_id],
+ queue_pair_id,
+ 0);
+ if (dev_info->num_qpairs == 0) {
+ rte_free(dev_info->qpairs);
+ dev_info->qpairs = NULL;
+ }
+ }
+ } else {
+ if (adapter->nb_qps == 0)
+ return 0;
+
+ rte_spinlock_lock(&adapter->lock);
+ if (queue_pair_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
+ i++)
+ eca_update_qp_info(adapter, dev_info,
+ i, 0);
+ } else {
+ eca_update_qp_info(adapter, dev_info,
+ (uint16_t)queue_pair_id, 0);
+ }
+
+ if (dev_info->num_qpairs == 0) {
+ rte_free(dev_info->qpairs);
+ dev_info->qpairs = NULL;
+ }
+
+ rte_spinlock_unlock(&adapter->lock);
+ rte_service_component_runstate_set(adapter->service_id,
+ adapter->nb_qps);
+ }
+
+ return ret;
+}
+
+static int
+eca_adapter_ctrl(uint8_t id, int start)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct crypto_device_info *dev_info;
+ struct rte_eventdev *dev;
+ uint32_t i;
+ int use_service;
+ int stop = !start;
+
+ use_service = 0;
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+
+ for (i = 0; i < rte_cryptodev_count(); i++) {
+ dev_info = &adapter->cdevs[i];
+ /* if start check for num queue pairs */
+ if (start && !dev_info->num_qpairs)
+ continue;
+ /* if stop check if dev has been started */
+ if (stop && !dev_info->dev_started)
+ continue;
+ use_service |= !dev_info->internal_event_port;
+ dev_info->dev_started = start;
+ if (dev_info->internal_event_port == 0)
+ continue;
+ start ? (*dev->dev_ops->crypto_adapter_start)(dev,
+ dev_info->dev) :
+ (*dev->dev_ops->crypto_adapter_stop)(dev,
+ dev_info->dev);
+ }
+
+ if (use_service)
+ rte_service_runstate_set(adapter->service_id, start);
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_start(uint8_t id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ return eca_adapter_ctrl(id, 1);
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_stop(uint8_t id)
+{
+ return eca_adapter_ctrl(id, 0);
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_stats_get(uint8_t id,
+ struct rte_event_crypto_adapter_stats *stats)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
+ struct rte_event_crypto_adapter_stats dev_stats;
+ struct rte_eventdev *dev;
+ struct crypto_device_info *dev_info;
+ uint32_t i;
+ int ret;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL || stats == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < rte_cryptodev_count(); i++) {
+ dev_info = &adapter->cdevs[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->crypto_adapter_stats_get == NULL)
+ continue;
+ ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
+ dev_info->dev,
+ &dev_stats);
+ if (ret)
+ continue;
+
+ dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
+ dev_stats_sum.event_enq_count +=
+ dev_stats.event_enq_count;
+ }
+
+ if (adapter->service_inited)
+ *stats = adapter->crypto_stats;
+
+ stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
+ stats->event_enq_count += dev_stats_sum.event_enq_count;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_stats_reset(uint8_t id)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct crypto_device_info *dev_info;
+ struct rte_eventdev *dev;
+ uint32_t i;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ for (i = 0; i < rte_cryptodev_count(); i++) {
+ dev_info = &adapter->cdevs[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->crypto_adapter_stats_reset == NULL)
+ continue;
+ (*dev->dev_ops->crypto_adapter_stats_reset)(dev,
+ dev_info->dev);
+ }
+
+ memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL || service_id == NULL)
+ return -EINVAL;
+
+ if (adapter->service_inited)
+ *service_id = adapter->service_id;
+
+ return adapter->service_inited ? 0 : -ESRCH;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL || event_port_id == NULL)
+ return -EINVAL;
+
+ *event_port_id = adapter->event_port_id;
+
+ return 0;
+}
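
For orientation, the control path implemented above can be exercised roughly as in the sketch below; it is not part of the patch, the device IDs, lcore number and port configuration values are illustrative, and the NULL event argument assumes a SW-transfer eventdev without the QP_EV_BIND capability.

#include <rte_event_crypto_adapter.h>
#include <rte_service.h>

static int
crypto_adapter_setup(void)
{
	struct rte_event_port_conf port_conf = {
		.new_event_threshold = 4096,
		.dequeue_depth = 32,
		.enqueue_depth = 32,
	};
	uint32_t service_id;
	int ret;

	/* Adapter 0 on eventdev 0, OP_NEW mode, default config callback */
	ret = rte_event_crypto_adapter_create(0, 0, &port_conf,
					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
	if (ret)
		return ret;

	/* queue_pair_id of -1 adds every pre-configured queue pair of cryptodev 0 */
	ret = rte_event_crypto_adapter_queue_pair_add(0, 0, -1, NULL);
	if (ret)
		return ret;

	/* SW transfer path only: run the adapter service on lcore 1,
	 * which is assumed to have been added as a service core.
	 */
	if (rte_event_crypto_adapter_service_id_get(0, &service_id) == 0) {
		rte_service_map_lcore_set(service_id, 1, 1);
		rte_service_runstate_set(service_id, 1);
	}

	return rte_event_crypto_adapter_start(0);
}

The default configuration callback used here is eca_default_config_cb(), which allocates an extra event port for the adapter as shown earlier in this file.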
diff --git a/lib/librte_eventdev/rte_event_crypto_adapter.h b/lib/librte_eventdev/rte_event_crypto_adapter.h
new file mode 100644
index 00000000..d367309c
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_crypto_adapter.h
@@ -0,0 +1,575 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENT_CRYPTO_ADAPTER_
+#define _RTE_EVENT_CRYPTO_ADAPTER_
+
+/**
+ * @file
+ *
+ * RTE Event crypto adapter
+ *
+ * The eventdev library provides adapters to bridge between various
+ * components and provide new event sources. The event crypto adapter is
+ * one of those adapters; it is intended to bridge between event devices
+ * and crypto devices.
+ *
+ * The crypto adapter adds support for enqueuing crypto operations to and
+ * dequeuing them from an event device. The packet flow between the crypto
+ * device and the event device can be accomplished using both SW and HW
+ * based transfer mechanisms.
+ * The adapter uses an EAL service core function for SW based packet transfer
+ * and uses the eventdev PMD functions to configure HW based packet transfer
+ * between the crypto device and the event device.
+ *
+ * The application can choose to submit a crypto operation directly to a
+ * crypto device or send it to the crypto adapter via the eventdev, based
+ * on the RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
+ * The first mode is known as the event new (RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ * mode and the second as the event forward
+ * (RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) mode. The choice of mode can be
+ * specified while creating the adapter.
+ * In the former mode, it is the application's responsibility to enable
+ * ingress packet ordering; in the latter mode, it is the adapter's
+ * responsibility.
+ *
+ *
+ * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode:
+ *
+ * +--------------+ +--------------+
+ * | | | Crypto stage |
+ * | Application |---[2]-->| + enqueue to |
+ * | | | cryptodev |
+ * +--------------+ +--------------+
+ * ^ ^ |
+ * | | [3]
+ * [6] [1] |
+ * | | |
+ * +--------------+ |
+ * | | |
+ * | Event device | |
+ * | | |
+ * +--------------+ |
+ * ^ |
+ * | |
+ * [5] |
+ * | v
+ * +--------------+ +--------------+
+ * | | | |
+ * |Crypto adapter|<--[4]---| Cryptodev |
+ * | | | |
+ * +--------------+ +--------------+
+ *
+ *
+ * [1] Application dequeues events from the previous stage.
+ * [2] Application prepares the crypto operations.
+ * [3] Crypto operations are submitted to cryptodev by application.
+ * [4] Crypto adapter dequeues crypto completions from cryptodev.
+ * [5] Crypto adapter enqueues events to the eventdev.
+ * [6] Application dequeues from eventdev and prepare for further
+ * processing.
+ *
+ * In the RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode, the application submits
+ * crypto operations directly to the crypto device. The adapter then
+ * dequeues crypto completions from the crypto device and enqueues them as
+ * events to the event device.
+ * This mode does not ensure ingress ordering if the application directly
+ * enqueues to the cryptodev without going through a crypto/atomic stage,
+ * i.e. removing items [1] and [2].
+ * Events dequeued from the adapter will be treated as new events.
+ * In this mode, the application needs to specify the event information
+ * (response information) needed to enqueue an event after the crypto
+ * operation is completed.
+ *
+ *
+ * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode:
+ *
+ * +--------------+ +--------------+
+ * --[1]-->| |---[2]-->| Application |
+ * | Event device | | in |
+ * <--[8]--| |<--[3]---| Ordered stage|
+ * +--------------+ +--------------+
+ * ^ |
+ * | [4]
+ * [7] |
+ * | v
+ * +----------------+ +--------------+
+ * | |--[5]->| |
+ * | Crypto adapter | | Cryptodev |
+ * | |<-[6]--| |
+ * +----------------+ +--------------+
+ *
+ *
+ * [1] Events from the previous stage.
+ * [2] Application in ordered stage dequeues events from eventdev.
+ * [3] Application enqueues crypto operations as events to eventdev.
+ * [4] Crypto adapter dequeues event from eventdev.
+ * [5] Crypto adapter submits crypto operations to cryptodev
+ * (Atomic stage).
+ * [6] Crypto adapter dequeues crypto completions from cryptodev
+ * [7] Crypto adapter enqueues events to the eventdev
+ * [8] Events to the next stage
+ *
+ * In the RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, if the HW supports the
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability, the
+ * application can directly submit the crypto operations to the cryptodev.
+ * If not, the application retrieves the crypto adapter's event port using
+ * the rte_event_crypto_adapter_event_port_get() API, links its event
+ * queue to this port and starts enqueuing crypto operations as events
+ * to the eventdev. The adapter then dequeues the events and submits the
+ * crypto operations to the cryptodev. After the crypto operations have
+ * completed, the adapter enqueues events to the event device.
+ * The application can use this mode when ingress packet ordering is
+ * needed; events dequeued from the adapter will be treated as forwarded
+ * events.
+ * In this mode, the application needs to specify the cryptodev ID
+ * and queue pair ID (request information) needed to enqueue a crypto
+ * operation in addition to the event information (response information)
+ * needed to enqueue an event after the crypto operation has completed.
+ *
+ *
+ * The event crypto adapter provides common APIs to configure the packet flow
+ * from the crypto device to event devices for both SW and HW based transfers.
+ * The crypto event adapter's functions are:
+ * - rte_event_crypto_adapter_create_ext()
+ * - rte_event_crypto_adapter_create()
+ * - rte_event_crypto_adapter_free()
+ * - rte_event_crypto_adapter_queue_pair_add()
+ * - rte_event_crypto_adapter_queue_pair_del()
+ * - rte_event_crypto_adapter_start()
+ * - rte_event_crypto_adapter_stop()
+ * - rte_event_crypto_adapter_stats_get()
+ * - rte_event_crypto_adapter_stats_reset()
+ *
+ * The application creates an instance using rte_event_crypto_adapter_create()
+ * or rte_event_crypto_adapter_create_ext().
+ *
+ * Cryptodev queue pair addition/deletion is done using the
+ * rte_event_crypto_adapter_queue_pair_xxx() APIs. If HW supports
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability, event
+ * information must be passed to the add API.
+ *
+ * The SW adapter or HW PMD uses rte_crypto_op::sess_type to decide whether
+ * request/response(private) data is located in the crypto/security session
+ * or at an offset in the rte_crypto_op.
+ *
+ * For session-based operations, the set and get APIs provide a mechanism
+ * for an application to store and retrieve the request/response
+ * information along with the crypto session.
+ * The RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA capability indicates
+ * whether HW or SW supports this feature.
+ *
+ * For session-less mode, the adapter gets the private data information placed
+ * along with the ``struct rte_crypto_op``.
+ * The rte_crypto_op::private_data_offset provides an offset to locate the
+ * request/response information in the rte_crypto_op. This offset is counted
+ * from the start of the rte_crypto_op including initialization vector (IV).
+ */
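
To make the session private-data mechanism described above concrete, a minimal sketch of populating the metadata union for a session-based operation and attaching it to the symmetric session follows; it is not part of the patch, and the function name and parameters are illustrative.

#include <string.h>
#include <rte_cryptodev.h>
#include <rte_event_crypto_adapter.h>

static int
attach_crypto_event_metadata(struct rte_cryptodev_sym_session *sess,
			uint8_t cdev_id, uint16_t qp_id,
			uint8_t resp_queue_id, uint32_t flow_id)
{
	union rte_event_crypto_metadata m_data;

	memset(&m_data, 0, sizeof(m_data));

	/* Response event used by the adapter (or HW PMD) after completion */
	m_data.response_info.queue_id = resp_queue_id;
	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
	m_data.response_info.flow_id = flow_id;

	/* Request side: target cryptodev/queue pair used in FORWARD mode */
	m_data.request_info.cdev_id = cdev_id;
	m_data.request_info.queue_pair_id = qp_id;

	/* Stored with the session; the adapter reads it back with
	 * rte_cryptodev_sym_session_get_user_data().
	 */
	return rte_cryptodev_sym_session_set_user_data(sess, &m_data,
							sizeof(m_data));
}

The response fields occupy the first eight bytes of the union and the request fields the second eight, so both halves can be filled without overwriting each other.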
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include "rte_eventdev.h"
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this enum may change without prior notice
+ *
+ * Crypto event adapter mode
+ */
+enum rte_event_crypto_adapter_mode {
+ RTE_EVENT_CRYPTO_ADAPTER_OP_NEW,
+ /**< Start the crypto adapter in event new mode.
+ * @see RTE_EVENT_OP_NEW.
+ * Application submits crypto operations to the cryptodev.
+ * Adapter only dequeues the crypto completions from cryptodev
+ * and enqueue events to the eventdev.
+ */
+ RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD,
+ /**< Start the crypto adapter in event forward mode.
+ * @see RTE_EVENT_OP_FORWARD.
+ * Application submits crypto requests as events to the crypto
+ * adapter or crypto device based on
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
+ * Crypto completions are enqueued back to the eventdev by
+ * crypto adapter.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Crypto event request structure will be filled by application to
+ * provide event request information to the adapter.
+ */
+struct rte_event_crypto_request {
+ uint8_t resv[8];
+ /**< Overlaps with first 8 bytes of struct rte_event
+ * that encode the response event information. Application
+ * is expected to fill in struct rte_event response_info.
+ */
+ uint16_t cdev_id;
+ /**< cryptodev ID to be used */
+ uint16_t queue_pair_id;
+ /**< cryptodev queue pair ID to be used */
+ uint32_t resv1;
+ /**< Reserved bits */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Crypto event metadata structure will be filled by application
+ * to provide crypto request and event response information.
+ *
+ * If crypto events are enqueued using a HW mechanism, the cryptodev
+ * PMD will use the event response information to set up the event
+ * that is enqueued back to eventdev after completion of the crypto
+ * operation. If the transfer is done by SW, event response information
+ * will be used by the adapter.
+ */
+union rte_event_crypto_metadata {
+ struct rte_event_crypto_request request_info;
+ /**< Request information to be filled in by application
+ * for RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ */
+ struct rte_event response_info;
+ /**< Response information to be filled in by application
+ * for RTE_EVENT_CRYPTO_ADAPTER_OP_NEW and
+ * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Adapter configuration structure that the adapter configuration callback
+ * function is expected to fill out
+ * @see rte_event_crypto_adapter_conf_cb
+ */
+struct rte_event_crypto_adapter_conf {
+ uint8_t event_port_id;
+ /**< Event port identifier, the adapter enqueues events to this
+ * port and dequeues crypto request events in
+ * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ */
+ uint32_t max_nb;
+ /**< The adapter can return early if it has processed at least
+ * max_nb crypto ops. This isn't treated as a requirement; batching
+ * may cause the adapter to process more than max_nb crypto ops.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Function type used for the adapter configuration callback. The callback
+ * is used to fill in members of struct rte_event_crypto_adapter_conf; it
+ * is invoked when creating a SW service for packet transfer from a
+ * cryptodev queue pair to the event device. The SW service is created
+ * within the rte_event_crypto_adapter_queue_pair_add() function if SW
+ * based packet transfers from the cryptodev queue pair to the event
+ * device are required.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param conf
+ * Structure that needs to be populated by this callback.
+ *
+ * @param arg
+ * Argument to the callback. This is the same as the conf_arg passed to the
+ * rte_event_crypto_adapter_create_ext().
+ */
+typedef int (*rte_event_crypto_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
+ struct rte_event_crypto_adapter_conf *conf,
+ void *arg);
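
A minimal sketch of an application-supplied callback satisfying this contract (not part of the patch): it hands back an event port the application configured beforehand instead of reconfiguring the device; the assumption that conf_arg carries a pointer to that port number is illustrative.

static int
app_crypto_adapter_conf_cb(uint8_t id, uint8_t dev_id,
			struct rte_event_crypto_adapter_conf *conf,
			void *arg)
{
	/* conf_arg from rte_event_crypto_adapter_create_ext() is assumed
	 * to point at an event port the application set up beforehand.
	 */
	uint8_t *port_id = arg;

	(void)id;
	(void)dev_id;

	conf->event_port_id = *port_id;
	conf->max_nb = 128;
	return 0;
}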
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * A structure used to retrieve statistics for an event crypto adapter
+ * instance.
+ */
+
+struct rte_event_crypto_adapter_stats {
+ uint64_t event_poll_count;
+ /**< Event port poll count */
+ uint64_t event_deq_count;
+ /**< Event dequeue count */
+ uint64_t crypto_enq_count;
+ /**< Cryptodev enqueue count */
+ uint64_t crypto_enq_fail;
+ /**< Cryptodev enqueue failed count */
+ uint64_t crypto_deq_count;
+ /**< Cryptodev dequeue count */
+ uint64_t event_enq_count;
+ /**< Event enqueue count */
+ uint64_t event_enq_retry_count;
+ /**< Event enqueue retry count */
+ uint64_t event_enq_fail_count;
+ /**< Event enqueue fail count */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new event crypto adapter with the specified identifier.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param conf_cb
+ * Callback function that fills in members of a
+ * struct rte_event_crypto_adapter_conf struct passed into
+ * it.
+ *
+ * @param mode
+ * Flag to indicate the mode of the adapter.
+ * @see rte_event_crypto_adapter_mode
+ *
+ * @param conf_arg
+ * Argument that is passed to the conf_cb function.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int __rte_experimental
+rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_crypto_adapter_conf_cb conf_cb,
+ enum rte_event_crypto_adapter_mode mode,
+ void *conf_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new event crypto adapter with the specified identifier.
+ * This function uses an internal configuration function that creates an
+ * event port. This default function reconfigures the event device with an
+ * additional event port and sets it up using the port_config parameter
+ * passed into this function. If the application needs more control over
+ * the configuration of the service, it should use the
+ * rte_event_crypto_adapter_create_ext() version.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param port_config
+ * Argument of type *rte_event_port_conf* that is passed to the conf_cb
+ * function.
+ *
+ * @param mode
+ * Flag to indicate the mode of the adapter.
+ * @see rte_event_crypto_adapter_mode
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int __rte_experimental
+rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config,
+ enum rte_event_crypto_adapter_mode mode);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free an event crypto adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure, If the adapter still has queue pairs
+ * added to it, the function returns -EBUSY.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_free(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Add a queue pair to an event crypto adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param cdev_id
+ * Cryptodev identifier.
+ *
+ * @param queue_pair_id
+ * Cryptodev queue pair identifier. If queue_pair_id is set to -1, the
+ * adapter adds all the pre-configured queue pairs to the instance.
+ *
+ * @param event
+ * If HW supports cryptodev queue pair to event queue binding, the
+ * application is expected to fill in event information; otherwise it
+ * should be NULL.
+ * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+ *
+ * @return
+ * - 0: Success, queue pair added correctly.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_add(uint8_t id,
+ uint8_t cdev_id,
+ int32_t queue_pair_id,
+ const struct rte_event *event);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Delete a queue pair from an event crypto adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param cdev_id
+ * Cryptodev identifier.
+ *
+ * @param queue_pair_id
+ * Cryptodev queue pair identifier.
+ *
+ * @return
+ * - 0: Success, queue pair deleted successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
+ int32_t queue_pair_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start event crypto adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ *
+ * @return
+ * - 0: Success, adapter started successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_start(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop event crypto adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, adapter stopped successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_stop(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve statistics for an adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] stats
+ * A pointer to structure used to retrieve statistics for an adapter.
+ *
+ * @return
+ * - 0: Success, retrieved successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_stats_get(uint8_t id,
+ struct rte_event_crypto_adapter_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset statistics for an adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, statistics reset successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_stats_reset(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the service ID of an adapter. If the adapter doesn't use
+ * a rte_service function, this function returns -ESRCH.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure, if the adapter doesn't use a rte_service
+ * function, this function returns -ESRCH.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the event port of an adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] event_port_id
+ * A pointer to store the adapter's event port identifier. The application
+ * links its event queue to this port; it is used in
+ * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_EVENT_CRYPTO_ADAPTER_ */
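
As a companion to the OP_FORWARD description in the header above, a data-path sketch (not part of the patch) assuming a SW-transfer PMD: the application links one of its event queues to the adapter's event port once, then wraps prepared rte_crypto_ops in events on that queue. The identifiers and error handling are illustrative.

#include <errno.h>
#include <string.h>
#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

/* One-time setup: link an application event queue to the adapter port */
static int
crypto_fwd_queue_link(uint8_t evdev_id, uint8_t adapter_id, uint8_t qid)
{
	uint8_t adapter_port;
	int ret;

	ret = rte_event_crypto_adapter_event_port_get(adapter_id,
							&adapter_port);
	if (ret)
		return ret;

	return rte_event_port_link(evdev_id, adapter_port, &qid, NULL, 1)
			== 1 ? 0 : -EIO;
}

/* Per operation: enqueue a prepared rte_crypto_op as an event; the adapter
 * dequeues it and submits it to the cryptodev/queue pair named in the op's
 * request metadata.
 */
static int
crypto_fwd_submit(uint8_t evdev_id, uint8_t app_port, uint8_t qid,
			struct rte_crypto_op *op)
{
	struct rte_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.queue_id = qid;
	ev.op = RTE_EVENT_OP_NEW;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.event_ptr = op;

	return rte_event_enqueue_burst(evdev_id, app_port, &ev, 1) == 1 ?
			0 : -ENOSPC;
}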
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index 9aece9f8..f5e5a0b5 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -1,3 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ */
+#if defined(LINUX)
+#include <sys/epoll.h>
+#endif
+#include <unistd.h>
+
#include <rte_cycles.h>
#include <rte_common.h>
#include <rte_dev.h>
@@ -7,6 +16,7 @@
#include <rte_malloc.h>
#include <rte_service_component.h>
#include <rte_thash.h>
+#include <rte_interrupts.h>
#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
@@ -20,6 +30,22 @@
#define ETH_RX_ADAPTER_MEM_NAME_LEN 32
#define RSS_KEY_SIZE 40
+/* value written to intr thread pipe to signal thread exit */
+#define ETH_BRIDGE_INTR_THREAD_EXIT 1
+/* Sentinel value to detect an uninitialized file descriptor */
+#define INIT_FD -1
+
+/*
+ * Used to store port and queue ID of interrupting Rx queue
+ */
+union queue_data {
+ RTE_STD_C11
+ void *ptr;
+ struct {
+ uint16_t port;
+ uint16_t queue;
+ };
+};
/*
* There is an instance of this struct per polled Rx queue added to the
@@ -27,7 +53,7 @@
*/
struct eth_rx_poll_entry {
/* Eth port to poll */
- uint8_t eth_dev_id;
+ uint16_t eth_dev_id;
/* Eth rx queue to poll */
uint16_t eth_rx_qid;
};
@@ -71,6 +97,30 @@ struct rte_event_eth_rx_adapter {
uint16_t enq_block_count;
/* Block start ts */
uint64_t rx_enq_block_start_ts;
+ /* epoll fd used to wait for Rx interrupts */
+ int epd;
+ /* Number of interrupt-driven Rx queues */
+ uint32_t num_rx_intr;
+ /* Used to send <dev id, queue id> of interrupting Rx queues from
+ * the interrupt thread to the Rx thread
+ */
+ struct rte_ring *intr_ring;
+ /* Rx Queue data (dev id, queue id) for the last non-empty
+ * queue polled
+ */
+ union queue_data qd;
+ /* queue_data is valid */
+ int qd_valid;
+ /* Interrupt ring lock, synchronizes Rx thread
+ * and interrupt thread
+ */
+ rte_spinlock_t intr_ring_lock;
+ /* Event array passed to rte_epoll_wait */
+ struct rte_epoll_event *epoll_events;
+ /* Count of interrupt vectors in use */
+ uint32_t num_intr_vec;
+ /* Thread blocked on Rx interrupts */
+ pthread_t rx_intr_thread;
/* Configuration callback for rte_service configuration */
rte_event_eth_rx_adapter_conf_cb conf_cb;
/* Configuration callback argument */
@@ -87,12 +137,20 @@ struct rte_event_eth_rx_adapter {
int socket_id;
/* Per adapter EAL service */
uint32_t service_id;
+ /* Adapter started flag */
+ uint8_t rxa_started;
+ /* Adapter ID */
+ uint8_t id;
} __rte_cache_aligned;
/* Per eth device */
struct eth_device_info {
struct rte_eth_dev *dev;
struct eth_rx_queue_info *rx_queue;
+ /* Rx callback */
+ rte_event_eth_rx_adapter_cb_fn cb_fn;
+ /* Rx callback argument */
+ void *cb_arg;
/* Set if ethdev->eventdev packet transfer uses a
* hardware mechanism
*/
@@ -103,15 +161,42 @@ struct eth_device_info {
* rx_adapter_stop callback needs to be invoked
*/
uint8_t dev_rx_started;
- /* If nb_dev_queues > 0, the start callback will
+ /* Number of queues added for this device */
+ uint16_t nb_dev_queues;
+ /* Number of poll based queues
+ * If nb_rx_poll > 0, the start callback will
* be invoked if not already invoked
*/
- uint16_t nb_dev_queues;
+ uint16_t nb_rx_poll;
+ /* Number of interrupt based queues
+ * If nb_rx_intr > 0, the start callback will
+ * be invoked if not already invoked.
+ */
+ uint16_t nb_rx_intr;
+ /* Number of queues that use the shared interrupt */
+ uint16_t nb_shared_intr;
+ /* sum(wrr(q)) for all queues within the device
+ * useful when deleting all device queues
+ */
+ uint32_t wrr_len;
+ /* Intr based queue index to start polling from, this is used
+ * if the number of shared interrupts is non-zero
+ */
+ uint16_t next_q_idx;
+ /* Intr based queue indices */
+ uint16_t *intr_queue;
+ /* Device generates a per Rx queue interrupt for queue
+ * indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
+ */
+ int multi_intr_cap;
+ /* shared interrupt enabled */
+ int shared_intr_enabled;
};
/* Per Rx queue */
struct eth_rx_queue_info {
int queue_enabled; /* True if added */
+ int intr_enabled;
uint16_t wt; /* Polling weight */
uint8_t event_queue_id; /* Event queue to enqueue packets to */
uint8_t sched_type; /* Sched type for events */
@@ -123,30 +208,30 @@ struct eth_rx_queue_info {
static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
static inline int
-valid_id(uint8_t id)
+rxa_validate_id(uint8_t id)
{
return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
}
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
- if (!valid_id(id)) { \
+ if (!rxa_validate_id(id)) { \
RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
return retval; \
} \
} while (0)
static inline int
-sw_rx_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
{
- return rx_adapter->num_rx_polled;
+ return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
}
/* Greatest common divisor */
-static uint16_t gcd_u16(uint16_t a, uint16_t b)
+static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
{
uint16_t r = a % b;
- return r ? gcd_u16(b, r) : b;
+ return r ? rxa_gcd_u16(b, r) : b;
}
/* Returns the next queue in the polling sequence
@@ -154,7 +239,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
* http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
*/
static int
-wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
unsigned int n, int *cw,
struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
uint16_t gcd, int prev)
@@ -164,7 +249,7 @@ wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
while (1) {
uint16_t q;
- uint8_t d;
+ uint16_t d;
i = (i + 1) % n;
if (i == 0) {
@@ -182,13 +267,298 @@ wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
}
}
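For reference, a minimal standalone sketch of the interleaved weighted round-robin selection that rxa_wrr_next() follows (per the Linux Virtual Server reference in the comment above). The freestanding wrr_next() helper, the weight array and the printed sequence below are illustrative only; the adapter operates on its eth_rx_poll[] entries and precomputed gcd/max_wt rather than a bare weight table.

#include <stdint.h>
#include <stdio.h>

/* Pick the next queue index given per-queue weights wt[0..n-1], the maximum
 * weight max_wt, the GCD of all weights, the current weight *cw and the
 * previously selected index prev (-1 on the first call).
 */
static int
wrr_next(const uint16_t *wt, unsigned int n, uint16_t max_wt, uint16_t gcd,
	 int *cw, int prev)
{
	int i = prev;

	while (1) {
		i = (i + 1) % n;
		if (i == 0) {
			*cw = *cw - gcd;
			if (*cw <= 0)
				*cw = max_wt;
		}
		if (wt[i] >= *cw)
			return i;
	}
}

int
main(void)
{
	const uint16_t wt[] = { 3, 2, 1 };	/* servicing weights */
	int cw = -1, prev = -1;
	unsigned int i;

	/* Prints the polling sequence "0 0 1 0 1 2": queue 0 is polled three
	 * times per cycle, queue 1 twice and queue 2 once.
	 */
	for (i = 0; i < 3 + 2 + 1; i++) {
		prev = wrr_next(wt, 3, 3, 1, &cw, prev);
		printf("%d ", prev);
	}
	printf("\n");
	return 0;
}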
-/* Precalculate WRR polling sequence for all queues in rx_adapter */
+static inline int
+rxa_shared_intr(struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ int multi_intr_cap;
+
+ if (dev_info->dev->intr_handle == NULL)
+ return 0;
+
+ multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
+ return !multi_intr_cap ||
+ rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
+}
+
+static inline int
+rxa_intr_queue(struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ struct eth_rx_queue_info *queue_info;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ return dev_info->rx_queue &&
+ !dev_info->internal_event_port &&
+ queue_info->queue_enabled && queue_info->wt == 0;
+}
+
+static inline int
+rxa_polled_queue(struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ struct eth_rx_queue_info *queue_info;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ return !dev_info->internal_event_port &&
+ dev_info->rx_queue &&
+ queue_info->queue_enabled && queue_info->wt != 0;
+}
+
+/* Calculate change in number of vectors after Rx queue ID is added/deleted */
static int
-eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
{
- uint8_t d;
+ uint16_t i;
+ int n, s;
+ uint16_t nbq;
+
+ nbq = dev_info->dev->data->nb_rx_queues;
+ n = 0; /* non shared count */
+ s = 0; /* shared count */
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < nbq; i++) {
+ if (!rxa_shared_intr(dev_info, i))
+ n += add ? !rxa_intr_queue(dev_info, i) :
+ rxa_intr_queue(dev_info, i);
+ else
+ s += add ? !rxa_intr_queue(dev_info, i) :
+ rxa_intr_queue(dev_info, i);
+ }
+
+ if (s > 0) {
+ if ((add && dev_info->nb_shared_intr == 0) ||
+ (!add && dev_info->nb_shared_intr))
+ n += 1;
+ }
+ } else {
+ if (!rxa_shared_intr(dev_info, rx_queue_id))
+ n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
+ rxa_intr_queue(dev_info, rx_queue_id);
+ else
+ n = add ? !dev_info->nb_shared_intr :
+ dev_info->nb_shared_intr == 1;
+ }
+
+ return add ? n : -n;
+}
+
+/* Calculate nb_rx_intr after deleting interrupt mode rx queues
+ */
+static void
+rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_intr)
+{
+ uint32_t intr_diff;
+
+ if (rx_queue_id == -1)
+ intr_diff = dev_info->nb_rx_intr;
+ else
+ intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
+
+ *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
+}
+
+/* Calculate nb_rx_* after adding interrupt mode Rx queues; the newly added
+ * interrupt queues may currently be poll mode Rx queues
+ */
+static void
+rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ uint32_t intr_diff;
+ uint32_t poll_diff;
+ uint32_t wrr_len_diff;
+
+ if (rx_queue_id == -1) {
+ intr_diff = dev_info->dev->data->nb_rx_queues -
+ dev_info->nb_rx_intr;
+ poll_diff = dev_info->nb_rx_poll;
+ wrr_len_diff = dev_info->wrr_len;
+ } else {
+ intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
+ poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
+ wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
+ 0;
+ }
+
+ *nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
+ *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
+ *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
+}
+
+/* Calculate size of the eth_rx_poll and wrr_sched arrays
+ * after deleting poll mode rx queues
+ */
+static void
+rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_wrr)
+{
+ uint32_t poll_diff;
+ uint32_t wrr_len_diff;
+
+ if (rx_queue_id == -1) {
+ poll_diff = dev_info->nb_rx_poll;
+ wrr_len_diff = dev_info->wrr_len;
+ } else {
+ poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
+ wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
+ 0;
+ }
+
+ *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
+ *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
+}
+
+/* Calculate nb_rx_* after adding poll mode rx queues
+ */
+static void
+rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint16_t wt,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ uint32_t intr_diff;
+ uint32_t poll_diff;
+ uint32_t wrr_len_diff;
+
+ if (rx_queue_id == -1) {
+ intr_diff = dev_info->nb_rx_intr;
+ poll_diff = dev_info->dev->data->nb_rx_queues -
+ dev_info->nb_rx_poll;
+ wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
+ - dev_info->wrr_len;
+ } else {
+ intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
+ poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
+ wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
+ wt - dev_info->rx_queue[rx_queue_id].wt :
+ wt;
+ }
+
+ *nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
+ *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
+ *nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
+}
+
+/* Calculate nb_rx_* after adding rx_queue_id */
+static void
+rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint16_t wt,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ if (wt != 0)
+ rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
+ wt, nb_rx_poll, nb_rx_intr, nb_wrr);
+ else
+ rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
+ nb_rx_poll, nb_rx_intr, nb_wrr);
+}
+
+/* Calculate nb_rx_* after deleting rx_queue_id */
+static void
+rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
+ nb_wrr);
+ rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
+ nb_rx_intr);
+}
+
+/*
+ * Allocate the rx_poll array
+ */
+static struct eth_rx_poll_entry *
+rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint32_t num_rx_polled)
+{
+ size_t len;
+
+ len = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
+ RTE_CACHE_LINE_SIZE);
+ return rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+}
+
+/*
+ * Allocate the WRR array
+ */
+static uint32_t *
+rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+{
+ size_t len;
+
+ len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
+ RTE_CACHE_LINE_SIZE);
+ return rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+}
+
+static int
+rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint32_t nb_poll,
+ uint32_t nb_wrr,
+ struct eth_rx_poll_entry **rx_poll,
+ uint32_t **wrr_sched)
+{
+
+ if (nb_poll == 0) {
+ *rx_poll = NULL;
+ *wrr_sched = NULL;
+ return 0;
+ }
+
+ *rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
+ if (*rx_poll == NULL) {
+ *wrr_sched = NULL;
+ return -ENOMEM;
+ }
+
+ *wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
+ if (*wrr_sched == NULL) {
+ rte_free(*rx_poll);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Precalculate WRR polling sequence for all queues in rx_adapter */
+static void
+rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_rx_poll_entry *rx_poll,
+ uint32_t *rx_wrr)
+{
+ uint16_t d;
uint16_t q;
unsigned int i;
+ int prev = -1;
+ int cw = -1;
/* Initialize variables for calculation of wrr schedule */
uint16_t max_wrr_pos = 0;
@@ -196,79 +566,52 @@ eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
uint16_t max_wt = 0;
uint16_t gcd = 0;
- struct eth_rx_poll_entry *rx_poll = NULL;
- uint32_t *rx_wrr = NULL;
+ if (rx_poll == NULL)
+ return;
- if (rx_adapter->num_rx_polled) {
- size_t len = RTE_ALIGN(rx_adapter->num_rx_polled *
- sizeof(*rx_adapter->eth_rx_poll),
- RTE_CACHE_LINE_SIZE);
- rx_poll = rte_zmalloc_socket(rx_adapter->mem_name,
- len,
- RTE_CACHE_LINE_SIZE,
- rx_adapter->socket_id);
- if (rx_poll == NULL)
- return -ENOMEM;
+ /* Generate array of all queues to poll, the size of this
+ * array is poll_q
+ */
+ RTE_ETH_FOREACH_DEV(d) {
+ uint16_t nb_rx_queues;
+ struct eth_device_info *dev_info =
+ &rx_adapter->eth_devices[d];
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ if (dev_info->rx_queue == NULL)
+ continue;
+ if (dev_info->internal_event_port)
+ continue;
+ dev_info->wrr_len = 0;
+ for (q = 0; q < nb_rx_queues; q++) {
+ struct eth_rx_queue_info *queue_info =
+ &dev_info->rx_queue[q];
+ uint16_t wt;
- /* Generate array of all queues to poll, the size of this
- * array is poll_q
- */
- for (d = 0; d < rte_eth_dev_count(); d++) {
- uint16_t nb_rx_queues;
- struct eth_device_info *dev_info =
- &rx_adapter->eth_devices[d];
- nb_rx_queues = dev_info->dev->data->nb_rx_queues;
- if (dev_info->rx_queue == NULL)
+ if (!rxa_polled_queue(dev_info, q))
continue;
- for (q = 0; q < nb_rx_queues; q++) {
- struct eth_rx_queue_info *queue_info =
- &dev_info->rx_queue[q];
- if (queue_info->queue_enabled == 0)
- continue;
-
- uint16_t wt = queue_info->wt;
- rx_poll[poll_q].eth_dev_id = d;
- rx_poll[poll_q].eth_rx_qid = q;
- max_wrr_pos += wt;
- max_wt = RTE_MAX(max_wt, wt);
- gcd = (gcd) ? gcd_u16(gcd, wt) : wt;
- poll_q++;
- }
- }
-
- len = RTE_ALIGN(max_wrr_pos * sizeof(*rx_wrr),
- RTE_CACHE_LINE_SIZE);
- rx_wrr = rte_zmalloc_socket(rx_adapter->mem_name,
- len,
- RTE_CACHE_LINE_SIZE,
- rx_adapter->socket_id);
- if (rx_wrr == NULL) {
- rte_free(rx_poll);
- return -ENOMEM;
- }
-
- /* Generate polling sequence based on weights */
- int prev = -1;
- int cw = -1;
- for (i = 0; i < max_wrr_pos; i++) {
- rx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,
- rx_poll, max_wt, gcd, prev);
- prev = rx_wrr[i];
+ wt = queue_info->wt;
+ rx_poll[poll_q].eth_dev_id = d;
+ rx_poll[poll_q].eth_rx_qid = q;
+ max_wrr_pos += wt;
+ dev_info->wrr_len += wt;
+ max_wt = RTE_MAX(max_wt, wt);
+ gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
+ poll_q++;
}
}
- rte_free(rx_adapter->eth_rx_poll);
- rte_free(rx_adapter->wrr_sched);
-
- rx_adapter->eth_rx_poll = rx_poll;
- rx_adapter->wrr_sched = rx_wrr;
- rx_adapter->wrr_len = max_wrr_pos;
-
- return 0;
+ /* Generate polling sequence based on weights */
+ prev = -1;
+ cw = -1;
+ for (i = 0; i < max_wrr_pos; i++) {
+ rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
+ rx_poll, max_wt, gcd, prev);
+ prev = rx_wrr[i];
+ }
}
static inline void
-mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
+rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
struct ipv6_hdr **ipv6_hdr)
{
struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -307,7 +650,7 @@ mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
/* Calculate RSS hash for IPv4/6 */
static inline uint32_t
-do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
+rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
{
uint32_t input_len;
void *tuple;
@@ -316,7 +659,7 @@ do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
- mtoip(m, &ipv4_hdr, &ipv6_hdr);
+ rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
if (ipv4_hdr) {
ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
@@ -335,13 +678,13 @@ do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
}
static inline int
-rx_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
{
return !!rx_adapter->enq_block_count;
}
static inline void
-rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
{
if (rx_adapter->rx_enq_block_start_ts)
return;
@@ -354,13 +697,13 @@ rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
}
static inline void
-rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
struct rte_event_eth_rx_adapter_stats *stats)
{
if (unlikely(!stats->rx_enq_start_ts))
stats->rx_enq_start_ts = rte_get_tsc_cycles();
- if (likely(!rx_enq_blocked(rx_adapter)))
+ if (likely(!rxa_enq_blocked(rx_adapter)))
return;
rx_adapter->enq_block_count = 0;
@@ -376,8 +719,8 @@ rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
* this function
*/
static inline void
-buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
- struct rte_event *ev)
+rxa_buffer_event(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct rte_event *ev)
{
struct rte_eth_event_enqueue_buffer *buf =
&rx_adapter->event_enqueue_buffer;
@@ -386,7 +729,7 @@ buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
/* Enqueue buffered events to event device */
static inline uint16_t
-flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
{
struct rte_eth_event_enqueue_buffer *buf =
&rx_adapter->event_enqueue_buffer;
@@ -403,8 +746,8 @@ flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
stats->rx_enq_retry++;
}
- n ? rx_enq_block_end_ts(rx_adapter, stats) :
- rx_enq_block_start_ts(rx_adapter);
+ n ? rxa_enq_block_end_ts(rx_adapter, stats) :
+ rxa_enq_block_start_ts(rx_adapter);
buf->count -= n;
stats->rx_enq_count += n;
@@ -413,18 +756,19 @@ flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
}
static inline void
-fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
- uint8_t dev_id,
- uint16_t rx_queue_id,
- struct rte_mbuf **mbufs,
- uint16_t num)
+rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t eth_dev_id,
+ uint16_t rx_queue_id,
+ struct rte_mbuf **mbufs,
+ uint16_t num)
{
uint32_t i;
- struct eth_device_info *eth_device_info =
- &rx_adapter->eth_devices[dev_id];
+ struct eth_device_info *dev_info =
+ &rx_adapter->eth_devices[eth_dev_id];
struct eth_rx_queue_info *eth_rx_queue_info =
- &eth_device_info->rx_queue[rx_queue_id];
-
+ &dev_info->rx_queue[rx_queue_id];
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
int32_t qid = eth_rx_queue_info->event_queue_id;
uint8_t sched_type = eth_rx_queue_info->sched_type;
uint8_t priority = eth_rx_queue_info->priority;
@@ -434,22 +778,48 @@ fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
uint32_t rss_mask;
uint32_t rss;
int do_rss;
+ uint64_t ts;
+ struct rte_mbuf *cb_mbufs[BATCH_SIZE];
+ uint16_t nb_cb;
/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
+ if ((m->ol_flags & PKT_RX_TIMESTAMP) == 0) {
+ ts = rte_get_tsc_cycles();
+ for (i = 0; i < num; i++) {
+ m = mbufs[i];
+
+ m->timestamp = ts;
+ m->ol_flags |= PKT_RX_TIMESTAMP;
+ }
+ }
+
+
+ nb_cb = dev_info->cb_fn ? dev_info->cb_fn(eth_dev_id, rx_queue_id,
+ ETH_EVENT_BUFFER_SIZE,
+ buf->count, mbufs,
+ num,
+ dev_info->cb_arg,
+ cb_mbufs) :
+ num;
+ if (nb_cb < num) {
+ mbufs = cb_mbufs;
+ num = nb_cb;
+ }
+
for (i = 0; i < num; i++) {
m = mbufs[i];
struct rte_event *ev = &events[i];
rss = do_rss ?
- do_softrss(m, rx_adapter->rss_key_be) : m->hash.rss;
+ rxa_do_softrss(m, rx_adapter->rss_key_be) :
+ m->hash.rss;
flow_id =
eth_rx_queue_info->flow_id &
eth_rx_queue_info->flow_id_mask;
flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
-
ev->flow_id = flow_id;
ev->op = RTE_EVENT_OP_NEW;
ev->sched_type = sched_type;
@@ -459,8 +829,275 @@ fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
ev->priority = priority;
ev->mbuf = m;
- buf_event_enqueue(rx_adapter, ev);
+ rxa_buffer_event(rx_adapter, ev);
+ }
+}
+
+/* Enqueue packets from <port, q> to event buffer */
+static inline uint32_t
+rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t port_id,
+ uint16_t queue_id,
+ uint32_t rx_count,
+ uint32_t max_rx,
+ int *rxq_empty)
+{
+ struct rte_mbuf *mbufs[BATCH_SIZE];
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ struct rte_event_eth_rx_adapter_stats *stats =
+ &rx_adapter->stats;
+ uint16_t n;
+ uint32_t nb_rx = 0;
+
+ if (rxq_empty)
+ *rxq_empty = 0;
+ /* Don't do a batch dequeue from the rx queue if there isn't
+ * enough space in the enqueue buffer.
+ */
+ while (BATCH_SIZE <= (RTE_DIM(buf->events) - buf->count)) {
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ stats->rx_poll_count++;
+ n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
+ if (unlikely(!n)) {
+ if (rxq_empty)
+ *rxq_empty = 1;
+ break;
+ }
+ rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
+ nb_rx += n;
+ if (rx_count + nb_rx > max_rx)
+ break;
+ }
+
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ return nb_rx;
+}
+
+static inline void
+rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
+ void *data)
+{
+ uint16_t port_id;
+ uint16_t queue;
+ int err;
+ union queue_data qd;
+ struct eth_device_info *dev_info;
+ struct eth_rx_queue_info *queue_info;
+ int *intr_enabled;
+
+ qd.ptr = data;
+ port_id = qd.port;
+ queue = qd.queue;
+
+ dev_info = &rx_adapter->eth_devices[port_id];
+ queue_info = &dev_info->rx_queue[queue];
+ rte_spinlock_lock(&rx_adapter->intr_ring_lock);
+ if (rxa_shared_intr(dev_info, queue))
+ intr_enabled = &dev_info->shared_intr_enabled;
+ else
+ intr_enabled = &queue_info->intr_enabled;
+
+ if (*intr_enabled) {
+ *intr_enabled = 0;
+ err = rte_ring_enqueue(rx_adapter->intr_ring, data);
+ /* Entry should always be available.
+ * The ring size equals the maximum number of interrupt
+ * vectors supported (an interrupt vector is shared in
+ * case of shared interrupts)
+ */
+ if (err)
+ RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
+ " to ring: %s", strerror(err));
+ else
+ rte_eth_dev_rx_intr_disable(port_id, queue);
+ }
+ rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
+}
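As an aside, the qd.ptr/qd.port/qd.queue handling above (and the reverse packing in rxa_config_intr() further down) depends on a union that overlays a <port, queue> pair on a pointer-sized word so it can travel through the epoll user data. Below is a minimal illustration using a hypothetical pq_data type, not the adapter's actual union queue_data definition; compile as C11 or later for the anonymous struct member.

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

union pq_data {
	void *ptr;			/* value handed to/returned by epoll */
	struct {
		uint16_t port;		/* ethdev port id */
		uint16_t queue;		/* Rx queue id */
	};
};

int
main(void)
{
	union pq_data in = { .ptr = NULL };
	union pq_data out;

	in.port = 3;
	in.queue = 7;
	out.ptr = in.ptr;		/* what comes back as epdata.data */
	assert(out.port == 3 && out.queue == 7);
	return 0;
}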
+
+static int
+rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint32_t num_intr_vec)
+{
+ if (rx_adapter->num_intr_vec + num_intr_vec >
+ RTE_EVENT_ETH_INTR_RING_SIZE) {
+ RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
+ " %d needed %d limit %d", rx_adapter->num_intr_vec,
+ num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/* Delete entries for (dev, queue) from the interrupt ring */
+static void
+rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ int i, n;
+ union queue_data qd;
+
+ rte_spinlock_lock(&rx_adapter->intr_ring_lock);
+
+ n = rte_ring_count(rx_adapter->intr_ring);
+ for (i = 0; i < n; i++) {
+ rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
+ if (!rxa_shared_intr(dev_info, rx_queue_id)) {
+ if (qd.port == dev_info->dev->data->port_id &&
+ qd.queue == rx_queue_id)
+ continue;
+ } else {
+ if (qd.port == dev_info->dev->data->port_id)
+ continue;
+ }
+ rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
+ }
+
+ rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
+}
+
+/* pthread callback handling interrupt mode receive queues
+ * After receiving an Rx interrupt, it enqueues the port id and queue id of the
+ * interrupting queue to the adapter's ring buffer for interrupt events.
+ * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
+ * the adapter service function.
+ */
+static void *
+rxa_intr_thread(void *arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter = arg;
+ struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
+ int n, i;
+
+ while (1) {
+ n = rte_epoll_wait(rx_adapter->epd, epoll_events,
+ RTE_EVENT_ETH_INTR_RING_SIZE, -1);
+ if (unlikely(n < 0))
+ RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
+ n);
+ for (i = 0; i < n; i++) {
+ rxa_intr_ring_enqueue(rx_adapter,
+ epoll_events[i].epdata.data);
+ }
+ }
+
+ return NULL;
+}
+
+/* Dequeue <port, q> from interrupt ring and enqueue received
+ * mbufs to eventdev
+ */
+static inline uint32_t
+rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ uint32_t n;
+ uint32_t nb_rx = 0;
+ int rxq_empty;
+ struct rte_eth_event_enqueue_buffer *buf;
+ rte_spinlock_t *ring_lock;
+ uint8_t max_done = 0;
+
+ if (rx_adapter->num_rx_intr == 0)
+ return 0;
+
+ if (rte_ring_count(rx_adapter->intr_ring) == 0
+ && !rx_adapter->qd_valid)
+ return 0;
+
+ buf = &rx_adapter->event_enqueue_buffer;
+ ring_lock = &rx_adapter->intr_ring_lock;
+
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ while (BATCH_SIZE <= (RTE_DIM(buf->events) - buf->count)) {
+ struct eth_device_info *dev_info;
+ uint16_t port;
+ uint16_t queue;
+ union queue_data qd = rx_adapter->qd;
+ int err;
+
+ if (!rx_adapter->qd_valid) {
+ struct eth_rx_queue_info *queue_info;
+
+ rte_spinlock_lock(ring_lock);
+ err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
+ if (err) {
+ rte_spinlock_unlock(ring_lock);
+ break;
+ }
+
+ port = qd.port;
+ queue = qd.queue;
+ rx_adapter->qd = qd;
+ rx_adapter->qd_valid = 1;
+ dev_info = &rx_adapter->eth_devices[port];
+ if (rxa_shared_intr(dev_info, queue))
+ dev_info->shared_intr_enabled = 1;
+ else {
+ queue_info = &dev_info->rx_queue[queue];
+ queue_info->intr_enabled = 1;
+ }
+ rte_eth_dev_rx_intr_enable(port, queue);
+ rte_spinlock_unlock(ring_lock);
+ } else {
+ port = qd.port;
+ queue = qd.queue;
+
+ dev_info = &rx_adapter->eth_devices[port];
+ }
+
+ if (rxa_shared_intr(dev_info, queue)) {
+ uint16_t i;
+ uint16_t nb_queues;
+
+ nb_queues = dev_info->dev->data->nb_rx_queues;
+ n = 0;
+ for (i = dev_info->next_q_idx; i < nb_queues; i++) {
+ uint8_t enq_buffer_full;
+
+ if (!rxa_intr_queue(dev_info, i))
+ continue;
+ n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
+ rx_adapter->max_nb_rx,
+ &rxq_empty);
+ nb_rx += n;
+
+ enq_buffer_full = !rxq_empty && n == 0;
+ max_done = nb_rx > rx_adapter->max_nb_rx;
+
+ if (enq_buffer_full || max_done) {
+ dev_info->next_q_idx = i;
+ goto done;
+ }
+ }
+
+ rx_adapter->qd_valid = 0;
+
+ /* Reinitialize for next interrupt */
+ dev_info->next_q_idx = dev_info->multi_intr_cap ?
+ RTE_MAX_RXTX_INTR_VEC_ID - 1 :
+ 0;
+ } else {
+ n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
+ rx_adapter->max_nb_rx,
+ &rxq_empty);
+ rx_adapter->qd_valid = !rxq_empty;
+ nb_rx += n;
+ if (nb_rx > rx_adapter->max_nb_rx)
+ break;
+ }
}
+
+done:
+ rx_adapter->stats.rx_intr_packets += nb_rx;
+ return nb_rx;
}
/*
@@ -477,12 +1114,10 @@ fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
* it.
*/
static inline uint32_t
-eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
{
uint32_t num_queue;
- uint16_t n;
uint32_t nb_rx = 0;
- struct rte_mbuf *mbufs[BATCH_SIZE];
struct rte_eth_event_enqueue_buffer *buf;
uint32_t wrr_pos;
uint32_t max_nb_rx;
@@ -490,57 +1125,54 @@ eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
wrr_pos = rx_adapter->wrr_pos;
max_nb_rx = rx_adapter->max_nb_rx;
buf = &rx_adapter->event_enqueue_buffer;
- struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
+ stats = &rx_adapter->stats;
/* Iterate through a WRR sequence */
for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
- uint8_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
+ uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
/* Don't do a batch dequeue from the rx queue if there isn't
* enough space in the enqueue buffer.
*/
if (buf->count >= BATCH_SIZE)
- flush_event_buffer(rx_adapter);
- if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count))
- break;
-
- stats->rx_poll_count++;
- n = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);
+ rxa_flush_event_buffer(rx_adapter);
+ if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count)) {
+ rx_adapter->wrr_pos = wrr_pos;
+ return nb_rx;
+ }
- if (n) {
- stats->rx_packets += n;
- /* The check before rte_eth_rx_burst() ensures that
- * all n mbufs can be buffered
- */
- fill_event_buffer(rx_adapter, d, qid, mbufs, n);
- nb_rx += n;
- if (nb_rx > max_nb_rx) {
- rx_adapter->wrr_pos =
+ nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
+ NULL);
+ if (nb_rx > max_nb_rx) {
+ rx_adapter->wrr_pos =
(wrr_pos + 1) % rx_adapter->wrr_len;
- return nb_rx;
- }
+ break;
}
if (++wrr_pos == rx_adapter->wrr_len)
wrr_pos = 0;
}
-
return nb_rx;
}
static int
-event_eth_rx_adapter_service_func(void *args)
+rxa_service_func(void *args)
{
struct rte_event_eth_rx_adapter *rx_adapter = args;
- struct rte_eth_event_enqueue_buffer *buf;
+ struct rte_event_eth_rx_adapter_stats *stats;
- buf = &rx_adapter->event_enqueue_buffer;
if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
return 0;
- if (eth_rx_poll(rx_adapter) == 0 && buf->count)
- flush_event_buffer(rx_adapter);
+ if (!rx_adapter->rxa_started) {
+		rte_spinlock_unlock(&rx_adapter->rx_lock);
+		return 0;
+ }
+
+ stats = &rx_adapter->stats;
+ stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
+ stats->rx_packets += rxa_poll(rx_adapter);
rte_spinlock_unlock(&rx_adapter->rx_lock);
return 0;
}
@@ -572,14 +1204,14 @@ rte_event_eth_rx_adapter_init(void)
}
static inline struct rte_event_eth_rx_adapter *
-id_to_rx_adapter(uint8_t id)
+rxa_id_to_adapter(uint8_t id)
{
return event_eth_rx_adapter ?
event_eth_rx_adapter[id] : NULL;
}
static int
-default_conf_cb(uint8_t id, uint8_t dev_id,
+rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
struct rte_event_eth_rx_adapter_conf *conf, void *arg)
{
int ret;
@@ -588,7 +1220,7 @@ default_conf_cb(uint8_t id, uint8_t dev_id,
int started;
uint8_t port_id;
struct rte_event_port_conf *port_conf = arg;
- struct rte_event_eth_rx_adapter *rx_adapter = id_to_rx_adapter(id);
+ struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
dev = &rte_eventdevs[rx_adapter->eventdev_id];
dev_conf = dev->data->dev_conf;
@@ -625,7 +1257,351 @@ default_conf_cb(uint8_t id, uint8_t dev_id,
}
static int
-init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_epoll_create1(void)
+{
+#if defined(LINUX)
+ int fd;
+ fd = epoll_create1(EPOLL_CLOEXEC);
+ return fd < 0 ? -errno : fd;
+#elif defined(BSD)
+ return -ENOTSUP;
+#endif
+}
+
+static int
+rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ if (rx_adapter->epd != INIT_FD)
+ return 0;
+
+ rx_adapter->epd = rxa_epoll_create1();
+ if (rx_adapter->epd < 0) {
+ int err = rx_adapter->epd;
+ rx_adapter->epd = INIT_FD;
+ RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ int err;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
+
+ if (rx_adapter->intr_ring)
+ return 0;
+
+ rx_adapter->intr_ring = rte_ring_create("intr_ring",
+ RTE_EVENT_ETH_INTR_RING_SIZE,
+ rte_socket_id(), 0);
+ if (!rx_adapter->intr_ring)
+ return -ENOMEM;
+
+ rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
+ RTE_EVENT_ETH_INTR_RING_SIZE *
+ sizeof(struct rte_epoll_event),
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+ if (!rx_adapter->epoll_events) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ rte_spinlock_init(&rx_adapter->intr_ring_lock);
+
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
+ "rx-intr-thread-%d", rx_adapter->id);
+
+ err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
+ NULL, rxa_intr_thread, rx_adapter);
+ if (!err) {
+ rte_thread_setname(rx_adapter->rx_intr_thread, thread_name);
+ return 0;
+ }
+
+ RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
+error:
+ rte_ring_free(rx_adapter->intr_ring);
+ rx_adapter->intr_ring = NULL;
+ rx_adapter->epoll_events = NULL;
+ return err;
+}
+
+static int
+rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ int err;
+
+ err = pthread_cancel(rx_adapter->rx_intr_thread);
+ if (err)
+ RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
+ err);
+
+ err = pthread_join(rx_adapter->rx_intr_thread, NULL);
+ if (err)
+ RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
+
+ rte_free(rx_adapter->epoll_events);
+ rte_ring_free(rx_adapter->intr_ring);
+ rx_adapter->intr_ring = NULL;
+ rx_adapter->epoll_events = NULL;
+ return 0;
+}
+
+static int
+rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ int ret;
+
+ if (rx_adapter->num_rx_intr == 0)
+ return 0;
+
+ ret = rxa_destroy_intr_thread(rx_adapter);
+ if (ret)
+ return ret;
+
+ close(rx_adapter->epd);
+ rx_adapter->epd = INIT_FD;
+
+ return ret;
+}
+
+static int
+rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ int err;
+ uint16_t eth_dev_id = dev_info->dev->data->port_id;
+ int sintr = rxa_shared_intr(dev_info, rx_queue_id);
+
+ err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
+ if (err) {
+ RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
+ rx_queue_id);
+ return err;
+ }
+
+ err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
+ rx_adapter->epd,
+ RTE_INTR_EVENT_DEL,
+ 0);
+ if (err)
+ RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
+
+	if (sintr)
+		dev_info->shared_intr_enabled = 0;
+	else
+		dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
+ return err;
+}
+
+static int
+rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ int err;
+ int i;
+ int s;
+
+ if (dev_info->nb_rx_intr == 0)
+ return 0;
+
+ err = 0;
+ if (rx_queue_id == -1) {
+ s = dev_info->nb_shared_intr;
+ for (i = 0; i < dev_info->nb_rx_intr; i++) {
+ int sintr;
+ uint16_t q;
+
+ q = dev_info->intr_queue[i];
+ sintr = rxa_shared_intr(dev_info, q);
+ s -= sintr;
+
+ if (!sintr || s == 0) {
+
+ err = rxa_disable_intr(rx_adapter, dev_info,
+ q);
+ if (err)
+ return err;
+ rxa_intr_ring_del_entries(rx_adapter, dev_info,
+ q);
+ }
+ }
+ } else {
+ if (!rxa_intr_queue(dev_info, rx_queue_id))
+ return 0;
+ if (!rxa_shared_intr(dev_info, rx_queue_id) ||
+ dev_info->nb_shared_intr == 1) {
+ err = rxa_disable_intr(rx_adapter, dev_info,
+ rx_queue_id);
+ if (err)
+ return err;
+ rxa_intr_ring_del_entries(rx_adapter, dev_info,
+ rx_queue_id);
+ }
+
+ for (i = 0; i < dev_info->nb_rx_intr; i++) {
+ if (dev_info->intr_queue[i] == rx_queue_id) {
+ for (; i < dev_info->nb_rx_intr - 1; i++)
+ dev_info->intr_queue[i] =
+ dev_info->intr_queue[i + 1];
+ break;
+ }
+ }
+ }
+
+ return err;
+}
+
+static int
+rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ int err, err1;
+ uint16_t eth_dev_id = dev_info->dev->data->port_id;
+ union queue_data qd;
+ int init_fd;
+ uint16_t *intr_queue;
+ int sintr = rxa_shared_intr(dev_info, rx_queue_id);
+
+ if (rxa_intr_queue(dev_info, rx_queue_id))
+ return 0;
+
+ intr_queue = dev_info->intr_queue;
+ if (dev_info->intr_queue == NULL) {
+ size_t len =
+ dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
+ dev_info->intr_queue =
+ rte_zmalloc_socket(
+ rx_adapter->mem_name,
+ len,
+ 0,
+ rx_adapter->socket_id);
+ if (dev_info->intr_queue == NULL)
+ return -ENOMEM;
+ }
+
+ init_fd = rx_adapter->epd;
+ err = rxa_init_epd(rx_adapter);
+ if (err)
+ goto err_free_queue;
+
+ qd.port = eth_dev_id;
+ qd.queue = rx_queue_id;
+
+ err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
+ rx_adapter->epd,
+ RTE_INTR_EVENT_ADD,
+ qd.ptr);
+ if (err) {
+ RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
+ " Rx Queue %u err %d", rx_queue_id, err);
+ goto err_del_fd;
+ }
+
+ err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
+ if (err) {
+ RTE_EDEV_LOG_ERR("Could not enable interrupt for"
+ " Rx Queue %u err %d", rx_queue_id, err);
+
+ goto err_del_event;
+ }
+
+ err = rxa_create_intr_thread(rx_adapter);
+ if (!err) {
+ if (sintr)
+ dev_info->shared_intr_enabled = 1;
+ else
+ dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
+ return 0;
+ }
+
+
+ err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
+ if (err)
+ RTE_EDEV_LOG_ERR("Could not disable interrupt for"
+ " Rx Queue %u err %d", rx_queue_id, err);
+err_del_event:
+ err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
+ rx_adapter->epd,
+ RTE_INTR_EVENT_DEL,
+ 0);
+ if (err1) {
+ RTE_EDEV_LOG_ERR("Could not delete event for"
+ " Rx Queue %u err %d", rx_queue_id, err1);
+ }
+err_del_fd:
+ if (init_fd == INIT_FD) {
+ close(rx_adapter->epd);
+ rx_adapter->epd = -1;
+ }
+err_free_queue:
+	if (intr_queue == NULL) {
+		rte_free(dev_info->intr_queue);
+		dev_info->intr_queue = NULL;
+	}
+
+ return err;
+}
+
+static int
+rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id)
+
+{
+ int i, j, err;
+ int si = -1;
+ int shared_done = (dev_info->nb_shared_intr > 0);
+
+ if (rx_queue_id != -1) {
+ if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
+ return 0;
+ return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
+ }
+
+ err = 0;
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
+
+ if (rxa_shared_intr(dev_info, i) && shared_done)
+ continue;
+
+ err = rxa_config_intr(rx_adapter, dev_info, i);
+
+ shared_done = err == 0 && rxa_shared_intr(dev_info, i);
+ if (shared_done) {
+ si = i;
+ dev_info->shared_intr_enabled = 1;
+ }
+ if (err)
+ break;
+ }
+
+ if (err == 0)
+ return 0;
+
+ shared_done = (dev_info->nb_shared_intr > 0);
+ for (j = 0; j < i; j++) {
+ if (rxa_intr_queue(dev_info, j))
+ continue;
+ if (rxa_shared_intr(dev_info, j) && si != j)
+ continue;
+ err = rxa_disable_intr(rx_adapter, dev_info, j);
+ if (err)
+ break;
+
+ }
+
+ return err;
+}
+
+
+static int
+rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
{
int ret;
struct rte_service_spec service;
@@ -638,7 +1614,7 @@ init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
"rte_event_eth_rx_adapter_%d", id);
service.socket_id = rx_adapter->socket_id;
- service.callback = event_eth_rx_adapter_service_func;
+ service.callback = rxa_service_func;
service.callback_userdata = rx_adapter;
/* Service function handles locking for queue add/del updates */
service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
@@ -659,6 +1635,7 @@ init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
rx_adapter->service_inited = 1;
+ rx_adapter->epd = INIT_FD;
return 0;
err_done:
@@ -666,9 +1643,8 @@ err_done:
return ret;
}
-
static void
-update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
struct eth_device_info *dev_info,
int32_t rx_queue_id,
uint8_t add)
@@ -682,7 +1658,7 @@ update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,
if (rx_queue_id == -1) {
for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
- update_queue_info(rx_adapter, dev_info, i, add);
+ rxa_update_queue(rx_adapter, dev_info, i, add);
} else {
queue_info = &dev_info->rx_queue[rx_queue_id];
enabled = queue_info->queue_enabled;
@@ -697,31 +1673,65 @@ update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,
}
}
-static int
-event_eth_rx_adapter_queue_del(struct rte_event_eth_rx_adapter *rx_adapter,
- struct eth_device_info *dev_info,
- uint16_t rx_queue_id)
+static void
+rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int32_t rx_queue_id)
{
- struct eth_rx_queue_info *queue_info;
+ int pollq;
+ int intrq;
+ int sintrq;
+
if (rx_adapter->nb_queues == 0)
- return 0;
+ return;
- queue_info = &dev_info->rx_queue[rx_queue_id];
- rx_adapter->num_rx_polled -= queue_info->queue_enabled;
- update_queue_info(rx_adapter, dev_info, rx_queue_id, 0);
- return 0;
+ if (rx_queue_id == -1) {
+ uint16_t nb_rx_queues;
+ uint16_t i;
+
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ for (i = 0; i < nb_rx_queues; i++)
+ rxa_sw_del(rx_adapter, dev_info, i);
+ return;
+ }
+
+ pollq = rxa_polled_queue(dev_info, rx_queue_id);
+ intrq = rxa_intr_queue(dev_info, rx_queue_id);
+ sintrq = rxa_shared_intr(dev_info, rx_queue_id);
+ rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
+ rx_adapter->num_rx_polled -= pollq;
+ dev_info->nb_rx_poll -= pollq;
+ rx_adapter->num_rx_intr -= intrq;
+ dev_info->nb_rx_intr -= intrq;
+ dev_info->nb_shared_intr -= intrq && sintrq;
}
static void
-event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,
- struct eth_device_info *dev_info,
- uint16_t rx_queue_id,
- const struct rte_event_eth_rx_adapter_queue_conf *conf)
-
+rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *conf)
{
struct eth_rx_queue_info *queue_info;
const struct rte_event *ev = &conf->ev;
+ int pollq;
+ int intrq;
+ int sintrq;
+
+ if (rx_queue_id == -1) {
+ uint16_t nb_rx_queues;
+ uint16_t i;
+
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ for (i = 0; i < nb_rx_queues; i++)
+ rxa_add_queue(rx_adapter, dev_info, i, conf);
+ return;
+ }
+
+ pollq = rxa_polled_queue(dev_info, rx_queue_id);
+ intrq = rxa_intr_queue(dev_info, rx_queue_id);
+ sintrq = rxa_shared_intr(dev_info, rx_queue_id);
queue_info = &dev_info->rx_queue[rx_queue_id];
queue_info->event_queue_id = ev->queue_id;
@@ -735,69 +1745,162 @@ event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,
queue_info->flow_id_mask = ~0;
}
- /* The same queue can be added more than once */
- rx_adapter->num_rx_polled += !queue_info->queue_enabled;
- update_queue_info(rx_adapter, dev_info, rx_queue_id, 1);
+ rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
+ if (rxa_polled_queue(dev_info, rx_queue_id)) {
+ rx_adapter->num_rx_polled += !pollq;
+ dev_info->nb_rx_poll += !pollq;
+ rx_adapter->num_rx_intr -= intrq;
+ dev_info->nb_rx_intr -= intrq;
+ dev_info->nb_shared_intr -= intrq && sintrq;
+ }
+
+ if (rxa_intr_queue(dev_info, rx_queue_id)) {
+ rx_adapter->num_rx_polled -= pollq;
+ dev_info->nb_rx_poll -= pollq;
+ rx_adapter->num_rx_intr += !intrq;
+ dev_info->nb_rx_intr += !intrq;
+ dev_info->nb_shared_intr += !intrq && sintrq;
+ if (dev_info->nb_shared_intr == 1) {
+ if (dev_info->multi_intr_cap)
+ dev_info->next_q_idx =
+ RTE_MAX_RXTX_INTR_VEC_ID - 1;
+ else
+ dev_info->next_q_idx = 0;
+ }
+ }
}
-static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
- uint8_t eth_dev_id,
+static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t eth_dev_id,
int rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
struct rte_event_eth_rx_adapter_queue_conf temp_conf;
- uint32_t i;
int ret;
+ struct eth_rx_poll_entry *rx_poll;
+ struct eth_rx_queue_info *rx_queue;
+ uint32_t *rx_wrr;
+ uint16_t nb_rx_queues;
+ uint32_t nb_rx_poll, nb_wrr;
+ uint32_t nb_rx_intr;
+ int num_intr_vec;
+ uint16_t wt;
if (queue_conf->servicing_weight == 0) {
-
struct rte_eth_dev_data *data = dev_info->dev->data;
- if (data->dev_conf.intr_conf.rxq) {
- RTE_EDEV_LOG_ERR("Interrupt driven queues"
- " not supported");
- return -ENOTSUP;
- }
- temp_conf = *queue_conf;
- /* If Rx interrupts are disabled set wt = 1 */
- temp_conf.servicing_weight = 1;
+ temp_conf = *queue_conf;
+ if (!data->dev_conf.intr_conf.rxq) {
+ /* If Rx interrupts are disabled set wt = 1 */
+ temp_conf.servicing_weight = 1;
+ }
queue_conf = &temp_conf;
}
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ rx_queue = dev_info->rx_queue;
+ wt = queue_conf->servicing_weight;
+
if (dev_info->rx_queue == NULL) {
dev_info->rx_queue =
rte_zmalloc_socket(rx_adapter->mem_name,
- dev_info->dev->data->nb_rx_queues *
+ nb_rx_queues *
sizeof(struct eth_rx_queue_info), 0,
rx_adapter->socket_id);
if (dev_info->rx_queue == NULL)
return -ENOMEM;
}
+ rx_wrr = NULL;
+ rx_poll = NULL;
- if (rx_queue_id == -1) {
- for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
- event_eth_rx_adapter_queue_add(rx_adapter,
- dev_info, i,
- queue_conf);
+ rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
+ queue_conf->servicing_weight,
+ &nb_rx_poll, &nb_rx_intr, &nb_wrr);
+
+ if (dev_info->dev->intr_handle)
+ dev_info->multi_intr_cap =
+ rte_intr_cap_multiple(dev_info->dev->intr_handle);
+
+ ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
+ &rx_poll, &rx_wrr);
+ if (ret)
+ goto err_free_rxqueue;
+
+ if (wt == 0) {
+ num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
+
+ ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
+ if (ret)
+ goto err_free_rxqueue;
+
+ ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
+ if (ret)
+ goto err_free_rxqueue;
} else {
- event_eth_rx_adapter_queue_add(rx_adapter, dev_info,
- (uint16_t)rx_queue_id,
- queue_conf);
+
+ num_intr_vec = 0;
+ if (rx_adapter->num_rx_intr > nb_rx_intr) {
+ num_intr_vec = rxa_nb_intr_vect(dev_info,
+ rx_queue_id, 0);
+ /* interrupt based queues are being converted to
+ * poll mode queues, delete the interrupt configuration
+ * for those.
+ */
+ ret = rxa_del_intr_queue(rx_adapter,
+ dev_info, rx_queue_id);
+ if (ret)
+ goto err_free_rxqueue;
+ }
}
- ret = eth_poll_wrr_calc(rx_adapter);
- if (ret) {
- event_eth_rx_adapter_queue_del(rx_adapter,
- dev_info, rx_queue_id);
- return ret;
+ if (nb_rx_intr == 0) {
+ ret = rxa_free_intr_resources(rx_adapter);
+ if (ret)
+ goto err_free_rxqueue;
}
- return ret;
+ if (wt == 0) {
+ uint16_t i;
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ dev_info->intr_queue[i] = i;
+ } else {
+ if (!rxa_intr_queue(dev_info, rx_queue_id))
+ dev_info->intr_queue[nb_rx_intr - 1] =
+ rx_queue_id;
+ }
+ }
+
+
+
+ rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+ rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
+
+ rte_free(rx_adapter->eth_rx_poll);
+ rte_free(rx_adapter->wrr_sched);
+
+ rx_adapter->eth_rx_poll = rx_poll;
+ rx_adapter->wrr_sched = rx_wrr;
+ rx_adapter->wrr_len = nb_wrr;
+ rx_adapter->num_intr_vec += num_intr_vec;
+ return 0;
+
+err_free_rxqueue:
+ if (rx_queue == NULL) {
+ rte_free(dev_info->rx_queue);
+ dev_info->rx_queue = NULL;
+ }
+
+ rte_free(rx_poll);
+ rte_free(rx_wrr);
+
+	return ret;
}
static int
-rx_adapter_ctrl(uint8_t id, int start)
+rxa_ctrl(uint8_t id, int start)
{
struct rte_event_eth_rx_adapter *rx_adapter;
struct rte_eventdev *dev;
@@ -807,13 +1910,13 @@ rx_adapter_ctrl(uint8_t id, int start)
int stop = !start;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
return -EINVAL;
dev = &rte_eventdevs[rx_adapter->eventdev_id];
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
/* if start check for num dev queues */
if (start && !dev_info->nb_dev_queues)
@@ -831,8 +1934,12 @@ rx_adapter_ctrl(uint8_t id, int start)
&rte_eth_devices[i]);
}
- if (use_service)
+ if (use_service) {
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ rx_adapter->rxa_started = start;
rte_service_runstate_set(rx_adapter->service_id, start);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ }
return 0;
}
@@ -845,7 +1952,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
struct rte_event_eth_rx_adapter *rx_adapter;
int ret;
int socket_id;
- uint8_t i;
+ uint16_t i;
char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
const uint8_t default_rss_key[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
@@ -866,7 +1973,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
return ret;
}
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter != NULL) {
RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
return -EEXIST;
@@ -888,9 +1995,11 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
rx_adapter->socket_id = socket_id;
rx_adapter->conf_cb = conf_cb;
rx_adapter->conf_arg = conf_arg;
+ rx_adapter->id = id;
strcpy(rx_adapter->mem_name, mem_name);
rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
- rte_eth_dev_count() *
+ /* FIXME: incompatible with hotplug */
+ rte_eth_dev_count_total() *
sizeof(struct eth_device_info), 0,
socket_id);
rte_convert_rss_key((const uint32_t *)default_rss_key,
@@ -903,11 +2012,11 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
return -ENOMEM;
}
rte_spinlock_init(&rx_adapter->rx_lock);
- for (i = 0; i < rte_eth_dev_count(); i++)
+ RTE_ETH_FOREACH_DEV(i)
rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
event_eth_rx_adapter[id] = rx_adapter;
- if (conf_cb == default_conf_cb)
+ if (conf_cb == rxa_default_conf_cb)
rx_adapter->default_cb_arg = 1;
return 0;
}
@@ -928,7 +2037,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
return -ENOMEM;
*pc = *port_config;
ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
- default_conf_cb,
+ rxa_default_conf_cb,
pc);
if (ret)
rte_free(pc);
@@ -942,7 +2051,7 @@ rte_event_eth_rx_adapter_free(uint8_t id)
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
return -EINVAL;
@@ -963,7 +2072,7 @@ rte_event_eth_rx_adapter_free(uint8_t id)
int
rte_event_eth_rx_adapter_queue_add(uint8_t id,
- uint8_t eth_dev_id,
+ uint16_t eth_dev_id,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
@@ -972,12 +2081,11 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
struct rte_event_eth_rx_adapter *rx_adapter;
struct rte_eventdev *dev;
struct eth_device_info *dev_info;
- int start_service;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if ((rx_adapter == NULL) || (queue_conf == NULL))
return -EINVAL;
@@ -987,7 +2095,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
&cap);
if (ret) {
RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
- "eth port %" PRIu8, id, eth_dev_id);
+ "eth port %" PRIu16, id, eth_dev_id);
return ret;
}
@@ -995,7 +2103,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
&& (queue_conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
- " eth port: %" PRIu8 " adapter id: %" PRIu8,
+ " eth port: %" PRIu16 " adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
@@ -1003,7 +2111,8 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
(rx_queue_id != -1)) {
RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
- "event queue id %u eth port %u", id, eth_dev_id);
+ "event queue, eth port: %" PRIu16 " adapter id: %"
+ PRIu8, eth_dev_id, id);
return -EINVAL;
}
@@ -1014,7 +2123,6 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
return -EINVAL;
}
- start_service = 0;
dev_info = &rx_adapter->eth_devices[eth_dev_id];
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
@@ -1034,33 +2142,34 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
&rte_eth_devices[eth_dev_id],
rx_queue_id, queue_conf);
if (ret == 0) {
- update_queue_info(rx_adapter,
+ dev_info->internal_event_port = 1;
+ rxa_update_queue(rx_adapter,
&rx_adapter->eth_devices[eth_dev_id],
rx_queue_id,
1);
}
} else {
rte_spinlock_lock(&rx_adapter->rx_lock);
- ret = init_service(rx_adapter, id);
- if (ret == 0)
- ret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,
+ dev_info->internal_event_port = 0;
+ ret = rxa_init_service(rx_adapter, id);
+ if (ret == 0) {
+ uint32_t service_id = rx_adapter->service_id;
+ ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
queue_conf);
+ rte_service_component_runstate_set(service_id,
+ rxa_sw_adapter_queue_count(rx_adapter));
+ }
rte_spinlock_unlock(&rx_adapter->rx_lock);
- if (ret == 0)
- start_service = !!sw_rx_adapter_queue_count(rx_adapter);
}
if (ret)
return ret;
- if (start_service)
- rte_service_component_runstate_set(rx_adapter->service_id, 1);
-
return 0;
}
int
-rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
+rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
int32_t rx_queue_id)
{
int ret = 0;
@@ -1068,12 +2177,17 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
struct rte_event_eth_rx_adapter *rx_adapter;
struct eth_device_info *dev_info;
uint32_t cap;
- uint16_t i;
+ uint32_t nb_rx_poll = 0;
+ uint32_t nb_wrr = 0;
+ uint32_t nb_rx_intr;
+ struct eth_rx_poll_entry *rx_poll = NULL;
+ uint32_t *rx_wrr = NULL;
+ int num_intr_vec;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
return -EINVAL;
@@ -1100,7 +2214,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
&rte_eth_devices[eth_dev_id],
rx_queue_id);
if (ret == 0) {
- update_queue_info(rx_adapter,
+ rxa_update_queue(rx_adapter,
&rx_adapter->eth_devices[eth_dev_id],
rx_queue_id,
0);
@@ -1110,48 +2224,78 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
}
}
} else {
- int rc;
+ rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
+ &nb_rx_poll, &nb_rx_intr, &nb_wrr);
+
+ ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
+ &rx_poll, &rx_wrr);
+ if (ret)
+ return ret;
+
rte_spinlock_lock(&rx_adapter->rx_lock);
- if (rx_queue_id == -1) {
- for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
- event_eth_rx_adapter_queue_del(rx_adapter,
- dev_info,
- i);
- } else {
- event_eth_rx_adapter_queue_del(rx_adapter,
- dev_info,
- (uint16_t)rx_queue_id);
+
+ num_intr_vec = 0;
+ if (rx_adapter->num_rx_intr > nb_rx_intr) {
+
+ num_intr_vec = rxa_nb_intr_vect(dev_info,
+ rx_queue_id, 0);
+ ret = rxa_del_intr_queue(rx_adapter, dev_info,
+ rx_queue_id);
+ if (ret)
+ goto unlock_ret;
+ }
+
+ if (nb_rx_intr == 0) {
+ ret = rxa_free_intr_resources(rx_adapter);
+ if (ret)
+ goto unlock_ret;
+ }
+
+ rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
+ rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
+
+ rte_free(rx_adapter->eth_rx_poll);
+ rte_free(rx_adapter->wrr_sched);
+
+ if (nb_rx_intr == 0) {
+ rte_free(dev_info->intr_queue);
+ dev_info->intr_queue = NULL;
}
- rc = eth_poll_wrr_calc(rx_adapter);
- if (rc)
- RTE_EDEV_LOG_ERR("WRR recalculation failed %" PRId32,
- rc);
+ rx_adapter->eth_rx_poll = rx_poll;
+ rx_adapter->wrr_sched = rx_wrr;
+ rx_adapter->wrr_len = nb_wrr;
+ rx_adapter->num_intr_vec += num_intr_vec;
if (dev_info->nb_dev_queues == 0) {
rte_free(dev_info->rx_queue);
dev_info->rx_queue = NULL;
}
-
+unlock_ret:
rte_spinlock_unlock(&rx_adapter->rx_lock);
+ if (ret) {
+ rte_free(rx_poll);
+ rte_free(rx_wrr);
+ return ret;
+ }
+
rte_service_component_runstate_set(rx_adapter->service_id,
- sw_rx_adapter_queue_count(rx_adapter));
+ rxa_sw_adapter_queue_count(rx_adapter));
}
return ret;
}
-
int
rte_event_eth_rx_adapter_start(uint8_t id)
{
- return rx_adapter_ctrl(id, 1);
+ return rxa_ctrl(id, 1);
}
int
rte_event_eth_rx_adapter_stop(uint8_t id)
{
- return rx_adapter_ctrl(id, 0);
+ return rxa_ctrl(id, 0);
}
int
@@ -1168,13 +2312,13 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL || stats == NULL)
return -EINVAL;
dev = &rte_eventdevs[rx_adapter->eventdev_id];
memset(stats, 0, sizeof(*stats));
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
if (dev_info->internal_event_port == 0 ||
dev->dev_ops->eth_rx_adapter_stats_get == NULL)
@@ -1206,12 +2350,12 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
return -EINVAL;
dev = &rte_eventdevs[rx_adapter->eventdev_id];
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
if (dev_info->internal_event_port == 0 ||
dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
@@ -1231,7 +2375,7 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL || service_id == NULL)
return -EINVAL;
@@ -1240,3 +2384,47 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
return rx_adapter->service_inited ? 0 : -ESRCH;
}
+
+int rte_event_eth_rx_adapter_cb_register(uint8_t id,
+ uint16_t eth_dev_id,
+ rte_event_eth_rx_adapter_cb_fn cb_fn,
+ void *cb_arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct eth_device_info *dev_info;
+ uint32_t cap;
+ int ret;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+ if (dev_info->rx_queue == NULL)
+ return -EINVAL;
+
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+ "eth port %" PRIu16, id, eth_dev_id);
+ return ret;
+ }
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
+ PRIu16, eth_dev_id);
+ return -EINVAL;
+ }
+
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ dev_info->cb_fn = cb_fn;
+ dev_info->cb_arg = cb_arg;
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+
+ return 0;
+}
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
index c20507b2..332ee216 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
@@ -1,32 +1,6 @@
-/*
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
*/
#ifndef _RTE_EVENT_ETH_RX_ADAPTER_
@@ -47,7 +21,11 @@
*
* The adapter uses a EAL service core function for SW based packet transfer
* and uses the eventdev PMD functions to configure HW based packet transfer
- * between the ethernet device and the event device.
+ * between the ethernet device and the event device. For SW based packet
+ * transfer, if the mbuf does not have a timestamp set, the adapter adds a
+ * timestamp to the mbuf using rte_get_tsc_cycles(). This provides a more
+ * accurate timestamp than one set by the application after receiving the
+ * event, since it avoids the event device scheduling latency.
*
* The ethernet Rx event adapter's functions are:
* - rte_event_eth_rx_adapter_create_ext()
@@ -85,7 +63,23 @@
* rte_event_eth_rx_adapter_service_id_get() function can be used to retrieve
* the service function ID of the adapter in this case.
*
- * Note: Interrupt driven receive queues are currently unimplemented.
+ * For SW based packet transfers, i.e., when the
+ * RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is not set in the adapter's
+ * capabilities flags for a particular ethernet device, the service function
+ * temporarily enqueues mbufs to an event buffer before batch enqueueing these
+ * to the event device. If the buffer fills up, the service function stops
+ * dequeueing packets from the ethernet device. The application may want to
+ * monitor the buffer fill level and instruct the service function to
+ * selectively buffer packets. The application may also use some other
+ * criteria to decide which packets should enter the event device even when
+ * the event buffer fill level is low. The
+ * rte_event_eth_rx_adapter_cb_register() function allows the
+ * application to register a callback that selects which packets to enqueue
+ * to the event device.
+ *
+ * Note:
+ * 1) Ethernet devices created after an instance of the Rx adapter has been
+ * created with rte_event_eth_rx_adapter_create() should be added to a new
+ * instance of the Rx adapter.
*/
#ifdef __cplusplus
@@ -218,12 +212,55 @@ struct rte_event_eth_rx_adapter_stats {
* block cycles can be used to compute the percentage of
* cycles the service is blocked by the event device.
*/
+ uint64_t rx_intr_packets;
+ /**< Received packet count for interrupt mode Rx queues */
};
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
+ * Callback function invoked by the SW adapter before it continues
+ * to process packets. The callback is passed the size of the enqueue
+ * buffer in the SW adapter and the occupancy of the buffer. The
+ * callback can use these values to decide which mbufs should be
+ * enqueued to the event device. If the return value of the callback
+ * is less than nb_mbuf, the SW adapter enqueues only the mbufs placed
+ * in enq_buf[], up to the returned count, to the event device.
+ *
+ * @param eth_dev_id
+ * Port identifier of the Ethernet device.
+ * @param queue_id
+ * Receive queue index.
+ * @param enqueue_buf_size
+ * Total enqueue buffer size.
+ * @param enqueue_buf_count
+ * mbuf count in enqueue buffer.
+ * @param mbuf
+ * mbuf array.
+ * @param nb_mbuf
+ * mbuf count.
+ * @param cb_arg
+ * Callback argument.
+ * @param[out] enq_buf
+ *  The adapter enqueues enq_buf[] if the return value of the
+ *  callback is less than nb_mbuf.
+ * @return
+ *  The number of mbufs that should be enqueued to the event device.
+ */
+typedef uint16_t (*rte_event_eth_rx_adapter_cb_fn)(uint16_t eth_dev_id,
+ uint16_t queue_id,
+ uint32_t enqueue_buf_size,
+ uint32_t enqueue_buf_count,
+ struct rte_mbuf **mbuf,
+ uint16_t nb_mbuf,
+ void *cb_arg,
+ struct rte_mbuf **enq_buf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
* Create a new ethernet Rx event adapter with the specified identifier.
*
* @param id
@@ -329,7 +366,7 @@ int rte_event_eth_rx_adapter_free(uint8_t id);
* combination of the two error codes.
*/
int rte_event_eth_rx_adapter_queue_add(uint8_t id,
- uint8_t eth_dev_id,
+ uint16_t eth_dev_id,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *conf);
@@ -357,7 +394,7 @@ int rte_event_eth_rx_adapter_queue_add(uint8_t id,
* - 0: Success, Receive queue deleted correctly.
* - <0: Error code on failure.
*/
-int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
+int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
int32_t rx_queue_id);
/**
@@ -444,6 +481,32 @@ int rte_event_eth_rx_adapter_stats_reset(uint8_t id);
*/
int rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register a callback to process Rx packets. This is only supported for
+ * SW based packet transfers.
+ * @see rte_event_eth_rx_adapter_cb_fn
+ *
+ * @param id
+ * Adapter identifier.
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ * @param cb_fn
+ * Callback function.
+ * @param cb_arg
+ * Callback arg.
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_eth_rx_adapter_cb_register(uint8_t id,
+ uint16_t eth_dev_id,
+ rte_event_eth_rx_adapter_cb_fn cb_fn,
+ void *cb_arg);
+
#ifdef __cplusplus
}
#endif
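
A minimal callback sketch matching the prototype added above, assuming the helper name register_rx_cb and the adapter/port ids passed to it are placeholders; it passes every mbuf through, while a filtering callback would copy only the selected mbufs into enq_buf and return that smaller count:

    #include <rte_common.h>
    #include <rte_mbuf.h>
    #include <rte_event_eth_rx_adapter.h>

    static uint16_t
    rx_adapter_cb(uint16_t eth_dev_id, uint16_t queue_id,
                  uint32_t enqueue_buf_size, uint32_t enqueue_buf_count,
                  struct rte_mbuf **mbuf, uint16_t nb_mbuf,
                  void *cb_arg, struct rte_mbuf **enq_buf)
    {
            uint16_t i;

            RTE_SET_USED(eth_dev_id);
            RTE_SET_USED(queue_id);
            RTE_SET_USED(enqueue_buf_size);
            RTE_SET_USED(enqueue_buf_count);
            RTE_SET_USED(cb_arg);

            /* Pass every mbuf through; returning a count smaller than
             * nb_mbuf would make the adapter enqueue only
             * enq_buf[0..count-1].
             */
            for (i = 0; i < nb_mbuf; i++)
                    enq_buf[i] = mbuf[i];

            return nb_mbuf;
    }

    static int
    register_rx_cb(uint8_t rx_adapter_id, uint16_t eth_port_id)
    {
            /* call after the adapter and its Rx queues have been set up */
            return rte_event_eth_rx_adapter_cb_register(rx_adapter_id,
                                                        eth_port_id,
                                                        rx_adapter_cb, NULL);
    }
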
diff --git a/lib/librte_eventdev/rte_event_ring.c b/lib/librte_eventdev/rte_event_ring.c
index eb67751d..16d02a95 100644
--- a/lib/librte_eventdev/rte_event_ring.c
+++ b/lib/librte_eventdev/rte_event_ring.c
@@ -82,11 +82,16 @@ rte_event_ring_create(const char *name, unsigned int count, int socket_id,
mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
if (mz != NULL) {
r = mz->addr;
- /*
- * no need to check return value here, we already checked the
- * arguments above
- */
- rte_event_ring_init(r, name, requested_count, flags);
+ /* Check return value in case rte_ring_init() fails on size */
+ int err = rte_event_ring_init(r, name, requested_count, flags);
+ if (err) {
+ RTE_LOG(ERR, RING, "Ring init failed\n");
+ if (rte_memzone_free(mz) != 0)
+ RTE_LOG(ERR, RING, "Cannot free memzone\n");
+ rte_free(te);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return NULL;
+ }
te->data = (void *) r;
r->r.memzone = mz;
diff --git a/lib/librte_eventdev/rte_event_ring.h b/lib/librte_eventdev/rte_event_ring.h
index 29d4228a..827a3209 100644
--- a/lib/librte_eventdev/rte_event_ring.h
+++ b/lib/librte_eventdev/rte_event_ring.h
@@ -99,7 +99,7 @@ rte_event_ring_enqueue_burst(struct rte_event_ring *r,
ENQUEUE_PTRS(&r->r, &r[1], prod_head, events, n, struct rte_event);
- update_tail(&r->r.prod, prod_head, prod_next, 1, 1);
+ update_tail(&r->r.prod, prod_head, prod_next, r->r.prod.single, 1);
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -140,7 +140,7 @@ rte_event_ring_dequeue_burst(struct rte_event_ring *r,
DEQUEUE_PTRS(&r->r, &r[1], cons_head, events, n, struct rte_event);
- update_tail(&r->r.cons, cons_head, cons_next, 1, 0);
+ update_tail(&r->r.cons, cons_head, cons_next, r->r.cons.single, 0);
end:
if (available != NULL)
diff --git a/lib/librte_eventdev/rte_event_timer_adapter.c b/lib/librte_eventdev/rte_event_timer_adapter.c
new file mode 100644
index 00000000..79070d48
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_timer_adapter.c
@@ -0,0 +1,1299 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_memzone.h>
+#include <rte_memory.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_common.h>
+#include <rte_timer.h>
+#include <rte_service_component.h>
+#include <rte_cycles.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+#include "rte_event_timer_adapter.h"
+#include "rte_event_timer_adapter_pmd.h"
+
+#define DATA_MZ_NAME_MAX_LEN 64
+#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
+
+static int evtim_logtype;
+static int evtim_svc_logtype;
+static int evtim_buffer_logtype;
+
+static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
+
+static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops;
+
+#define EVTIM_LOG(level, logtype, ...) \
+ rte_log(RTE_LOG_ ## level, logtype, \
+ RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
+ "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+#define EVTIM_LOG_DBG(...) \
+ EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
+#define EVTIM_BUF_LOG_DBG(...) \
+ EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
+#define EVTIM_SVC_LOG_DBG(...) \
+ EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
+#else
+#define EVTIM_LOG_DBG(...) (void)0
+#define EVTIM_BUF_LOG_DBG(...) (void)0
+#define EVTIM_SVC_LOG_DBG(...) (void)0
+#endif
+
+static int
+default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
+ void *conf_arg)
+{
+ struct rte_event_timer_adapter *adapter;
+ struct rte_eventdev *dev;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_port_conf *port_conf, def_port_conf = {0};
+ int started;
+ uint8_t port_id;
+ uint8_t dev_id;
+ int ret;
+
+ RTE_SET_USED(event_dev_id);
+
+ adapter = &adapters[id];
+ dev = &rte_eventdevs[adapter->data->event_dev_id];
+ dev_id = dev->data->dev_id;
+ dev_conf = dev->data->dev_conf;
+
+ started = dev->data->dev_started;
+ if (started)
+ rte_event_dev_stop(dev_id);
+
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports += 1;
+ ret = rte_event_dev_configure(dev_id, &dev_conf);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
+ if (started)
+ if (rte_event_dev_start(dev_id))
+ return -EIO;
+
+ return ret;
+ }
+
+ if (conf_arg != NULL)
+ port_conf = conf_arg;
+ else {
+ port_conf = &def_port_conf;
+ ret = rte_event_port_default_conf_get(dev_id, port_id,
+ port_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
+ port_id, dev_id);
+ return ret;
+ }
+
+ *event_port_id = port_id;
+
+ if (started)
+ ret = rte_event_dev_start(dev_id);
+
+ return ret;
+}
+
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
+{
+ return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
+ NULL);
+}
+
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create_ext(
+ const struct rte_event_timer_adapter_conf *conf,
+ rte_event_timer_adapter_port_conf_cb_t conf_cb,
+ void *conf_arg)
+{
+ uint16_t adapter_id;
+ struct rte_event_timer_adapter *adapter;
+ const struct rte_memzone *mz;
+ char mz_name[DATA_MZ_NAME_MAX_LEN];
+ int n, ret;
+ struct rte_eventdev *dev;
+
+ if (conf == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Check eventdev ID */
+ if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ dev = &rte_eventdevs[conf->event_dev_id];
+
+ adapter_id = conf->timer_adapter_id;
+
+ /* Check that adapter_id is in range */
+ if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Check adapter ID not already allocated */
+ adapter = &adapters[adapter_id];
+ if (adapter->allocated) {
+ rte_errno = EEXIST;
+ return NULL;
+ }
+
+ /* Create shared data area. */
+ n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
+ if (n >= (int)sizeof(mz_name)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ mz = rte_memzone_reserve(mz_name,
+ sizeof(struct rte_event_timer_adapter_data),
+ conf->socket_id, 0);
+ if (mz == NULL)
+ /* rte_errno set by rte_memzone_reserve */
+ return NULL;
+
+ adapter->data = mz->addr;
+ memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));
+
+ adapter->data->mz = mz;
+ adapter->data->event_dev_id = conf->event_dev_id;
+ adapter->data->id = adapter_id;
+ adapter->data->socket_id = conf->socket_id;
+ adapter->data->conf = *conf; /* copy conf structure */
+
+ /* Query eventdev PMD for timer adapter capabilities and ops */
+ ret = dev->dev_ops->timer_adapter_caps_get(dev,
+ adapter->data->conf.flags,
+ &adapter->data->caps,
+ &adapter->ops);
+ if (ret < 0) {
+ rte_errno = ret;
+ goto free_memzone;
+ }
+
+ if (!(adapter->data->caps &
+ RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+ FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, -EINVAL);
+ ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
+ &adapter->data->event_port_id, conf_arg);
+ if (ret < 0) {
+ rte_errno = ret;
+ goto free_memzone;
+ }
+ }
+
+ /* If eventdev PMD did not provide ops, use default software
+ * implementation.
+ */
+ if (adapter->ops == NULL)
+ adapter->ops = &sw_event_adapter_timer_ops;
+
+ /* Allow driver to do some setup */
+ FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, -ENOTSUP);
+ ret = adapter->ops->init(adapter);
+ if (ret < 0) {
+ rte_errno = ret;
+ goto free_memzone;
+ }
+
+ /* Set fast-path function pointers */
+ adapter->arm_burst = adapter->ops->arm_burst;
+ adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
+ adapter->cancel_burst = adapter->ops->cancel_burst;
+
+ adapter->allocated = 1;
+
+ return adapter;
+
+free_memzone:
+ rte_memzone_free(adapter->data->mz);
+ return NULL;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+
+ if (adapter->ops->get_info)
+ /* let driver set values it knows */
+ adapter->ops->get_info(adapter, adapter_info);
+
+ /* Set common values */
+ adapter_info->conf = adapter->data->conf;
+ adapter_info->event_dev_port_id = adapter->data->event_port_id;
+ adapter_info->caps = adapter->data->caps;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);
+
+ ret = adapter->ops->start(adapter);
+ if (ret < 0)
+ return ret;
+
+ adapter->data->started = 1;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);
+
+ if (adapter->data->started == 0) {
+ EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
+ adapter->data->id);
+ return 0;
+ }
+
+ ret = adapter->ops->stop(adapter);
+ if (ret < 0)
+ return ret;
+
+ adapter->data->started = 0;
+
+ return 0;
+}
+
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_lookup(uint16_t adapter_id)
+{
+ char name[DATA_MZ_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+ struct rte_event_timer_adapter_data *data;
+ struct rte_event_timer_adapter *adapter;
+ int ret;
+ struct rte_eventdev *dev;
+
+ if (adapters[adapter_id].allocated)
+ return &adapters[adapter_id]; /* Adapter is already loaded */
+
+ snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ data = mz->addr;
+
+ adapter = &adapters[data->id];
+ adapter->data = data;
+
+ dev = &rte_eventdevs[adapter->data->event_dev_id];
+
+ /* Query eventdev PMD for timer adapter capabilities and ops */
+ ret = dev->dev_ops->timer_adapter_caps_get(dev,
+ adapter->data->conf.flags,
+ &adapter->data->caps,
+ &adapter->ops);
+ if (ret < 0) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* If eventdev PMD did not provide ops, use default software
+ * implementation.
+ */
+ if (adapter->ops == NULL)
+ adapter->ops = &sw_event_adapter_timer_ops;
+
+ /* Set fast-path function pointers */
+ adapter->arm_burst = adapter->ops->arm_burst;
+ adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
+ adapter->cancel_burst = adapter->ops->cancel_burst;
+
+ adapter->allocated = 1;
+
+ return adapter;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
+
+ if (adapter->data->started == 1) {
+ EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
+ "before freeing", adapter->data->id);
+ return -EBUSY;
+ }
+
+ /* free impl priv data */
+ ret = adapter->ops->uninit(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* free shared data area */
+ ret = rte_memzone_free(adapter->data->mz);
+ if (ret < 0)
+ return ret;
+
+ adapter->data = NULL;
+ adapter->allocated = 0;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
+ uint32_t *service_id)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+
+ if (adapter->data->service_inited && service_id != NULL)
+ *service_id = adapter->data->service_id;
+
+ return adapter->data->service_inited ? 0 : -ESRCH;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
+ if (stats == NULL)
+ return -EINVAL;
+
+ return adapter->ops->stats_get(adapter, stats);
+}
+
+int __rte_experimental
+rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
+ return adapter->ops->stats_reset(adapter);
+}
+
+/*
+ * Software event timer adapter buffer helper functions
+ */
+
+#define NSECPERSEC 1E9
+
+/* Optimizations used to index into the buffer require that the buffer size
+ * be a power of 2.
+ */
+#define EVENT_BUFFER_SZ 4096
+#define EVENT_BUFFER_BATCHSZ 32
+#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
+
+struct event_buffer {
+ uint16_t head;
+ uint16_t tail;
+ struct rte_event events[EVENT_BUFFER_SZ];
+} __rte_cache_aligned;
+
+static inline bool
+event_buffer_full(struct event_buffer *bufp)
+{
+ return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
+}
+
+static inline bool
+event_buffer_batch_ready(struct event_buffer *bufp)
+{
+ return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
+}
+
+static void
+event_buffer_init(struct event_buffer *bufp)
+{
+ bufp->head = bufp->tail = 0;
+ memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
+}
+
+static int
+event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
+{
+ uint16_t head_idx;
+ struct rte_event *buf_eventp;
+
+ if (event_buffer_full(bufp))
+ return -1;
+
+ /* Instead of modulus, bitwise AND with mask to get head_idx. */
+ head_idx = bufp->head & EVENT_BUFFER_MASK;
+ buf_eventp = &bufp->events[head_idx];
+ rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));
+
+ /* Wrap automatically when overflow occurs. */
+ bufp->head++;
+
+ return 0;
+}
+
+static void
+event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
+ uint16_t *nb_events_flushed,
+ uint16_t *nb_events_inv)
+{
+ uint16_t head_idx, tail_idx, n = 0;
+ struct rte_event *events = bufp->events;
+
+ /* Instead of modulus, bitwise AND with mask to get index. */
+ head_idx = bufp->head & EVENT_BUFFER_MASK;
+ tail_idx = bufp->tail & EVENT_BUFFER_MASK;
+
+ /* Determine the largest contiguous run we can attempt to enqueue to the
+ * event device.
+ */
+ if (head_idx > tail_idx)
+ n = head_idx - tail_idx;
+ else if (head_idx < tail_idx)
+ n = EVENT_BUFFER_SZ - tail_idx;
+ else {
+ *nb_events_flushed = 0;
+ return;
+ }
+
+ *nb_events_inv = 0;
+ *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
+ &events[tail_idx], n);
+ if (*nb_events_flushed != n && rte_errno == -EINVAL) {
+ EVTIM_LOG_ERR("failed to enqueue invalid event - dropping it");
+ (*nb_events_inv)++;
+ }
+
+ bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
+}
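
A small standalone sketch of the index arithmetic used by these buffer helpers: head and tail are free-running 16-bit counters, and the power-of-2 size lets a bitwise AND replace the modulus (the numeric values are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define SZ   4096u        /* power of 2, like EVENT_BUFFER_SZ */
    #define MASK (SZ - 1)

    int main(void)
    {
            uint16_t head = 4100, tail = 4090; /* free-running counters */

            printf("occupancy %u\n", (unsigned int)(uint16_t)(head - tail)); /* 10 */
            printf("head slot %u\n", head & MASK);                           /* 4 */
            printf("tail slot %u\n", tail & MASK);                           /* 4090 */
            /* head slot < tail slot here, so a flush first enqueues the
             * contiguous run events[4090..4095] (6 events) and wraps to
             * index 0 on the next pass, matching how event_buffer_flush()
             * above computes n.
             */
            return 0;
    }
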
+
+/*
+ * Software event timer adapter implementation
+ */
+
+struct rte_event_timer_adapter_sw_data {
+ /* List of messages for outstanding timers */
+ TAILQ_HEAD(, msg) msgs_tailq_head;
+ /* Lock to guard tailq and armed count */
+ rte_spinlock_t msgs_tailq_sl;
+ /* Identifier of service executing timer management logic. */
+ uint32_t service_id;
+ /* The cycle count at which the adapter should next tick */
+ uint64_t next_tick_cycles;
+ /* Incremented as the service moves through phases of an iteration */
+ volatile int service_phase;
+ /* The tick resolution used by adapter instance. May have been
+ * adjusted from what user requested
+ */
+ uint64_t timer_tick_ns;
+ /* Maximum timeout in nanoseconds allowed by adapter instance. */
+ uint64_t max_tmo_ns;
+ /* Ring containing messages to arm or cancel event timers */
+ struct rte_ring *msg_ring;
+ /* Mempool containing msg objects */
+ struct rte_mempool *msg_pool;
+ /* Buffered timer expiry events to be enqueued to an event device. */
+ struct event_buffer buffer;
+ /* Statistics */
+ struct rte_event_timer_adapter_stats stats;
+ /* The number of threads currently adding to the message ring */
+ rte_atomic16_t message_producer_count;
+};
+
+enum msg_type {MSG_TYPE_ARM, MSG_TYPE_CANCEL};
+
+struct msg {
+ enum msg_type type;
+ struct rte_event_timer *evtim;
+ struct rte_timer tim;
+ TAILQ_ENTRY(msg) msgs;
+};
+
+static void
+sw_event_timer_cb(struct rte_timer *tim, void *arg)
+{
+ int ret;
+ uint16_t nb_evs_flushed = 0;
+ uint16_t nb_evs_invalid = 0;
+ uint64_t opaque;
+ struct rte_event_timer *evtim;
+ struct rte_event_timer_adapter *adapter;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ evtim = arg;
+ opaque = evtim->impl_opaque[1];
+ adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
+ sw_data = adapter->data->adapter_priv;
+
+ ret = event_buffer_add(&sw_data->buffer, &evtim->ev);
+ if (ret < 0) {
+ /* If event buffer is full, put timer back in list with
+ * immediate expiry value, so that we process it again on the
+ * next iteration.
+ */
+ rte_timer_reset_sync(tim, 0, SINGLE, rte_lcore_id(),
+ sw_event_timer_cb, evtim);
+
+ sw_data->stats.evtim_retry_count++;
+ EVTIM_LOG_DBG("event buffer full, resetting rte_timer with "
+ "immediate expiry value");
+ } else {
+ struct msg *m = container_of(tim, struct msg, tim);
+ TAILQ_REMOVE(&sw_data->msgs_tailq_head, m, msgs);
+ EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
+ evtim->state = RTE_EVENT_TIMER_NOT_ARMED;
+
+ /* Free the msg object containing the rte_timer now that
+ * we've buffered its event successfully.
+ */
+ rte_mempool_put(sw_data->msg_pool, m);
+
+ /* Bump the count when we successfully add an expiry event to
+ * the buffer.
+ */
+ sw_data->stats.evtim_exp_count++;
+ }
+
+ if (event_buffer_batch_ready(&sw_data->buffer)) {
+ event_buffer_flush(&sw_data->buffer,
+ adapter->data->event_dev_id,
+ adapter->data->event_port_id,
+ &nb_evs_flushed,
+ &nb_evs_invalid);
+
+ sw_data->stats.ev_enq_count += nb_evs_flushed;
+ sw_data->stats.ev_inv_count += nb_evs_invalid;
+ }
+}
+
+static __rte_always_inline uint64_t
+get_timeout_cycles(struct rte_event_timer *evtim,
+ struct rte_event_timer_adapter *adapter)
+{
+ uint64_t timeout_ns;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+ timeout_ns = evtim->timeout_ticks * sw_data->timer_tick_ns;
+ return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
+
+}
+
+/* This function returns true if one or more (adapter) ticks have occurred since
+ * the last time it was called.
+ */
+static inline bool
+adapter_did_tick(struct rte_event_timer_adapter *adapter)
+{
+ uint64_t cycles_per_adapter_tick, start_cycles;
+ uint64_t *next_tick_cyclesp;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+ next_tick_cyclesp = &sw_data->next_tick_cycles;
+
+ cycles_per_adapter_tick = sw_data->timer_tick_ns *
+ (rte_get_timer_hz() / NSECPERSEC);
+
+ start_cycles = rte_get_timer_cycles();
+
+ /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
+ * execute, and set things going.
+ */
+
+ if (start_cycles >= *next_tick_cyclesp) {
+ /* Snap the current cycle count to the preceding adapter tick
+ * boundary.
+ */
+ start_cycles -= start_cycles % cycles_per_adapter_tick;
+
+ *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Check that event timer timeout value is in range */
+static __rte_always_inline int
+check_timeout(struct rte_event_timer *evtim,
+ const struct rte_event_timer_adapter *adapter)
+{
+ uint64_t tmo_nsec;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+ tmo_nsec = evtim->timeout_ticks * sw_data->timer_tick_ns;
+
+ if (tmo_nsec > sw_data->max_tmo_ns)
+ return -1;
+
+ if (tmo_nsec < sw_data->timer_tick_ns)
+ return -2;
+
+ return 0;
+}
+
+/* Check that event timer event queue sched type matches destination event queue
+ * sched type
+ */
+static __rte_always_inline int
+check_destination_event_queue(struct rte_event_timer *evtim,
+ const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ uint32_t sched_type;
+
+ ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
+ evtim->ev.queue_id,
+ RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
+ &sched_type);
+
+ if ((ret < 0 && ret != -EOVERFLOW) ||
+ evtim->ev.sched_type != sched_type)
+ return -1;
+
+ return 0;
+}
+
+#define NB_OBJS 32
+static int
+sw_event_timer_adapter_service_func(void *arg)
+{
+ int i, num_msgs;
+ uint64_t cycles, opaque;
+ uint16_t nb_evs_flushed = 0;
+ uint16_t nb_evs_invalid = 0;
+ struct rte_event_timer_adapter *adapter;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_timer *tim = NULL;
+ struct msg *msg, *msgs[NB_OBJS];
+
+ adapter = arg;
+ sw_data = adapter->data->adapter_priv;
+
+ sw_data->service_phase = 1;
+ rte_smp_wmb();
+
+ while (rte_atomic16_read(&sw_data->message_producer_count) > 0 ||
+ !rte_ring_empty(sw_data->msg_ring)) {
+
+ num_msgs = rte_ring_dequeue_burst(sw_data->msg_ring,
+ (void **)msgs, NB_OBJS, NULL);
+
+ for (i = 0; i < num_msgs; i++) {
+ int ret = 0;
+
+ RTE_SET_USED(ret);
+
+ msg = msgs[i];
+ evtim = msg->evtim;
+
+ switch (msg->type) {
+ case MSG_TYPE_ARM:
+ EVTIM_SVC_LOG_DBG("dequeued ARM message from "
+ "ring");
+ tim = &msg->tim;
+ rte_timer_init(tim);
+ cycles = get_timeout_cycles(evtim,
+ adapter);
+ ret = rte_timer_reset(tim, cycles, SINGLE,
+ rte_lcore_id(),
+ sw_event_timer_cb,
+ evtim);
+ RTE_ASSERT(ret == 0);
+
+ evtim->impl_opaque[0] = (uintptr_t)tim;
+ evtim->impl_opaque[1] = (uintptr_t)adapter;
+
+ TAILQ_INSERT_TAIL(&sw_data->msgs_tailq_head,
+ msg,
+ msgs);
+ break;
+ case MSG_TYPE_CANCEL:
+ EVTIM_SVC_LOG_DBG("dequeued CANCEL message "
+ "from ring");
+ opaque = evtim->impl_opaque[0];
+ tim = (struct rte_timer *)(uintptr_t)opaque;
+ RTE_ASSERT(tim != NULL);
+
+ ret = rte_timer_stop(tim);
+ RTE_ASSERT(ret == 0);
+
+ /* Free the msg object for the original arm
+ * request.
+ */
+ struct msg *m;
+ m = container_of(tim, struct msg, tim);
+ TAILQ_REMOVE(&sw_data->msgs_tailq_head, m,
+ msgs);
+ rte_mempool_put(sw_data->msg_pool, m);
+
+ /* Free the msg object for the current msg */
+ rte_mempool_put(sw_data->msg_pool, msg);
+
+ evtim->impl_opaque[0] = 0;
+ evtim->impl_opaque[1] = 0;
+
+ break;
+ }
+ }
+ }
+
+ sw_data->service_phase = 2;
+ rte_smp_wmb();
+
+ if (adapter_did_tick(adapter)) {
+ rte_timer_manage();
+
+ event_buffer_flush(&sw_data->buffer,
+ adapter->data->event_dev_id,
+ adapter->data->event_port_id,
+ &nb_evs_flushed, &nb_evs_invalid);
+
+ sw_data->stats.ev_enq_count += nb_evs_flushed;
+ sw_data->stats.ev_inv_count += nb_evs_invalid;
+ sw_data->stats.adapter_tick_count++;
+ }
+
+ sw_data->service_phase = 0;
+ rte_smp_wmb();
+
+ return 0;
+}
+
+/* The adapter initialization function rounds the mempool size up to the next
+ * power of 2, so we can take the difference between that value and what the
+ * user requested, and use the space for caches. This avoids a scenario where a
+ * user can't arm the number of timers the adapter was configured with because
+ * mempool objects have been lost to caches.
+ *
+ * nb_actual should always be a power of 2, so we can iterate over the powers
+ * of 2 to find the largest cache size we can use.
+ */
+static int
+compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
+{
+ int i;
+ int size;
+ int cache_size = 0;
+
+ for (i = 0; ; i++) {
+ size = 1 << i;
+
+ if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
+ size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
+ size <= nb_actual / 1.5)
+ cache_size = size;
+ else
+ break;
+ }
+
+ return cache_size;
+}
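
As a worked example, assuming RTE_MAX_LCORE is 128: a request for 3000 timers is rounded up to nb_actual = 4096, leaving 1096 objects of slack relative to the request. The loop accepts cache sizes 1, 2, 4 and 8 (128 * 8 = 1024 < 1096) but stops at 16 (128 * 16 = 2048), so the mempool is created with a per-lcore cache of 8; even with every lcore's cache full, the requested 3000 timers can still be armed.
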
+
+#define SW_MIN_INTERVAL 1E5
+
+static int
+sw_event_timer_adapter_init(struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ uint64_t nb_timers;
+ unsigned int flags;
+ struct rte_service_spec service;
+ static bool timer_subsystem_inited; /* static, so initialized to false */
+
+ /* Allocate storage for SW implementation data */
+ char priv_data_name[RTE_RING_NAMESIZE];
+ snprintf(priv_data_name, RTE_RING_NAMESIZE, "sw_evtim_adap_priv_%"PRIu8,
+ adapter->data->id);
+ adapter->data->adapter_priv = rte_zmalloc_socket(
+ priv_data_name,
+ sizeof(struct rte_event_timer_adapter_sw_data),
+ RTE_CACHE_LINE_SIZE,
+ adapter->data->socket_id);
+ if (adapter->data->adapter_priv == NULL) {
+ EVTIM_LOG_ERR("failed to allocate space for private data");
+ rte_errno = ENOMEM;
+ return -1;
+ }
+
+ if (adapter->data->conf.timer_tick_ns < SW_MIN_INTERVAL) {
+ EVTIM_LOG_ERR("failed to create adapter with requested tick "
+ "interval");
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ sw_data = adapter->data->adapter_priv;
+
+ sw_data->timer_tick_ns = adapter->data->conf.timer_tick_ns;
+ sw_data->max_tmo_ns = adapter->data->conf.max_tmo_ns;
+
+ TAILQ_INIT(&sw_data->msgs_tailq_head);
+ rte_spinlock_init(&sw_data->msgs_tailq_sl);
+ rte_atomic16_init(&sw_data->message_producer_count);
+
+ /* Rings require power of 2, so round up to next such value */
+ nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
+
+ char msg_ring_name[RTE_RING_NAMESIZE];
+ snprintf(msg_ring_name, RTE_RING_NAMESIZE,
+ "sw_evtim_adap_msg_ring_%"PRIu8, adapter->data->id);
+ flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
+ RING_F_SP_ENQ | RING_F_SC_DEQ :
+ RING_F_SC_DEQ;
+ sw_data->msg_ring = rte_ring_create(msg_ring_name, nb_timers,
+ adapter->data->socket_id, flags);
+ if (sw_data->msg_ring == NULL) {
+ EVTIM_LOG_ERR("failed to create message ring");
+ rte_errno = ENOMEM;
+ goto free_priv_data;
+ }
+
+ char pool_name[RTE_RING_NAMESIZE];
+ snprintf(pool_name, RTE_RING_NAMESIZE, "sw_evtim_adap_msg_pool_%"PRIu8,
+ adapter->data->id);
+
+ /* Both the arming/canceling thread and the service thread will do puts
+ * to the mempool, but if the SP_PUT flag is enabled, we can specify
+ * single-consumer get for the mempool.
+ */
+ flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
+ MEMPOOL_F_SC_GET : 0;
+
+ /* The usable size of a ring is count - 1, so subtract one here to
+ * make the counts agree.
+ */
+ int pool_size = nb_timers - 1;
+ int cache_size = compute_msg_mempool_cache_size(
+ adapter->data->conf.nb_timers, nb_timers);
+ sw_data->msg_pool = rte_mempool_create(pool_name, pool_size,
+ sizeof(struct msg), cache_size,
+ 0, NULL, NULL, NULL, NULL,
+ adapter->data->socket_id, flags);
+ if (sw_data->msg_pool == NULL) {
+ EVTIM_LOG_ERR("failed to create message object mempool");
+ rte_errno = ENOMEM;
+ goto free_msg_ring;
+ }
+
+ event_buffer_init(&sw_data->buffer);
+
+ /* Register a service component to run adapter logic */
+ memset(&service, 0, sizeof(service));
+ snprintf(service.name, RTE_SERVICE_NAME_MAX,
+ "sw_evtim_adap_svc_%"PRIu8, adapter->data->id);
+ service.socket_id = adapter->data->socket_id;
+ service.callback = sw_event_timer_adapter_service_func;
+ service.callback_userdata = adapter;
+ service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
+ ret = rte_service_component_register(&service, &sw_data->service_id);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
+ ": err = %d", service.name, sw_data->service_id,
+ ret);
+
+ rte_errno = ENOSPC;
+ goto free_msg_pool;
+ }
+
+ EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
+ sw_data->service_id);
+
+ adapter->data->service_id = sw_data->service_id;
+ adapter->data->service_inited = 1;
+
+ if (!timer_subsystem_inited) {
+ rte_timer_subsystem_init();
+ timer_subsystem_inited = true;
+ }
+
+ return 0;
+
+free_msg_pool:
+ rte_mempool_free(sw_data->msg_pool);
+free_msg_ring:
+ rte_ring_free(sw_data->msg_ring);
+free_priv_data:
+ rte_free(sw_data);
+ return -1;
+}
+
+static int
+sw_event_timer_adapter_uninit(struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ struct msg *m1, *m2;
+ struct rte_event_timer_adapter_sw_data *sw_data =
+ adapter->data->adapter_priv;
+
+ rte_spinlock_lock(&sw_data->msgs_tailq_sl);
+
+ /* Cancel outstanding rte_timers and free msg objects */
+ m1 = TAILQ_FIRST(&sw_data->msgs_tailq_head);
+ while (m1 != NULL) {
+ EVTIM_LOG_DBG("freeing outstanding timer");
+ m2 = TAILQ_NEXT(m1, msgs);
+
+ rte_timer_stop_sync(&m1->tim);
+ rte_mempool_put(sw_data->msg_pool, m1);
+
+ m1 = m2;
+ }
+
+ rte_spinlock_unlock(&sw_data->msgs_tailq_sl);
+
+ ret = rte_service_component_unregister(sw_data->service_id);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to unregister service component");
+ return ret;
+ }
+
+ rte_ring_free(sw_data->msg_ring);
+ rte_mempool_free(sw_data->msg_pool);
+ rte_free(adapter->data->adapter_priv);
+
+ return 0;
+}
+
+static inline int32_t
+get_mapped_count_for_service(uint32_t service_id)
+{
+ int32_t core_count, i, mapped_count = 0;
+ uint32_t lcore_arr[RTE_MAX_LCORE];
+
+ core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);
+
+ for (i = 0; i < core_count; i++)
+ if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
+ mapped_count++;
+
+ return mapped_count;
+}
+
+static int
+sw_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
+{
+ int mapped_count;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+
+ /* Mapping the service to more than one service core can introduce
+ * delays while one thread is waiting to acquire a lock, so only allow
+ * one core to be mapped to the service.
+ */
+ mapped_count = get_mapped_count_for_service(sw_data->service_id);
+
+ if (mapped_count == 1)
+ return rte_service_component_runstate_set(sw_data->service_id,
+ 1);
+
+ return mapped_count < 1 ? -ENOENT : -ENOTSUP;
+}
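
A sketch of the application-side setup this requirement implies, using the public service API before rte_event_timer_adapter_start(); the helper name and the choice of service lcore are assumptions:

    #include <errno.h>
    #include <rte_service.h>
    #include <rte_event_timer_adapter.h>

    /* Map exactly one service lcore to the adapter's service and enable it. */
    static int
    setup_timer_adapter_service(struct rte_event_timer_adapter *adapter,
                                uint32_t slcore)
    {
            uint32_t service_id;
            int ret;

            ret = rte_event_timer_adapter_service_id_get(adapter, &service_id);
            if (ret == -ESRCH)
                    return 0; /* no service used, e.g. HW-managed adapter */
            if (ret < 0)
                    return ret;

            rte_service_lcore_add(slcore);
            rte_service_map_lcore_set(service_id, slcore, 1);
            rte_service_runstate_set(service_id, 1);
            return rte_service_lcore_start(slcore);
    }
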
+
+static int
+sw_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data =
+ adapter->data->adapter_priv;
+
+ ret = rte_service_component_runstate_set(sw_data->service_id, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the service to complete its final iteration before
+ * stopping.
+ */
+ while (sw_data->service_phase != 0)
+ rte_pause();
+
+ rte_smp_rmb();
+
+ return 0;
+}
+
+static void
+sw_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info)
+{
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ sw_data = adapter->data->adapter_priv;
+
+ adapter_info->min_resolution_ns = sw_data->timer_tick_ns;
+ adapter_info->max_tmo_ns = sw_data->max_tmo_ns;
+}
+
+static int
+sw_event_timer_adapter_stats_get(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ sw_data = adapter->data->adapter_priv;
+ *stats = sw_data->stats;
+ return 0;
+}
+
+static int
+sw_event_timer_adapter_stats_reset(
+ const struct rte_event_timer_adapter *adapter)
+{
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ sw_data = adapter->data->adapter_priv;
+ memset(&sw_data->stats, 0, sizeof(sw_data->stats));
+ return 0;
+}
+
+static __rte_always_inline uint16_t
+__sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+ uint16_t i;
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ struct msg *msgs[nb_evtims];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ /* Check that the service is running. */
+ if (rte_service_runstate_get(adapter->data->service_id) != 1) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+#endif
+
+ sw_data = adapter->data->adapter_priv;
+
+ ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
+ if (ret < 0) {
+ rte_errno = ENOSPC;
+ return 0;
+ }
+
+ /* Let the service know we're producing messages for it to process */
+ rte_atomic16_inc(&sw_data->message_producer_count);
+
+ /* If the service is managing timers, wait for it to finish */
+ while (sw_data->service_phase == 2)
+ rte_pause();
+
+ rte_smp_rmb();
+
+ for (i = 0; i < nb_evtims; i++) {
+ /* Don't modify the event timer state in these cases */
+ if (evtims[i]->state == RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EALREADY;
+ break;
+ } else if (!(evtims[i]->state == RTE_EVENT_TIMER_NOT_ARMED ||
+ evtims[i]->state == RTE_EVENT_TIMER_CANCELED)) {
+ rte_errno = EINVAL;
+ break;
+ }
+
+ ret = check_timeout(evtims[i], adapter);
+ if (ret == -1) {
+ evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
+ rte_errno = EINVAL;
+ break;
+ }
+ if (ret == -2) {
+ evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ rte_errno = EINVAL;
+ break;
+ }
+
+ if (check_destination_event_queue(evtims[i], adapter) < 0) {
+ evtims[i]->state = RTE_EVENT_TIMER_ERROR;
+ rte_errno = EINVAL;
+ break;
+ }
+
+ /* Checks passed, set up a message to enqueue */
+ msgs[i]->type = MSG_TYPE_ARM;
+ msgs[i]->evtim = evtims[i];
+
+ /* Set the payload pointer if not set. */
+ if (evtims[i]->ev.event_ptr == NULL)
+ evtims[i]->ev.event_ptr = evtims[i];
+
+ /* msg objects that get enqueued successfully will be freed
+ * either by a future cancel operation or by the timer
+ * expiration callback.
+ */
+ if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
+ rte_errno = ENOSPC;
+ break;
+ }
+
+ EVTIM_LOG_DBG("enqueued ARM message to ring");
+
+ evtims[i]->state = RTE_EVENT_TIMER_ARMED;
+ }
+
+ /* Let the service know we're done producing messages */
+ rte_atomic16_dec(&sw_data->message_producer_count);
+
+ if (i < nb_evtims)
+ rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
+ nb_evtims - i);
+
+ return i;
+}
+
+static uint16_t
+sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+ return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
+}
+
+static uint16_t
+sw_event_timer_cancel_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+ uint16_t i;
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ struct msg *msgs[nb_evtims];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ /* Check that the service is running. */
+ if (rte_service_runstate_get(adapter->data->service_id) != 1) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+#endif
+
+ sw_data = adapter->data->adapter_priv;
+
+ ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
+ if (ret < 0) {
+ rte_errno = ENOSPC;
+ return 0;
+ }
+
+ /* Let the service know we're producing messages for it to process */
+ rte_atomic16_inc(&sw_data->message_producer_count);
+
+ /* If the service could be modifying event timer states, wait */
+ while (sw_data->service_phase == 2)
+ rte_pause();
+
+ rte_smp_rmb();
+
+ for (i = 0; i < nb_evtims; i++) {
+ /* Don't modify the event timer state in these cases */
+ if (evtims[i]->state == RTE_EVENT_TIMER_CANCELED) {
+ rte_errno = EALREADY;
+ break;
+ } else if (evtims[i]->state != RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EINVAL;
+ break;
+ }
+
+ msgs[i]->type = MSG_TYPE_CANCEL;
+ msgs[i]->evtim = evtims[i];
+
+ if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
+ rte_errno = ENOSPC;
+ break;
+ }
+
+ EVTIM_LOG_DBG("enqueued CANCEL message to ring");
+
+ evtims[i]->state = RTE_EVENT_TIMER_CANCELED;
+ }
+
+ /* Let the service know we're done producing messages */
+ rte_atomic16_dec(&sw_data->message_producer_count);
+
+ if (i < nb_evtims)
+ rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
+ nb_evtims - i);
+
+ return i;
+}
+
+static uint16_t
+sw_event_timer_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint64_t timeout_ticks,
+ uint16_t nb_evtims)
+{
+ int i;
+
+ for (i = 0; i < nb_evtims; i++)
+ evtims[i]->timeout_ticks = timeout_ticks;
+
+ return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
+}
+
+static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops = {
+ .init = sw_event_timer_adapter_init,
+ .uninit = sw_event_timer_adapter_uninit,
+ .start = sw_event_timer_adapter_start,
+ .stop = sw_event_timer_adapter_stop,
+ .get_info = sw_event_timer_adapter_get_info,
+ .stats_get = sw_event_timer_adapter_stats_get,
+ .stats_reset = sw_event_timer_adapter_stats_reset,
+ .arm_burst = sw_event_timer_arm_burst,
+ .arm_tmo_tick_burst = sw_event_timer_arm_tmo_tick_burst,
+ .cancel_burst = sw_event_timer_cancel_burst,
+};
+
+RTE_INIT(event_timer_adapter_init_log)
+{
+ evtim_logtype = rte_log_register("lib.eventdev.adapter.timer");
+ if (evtim_logtype >= 0)
+ rte_log_set_level(evtim_logtype, RTE_LOG_NOTICE);
+
+ evtim_buffer_logtype = rte_log_register("lib.eventdev.adapter.timer."
+ "buffer");
+ if (evtim_buffer_logtype >= 0)
+ rte_log_set_level(evtim_buffer_logtype, RTE_LOG_NOTICE);
+
+ evtim_svc_logtype = rte_log_register("lib.eventdev.adapter.timer.svc");
+ if (evtim_svc_logtype >= 0)
+ rte_log_set_level(evtim_svc_logtype, RTE_LOG_NOTICE);
+}
diff --git a/lib/librte_eventdev/rte_event_timer_adapter.h b/lib/librte_eventdev/rte_event_timer_adapter.h
new file mode 100644
index 00000000..d4ea6f17
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_timer_adapter.h
@@ -0,0 +1,766 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc.
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef __RTE_EVENT_TIMER_ADAPTER_H__
+#define __RTE_EVENT_TIMER_ADAPTER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Timer Adapter
+ *
+ * An event timer adapter has the following abstract working model:
+ *
+ * timer_tick_ns
+ * +
+ * +-------+ |
+ * | | |
+ * +-------+ bkt 0 +----v---+
+ * | | | |
+ * | +-------+ |
+ * +---+---+ +---+---+ +---+---+---+---+
+ * | | | | | | | | |
+ * | bkt n | | bkt 1 |<-> t0| t1| t2| tn|
+ * | | | | | | | | |
+ * +---+---+ +---+---+ +---+---+---+---+
+ * | Timer adapter |
+ * +---+---+ +---+---+
+ * | | | |
+ * | bkt 4 | | bkt 2 |<--- Current bucket
+ * | | | |
+ * +---+---+ +---+---+
+ * | +-------+ |
+ * | | | |
+ * +------+ bkt 3 +-------+
+ * | |
+ * +-------+
+ *
+ * - It has a virtual monotonically increasing 64-bit timer adapter clock based
+ * on *enum rte_event_timer_adapter_clk_src* clock source. The clock source
+ * could be a CPU clock, or a platform dependent external clock.
+ *
+ * - The application creates a timer adapter instance given the clock
+ * source, the total number of event timers, and a resolution (expressed in
+ * ns) used to traverse between the buckets.
+ *
+ * - Each timer adapter may have 0 to n buckets based on the configured
+ * max timeout (max_tmo_ns) and resolution (timer_tick_ns). Upon starting the
+ * timer adapter, the adapter starts ticking at *timer_tick_ns* resolution.
+ *
+ * - The application arms an event timer that will expire after a specified
+ * number of *timer_tick_ns* ticks from now.
+ *
+ * - The application can cancel an armed timer and no timer expiry event will be
+ * generated.
+ *
+ * - If a timer expires then the library injects the timer expiry event in
+ * the designated event queue.
+ *
+ * - The timer expiry event will be received through *rte_event_dequeue_burst*.
+ *
+ * - The application frees the timer adapter instance.
+ *
+ * Multiple timer adapters can be created with a varying level of resolution
+ * for various expiry use cases that run in parallel.
+ *
+ * Before using the timer adapter, the application has to create and configure
+ * an event device along with the event port. Based on the event device
+ * capability it might require creating an additional event port to be used
+ * by the timer adapter.
+ *
+ * The application creates the event timer adapter using the
+ * ``rte_event_timer_adapter_create()``. The event device id is passed to this
+ * function; inside this function the event device capability is checked,
+ * and if an in-built port is absent, a default callback is used to create
+ * a new producer port.
+ *
+ * The application may also use the function
+ * ``rte_event_timer_adapter_create_ext()`` to have granular control over
+ * producer port creation in a case where the in-built port is absent.
+ *
+ * After creating the timer adapter, the application has to start it
+ * using ``rte_event_timer_adapter_start()``. The buckets are traversed from
+ * 0 to n; when the adapter ticks, the next bucket is visited. Each time,
+ * the list per bucket is processed, and timer expiry events are sent to the
+ * designated event queue.
+ *
+ * The application can arm one or more event timers using the
+ * ``rte_event_timer_arm_burst()``. The *timeout_ticks* represents the number
+ * of *timer_tick_ns* after which the timer has to expire. The timeout at
+ * which the timers expire can be grouped or be independent of each
+ * event timer instance. ``rte_event_timer_arm_tmo_tick_burst()`` addresses the
+ * former case and ``rte_event_timer_arm_burst()`` addresses the latter case.
+ *
+ * The application can cancel the timers from expiring using the
+ * ``rte_event_timer_cancel_burst()``.
+ *
+ * On the secondary process, ``rte_event_timer_adapter_lookup()`` can be used
+ * to get the timer adapter pointer from its id and use it to invoke fastpath
+ * operations such as arm and cancel.
+ *
+ * Some of the use cases of event timer adapter are Beacon Timers,
+ * Generic SW Timeout, Wireless MAC Scheduling, 3G Frame Protocols,
+ * Packet Scheduling, Protocol Retransmission Timers, Supervision Timers.
+ * All these use cases require high resolution and low time drift.
+ */
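
A minimal sketch of the flow described above, assuming event device 0 and event queue 0 already exist, that queue uses atomic scheduling, and the numeric values are arbitrary:

    #include <string.h>
    #include <rte_errno.h>
    #include <rte_lcore.h>
    #include <rte_eventdev.h>
    #include <rte_event_timer_adapter.h>

    static int
    timer_adapter_example(void)
    {
            struct rte_event_timer_adapter *adapter;
            struct rte_event_timer_adapter_conf conf;
            struct rte_event_timer tim;
            struct rte_event_timer *timp = &tim;

            memset(&conf, 0, sizeof(conf));
            conf.event_dev_id = 0;                 /* assumed eventdev */
            conf.timer_adapter_id = 0;
            conf.socket_id = rte_socket_id();
            conf.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK;
            conf.timer_tick_ns = 1000000;          /* 1 ms resolution */
            conf.max_tmo_ns = 100 * 1000000ULL;    /* 100 ms max timeout */
            conf.nb_timers = 1000;
            conf.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

            adapter = rte_event_timer_adapter_create(&conf);
            if (adapter == NULL)
                    return -rte_errno;

            /* map a service core here if the adapter requires one, then: */
            if (rte_event_timer_adapter_start(adapter) < 0)
                    return -1;

            memset(&tim, 0, sizeof(tim));
            tim.ev.op = RTE_EVENT_OP_NEW;
            tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
            tim.ev.queue_id = 0;                   /* assumed event queue */
            tim.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
            tim.state = RTE_EVENT_TIMER_NOT_ARMED;
            tim.timeout_ticks = 50;                /* 50 ticks = 50 ms here */

            if (rte_event_timer_arm_burst(adapter, &timp, 1) != 1)
                    return -rte_errno;             /* also check tim.state */

            return 0;
    }
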
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_spinlock.h>
+#include <rte_memory.h>
+
+#include "rte_eventdev.h"
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this enum may change without prior notice
+ *
+ * Timer adapter clock source
+ */
+enum rte_event_timer_adapter_clk_src {
+ RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ /**< Use CPU clock as the clock source. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
+ /**< Platform dependent external clock source 0. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
+ /**< Platform dependent external clock source 1. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
+ /**< Platform dependent external clock source 2. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK3,
+ /**< Platform dependent external clock source 3. */
+};
+
+#define RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES (1ULL << 0)
+/**< The event timer adapter implementation may have constraints on the
+ * resolution (timer_tick_ns) and maximum timer expiry timeout(max_tmo_ns)
+ * based on the given timer adapter or system. If this flag is set, the
+ * implementation adjusts the resolution and maximum timeout to the best
+ * possible configuration. On successful timer adapter creation, the
+ * application can get the configured resolution and max timeout with
+ * ``rte_event_timer_adapter_get_info()``.
+ *
+ * @see struct rte_event_timer_adapter_info::min_resolution_ns
+ * @see struct rte_event_timer_adapter_info::max_tmo_ns
+ */
+#define RTE_EVENT_TIMER_ADAPTER_F_SP_PUT (1ULL << 1)
+/**< ``rte_event_timer_arm_burst()`` API to be used in single producer mode.
+ *
+ * @see struct rte_event_timer_adapter_conf::flags
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Timer adapter configuration structure
+ */
+struct rte_event_timer_adapter_conf {
+ uint8_t event_dev_id;
+ /**< Event device identifier */
+ uint16_t timer_adapter_id;
+ /**< Event timer adapter identifier */
+ uint32_t socket_id;
+ /**< Identifier of socket from which to allocate memory for adapter */
+ enum rte_event_timer_adapter_clk_src clk_src;
+ /**< Clock source for timer adapter */
+ uint64_t timer_tick_ns;
+ /**< Timer adapter resolution in ns */
+ uint64_t max_tmo_ns;
+ /**< Maximum timer timeout(expiry) in ns */
+ uint64_t nb_timers;
+ /**< Total number of timers per adapter */
+ uint64_t flags;
+ /**< Timer adapter config flags (RTE_EVENT_TIMER_ADAPTER_F_*) */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Event timer adapter stats structure
+ */
+struct rte_event_timer_adapter_stats {
+ uint64_t evtim_exp_count;
+ /**< Number of event timers that have expired. */
+ uint64_t ev_enq_count;
+ /**< Eventdev enqueue count */
+ uint64_t ev_inv_count;
+ /**< Invalid expiry event count */
+ uint64_t evtim_retry_count;
+ /**< Event timer retry count */
+ uint64_t adapter_tick_count;
+ /**< Tick count for the adapter, at its resolution */
+};
+
+struct rte_event_timer_adapter;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Callback function type for producer port creation.
+ */
+typedef int (*rte_event_timer_adapter_port_conf_cb_t)(uint16_t id,
+ uint8_t event_dev_id,
+ uint8_t *event_port_id,
+ void *conf_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create an event timer adapter.
+ *
+ * This function must be invoked first before any other function in the API.
+ *
+ * @param conf
+ * The event timer adapter configuration structure.
+ *
+ * @return
+ * A pointer to the new allocated event timer adapter on success.
+ * NULL on error with rte_errno set appropriately.
+ * Possible rte_errno values include:
+ * - ERANGE: timer_tick_ns is not in supported range.
+ * - ENOMEM: unable to allocate sufficient memory for adapter instances
+ * - EINVAL: invalid event device identifier specified in config
+ * - ENOSPC: maximum number of adapters already created
+ * - EIO: event device reconfiguration and restart error. The adapter
+ * reconfigures the event device with an additional port by default if it is
+ * required to use a service to manage timers. If the device had been started
+ * before this call, this error code indicates an error in restart following
+ * an error in reconfiguration, i.e., a combination of the two error codes.
+ */
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a timer adapter with the supplied callback.
+ *
+ * This function can be used to have more granular control over the timer
+ * adapter creation. If a built-in port is absent, then the function uses the
+ * supplied callback to create a producer port and obtain its id.
+ *
+ * @param conf
+ * The timer adapter configuration structure
+ * @param conf_cb
+ * The port config callback function.
+ * @param conf_arg
+ * Opaque pointer to the argument for the callback function
+ *
+ * @return
+ * A pointer to the new allocated event timer adapter on success.
+ * NULL on error with rte_errno set appropriately.
+ * Possible rte_errno values include:
+ * - ERANGE: timer_tick_ns is not in supported range.
+ * - ENOMEM: unable to allocate sufficient memory for adapter instances
+ * - EINVAL: invalid event device identifier specified in config
+ * - ENOSPC: maximum number of adapters already created
+ */
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create_ext(
+ const struct rte_event_timer_adapter_conf *conf,
+ rte_event_timer_adapter_port_conf_cb_t conf_cb,
+ void *conf_arg);
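
A sketch of a conf_cb that hands the adapter a producer port the application configured itself, assuming tim_port_id holds that port's id and the callback name is hypothetical:

    #include <rte_common.h>
    #include <rte_event_timer_adapter.h>

    /* Return a pre-created producer port instead of letting the library
     * reconfigure the event device to add one.
     */
    static int
    app_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
                     void *conf_arg)
    {
            RTE_SET_USED(id);
            RTE_SET_USED(event_dev_id);

            *event_port_id = *(const uint8_t *)conf_arg; /* pre-created port */
            return 0;
    }

    /* usage:
     *   uint8_t tim_port_id = 2;
     *   adapter = rte_event_timer_adapter_create_ext(&conf, app_port_conf_cb,
     *                                                &tim_port_id);
     */
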
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Timer adapter info structure.
+ */
+struct rte_event_timer_adapter_info {
+ uint64_t min_resolution_ns;
+ /**< Minimum timer adapter resolution in ns */
+ uint64_t max_tmo_ns;
+ /**< Maximum timer timeout(expire) in ns */
+ struct rte_event_timer_adapter_conf conf;
+ /**< Configured timer adapter attributes */
+ uint32_t caps;
+ /**< Event timer adapter capabilities */
+ int16_t event_dev_port_id;
+ /**< Event device port ID, if applicable */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the contextual information of an event timer adapter.
+ *
+ * @param adapter
+ * A pointer to the event timer adapter structure.
+ *
+ * @param[out] adapter_info
+ * A pointer to a structure of type *rte_event_timer_adapter_info* to be
+ * filled with the contextual information of the adapter.
+ *
+ * @return
+ * - 0: Success, driver updates the contextual information of the
+ * timer adapter
+ * - <0: Error code returned by the driver info get function.
+ * - -EINVAL: adapter identifier invalid
+ *
+ * @see RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
+ * struct rte_event_timer_adapter_info
+ *
+ */
+int __rte_experimental
+rte_event_timer_adapter_get_info(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start a timer adapter.
+ *
+ * The adapter start step is the last one in the configuration sequence; it
+ * sets the timer adapter to start accepting timers and scheduling expiry
+ * events to the event queues.
+ *
+ * On success, all basic functions exported by the API (timer arm,
+ * timer cancel and so on) can be invoked.
+ *
+ * @param adapter
+ * A pointer to the event timer adapter structure.
+ *
+ * @return
+ * - 0: Success, adapter started.
+ * - <0: Error code returned by the driver start function.
+ * - -EINVAL if adapter identifier invalid
+ * - -ENOENT if software adapter but no service core mapped
+ * - -ENOTSUP if software adapter and more than one service core mapped
+ */
+int __rte_experimental
+rte_event_timer_adapter_start(
+ const struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop an event timer adapter.
+ *
+ * The adapter can be restarted with a call to
+ * ``rte_event_timer_adapter_start()``.
+ *
+ * @param adapter
+ * A pointer to the event timer adapter structure.
+ *
+ * @return
+ * - 0: Success, adapter stopped.
+ * - <0: Error code returned by the driver stop function.
+ * - -EINVAL if adapter identifier invalid
+ */
+int __rte_experimental
+rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Lookup an event timer adapter using its identifier.
+ *
+ * If an event timer adapter was created in another process with the same
+ * identifier, this function will locate its state and set up access to it
+ * so that it can be used in this process.
+ *
+ * @param adapter_id
+ * The event timer adapter identifier.
+ *
+ * @return
+ * A pointer to the event timer adapter matching the identifier on success.
+ * NULL on error with rte_errno set appropriately.
+ * Possible rte_errno values include:
+ * - ENOENT - requested entry not available to return.
+ */
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_lookup(uint16_t adapter_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free an event timer adapter.
+ *
+ * Destroy an event timer adapter, freeing all resources.
+ *
+ * Before invoking this function, the application must wait for all the
+ * armed timers to expire or cancel the outstanding armed timers.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ *
+ * @return
+ * - 0: Successfully freed the event timer adapter resources.
+ * - <0: Failed to free the event timer adapter resources.
+ * - -EAGAIN: adapter is busy; timers outstanding
+ * - -EBUSY: stop hasn't been called for this adapter yet
+ * - -EINVAL: adapter id invalid, or adapter invalid
+ */
+int __rte_experimental
+rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the service ID of the event timer adapter. If the adapter doesn't
+ * use an rte_service function, this function returns -ESRCH.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ * - -ESRCH: the adapter does not require a service to operate
+ */
+int __rte_experimental
+rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
+ uint32_t *service_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve statistics for an event timer adapter instance.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param[out] stats
+ * A pointer to a structure to fill with statistics.
+ *
+ * @return
+ * - 0: Successfully retrieved.
+ * - <0: Failure; error code returned.
+ */
+int __rte_experimental
+rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset statistics for an event timer adapter instance.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ *
+ * @return
+ * - 0: Successfully reset;
+ * - <0: Failure; error code returned.
+ */
+int __rte_experimental rte_event_timer_adapter_stats_reset(
+ struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Event timer state.
+ */
+enum rte_event_timer_state {
+ RTE_EVENT_TIMER_NOT_ARMED = 0,
+ /**< Event timer not armed. */
+ RTE_EVENT_TIMER_ARMED = 1,
+ /**< Event timer successfully armed. */
+ RTE_EVENT_TIMER_CANCELED = 2,
+ /**< Event timer successfully canceled. */
+ RTE_EVENT_TIMER_ERROR = -1,
+ /**< Generic event timer error. */
+ RTE_EVENT_TIMER_ERROR_TOOEARLY = -2,
+ /**< Event timer timeout tick value is too small for the adapter to
+ * handle, given its configured resolution.
+ */
+ RTE_EVENT_TIMER_ERROR_TOOLATE = -3,
+ /**< Event timer timeout tick is greater than the maximum timeout.*/
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * The generic *rte_event_timer* structure to hold the event timer attributes
+ * for arm and cancel operations.
+ */
+RTE_STD_C11
+struct rte_event_timer {
+ struct rte_event ev;
+ /**<
+ * Expiry event attributes. On successful event timer timeout,
+ * the following attributes will be used to inject the expiry event to
+ * the eventdev:
+ * - event_queue_id: Targeted event queue id for expiry events.
+ * - event_priority: Event priority of the event expiry event in the
+ * event queue relative to other events.
+ * - sched_type: Scheduling type of the expiry event.
+ * - flow_id: Flow id of the expiry event.
+ * - op: RTE_EVENT_OP_NEW
+ * - event_type: RTE_EVENT_TYPE_TIMER
+ */
+ volatile enum rte_event_timer_state state;
+ /**< State of the event timer. */
+ uint64_t timeout_ticks;
+	/**< Expiry timer ticks expressed in number of *timer_tick_ns* from
+ * now.
+ * @see struct rte_event_timer_adapter_info::adapter_conf::timer_tick_ns
+ */
+ uint64_t impl_opaque[2];
+ /**< Implementation-specific opaque data.
+	 * An event timer adapter implementation uses this field to hold
+	 * implementation-specific values shared between the arm and cancel
+	 * operations. The application should not modify this field.
+ */
+ uint8_t user_meta[0];
+ /**< Memory to store user specific metadata.
+ * The event timer adapter implementation should not modify this area.
+ */
+} __rte_cache_aligned;
+
+typedef uint16_t (*rte_event_timer_arm_burst_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **tims,
+ uint16_t nb_tims);
+/**< @internal Enable event timers to enqueue timer events upon expiry */
+typedef uint16_t (*rte_event_timer_arm_tmo_tick_burst_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **tims,
+ uint64_t timeout_tick,
+ uint16_t nb_tims);
+/**< @internal Enable event timers with common expiration time */
+typedef uint16_t (*rte_event_timer_cancel_burst_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **tims,
+ uint16_t nb_tims);
+/**< @internal Prevent event timers from enqueuing timer events */
+
+/**
+ * @internal Data structure associated with each event timer adapter.
+ */
+struct rte_event_timer_adapter {
+ rte_event_timer_arm_burst_t arm_burst;
+ /**< Pointer to driver arm_burst function. */
+ rte_event_timer_arm_tmo_tick_burst_t arm_tmo_tick_burst;
+ /**< Pointer to driver arm_tmo_tick_burst function. */
+ rte_event_timer_cancel_burst_t cancel_burst;
+ /**< Pointer to driver cancel function. */
+ struct rte_event_timer_adapter_data *data;
+ /**< Pointer to shared adapter data */
+ const struct rte_event_timer_adapter_ops *ops;
+ /**< Functions exported by adapter driver */
+
+ RTE_STD_C11
+ uint8_t allocated : 1;
+ /**< Flag to indicate that this adapter has been allocated */
+} __rte_cache_aligned;
+
+#define ADAPTER_VALID_OR_ERR_RET(adapter, retval) do { \
+ if (adapter == NULL || !adapter->allocated) \
+ return retval; \
+} while (0)
+
+#define FUNC_PTR_OR_ERR_RET(func, errval) do { \
+ if ((func) == NULL) \
+ return errval; \
+} while (0)
+
+#define FUNC_PTR_OR_NULL_RET_WITH_ERRNO(func, errval) do { \
+ if ((func) == NULL) { \
+ rte_errno = errval; \
+ return NULL; \
+ } \
+} while (0)
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Arm a burst of event timers with separate expiration timeout tick for each
+ * event timer.
+ *
+ * Before calling this function, the application allocates
+ * ``struct rte_event_timer`` objects from a mempool or from huge-page backed
+ * application buffers of the desired size. On successful allocation, the
+ * application updates the ``struct rte_event_timer`` attributes such as the
+ * expiry event attributes and the timeout ticks from now.
+ * This function submits the event timer arm requests to the event timer
+ * adapter, and on expiry, the events will be injected into the designated
+ * event queue.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param evtims
+ * Pointer to an array of objects of type *rte_event_timer* structure.
+ * @param nb_evtims
+ * Number of event timers in the supplied array.
+ *
+ * @return
+ * The number of successfully armed event timers. The return value can be less
+ * than the value of the *nb_evtims* parameter. If the return value is less
+ * than *nb_evtims*, the remaining event timers at the end of *evtims*
+ * are not consumed, and the caller has to take care of them, and rte_errno
+ * is set accordingly. Possible errno values include:
+ * - EINVAL Invalid timer adapter, expiry event queue ID is invalid, or an
+ * expiry event's sched type doesn't match the capabilities of the
+ * destination event queue.
+ * - EAGAIN Specified timer adapter is not running
+ * - EALREADY A timer was encountered that was already armed
+ */
+static inline uint16_t __rte_experimental
+rte_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->arm_burst, -EINVAL);
+#endif
+ return adapter->arm_burst(adapter, evtims, nb_evtims);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Arm a burst of event timers with same expiration timeout tick.
+ *
+ * Provides the same functionality as ``rte_event_timer_arm_burst()``, except
+ * that the application can use this API when all the event timers have the
+ * same expiration timeout tick. This specialized function gives the adapter
+ * implementation an additional hint that it can use to optimize if possible.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param evtims
+ * Points to an array of objects of type *rte_event_timer* structure.
+ * @param timeout_ticks
+ * The number of ticks in which the timers should expire.
+ * @param nb_evtims
+ * Number of event timers in the supplied array.
+ *
+ * @return
+ * The number of successfully armed event timers. The return value can be less
+ * than the value of the *nb_evtims* parameter. If the return value is less
+ * than *nb_evtims*, the remaining event timers at the end of *evtims*
+ * are not consumed, and the caller has to take care of them, and rte_errno
+ * is set accordingly. Possible errno values include:
+ * - EINVAL Invalid timer adapter, expiry event queue ID is invalid, or an
+ * expiry event's sched type doesn't match the capabilities of the
+ * destination event queue.
+ * - EAGAIN Specified event timer adapter is not running
+ * - EALREADY A timer was encountered that was already armed
+ */
+static inline uint16_t __rte_experimental
+rte_event_timer_arm_tmo_tick_burst(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ const uint64_t timeout_ticks,
+ const uint16_t nb_evtims)
+{
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->arm_tmo_tick_burst, -EINVAL);
+#endif
+ return adapter->arm_tmo_tick_burst(adapter, evtims, timeout_ticks,
+ nb_evtims);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Cancel a burst of event timers from being scheduled to the event device.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param evtims
+ * Points to an array of objects of type *rte_event_timer* structure
+ * @param nb_evtims
+ * Number of event timer instances in the supplied array.
+ *
+ * @return
+ * The number of successfully canceled event timers. The return value can be
+ * less than the value of the *nb_evtims* parameter. If the return value is
+ * less than *nb_evtims*, the remaining event timers at the end of *evtims*
+ * are not consumed, and the caller has to take care of them, and rte_errno
+ * is set accordingly. Possible errno values include:
+ * - EINVAL Invalid timer adapter identifier
+ * - EAGAIN Specified timer adapter is not running
+ * - EALREADY A timer was encountered that was already canceled
+ */
+static inline uint16_t __rte_experimental
+rte_event_timer_cancel_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->cancel_burst, -EINVAL);
+#endif
+ return adapter->cancel_burst(adapter, evtims, nb_evtims);
+}
+
+#endif /* __RTE_EVENT_TIMER_ADAPTER_H__ */
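To make the arm path above concrete, here is a minimal application-side sketch. It assumes an adapter that has already been created and started, and a hypothetical mempool evtim_pool of rte_event_timer objects; error handling is reduced to the essentials.

#include <errno.h>
#include <rte_eventdev.h>
#include <rte_event_timer_adapter.h>
#include <rte_mempool.h>
#include <rte_errno.h>

/* Arm a single timer that fires 100 timer ticks from now. */
static int
arm_one_timer(const struct rte_event_timer_adapter *adapter,
	      struct rte_mempool *evtim_pool, uint8_t queue_id)
{
	struct rte_event_timer *evtim;

	if (rte_mempool_get(evtim_pool, (void **)&evtim) < 0)
		return -ENOMEM;

	/* Describe the expiry event that the adapter will inject. */
	evtim->ev.op = RTE_EVENT_OP_NEW;
	evtim->ev.event_type = RTE_EVENT_TYPE_TIMER;
	evtim->ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	evtim->ev.queue_id = queue_id;
	evtim->ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	evtim->ev.flow_id = 0;
	evtim->state = RTE_EVENT_TIMER_NOT_ARMED;
	evtim->timeout_ticks = 100;	/* in units of timer_tick_ns */

	if (rte_event_timer_arm_burst(adapter, &evtim, 1) != 1) {
		/* Timer not consumed; rte_errno tells us why, e.g. EAGAIN. */
		rte_mempool_put(evtim_pool, evtim);
		return -rte_errno;
	}

	return 0;
}

On expiry the adapter enqueues evtim->ev to queue_id; the application returns the timer object to the pool after it dequeues the expiry event or cancels the timer.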
diff --git a/lib/librte_eventdev/rte_event_timer_adapter_pmd.h b/lib/librte_eventdev/rte_event_timer_adapter_pmd.h
new file mode 100644
index 00000000..cf3509dc
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_timer_adapter_pmd.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+#define __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+
+/**
+ * @file
+ * RTE Event Timer Adapter API (PMD Side)
+ *
+ * @note
+ * This file provides implementation helpers for internal use by PMDs. They
+ * are not intended to be exposed to applications and are not subject to ABI
+ * versioning.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_event_timer_adapter.h"
+
+/*
+ * Definitions of functions exported by an event timer adapter implementation
+ * through *rte_event_timer_adapter_ops* structure supplied in the
+ * *rte_event_timer_adapter* structure associated with an event timer adapter.
+ */
+
+typedef int (*rte_event_timer_adapter_init_t)(
+ struct rte_event_timer_adapter *adapter);
+/**< @internal Event timer adapter implementation setup */
+typedef int (*rte_event_timer_adapter_uninit_t)(
+ struct rte_event_timer_adapter *adapter);
+/**< @internal Event timer adapter implementation teardown */
+typedef int (*rte_event_timer_adapter_start_t)(
+ const struct rte_event_timer_adapter *adapter);
+/**< @internal Start running event timer adapter */
+typedef int (*rte_event_timer_adapter_stop_t)(
+ const struct rte_event_timer_adapter *adapter);
+/**< @internal Stop running event timer adapter */
+typedef void (*rte_event_timer_adapter_get_info_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info);
+/**< @internal Get contextual information for event timer adapter */
+typedef int (*rte_event_timer_adapter_stats_get_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats);
+/**< @internal Get statistics for event timer adapter */
+typedef int (*rte_event_timer_adapter_stats_reset_t)(
+ const struct rte_event_timer_adapter *adapter);
+/**< @internal Reset statistics for event timer adapter */
+
+/**
+ * @internal Structure containing the functions exported by an event timer
+ * adapter implementation.
+ */
+struct rte_event_timer_adapter_ops {
+ rte_event_timer_adapter_init_t init; /**< Set up adapter */
+ rte_event_timer_adapter_uninit_t uninit;/**< Tear down adapter */
+ rte_event_timer_adapter_start_t start; /**< Start adapter */
+ rte_event_timer_adapter_stop_t stop; /**< Stop adapter */
+ rte_event_timer_adapter_get_info_t get_info;
+ /**< Get info from driver */
+ rte_event_timer_adapter_stats_get_t stats_get;
+ /**< Get adapter statistics */
+ rte_event_timer_adapter_stats_reset_t stats_reset;
+ /**< Reset adapter statistics */
+ rte_event_timer_arm_burst_t arm_burst;
+ /**< Arm one or more event timers */
+ rte_event_timer_arm_tmo_tick_burst_t arm_tmo_tick_burst;
+ /**< Arm event timers with same expiration time */
+ rte_event_timer_cancel_burst_t cancel_burst;
+ /**< Cancel one or more event timers */
+};
+
+/**
+ * @internal Adapter data; structure to be placed in shared memory to be
+ * accessible by various processes in a multi-process configuration.
+ */
+struct rte_event_timer_adapter_data {
+ uint8_t id;
+ /**< Event timer adapter ID */
+ uint8_t event_dev_id;
+ /**< Event device ID */
+ uint32_t socket_id;
+ /**< Socket ID where memory is allocated */
+ uint8_t event_port_id;
+ /**< Optional: event port ID used when the inbuilt port is absent */
+ const struct rte_memzone *mz;
+ /**< Event timer adapter memzone pointer */
+ struct rte_event_timer_adapter_conf conf;
+ /**< Configuration used to configure the adapter. */
+ uint32_t caps;
+ /**< Adapter capabilities */
+ void *adapter_priv;
+ /**< Timer adapter private data*/
+ uint8_t service_inited;
+ /**< Service initialization state */
+ uint32_t service_id;
+ /**< Service ID*/
+
+ RTE_STD_C11
+ uint8_t started : 1;
+ /**< Flag to indicate adapter started. */
+} __rte_cache_aligned;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_TIMER_ADAPTER_PMD_H__ */
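For driver authors, a hedged sketch of how a PMD might populate this ops table and hand it back through the timer adapter capability callback (eventdev_timer_adapter_caps_get_t, added to rte_eventdev_pmd.h later in this patch). The sw_timer_* names are hypothetical, only two callbacks are stubbed out, and a software expiry mechanism driven by a service core is assumed.

#include <rte_common.h>
#include <rte_eventdev_pmd.h>

static int
sw_timer_start(const struct rte_event_timer_adapter *adapter)
{
	RTE_SET_USED(adapter);
	return 0;	/* e.g. enable the service that fires expirations */
}

static int
sw_timer_stop(const struct rte_event_timer_adapter *adapter)
{
	RTE_SET_USED(adapter);
	return 0;
}

static const struct rte_event_timer_adapter_ops sw_timer_ops = {
	.start = sw_timer_start,
	.stop = sw_timer_stop,
	/* init, uninit, get_info, stats_get, stats_reset and the
	 * arm/cancel burst callbacks are wired up the same way.
	 */
};

/* Plugged into rte_eventdev_ops.timer_adapter_caps_get by the PMD. */
static int
sw_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
			  uint32_t *caps,
			  const struct rte_event_timer_adapter_ops **ops)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(flags);

	*caps = 0;	/* no internal port: a service core drives expiry */
	*ops = &sw_timer_ops;
	return 0;
}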
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index 851a1190..801810ed 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -29,6 +29,8 @@
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
@@ -55,16 +57,21 @@ int
rte_event_dev_get_dev_id(const char *name)
{
int i;
+ uint8_t cmp;
if (!name)
return -EINVAL;
- for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
- if ((strcmp(rte_event_devices[i].data->name, name)
- == 0) &&
- (rte_event_devices[i].attached ==
- RTE_EVENTDEV_ATTACHED))
+ for (i = 0; i < rte_eventdev_globals->nb_devs; i++) {
+ cmp = (strncmp(rte_event_devices[i].data->name, name,
+ RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
+ (rte_event_devices[i].dev ? (strncmp(
+ rte_event_devices[i].dev->driver->name, name,
+ RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
+ if (cmp && (rte_event_devices[i].attached ==
+ RTE_EVENTDEV_ATTACHED))
return i;
+ }
return -ENODEV;
}
@@ -123,6 +130,51 @@ rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
: 0;
}
+int __rte_experimental
+rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
+{
+ struct rte_eventdev *dev;
+ const struct rte_event_timer_adapter_ops *ops;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+ dev = &rte_eventdevs[dev_id];
+
+ if (caps == NULL)
+ return -EINVAL;
+ *caps = 0;
+
+ return dev->dev_ops->timer_adapter_caps_get ?
+ (*dev->dev_ops->timer_adapter_caps_get)(dev,
+ 0,
+ caps,
+ &ops)
+ : 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
+ uint32_t *caps)
+{
+ struct rte_eventdev *dev;
+ struct rte_cryptodev *cdev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
+ return -EINVAL;
+
+ dev = &rte_eventdevs[dev_id];
+ cdev = rte_cryptodev_pmd_get_dev(cdev_id);
+
+ if (caps == NULL)
+ return -EINVAL;
+ *caps = 0;
+
+ return dev->dev_ops->crypto_adapter_caps_get ?
+ (*dev->dev_ops->crypto_adapter_caps_get)
+ (dev, cdev, caps) : -ENOTSUP;
+}
+
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
@@ -1123,6 +1175,23 @@ rte_event_dev_start(uint8_t dev_id)
return 0;
}
+int
+rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
+ eventdev_stop_flush_t callback, void *userdata)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ dev->dev_ops->dev_stop_flush = callback;
+ dev->data->dev_stop_flush_arg = userdata;
+
+ return 0;
+}
+
void
rte_event_dev_stop(uint8_t dev_id)
{
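As a usage illustration for the new stop-flush hook implemented above, a small sketch of an application that frees mbuf-carrying events drained by rte_event_dev_stop(); it assumes the application only enqueues CPU events whose payload is an mbuf pointer.

#include <rte_eventdev.h>
#include <rte_mbuf.h>

/* Invoked once per event flushed out of the device during stop. */
static void
flush_event_cb(uint8_t dev_id, struct rte_event event, void *arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(arg);

	if (event.event_type == RTE_EVENT_TYPE_CPU && event.mbuf != NULL)
		rte_pktmbuf_free(event.mbuf);
}

static void
quiesce_eventdev(uint8_t dev_id)
{
	/* Register before stopping; pass NULL later to unregister. */
	if (rte_event_dev_stop_flush_callback_register(dev_id, flush_event_cb,
						       NULL) != 0)
		return;

	rte_event_dev_stop(dev_id);
}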
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index b21c2717..b6fd6ee7 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -1,35 +1,8 @@
-/*
- * BSD LICENSE
- *
- * Copyright 2016 Cavium, Inc.
- * Copyright 2016 Intel Corporation.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc.
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright 2016 NXP
+ * All rights reserved.
*/
#ifndef _RTE_EVENTDEV_H_
@@ -244,6 +217,7 @@ extern "C" {
#include <rte_errno.h>
struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
+struct rte_event;
/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
@@ -835,15 +809,60 @@ int
rte_event_dev_start(uint8_t dev_id);
/**
- * Stop an event device. The device can be restarted with a call to
- * rte_event_dev_start()
+ * Stop an event device.
+ *
+ * This function causes all queued events to be drained, including those
+ * residing in event ports. While draining events out of the device, this
+ * function calls the user-provided flush callback (if one was registered) once
+ * per event.
+ *
+ * The device can be restarted with a call to rte_event_dev_start(). Threads
+ * that continue to enqueue/dequeue while the device is stopped, or is being
+ * stopped, cause undefined behavior. This includes event adapters,
+ * which must be stopped prior to stopping the eventdev.
*
* @param dev_id
* Event device identifier.
+ *
+ * @see rte_event_dev_stop_flush_callback_register()
*/
void
rte_event_dev_stop(uint8_t dev_id);
+typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event,
+ void *arg);
+/**< Callback function called during rte_event_dev_stop(), invoked once per
+ * flushed event.
+ */
+
+/**
+ * Registers a callback function to be invoked during rte_event_dev_stop() for
+ * each flushed event. This function can be used to properly dispose of queued
+ * events, for example events containing memory pointers.
+ *
+ * The callback function is only registered for the calling process. The
+ * callback function must be registered in every process that can call
+ * rte_event_dev_stop().
+ *
+ * To unregister a callback, call this function with a NULL callback pointer.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param callback
+ * Callback function invoked once per flushed event.
+ * @param userdata
+ * Argument supplied to callback.
+ *
+ * @return
+ * - 0 on success.
+ * - -EINVAL if *dev_id* is invalid
+ *
+ * @see rte_event_dev_stop()
+ */
+int
+rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
+ eventdev_stop_flush_t callback, void *userdata);
+
/**
* Close an event device. The device cannot be restarted!
*
@@ -923,8 +942,8 @@ rte_event_dev_close(uint8_t dev_id);
/**< The event generated from ethdev subsystem */
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
/**< The event generated from crypodev subsystem */
-#define RTE_EVENT_TYPE_TIMERDEV 0x2
-/**< The event generated from timerdev subsystem */
+#define RTE_EVENT_TYPE_TIMER 0x2
+/**< The event generated from event timer adapter */
#define RTE_EVENT_TYPE_CPU 0x3
/**< The event generated from cpu for pipelining.
* Application may use *sub_event_type* to further classify the event
@@ -1096,7 +1115,77 @@ int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
uint32_t *caps);
-struct rte_eventdev_driver;
+#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
+/**< This flag is set when the timer mechanism is in HW. */
+
+/**
+ * Retrieve the event device's timer adapter capabilities.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param[out] caps
+ * A pointer to memory to be filled with event timer adapter capabilities.
+ *
+ * @return
+ * - 0: Success, driver provided event timer adapter capabilities.
+ * - <0: Error code returned by the driver function.
+ */
+int __rte_experimental
+rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
+
+/* Crypto adapter capability bitmap flag */
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
+/**< Flag indicates HW is capable of generating events in
+ * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
+ * packets to the event device as new events using an internal
+ * event port.
+ */
+
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
+/**< Flag indicates HW is capable of generating events in
+ * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
+ * packets to the event device as forwarded event using an
+ * internal event port.
+ */
+
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
+/**< Flag indicates HW is capable of mapping crypto queue pair to
+ * event queue.
+ */
+
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
+/**< Flag indicates HW/SW supports a mechanism to store and retrieve
+ * the private data information along with the crypto session.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the event device's crypto adapter capabilities for the
+ * specified cryptodev device
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param cdev_id
+ * The identifier of the cryptodev device.
+ *
+ * @param[out] caps
+ * A pointer to memory filled with event adapter capabilities.
+ * It is expected to be pre-allocated & initialized by caller.
+ *
+ * @return
+ * - 0: Success, driver provides event adapter capabilities for the
+ * cryptodev device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+int __rte_experimental
+rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
+ uint32_t *caps);
+
struct rte_eventdev_ops;
struct rte_eventdev;
@@ -1152,6 +1241,8 @@ struct rte_eventdev_data {
/* Service initialization state */
uint32_t service_id;
/* Service ID*/
+ void *dev_stop_flush_arg;
+ /**< User-provided argument for event flush function */
RTE_STD_C11
uint8_t dev_started : 1;
@@ -1178,7 +1269,7 @@ struct rte_eventdev {
struct rte_eventdev_data *data;
/**< Pointer to device data */
- const struct rte_eventdev_ops *dev_ops;
+ struct rte_eventdev_ops *dev_ops;
/**< Functions exported by PMD */
struct rte_device *dev;
/**< Device info. supplied by probing */
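A short sketch of how an application might consult the new capability queries before deciding whether the timer and crypto adapters will need a service core; dev_id and cdev_id are assumed to be valid, already-probed device identifiers.

#include <stdio.h>
#include <rte_eventdev.h>

static void
report_adapter_caps(uint8_t dev_id, uint8_t cdev_id)
{
	uint32_t caps = 0;

	if (rte_event_timer_adapter_caps_get(dev_id, &caps) == 0 &&
	    (caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
		printf("timer expiry is offloaded to the event device\n");

	caps = 0;
	if (rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &caps) == 0 &&
	    (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
		printf("cryptodev forwards completions via an internal port\n");
}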
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
index 31343b51..3fbb4d2b 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -26,6 +26,7 @@ extern "C" {
#include <rte_malloc.h>
#include "rte_eventdev.h"
+#include "rte_event_timer_adapter_pmd.h"
/* Logging Macros */
#define RTE_EDEV_LOG_ERR(...) \
@@ -69,6 +70,9 @@ extern "C" {
((RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) | \
(RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ))
+#define RTE_EVENT_CRYPTO_ADAPTER_SW_CAP \
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA
+
/**< Ethernet Rx adapter cap to return If the packet transfers from
* the ethdev to eventdev use a SW service function
*/
@@ -449,6 +453,37 @@ typedef int (*eventdev_eth_rx_adapter_caps_get_t)
struct rte_event_eth_rx_adapter_queue_conf *queue_conf;
/**
+ * Retrieve the event device's timer adapter capabilities, as well as the ops
+ * structure that an event timer adapter should call through to enter the
+ * driver
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param flags
+ * Flags that can be used to determine how to select an event timer
+ * adapter ops structure
+ *
+ * @param[out] caps
+ * A pointer to memory to be filled with event timer adapter capabilities.
+ *
+ * @param[out] ops
+ * A pointer to the ops pointer to set with the address of the desired ops
+ * structure
+ *
+ * @return
+ * - 0: Success, driver provides event timer adapter capabilities.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_timer_adapter_caps_get_t)(
+ const struct rte_eventdev *dev,
+ uint64_t flags,
+ uint32_t *caps,
+ const struct rte_event_timer_adapter_ops **ops);
+
+/**
* Add ethernet Rx queues to event device. This callback is invoked if
* the caps returned from rte_eventdev_eth_rx_adapter_caps_get(, eth_port_id)
* has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
@@ -585,6 +620,175 @@ typedef int (*eventdev_eth_rx_adapter_stats_reset)
*/
typedef int (*eventdev_selftest)(void);
+
+struct rte_cryptodev;
+
+/**
+ * This API may change without prior notice
+ *
+ * Retrieve the event device's crypto adapter capabilities for the
+ * specified cryptodev
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * cryptodev pointer
+ *
+ * @param[out] caps
+ * A pointer to memory filled with event adapter capabilities.
+ * It is expected to be pre-allocated & initialized by caller.
+ *
+ * @return
+ * - 0: Success, driver provides event adapter capabilities for the
+ * cryptodev.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_crypto_adapter_caps_get_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps);
+
+/**
+ * This API may change without prior notice
+ *
+ * Add crypto queue pair to event device. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(, cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * cryptodev pointer
+ *
+ * @param queue_pair_id
+ * cryptodev queue pair identifier.
+ *
+ * @param event
+ * Event information required for binding cryptodev queue pair to event queue.
+ * This structure will have a valid value for only those HW PMDs supporting
+ * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability.
+ *
+ * @return
+ * - 0: Success, cryptodev queue pair added successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id,
+ const struct rte_event *event);
+
+
+/**
+ * This API may change without prior notice
+ *
+ * Delete crypto queue pair from event device. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(, cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * cryptodev pointer
+ *
+ * @param queue_pair_id
+ * cryptodev queue pair identifier.
+ *
+ * @return
+ * - 0: Success, cryptodev queue pair deleted successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_crypto_adapter_queue_pair_del_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id);
+
+/**
+ * Start crypto adapter. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(.., cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set and queue pairs
+ * from cdev_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @return
+ * - 0: Success, crypto adapter started successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_crypto_adapter_start_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev);
+
+/**
+ * Stop crypto adapter. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(.., cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set and queue pairs
+ * from cdev_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @return
+ * - 0: Success, crypto adapter stopped successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_crypto_adapter_stop_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev);
+
+struct rte_event_crypto_adapter_stats;
+
+/**
+ * Retrieve crypto adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @param[out] stats
+ * Pointer to stats structure
+ *
+ * @return
+ * Return 0 on success.
+ */
+
+typedef int (*eventdev_crypto_adapter_stats_get)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ struct rte_event_crypto_adapter_stats *stats);
+
+/**
+ * Reset crypto adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @return
+ * Return 0 on success.
+ */
+
+typedef int (*eventdev_crypto_adapter_stats_reset)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev);
+
/** Event device operations function pointer table */
struct rte_eventdev_ops {
eventdev_info_get_t dev_infos_get; /**< Get device info. */
@@ -640,8 +844,29 @@ struct rte_eventdev_ops {
eventdev_eth_rx_adapter_stats_reset eth_rx_adapter_stats_reset;
/**< Reset ethernet Rx stats */
+ eventdev_timer_adapter_caps_get_t timer_adapter_caps_get;
+ /**< Get timer adapter capabilities */
+
+ eventdev_crypto_adapter_caps_get_t crypto_adapter_caps_get;
+ /**< Get crypto adapter capabilities */
+ eventdev_crypto_adapter_queue_pair_add_t crypto_adapter_queue_pair_add;
+ /**< Add queue pair to crypto adapter */
+ eventdev_crypto_adapter_queue_pair_del_t crypto_adapter_queue_pair_del;
+ /**< Delete queue pair from crypto adapter */
+ eventdev_crypto_adapter_start_t crypto_adapter_start;
+ /**< Start crypto adapter */
+ eventdev_crypto_adapter_stop_t crypto_adapter_stop;
+ /**< Stop crypto adapter */
+ eventdev_crypto_adapter_stats_get crypto_adapter_stats_get;
+ /**< Get crypto stats */
+ eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
+ /**< Reset crypto stats */
+
eventdev_selftest dev_selftest;
/**< Start eventdev Selftest */
+
+ eventdev_stop_flush_t dev_stop_flush;
+ /**< User-provided event flush function */
};
/**
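To show how the new callbacks surface in a driver, a hedged sketch of a PMD ops table that wires in the crypto adapter entry points; every my_crypto_* symbol is a hypothetical driver function declared here and implemented elsewhere in the PMD.

#include <rte_eventdev_pmd.h>

/* Hypothetical driver callbacks, implemented elsewhere in the PMD. */
int my_crypto_caps_get(const struct rte_eventdev *dev,
		       const struct rte_cryptodev *cdev, uint32_t *caps);
int my_crypto_qp_add(const struct rte_eventdev *dev,
		     const struct rte_cryptodev *cdev, int32_t queue_pair_id,
		     const struct rte_event *event);
int my_crypto_qp_del(const struct rte_eventdev *dev,
		     const struct rte_cryptodev *cdev, int32_t queue_pair_id);

static struct rte_eventdev_ops my_eventdev_ops = {
	/* dev_infos_get, queue/port setup, other adapter callbacks omitted */
	.crypto_adapter_caps_get = my_crypto_caps_get,
	.crypto_adapter_queue_pair_add = my_crypto_qp_add,
	.crypto_adapter_queue_pair_del = my_crypto_qp_del,
};

Callbacks a PMD cannot support can be left NULL; for example, rte_event_crypto_adapter_caps_get() in rte_eventdev.c above returns -ENOTSUP when crypto_adapter_caps_get is not provided.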
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 2aef470b..12835e9f 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -66,7 +66,6 @@ DPDK_17.11 {
rte_event_eth_rx_adapter_stats_get;
rte_event_eth_rx_adapter_stats_reset;
rte_event_eth_rx_adapter_stop;
-
} DPDK_17.08;
DPDK_18.02 {
@@ -74,3 +73,41 @@ DPDK_18.02 {
rte_event_dev_selftest;
} DPDK_17.11;
+
+DPDK_18.05 {
+ global:
+
+ rte_event_dev_stop_flush_callback_register;
+} DPDK_18.02;
+
+EXPERIMENTAL {
+ global:
+
+ rte_event_crypto_adapter_caps_get;
+ rte_event_crypto_adapter_create;
+ rte_event_crypto_adapter_create_ext;
+ rte_event_crypto_adapter_event_port_get;
+ rte_event_crypto_adapter_free;
+ rte_event_crypto_adapter_queue_pair_add;
+ rte_event_crypto_adapter_queue_pair_del;
+ rte_event_crypto_adapter_service_id_get;
+ rte_event_crypto_adapter_start;
+ rte_event_crypto_adapter_stats_get;
+ rte_event_crypto_adapter_stats_reset;
+ rte_event_crypto_adapter_stop;
+ rte_event_eth_rx_adapter_cb_register;
+ rte_event_timer_adapter_caps_get;
+ rte_event_timer_adapter_create;
+ rte_event_timer_adapter_create_ext;
+ rte_event_timer_adapter_free;
+ rte_event_timer_adapter_get_info;
+ rte_event_timer_adapter_lookup;
+ rte_event_timer_adapter_service_id_get;
+ rte_event_timer_adapter_start;
+ rte_event_timer_adapter_stats_get;
+ rte_event_timer_adapter_stats_reset;
+ rte_event_timer_adapter_stop;
+ rte_event_timer_arm_burst;
+ rte_event_timer_arm_tmo_tick_burst;
+ rte_event_timer_cancel_burst;
+};
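Finally, a minimal sketch that exercises several of the newly exported timer adapter symbols from a secondary process: it looks up an adapter created by the primary under an agreed identifier (TEST_ADAPTER_ID is an assumed constant) and reads, then clears, its statistics.

#include <rte_event_timer_adapter.h>
#include <rte_errno.h>

#define TEST_ADAPTER_ID 0	/* must match the id used at create time */

static int
dump_and_reset_timer_stats(void)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_event_timer_adapter_stats stats;

	adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
	if (adapter == NULL)
		return -rte_errno;	/* e.g. ENOENT if not created yet */

	if (rte_event_timer_adapter_stats_get(adapter, &stats) != 0)
		return -1;

	/* ... report the counters of interest from stats here ... */

	return rte_event_timer_adapter_stats_reset(adapter);
}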