author    Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 14:51:32 +0200
committer Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 16:20:45 +0200
commit    7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree      4bfeadc905c977e45e54a90c42330553b8942e4e /lib/librte_eventdev
parent    ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)
Imported Upstream version 17.05
Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'lib/librte_eventdev')
-rw-r--r--  lib/librte_eventdev/Makefile                      53
-rw-r--r--  lib/librte_eventdev/rte_eventdev.c              1345
-rw-r--r--  lib/librte_eventdev/rte_eventdev.h              1588
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd.h           599
-rw-r--r--  lib/librte_eventdev/rte_eventdev_version.map      44
5 files changed, 3629 insertions, 0 deletions
diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
new file mode 100644
index 00000000..e06346a6
--- /dev/null
+++ b/lib/librte_eventdev/Makefile
@@ -0,0 +1,53 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Cavium networks. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_eventdev.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library source files
+SRCS-y += rte_eventdev.c
+
+# export include files
+SYMLINK-y-include += rte_eventdev.h
+SYMLINK-y-include += rte_eventdev_pmd.h
+
+# versioning export map
+EXPORT_MAP := rte_eventdev_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
new file mode 100644
index 00000000..20afc3f0
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -0,0 +1,1345 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Cavium networks. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+
+struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
+
+struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
+
+static struct rte_eventdev_global eventdev_globals = {
+ .nb_devs = 0
+};
+
+struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
+
+/* Event dev north bound API implementation */
+
+uint8_t
+rte_event_dev_count(void)
+{
+ return rte_eventdev_globals->nb_devs;
+}
+
+int
+rte_event_dev_get_dev_id(const char *name)
+{
+ int i;
+
+ if (!name)
+ return -EINVAL;
+
+ for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
+ if ((strcmp(rte_event_devices[i].data->name, name)
+ == 0) &&
+ (rte_event_devices[i].attached ==
+ RTE_EVENTDEV_ATTACHED))
+ return i;
+ return -ENODEV;
+}
+
+int
+rte_event_dev_socket_id(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ return dev->data->socket_id;
+}
+
+int
+rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (dev_info == NULL)
+ return -EINVAL;
+
+ memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+ dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
+
+ dev_info->dev = dev->dev;
+ if (dev->driver)
+ dev_info->driver_name = dev->driver->pci_drv.driver.name;
+ return 0;
+}
+
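+/* Resize the per-queue priority array to match nb_queues: allocate it on
+ * first-time configuration, grow or shrink it on reconfiguration (releasing
+ * any queues beyond the new count), and release all queues when nb_queues
+ * is zero.
+ */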
+static inline int
+rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
+{
+ uint8_t old_nb_queues = dev->data->nb_queues;
+ uint8_t *queues_prio;
+ unsigned int i;
+
+ RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
+ dev->data->dev_id);
+
+ /* First time configuration */
+ if (dev->data->queues_prio == NULL && nb_queues != 0) {
+ /* Allocate memory to store queue priority */
+ dev->data->queues_prio = rte_zmalloc_socket(
+ "eventdev->data->queues_prio",
+ sizeof(dev->data->queues_prio[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->queues_prio == NULL) {
+ dev->data->nb_queues = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
+					" nb_queues %u", nb_queues);
+ return -(ENOMEM);
+ }
+ /* Re-configure */
+ } else if (dev->data->queues_prio != NULL && nb_queues != 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->queue_release)(dev, i);
+
+ /* Re allocate memory to store queue priority */
+ queues_prio = dev->data->queues_prio;
+ queues_prio = rte_realloc(queues_prio,
+ sizeof(queues_prio[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (queues_prio == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
+ " nb_queues %u", nb_queues);
+ return -(ENOMEM);
+ }
+ dev->data->queues_prio = queues_prio;
+
+ if (nb_queues > old_nb_queues) {
+ uint8_t new_qs = nb_queues - old_nb_queues;
+
+ memset(queues_prio + old_nb_queues, 0,
+ sizeof(queues_prio[0]) * new_qs);
+ }
+ } else if (dev->data->queues_prio != NULL && nb_queues == 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->queue_release)(dev, i);
+ }
+
+ dev->data->nb_queues = nb_queues;
+ return 0;
+}
+
+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
+
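+/* Resize the per-port bookkeeping (port pointers, dequeue/enqueue depths and
+ * the queue-to-port links map) to match nb_ports, following the same
+ * first-time/reconfigure/teardown pattern as the queue configuration above.
+ */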
+static inline int
+rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
+{
+ uint8_t old_nb_ports = dev->data->nb_ports;
+ void **ports;
+ uint16_t *links_map;
+ uint8_t *ports_dequeue_depth;
+ uint8_t *ports_enqueue_depth;
+ unsigned int i;
+
+ RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
+ dev->data->dev_id);
+
+ /* First time configuration */
+ if (dev->data->ports == NULL && nb_ports != 0) {
+ dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
+ sizeof(dev->data->ports[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->ports == NULL) {
+ dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
+					" nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Allocate memory to store ports dequeue depth */
+ dev->data->ports_dequeue_depth =
+ rte_zmalloc_socket("eventdev->ports_dequeue_depth",
+ sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->ports_dequeue_depth == NULL) {
+ dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port deq meta,"
+					" nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Allocate memory to store ports enqueue depth */
+ dev->data->ports_enqueue_depth =
+ rte_zmalloc_socket("eventdev->ports_enqueue_depth",
+ sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->ports_enqueue_depth == NULL) {
+ dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port enq meta,"
+					" nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Allocate memory to store queue to port link connection */
+ dev->data->links_map =
+ rte_zmalloc_socket("eventdev->links_map",
+ sizeof(dev->data->links_map[0]) * nb_ports *
+ RTE_EVENT_MAX_QUEUES_PER_DEV,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->links_map == NULL) {
+ dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
+					" nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+ for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
+ dev->data->links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+ } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
+
+ ports = dev->data->ports;
+ ports_dequeue_depth = dev->data->ports_dequeue_depth;
+ ports_enqueue_depth = dev->data->ports_enqueue_depth;
+ links_map = dev->data->links_map;
+
+ for (i = nb_ports; i < old_nb_ports; i++)
+ (*dev->dev_ops->port_release)(ports[i]);
+
+ /* Realloc memory for ports */
+ ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE);
+ if (ports == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
+ " nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Realloc memory for ports_dequeue_depth */
+ ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
+ sizeof(ports_dequeue_depth[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE);
+ if (ports_dequeue_depth == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta,"
+ " nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Realloc memory for ports_enqueue_depth */
+ ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
+ sizeof(ports_enqueue_depth[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE);
+ if (ports_enqueue_depth == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta,"
+ " nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Realloc memory to store queue to port link connection */
+ links_map = rte_realloc(links_map,
+ sizeof(dev->data->links_map[0]) * nb_ports *
+ RTE_EVENT_MAX_QUEUES_PER_DEV,
+ RTE_CACHE_LINE_SIZE);
+		if (links_map == NULL) {
+ dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
+					" nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ if (nb_ports > old_nb_ports) {
+ uint8_t new_ps = nb_ports - old_nb_ports;
+ unsigned int old_links_map_end =
+ old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+ unsigned int links_map_end =
+ nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+
+ memset(ports + old_nb_ports, 0,
+ sizeof(ports[0]) * new_ps);
+ memset(ports_dequeue_depth + old_nb_ports, 0,
+ sizeof(ports_dequeue_depth[0]) * new_ps);
+ memset(ports_enqueue_depth + old_nb_ports, 0,
+ sizeof(ports_enqueue_depth[0]) * new_ps);
+ for (i = old_links_map_end; i < links_map_end; i++)
+ links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+ }
+
+ dev->data->ports = ports;
+ dev->data->ports_dequeue_depth = ports_dequeue_depth;
+ dev->data->ports_enqueue_depth = ports_enqueue_depth;
+ dev->data->links_map = links_map;
+ } else if (dev->data->ports != NULL && nb_ports == 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
+
+ ports = dev->data->ports;
+ for (i = nb_ports; i < old_nb_ports; i++)
+ (*dev->dev_ops->port_release)(ports[i]);
+ }
+
+ dev->data->nb_ports = nb_ports;
+ return 0;
+}
+
+int
+rte_event_dev_configure(uint8_t dev_id,
+ const struct rte_event_dev_config *dev_conf)
+{
+ struct rte_eventdev *dev;
+ struct rte_event_dev_info info;
+ int diag;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+ if (dev->data->dev_started) {
+ RTE_EDEV_LOG_ERR(
+ "device %d must be stopped to allow configuration", dev_id);
+ return -EBUSY;
+ }
+
+ if (dev_conf == NULL)
+ return -EINVAL;
+
+ (*dev->dev_ops->dev_infos_get)(dev, &info);
+
+ /* Check dequeue_timeout_ns value is in limit */
+ if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
+ if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+ || dev_conf->dequeue_timeout_ns >
+ info.max_dequeue_timeout_ns) {
+ RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
+ " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
+ dev_id, dev_conf->dequeue_timeout_ns,
+ info.min_dequeue_timeout_ns,
+ info.max_dequeue_timeout_ns);
+ return -EINVAL;
+ }
+ }
+
+ /* Check nb_events_limit is in limit */
+ if (dev_conf->nb_events_limit > info.max_num_events) {
+ RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
+ dev_id, dev_conf->nb_events_limit, info.max_num_events);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_queues is in limit */
+ if (!dev_conf->nb_event_queues) {
+ RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
+ dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_queues > info.max_event_queues) {
+ RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
+ dev_id, dev_conf->nb_event_queues, info.max_event_queues);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_ports is in limit */
+ if (!dev_conf->nb_event_ports) {
+ RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_ports > info.max_event_ports) {
+ RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
+ dev_id, dev_conf->nb_event_ports, info.max_event_ports);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_queue_flows is in limit */
+ if (!dev_conf->nb_event_queue_flows) {
+ RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
+ RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
+ dev_id, dev_conf->nb_event_queue_flows,
+ info.max_event_queue_flows);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_port_dequeue_depth is in limit */
+ if (!dev_conf->nb_event_port_dequeue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
+ dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_port_dequeue_depth >
+ info.max_event_port_dequeue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
+ dev_id, dev_conf->nb_event_port_dequeue_depth,
+ info.max_event_port_dequeue_depth);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_port_enqueue_depth is in limit */
+ if (!dev_conf->nb_event_port_enqueue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
+ dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_port_enqueue_depth >
+ info.max_event_port_enqueue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
+ dev_id, dev_conf->nb_event_port_enqueue_depth,
+ info.max_event_port_enqueue_depth);
+ return -EINVAL;
+ }
+
+ /* Copy the dev_conf parameter into the dev structure */
+ memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
+ /* Setup new number of queues and reconfigure device. */
+ diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
+ if (diag != 0) {
+ RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
+ dev_id, diag);
+ return diag;
+ }
+
+ /* Setup new number of ports and reconfigure device. */
+ diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
+ if (diag != 0) {
+ rte_event_dev_queue_config(dev, 0);
+ RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
+ dev_id, diag);
+ return diag;
+ }
+
+ /* Configure the device */
+ diag = (*dev->dev_ops->dev_configure)(dev);
+ if (diag != 0) {
+ RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+ rte_event_dev_queue_config(dev, 0);
+ rte_event_dev_port_config(dev, 0);
+ }
+
+ dev->data->event_dev_cap = info.event_dev_cap;
+ return diag;
+}
+
+static inline int
+is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ if (queue_id < dev->data->nb_queues && queue_id <
+ RTE_EVENT_MAX_QUEUES_PER_DEV)
+ return 1;
+ else
+ return 0;
+}
+
+int
+rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (queue_conf == NULL)
+ return -EINVAL;
+
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
+ memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+ (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
+ return 0;
+}
+
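+/* A queue accepts ATOMIC events if it is configured as ALL_TYPES or
+ * ATOMIC_ONLY.
+ */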
+static inline int
+is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
+{
+ if (queue_conf && (
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK)
+ == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK)
+ == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+ ))
+ return 1;
+ else
+ return 0;
+}
+
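+/* A queue accepts ORDERED events if it is configured as ALL_TYPES or
+ * ORDERED_ONLY.
+ */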
+static inline int
+is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
+{
+ if (queue_conf && (
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK)
+ == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK)
+ == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+ ))
+ return 1;
+ else
+ return 0;
+}
+
+int
+rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct rte_eventdev *dev;
+ struct rte_event_queue_conf def_conf;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ /* Check nb_atomic_flows limit */
+ if (is_valid_atomic_queue_conf(queue_conf)) {
+ if (queue_conf->nb_atomic_flows == 0 ||
+ queue_conf->nb_atomic_flows >
+ dev->data->dev_conf.nb_event_queue_flows) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
+ dev_id, queue_id, queue_conf->nb_atomic_flows,
+ dev->data->dev_conf.nb_event_queue_flows);
+ return -EINVAL;
+ }
+ }
+
+ /* Check nb_atomic_order_sequences limit */
+ if (is_valid_ordered_queue_conf(queue_conf)) {
+ if (queue_conf->nb_atomic_order_sequences == 0 ||
+ queue_conf->nb_atomic_order_sequences >
+ dev->data->dev_conf.nb_event_queue_flows) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
+ dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
+ dev->data->dev_conf.nb_event_queue_flows);
+ return -EINVAL;
+ }
+ }
+
+ if (dev->data->dev_started) {
+ RTE_EDEV_LOG_ERR(
+ "device %d must be stopped to allow queue setup", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
+
+ if (queue_conf == NULL) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
+ -ENOTSUP);
+ (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
+ queue_conf = &def_conf;
+ }
+
+ dev->data->queues_prio[queue_id] = queue_conf->priority;
+ return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
+}
+
+uint8_t
+rte_event_queue_count(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ return dev->data->nb_queues;
+}
+
+uint8_t
+rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
+ return dev->data->queues_prio[queue_id];
+ else
+ return RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static inline int
+is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
+{
+ if (port_id < dev->data->nb_ports)
+ return 1;
+ else
+ return 0;
+}
+
+int
+rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (port_conf == NULL)
+ return -EINVAL;
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
+ memset(port_conf, 0, sizeof(struct rte_event_port_conf));
+ (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
+ return 0;
+}
+
+int
+rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct rte_eventdev *dev;
+ struct rte_event_port_conf def_conf;
+ int diag;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ /* Check new_event_threshold limit */
+ if ((port_conf && !port_conf->new_event_threshold) ||
+ (port_conf && port_conf->new_event_threshold >
+ dev->data->dev_conf.nb_events_limit)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
+ dev_id, port_id, port_conf->new_event_threshold,
+ dev->data->dev_conf.nb_events_limit);
+ return -EINVAL;
+ }
+
+ /* Check dequeue_depth limit */
+ if ((port_conf && !port_conf->dequeue_depth) ||
+ (port_conf && port_conf->dequeue_depth >
+ dev->data->dev_conf.nb_event_port_dequeue_depth)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
+ dev_id, port_id, port_conf->dequeue_depth,
+ dev->data->dev_conf.nb_event_port_dequeue_depth);
+ return -EINVAL;
+ }
+
+ /* Check enqueue_depth limit */
+ if ((port_conf && !port_conf->enqueue_depth) ||
+ (port_conf && port_conf->enqueue_depth >
+ dev->data->dev_conf.nb_event_port_enqueue_depth)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
+ dev_id, port_id, port_conf->enqueue_depth,
+ dev->data->dev_conf.nb_event_port_enqueue_depth);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ RTE_EDEV_LOG_ERR(
+ "device %d must be stopped to allow port setup", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
+
+ if (port_conf == NULL) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
+ -ENOTSUP);
+ (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
+ port_conf = &def_conf;
+ }
+
+ dev->data->ports_dequeue_depth[port_id] =
+ port_conf->dequeue_depth;
+ dev->data->ports_enqueue_depth[port_id] =
+ port_conf->enqueue_depth;
+
+ diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
+
+	/* Unlink all the queues from this port (default state after setup) */
+ if (!diag)
+ diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
+
+ if (diag < 0)
+ return diag;
+
+ return 0;
+}
+
+uint8_t
+rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ return dev->data->ports_dequeue_depth[port_id];
+}
+
+uint8_t
+rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ return dev->data->ports_enqueue_depth[port_id];
+}
+
+uint8_t
+rte_event_port_count(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ return dev->data->nb_ports;
+}
+
+int
+rte_event_port_link(uint8_t dev_id, uint8_t port_id,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct rte_eventdev *dev;
+ uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint16_t *links_map;
+ int i, diag;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ if (queues == NULL) {
+ for (i = 0; i < dev->data->nb_queues; i++)
+ queues_list[i] = i;
+
+ queues = queues_list;
+ nb_links = dev->data->nb_queues;
+ }
+
+ if (priorities == NULL) {
+ for (i = 0; i < nb_links; i++)
+ priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
+ priorities = priorities_list;
+ }
+
+ for (i = 0; i < nb_links; i++)
+ if (queues[i] >= dev->data->nb_queues)
+ return -EINVAL;
+
+ diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
+ queues, priorities, nb_links);
+ if (diag < 0)
+ return diag;
+
+ links_map = dev->data->links_map;
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ for (i = 0; i < diag; i++)
+ links_map[queues[i]] = (uint8_t)priorities[i];
+
+ return diag;
+}
+
+int
+rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct rte_eventdev *dev;
+ uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ int i, diag;
+ uint16_t *links_map;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ if (queues == NULL) {
+ for (i = 0; i < dev->data->nb_queues; i++)
+ all_queues[i] = i;
+ queues = all_queues;
+ nb_unlinks = dev->data->nb_queues;
+ }
+
+ for (i = 0; i < nb_unlinks; i++)
+ if (queues[i] >= dev->data->nb_queues)
+ return -EINVAL;
+
+ diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
+ queues, nb_unlinks);
+
+ if (diag < 0)
+ return diag;
+
+ links_map = dev->data->links_map;
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ for (i = 0; i < diag; i++)
+ links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+
+ return diag;
+}
+
+int
+rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint8_t priorities[])
+{
+ struct rte_eventdev *dev;
+ uint16_t *links_map;
+ int i, count = 0;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ links_map = dev->data->links_map;
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ for (i = 0; i < dev->data->nb_queues; i++) {
+ if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
+ queues[count] = i;
+ priorities[count] = (uint8_t)links_map[i];
+ ++count;
+ }
+ }
+ return count;
+}
+
+int
+rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
+
+ if (timeout_ticks == NULL)
+ return -EINVAL;
+
+ return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
+}
+
+int
+rte_event_dev_dump(uint8_t dev_id, FILE *f)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
+
+ (*dev->dev_ops->dump)(dev, f);
+ return 0;
+}
+
+static int
+xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ if (dev->dev_ops->xstats_get_names != NULL)
+ return (*dev->dev_ops->xstats_get_names)(dev, mode,
+ queue_port_id,
+ NULL, NULL, 0);
+ return 0;
+}
+
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+ const int cnt_expected_entries = xstats_get_count(dev_id, mode,
+ queue_port_id);
+ if (xstats_names == NULL || cnt_expected_entries < 0 ||
+ (int)size < cnt_expected_entries)
+ return cnt_expected_entries;
+
+ /* dev_id checked above */
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ if (dev->dev_ops->xstats_get_names != NULL)
+ return (*dev->dev_ops->xstats_get_names)(dev, mode,
+ queue_port_id, xstats_names, ids, size);
+
+ return -ENOTSUP;
+}
+
+/* retrieve eventdev extended statistics */
+int
+rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id, const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_get != NULL)
+ return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
+ ids, values, n);
+ return -ENOTSUP;
+}
+
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+ unsigned int *id)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ unsigned int temp = -1;
+
+ if (id != NULL)
+ *id = (unsigned int)-1;
+ else
+ id = &temp; /* ensure driver never gets a NULL value */
+
+ /* implemented by driver */
+ if (dev->dev_ops->xstats_get_by_name != NULL)
+ return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
+ return -ENOTSUP;
+}
+
+int rte_event_dev_xstats_reset(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ if (dev->dev_ops->xstats_reset != NULL)
+ return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
+ ids, nb_ids);
+ return -ENOTSUP;
+}
+
+int
+rte_event_dev_start(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+ int diag;
+
+ RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+ if (dev->data->dev_started != 0) {
+		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
+ dev_id);
+ return 0;
+ }
+
+ diag = (*dev->dev_ops->dev_start)(dev);
+ if (diag == 0)
+ dev->data->dev_started = 1;
+ else
+ return diag;
+
+ return 0;
+}
+
+void
+rte_event_dev_stop(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
+
+ RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+ if (dev->data->dev_started == 0) {
+		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
+ dev_id);
+ return;
+ }
+
+ dev->data->dev_started = 0;
+ (*dev->dev_ops->dev_stop)(dev);
+}
+
+int
+rte_event_dev_close(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+
+ /* Device must be stopped before it can be closed */
+ if (dev->data->dev_started == 1) {
+ RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
+ dev_id);
+ return -EBUSY;
+ }
+
+ return (*dev->dev_ops->dev_close)(dev);
+}
+
+static inline int
+rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+ int socket_id)
+{
+ char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+ int n;
+
+ /* Generate memzone name */
+ n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
+ if (n >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mz = rte_memzone_reserve(mz_name,
+ sizeof(struct rte_eventdev_data),
+ socket_id, 0);
+ } else
+ mz = rte_memzone_lookup(mz_name);
+
+ if (mz == NULL)
+ return -ENOMEM;
+
+ *data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(*data, 0, sizeof(struct rte_eventdev_data));
+
+ return 0;
+}
+
+static inline uint8_t
+rte_eventdev_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
+ if (rte_eventdevs[dev_id].attached ==
+ RTE_EVENTDEV_DETACHED)
+ return dev_id;
+ }
+ return RTE_EVENT_MAX_DEVS;
+}
+
+struct rte_eventdev *
+rte_event_pmd_allocate(const char *name, int socket_id)
+{
+ struct rte_eventdev *eventdev;
+ uint8_t dev_id;
+
+ if (rte_event_pmd_get_named_dev(name) != NULL) {
+ RTE_EDEV_LOG_ERR("Event device with name %s already "
+ "allocated!", name);
+ return NULL;
+ }
+
+ dev_id = rte_eventdev_find_free_device_index();
+ if (dev_id == RTE_EVENT_MAX_DEVS) {
+ RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
+ return NULL;
+ }
+
+ eventdev = &rte_eventdevs[dev_id];
+
+ if (eventdev->data == NULL) {
+ struct rte_eventdev_data *eventdev_data = NULL;
+
+ int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
+ socket_id);
+
+ if (retval < 0 || eventdev_data == NULL)
+ return NULL;
+
+ eventdev->data = eventdev_data;
+
+ snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
+ "%s", name);
+
+ eventdev->data->dev_id = dev_id;
+ eventdev->data->socket_id = socket_id;
+ eventdev->data->dev_started = 0;
+
+ eventdev->attached = RTE_EVENTDEV_ATTACHED;
+
+ eventdev_globals.nb_devs++;
+ }
+
+ return eventdev;
+}
+
+int
+rte_event_pmd_release(struct rte_eventdev *eventdev)
+{
+ int ret;
+ char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+
+ if (eventdev == NULL)
+ return -EINVAL;
+
+ ret = rte_event_dev_close(eventdev->data->dev_id);
+ if (ret < 0)
+ return ret;
+
+ eventdev->attached = RTE_EVENTDEV_DETACHED;
+ eventdev_globals.nb_devs--;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rte_free(eventdev->data->dev_private);
+
+ /* Generate memzone name */
+ ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
+ eventdev->data->dev_id);
+ if (ret >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ mz = rte_memzone_lookup(mz_name);
+ if (mz == NULL)
+ return -ENOMEM;
+
+ ret = rte_memzone_free(mz);
+ if (ret)
+ return ret;
+ }
+
+ eventdev->data = NULL;
+ return 0;
+}
+
+struct rte_eventdev *
+rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
+ int socket_id)
+{
+ struct rte_eventdev *eventdev;
+
+ /* Allocate device structure */
+ eventdev = rte_event_pmd_allocate(name, socket_id);
+ if (eventdev == NULL)
+ return NULL;
+
+ /* Allocate private device structure */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket("eventdev device private",
+ dev_private_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (eventdev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private device"
+ " data");
+ }
+
+ return eventdev;
+}
+
+int
+rte_event_pmd_vdev_uninit(const char *name)
+{
+ struct rte_eventdev *eventdev;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ eventdev = rte_event_pmd_get_named_dev(name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ /* Free the event device */
+ rte_event_pmd_release(eventdev);
+
+ return 0;
+}
+
+int
+rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eventdev_driver *eventdrv;
+ struct rte_eventdev *eventdev;
+
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+
+ int retval;
+
+ eventdrv = (struct rte_eventdev_driver *)pci_drv;
+ if (eventdrv == NULL)
+ return -ENODEV;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_allocate(eventdev_name,
+ pci_dev->device.numa_node);
+ if (eventdev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket(
+ "eventdev private structure",
+ eventdrv->dev_private_size,
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (eventdev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ eventdev->dev = &pci_dev->device;
+ eventdev->driver = eventdrv;
+
+ /* Invoke PMD device initialization function */
+ retval = (*eventdrv->eventdev_init)(eventdev);
+ if (retval == 0)
+ return 0;
+
+ RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
+ " failed", pci_drv->driver.name,
+ (unsigned int) pci_dev->id.vendor_id,
+ (unsigned int) pci_dev->id.device_id);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eventdev->data->dev_private);
+
+ eventdev->attached = RTE_EVENTDEV_DETACHED;
+ eventdev_globals.nb_devs--;
+
+ return -ENXIO;
+}
+
+int
+rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev)
+{
+ const struct rte_eventdev_driver *eventdrv;
+ struct rte_eventdev *eventdev;
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ int ret;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_get_named_dev(eventdev_name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ eventdrv = (const struct rte_eventdev_driver *)pci_dev->driver;
+ if (eventdrv == NULL)
+ return -ENODEV;
+
+ /* Invoke PMD device un-init function */
+ if (*eventdrv->eventdev_uninit) {
+ ret = (*eventdrv->eventdev_uninit)(eventdev);
+ if (ret)
+ return ret;
+ }
+
+ /* Free event device */
+ rte_event_pmd_release(eventdev);
+
+ eventdev->dev = NULL;
+ eventdev->driver = NULL;
+
+ return 0;
+}
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
new file mode 100644
index 00000000..20e7293e
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -0,0 +1,1588 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright 2016 Cavium.
+ * Copyright 2016 Intel Corporation.
+ * Copyright 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENTDEV_H_
+#define _RTE_EVENTDEV_H_
+
+/**
+ * @file
+ *
+ * RTE Event Device API
+ *
+ * In a polling model, lcores poll ethdev ports and associated rx queues
+ * directly to look for packets. In an event driven model, by contrast, lcores
+ * call the scheduler that selects packets for them based on programmer
+ * specified criteria. The eventdev library adds support for the event driven
+ * programming model, which offers applications automatic multicore scaling,
+ * dynamic load balancing, pipelining, packet ingress order maintenance and
+ * synchronization services to simplify application packet processing.
+ *
+ * The Event Device API is composed of two parts:
+ *
+ * - The application-oriented Event API that includes functions to setup
+ * an event device (configure it, setup its queues, ports and start it), to
+ * establish links between queues and ports and to receive events, and so on.
+ *
+ * - The driver-oriented Event API that exports a function allowing
+ * an event poll mode driver (PMD) to register itself as
+ * an event device driver.
+ *
+ * Event device components:
+ *
+ * +-----------------+
+ * | +-------------+ |
+ * +-------+ | | flow 0 | |
+ * |Packet | | +-------------+ |
+ * |event | | +-------------+ |
+ * | | | | flow 1 | |port_link(port0, queue0)
+ * +-------+ | +-------------+ | | +--------+
+ * +-------+ | +-------------+ o-----v-----o |dequeue +------+
+ * |Crypto | | | flow n | | | event +------->|Core 0|
+ * |work | | +-------------+ o----+ | port 0 | | |
+ * |done ev| | event queue 0 | | +--------+ +------+
+ * +-------+ +-----------------+ |
+ * +-------+ |
+ * |Timer | +-----------------+ | +--------+
+ * |expiry | | +-------------+ | +------o |dequeue +------+
+ * |event | | | flow 0 | o-----------o event +------->|Core 1|
+ * +-------+ | +-------------+ | +----o port 1 | | |
+ * Event enqueue | +-------------+ | | +--------+ +------+
+ * o-------------> | | flow 1 | | |
+ * enqueue( | +-------------+ | |
+ * queue_id, | | | +--------+ +------+
+ * flow_id, | +-------------+ | | | |dequeue |Core 2|
+ * sched_type, | | flow n | o-----------o event +------->| |
+ * event_type, | +-------------+ | | | port 2 | +------+
+ * subev_type, | event queue 1 | | +--------+
+ * event) +-----------------+ | +--------+
+ * | | |dequeue +------+
+ * +-------+ +-----------------+ | | event +------->|Core n|
+ * |Core | | +-------------+ o-----------o port n | | |
+ * |(SW) | | | flow 0 | | | +--------+ +--+---+
+ * |event | | +-------------+ | | |
+ * +-------+ | +-------------+ | | |
+ * ^ | | flow 1 | | | |
+ * | | +-------------+ o------+ |
+ * | | +-------------+ | |
+ * | | | flow n | | |
+ * | | +-------------+ | |
+ * | | event queue n | |
+ * | +-----------------+ |
+ * | |
+ * +-----------------------------------------------------------+
+ *
+ * Event device: A hardware or software-based event scheduler.
+ *
+ * Event: A unit of scheduling that encapsulates a packet or other datatype,
+ * such as a SW generated event from the CPU, a crypto work completion
+ * notification, or a timer expiry event notification, as well as metadata.
+ * The metadata includes flow ID, scheduling type, event priority, event_type,
+ * sub_event_type etc.
+ *
+ * Event queue: A queue containing events that are scheduled by the event dev.
+ * An event queue contains events of different flows associated with scheduling
+ * types, such as atomic, ordered, or parallel.
+ *
+ * Event port: An application's interface into the event dev for enqueue and
+ * dequeue operations. Each event port can be linked with one or more
+ * event queues for dequeue operations.
+ *
+ * By default, all the functions of the Event Device API exported by a PMD
+ * are lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target object. For instance,
+ * the dequeue function of a PMD cannot be invoked in parallel on two logical
+ * cores to operate on the same event port. Of course, this function
+ * can be invoked in parallel by different logical cores on different ports.
+ * It is the responsibility of the upper level application to enforce this rule.
+ *
+ * In all functions of the Event API, the Event device is
+ * designated by an integer >= 0 named the device identifier *dev_id*.
+ *
+ * At the Event driver level, Event devices are represented by a generic
+ * data structure of type *rte_event_dev*.
+ *
+ * Event devices are dynamically registered during the PCI/SoC device probing
+ * phase performed at EAL initialization time.
+ * When an Event device is being probed, a *rte_event_dev* structure and
+ * a new device identifier are allocated for that device. Then, the
+ * event_dev_init() function supplied by the Event driver matching the probed
+ * device is invoked to properly initialize the device.
+ *
+ * The role of the device init function consists of resetting the hardware or
+ * software event driver implementations.
+ *
+ * If the device init operation is successful, the correspondence between
+ * the device identifier assigned to the new device and its associated
+ * *rte_event_dev* structure is effectively registered.
+ * Otherwise, both the *rte_event_dev* structure and the device identifier are
+ * freed.
+ *
+ * The functions exported by the application Event API to setup a device
+ * designated by its device identifier must be invoked in the following order:
+ * - rte_event_dev_configure()
+ * - rte_event_queue_setup()
+ * - rte_event_port_setup()
+ * - rte_event_port_link()
+ * - rte_event_dev_start()
+ *
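+ * A minimal sketch of this sequence for a single queue and port, using the
+ * default queue and port configuration (a NULL conf selects the defaults)
+ * and linking the port to all queues; error handling is omitted and filling
+ * *config* is left out for brevity:
+ * \code{.c}
+ *	struct rte_event_dev_config config;
+ *
+ *	(fill config within the limits from rte_event_dev_info_get())
+ *	rte_event_dev_configure(dev_id, &config);
+ *	rte_event_queue_setup(dev_id, 0, NULL);
+ *	rte_event_port_setup(dev_id, 0, NULL);
+ *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);
+ *	rte_event_dev_start(dev_id);
+ * \endcode
+ *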
+ * Then, the application can invoke, in any order, the functions
+ * exported by the Event API to schedule events, dequeue events, enqueue
+ * events, establish or remove event queue to event port links, and so on.
+ *
+ * An application may use rte_event_[queue/port]_default_conf_get() to get the
+ * default configuration to set up an event queue or event port, overriding
+ * a few of the default values as needed.
+ *
+ * If the application wants to change the configuration (i.e. call
+ * rte_event_dev_configure(), rte_event_queue_setup(), or
+ * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
+ * device and then do the reconfiguration before calling rte_event_dev_start()
+ * again. The schedule, enqueue and dequeue functions should not be invoked
+ * when the device is stopped.
+ *
+ * Finally, an application can close an Event device by invoking the
+ * rte_event_dev_close() function.
+ *
+ * Each function of the application Event API invokes a specific function
+ * of the PMD that controls the target device designated by its device
+ * identifier.
+ *
+ * For this purpose, all device-specific functions of an Event driver are
+ * supplied through a set of pointers contained in a generic structure of type
+ * *event_dev_ops*.
+ * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
+ * structure by the device init function of the Event driver, which is
+ * invoked during the PCI/SoC device probing phase, as explained earlier.
+ *
+ * In other words, each function of the Event API simply retrieves the
+ * *rte_event_dev* structure associated with the device identifier and
+ * performs an indirect invocation of the corresponding driver function
+ * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
+ *
+ * For performance reasons, the address of the fast-path functions of the
+ * Event driver is not contained in the *event_dev_ops* structure.
+ * Instead, they are directly stored at the beginning of the *rte_event_dev*
+ * structure to avoid an extra indirect memory access during their invocation.
+ *
+ * RTE event device drivers do not use interrupts for enqueue or dequeue
+ * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue
+ * functions to applications.
+ *
+ * An event driven application has the following typical workflow on fastpath:
+ * \code{.c}
+ * while (1) {
+ *
+ * rte_event_schedule(dev_id);
+ *
+ * rte_event_dequeue(...);
+ *
+ * (event processing)
+ *
+ * rte_event_enqueue(...);
+ * }
+ * \endcode
+ *
+ * Events are injected into the event device through the *enqueue* operation
+ * by event producers in the system. The typical event producers are the
+ * ethdev subsystem (packet events), the CPU/SW (events marking different
+ * stages of application processing), cryptodev (crypto work completion
+ * notifications), etc.
+ *
+ * The *dequeue* operation gets one or more events from the event ports.
+ * The application processes the events and, if it is an intermediate stage of
+ * event processing, sends them to a downstream event queue through
+ * rte_event_enqueue_burst(). At the final stage, the application may hand the
+ * packet/event to a different subsystem, such as ethdev, to send it on the
+ * wire using the rte_eth_tx_burst() API.
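+ *
+ * A minimal sketch of such an intermediate worker stage (event port 0;
+ * next_queue_id and timeout_ticks are illustrative; error handling omitted):
+ * \code{.c}
+ *	struct rte_event ev;
+ *	uint16_t nb;
+ *
+ *	nb = rte_event_dequeue_burst(dev_id, 0, &ev, 1, timeout_ticks);
+ *	if (nb) {
+ *		(process ev, e.g. ev.mbuf)
+ *		ev.queue_id = next_queue_id;
+ *		ev.op = RTE_EVENT_OP_FORWARD;
+ *		rte_event_enqueue_burst(dev_id, 0, &ev, 1);
+ *	}
+ * \endcode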
+ *
+ * The point at which events are scheduled to ports depends on the device.
+ * For hardware devices, scheduling occurs asynchronously without any software
+ * intervention. Software schedulers can either be distributed
+ * (each worker thread schedules events to its own port) or centralized
+ * (a dedicated thread schedules to all ports). Distributed software schedulers
+ * perform the scheduling in rte_event_dequeue_burst(), whereas centralized
+ * scheduler logic is located in rte_event_schedule().
+ * If the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set, the
+ * device is centralized and thus needs a dedicated scheduling
+ * thread that repeatedly calls rte_event_schedule().
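+ *
+ * A minimal sketch of such a dedicated scheduler loop, to be launched on its
+ * own lcore (e.g. via rte_eal_remote_launch(); the function name and
+ * termination handling are illustrative):
+ * \code{.c}
+ *	static int
+ *	scheduler_loop(void *arg)
+ *	{
+ *		uint8_t dev_id = *(uint8_t *)arg;
+ *
+ *		while (1)
+ *			rte_event_schedule(dev_id);
+ *		return 0;
+ *	}
+ * \endcode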
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_errno.h>
+
+struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
+
+/* Event device capability bitmap flags */
+#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
+/**< Event scheduling prioritization is based on the priority associated with
+ * each event queue.
+ *
+ * @see rte_event_queue_setup()
+ */
+#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
+/**< Event scheduling prioritization is based on the priority associated with
+ * each event. Priority of each event is supplied in *rte_event* structure
+ * on each enqueue operation.
+ *
+ * @see rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
+/**< Event device operates in distributed scheduling mode.
+ * In distributed scheduling mode, event scheduling happens in HW or
+ * rte_event_dequeue_burst() or the combination of these two.
+ * If the flag is not set then eventdev is centralized and thus needs a
+ * dedicated scheduling thread that repeatedly calls rte_event_schedule().
+ *
+ * @see rte_event_schedule(), rte_event_dequeue_burst()
+ */
+#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
+/**< Event device is capable of enqueuing events of any type to any queue.
+ * If this capability is not set, the queue only supports events of the
+ * *RTE_EVENT_QUEUE_CFG_* type that it was created with.
+ *
+ * @see RTE_EVENT_QUEUE_CFG_* values
+ */
+
+/* Event device priority levels */
+#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
+/**< Highest priority expressed across eventdev subsystem
+ * @see rte_event_queue_setup(), rte_event_enqueue_burst()
+ * @see rte_event_port_link()
+ */
+#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
+/**< Normal priority expressed across eventdev subsystem
+ * @see rte_event_queue_setup(), rte_event_enqueue_burst()
+ * @see rte_event_port_link()
+ */
+#define RTE_EVENT_DEV_PRIORITY_LOWEST 255
+/**< Lowest priority expressed across eventdev subsystem
+ * @see rte_event_queue_setup(), rte_event_enqueue_burst()
+ * @see rte_event_port_link()
+ */
+
+/**
+ * Get the total number of event devices that have been successfully
+ * initialised.
+ *
+ * @return
+ * The total number of usable event devices.
+ */
+uint8_t
+rte_event_dev_count(void);
+
+/**
+ * Get the device identifier for the named event device.
+ *
+ * @param name
+ * Event device name to select the event device identifier.
+ *
+ * @return
+ * Returns event device identifier on success.
+ * - <0: Failure to find named event device.
+ */
+int
+rte_event_dev_get_dev_id(const char *name);
+
+/**
+ * Return the NUMA socket to which a device is connected.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @return
+ * The NUMA socket id to which the device is connected or
+ * a default of zero if the socket could not be determined.
+ * -EINVAL if the dev_id value is out of range.
+ */
+int
+rte_event_dev_socket_id(uint8_t dev_id);
+
+/**
+ * Event device information
+ */
+struct rte_event_dev_info {
+ const char *driver_name; /**< Event driver name */
+ struct rte_device *dev; /**< Device information */
+ uint32_t min_dequeue_timeout_ns;
+ /**< Minimum supported global dequeue timeout(ns) by this device */
+ uint32_t max_dequeue_timeout_ns;
+ /**< Maximum supported global dequeue timeout(ns) by this device */
+ uint32_t dequeue_timeout_ns;
+ /**< Configured global dequeue timeout(ns) for this device */
+ uint8_t max_event_queues;
+ /**< Maximum event_queues supported by this device */
+ uint32_t max_event_queue_flows;
+	/**< Maximum supported flows in an event queue by this device */
+ uint8_t max_event_queue_priority_levels;
+ /**< Maximum number of event queue priority levels by this device.
+ * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
+ */
+ uint8_t max_event_priority_levels;
+ /**< Maximum number of event priority levels by this device.
+ * Valid when the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability
+ */
+ uint8_t max_event_ports;
+ /**< Maximum number of event ports supported by this device */
+ uint8_t max_event_port_dequeue_depth;
+	/**< Maximum number of events that can be dequeued at a time from an
+	 * event port of this device.
+ * A device that does not support bulk dequeue will set this as 1.
+ */
+ uint32_t max_event_port_enqueue_depth;
+	/**< Maximum number of events that can be enqueued at a time to an
+	 * event port of this device.
+ * A device that does not support bulk enqueue will set this as 1.
+ */
+ int32_t max_num_events;
+ /**< A *closed system* event dev has a limit on the number of events it
+ * can manage at a time. An *open system* event dev does not have a
+ * limit and will specify this as -1.
+ */
+ uint32_t event_dev_cap;
+ /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+};
+
+/**
+ * Retrieve the contextual information of an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param[out] dev_info
+ * A pointer to a structure of type *rte_event_dev_info* to be filled with the
+ * contextual information of the device.
+ *
+ * @return
+ * - 0: Success, driver updates the contextual information of the event device
+ * - <0: Error code returned by the driver info get function.
+ *
+ */
+int
+rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
+
+/* Event device configuration bitmap flags */
+#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
+/**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
+ * @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
+ */
+
+/** Event device configuration structure */
+struct rte_event_dev_config {
+ uint32_t dequeue_timeout_ns;
+ /**< rte_event_dequeue_burst() timeout on this device.
+ * This value should be in the range of *min_dequeue_timeout_ns* and
+ * *max_dequeue_timeout_ns*, which were previously provided by
+ * rte_event_dev_info_get()
+ * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ */
+ int32_t nb_events_limit;
+ /**< In a *closed system* this field is the limit on maximum number of
+ * events that can be inflight in the eventdev at a given time. The
+ * limit is required to ensure that the finite space in a closed system
+ * is not overwhelmed. The value cannot exceed the *max_num_events*
+ * as provided by rte_event_dev_info_get().
+ * This value should be set to -1 for *open system*.
+ */
+ uint8_t nb_event_queues;
+ /**< Number of event queues to configure on this device.
+ * This value cannot exceed the *max_event_queues* which was previously
+ * provided by rte_event_dev_info_get()
+ */
+ uint8_t nb_event_ports;
+ /**< Number of event ports to configure on this device.
+ * This value cannot exceed the *max_event_ports* which was previously
+ * provided by rte_event_dev_info_get()
+ */
+ uint32_t nb_event_queue_flows;
+ /**< Number of flows for any event queue on this device.
+ * This value cannot exceed the *max_event_queue_flows* which was
+ * previously provided by rte_event_dev_info_get()
+ */
+ uint32_t nb_event_port_dequeue_depth;
+ /**< Maximum number of events that can be dequeued at a time from an
+ * event port of this device.
+ * This value cannot exceed the *max_event_port_dequeue_depth*
+ * which was previously provided by rte_event_dev_info_get()
+ * @see rte_event_port_setup()
+ */
+ uint32_t nb_event_port_enqueue_depth;
+ /**< Maximum number of events that can be enqueued at a time to an
+ * event port of this device.
+ * This value cannot exceed the *max_event_port_enqueue_depth*
+ * which was previously provided by rte_event_dev_info_get()
+ * @see rte_event_port_setup()
+ */
+ uint32_t event_dev_cfg;
+ /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+};
+
+/**
+ * Configure an event device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * The caller may use rte_event_dev_info_get() to get the capabilities of
+ * the resources available for this event device.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ * @param dev_conf
+ * The event device configuration structure.
+ *
+ * @return
+ * - 0: Success, device configured.
+ * - <0: Error code returned by the driver configuration function.
+ */
+int
+rte_event_dev_configure(uint8_t dev_id,
+ const struct rte_event_dev_config *dev_conf);
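+
+/*
+ * A configuration sketch continuing the discovery example above; the
+ * queue/port counts and the flow count are assumptions and must respect
+ * the limits reported in *info*:
+ *
+ *	struct rte_event_dev_config cfg = {
+ *		.dequeue_timeout_ns = info.min_dequeue_timeout_ns,
+ *		.nb_events_limit = info.max_num_events,
+ *		.nb_event_queues = 2,
+ *		.nb_event_ports = 2,
+ *		.nb_event_queue_flows = 1024,
+ *		.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth,
+ *		.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth,
+ *	};
+ *
+ *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
+ *		rte_exit(EXIT_FAILURE, "configure failed\n");
+ */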
+
+
+/* Event queue specific APIs */
+
+/* Event queue configuration bitmap flags */
+#define RTE_EVENT_QUEUE_CFG_TYPE_MASK (3ULL << 0)
+/**< Mask for event queue schedule type configuration request */
+#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (0ULL << 0)
+/**< Allow ATOMIC, ORDERED and PARALLEL schedule type enqueue
+ *
+ * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
+ * @see rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY (1ULL << 0)
+/**< Allow only ATOMIC schedule type enqueue
+ *
+ * The rte_event_enqueue_burst() result is undefined if the queue is
+ * configured as ATOMIC only and an event's sched_type != RTE_SCHED_TYPE_ATOMIC
+ *
+ * @see RTE_SCHED_TYPE_ATOMIC, rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY (2ULL << 0)
+/**< Allow only ORDERED schedule type enqueue
+ *
+ * The rte_event_enqueue_burst() result is undefined if the queue is
+ * configured as ORDERED only and an event's sched_type != RTE_SCHED_TYPE_ORDERED
+ *
+ * @see RTE_SCHED_TYPE_ORDERED, rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY (3ULL << 0)
+/**< Allow only PARALLEL schedule type enqueue
+ *
+ * The rte_event_enqueue_burst() result is undefined if the queue is
+ * configured as PARALLEL only and an event's sched_type != RTE_SCHED_TYPE_PARALLEL
+ *
+ * @see RTE_SCHED_TYPE_PARALLEL, rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 2)
+/**< This event queue links only to a single event port.
+ *
+ * @see rte_event_port_setup(), rte_event_port_link()
+ */
+
+/** Event queue configuration structure */
+struct rte_event_queue_conf {
+ uint32_t nb_atomic_flows;
+ /**< The maximum number of active flows this queue can track at any
+ * given time. The value must be in the range
+ * [1, nb_event_queue_flows], where nb_event_queue_flows was
+ * previously supplied to rte_event_dev_configure().
+ */
+ uint32_t nb_atomic_order_sequences;
+ /**< The maximum number of outstanding events waiting to be
+ * reordered by this queue. In other words, the number of entries in
+ * this queue's reorder buffer. When the number of events in the
+ * reorder buffer reaches *nb_atomic_order_sequences*, the
+ * scheduler cannot schedule further events from this queue and an
+ * invalid event will be returned from dequeue until one or more
+ * entries are freed up/released.
+ * The value must be in the range [1, nb_event_queue_flows], where
+ * nb_event_queue_flows was previously supplied to rte_event_dev_configure().
+ */
+ uint32_t event_queue_cfg; /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
+ uint8_t priority;
+ /**< Priority for this event queue relative to other event queues.
+ * The requested priority should be in the range of
+ * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested priority to
+ * event device supported priority value.
+ * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
+ */
+};
+
+/**
+ * Retrieve the default configuration information of an event queue designated
+ * by its *queue_id* from the event driver for an event device.
+ *
+ * This function is intended to be used in conjunction with
+ * rte_event_queue_setup(), where the caller needs to set up the queue by
+ * overriding a few default values.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the event queue to get the configuration information.
+ * The value must be in the range [0, nb_event_queues - 1]
+ * previously supplied to rte_event_dev_configure().
+ * @param[out] queue_conf
+ * The pointer to the default event queue configuration data.
+ * @return
+ * - 0: Success, driver updates the default event queue configuration data.
+ * - <0: Error code returned by the driver info get function.
+ *
+ * @see rte_event_queue_setup()
+ *
+ */
+int
+rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Allocate and set up an event queue for an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the event queue to setup. The value must be in the range
+ * [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
+ * @param queue_conf
+ * The pointer to the configuration data to be used for the event queue.
+ * NULL value is allowed, in which case the default configuration is used.
+ *
+ * @see rte_event_queue_default_conf_get()
+ *
+ * @return
+ * - 0: Success, event queue correctly set up.
+ * - <0: event queue configuration failed
+ */
+int
+rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf);
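+
+/*
+ * A setup sketch that starts from the driver defaults and overrides
+ * only the priority, as suggested above:
+ *
+ *	struct rte_event_queue_conf qconf;
+ *
+ *	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
+ *	qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+ *	if (rte_event_queue_setup(dev_id, 0, &qconf) < 0)
+ *		rte_exit(EXIT_FAILURE, "queue 0 setup failed\n");
+ */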
+
+/**
+ * Get the number of event queues on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @return
+ * - The number of configured event queues
+ */
+uint8_t
+rte_event_queue_count(uint8_t dev_id);
+
+/**
+ * Get the priority of the event queue on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @param queue_id
+ * Event queue identifier.
+ * @return
+ * - If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then the
+ * configured priority of the event queue in
+ * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST] range
+ * else the value RTE_EVENT_DEV_PRIORITY_NORMAL
+ */
+uint8_t
+rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id);
+
+/* Event port specific APIs */
+
+/** Event port configuration structure */
+struct rte_event_port_conf {
+ int32_t new_event_threshold;
+ /**< A backpressure threshold for new event enqueues on this port.
+ * Use for *closed system* event dev where event capacity is limited,
+ * and cannot exceed the capacity of the event dev.
+ * Configuring ports with different thresholds can make higher priority
+ * traffic less likely to be backpressured.
+ * For example, a port used to inject NIC Rx packets into the event dev
+ * can have a lower threshold so as not to overwhelm the device,
+ * while ports used for worker pools can have a higher threshold.
+ * This value cannot exceed the *nb_events_limit*
+ * which was previously supplied to rte_event_dev_configure().
+ * This should be set to '-1' for *open system*.
+ */
+ uint16_t dequeue_depth;
+ /**< Configure number of bulk dequeues for this event port.
+ * This value cannot exceed the *nb_event_port_dequeue_depth*
+ * which was previously supplied to rte_event_dev_configure()
+ */
+ uint16_t enqueue_depth;
+ /**< Configure number of bulk enqueues for this event port.
+ * This value cannot exceed the *nb_event_port_enqueue_depth*
+ * which was previously supplied to rte_event_dev_configure()
+ */
+};
+
+/**
+ * Retrieve the default configuration information of an event port designated
+ * by its *port_id* from the event driver for an event device.
+ *
+ * This function is intended to be used in conjunction with
+ * rte_event_port_setup(), where the caller needs to set up the port by
+ * overriding a few default values.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The index of the event port to get the configuration information.
+ * The value must be in the range [0, nb_event_ports - 1]
+ * previously supplied to rte_event_dev_configure().
+ * @param[out] port_conf
+ * The pointer to the default event port configuration data
+ * @return
+ * - 0: Success, driver updates the default event port configuration data.
+ * - <0: Error code returned by the driver info get function.
+ *
+ * @see rte_event_port_setup()
+ *
+ */
+int
+rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
+ struct rte_event_port_conf *port_conf);
+
+/**
+ * Allocate and set up an event port for an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The index of the event port to setup. The value must be in the range
+ * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
+ * @param port_conf
+ * The pointer to the configuration data to be used for the queue.
+ * NULL value is allowed, in which case the default configuration is used.
+ *
+ * @see rte_event_port_default_conf_get()
+ *
+ * @return
+ * - 0: Success, event port correctly set up.
+ * - <0: Port configuration failed
+ * - (-EDQUOT) Quota exceeded (the application tried to link a queue
+ * configured with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one
+ * event port)
+ */
+int
+rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf);
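+
+/*
+ * The same default-then-override pattern for a port; halving the new
+ * event threshold for an injection port is only an illustration:
+ *
+ *	struct rte_event_port_conf pconf;
+ *
+ *	rte_event_port_default_conf_get(dev_id, 0, &pconf);
+ *	pconf.new_event_threshold /= 2;
+ *	if (rte_event_port_setup(dev_id, 0, &pconf) < 0)
+ *		rte_exit(EXIT_FAILURE, "port 0 setup failed\n");
+ */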
+
+/**
+ * Get the dequeue depth configured for an event port designated
+ * by its *port_id* on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @param port_id
+ * Event port identifier.
+ * @return
+ * - The configured dequeue depth for the event port
+ *
+ * @see rte_event_dequeue_burst()
+ */
+uint8_t
+rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id);
+
+/**
+ * Get the enqueue depth configured for an event port designated
+ * by its *port_id* on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @param port_id
+ * Event port identifier.
+ * @return
+ * - The configured enqueue depth for the event port
+ *
+ * @see rte_event_enqueue_burst()
+ */
+uint8_t
+rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id);
+
+/**
+ * Get the number of ports on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @return
+ * - The number of configured ports
+ */
+uint8_t
+rte_event_port_count(uint8_t dev_id);
+
+/**
+ * Start an event device.
+ *
+ * The device start step is the last one: it sets the event queues to start
+ * accepting events and begins scheduling events to event ports.
+ *
+ * On success, all basic functions exported by the API (event enqueue,
+ * event dequeue and so on) can be invoked.
+ *
+ * @param dev_id
+ * Event device identifier
+ * @return
+ * - 0: Success, device started.
+ * - -ESTALE: Not all ports of the device are configured
+ * - -ENOLINK: Not all queues are linked, which could lead to deadlock.
+ */
+int
+rte_event_dev_start(uint8_t dev_id);
+
+/**
+ * Stop an event device. The device can be restarted with a call to
+ * rte_event_dev_start()
+ *
+ * @param dev_id
+ * Event device identifier.
+ */
+void
+rte_event_dev_stop(uint8_t dev_id);
+
+/**
+ * Close an event device. The device cannot be restarted!
+ *
+ * @param dev_id
+ * Event device identifier
+ *
+ * @return
+ * - 0 on successfully closing device
+ * - <0 on failure to close device
+ * - (-EAGAIN) if device is busy
+ */
+int
+rte_event_dev_close(uint8_t dev_id);
+
+/* Scheduler type definitions */
+#define RTE_SCHED_TYPE_ORDERED 0
+/**< Ordered scheduling
+ *
+ * Events from an ordered flow of an event queue can be scheduled to multiple
+ * ports for concurrent processing while maintaining the original event order.
+ * This scheme enables the user to achieve high single flow throughput by
+ * avoiding SW synchronization for ordering between ports bound to cores.
+ *
+ * The source flow ordering from an event queue is maintained when events are
+ * enqueued to their destination queue within the same ordered flow context.
+ * An event port holds the context until the application calls
+ * rte_event_dequeue_burst() from the same port, which implicitly releases
+ * the context.
+ * The user may allow the scheduler to release the context earlier than that
+ * by invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation.
+ *
+ * Events from the source queue appear in their original order when dequeued
+ * from a destination queue.
+ * Event ordering is based on the received event(s), but also other
+ * (newly allocated or stored) events are ordered when enqueued within the same
+ * ordered context. Events not enqueued (e.g. released or stored) within the
+ * context are considered missing from reordering and are skipped at this time
+ * (but can be ordered again within another context).
+ *
+ * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
+ */
+
+#define RTE_SCHED_TYPE_ATOMIC 1
+/**< Atomic scheduling
+ *
+ * Events from an atomic flow of an event queue can be scheduled only to a
+ * single port at a time. The port is guaranteed to have exclusive (atomic)
+ * access to the associated flow context, which enables the user to avoid SW
+ * synchronization. Atomic flows also help to maintain event ordering
+ * since only one port at a time can process events from a flow of an
+ * event queue.
+ *
+ * The atomic queue synchronization context is dedicated to the port until
+ * the application calls rte_event_dequeue_burst() from the same port,
+ * which implicitly releases the context. The user may allow the scheduler to
+ * release the context earlier than that by invoking rte_event_enqueue_burst()
+ * with RTE_EVENT_OP_RELEASE operation.
+ *
+ * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
+ */
+
+#define RTE_SCHED_TYPE_PARALLEL 2
+/**< Parallel scheduling
+ *
+ * The scheduler performs priority scheduling, load balancing, etc. functions
+ * but does not provide additional event synchronization or ordering.
+ * It is free to schedule events from a single parallel flow of an event queue
+ * to multiple event ports for concurrent processing.
+ * The application is responsible for flow context synchronization and
+ * event ordering (SW synchronization).
+ *
+ * @see rte_event_queue_setup(), rte_event_dequeue_burst()
+ */
+
+/* Event types to classify the event source */
+#define RTE_EVENT_TYPE_ETHDEV 0x0
+/**< The event generated from ethdev subsystem */
+#define RTE_EVENT_TYPE_CRYPTODEV 0x1
+/**< The event generated from cryptodev subsystem */
+#define RTE_EVENT_TYPE_TIMERDEV 0x2
+/**< The event generated from timerdev subsystem */
+#define RTE_EVENT_TYPE_CPU 0x3
+/**< The event generated from cpu for pipelining.
+ * Application may use *sub_event_type* to further classify the event
+ */
+#define RTE_EVENT_TYPE_MAX 0x10
+/**< Maximum number of event types */
+
+/* Event enqueue operations */
+#define RTE_EVENT_OP_NEW 0
+/**< The event producers use this operation to inject a new event into the
+ * event device.
+ */
+#define RTE_EVENT_OP_FORWARD 1
+/**< The CPU uses this operation to forward the event to a different event
+ * queue or change it to a new application specific flow or schedule type to
+ * enable pipelining
+ */
+#define RTE_EVENT_OP_RELEASE 2
+/**< Release the flow context associated with the schedule type.
+ *
+ * If the current flow's schedule type is *RTE_SCHED_TYPE_ATOMIC*
+ * then this operation hints the scheduler that the user has completed critical
+ * section processing in the current atomic context.
+ * The scheduler is now allowed to schedule events from the same flow from
+ * an event queue to another port. However, the context may be still held
+ * until the next rte_event_dequeue_burst() call, this call allows but does not
+ * force the scheduler to release the context early.
+ *
+ * Early atomic context release may increase parallelism and thus system
+ * performance, but the user needs to design carefully the split into critical
+ * vs non-critical sections.
+ *
+ * If the current flow's schedule type is *RTE_SCHED_TYPE_ORDERED*
+ * then this operation hints the scheduler that the user has done all that is
+ * needed to maintain event order in the current ordered context.
+ * The scheduler is allowed to release the ordered context of this port and
+ * avoid reordering any following enqueues.
+ *
+ * Early ordered context release may increase parallelism and thus system
+ * performance.
+ *
+ * If the current flow's schedule type is *RTE_SCHED_TYPE_PARALLEL*
+ * or no scheduling context is held then this operation may be a NOOP,
+ * depending on the implementation.
+ *
+ */
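+
+/*
+ * A sketch of early context release from a worker, assuming the last
+ * event dequeued on this port, *ev*, holds an atomic or ordered context:
+ *
+ *	struct rte_event rel = ev;
+ *
+ *	rel.op = RTE_EVENT_OP_RELEASE;
+ *	rte_event_enqueue_burst(dev_id, port_id, &rel, 1);
+ */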
+
+/**
+ * The generic *rte_event* structure to hold the event attributes
+ * for dequeue and enqueue operation
+ */
+RTE_STD_C11
+struct rte_event {
+ /** WORD0 */
+ union {
+ uint64_t event;
+ /** Event attributes for dequeue or enqueue operation */
+ struct {
+ uint32_t flow_id:20;
+ /**< Targeted flow identifier for the enqueue and
+ * dequeue operation.
+ * The value must be in the range of
+ * [0, nb_event_queue_flows - 1] which was
+ * previously supplied to rte_event_dev_configure().
+ */
+ uint32_t sub_event_type:8;
+ /**< Sub-event types based on the event source.
+ * @see RTE_EVENT_TYPE_CPU
+ */
+ uint32_t event_type:4;
+ /**< Event type to classify the event source.
+ * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
+ */
+ uint8_t op:2;
+ /**< The type of event enqueue operation - new/forward/
+ * etc. This field is not preserved across an instance
+ * and is undefined on dequeue.
+ * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
+ */
+ uint8_t rsvd:4;
+ /**< Reserved for future use */
+ uint8_t sched_type:2;
+ /**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
+ * associated with flow id on a given event queue
+ * for the enqueue and dequeue operation.
+ */
+ uint8_t queue_id;
+ /**< Targeted event queue identifier for the enqueue or
+ * dequeue operation.
+ * The value must be in the range of
+ * [0, nb_event_queues - 1] which was previously supplied to
+ * rte_event_dev_configure().
+ */
+ uint8_t priority;
+ /**< Event priority relative to other events in the
+ * event queue. The requested priority should be in the
+ * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
+ * RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested
+ * priority to supported priority value.
+ * Valid when the device has
+ * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
+ */
+ uint8_t impl_opaque;
+ /**< Implementation specific opaque value.
+ * An implementation may use this field to hold
+ * implementation specific value to share between
+ * dequeue and enqueue operation.
+ * The application should not modify this field.
+ */
+ };
+ };
+ /** WORD1 */
+ union {
+ uint64_t u64;
+ /**< Opaque 64-bit value */
+ void *event_ptr;
+ /**< Opaque event pointer */
+ struct rte_mbuf *mbuf;
+ /**< mbuf pointer if dequeued event is associated with mbuf */
+ };
+};
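+
+/*
+ * Populating an event for injection with RTE_EVENT_OP_NEW; the flow
+ * hash *hash* and mbuf *m* are assumed to come from the application:
+ *
+ *	struct rte_event ev = {
+ *		.op = RTE_EVENT_OP_NEW,
+ *		.queue_id = 0,
+ *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ *		.event_type = RTE_EVENT_TYPE_ETHDEV,
+ *		.flow_id = hash & 0xfffff,
+ *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ *		.mbuf = m,
+ *	};
+ *
+ *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+ */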
+
+
+struct rte_eventdev_driver;
+struct rte_eventdev_ops;
+struct rte_eventdev;
+
+typedef void (*event_schedule_t)(struct rte_eventdev *dev);
+/**< @internal Schedule one or more events in the event dev. */
+
+typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
+/**< @internal Enqueue event on port of a device */
+
+typedef uint16_t (*event_enqueue_burst_t)(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks);
+/**< @internal Dequeue event from port of a device */
+
+typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+/**< @internal Dequeue burst of events from port of a device */
+
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+ int socket_id;
+ /**< Socket ID where memory is allocated */
+ uint8_t dev_id;
+ /**< Device ID for this instance */
+ uint8_t nb_queues;
+ /**< Number of event queues. */
+ uint8_t nb_ports;
+ /**< Number of event ports. */
+ void **ports;
+ /**< Array of pointers to ports. */
+ uint8_t *ports_dequeue_depth;
+ /**< Array of port dequeue depth. */
+ uint8_t *ports_enqueue_depth;
+ /**< Array of port enqueue depth. */
+ uint8_t *queues_prio;
+ /**< Array of queue priority. */
+ uint16_t *links_map;
+ /**< Memory to store queues to port connections. */
+ void *dev_private;
+ /**< PMD-specific private data */
+ uint32_t event_dev_cap;
+ /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+ struct rte_event_dev_config dev_conf;
+ /**< Configuration applied to device. */
+
+ RTE_STD_C11
+ uint8_t dev_started : 1;
+ /**< Device state: STARTED(1)/STOPPED(0) */
+
+ char name[RTE_EVENTDEV_NAME_MAX_LEN];
+ /**< Unique identifier name */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+ event_schedule_t schedule;
+ /**< Pointer to PMD schedule function. */
+ event_enqueue_t enqueue;
+ /**< Pointer to PMD enqueue function. */
+ event_enqueue_burst_t enqueue_burst;
+ /**< Pointer to PMD enqueue burst function. */
+ event_dequeue_t dequeue;
+ /**< Pointer to PMD dequeue function. */
+ event_dequeue_burst_t dequeue_burst;
+ /**< Pointer to PMD dequeue burst function. */
+
+ struct rte_eventdev_data *data;
+ /**< Pointer to device data */
+ const struct rte_eventdev_ops *dev_ops;
+ /**< Functions exported by PMD */
+ struct rte_device *dev;
+ /**< Device info. supplied by probing */
+ const struct rte_eventdev_driver *driver;
+ /**< Driver for this device */
+
+ RTE_STD_C11
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
+
+/**
+ * Schedule one or more events in the event dev.
+ *
+ * An event dev implementation may define this as a NOOP, for instance if
+ * the event dev performs its scheduling in hardware.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ */
+static inline void
+rte_event_schedule(uint8_t dev_id)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ if (*dev->schedule)
+ (*dev->schedule)(dev);
+}
+
+/**
+ * Enqueue a burst of event objects or an event object supplied in *rte_event*
+ * structure on an event device designated by its *dev_id* through the event
+ * port specified by *port_id*. Each event object specifies the event queue on
+ * which it will be enqueued.
+ *
+ * The *nb_events* parameter is the number of event objects to enqueue which are
+ * supplied in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_enqueue_burst() function returns the number of
+ * event objects it actually enqueued. A return value equal to *nb_events*
+ * means that all event objects have been enqueued.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically the value
+ * returned by rte_event_port_enqueue_depth() for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event device's queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - -ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_enqueue_depth()
+ */
+static inline uint16_t
+rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (port_id >= dev->data->nb_ports) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+
+ /*
+ * Allow zero-cost invocation of the non-burst routine when the
+ * application requests a constant nb_events of one
+ */
+ if (nb_events == 1)
+ return (*dev->enqueue)(
+ dev->data->ports[port_id], ev);
+ else
+ return (*dev->enqueue_burst)(
+ dev->data->ports[port_id], ev, nb_events);
+}
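+
+/*
+ * A common enqueue pattern: retry until the whole burst of *n* prepared
+ * events in *evs* is accepted. A real application would typically bound
+ * the retries or inspect rte_errno on shortfall:
+ *
+ *	uint16_t sent = 0;
+ *
+ *	while (sent < n)
+ *		sent += rte_event_enqueue_burst(dev_id, port_id,
+ *				evs + sent, n - sent);
+ */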
+
+/**
+ * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
+ *
+ * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
+ * then the application can use this function to convert a timeout value in
+ * nanoseconds to an implementation-specific timeout value supplied in
+ * rte_event_dequeue_burst()
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param ns
+ * Wait time in nanosecond
+ * @param[out] timeout_ticks
+ * Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
+ *
+ * @return
+ * - 0 on success.
+ * - -ENOTSUP if the device doesn't support timeouts
+ * - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
+ * - other values < 0 on failure.
+ *
+ * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ * @see rte_event_dev_configure()
+ *
+ */
+int
+rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
+ uint64_t *timeout_ticks);
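+
+/*
+ * A conversion sketch for a 100 microsecond dequeue budget, falling
+ * back to no-wait if the device does not support timeouts:
+ *
+ *	uint64_t ticks;
+ *
+ *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
+ *		ticks = 0;
+ */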
+
+/**
+ * Dequeue a burst of event objects or an event object from the event port
+ * designated by its *event_port_id*, on an event device designated
+ * by its *dev_id*.
+ *
+ * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
+ * algorithm, as each eventdev driver may have different criteria for
+ * scheduling an event. However, in general, from an application perspective
+ * the scheduler may use the following scheme to dispatch an event to the port.
+ *
+ * 1) Selection of event queue based on
+ * a) The list of event queues linked to the event port.
+ * b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
+ * queue selection from the list is based on event queue priority relative to
+ * other event queues, supplied as *priority* in rte_event_queue_setup()
+ * c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
+ * queue selection from the list is based on event priority supplied as
+ * *priority* in rte_event_enqueue_burst()
+ * 2) Selection of event
+ * a) The number of flows available in the selected event queue.
+ * b) The schedule type associated with the event
+ *
+ * The *nb_events* parameter is the maximum number of event objects to dequeue
+ * which are returned in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_dequeue_burst() function returns the number of event objects
+ * it actually dequeued. A return value equal to *nb_events* means that all
+ * event objects have been dequeued.
+ *
+ * The number of events dequeued is the number of scheduler contexts held by
+ * this port. These contexts are automatically released in the next
+ * rte_event_dequeue_burst() invocation, or they can be released early by
+ * invoking rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param[out] ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * for output to be populated with the dequeued event objects.
+ * @param nb_events
+ * The maximum number of event objects to dequeue, typically the value
+ * returned by rte_event_port_dequeue_depth() for this port.
+ *
+ * @param timeout_ticks
+ * - 0 no-wait, returns immediately if there is no event.
+ * - >0 wait for the event. If the device is configured with
+ * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
+ * at least one event is available or *timeout_ticks* time has elapsed.
+ * If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ * then this function will wait until an event is available or for
+ * *dequeue_timeout_ns* ns, which was previously supplied to
+ * rte_event_dev_configure()
+ *
+ * @return
+ * The number of event objects actually dequeued from the port. The return
+ * value can be less than the value of the *nb_events* parameter when fewer
+ * events are available at the event port.
+ *
+ * @see rte_event_port_dequeue_depth()
+ */
+static inline uint16_t
+rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (port_id >= dev->data->nb_ports) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+
+ /*
+ * Allow zero-cost invocation of the non-burst routine when the
+ * application requests a constant nb_events of one
+ */
+ if (nb_events == 1)
+ return (*dev->dequeue)(
+ dev->data->ports[port_id], ev, timeout_ticks);
+ else
+ return (*dev->dequeue_burst)(
+ dev->data->ports[port_id], ev, nb_events,
+ timeout_ticks);
+}
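+
+/*
+ * A worker loop sketch: dequeue a burst, process each event and forward
+ * it to the next stage. process() and the *done* flag are application
+ * assumptions, and queue_id + 1 assumes a linear pipeline layout:
+ *
+ *	struct rte_event evs[16];
+ *	uint16_t i, n;
+ *
+ *	while (!done) {
+ *		n = rte_event_dequeue_burst(dev_id, port_id, evs, 16, ticks);
+ *		for (i = 0; i < n; i++) {
+ *			process(evs[i].mbuf);
+ *			evs[i].op = RTE_EVENT_OP_FORWARD;
+ *			evs[i].queue_id++;
+ *		}
+ *		if (n)
+ *			rte_event_enqueue_burst(dev_id, port_id, evs, n);
+ *	}
+ */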
+
+/**
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated service priority
+ * supplied in *priorities* on the event device designated by its *dev_id*.
+ *
+ * The link establishment shall enable the event port *port_id* to
+ * receive events from the specified event queue(s) supplied in *queues*
+ *
+ * An event queue may link to one or more event ports.
+ * The number of links that can be established from an event queue to an
+ * event port is implementation defined.
+ *
+ * Event queue(s) to event port link establishment can be changed at runtime
+ * without re-configuring the device to support scaling and to reduce the
+ * latency of critical work by establishing the link with more event ports
+ * at runtime.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier to select the destination port to link.
+ *
+ * @param queues
+ * Points to an array of *nb_links* event queues to be linked
+ * to the event port.
+ * NULL value is allowed, in which case this function links all the configured
+ * event queues *nb_event_queues* which previously supplied to
+ * rte_event_dev_configure() to the event port *port_id*
+ *
+ * @param priorities
+ * Points to an array of *nb_links* service priorities associated with each
+ * event queue link to event port.
+ * The priority defines the event port's servicing priority for
+ * event queue, which may be ignored by an implementation.
+ * The requested priority should be in the range of
+ * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested priority to
+ * implementation supported priority value.
+ * NULL value is allowed, in which case this function links the event queues
+ * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
+ *
+ * @param nb_links
+ * The number of links to establish. This parameter is ignored if queues is
+ * NULL.
+ *
+ * @return
+ * The number of links actually established. The return value can be less than
+ * the value of the *nb_links* parameter when the implementation has a
+ * limitation on specific queue-to-port link establishment or if invalid
+ * parameters are specified in *queues*.
+ * If the return value is less than *nb_links*, the remaining links at the end
+ * of queues[] are not established, and the caller has to take care of them.
+ * If the return value is less than *nb_links* then the implementation shall
+ * update rte_errno accordingly. Possible rte_errno values are:
+ * (-EDQUOT) Quota exceeded (the application tried to link a queue configured
+ * with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
+ * (-EINVAL) Invalid parameter
+ *
+ */
+int
+rte_event_port_link(uint8_t dev_id, uint8_t port_id,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links);
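+
+/*
+ * Linking two queues to a worker port with distinct service priorities;
+ * the queue identifiers follow the earlier setup sketches:
+ *
+ *	const uint8_t queues[] = { 0, 1 };
+ *	const uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
+ *				  RTE_EVENT_DEV_PRIORITY_NORMAL };
+ *
+ *	if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
+ *		rte_exit(EXIT_FAILURE, "link failed: %d\n", rte_errno);
+ */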
+
+/**
+ * Unlink multiple source event queues supplied in *queues* from the destination
+ * event port designated by its *port_id* on the event device designated
+ * by its *dev_id*.
+ *
+ * The unlink establishment shall disable the event port *port_id* from
+ * receiving events from the specified event queue(s) supplied in *queues*
+ *
+ * Event queue(s) to event port unlink establishment can be changed at runtime
+ * without re-configuring the device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier to select the destination port to unlink.
+ *
+ * @param queues
+ * Points to an array of *nb_unlinks* event queues to be unlinked
+ * from the event port.
+ * NULL value is allowed, in which case this function unlinks all the
+ * event queue(s) from the event port *port_id*.
+ *
+ * @param nb_unlinks
+ * The number of unlinks to establish. This parameter is ignored if queues is
+ * NULL.
+ *
+ * @return
+ * The number of unlinks actually established. The return value can be less
+ * than the value of the *nb_unlinks* parameter when the implementation has a
+ * limitation on specific queue-to-port unlink establishment or
+ * if invalid parameters are specified.
+ * If the return value is less than *nb_unlinks*, the remaining queues at the
+ * end of queues[] are not unlinked, and the caller has to take care of them.
+ * If the return value is less than *nb_unlinks* then the implementation shall
+ * update rte_errno accordingly. Possible rte_errno values are:
+ * (-EINVAL) Invalid parameter
+ *
+ */
+int
+rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint16_t nb_unlinks);
+
+/**
+ * Retrieve the list of source event queues and its associated service priority
+ * linked to the destination event port designated by its *port_id*
+ * on the event device designated by its *dev_id*.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier.
+ *
+ * @param[out] queues
+ * Points to an array of *queues* for output.
+ * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ * store the event queue(s) linked with event port *port_id*
+ *
+ * @param[out] priorities
+ * Points to an array of *priorities* for output.
+ * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ * store the service priority associated with each event queue linked
+ *
+ * @return
+ * The number of links established on the event port designated by its
+ * *port_id*.
+ * - <0 on failure.
+ *
+ */
+int
+rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint8_t priorities[]);
+
+/**
+ * Dump internal information about *dev_id* to the FILE* provided in *f*.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param f
+ * A pointer to a file for output
+ *
+ * @return
+ * - 0: on success
+ * - <0: on failure.
+ */
+int
+rte_event_dev_dump(uint8_t dev_id, FILE *f);
+
+/** Maximum name length for extended statistics counters */
+#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
+
+/**
+ * Selects the component of the eventdev to retrieve statistics from.
+ */
+enum rte_event_dev_xstats_mode {
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+};
+
+/**
+ * A name-key lookup element for extended statistics.
+ *
+ * This structure is used to map between names and ID numbers
+ * for extended eventdev statistics.
+ */
+struct rte_event_dev_xstats_name {
+ char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
+};
+
+/**
+ * Retrieve names of extended statistics of an event device.
+ *
+ * @param dev_id
+ * The identifier of the event device.
+ * @param mode
+ * The mode of statistics to retrieve. Choices include the device statistics,
+ * port statistics or queue statistics.
+ * @param queue_port_id
+ * Used to specify the port or queue number in queue or port mode, and is
+ * ignored in device mode.
+ * @param[out] xstats_names
+ * Block of memory to insert names into. Must hold at least *size* entries.
+ * If set to NULL, the function returns the required capacity.
+ * @param[out] ids
+ * Block of memory to insert ids into. Must hold at least *size* entries.
+ * If set to NULL, the function returns the required capacity. The id values
+ * returned can be passed to *rte_event_dev_xstats_get* to select statistics.
+ * @param size
+ * Capacity of xstats_names (number of names).
+ * @return
+ * - positive value lower or equal to size: success. The return value
+ * is the number of entries filled in the stats table.
+ * - positive value higher than size: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ * - negative value on error:
+ * -ENODEV for invalid *dev_id*
+ * -EINVAL for invalid mode, queue port or id parameters
+ * -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids,
+ unsigned int size);
+
+/**
+ * Retrieve extended statistics of an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param mode
+ * The mode of statistics to retrieve. Choices include the device statistics,
+ * port statistics or queue statistics.
+ * @param queue_port_id
+ * Used to specify the port or queue number in queue or port mode, and is
+ * ignored in device mode.
+ * @param ids
+ * The id numbers of the stats to get. The ids can be taken from the stat
+ * position in the stat list from rte_event_dev_xstats_names_get(), or
+ * by using rte_event_dev_xstats_by_name_get().
+ * @param[out] values
+ * The values for each stats request by ID.
+ * @param n
+ * The number of stats requested
+ * @return
+ * - positive value: number of stat entries filled into the values array
+ * - negative value on error:
+ * -ENODEV for invalid *dev_id*
+ * -EINVAL for invalid mode, queue port or id parameters
+ * -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ const unsigned int ids[],
+ uint64_t values[], unsigned int n);
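+
+/*
+ * The usual two-pass pattern for reading all device-level xstats: size
+ * the tables first with NULL output pointers, then fetch names, ids and
+ * values. malloc() error handling is omitted for brevity:
+ *
+ *	int n = rte_event_dev_xstats_names_get(dev_id,
+ *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
+ *	struct rte_event_dev_xstats_name *names = malloc(n * sizeof(*names));
+ *	unsigned int *ids = malloc(n * sizeof(*ids));
+ *	uint64_t *values = malloc(n * sizeof(*values));
+ *
+ *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
+ *			0, names, ids, n);
+ *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ *			ids, values, n);
+ */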
+
+/**
+ * Retrieve the value of a single stat by requesting it by name.
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @param name
+ * The stat name to retrieve
+ * @param[out] id
+ * If non-NULL, the numerical id of the stat will be returned, so that further
+ * requests for the stat can be made using rte_event_dev_xstats_get(), which
+ * will be faster as it doesn't need to scan a list of names for the stat.
+ * If the stat cannot be found, the id returned will be (unsigned)-1.
+ * @return
+ * - positive value or zero: the stat value
+ * - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
+ */
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+ unsigned int *id);
+
+/**
+ * Reset the values of the xstats of the selected component in the device.
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @param mode
+ * The mode of the statistics to reset. Choose from device, queue or port.
+ * @param queue_port_id
+ * The queue or port to reset. 0 and positive values select ports and queues,
+ * while -1 indicates all ports or queues.
+ * @param ids
+ * Selects specific statistics to be reset. When NULL, all statistics selected
+ * by *mode* will be reset. If non-NULL, must point to array of at least
+ * *nb_ids* size.
+ * @param nb_ids
+ * The number of ids available from the *ids* array. Ignored when ids is NULL.
+ * @return
+ * - zero: successfully reset the statistics to zero
+ * - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
+ */
+int
+rte_event_dev_xstats_reset(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_H_ */
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
new file mode 100644
index 00000000..4005b3c9
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -0,0 +1,599 @@
+/*
+ *
+ * Copyright(c) 2016 Cavium networks. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENTDEV_PMD_H_
+#define _RTE_EVENTDEV_PMD_H_
+
+/** @file
+ * RTE Event PMD APIs
+ *
+ * @note
+ * These APIs are intended for event PMDs only and user applications
+ * should not call them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_common.h>
+
+#include "rte_eventdev.h"
+
+/* Logging Macros */
+#define RTE_EDEV_LOG_ERR(...) \
+ RTE_LOG(ERR, EVENTDEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+#define RTE_EDEV_LOG_DEBUG(...) \
+ RTE_LOG(DEBUG, EVENTDEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+#else
+#define RTE_EDEV_LOG_DEBUG(...) (void)0
+#endif
+
+/* Macros to check for valid device */
+#define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
+ if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \
+ return retval; \
+ } \
+} while (0)
+
+#define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \
+ if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \
+ return; \
+ } \
+} while (0)
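+
+/*
+ * Typical use of the validity-check macros at the top of a library-level
+ * entry point; rte_event_dev_example() is a hypothetical function shown
+ * only to illustrate the pattern:
+ *
+ *	int
+ *	rte_event_dev_example(uint8_t dev_id)
+ *	{
+ *		struct rte_eventdev *dev;
+ *
+ *		RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ *		dev = &rte_eventdevs[dev_id];
+ *		return dev->data->nb_queues;
+ *	}
+ */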
+
+#define RTE_EVENTDEV_DETACHED (0)
+#define RTE_EVENTDEV_ATTACHED (1)
+
+/**
+ * Initialisation function of an event driver invoked for each matching
+ * event PCI device detected during the PCI probing phase.
+ *
+ * @param dev
+ * The dev pointer is the address of the *rte_eventdev* structure associated
+ * with the matching device and which has been [automatically] allocated in
+ * the *rte_event_devices* array.
+ *
+ * @return
+ * - 0: Success, the device is properly initialised by the driver.
+ * In particular, the driver MUST have set up the *dev_ops* pointer
+ * of the *dev* structure.
+ * - <0: Error code of the device initialisation failure.
+ */
+typedef int (*eventdev_init_t)(struct rte_eventdev *dev);
+
+/**
+ * Finalisation function of a driver invoked for each matching
+ * PCI device detected during the PCI closing phase.
+ *
+ * @param dev
+ * The dev pointer is the address of the *rte_eventdev* structure associated
+ * with the matching device and which has been [automatically] allocated in
+ * the *rte_event_devices* array.
+ *
+ * @return
+ * - 0: Success, the device is properly finalised by the driver.
+ * In particular, the driver MUST free the *dev_ops* pointer
+ * of the *dev* structure.
+ * - <0: Error code of the device finalisation failure.
+ */
+typedef int (*eventdev_uninit_t)(struct rte_eventdev *dev);
+
+/**
+ * The structure associated with a PMD driver.
+ *
+ * Each driver acts as a PCI driver and is represented by a generic
+ * *event_driver* structure that holds:
+ *
+ * - An *rte_pci_driver* structure (which must be the first field).
+ *
+ * - The *eventdev_init* function invoked for each matching PCI device.
+ *
+ * - The size of the private data to allocate for each matching device.
+ */
+struct rte_eventdev_driver {
+ struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */
+ unsigned int dev_private_size; /**< Size of device private data. */
+
+ eventdev_init_t eventdev_init; /**< Device init function. */
+ eventdev_uninit_t eventdev_uninit; /**< Device uninit function. */
+};
+
+/** Global structure used for maintaining state of allocated event devices */
+struct rte_eventdev_global {
+ uint8_t nb_devs; /**< Number of devices found */
+};
+
+extern struct rte_eventdev_global *rte_eventdev_globals;
+/** Pointer to global event devices data structure. */
+extern struct rte_eventdev *rte_eventdevs;
+/** The pool of rte_eventdev structures. */
+
+/**
+ * Get the rte_eventdev structure device pointer for the named device.
+ *
+ * @param name
+ * device name to select the device structure.
+ *
+ * @return
+ * - The rte_eventdev structure pointer for the named device, or NULL if
+ * no attached device has that name.
+ */
+static inline struct rte_eventdev *
+rte_event_pmd_get_named_dev(const char *name)
+{
+ struct rte_eventdev *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_EVENT_MAX_DEVS; i++) {
+ dev = &rte_eventdevs[i];
+ if ((dev->attached == RTE_EVENTDEV_ATTACHED) &&
+ (strcmp(dev->data->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+/**
+ * Check whether the event device index refers to a valid, attached event
+ * device.
+ *
+ * @param dev_id
+ * Event device index.
+ *
+ * @return
+ * - 1 if the device index is valid, 0 otherwise.
+ */
+static inline unsigned
+rte_event_pmd_is_valid_dev(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ if (dev_id >= RTE_EVENT_MAX_DEVS)
+ return 0;
+
+ dev = &rte_eventdevs[dev_id];
+ if (dev->attached != RTE_EVENTDEV_ATTACHED)
+ return 0;
+ else
+ return 1;
+}
+
+/**
+ * Definitions of all functions exported by a driver through the
+ * generic structure of type *event_dev_ops* supplied in the
+ * *rte_eventdev* structure associated with a device.
+ */
+
+/**
+ * Get device information of a device.
+ *
+ * @param dev
+ * Event device pointer
+ * @param dev_info
+ * Event device information structure
+ */
+typedef void (*eventdev_info_get_t)(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info);
+
+/**
+ * Configure a device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @return
+ * Returns 0 on success
+ */
+typedef int (*eventdev_configure_t)(const struct rte_eventdev *dev);
+
+/**
+ * Start a configured device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @return
+ * Returns 0 on success
+ */
+typedef int (*eventdev_start_t)(struct rte_eventdev *dev);
+
+/**
+ * Stop a configured device.
+ *
+ * @param dev
+ * Event device pointer
+ */
+typedef void (*eventdev_stop_t)(struct rte_eventdev *dev);
+
+/**
+ * Close a configured device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @return
+ * - 0 on success
+ * - (-EAGAIN) if the device cannot be closed because it is busy
+ */
+typedef int (*eventdev_close_t)(struct rte_eventdev *dev);
+
+/**
+ * Retrieve the default event queue configuration.
+ *
+ * @param dev
+ * Event device pointer
+ * @param queue_id
+ * Event queue index
+ * @param[out] queue_conf
+ * Event queue configuration structure
+ *
+ */
+typedef void (*eventdev_queue_default_conf_get_t)(struct rte_eventdev *dev,
+ uint8_t queue_id, struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Setup an event queue.
+ *
+ * @param dev
+ * Event device pointer
+ * @param queue_id
+ * Event queue index
+ * @param queue_conf
+ * Event queue configuration structure
+ *
+ * @return
+ * Returns 0 on success.
+ */
+typedef int (*eventdev_queue_setup_t)(struct rte_eventdev *dev,
+ uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Release resources allocated by given event queue.
+ *
+ * @param dev
+ * Event device pointer
+ * @param queue_id
+ * Event queue index
+ *
+ */
+typedef void (*eventdev_queue_release_t)(struct rte_eventdev *dev,
+ uint8_t queue_id);
+
+/**
+ * Retrieve the default event port configuration.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port_id
+ * Event port index
+ * @param[out] port_conf
+ * Event port configuration structure
+ *
+ */
+typedef void (*eventdev_port_default_conf_get_t)(struct rte_eventdev *dev,
+ uint8_t port_id, struct rte_event_port_conf *port_conf);
+
+/**
+ * Setup an event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port_id
+ * Event port index
+ * @param port_conf
+ * Event port configuration structure
+ *
+ * @return
+ * Returns 0 on success.
+ */
+typedef int (*eventdev_port_setup_t)(struct rte_eventdev *dev,
+ uint8_t port_id,
+ const struct rte_event_port_conf *port_conf);
+
+/**
+ * Release memory resources allocated by given event port.
+ *
+ * @param port
+ * Event port pointer
+ *
+ */
+typedef void (*eventdev_port_release_t)(void *port);
+
+/**
+ * Link multiple source event queues to destination event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port
+ * Event port pointer
+ * @param queues
+ * Points to an array of *nb_links* event queues to be linked
+ * to the event port.
+ * @param priorities
+ * Points to an array of *nb_links* service priorities associated with each
+ * event queue link to event port.
+ * @param nb_links
+ * The number of links to establish
+ *
+ * @return
+ * Returns 0 on success.
+ *
+ */
+typedef int (*eventdev_port_link_t)(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links);
+
+/**
+ * Unlink multiple source event queues from destination event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port
+ * Event port pointer
+ * @param queues
+ * An array of *nb_unlinks* event queues to be unlinked from the event port.
+ * @param nb_unlinks
+ * The number of unlinks to establish
+ *
+ * @return
+ * Returns 0 on success.
+ *
+ */
+typedef int (*eventdev_port_unlink_t)(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks);
+
+/**
+ * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
+ *
+ * @param dev
+ * Event device pointer
+ * @param ns
+ * Wait time in nanosecond
+ * @param[out] timeout_ticks
+ * Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
+ *
+ * @return
+ * Returns 0 on success.
+ *
+ */
+typedef int (*eventdev_dequeue_timeout_ticks_t)(struct rte_eventdev *dev,
+ uint64_t ns, uint64_t *timeout_ticks);
+
+/**
+ * Dump internal information
+ *
+ * @param dev
+ * Event device pointer
+ * @param f
+ * A pointer to a file for output
+ *
+ */
+typedef void (*eventdev_dump_t)(struct rte_eventdev *dev, FILE *f);
+
+/**
+ * Retrieve a set of statistics from device
+ *
+ * @param dev
+ * Event device pointer
+ * @param ids
+ * The stat ids to retrieve
+ * @param values
+ * The returned stat values
+ * @param n
+ * The number of id values and entries in the values array
+ * @return
+ * The number of stat values successfully filled into the values array
+ */
+typedef int (*eventdev_xstats_get_t)(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+
+/**
+ * Resets the statistic values in xstats for the device, based on mode.
+ */
+typedef int (*eventdev_xstats_reset_t)(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+/**
+ * Get names of extended stats of an event device
+ *
+ * @param dev
+ * Event device pointer
+ * @param xstats_names
+ * Array of name values to be filled in
+ * @param size
+ * Number of values in the xstats_names array
+ * @return
+ * When size >= the number of stats, return the number of stat values filled
+ * into the array.
+ * When size < the number of available stats, return the number of stats
+ * values, and do not fill in any data into xstats_names.
+ */
+typedef int (*eventdev_xstats_get_names_t)(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+
+/**
+ * Get the value of one stat and optionally return its id
+ *
+ * @param dev
+ * Event device pointer
+ * @param name
+ * The name of the stat to retrieve
+ * @param id
+ * Pointer to an unsigned int where we store the stat-id for future reference.
+ * This pointer may be null if the id is not required.
+ * @return
+ * The value of the stat, or (uint64_t)-1 if the stat is not found.
+ * If the stat is not found, the id value will be returned as (unsigned)-1,
+ * if id pointer is non-NULL
+ */
+typedef uint64_t (*eventdev_xstats_get_by_name)(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+
+/** Event device operations function pointer table */
+struct rte_eventdev_ops {
+ eventdev_info_get_t dev_infos_get; /**< Get device info. */
+ eventdev_configure_t dev_configure; /**< Configure device. */
+ eventdev_start_t dev_start; /**< Start device. */
+ eventdev_stop_t dev_stop; /**< Stop device. */
+ eventdev_close_t dev_close; /**< Close device. */
+
+ eventdev_queue_default_conf_get_t queue_def_conf;
+ /**< Get default queue configuration. */
+ eventdev_queue_setup_t queue_setup;
+ /**< Set up an event queue. */
+ eventdev_queue_release_t queue_release;
+ /**< Release an event queue. */
+
+ eventdev_port_default_conf_get_t port_def_conf;
+ /**< Get default port configuration. */
+ eventdev_port_setup_t port_setup;
+ /**< Set up an event port. */
+ eventdev_port_release_t port_release;
+ /**< Release an event port. */
+
+ eventdev_port_link_t port_link;
+ /**< Link event queues to an event port. */
+ eventdev_port_unlink_t port_unlink;
+ /**< Unlink event queues from an event port. */
+ eventdev_dequeue_timeout_ticks_t timeout_ticks;
+	/**< Converts ns to *timeout_ticks* value for rte_event_dequeue_burst() */
+ eventdev_dump_t dump;
+	/**< Dump internal information. */
+
+ eventdev_xstats_get_t xstats_get;
+ /**< Get extended device statistics. */
+ eventdev_xstats_get_names_t xstats_get_names;
+ /**< Get names of extended stats. */
+ eventdev_xstats_get_by_name xstats_get_by_name;
+ /**< Get one value by name. */
+ eventdev_xstats_reset_t xstats_reset;
+ /**< Reset the statistics values in xstats. */
+};
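+
+/*
+ * Illustrative sketch, not part of the upstream header: a PMD normally
+ * provides one static ops table and points rte_eventdev::dev_ops at it
+ * during probe. The example_* handlers are the hypothetical sketches
+ * shown earlier; callbacks a PMD does not support may be left NULL.
+ */
+static const struct rte_eventdev_ops example_eventdev_ops = {
+	.port_unlink = example_port_unlink,
+	.timeout_ticks = example_timeout_ticks,
+	.dump = example_dump,
+	.xstats_get_by_name = example_xstats_get_by_name,
+	/* a real PMD fills in the remaining callbacks as well */
+};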
+
+/**
+ * Allocates a new eventdev slot for an event device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   Unique name identifying the device
+ * @param socket_id
+ * Socket to allocate resources on.
+ * @return
+ * - Pointer to a slot in the rte_eventdevs array for the new device.
+ * - NULL if allocation failed.
+ */
+struct rte_eventdev *
+rte_event_pmd_allocate(const char *name, int socket_id);
+
+/**
+ * Release the specified eventdev device.
+ *
+ * @param eventdev
+ * The *eventdev* pointer is the address of the *rte_eventdev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+int
+rte_event_pmd_release(struct rte_eventdev *eventdev);
+
+/**
+ * Creates a new virtual event device and returns the pointer to that device.
+ *
+ * @param name
+ * PMD type name
+ * @param dev_private_size
+ *   Size of the event PMD's private data
+ * @param socket_id
+ * Socket to allocate resources on.
+ *
+ * @return
+ * - Eventdev pointer if device is successfully created.
+ * - NULL if device cannot be created.
+ */
+struct rte_eventdev *
+rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
+ int socket_id);
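+
+/*
+ * Illustrative sketch, not part of the upstream header: the probe path
+ * of a hypothetical virtual PMD. It allocates the device on the caller's
+ * socket, sizes the private area for the example_xstats state shown
+ * earlier and installs the hypothetical example_eventdev_ops table; a
+ * real PMD would call this from its rte_vdev_driver probe callback.
+ * (-ENOMEM assumes <errno.h> is available.)
+ */
+static int
+example_vdev_probe(const char *name)
+{
+	struct rte_eventdev *dev;
+
+	dev = rte_event_pmd_vdev_init(name,
+			sizeof(struct example_xstats), rte_socket_id());
+	if (dev == NULL)
+		return -ENOMEM;
+
+	dev->dev_ops = &example_eventdev_ops;
+	return 0;
+}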
+
+/**
+ * Destroy the given virtual event device
+ *
+ * @param name
+ * PMD type name
+ * @return
+ * - 0 on success, negative on error
+ */
+int
+rte_event_pmd_vdev_uninit(const char *name);
+
+/**
+ * Wrapper for use by PCI drivers as a .probe function to attach to an
+ * event interface.
+ */
+int rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev);
+
+/**
+ * Wrapper for use by PCI drivers as a .remove function to detach an
+ * event interface.
+ */
+int rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev);
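+
+/*
+ * Illustrative sketch, not part of the upstream header: the comments
+ * above say these wrappers slot into a PCI driver's .probe and .remove,
+ * so a hypothetical PCI event PMD could register as below. The vendor
+ * and device ids are placeholders, and a real PMD supplies its own
+ * device-specific initialisation around the wrappers.
+ */
+static const struct rte_pci_id example_pci_id_map[] = {
+	{ RTE_PCI_DEVICE(0x177d, 0xa04b) }, /* placeholder ids */
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+static struct rte_pci_driver example_pci_eventdev_driver = {
+	.id_table = example_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = rte_event_pmd_pci_probe,
+	.remove = rte_event_pmd_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(event_example_pci, example_pci_eventdev_driver);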
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_PMD_H_ */
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
new file mode 100644
index 00000000..1fa6b333
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -0,0 +1,44 @@
+DPDK_17.05 {
+ global:
+
+ rte_eventdevs;
+
+ rte_event_dev_count;
+ rte_event_dev_get_dev_id;
+ rte_event_dev_socket_id;
+ rte_event_dev_info_get;
+ rte_event_dev_configure;
+ rte_event_dev_start;
+ rte_event_dev_stop;
+ rte_event_dev_close;
+ rte_event_dev_dump;
+ rte_event_dev_xstats_by_name_get;
+ rte_event_dev_xstats_get;
+ rte_event_dev_xstats_names_get;
+ rte_event_dev_xstats_reset;
+
+ rte_event_port_default_conf_get;
+ rte_event_port_setup;
+ rte_event_port_dequeue_depth;
+ rte_event_port_enqueue_depth;
+ rte_event_port_count;
+ rte_event_port_link;
+ rte_event_port_unlink;
+ rte_event_port_links_get;
+
+ rte_event_queue_default_conf_get;
+ rte_event_queue_setup;
+ rte_event_queue_count;
+ rte_event_queue_priority;
+
+ rte_event_dequeue_timeout_ticks;
+
+ rte_event_pmd_allocate;
+ rte_event_pmd_release;
+ rte_event_pmd_vdev_init;
+ rte_event_pmd_vdev_uninit;
+ rte_event_pmd_pci_probe;
+ rte_event_pmd_pci_remove;
+
+ local: *;
+};