From ca33590b6af032bff57d9cc70455660466a654b2 Mon Sep 17 00:00:00 2001
From: Luca Boccassi <luca.boccassi@gmail.com>
Date: Mon, 19 Feb 2018 11:16:57 +0000
Subject: New upstream version 18.02

Change-Id: I89ed24cb2a49b78fe5be6970b99dd46c1499fcc3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
---
 drivers/event/Makefile                             |   33 +-
 drivers/event/dpaa/Makefile                        |   38 +
 drivers/event/dpaa/dpaa_eventdev.c                 |  655 ++++
 drivers/event/dpaa/dpaa_eventdev.h                 |   81 +
 drivers/event/dpaa/rte_pmd_dpaa_event_version.map  |    4 +
 drivers/event/dpaa2/Makefile                       |   31 +-
 drivers/event/dpaa2/dpaa2_eventdev.c               |  154 +-
 drivers/event/dpaa2/dpaa2_eventdev.h               |   41 +-
 drivers/event/dpaa2/dpaa2_eventdev_logs.h          |   37 +
 drivers/event/dpaa2/dpaa2_hw_dpcon.c               |   33 +-
 drivers/event/meson.build                          |    7 +
 drivers/event/octeontx/Makefile                    |   39 +-
 drivers/event/octeontx/meson.build                 |    9 +
 .../octeontx/rte_pmd_octeontx_event_version.map    |    3 +
 .../octeontx/rte_pmd_octeontx_ssovf_version.map    |    3 -
 drivers/event/octeontx/ssovf_evdev.c               |   95 +-
 drivers/event/octeontx/ssovf_evdev.h               |   59 +-
 drivers/event/octeontx/ssovf_evdev_selftest.c      | 1487 +++++++++
 drivers/event/octeontx/ssovf_worker.c              |   32 +-
 drivers/event/octeontx/ssovf_worker.h              |   39 +-
 drivers/event/opdl/Makefile                        |   39 +
 drivers/event/opdl/opdl_evdev.c                    |  769 +++++
 drivers/event/opdl/opdl_evdev.h                    |  314 ++
 drivers/event/opdl/opdl_evdev_init.c               |  940 ++++++
 drivers/event/opdl/opdl_evdev_xstats.c             |  180 ++
 drivers/event/opdl/opdl_log.h                      |   21 +
 drivers/event/opdl/opdl_ring.c                     | 1233 ++++++++
 drivers/event/opdl/opdl_ring.h                     |  600 ++++
 drivers/event/opdl/opdl_test.c                     | 1057 +++++++
 drivers/event/opdl/rte_pmd_evdev_opdl_version.map  |    3 +
 drivers/event/skeleton/Makefile                    |   32 +-
 drivers/event/skeleton/meson.build                 |    5 +
 drivers/event/skeleton/skeleton_eventdev.c         |   33 +-
 drivers/event/skeleton/skeleton_eventdev.h         |   32 +-
 drivers/event/sw/Makefile                          |   34 +-
 drivers/event/sw/event_ring.h                      |   32 +-
 drivers/event/sw/iq_chunk.h                        |  196 ++
 drivers/event/sw/iq_ring.h                         |  172 --
 drivers/event/sw/meson.build                       |   11 +
 drivers/event/sw/sw_evdev.c                        |  184 +-
 drivers/event/sw/sw_evdev.h                        |   71 +-
 drivers/event/sw/sw_evdev_log.h                    |   23 +
 drivers/event/sw/sw_evdev_scheduler.c              |   72 +-
 drivers/event/sw/sw_evdev_selftest.c               | 3245 ++++++++++++++++++++
 drivers/event/sw/sw_evdev_worker.c                 |   70 +-
 drivers/event/sw/sw_evdev_xstats.c                 |   44 +-
 46 files changed, 11323 insertions(+), 969 deletions(-)
 create mode 100644 drivers/event/dpaa/Makefile
 create mode 100644 drivers/event/dpaa/dpaa_eventdev.c
 create mode 100644 drivers/event/dpaa/dpaa_eventdev.h
 create mode 100644 drivers/event/dpaa/rte_pmd_dpaa_event_version.map
 create mode 100644 drivers/event/dpaa2/dpaa2_eventdev_logs.h
 create mode 100644 drivers/event/meson.build
 create mode 100644 drivers/event/octeontx/meson.build
 create mode 100644 drivers/event/octeontx/rte_pmd_octeontx_event_version.map
 delete mode 100644 drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
 create mode 100644 drivers/event/octeontx/ssovf_evdev_selftest.c
 create mode 100644 drivers/event/opdl/Makefile
 create mode 100644 drivers/event/opdl/opdl_evdev.c
 create mode 100644 drivers/event/opdl/opdl_evdev.h
 create mode 100644 drivers/event/opdl/opdl_evdev_init.c
 create mode 100644 drivers/event/opdl/opdl_evdev_xstats.c
 create mode 100644 drivers/event/opdl/opdl_log.h
 create mode 100644 drivers/event/opdl/opdl_ring.c
 create mode 100644 drivers/event/opdl/opdl_ring.h
 create mode 100644 drivers/event/opdl/opdl_test.c
 create mode 100644 drivers/event/opdl/rte_pmd_evdev_opdl_version.map
 create mode 100644 drivers/event/skeleton/meson.build
 create mode 100644 drivers/event/sw/iq_chunk.h
 delete mode 100644 drivers/event/sw/iq_ring.h
 create mode 100644 drivers/event/sw/meson.build
 create mode 100644 drivers/event/sw/sw_evdev_log.h
 create mode 100644 drivers/event/sw/sw_evdev_selftest.c

diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index 1f9c0ba2..c3d89a15 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -1,39 +1,14 @@
-#   BSD LICENSE
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Cavium, Inc
 #
-#   Copyright(c) 2016 Cavium, Inc. All rights reserved.
-#   All rights reserved.
-#
-#   Redistribution and use in source and binary forms, with or without
-#   modification, are permitted provided that the following conditions
-#   are met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in
-#       the documentation and/or other materials provided with the
-#       distribution.
-#     * Neither the name of Cavium, Inc nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/dpaa/Makefile b/drivers/event/dpaa/Makefile
new file mode 100644
index 00000000..ddd85522
--- /dev/null
+++ b/drivers/event/dpaa/Makefile
@@ -0,0 +1,38 @@
+#   SPDX-License-Identifier:        BSD-3-Clause
+#   Copyright 2017 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa_event.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -I$(RTE_SDK_DPAA)/
+CFLAGS += -I$(RTE_SDK_DPAA)/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
+
+EXPORT_MAP := rte_pmd_dpaa_event_version.map
+
+LIBABIVER := 1
+
+# Interfaces with DPDK
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa_eventdev.c
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_eventdev -lrte_pmd_dpaa -lrte_bus_vdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
new file mode 100644
index 00000000..00068015
--- /dev/null
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -0,0 +1,655 @@
+/*   SPDX-License-Identifier:        BSD-3-Clause
+ *   Copyright 2017 NXP
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/epoll.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+#include <rte_cycles_64.h>
+
+#include <dpaa_ethdev.h>
+#include "dpaa_eventdev.h"
+#include <dpaa_mempool.h>
+
+/*
+ * Clarifications
+ * Eventdev = Virtual Instance for SoC
+ * Eventport = Portal Instance
+ * Eventqueue = Channel Instance
+ * 1 Eventdev can have N Eventqueues
+ */
+
+static int
+dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+				 uint64_t *timeout_ticks)
+{
+	uint64_t cycles_per_second;
+
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+
+	cycles_per_second = rte_get_timer_hz();
+	*timeout_ticks = ns * (cycles_per_second / NS_PER_S);
+
+	return 0;
+}
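
The conversion above relies on whole cycles per nanosecond: cycles_per_second / NS_PER_S truncates, so a timer clocked below 1 GHz would yield zero ticks for any timeout. A minimal standalone sketch of the same arithmetic (the 1.2 GHz figure is illustrative, not read from hardware):

    #include <stdint.h>
    #include <stdio.h>

    #define NS_PER_S 1000000000ULL

    /* Same arithmetic as dpaa_event_dequeue_timeout_ticks(), with the
     * timer frequency passed in instead of read via rte_get_timer_hz(). */
    static uint64_t ns_to_ticks(uint64_t ns, uint64_t cycles_per_second)
    {
            return ns * (cycles_per_second / NS_PER_S);
    }

    int main(void)
    {
            /* 1200000000 / 1000000000 truncates to 1 cycle/ns,
             * so 100 ns maps to 100 ticks. */
            printf("%llu\n", (unsigned long long)ns_to_ticks(100, 1200000000ULL));
            return 0;
    }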
+
+static void
+dpaa_eventq_portal_add(u16 ch_id)
+{
+	uint32_t sdqcr;
+
+	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
+	qman_static_dequeue_add(sdqcr, NULL);
+}
+
+static uint16_t
+dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
+			 uint16_t nb_events)
+{
+	uint16_t i;
+	struct rte_mbuf *mbuf;
+
+	RTE_SET_USED(port);
+	/* Release all the contexts saved previously */
+	for (i = 0; i < nb_events; i++) {
+		switch (ev[i].op) {
+		case RTE_EVENT_OP_RELEASE:
+			qman_dca_index(ev[i].impl_opaque, 0);
+			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+			DPAA_PER_LCORE_DQRR_SIZE--;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return nb_events;
+}
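
The RTE_EVENT_OP_RELEASE path above consumes one held atomic context per event: the DCA index carried in impl_opaque is handed back to the portal, and the per-lcore bookkeeping (a held-slot bitmask plus a counter) is cleared. A hedged, portal-free sketch of just that bitmask bookkeeping:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-ins for DPAA_PER_LCORE_DQRR_HELD / _SIZE; per-lcore in the
     * driver, plain globals here for illustration. */
    static uint16_t dqrr_held;
    static uint8_t dqrr_size;

    static void hold_slot(unsigned int i)
    {
            dqrr_held |= (uint16_t)(1 << i);
            dqrr_size++;
    }

    static void release_slot(unsigned int i)
    {
            /* Mirrors: HELD &= ~(1 << i); SIZE--; */
            dqrr_held &= (uint16_t)~(1 << i);
            dqrr_size--;
    }

    int main(void)
    {
            hold_slot(3);
            release_slot(3);
            assert(dqrr_held == 0 && dqrr_size == 0);
            return 0;
    }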
+
+static uint16_t
+dpaa_event_enqueue(void *port, const struct rte_event *ev)
+{
+	return dpaa_event_enqueue_burst(port, ev, 1);
+}
+
+static uint16_t
+dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
+			 uint16_t nb_events, uint64_t timeout_ticks)
+{
+	int ret;
+	u16 ch_id;
+	void *buffers[8];
+	u32 num_frames, i;
+	uint64_t wait_time, cur_ticks, start_ticks;
+	struct dpaa_port *portal = (struct dpaa_port *)port;
+	struct rte_mbuf *mbuf;
+
+	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+		/* Affine current thread context to a qman portal */
+		ret = rte_dpaa_portal_init((void *)0);
+		if (ret) {
+			DPAA_EVENTDEV_ERR("Unable to initialize portal");
+			return ret;
+		}
+	}
+
+	if (unlikely(!portal->is_port_linked)) {
+		/*
+		 * Affine event queue for current thread context
+		 * to a qman portal.
+		 */
+		for (i = 0; i < portal->num_linked_evq; i++) {
+			ch_id = portal->evq_info[i].ch_id;
+			dpaa_eventq_portal_add(ch_id);
+		}
+		portal->is_port_linked = true;
+	}
+
+	/* Check if there are atomic contexts to be released */
+	i = 0;
+	while (DPAA_PER_LCORE_DQRR_SIZE) {
+		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
+			qman_dca_index(i, 0);
+			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+			DPAA_PER_LCORE_DQRR_SIZE--;
+		}
+		i++;
+	}
+	DPAA_PER_LCORE_DQRR_HELD = 0;
+
+	if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
+		wait_time = timeout_ticks;
+	else
+		wait_time = portal->timeout;
+
+	/* Let's dequeue the frames */
+	start_ticks = rte_get_timer_cycles();
+	wait_time += start_ticks;
+	do {
+		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
+		if (num_frames != 0)
+			break;
+		cur_ticks = rte_get_timer_cycles();
+	} while (cur_ticks < wait_time);
+
+	return num_frames;
+}
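
dpaa_event_dequeue_burst() therefore busy-polls the portal until either frames arrive or the tick deadline derived from the port (or per-call) timeout passes. The control flow, reduced to a hedged sketch with caller-supplied callbacks (now() and poll_once() stand in for rte_get_timer_cycles() and qman_portal_dequeue(); both are assumptions for illustration):

    #include <stdint.h>

    typedef uint64_t (*clock_fn)(void);
    typedef unsigned int (*poll_fn)(void);

    static unsigned int
    poll_with_deadline(clock_fn now, poll_fn poll_once, uint64_t wait_ticks)
    {
            uint64_t deadline = now() + wait_ticks;
            unsigned int n;

            do {
                    n = poll_once();        /* one dequeue attempt */
                    if (n != 0)
                            break;          /* frames available: stop early */
            } while (now() < deadline);

            return n;
    }

    /* Trivial stubs so the sketch links and runs. */
    static uint64_t fake_now(void) { static uint64_t t; return t += 10; }
    static unsigned int fake_poll(void) { return 0; }

    int main(void)
    {
            return (int)poll_with_deadline(fake_now, fake_poll, 100);
    }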
+
+static uint16_t
+dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
+}
+
+static void
+dpaa_event_dev_info_get(struct rte_eventdev *dev,
+			struct rte_event_dev_info *dev_info)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	dev_info->driver_name = "event_dpaa";
+	dev_info->min_dequeue_timeout_ns =
+		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+	dev_info->max_dequeue_timeout_ns =
+		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
+	dev_info->dequeue_timeout_ns =
+		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+	dev_info->max_event_queues =
+		DPAA_EVENT_MAX_QUEUES;
+	dev_info->max_event_queue_flows =
+		DPAA_EVENT_MAX_QUEUE_FLOWS;
+	dev_info->max_event_queue_priority_levels =
+		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
+	dev_info->max_event_priority_levels =
+		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
+	dev_info->max_event_ports =
+		DPAA_EVENT_MAX_EVENT_PORT;
+	dev_info->max_event_port_dequeue_depth =
+		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+	dev_info->max_event_port_enqueue_depth =
+		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+	/*
+	 * TODO: Need to find out how to fetch this info
+	 * from the kernel or elsewhere.
+	 */
+	dev_info->max_num_events =
+		DPAA_EVENT_MAX_NUM_EVENTS;
+	dev_info->event_dev_cap =
+		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+		RTE_EVENT_DEV_CAP_BURST_MODE |
+		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+}
+
+static int
+dpaa_event_dev_configure(const struct rte_eventdev *dev)
+{
+	struct dpaa_eventdev *priv = dev->data->dev_private;
+	struct rte_event_dev_config *conf = &dev->data->dev_conf;
+	int ret, i;
+	uint32_t *ch_id;
+
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+	priv->nb_events_limit = conf->nb_events_limit;
+	priv->nb_event_queues = conf->nb_event_queues;
+	priv->nb_event_ports = conf->nb_event_ports;
+	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
+	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
+	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
+	priv->event_dev_cfg = conf->event_dev_cfg;
+
+	/* Check whether the dequeue timeout method is per-dequeue or global */
+	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		/*
+		 * Use the timeout value given with each dequeue operation,
+		 * so invalidate this global timeout value.
+		 */
+		priv->dequeue_timeout_ns = 0;
+	}
+
+	ch_id = rte_malloc("dpaa-channels",
+			  sizeof(uint32_t) * priv->nb_event_queues,
+			  RTE_CACHE_LINE_SIZE);
+	if (ch_id == NULL) {
+		EVENTDEV_DRV_ERR("Failed to allocate memory for DPAA channels\n");
+		return -ENOMEM;
+	}
+	/* Create requested event queues within the given event device */
+	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
+	if (ret < 0) {
+		EVENTDEV_DRV_ERR("Failed to create internal channel\n");
+		rte_free(ch_id);
+		return ret;
+	}
+	for (i = 0; i < priv->nb_event_queues; i++)
+		priv->evq_info[i].ch_id = (u16)ch_id[i];
+
+	/* Let's prepare the event ports */
+	memset(&priv->ports[0], 0,
+	      sizeof(struct dpaa_port) * priv->nb_event_ports);
+	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		for (i = 0; i < priv->nb_event_ports; i++) {
+			priv->ports[i].timeout =
+				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
+		}
+	} else if (priv->dequeue_timeout_ns == 0) {
+		for (i = 0; i < priv->nb_event_ports; i++) {
+			dpaa_event_dequeue_timeout_ticks(NULL,
+				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
+				&priv->ports[i].timeout);
+		}
+	} else {
+		for (i = 0; i < priv->nb_event_ports; i++) {
+			dpaa_event_dequeue_timeout_ticks(NULL,
+				priv->dequeue_timeout_ns,
+				&priv->ports[i].timeout);
+		}
+	}
+	/*
+	 * TODO: Currently portals are affined to threads. At most as many
+	 * threads can be created as there are lcores.
+	 */
+	rte_free(ch_id);
+	EVENTDEV_DRV_LOG("Configured eventdev devid=%d", dev->data->dev_id);
+
+	return 0;
+}
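
The fields cached in dev_configure() arrive from the application through rte_event_dev_configure(). A hedged sketch of a minimal application-side configuration against this PMD (the queue/port counts are illustrative):

    #include <rte_eventdev.h>

    static int configure_eventdev(uint8_t dev_id)
    {
            struct rte_event_dev_info info;
            struct rte_event_dev_config conf = {0};

            rte_event_dev_info_get(dev_id, &info);

            conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
            conf.nb_events_limit = info.max_num_events;
            conf.nb_event_queues = 2;
            conf.nb_event_ports = 2;
            conf.nb_event_queue_flows = info.max_event_queue_flows;
            conf.nb_event_port_dequeue_depth = 1;
            conf.nb_event_port_enqueue_depth = 1;

            return rte_event_dev_configure(dev_id, &conf);
    }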
+
+static int
+dpaa_event_dev_start(struct rte_eventdev *dev)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+static void
+dpaa_event_dev_stop(struct rte_eventdev *dev)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+	RTE_SET_USED(dev);
+}
+
+static int
+dpaa_event_dev_close(struct rte_eventdev *dev)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+static void
+dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+			  struct rte_event_queue_conf *queue_conf)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queue_id);
+
+	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
+	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+}
+
+static int
+dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+		       const struct rte_event_queue_conf *queue_conf)
+{
+	struct dpaa_eventdev *priv = dev->data->dev_private;
+	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
+
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	switch (queue_conf->schedule_type) {
+	case RTE_SCHED_TYPE_PARALLEL:
+	case RTE_SCHED_TYPE_ATOMIC:
+		break;
+	case RTE_SCHED_TYPE_ORDERED:
+		EVENTDEV_DRV_ERR("Schedule type is not supported.");
+		return -1;
+	}
+	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+	evq_info->event_queue_id = queue_id;
+
+	return 0;
+}
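
Note the switch above accepts only PARALLEL and ATOMIC; ORDERED queues are rejected. A hedged application-side sketch of setting up one atomic queue accordingly:

    #include <rte_eventdev.h>

    static int setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
    {
            struct rte_event_queue_conf qconf;

            rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
            /* The PMD default above is PARALLEL; switch to ATOMIC explicitly. */
            qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;

            return rte_event_queue_setup(dev_id, queue_id, &qconf);
    }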
+
+static void
+dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queue_id);
+}
+
+static void
+dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
+				 struct rte_event_port_conf *port_conf)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(port_id);
+
+	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
+	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+}
+
+static int
+dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+		      const struct rte_event_port_conf *port_conf)
+{
+	struct dpaa_eventdev *eventdev = dev->data->dev_private;
+
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(port_conf);
+	dev->data->ports[port_id] = &eventdev->ports[port_id];
+
+	return 0;
+}
+
+static void
+dpaa_event_port_release(void *port)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(port);
+}
+
+static int
+dpaa_event_port_link(struct rte_eventdev *dev, void *port,
+		     const uint8_t queues[], const uint8_t priorities[],
+		     uint16_t nb_links)
+{
+	struct dpaa_eventdev *priv = dev->data->dev_private;
+	struct dpaa_port *event_port = (struct dpaa_port *)port;
+	struct dpaa_eventq *event_queue;
+	uint8_t eventq_id;
+	int i;
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(priorities);
+
+	/* First check that the input configuration is valid */
+	for (i = 0; i < nb_links; i++) {
+		eventq_id = queues[i];
+		event_queue = &priv->evq_info[eventq_id];
+		if ((event_queue->event_queue_cfg
+			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
+			&& (event_queue->event_port)) {
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < nb_links; i++) {
+		eventq_id = queues[i];
+		event_queue = &priv->evq_info[eventq_id];
+		event_port->evq_info[i].event_queue_id = eventq_id;
+		event_port->evq_info[i].ch_id = event_queue->ch_id;
+		event_queue->event_port = port;
+	}
+
+	event_port->num_linked_evq = event_port->num_linked_evq + i;
+
+	return (int)i;
+}
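
Seen from the application, this handler services rte_event_port_link(); the return value is the number of queues actually linked. A hedged sketch (queue ids and priorities are illustrative, and this PMD ignores the priorities):

    #include <rte_eventdev.h>

    static int link_two_queues(uint8_t dev_id, uint8_t port_id)
    {
            uint8_t queues[2] = { 0, 1 };
            uint8_t prios[2] = { RTE_EVENT_DEV_PRIORITY_NORMAL,
                                 RTE_EVENT_DEV_PRIORITY_NORMAL };

            return rte_event_port_link(dev_id, port_id, queues, prios, 2);
    }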
+
+static int
+dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
+		       uint8_t queues[], uint16_t nb_links)
+{
+	int i;
+	uint8_t eventq_id;
+	struct dpaa_eventq *event_queue;
+	struct dpaa_eventdev *priv = dev->data->dev_private;
+	struct dpaa_port *event_port = (struct dpaa_port *)port;
+
+	if (!event_port->num_linked_evq)
+		return nb_links;
+
+	for (i = 0; i < nb_links; i++) {
+		eventq_id = queues[i];
+		event_port->evq_info[eventq_id].event_queue_id = -1;
+		event_port->evq_info[eventq_id].ch_id = 0;
+		event_queue = &priv->evq_info[eventq_id];
+		event_queue->event_port = NULL;
+	}
+
+	event_port->num_linked_evq = event_port->num_linked_evq - i;
+
+	return (int)i;
+}
+
+static int
+dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+				   const struct rte_eth_dev *eth_dev,
+				   uint32_t *caps)
+{
+	const char *ethdev_driver = eth_dev->device->driver->name;
+
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+
+	if (!strcmp(ethdev_driver, "net_dpaa"))
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
+	else
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+
+	return 0;
+}
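
An application reaches this hook via rte_event_eth_rx_adapter_caps_get() to decide whether the Rx adapter runs on a hardware internal port (DPAA ethdev) or falls back to the SW service. A hedged sketch:

    #include <stdio.h>
    #include <rte_eventdev.h>
    #include <rte_event_eth_rx_adapter.h>

    static void report_rx_adapter_mode(uint8_t dev_id, uint8_t eth_port_id)
    {
            uint32_t caps = 0;

            if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps))
                    return;
            printf("%s Rx adapter\n",
                   (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) ?
                   "internal-port (HW)" : "SW service");
    }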
+
+static int
+dpaa_event_eth_rx_adapter_queue_add(
+		const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev,
+		int32_t rx_queue_id,
+		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	struct dpaa_eventdev *eventdev = dev->data->dev_private;
+	uint8_t ev_qid = queue_conf->ev.queue_id;
+	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
+	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
+	int ret, i;
+
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	if (rx_queue_id == -1) {
+		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
+			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
+						     queue_conf);
+			if (ret) {
+				EVENTDEV_DRV_ERR(
+					"Event Queue attach failed:%d\n", ret);
+				goto detach_configured_queues;
+			}
+		}
+		return 0;
+	}
+
+	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
+	if (ret)
+		EVENTDEV_DRV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
+	return ret;
+
+detach_configured_queues:
+
+	for (i = (i - 1); i >= 0; i--)
+		dpaa_eth_eventq_detach(eth_dev, i);
+
+	return ret;
+}
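
The detach_configured_queues label implements the usual unwind idiom: on failure at queue i, everything attached in 0..i-1 is detached in reverse so no queue is left half-configured. The idiom in isolation, as a hedged sketch with caller-supplied attach()/detach():

    static int attach_all(int n, int (*attach)(int), void (*detach)(int))
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    ret = attach(i);
                    if (ret != 0) {
                            /* Roll back steps i-1 .. 0 in reverse order. */
                            while (--i >= 0)
                                    detach(i);
                            return ret;
                    }
            }
            return 0;
    }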
+
+static int
+dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+				    const struct rte_eth_dev *eth_dev,
+				    int32_t rx_queue_id)
+{
+	int ret, i;
+	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
+
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	if (rx_queue_id == -1) {
+		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
+			ret = dpaa_eth_eventq_detach(eth_dev, i);
+			if (ret)
+				EVENTDEV_DRV_ERR(
+					"Event Queue detach failed:%d\n", ret);
+		}
+
+		return 0;
+	}
+
+	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
+	if (ret)
+		EVENTDEV_DRV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
+	return ret;
+}
+
+static int
+dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
+				const struct rte_eth_dev *eth_dev)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(eth_dev);
+
+	return 0;
+}
+
+static int
+dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+			       const struct rte_eth_dev *eth_dev)
+{
+	EVENTDEV_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(eth_dev);
+
+	return 0;
+}
+
+static const struct rte_eventdev_ops dpaa_eventdev_ops = {
+	.dev_infos_get    = dpaa_event_dev_info_get,
+	.dev_configure    = dpaa_event_dev_configure,
+	.dev_start        = dpaa_event_dev_start,
+	.dev_stop         = dpaa_event_dev_stop,
+	.dev_close        = dpaa_event_dev_close,
+	.queue_def_conf   = dpaa_event_queue_def_conf,
+	.queue_setup      = dpaa_event_queue_setup,
+	.queue_release    = dpaa_event_queue_release,
+	.port_def_conf    = dpaa_event_port_default_conf_get,
+	.port_setup       = dpaa_event_port_setup,
+	.port_release       = dpaa_event_port_release,
+	.port_link        = dpaa_event_port_link,
+	.port_unlink      = dpaa_event_port_unlink,
+	.timeout_ticks    = dpaa_event_dequeue_timeout_ticks,
+	.eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
+	.eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
+	.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
+};
+
+static int
+dpaa_event_dev_create(const char *name)
+{
+	struct rte_eventdev *eventdev;
+	struct dpaa_eventdev *priv;
+
+	eventdev = rte_event_pmd_vdev_init(name,
+					   sizeof(struct dpaa_eventdev),
+					   rte_socket_id());
+	if (eventdev == NULL) {
+		EVENTDEV_DRV_ERR("Failed to create eventdev vdev %s", name);
+		goto fail;
+	}
+
+	eventdev->dev_ops       = &dpaa_eventdev_ops;
+	eventdev->enqueue       = dpaa_event_enqueue;
+	eventdev->enqueue_burst = dpaa_event_enqueue_burst;
+	eventdev->dequeue       = dpaa_event_dequeue;
+	eventdev->dequeue_burst = dpaa_event_dequeue_burst;
+
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	priv = eventdev->data->dev_private;
+	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
+
+	return 0;
+fail:
+	return -EFAULT;
+}
+
+static int
+dpaa_event_dev_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	EVENTDEV_DRV_LOG("Initializing %s", name);
+
+	return dpaa_event_dev_create(name);
+}
+
+static int
+dpaa_event_dev_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	EVENTDEV_DRV_LOG("Closing %s", name);
+
+	return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
+	.probe = dpaa_event_dev_probe,
+	.remove = dpaa_event_dev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
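
The PMD registers on the vdev bus as EVENTDEV_NAME_DPAA_PMD (event_dpaa1, see dpaa_eventdev.h), so an application instantiates it either with the EAL option --vdev=event_dpaa1 or programmatically. A hedged sketch of the latter (assumes a DPAA platform):

    #include <rte_bus_vdev.h>

    static int create_dpaa_eventdev(void)
    {
            /* Equivalent to passing --vdev=event_dpaa1 on the EAL
             * command line; no device arguments are needed. */
            return rte_vdev_init("event_dpaa1", NULL);
    }
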
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
new file mode 100644
index 00000000..918fe35c
--- /dev/null
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -0,0 +1,81 @@
+/*   SPDX-License-Identifier:        BSD-3-Clause
+ *   Copyright 2017 NXP
+ */
+
+#ifndef __DPAA_EVENTDEV_H__
+#define __DPAA_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include <rte_per_lcore.h>
+
+#define EVENTDEV_NAME_DPAA_PMD		event_dpaa1
+
+#define EVENTDEV_DRV_LOG(fmt, args...)	\
+		DPAA_EVENTDEV_INFO(fmt, ## args)
+#define EVENTDEV_DRV_FUNC_TRACE()	\
+		DPAA_EVENTDEV_DEBUG("%s() Called:\n", __func__)
+#define EVENTDEV_DRV_ERR(fmt, args...)	\
+		DPAA_EVENTDEV_ERR("%s(): " fmt "\n", __func__, ## args)
+
+#define DPAA_EVENT_MAX_PORTS			8
+#define DPAA_EVENT_MAX_QUEUES			16
+#define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT	1
+#define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT	(UINT32_MAX - 1)
+#define DPAA_EVENT_MAX_QUEUE_FLOWS		2048
+#define DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS	8
+#define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS	0
+#define DPAA_EVENT_MAX_EVENT_PORT		RTE_MAX_LCORE
+#define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH	8
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100UL
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID	((uint64_t)-1)
+#define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH	1
+#define DPAA_EVENT_MAX_NUM_EVENTS		(INT32_MAX - 1)
+
+#define DPAA_EVENT_DEV_CAP			\
+	(RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |	\
+	 RTE_EVENT_DEV_CAP_BURST_MODE)
+
+#define DPAA_EVENT_QUEUE_ATOMIC_FLOWS	0
+#define DPAA_EVENT_QUEUE_ORDER_SEQUENCES	2048
+
+#define RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP \
+		(RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT | \
+		RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ | \
+		RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID)
+
+struct dpaa_eventq {
+	/* Channel Id */
+	uint16_t ch_id;
+	/* Configuration provided by the user */
+	uint32_t event_queue_cfg;
+	uint32_t event_queue_id;
+	/* Event port */
+	void *event_port;
+};
+
+struct dpaa_port {
+	struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
+	uint8_t num_linked_evq;
+	uint8_t is_port_linked;
+	uint64_t timeout;
+};
+
+struct dpaa_eventdev {
+	struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
+	struct dpaa_port ports[DPAA_EVENT_MAX_PORTS];
+	uint32_t dequeue_timeout_ns;
+	uint32_t nb_events_limit;
+	uint8_t max_event_queues;
+	uint8_t nb_event_queues;
+	uint8_t nb_event_ports;
+	uint8_t resvd;
+	uint32_t nb_event_queue_flows;
+	uint32_t nb_event_port_dequeue_depth;
+	uint32_t nb_event_port_enqueue_depth;
+	uint32_t event_dev_cfg;
+};
+#endif /* __DPAA_EVENTDEV_H__ */
diff --git a/drivers/event/dpaa/rte_pmd_dpaa_event_version.map b/drivers/event/dpaa/rte_pmd_dpaa_event_version.map
new file mode 100644
index 00000000..179140fb
--- /dev/null
+++ b/drivers/event/dpaa/rte_pmd_dpaa_event_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+	local: *;
+};
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index f34eebfa..b26862cd 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -1,32 +1,5 @@
-#   BSD LICENSE
-#
-#   Copyright 2017 NXP.
-#
-#   Redistribution and use in source and binary forms, with or without
-#   modification, are permitted provided that the following conditions
-#   are met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in
-#       the documentation and/or other materials provided with the
-#       distribution.
-#     * Neither the name of NXP nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
 #
 
 include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index eeeb2312..c3e6fbff 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1,33 +1,7 @@
-/*-
- *   BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
- *   Copyright 2017 NXP.
+ *   Copyright 2017 NXP
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of NXP nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <assert.h>
@@ -52,7 +26,7 @@
 #include <rte_memory.h>
 #include <rte_pci.h>
 #include <rte_bus_vdev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_event_eth_rx_adapter.h>
 
 #include <fslmc_vfio.h>
@@ -61,6 +35,7 @@
 #include <dpaa2_hw_dpio.h>
 #include <dpaa2_ethdev.h>
 #include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
 #include <portal/dpaa2_hw_pvt.h>
 #include <mc/fsl_dpci.h>
 
@@ -72,6 +47,9 @@
  * Soft Event Flow is DPCI Instance
  */
 
+/* Dynamic logging identifier for the eventdev PMD */
+int dpaa2_logtype_event;
+
 static uint16_t
 dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			     uint16_t nb_events)
@@ -94,7 +72,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+			DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
 			return 0;
 		}
 	}
@@ -121,13 +99,13 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
 			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
 
-			if (event->impl_opaque) {
-				uint8_t dqrr_index = event->impl_opaque - 1;
+			if (event->mbuf->seqn) {
+				uint8_t dqrr_index = event->mbuf->seqn - 1;
 
 				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
 						      dqrr_index, 0);
-				DPAA2_PER_LCORE_DPIO->dqrr_size--;
-				DPAA2_PER_LCORE_DPIO->dqrr_held &=
+				DPAA2_PER_LCORE_DQRR_SIZE--;
+				DPAA2_PER_LCORE_DQRR_HELD &=
 					~(1 << dqrr_index);
 			}
 
@@ -144,7 +122,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 				if (!loop)
 					return num_tx;
 				frames_to_send = loop;
-				PMD_DRV_LOG(ERR, "Unable to allocate memory");
+				DPAA2_EVENTDEV_ERR("Unable to allocate memory");
 				goto send_partial;
 			}
 			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
@@ -189,9 +167,9 @@ RETRY:
 		 * case to avoid the problem.
 		 */
 		if (errno == EINTR) {
-			PMD_DRV_LOG(DEBUG, "epoll_wait fails\n");
+			DPAA2_EVENTDEV_DEBUG("epoll_wait fails\n");
 			if (i++ > 10)
-				PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
+				DPAA2_EVENTDEV_DEBUG("Dequeue burst Failed\n");
 		goto RETRY;
 		}
 	}
@@ -229,9 +207,9 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
 
 	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
 	rte_free(ev_temp);
-	ev->impl_opaque = dqrr_index + 1;
-	DPAA2_PER_LCORE_DPIO->dqrr_size++;
-	DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
+	ev->mbuf->seqn = dqrr_index + 1;
+	DPAA2_PER_LCORE_DQRR_SIZE++;
+	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
 }
 
 static uint16_t
@@ -249,23 +227,23 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+			DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
 			return 0;
 		}
 	}
-
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	/* Check if there are atomic contexts to be released */
-	while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
-		if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
-			dq = qbman_get_dqrr_from_idx(swp, i);
-			qbman_swp_dqrr_consume(swp, dq);
-			DPAA2_PER_LCORE_DPIO->dqrr_size--;
+	while (DPAA2_PER_LCORE_DQRR_SIZE) {
+		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
+			qbman_swp_dqrr_idx_consume(swp, i);
+			DPAA2_PER_LCORE_DQRR_SIZE--;
+			DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
+				DPAA2_INVALID_MBUF_SEQN;
 		}
 		i++;
 	}
-	DPAA2_PER_LCORE_DPIO->dqrr_held = 0;
+	DPAA2_PER_LCORE_DQRR_HELD = 0;
 
 	do {
 		dq = qbman_swp_dqrr_next(swp);
@@ -277,15 +255,15 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 			}
 			return num_pkts;
 		}
+		qbman_swp_prefetch_dqrr_next(swp);
 
 		fd = qbman_result_DQ_fd(dq);
-
 		rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
 		if (rxq) {
 			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
 		} else {
 			qbman_swp_dqrr_consume(swp, dq);
-			PMD_DRV_LOG(ERR, "Null Return VQ received\n");
+			DPAA2_EVENTDEV_ERR("Null Return VQ received\n");
 			return 0;
 		}
 
@@ -308,7 +286,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
@@ -326,14 +304,18 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
 	dev_info->max_event_priority_levels =
 		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
-	dev_info->max_event_ports = RTE_MAX_LCORE;
+	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
 	dev_info->max_event_port_dequeue_depth =
 		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
 	dev_info->max_event_port_enqueue_depth =
 		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
 	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
 	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
-		RTE_EVENT_DEV_CAP_BURST_MODE;
+		RTE_EVENT_DEV_CAP_BURST_MODE |
+		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+
 }
 
 static int
@@ -342,7 +324,7 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
 	struct rte_event_dev_config *conf = &dev->data->dev_conf;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
 	priv->nb_event_queues = conf->nb_event_queues;
@@ -352,14 +334,15 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
 	priv->event_dev_cfg = conf->event_dev_cfg;
 
-	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
+	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
+		dev->data->dev_id);
 	return 0;
 }
 
 static int
 dpaa2_eventdev_start(struct rte_eventdev *dev)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
@@ -369,7 +352,7 @@ dpaa2_eventdev_start(struct rte_eventdev *dev)
 static void
 dpaa2_eventdev_stop(struct rte_eventdev *dev)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 }
@@ -377,7 +360,7 @@ dpaa2_eventdev_stop(struct rte_eventdev *dev)
 static int
 dpaa2_eventdev_close(struct rte_eventdev *dev)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
@@ -388,7 +371,7 @@ static void
 dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 			      struct rte_event_queue_conf *queue_conf)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(queue_id);
@@ -403,7 +386,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 static void
 dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(queue_id);
@@ -417,7 +400,7 @@ dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	struct evq_info_t *evq_info =
 		&priv->evq_info[queue_id];
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
 
@@ -428,7 +411,7 @@ static void
 dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 			     struct rte_event_port_conf *port_conf)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(port_id);
@@ -440,12 +423,13 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
 	port_conf->enqueue_depth =
 		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+	port_conf->disable_implicit_release = 0;
 }
 
 static void
 dpaa2_eventdev_port_release(void *port)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(port);
 }
@@ -454,7 +438,7 @@ static int
 dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 			  const struct rte_event_port_conf *port_conf)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(port_conf);
 
@@ -480,7 +464,7 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
 	struct evq_info_t *evq_info;
 	int i;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	for (i = 0; i < nb_unlinks; i++) {
 		evq_info = &priv->evq_info[queues[i]];
@@ -506,7 +490,7 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
 	uint8_t channel_index;
 	int ret, i, n;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	for (i = 0; i < nb_links; i++) {
 		evq_info = &priv->evq_info[queues[i]];
@@ -518,7 +502,7 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
 			CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
 			evq_info->dpcon->dpcon_id, &channel_index);
 		if (ret < 0) {
-			PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
+			DPAA2_EVENTDEV_ERR("Static dequeue cfg failed with ret: %d\n",
 				    ret);
 			goto err;
 		}
@@ -551,7 +535,7 @@ dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 {
 	uint32_t scale = 1;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	*timeout_ticks = ns * scale;
@@ -562,7 +546,7 @@ dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 static void
 dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(f);
@@ -575,7 +559,7 @@ dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
 {
 	const char *ethdev_driver = eth_dev->device->driver->name;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
@@ -597,13 +581,13 @@ dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
 	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
 	int i, ret;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		ret = dpaa2_eth_eventq_attach(eth_dev, i,
 				dpcon_id, queue_conf);
 		if (ret) {
-			PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
+			DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
 				    ret);
 			goto fail;
 		}
@@ -627,7 +611,7 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
 	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
 	int ret;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	if (rx_queue_id == -1)
 		return dpaa2_eventdev_eth_queue_add_all(dev,
@@ -636,7 +620,7 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
 	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
 			dpcon_id, queue_conf);
 	if (ret) {
-		PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
+		DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
 		return ret;
 	}
 	return 0;
@@ -648,14 +632,14 @@ dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
 {
 	int i, ret;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		ret = dpaa2_eth_eventq_detach(eth_dev, i);
 		if (ret) {
-			PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
+			DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
 				    ret);
 			return ret;
 		}
@@ -671,14 +655,14 @@ dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
 {
 	int ret;
 
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	if (rx_queue_id == -1)
 		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);
 
 	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
 	if (ret) {
-		PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
+		DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
 		return ret;
 	}
 
@@ -689,7 +673,7 @@ static int
 dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
 			 const struct rte_eth_dev *eth_dev)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(eth_dev);
@@ -701,7 +685,7 @@ static int
 dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
 			const struct rte_eth_dev *eth_dev)
 {
-	PMD_DRV_FUNC_TRACE();
+	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(eth_dev);
@@ -758,7 +742,7 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
 					dpci_dev->token, i,
 					&rx_queue_cfg);
 		if (ret) {
-			PMD_DRV_LOG(ERR,
+			DPAA2_EVENTDEV_ERR(
 				    "set_rx_q failed with err code: %d", ret);
 			return ret;
 		}
@@ -779,7 +763,7 @@ dpaa2_eventdev_create(const char *name)
 					   sizeof(struct dpaa2_eventdev),
 					   rte_socket_id());
 	if (eventdev == NULL) {
-		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
+		DPAA2_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
 		goto fail;
 	}
 
@@ -813,7 +797,7 @@ dpaa2_eventdev_create(const char *name)
 
 		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
 		if (ret) {
-			PMD_DRV_LOG(ERR,
+			DPAA2_EVENTDEV_ERR(
 				    "dpci setup failed with err code: %d", ret);
 			return ret;
 		}
@@ -831,7 +815,7 @@ dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
-	PMD_DRV_LOG(INFO, "Initializing %s", name);
+	DPAA2_EVENTDEV_INFO("Initializing %s", name);
 	return dpaa2_eventdev_create(name);
 }
 
@@ -841,7 +825,7 @@ dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
-	PMD_DRV_LOG(INFO, "Closing %s", name);
+	DPAA2_EVENTDEV_INFO("Closing %s", name);
 
 	return rte_event_pmd_vdev_uninit(name);
 }
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index ae8e07e9..91c8f2a3 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -1,33 +1,8 @@
 /*
- *   BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
  *
- *   Copyright 2017 NXP.
+ *   Copyright 2017 NXP
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of NXP nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef __DPAA2_EVENTDEV_H__
@@ -41,18 +16,6 @@
 
 #define EVENTDEV_NAME_DPAA2_PMD		event_dpaa2
 
-#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV_DEBUG
-#define PMD_DRV_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
-#define PMD_DRV_FUNC_TRACE() do { } while (0)
-#endif
-
-#define PMD_DRV_ERR(fmt, args...) \
-	RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ## args)
-
 #define DPAA2_EVENT_DEFAULT_DPCI_PRIO 0
 
 #define DPAA2_EVENT_MAX_QUEUES			16
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
new file mode 100644
index 00000000..7d250c3f
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -0,0 +1,37 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _DPAA2_EVENTDEV_LOGS_H_
+#define _DPAA2_EVENTDEV_LOGS_H_
+
+extern int dpaa2_logtype_event;
+
+#define DPAA2_EVENTDEV_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_LOG(DEBUG, " >>")
+
+#define DPAA2_EVENTDEV_DEBUG(fmt, args...) \
+	DPAA2_EVENTDEV_LOG(DEBUG, fmt, ## args)
+#define DPAA2_EVENTDEV_INFO(fmt, args...) \
+	DPAA2_EVENTDEV_LOG(INFO, fmt, ## args)
+#define DPAA2_EVENTDEV_ERR(fmt, args...) \
+	DPAA2_EVENTDEV_LOG(ERR, fmt, ## args)
+#define DPAA2_EVENTDEV_WARN(fmt, args...) \
+	DPAA2_EVENTDEV_LOG(WARNING, fmt, ## args)
+
+/* Datapath logs: compiled out when below the compile-time log level */
+#define DPAA2_EVENTDEV_DP_LOG(level, fmt, args...) \
+	RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_EVENTDEV_DP_DEBUG(fmt, args...) \
+	DPAA2_EVENTDEV_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_EVENTDEV_DP_INFO(fmt, args...) \
+	DPAA2_EVENTDEV_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
+	DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_EVENTDEV_LOGS_H_ */
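
The dpaa2_logtype_event level can be raised at runtime once the logtype is registered. A hedged sketch ("pmd.event.dpaa2" is the assumed registered name, mirroring the "pmd.event.octeontx" registration later in this patch):

    #include <rte_log.h>

    static void enable_dpaa2_event_debug(void)
    {
            /* rte_log_register() returns the existing id if the name
             * was already registered by the PMD. */
            int lt = rte_log_register("pmd.event.dpaa2");

            if (lt >= 0)
                    rte_log_set_level(lt, RTE_LOG_DEBUG);
    }
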
diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
index 005e6234..f2377b98 100644
--- a/drivers/event/dpaa2/dpaa2_hw_dpcon.c
+++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -1,33 +1,7 @@
-/*-
- *   BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
- *   Copyright 2017 NXP.
+ *   Copyright 2017 NXP
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of NXP nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <unistd.h>
@@ -44,8 +18,9 @@
 #include <rte_cycles.h>
 #include <rte_kvargs.h>
 #include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 
+#include <fslmc_logs.h>
 #include <rte_fslmc.h>
 #include <mc/fsl_dpcon.h>
 #include <portal/dpaa2_hw_pvt.h>
diff --git a/drivers/event/meson.build b/drivers/event/meson.build
new file mode 100644
index 00000000..d7bc4854
--- /dev/null
+++ b/drivers/event/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['skeleton', 'sw', 'octeontx']
+std_deps = ['eventdev', 'kvargs']
+config_flag_fmt = 'RTE_LIBRTE_@0@_EVENTDEV_PMD'
+driver_name_fmt = 'rte_pmd_@0@_event'
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index fdf1b738..0e49efd8 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -1,33 +1,5 @@
-#   BSD LICENSE
-#
-#   Copyright(c) 2017 Cavium, Inc. All rights reserved.
-#   All rights reserved.
-#
-#   Redistribution and use in source and binary forms, with or without
-#   modification, are permitted provided that the following conditions
-#   are met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in
-#       the documentation and/or other materials provided with the
-#       distribution.
-#     * Neither the name of Cavium, Inc nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
 #
 
 include $(RTE_SDK)/mk/rte.vars.mk
@@ -41,11 +13,11 @@ CFLAGS += $(WERROR_FLAGS)
 CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
 CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
 
-LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx
-LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx -lrte_pmd_octeontx
+LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs
 LDLIBS += -lrte_bus_vdev
 
-EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map
+EXPORT_MAP := rte_pmd_octeontx_event_version.map
 
 LIBABIVER := 1
 
@@ -54,6 +26,7 @@ LIBABIVER := 1
 #
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev_selftest.c
 
 ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
 CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
diff --git a/drivers/event/octeontx/meson.build b/drivers/event/octeontx/meson.build
new file mode 100644
index 00000000..358fc9fc
--- /dev/null
+++ b/drivers/event/octeontx/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = files('ssovf_worker.c',
+		'ssovf_evdev.c',
+		'ssovf_evdev_selftest.c'
+)
+
+deps += ['mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_event_version.map b/drivers/event/octeontx/rte_pmd_octeontx_event_version.map
new file mode 100644
index 00000000..5352e7e3
--- /dev/null
+++ b/drivers/event/octeontx/rte_pmd_octeontx_event_version.map
@@ -0,0 +1,3 @@
+DPDK_17.05 {
+	local: *;
+};
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map b/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
deleted file mode 100644
index 5352e7e3..00000000
--- a/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_17.05 {
-	local: *;
-};
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 117b1453..a1086077 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium, Inc. 2017.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
 #include <inttypes.h>
@@ -36,8 +8,9 @@
 #include <rte_debug.h>
 #include <rte_dev.h>
 #include <rte_eal.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_event_eth_rx_adapter.h>
+#include <rte_kvargs.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
 #include <rte_malloc.h>
@@ -46,6 +19,17 @@
 
 #include "ssovf_evdev.h"
 
+int otx_logtype_ssovf;
+
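+/* Register the dynamic log type for this PMD; default level is NOTICE */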
+RTE_INIT(otx_ssovf_init_log);
+static void
+otx_ssovf_init_log(void)
+{
+	otx_logtype_ssovf = rte_log_register("pmd.event.octeontx");
+	if (otx_logtype_ssovf >= 0)
+		rte_log_set_level(otx_logtype_ssovf, RTE_LOG_NOTICE);
+}
+
 /* SSOPF Mailbox messages */
 
 struct ssovf_mbox_dev_info {
@@ -187,7 +171,11 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
 	dev_info->max_num_events =  edev->max_num_events;
 	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
 					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
-					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
+					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
+					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+					RTE_EVENT_DEV_CAP_NONSEQ_MODE;
 }
 
 static int
@@ -252,6 +240,7 @@ ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->new_event_threshold = edev->max_num_events;
 	port_conf->dequeue_depth = 1;
 	port_conf->enqueue_depth = 1;
+	port_conf->disable_implicit_release = 0;
 }
 
 static void
@@ -592,6 +581,15 @@ ssovf_close(struct rte_eventdev *dev)
 	return 0;
 }
 
+static int
+ssovf_selftest(const char *key __rte_unused, const char *value,
+		void *opaque)
+{
+	int *flag = opaque;
+	*flag = !!atoi(value);
+	return 0;
+}
+
 /* Initialize and register event driver with DPDK Application */
 static const struct rte_eventdev_ops ssovf_ops = {
 	.dev_infos_get    = ssovf_info_get,
@@ -612,6 +610,8 @@ static const struct rte_eventdev_ops ssovf_ops = {
 	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
 	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
 
+	.dev_selftest = test_eventdev_octeontx,
+
 	.dump             = ssovf_dump,
 	.dev_start        = ssovf_start,
 	.dev_stop         = ssovf_stop,
@@ -627,7 +627,14 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
 	struct rte_eventdev *eventdev;
 	static int ssovf_init_once;
 	const char *name;
+	const char *params;
 	int ret;
+	int selftest = 0;
+
+	static const char *const args[] = {
+		SSOVF_SELFTEST_ARG,
+		NULL
+	};
 
 	name = rte_vdev_device_name(vdev);
 	/* More than one instance is not supported */
@@ -636,6 +643,28 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
 		return -EINVAL;
 	}
 
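+	/* Parse optional devargs, e.g. --vdev="event_octeontx,selftest=1" */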
+	params = rte_vdev_device_args(vdev);
+	if (params != NULL && params[0] != '\0') {
+		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+		if (!kvlist) {
+			ssovf_log_info(
+				"Ignoring unsupported params supplied '%s'",
+				name);
+		} else {
+			int ret = rte_kvargs_process(kvlist,
+					SSOVF_SELFTEST_ARG,
+					ssovf_selftest, &selftest);
+			if (ret != 0) {
+				ssovf_log_err("%s: Error in selftest", name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
 	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
 				rte_socket_id());
 	if (eventdev == NULL) {
@@ -686,6 +715,8 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
 			edev->max_event_ports);
 
 	ssovf_init_once = 1;
+	if (selftest)
+		test_eventdev_octeontx();
 	return 0;
 
 error:
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index b093a3e7..d1825b4f 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium, Inc. 2017.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
 #ifndef __SSOVF_EVDEV_H__
@@ -41,22 +13,16 @@
 
 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
 
-#ifdef RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG
-#define ssovf_log_info(fmt, args...) \
-	RTE_LOG(INFO, EVENTDEV, "[%s] %s() " fmt "\n", \
-		RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
-#define ssovf_log_dbg(fmt, args...) \
-	RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() " fmt "\n", \
-		RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
-#else
-#define ssovf_log_info(fmt, args...)
-#define ssovf_log_dbg(fmt, args...)
-#endif
+#define SSOVF_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, otx_logtype_ssovf, \
+			"[%s] %s() " fmt "\n", \
+			RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
 
+#define ssovf_log_info(fmt, ...) SSOVF_LOG(INFO, fmt, ##__VA_ARGS__)
+#define ssovf_log_dbg(fmt, ...) SSOVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define ssovf_log_err(fmt, ...) SSOVF_LOG(ERR, fmt, ##__VA_ARGS__)
 #define ssovf_func_trace ssovf_log_dbg
-#define ssovf_log_err(fmt, args...) \
-	RTE_LOG(ERR, EVENTDEV, "[%s] %s() " fmt "\n", \
-		RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+#define ssovf_log_selftest ssovf_log_info
 
 #define SSO_MAX_VHGRP                     (64)
 #define SSO_MAX_VHWS                      (32)
@@ -114,6 +80,8 @@
 #define SSO_GRP_GET_PRIORITY              0x7
 #define SSO_GRP_SET_PRIORITY              0x8
 
+#define SSOVF_SELFTEST_ARG               ("selftest")
+
 /*
  * In Cavium OcteonTX SoC, all accesses to the device registers are
 * implicitly strongly ordered. So, the relaxed version of IO operation is
@@ -180,6 +148,8 @@ ssovf_pmd_priv(const struct rte_eventdev *eventdev)
 	return eventdev->data->dev_private;
 }
 
+extern int otx_logtype_ssovf;
+
 uint16_t ssows_enq(void *port, const struct rte_event *ev);
 uint16_t ssows_enq_burst(void *port,
 		const struct rte_event ev[], uint16_t nb_events);
@@ -196,5 +166,6 @@ uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
 		uint16_t nb_events, uint64_t timeout_ticks);
 void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
 void ssows_reset(struct ssows *ws);
+int test_eventdev_octeontx(void);
 
 #endif /* __SSOVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c
new file mode 100644
index 00000000..5e012a95
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_evdev_selftest.c
@@ -0,0 +1,1487 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+#include "ssovf_evdev.h"
+
+#define NUM_PACKETS (1 << 18)
+#define MAX_EVENTS  (16 * 1024)
+
+#define OCTEONTX_TEST_RUN(setup, teardown, test) \
+	octeontx_test_run(setup, teardown, test, #test)
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+	uint32_t flow_id;
+	uint8_t event_type;
+	uint8_t sub_event_type;
+	uint8_t sched_type;
+	uint8_t queue;
+	uint8_t port;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
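+/*
+ * Workers record the sequence number of each completed event here;
+ * seqn_list_check() then verifies that events retired in ingress order.
+ */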
+static inline void
+seqn_list_init(void)
+{
+	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+	memset(seqn_list, 0, sizeof(seqn_list));
+	seqn_list_index = 0;
+}
+
+static inline int
+seqn_list_update(int val)
+{
+	if (seqn_list_index >= NUM_PACKETS)
+		return -1;
+
+	seqn_list[seqn_list_index++] = val;
+	rte_smp_wmb();
+	return 0;
+}
+
+static inline int
+seqn_list_check(int limit)
+{
+	int i;
+
+	for (i = 0; i < limit; i++) {
+		if (seqn_list[i] != i) {
+			ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+struct test_core_param {
+	rte_atomic32_t *total_events;
+	uint64_t dequeue_tmo_ticks;
+	uint8_t port;
+	uint8_t sched_type;
+};
+
+static int
+testsuite_setup(void)
+{
+	const char *eventdev_name = "event_octeontx";
+
+	evdev = rte_event_dev_get_dev_id(eventdev_name);
+	if (evdev < 0) {
+		ssovf_log_dbg("%d: Eventdev %s not found - creating.",
+				__LINE__, eventdev_name);
+		if (rte_vdev_init(eventdev_name, NULL) < 0) {
+			ssovf_log_dbg("Error creating eventdev %s",
+					eventdev_name);
+			return -1;
+		}
+		evdev = rte_event_dev_get_dev_id(eventdev_name);
+		if (evdev < 0) {
+			ssovf_log_dbg("Error finding newly created eventdev");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_event_dev_close(evdev);
+}
+
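+/* Fill the device configuration with the maximum limits the PMD reports */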
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+			struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = info->max_event_ports;
+	dev_conf->nb_event_queues = info->max_event_queues;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth =
+			info->max_event_port_dequeue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit =
+			info->max_num_events;
+}
+
+enum {
+	TEST_EVENTDEV_SETUP_DEFAULT,
+	TEST_EVENTDEV_SETUP_PRIORITY,
+	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static inline int
+_eventdev_setup(int mode)
+{
+	int i, ret;
+	struct rte_event_dev_config dev_conf;
+	struct rte_event_dev_info info;
+	const char *pool_name = "evdev_octeontx_test_pool";
+
+	/* Create and destroy the pool for each test case to make it standalone */
+	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+					MAX_EVENTS,
+					0 /*MBUF_CACHE_SIZE*/,
+					0,
+					512, /* Use very small mbufs */
+					rte_socket_id());
+	if (!eventdev_test_mempool) {
+		ssovf_log_dbg("ERROR creating mempool");
+		return -1;
+	}
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
+			"ERROR max_num_events=%d < max_events=%d",
+				info.max_num_events, MAX_EVENTS);
+
+	devconf_set_default_sane_values(&dev_conf, &info);
+	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+	ret = rte_event_dev_configure(evdev, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+		if (queue_count > 8) {
+			ssovf_log_dbg(
+				"test expects a unique priority per queue");
+			return -ENOTSUP;
+		}
+
+		/* Configure event queues(0 to n) with
+		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+		 * RTE_EVENT_DEV_PRIORITY_LOWEST
+		 */
+		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
+				queue_count;
+		for (i = 0; i < (int)queue_count; i++) {
+			struct rte_event_queue_conf queue_conf;
+
+			ret = rte_event_queue_default_conf_get(evdev, i,
+						&queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
+					i);
+			queue_conf.priority = i * step;
+			ret = rte_event_queue_setup(evdev, i, &queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+					i);
+		}
+
+	} else {
+		/* Configure event queues with default priority */
+		for (i = 0; i < (int)queue_count; i++) {
+			ret = rte_event_queue_setup(evdev, i, NULL);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+					i);
+		}
+	}
+	/* Configure event ports */
+	uint32_t port_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&port_count), "Port count get failed");
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_setup(evdev, i, NULL);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
+				i);
+	}
+
+	ret = rte_event_dev_start(evdev);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+	return 0;
+}
+
+static inline int
+eventdev_setup(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static inline int
+eventdev_setup_priority(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
+}
+
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
+static inline void
+eventdev_teardown(void)
+{
+	rte_event_dev_stop(evdev);
+	rte_mempool_free(eventdev_test_mempool);
+}
+
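+/*
+ * Mirror the event attributes into the mbuf data area so they can be
+ * cross-checked against the event fields after dequeue.
+ */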
+static inline void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+			uint32_t flow_id, uint8_t event_type,
+			uint8_t sub_event_type, uint8_t sched_type,
+			uint8_t queue, uint8_t port)
+{
+	struct event_attr *attr;
+
+	/* Store the event attributes in mbuf for future reference */
+	attr = rte_pktmbuf_mtod(m, struct event_attr *);
+	attr->flow_id = flow_id;
+	attr->event_type = event_type;
+	attr->sub_event_type = sub_event_type;
+	attr->sched_type = sched_type;
+	attr->queue = queue;
+	attr->port = port;
+
+	ev->flow_id = flow_id;
+	ev->sub_event_type = sub_event_type;
+	ev->event_type = event_type;
+	/* Inject the new event */
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = sched_type;
+	ev->queue_id = queue;
+	ev->mbuf = m;
+}
+
+static inline int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+		uint8_t sched_type, uint8_t queue, uint8_t port,
+		unsigned int events)
+{
+	struct rte_mbuf *m;
+	unsigned int i;
+
+	for (i = 0; i < events; i++) {
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+		m->seqn = i;
+		update_event_and_validation_attr(m, &ev, flow_id, event_type,
+			sub_event_type, sched_type, queue, port);
+		rte_event_enqueue_burst(evdev, port, &ev, 1);
+	}
+	return 0;
+}
+
+static inline int
+check_excess_events(uint8_t port)
+{
+	int i;
+	uint16_t valid_event;
+	struct rte_event ev;
+
+	/* Check for excess events, try for a few times and exit */
+	for (i = 0; i < 32; i++) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+		RTE_TEST_ASSERT_SUCCESS(valid_event,
+				"Unexpected valid event=%d", ev.mbuf->seqn);
+	}
+	return 0;
+}
+
+static inline int
+generate_random_events(const unsigned int total_events)
+{
+	struct rte_event_dev_info info;
+	unsigned int i;
+	int ret;
+
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	for (i = 0; i < total_events; i++) {
+		ret = inject_events(
+			rte_rand() % info.max_event_queue_flows /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			rte_rand() % queue_count /* queue */,
+			0 /* port */,
+			1 /* events */);
+		if (ret)
+			return -1;
+	}
+	return ret;
+}
+
+
+static inline int
+validate_event(struct rte_event *ev)
+{
+	struct event_attr *attr;
+
+	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+			"flow_id mismatch enq=%d deq =%d",
+			attr->flow_id, ev->flow_id);
+	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+			"event_type mismatch enq=%d deq =%d",
+			attr->event_type, ev->event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+			"sub_event_type mismatch enq=%d deq =%d",
+			attr->sub_event_type, ev->sub_event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+			"sched_type mismatch enq=%d deq =%d",
+			attr->sched_type, ev->sched_type);
+	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+			"queue mismatch enq=%d deq =%d",
+			attr->queue, ev->queue_id);
+	return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+				 struct rte_event *ev);
+
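+/*
+ * Dequeue total_events from the given port, validating each event against
+ * the attributes stored in its mbuf; give up if UINT16_MAX consecutive
+ * polls return nothing, which indicates a scheduler deadlock.
+ */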
+static inline int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+	int ret;
+	uint16_t valid_event;
+	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+	struct rte_event ev;
+
+	while (1) {
+		if (++forward_progress_cnt > UINT16_MAX) {
+			ssovf_log_dbg("Detected deadlock");
+			return -1;
+		}
+
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		forward_progress_cnt = 0;
+		ret = validate_event(&ev);
+		if (ret)
+			return -1;
+
+		if (fn != NULL) {
+			ret = fn(index, port, &ev);
+			RTE_TEST_ASSERT_SUCCESS(ret,
+				"Failed to validate test specific event");
+		}
+
+		++index;
+
+		rte_pktmbuf_free(ev.mbuf);
+		if (++events >= total_events)
+			break;
+	}
+
+	return check_excess_events(port);
+}
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+	RTE_SET_USED(port);
+	RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
+			index, ev->mbuf->seqn);
+	return 0;
+}
+
+static inline int
+test_simple_enqdeq(uint8_t sched_type)
+{
+	int ret;
+
+	ret = inject_events(0 /*flow_id */,
+				RTE_EVENT_TYPE_CPU /* event_type */,
+				0 /* sub_event_type */,
+				sched_type,
+				0 /* queue */,
+				0 /* port */,
+				MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_ordered(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue, verify the enqueued event attributes using a single
+ * event port (port 0)
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+	int ret;
+
+	ret = generate_random_events(MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+/*
+ * Inject MAX_EVENTS events over queues 0..queue_count-1 using a modulo
+ * distribution
+ *
+ * For example, Inject 32 events over 0..7 queues
+ * enqueue events 0, 8, 16, 24 in queue 0
+ * enqueue events 1, 9, 17, 25 in queue 1
+ * ..
+ * ..
+ * enqueue events 7, 15, 23, 31 in queue 7
+ *
+ * On dequeue, validate that the events come in 0,8,16,24,1,9,17,25..,7,15,23,31
+ * order, from queue0 (highest priority) to queue7 (lowest priority)
+ */
+static int
+validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+	uint32_t range = MAX_EVENTS / queue_count;
+	uint32_t expected_val = (index % range) * queue_count;
+
+	expected_val += ev->queue_id;
+	RTE_SET_USED(port);
+	RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
+	"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+			ev->mbuf->seqn, index, expected_val, range,
+			queue_count, MAX_EVENTS);
+	return 0;
+}
+
+static int
+test_multi_queue_priority(void)
+{
+	uint8_t queue;
+	struct rte_mbuf *m;
+	int i, max_evts_roundoff;
+
+	/* See validate_queue_priority() for the priority validation logic */
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+	max_evts_roundoff  = MAX_EVENTS / queue_count;
+	max_evts_roundoff *= queue_count;
+
+	for (i = 0; i < max_evts_roundoff; i++) {
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+		m->seqn = i;
+		queue = i % queue_count;
+		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
+			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
+		rte_event_enqueue_burst(evdev, 0, &ev, 1);
+	}
+
+	return consume_events(0, max_evts_roundoff, validate_queue_priority);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	rte_atomic32_t *total_events = param->total_events;
+	int ret;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		ret = validate_event(&ev);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+		rte_pktmbuf_free(ev.mbuf);
+		rte_atomic32_sub(total_events, 1);
+	}
+	return 0;
+}
+
+static inline int
+wait_workers_to_join(int lcore, const rte_atomic32_t *count)
+{
+	uint64_t cycles, print_cycles;
+
+	print_cycles = cycles = rte_get_timer_cycles();
+	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
+		uint64_t new_cycles = rte_get_timer_cycles();
+
+		if (new_cycles - print_cycles > rte_get_timer_hz()) {
+			ssovf_log_dbg("\r%s: events %d", __func__,
+				rte_atomic32_read(count));
+			print_cycles = new_cycles;
+		}
+		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+			ssovf_log_dbg(
+				"%s: No progress for 10 seconds, suspected deadlock (%d)",
+				__func__,
+				rte_atomic32_read(count));
+			rte_event_dev_dump(evdev, stdout);
+			cycles = new_cycles;
+			return -1;
+		}
+	}
+	rte_eal_mp_wait_lcore();
+	return 0;
+}
+
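+/*
+ * Launch the first worker (port 0) on the next available lcore and the
+ * remaining workers on successive lcores (ports 1..nb_workers-1), then
+ * wait for them to drain total_events or report a deadlock.
+ */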
+static inline int
+launch_workers_and_wait(int (*master_worker)(void *),
+			int (*slave_workers)(void *), uint32_t total_events,
+			uint8_t nb_workers, uint8_t sched_type)
+{
+	uint8_t port = 0;
+	int w_lcore;
+	int ret;
+	struct test_core_param *param;
+	rte_atomic32_t atomic_total_events;
+	uint64_t dequeue_tmo_ticks;
+
+	if (!nb_workers)
+		return 0;
+
+	rte_atomic32_set(&atomic_total_events, total_events);
+	seqn_list_init();
+
+	param = malloc(sizeof(struct test_core_param) * nb_workers);
+	if (!param)
+		return -1;
+
+	ret = rte_event_dequeue_timeout_ticks(evdev,
+		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
+	if (ret) {
+		free(param);
+		return -1;
+	}
+
+	param[0].total_events = &atomic_total_events;
+	param[0].sched_type = sched_type;
+	param[0].port = 0;
+	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+	rte_smp_wmb();
+
+	w_lcore = rte_get_next_lcore(
+			/* start core */ -1,
+			/* skip master */ 1,
+			/* wrap */ 0);
+	rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+	for (port = 1; port < nb_workers; port++) {
+		param[port].total_events = &atomic_total_events;
+		param[port].sched_type = sched_type;
+		param[port].port = port;
+		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+		rte_smp_wmb();
+		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+	}
+
+	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
+	free(param);
+	return ret;
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+	const unsigned int total_events = MAX_EVENTS;
+	uint32_t nr_ports;
+	int ret;
+
+	ret = generate_random_events(total_events);
+	if (ret)
+		return -1;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
+			nr_ports, rte_lcore_count() - 1);
+		return 0;
+	}
+
+	return launch_workers_and_wait(worker_multi_port_fn,
+					worker_multi_port_fn, total_events,
+					nr_ports, 0xff /* invalid */);
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+			struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
+				"queue mismatch enq=%d deq =%d",
+				port, ev->queue_id);
+	return 0;
+}
+
+/*
+ * Link queue x to port x and verify the link by checking that events
+ * dequeued on port x have queue_id == x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+	int i, nr_links, ret;
+
+	uint32_t port_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&port_count), "Port count get failed");
+
+	/* Unlink all connections that were created in eventdev_setup */
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_unlink(evdev, i, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0,
+				"Failed to unlink all queues port=%d", i);
+	}
+
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	nr_links = RTE_MIN(port_count, queue_count);
+	const unsigned int total_events = MAX_EVENTS / nr_links;
+
+	/* Link queue x to port x and inject events to queue x through port x */
+	for (i = 0; i < nr_links; i++) {
+		uint8_t queue = (uint8_t)i;
+
+		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+		ret = inject_events(
+			0x100 /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			queue /* queue */,
+			i /* port */,
+			total_events /* events */);
+		if (ret)
+			return -1;
+	}
+
+	/* Verify that the events were generated from the correct queue */
+	for (i = 0; i < nr_links; i++) {
+		ret = consume_events(i /* port */, total_events,
+				validate_queue_to_port_single_link);
+		if (ret)
+			return -1;
+	}
+
+	return 0;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+			struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+				"queue mismatch enq=%d deq =%d",
+				port, ev->queue_id);
+	return 0;
+}
+
+/*
+ * Link all even-numbered queues to port 0 and all odd-numbered queues to
+ * port 1, then verify the link connections on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+	int ret, port0_events = 0, port1_events = 0;
+	uint8_t queue, port;
+	uint32_t nr_queues = 0;
+	uint32_t nr_ports = 0;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &nr_queues), "Queue count get failed");
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+
+	if (nr_ports < 2) {
+		ssovf_log_dbg("%s: Not enough ports to test ports=%d",
+				__func__, nr_ports);
+		return 0;
+	}
+
+	/* Unlink all connections that were created in eventdev_setup */
+	for (port = 0; port < nr_ports; port++) {
+		ret = rte_event_port_unlink(evdev, port, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+					port);
+	}
+
+	const unsigned int total_events = MAX_EVENTS / nr_queues;
+
+	/* Link all even-numbered queues to port 0 and odd-numbered queues to port 1 */
+	for (queue = 0; queue < nr_queues; queue++) {
+		port = queue & 0x1;
+		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+					queue, port);
+
+		ret = inject_events(
+			0x100 /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			queue /* queue */,
+			port /* port */,
+			total_events /* events */);
+		if (ret)
+			return -1;
+
+		if (port == 0)
+			port0_events += total_events;
+		else
+			port1_events += total_events;
+	}
+
+	ret = consume_events(0 /* port */, port0_events,
+				validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+	ret = consume_events(1 /* port */, port1_events,
+				validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+
+	return 0;
+}
+
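+/*
+ * Two-stage pipeline keyed on sub_event_type: stage 0 events are moved to
+ * a single flow and forwarded to stage 1 with the requested sched type;
+ * stage 1 events have their sequence numbers recorded before being freed.
+ */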
+static int
+worker_flow_based_pipeline(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	uint8_t new_sched_type = param->sched_type;
+	rte_atomic32_t *total_events = param->total_events;
+	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+					dequeue_tmo_ticks);
+		if (!valid_event)
+			continue;
+
+		/* Events from stage 0 */
+		if (ev.sub_event_type == 0) {
+			/* Move to atomic flow to maintain the ordering */
+			ev.flow_id = 0x2;
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.sub_event_type = 1; /* stage 1 */
+			ev.sched_type = new_sched_type;
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
+			if (seqn_list_update(ev.mbuf->seqn) == 0) {
+				rte_pktmbuf_free(ev.mbuf);
+				rte_atomic32_sub(total_events, 1);
+			} else {
+				ssovf_log_dbg("Failed to update seqn_list");
+				return -1;
+			}
+		} else {
+			ssovf_log_dbg("Invalid ev.sub_event_type = %d",
+					ev.sub_event_type);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int
+test_multiport_flow_sched_type_test(uint8_t in_sched_type,
+			uint8_t out_sched_type)
+{
+	const unsigned int total_events = MAX_EVENTS;
+	uint32_t nr_ports;
+	int ret;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
+			nr_ports, rte_lcore_count() - 1);
+		return 0;
+	}
+
+	/* Inject events with m->seqn from 0 to total_events - 1 */
+	ret = inject_events(
+		0x1 /*flow_id */,
+		RTE_EVENT_TYPE_CPU /* event_type */,
+		0 /* sub_event_type (stage 0) */,
+		in_sched_type,
+		0 /* queue */,
+		0 /* port */,
+		total_events /* events */);
+	if (ret)
+		return -1;
+
+	ret = launch_workers_and_wait(worker_flow_based_pipeline,
+					worker_flow_based_pipeline,
+					total_events, nr_ports, out_sched_type);
+	if (ret)
+		return -1;
+
+	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		/* Check whether the event ingress order was maintained */
+		return seqn_list_check(total_events);
+	}
+	return 0;
+}
+
+
+/* Multi port ordered to atomic transaction */
+static int
+test_multi_port_flow_ordered_to_atomic(void)
+{
+	/* Ingress event order test */
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+				RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_ordered_to_ordered(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+				RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_ordered_to_parallel(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+				RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_atomic_to_atomic(void)
+{
+	/* Ingress event order test */
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+				RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_atomic_to_ordered(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+				RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_atomic_to_parallel(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+				RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_parallel_to_atomic(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+				RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_parallel_to_ordered(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+				RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_parallel_to_parallel(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+				RTE_SCHED_TYPE_PARALLEL);
+}
+
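+/*
+ * Same two-stage pipeline as above, but the stage is identified by the
+ * event queue (group) rather than by sub_event_type.
+ */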
+static int
+worker_group_based_pipeline(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	uint8_t new_sched_type = param->sched_type;
+	rte_atomic32_t *total_events = param->total_events;
+	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+					dequeue_tmo_ticks);
+		if (!valid_event)
+			continue;
+
+		/* Events from stage 0(group 0) */
+		if (ev.queue_id == 0) {
+			/* Move to atomic flow to maintain the ordering */
+			ev.flow_id = 0x2;
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.sched_type = new_sched_type;
+			ev.queue_id = 1; /* Stage 1*/
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
+			if (seqn_list_update(ev.mbuf->seqn) == 0) {
+				rte_pktmbuf_free(ev.mbuf);
+				rte_atomic32_sub(total_events, 1);
+			} else {
+				ssovf_log_dbg("Failed to update seqn_list");
+				return -1;
+			}
+		} else {
+			ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int
+test_multiport_queue_sched_type_test(uint8_t in_sched_type,
+			uint8_t out_sched_type)
+{
+	const unsigned int total_events = MAX_EVENTS;
+	uint32_t nr_ports;
+	int ret;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+	if (queue_count < 2 || !nr_ports) {
+		ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
+			 __func__, queue_count, nr_ports,
+			 rte_lcore_count() - 1);
+		return 0;
+	}
+
+	/* Inject events with m->seqn from 0 to total_events - 1 */
+	ret = inject_events(
+		0x1 /*flow_id */,
+		RTE_EVENT_TYPE_CPU /* event_type */,
+		0 /* sub_event_type (stage 0) */,
+		in_sched_type,
+		0 /* queue */,
+		0 /* port */,
+		total_events /* events */);
+	if (ret)
+		return -1;
+
+	ret = launch_workers_and_wait(worker_group_based_pipeline,
+					worker_group_based_pipeline,
+					total_events, nr_ports, out_sched_type);
+	if (ret)
+		return -1;
+
+	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		/* Check whether the event ingress order was maintained */
+		return seqn_list_check(total_events);
+	}
+	return 0;
+}
+
+static int
+test_multi_port_queue_ordered_to_atomic(void)
+{
+	/* Ingress event order test */
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+				RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_ordered_to_ordered(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+				RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_ordered_to_parallel(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+				RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_atomic_to_atomic(void)
+{
+	/* Ingress event order test */
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+				RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_atomic_to_ordered(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+				RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_atomic_to_parallel(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+				RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_parallel_to_atomic(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+				RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_parallel_to_ordered(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+				RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_parallel_to_parallel(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+				RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	rte_atomic32_t *total_events = param->total_events;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		if (ev.sub_event_type == 255) { /* last stage */
+			rte_pktmbuf_free(ev.mbuf);
+			rte_atomic32_sub(total_events, 1);
+		} else {
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.sub_event_type++;
+			ev.sched_type =
+				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		}
+	}
+	return 0;
+}
+
+static int
+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
+{
+	uint32_t nr_ports;
+	int ret;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
+			nr_ports, rte_lcore_count() - 1);
+		return 0;
+	}
+
+	/* Inject events with m->seqn from 0 to MAX_EVENTS - 1 */
+	ret = inject_events(
+		0x1 /*flow_id */,
+		RTE_EVENT_TYPE_CPU /* event_type */,
+		0 /* sub_event_type (stage 0) */,
+		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
+		0 /* queue */,
+		0 /* port */,
+		MAX_EVENTS /* events */);
+	if (ret)
+		return -1;
+
+	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
+					 0xff /* invalid */);
+}
+
+/* Flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_flow_max_stages_random_sched_type(void)
+{
+	return launch_multi_port_max_stages_random_sched_type(
+		worker_flow_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+	uint8_t nr_queues = queue_count;
+	rte_atomic32_t *total_events = param->total_events;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		if (ev.queue_id == nr_queues - 1) { /* last stage */
+			rte_pktmbuf_free(ev.mbuf);
+			rte_atomic32_sub(total_events, 1);
+		} else {
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.queue_id++;
+			ev.sched_type =
+				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		}
+	}
+	return 0;
+}
+
+/* Queue based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_queue_max_stages_random_sched_type(void)
+{
+	return launch_multi_port_max_stages_random_sched_type(
+		worker_queue_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+	uint8_t nr_queues = queue_count;
+	rte_atomic32_t *total_events = param->total_events;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		if (ev.queue_id == nr_queues - 1) { /* Last stage */
+			rte_pktmbuf_free(ev.mbuf);
+			rte_atomic32_sub(total_events, 1);
+		} else {
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.queue_id++;
+			ev.sub_event_type = rte_rand() % 256;
+			ev.sched_type =
+				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		}
+	}
+	return 0;
+}
+
+/* Queue and flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_mixed_max_stages_random_sched_type(void)
+{
+	return launch_multi_port_max_stages_random_sched_type(
+		worker_mixed_pipeline_max_stages_rand_sched_type);
+}
+
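+/*
+ * Producer: enqueue NUM_PACKETS new events with increasing sequence
+ * numbers on a single ordered flow, retrying until the mempool has room.
+ */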
+static int
+worker_ordered_flow_producer(void *arg)
+{
+	struct test_core_param *param = arg;
+	uint8_t port = param->port;
+	struct rte_mbuf *m;
+	int counter = 0;
+
+	while (counter < NUM_PACKETS) {
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		if (m == NULL)
+			continue;
+
+		m->seqn = counter++;
+
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		ev.flow_id = 0x1; /* Generate a fat flow */
+		ev.sub_event_type = 0;
+		/* Inject the new event */
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.event_type = RTE_EVENT_TYPE_CPU;
+		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+		ev.queue_id = 0;
+		ev.mbuf = m;
+		rte_event_enqueue_burst(evdev, port, &ev, 1);
+	}
+
+	return 0;
+}
+
+static inline int
+test_producer_consumer_ingress_order_test(int (*fn)(void *))
+{
+	uint32_t nr_ports;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (rte_lcore_count() < 3 || nr_ports < 2) {
+		ssovf_log_dbg("### Not enough cores for %s test.", __func__);
+		return 0;
+	}
+
+	launch_workers_and_wait(worker_ordered_flow_producer, fn,
+				NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
+	/* Check whether the event ingress order was maintained */
+	return seqn_list_check(NUM_PACKETS);
+}
+
+/* Flow based producer consumer ingress order test */
+static int
+test_flow_producer_consumer_ingress_order_test(void)
+{
+	return test_producer_consumer_ingress_order_test(
+				worker_flow_based_pipeline);
+}
+
+/* Queue based producer consumer ingress order test */
+static int
+test_queue_producer_consumer_ingress_order_test(void)
+{
+	return test_producer_consumer_ingress_order_test(
+				worker_group_based_pipeline);
+}
+
+static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
+		int (*test)(void), const char *name)
+{
+	if (setup() < 0) {
+		ssovf_log_selftest("Error setting up test %s", name);
+		unsupported++;
+	} else {
+		if (test() < 0) {
+			failed++;
+			ssovf_log_selftest("%s Failed", name);
+		} else {
+			passed++;
+			ssovf_log_selftest("%s Passed", name);
+		}
+	}
+
+	total++;
+	tdown();
+}
+
+int
+test_eventdev_octeontx(void)
+{
+	testsuite_setup();
+
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_simple_enqdeq_ordered);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_simple_enqdeq_atomic);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_simple_enqdeq_parallel);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_queue_enq_single_port_deq);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_queue_enq_multi_port_deq);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_queue_to_port_single_link);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_queue_to_port_multi_link);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_ordered_to_atomic);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_ordered_to_ordered);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_ordered_to_parallel);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_atomic_to_atomic);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_atomic_to_ordered);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_atomic_to_parallel);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_parallel_to_atomic);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_parallel_to_ordered);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_parallel_to_parallel);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_ordered_to_atomic);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_ordered_to_ordered);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_ordered_to_parallel);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_atomic_to_atomic);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_atomic_to_ordered);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_atomic_to_parallel);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_parallel_to_atomic);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_parallel_to_ordered);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_parallel_to_parallel);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_flow_max_stages_random_sched_type);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_queue_max_stages_random_sched_type);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_port_mixed_max_stages_random_sched_type);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_flow_producer_consumer_ingress_order_test);
+	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_queue_producer_consumer_ingress_order_test);
+	OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
+			test_multi_queue_priority);
+	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+			test_multi_port_flow_ordered_to_atomic);
+	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+			test_multi_port_queue_ordered_to_atomic);
+
+	ssovf_log_selftest("Total tests   : %d", total);
+	ssovf_log_selftest("Passed        : %d", passed);
+	ssovf_log_selftest("Failed        : %d", failed);
+	ssovf_log_selftest("Not supported : %d", unsupported);
+
+	testsuite_teardown();
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 5e17c7b8..753c1e9f 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium, Inc. 2017.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
 #include "ssovf_worker.h"
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index bf76ac88..d55018a9 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -1,36 +1,7 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium, Inc. 2017.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
-
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 
@@ -53,7 +24,7 @@ enum {
 /* SSO Operations */
 
 static __rte_always_inline struct rte_mbuf *
-ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
 {
 	struct rte_mbuf *mbuf;
 	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
@@ -69,7 +40,7 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
 	mbuf->data_len = mbuf->pkt_len;
 	mbuf->nb_segs = 1;
 	mbuf->ol_flags = 0;
-	mbuf->port = port_id;
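+	/* Translate the channel info carried in the event to an ethdev port */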
+	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
 	rte_mbuf_refcnt_set(mbuf, 1);
 	return mbuf;
 }
@@ -89,7 +60,7 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
 	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
 	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
 		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
-				(ev->event >> 20) & 0xF);
+				(ev->event >> 20) & 0x7F);
 	} else {
 		ev->u64 = get_work1;
 	}
diff --git a/drivers/event/opdl/Makefile b/drivers/event/opdl/Makefile
new file mode 100644
index 00000000..cea8118d
--- /dev/null
+++ b/drivers/event/opdl/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_opdl_event.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# for older GCC versions, allow us to initialize an event using
+# designated initializers.
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
+CFLAGS += -Wno-missing-field-initializers
+endif
+endif
+
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_kvargs
+LDLIBS += -lrte_bus_vdev -lrte_mbuf -lrte_mempool
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_evdev_opdl_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_evdev_init.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_evdev_xstats.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_test.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
new file mode 100644
index 00000000..77083691
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -0,0 +1,769 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_lcore.h>
+#include <rte_memzone.h>
+#include <rte_kvargs.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+
+#include "opdl_evdev.h"
+#include "opdl_ring.h"
+#include "opdl_log.h"
+
+#define EVENTDEV_NAME_OPDL_PMD event_opdl
+#define NUMA_NODE_ARG "numa_node"
+#define DO_VALIDATION_ARG "do_validation"
+#define DO_TEST_ARG "self_test"
+
+
+static void
+opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
+
+uint16_t
+opdl_event_enqueue_burst(void *port,
+			 const struct rte_event ev[],
+			 uint16_t num)
+{
+	struct opdl_port *p = port;
+
+	if (unlikely(!p->opdl->data->dev_started))
+		return 0;
+
+
+	/* either rx_enqueue or disclaim */
+	return p->enq(p, ev, num);
+}
+
+uint16_t
+opdl_event_enqueue(void *port, const struct rte_event *ev)
+{
+	struct opdl_port *p = port;
+
+	if (unlikely(!p->opdl->data->dev_started))
+		return 0;
+
+
+	return p->enq(p, ev, 1);
+}
+
+uint16_t
+opdl_event_dequeue_burst(void *port,
+			 struct rte_event *ev,
+			 uint16_t num,
+			 uint64_t wait)
+{
+	struct opdl_port *p = (void *)port;
+
+	RTE_SET_USED(wait);
+
+	if (unlikely(!p->opdl->data->dev_started))
+		return 0;
+
+	/* This function pointer can point to tx_dequeue or claim */
+	return p->deq(p, ev, num);
+}
+
+uint16_t
+opdl_event_dequeue(void *port,
+		   struct rte_event *ev,
+		   uint64_t wait)
+{
+	struct opdl_port *p = (void *)port;
+
+	if (unlikely(!p->opdl->data->dev_started))
+		return 0;
+
+	RTE_SET_USED(wait);
+
+	return p->deq(p, ev, 1);
+}
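+/*
+ * Illustrative call path (editor's sketch, not part of the driver):
+ * applications reach the handlers above through the generic eventdev API,
+ * e.g.:
+ *
+ *	struct rte_event ev[32];
+ *	uint16_t n = rte_event_dequeue_burst(dev_id, port_id, ev, 32, 0);
+ *	(process events, set each ev[i].queue_id to the next queue)
+ *	rte_event_enqueue_burst(dev_id, port_id, ev, n);
+ */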
+
+static int
+opdl_port_link(struct rte_eventdev *dev,
+	       void *port,
+	       const uint8_t queues[],
+	       const uint8_t priorities[],
+	       uint16_t num)
+{
+	struct opdl_port *p = port;
+
+	RTE_SET_USED(priorities);
+
+	if (unlikely(dev->data->dev_started)) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "Attempt to link queue (%u) to port %d while device started\n",
+			     dev->data->dev_id,
+				queues[0],
+				p->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	/* Max of 1 queue per port */
+	if (num > 1) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "Attempt to link more than one queue (%u) to port %d requested\n",
+			     dev->data->dev_id,
+				num,
+				p->id);
+		rte_errno = -EDQUOT;
+		return 0;
+	}
+
+	if (!p->configured) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "port %d not configured, cannot link to %u\n",
+			     dev->data->dev_id,
+				p->id,
+				queues[0]);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	if (p->external_qid != OPDL_INVALID_QID) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "port %d already linked to queue %u, cannot link to %u\n",
+			     dev->data->dev_id,
+				p->id,
+				p->external_qid,
+				queues[0]);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	p->external_qid = queues[0];
+
+	return 1;
+}
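+/*
+ * Example (editor's sketch): OPDL is a static pipeline, so each port links
+ * to exactly one queue and the priority argument is ignored, e.g.:
+ *
+ *	uint8_t q = 2;
+ *	uint8_t prio = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ *	int n = rte_event_port_link(dev_id, port_id, &q, &prio, 1);
+ *	(n == 1 on success)
+ */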
+
+static int
+opdl_port_unlink(struct rte_eventdev *dev,
+		 void *port,
+		 uint8_t queues[],
+		 uint16_t nb_unlinks)
+{
+	struct opdl_port *p = port;
+
+	RTE_SET_USED(nb_unlinks);
+
+	if (unlikely(dev->data->dev_started)) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "Attempt to unlink queue (%u) to port %d while device started\n",
+			     dev->data->dev_id,
+			     queues[0],
+			     p->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	/* Reset port state */
+	p->queue_id = OPDL_INVALID_QID;
+	p->p_type = OPDL_INVALID_PORT;
+	p->external_qid = OPDL_INVALID_QID;
+
+	/* always report 0 queues unlinked due to the static pipeline */
+	return 0;
+}
+
+static int
+opdl_port_setup(struct rte_eventdev *dev,
+		uint8_t port_id,
+		const struct rte_event_port_conf *conf)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+	struct opdl_port *p = &device->ports[port_id];
+
+	RTE_SET_USED(conf);
+
+	/* Check if port already configured */
+	if (p->configured) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "Attempt to setup port %d which is already setup\n",
+			     dev->data->dev_id,
+			     p->id);
+		return -EDQUOT;
+	}
+
+	*p = (struct opdl_port){0}; /* zero entire structure */
+	p->id = port_id;
+	p->opdl = device;
+	p->queue_id = OPDL_INVALID_QID;
+	p->external_qid = OPDL_INVALID_QID;
+	dev->data->ports[port_id] = p;
+	rte_smp_wmb();
+	p->configured = 1;
+	device->nb_ports++;
+	return 0;
+}
+
+static void
+opdl_port_release(void *port)
+{
+	struct opdl_port *p = (void *)port;
+
+	if (p == NULL ||
+	    p->opdl->data->dev_started) {
+		return;
+	}
+
+	p->configured = 0;
+	p->initialized = 0;
+}
+
+static void
+opdl_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+		struct rte_event_port_conf *port_conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(port_id);
+
+	port_conf->new_event_threshold = MAX_OPDL_CONS_Q_DEPTH;
+	port_conf->dequeue_depth = MAX_OPDL_CONS_Q_DEPTH;
+	port_conf->enqueue_depth = MAX_OPDL_CONS_Q_DEPTH;
+}
+
+static int
+opdl_queue_setup(struct rte_eventdev *dev,
+		 uint8_t queue_id,
+		 const struct rte_event_queue_conf *conf)
+{
+	enum queue_type type;
+
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	/* Extra sanity check, probably not needed */
+	if (queue_id == OPDL_INVALID_QID) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "Invalid queue id %u requested\n",
+			     dev->data->dev_id,
+			     queue_id);
+		return -EINVAL;
+	}
+
+	if (device->nb_q_md > device->max_queue_nb) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "Max number of queues %u exceeded by request %u\n",
+			     dev->data->dev_id,
+			     device->max_queue_nb,
+			     device->nb_q_md);
+		return -EINVAL;
+	}
+
+	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
+	    & conf->event_queue_cfg) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "QUEUE_CFG_ALL_TYPES not supported\n",
+			     dev->data->dev_id);
+		return -ENOTSUP;
+	} else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK
+		   & conf->event_queue_cfg) {
+		type = OPDL_Q_TYPE_SINGLE_LINK;
+	} else {
+		switch (conf->schedule_type) {
+		case RTE_SCHED_TYPE_ORDERED:
+			type = OPDL_Q_TYPE_ORDERED;
+			break;
+		case RTE_SCHED_TYPE_ATOMIC:
+			type = OPDL_Q_TYPE_ATOMIC;
+			break;
+		case RTE_SCHED_TYPE_PARALLEL:
+			type = OPDL_Q_TYPE_ORDERED;
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+				     "Unknown queue type %d requested\n",
+				     dev->data->dev_id,
+				     conf->event_queue_cfg);
+			return -EINVAL;
+		}
+	}
+	/* Check if queue id has been setup already */
+	uint32_t i;
+	for (i = 0; i < device->nb_q_md; i++) {
+		if (device->q_md[i].ext_id == queue_id) {
+			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+				     "queue id %u already setup\n",
+				     dev->data->dev_id,
+				     queue_id);
+			return -EINVAL;
+		}
+	}
+
+	device->q_md[device->nb_q_md].ext_id = queue_id;
+	device->q_md[device->nb_q_md].type = type;
+	device->q_md[device->nb_q_md].setup = 1;
+	device->nb_q_md++;
+
+	return 1;
+}
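+/*
+ * Example (editor's sketch): queue setup through the public API maps the
+ * schedule types to the internal queue_type above; note that
+ * RTE_SCHED_TYPE_PARALLEL is also treated as ORDERED, e.g.:
+ *
+ *	struct rte_event_queue_conf qconf = {
+ *		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ *		.nb_atomic_flows = 1024,
+ *		.nb_atomic_order_sequences = 1,
+ *	};
+ *	rte_event_queue_setup(dev_id, qid, &qconf); maps to OPDL_Q_TYPE_ATOMIC
+ */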
+
+static void
+opdl_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	RTE_SET_USED(queue_id);
+
+	if (device->data->dev_started)
+		return;
+}
+
+static void
+opdl_queue_def_conf(struct rte_eventdev *dev,
+		    uint8_t queue_id,
+		    struct rte_event_queue_conf *conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queue_id);
+
+	static const struct rte_event_queue_conf default_conf = {
+		.nb_atomic_flows = 1024,
+		.nb_atomic_order_sequences = 1,
+		.event_queue_cfg = 0,
+		.schedule_type = RTE_SCHED_TYPE_ORDERED,
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	};
+
+	*conf = default_conf;
+}
+
+
+static int
+opdl_dev_configure(const struct rte_eventdev *dev)
+{
+	struct opdl_evdev *opdl = opdl_pmd_priv(dev);
+	const struct rte_eventdev_data *data = dev->data;
+	const struct rte_event_dev_config *conf = &data->dev_conf;
+
+	opdl->max_queue_nb = conf->nb_event_queues;
+	opdl->max_port_nb = conf->nb_event_ports;
+	opdl->nb_events_limit = conf->nb_events_limit;
+
+	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "DEQUEUE_TIMEOUT not supported\n",
+			     dev->data->dev_id);
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+static void
+opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
+{
+	RTE_SET_USED(dev);
+
+	static const struct rte_event_dev_info evdev_opdl_info = {
+		.driver_name = OPDL_PMD_NAME,
+		.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
+		.max_event_queue_flows = OPDL_QID_NUM_FIDS,
+		.max_event_queue_priority_levels = OPDL_Q_PRIORITY_MAX,
+		.max_event_priority_levels = OPDL_IQS_MAX,
+		.max_event_ports = OPDL_PORTS_MAX,
+		.max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
+		.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
+		.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
+		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
+	};
+
+	*info = evdev_opdl_info;
+}
+
+static void
+opdl_dump(struct rte_eventdev *dev, FILE *f)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return;
+
+	fprintf(f,
+		"\n\n -- RING STATISTICS --\n");
+	uint32_t i;
+	for (i = 0; i < device->nb_opdls; i++)
+		opdl_ring_dump(device->opdl[i], f);
+
+	fprintf(f,
+		"\n\n -- PORT STATISTICS --\n"
+		"Type Port Index  Port Id  Queue Id     Av. Req Size  "
+		"Av. Grant Size     Av. Cycles PP"
+		"      Empty DEQs   Non Empty DEQs   Pkts Processed\n");
+
+	for (i = 0; i < device->max_port_nb; i++) {
+		char queue_id[64];
+		char total_cyc[64];
+		const char *p_type;
+
+		uint64_t cne, cpg;
+		struct opdl_port *port = &device->ports[i];
+
+		if (port->initialized) {
+			cne = port->port_stat[claim_non_empty];
+			cpg = port->port_stat[claim_pkts_granted];
+			if (port->p_type == OPDL_REGULAR_PORT)
+				p_type = "REG";
+			else if (port->p_type == OPDL_PURE_RX_PORT)
+				p_type = "  RX";
+			else if (port->p_type == OPDL_PURE_TX_PORT)
+				p_type = "  TX";
+			else if (port->p_type == OPDL_ASYNC_PORT)
+				p_type = "SYNC";
+			else
+				p_type = "????";
+
+			sprintf(queue_id, "%02u", port->external_qid);
+			if (port->p_type == OPDL_REGULAR_PORT ||
+					port->p_type == OPDL_ASYNC_PORT)
+				sprintf(total_cyc,
+					" %'16"PRIu64"",
+					(cpg != 0 ?
+					 port->port_stat[total_cycles] / cpg
+					 : 0));
+			else
+				sprintf(total_cyc,
+					"             ----");
+			fprintf(f,
+				"%4s %10u %8u %9s %'16"PRIu64" %'16"PRIu64" %s "
+				"%'16"PRIu64" %'16"PRIu64" %'16"PRIu64"\n",
+				p_type,
+				i,
+				port->id,
+				(port->external_qid == OPDL_INVALID_QID ? "---"
+				 : queue_id),
+				(cne != 0 ?
+				 port->port_stat[claim_pkts_requested] / cne
+				 : 0),
+				(cne != 0 ?
+				 port->port_stat[claim_pkts_granted] / cne
+				 : 0),
+				total_cyc,
+				port->port_stat[claim_empty],
+				port->port_stat[claim_non_empty],
+				port->port_stat[claim_pkts_granted]);
+		}
+	}
+	fprintf(f, "\n");
+}
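+/*
+ * Example (editor's sketch): the statistics above are reached through the
+ * public dump API, and are only printed when the do_validation vdev
+ * argument was set at device creation, e.g.:
+ *
+ *	rte_event_dev_dump(dev_id, stdout);
+ */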
+
+
+static void
+opdl_stop(struct rte_eventdev *dev)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	opdl_xstats_uninit(dev);
+
+	destroy_queues_and_rings(dev);
+
+
+	device->started = 0;
+
+	rte_smp_wmb();
+}
+
+static int
+opdl_start(struct rte_eventdev *dev)
+{
+	int err = 0;
+
+	if (!err)
+		err = create_queues_and_rings(dev);
+
+
+	if (!err)
+		err = assign_internal_queue_ids(dev);
+
+
+	if (!err)
+		err = initialise_queue_zero_ports(dev);
+
+
+	if (!err)
+		err = initialise_all_other_ports(dev);
+
+
+	if (!err)
+		err = check_queues_linked(dev);
+
+
+	if (!err)
+		err = opdl_add_event_handlers(dev);
+
+
+	if (!err)
+		err = build_all_dependencies(dev);
+
+	if (!err) {
+		opdl_xstats_init(dev);
+
+		struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+		PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
+			      "SUCCESS : Created %u total queues (%u ex, %u in),"
+			      " %u opdls, %u event_dev ports, %u input ports",
+			      opdl_pmd_dev_id(device),
+			      device->nb_queues,
+			      (device->nb_queues - device->nb_opdls),
+			      device->nb_opdls,
+			      device->nb_opdls,
+			      device->nb_ports,
+			      device->queue[0].nb_ports);
+	} else
+		opdl_stop(dev);
+
+	return err;
+}
+
+static int
+opdl_close(struct rte_eventdev *dev)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+	uint32_t i;
+
+	for (i = 0; i < device->max_port_nb; i++) {
+		memset(&device->ports[i],
+		       0,
+		       sizeof(struct opdl_port));
+	}
+
+	memset(&device->s_md,
+			0x0,
+			sizeof(struct opdl_stage_meta_data)*OPDL_PORTS_MAX);
+
+	memset(&device->q_md,
+			0xFF,
+			sizeof(struct opdl_queue_meta_data)*OPDL_MAX_QUEUES);
+
+
+	memset(device->q_map_ex_to_in,
+			0,
+			sizeof(uint8_t)*OPDL_INVALID_QID);
+
+	opdl_xstats_uninit(dev);
+
+	device->max_port_nb = 0;
+
+	device->max_queue_nb = 0;
+
+	device->nb_opdls = 0;
+
+	device->nb_queues   = 0;
+
+	device->nb_ports    = 0;
+
+	device->nb_q_md     = 0;
+
+	dev->data->nb_queues = 0;
+
+	dev->data->nb_ports = 0;
+
+
+	return 0;
+}
+
+static int
+assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *socket_id = opaque;
+	*socket_id = atoi(value);
+	if (*socket_id >= RTE_MAX_NUMA_NODES)
+		return -1;
+	return 0;
+}
+
+static int
+set_do_validation(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *do_val = opaque;
+	*do_val = atoi(value);
+	if (*do_val != 0)
+		*do_val = 1;
+
+	return 0;
+}
+
+static int
+set_do_test(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *do_test = opaque;
+
+	*do_test = atoi(value);
+
+	if (*do_test != 0)
+		*do_test = 1;
+	return 0;
+}
+
+static int
+opdl_probe(struct rte_vdev_device *vdev)
+{
+	static const struct rte_eventdev_ops evdev_opdl_ops = {
+		.dev_configure = opdl_dev_configure,
+		.dev_infos_get = opdl_info_get,
+		.dev_close = opdl_close,
+		.dev_start = opdl_start,
+		.dev_stop = opdl_stop,
+		.dump = opdl_dump,
+
+		.queue_def_conf = opdl_queue_def_conf,
+		.queue_setup = opdl_queue_setup,
+		.queue_release = opdl_queue_release,
+		.port_def_conf = opdl_port_def_conf,
+		.port_setup = opdl_port_setup,
+		.port_release = opdl_port_release,
+		.port_link = opdl_port_link,
+		.port_unlink = opdl_port_unlink,
+
+
+		.xstats_get = opdl_xstats_get,
+		.xstats_get_names = opdl_xstats_get_names,
+		.xstats_get_by_name = opdl_xstats_get_by_name,
+		.xstats_reset = opdl_xstats_reset,
+	};
+
+	static const char *const args[] = {
+		NUMA_NODE_ARG,
+		DO_VALIDATION_ARG,
+		DO_TEST_ARG,
+		NULL
+	};
+	const char *name;
+	const char *params;
+	struct rte_eventdev *dev;
+	struct opdl_evdev *opdl;
+	int socket_id = rte_socket_id();
+	int do_validation = 0;
+	int do_test = 0;
+	int str_len;
+	int test_result = 0;
+
+	name = rte_vdev_device_name(vdev);
+	params = rte_vdev_device_args(vdev);
+	if (params != NULL && params[0] != '\0') {
+		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+		if (!kvlist) {
+			PMD_DRV_LOG(INFO,
+					"Ignoring unsupported parameters when creating device '%s'\n",
+					name);
+		} else {
+			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
+					assign_numa_node, &socket_id);
+			if (ret != 0) {
+				PMD_DRV_LOG(ERR,
+						"%s: Error parsing numa node parameter",
+						name);
+
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DO_VALIDATION_ARG,
+					set_do_validation, &do_validation);
+			if (ret != 0) {
+				PMD_DRV_LOG(ERR,
+					"%s: Error parsing do validation parameter",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DO_TEST_ARG,
+					set_do_test, &do_test);
+			if (ret != 0) {
+				PMD_DRV_LOG(ERR,
+					"%s: Error parsing do test parameter",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			rte_kvargs_free(kvlist);
+		}
+	}
+	dev = rte_event_pmd_vdev_init(name,
+			sizeof(struct opdl_evdev), socket_id);
+
+	if (dev == NULL) {
+		PMD_DRV_LOG(ERR, "eventdev vdev init() failed");
+		return -EFAULT;
+	}
+
+	PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
+		      "Success - creating eventdev device %s, numa_node:[%d], do_valdation:[%s]"
+			  " , self_test:[%s]\n",
+		      dev->data->dev_id,
+		      name,
+		      socket_id,
+		      (do_validation ? "true" : "false"),
+		      (do_test ? "true" : "false"));
+
+	dev->dev_ops = &evdev_opdl_ops;
+
+	dev->enqueue = opdl_event_enqueue;
+	dev->enqueue_burst = opdl_event_enqueue_burst;
+	dev->enqueue_new_burst = opdl_event_enqueue_burst;
+	dev->enqueue_forward_burst = opdl_event_enqueue_burst;
+	dev->dequeue = opdl_event_dequeue;
+	dev->dequeue_burst = opdl_event_dequeue_burst;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	opdl = dev->data->dev_private;
+	opdl->data = dev->data;
+	opdl->socket = socket_id;
+	opdl->do_validation = do_validation;
+	opdl->do_test = do_test;
+	str_len = strlen(name);
+	memcpy(opdl->service_name, name, str_len);
+
+	if (do_test == 1)
+		test_result = opdl_selftest();
+
+	return test_result;
+}
+
+static int
+opdl_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	PMD_DRV_LOG(INFO, "Closing eventdev opdl device %s\n", name);
+
+	return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver evdev_opdl_pmd_drv = {
+	.probe = opdl_probe,
+	.remove = opdl_remove
+};
+
+RTE_INIT(opdl_init_log);
+
+static void
+opdl_init_log(void)
+{
+	opdl_logtype_driver = rte_log_register("pmd.event.opdl.driver");
+	if (opdl_logtype_driver >= 0)
+		rte_log_set_level(opdl_logtype_driver, RTE_LOG_INFO);
+}
+
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OPDL_PMD, evdev_opdl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(event_opdl, NUMA_NODE_ARG "=<int>"
+			      DO_VALIDATION_ARG "=<int>" DO_TEST_ARG "=<int>");
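+/*
+ * Example (editor's sketch): the device and parameters registered above can
+ * be supplied on a hypothetical EAL command line as:
+ *
+ *	--vdev="event_opdl0,numa_node=0,do_validation=1,self_test=0"
+ */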
diff --git a/drivers/event/opdl/opdl_evdev.h b/drivers/event/opdl/opdl_evdev.h
new file mode 100644
index 00000000..610b58b3
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _OPDL_EVDEV_H_
+#define _OPDL_EVDEV_H_
+
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include "opdl_ring.h"
+
+#define OPDL_QID_NUM_FIDS 1024
+#define OPDL_IQS_MAX 1
+#define OPDL_Q_PRIORITY_MAX 1
+#define OPDL_PORTS_MAX 64
+#define MAX_OPDL_CONS_Q_DEPTH 128
+/* OPDL size */
+#define OPDL_INFLIGHT_EVENTS_TOTAL 4096
+/* allow for lots of over-provisioning */
+#define OPDL_FRAGMENTS_MAX 1
+
+/* report dequeue burst sizes in buckets */
+#define OPDL_DEQ_STAT_BUCKET_SHIFT 2
+/* how many packets pulled from port by sched */
+#define SCHED_DEQUEUE_BURST_SIZE 32
+
+/* size of our history list */
+#define OPDL_PORT_HIST_LIST (MAX_OPDL_PROD_Q_DEPTH)
+
+/* how many data points use for average stats */
+#define NUM_SAMPLES 64
+
+#define EVENTDEV_NAME_OPDL_PMD event_opdl
+#define OPDL_PMD_NAME RTE_STR(event_opdl)
+#define OPDL_PMD_NAME_MAX 64
+
+#define OPDL_INVALID_QID 255
+
+#define OPDL_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
+
+#define OPDL_NUM_POLL_BUCKETS  \
+	(MAX_OPDL_CONS_Q_DEPTH >> OPDL_DEQ_STAT_BUCKET_SHIFT)
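+/* With the defaults above this is 128 >> 2 = 32 poll buckets. */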
+
+enum {
+	QE_FLAG_VALID_SHIFT = 0,
+	QE_FLAG_COMPLETE_SHIFT,
+	QE_FLAG_NOT_EOP_SHIFT,
+	_QE_FLAG_COUNT
+};
+
+enum port_type {
+	OPDL_INVALID_PORT = 0,
+	OPDL_REGULAR_PORT = 1,
+	OPDL_PURE_RX_PORT,
+	OPDL_PURE_TX_PORT,
+	OPDL_ASYNC_PORT
+};
+
+enum queue_type {
+	OPDL_Q_TYPE_INVALID = 0,
+	OPDL_Q_TYPE_SINGLE_LINK = 1,
+	OPDL_Q_TYPE_ATOMIC,
+	OPDL_Q_TYPE_ORDERED
+};
+
+enum queue_pos {
+	OPDL_Q_POS_START = 0,
+	OPDL_Q_POS_MIDDLE,
+	OPDL_Q_POS_END
+};
+
+#define QE_FLAG_VALID    (1 << QE_FLAG_VALID_SHIFT)    /* for NEW FWD, FRAG */
+#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP  */
+#define QE_FLAG_NOT_EOP  (1 << QE_FLAG_NOT_EOP_SHIFT)  /* set for FRAG only  */
+
+static const uint8_t opdl_qe_flag_map[] = {
+	QE_FLAG_VALID /* NEW Event */,
+	QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
+	QE_FLAG_COMPLETE /* RELEASE Event */,
+
+	/* Values which can be used for future support for partial
+	 * events, i.e. where one event comes back to the scheduler
+	 * as multiple which need to be tracked together
+	 */
+	QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
+};
+
+
+enum port_xstat_name {
+	claim_pkts_requested = 0,
+	claim_pkts_granted,
+	claim_non_empty,
+	claim_empty,
+	total_cycles,
+	max_num_port_xstat
+};
+
+#define OPDL_MAX_PORT_XSTAT_NUM (OPDL_PORTS_MAX * max_num_port_xstat)
+
+struct opdl_port;
+
+typedef uint16_t (*opdl_enq_operation)(struct opdl_port *port,
+		const struct rte_event ev[],
+		uint16_t num);
+
+typedef uint16_t (*opdl_deq_operation)(struct opdl_port *port,
+		struct rte_event ev[],
+		uint16_t num);
+
+struct opdl_evdev;
+
+struct opdl_stage_meta_data {
+	uint32_t num_claimed;	/* number of entries claimed by this stage */
+	uint32_t burst_sz;	/* Port claim burst size */
+};
+
+struct opdl_port {
+
+	/* back pointer */
+	struct opdl_evdev *opdl;
+
+	/* enq handler & stage instance */
+	opdl_enq_operation enq;
+	struct opdl_stage *enq_stage_inst;
+
+	/* deq handler & stage instance */
+	opdl_deq_operation deq;
+	struct opdl_stage *deq_stage_inst;
+
+	/* port id has correctly been set */
+	uint8_t configured;
+
+	/* set when the port is initialized */
+	uint8_t initialized;
+
+	/* A numeric ID for the port */
+	uint8_t id;
+
+	/* Space for claimed entries */
+	struct rte_event *entries[MAX_OPDL_CONS_Q_DEPTH];
+
+	/* RX/REGULAR/TX/ASYNC - determined on position in queue */
+	enum port_type p_type;
+
+	/* if the claim is static atomic type  */
+	bool atomic_claim;
+
+	/* Queue linked to this port - internal queue id*/
+	uint8_t queue_id;
+
+	/* Queue linked to this port - external queue id*/
+	uint8_t external_qid;
+
+	/* Next queue linked to this port - external queue id*/
+	uint8_t next_external_qid;
+
+	/* number of instances of this stage */
+	uint32_t num_instance;
+
+	/* instance ID of this stage*/
+	uint32_t instance_id;
+
+	/* track packets in and out of this port */
+	uint64_t port_stat[max_num_port_xstat];
+	uint64_t start_cycles;
+};
+
+struct opdl_queue_meta_data {
+	uint8_t         ext_id;
+	enum queue_type type;
+	int8_t          setup;
+};
+
+struct opdl_xstats_entry {
+	struct rte_event_dev_xstats_name stat;
+	unsigned int id;
+	uint64_t *value;
+};
+
+struct opdl_queue {
+
+	/* Opdl ring this queue is associated with */
+	uint32_t opdl_id;
+
+	/* type and position have correctly been set */
+	uint8_t configured;
+
+	/* port number and associated ports have been associated */
+	uint8_t initialized;
+
+	/* type of this queue (Atomic, Ordered, Parallel, Direct)*/
+	enum queue_type q_type;
+
+	/* position of queue (START, MIDDLE, END) */
+	enum queue_pos q_pos;
+
+	/* external queue id. It is mapped to the queue position */
+	uint8_t external_qid;
+
+	struct opdl_port *ports[OPDL_PORTS_MAX];
+	uint32_t nb_ports;
+
+	/* priority, reserved for future */
+	uint8_t priority;
+};
+
+
+#define OPDL_TUR_PER_DEV 12
+
+/* PMD needs an extra queue per Opdl  */
+#define OPDL_MAX_QUEUES (RTE_EVENT_MAX_QUEUES_PER_DEV - OPDL_TUR_PER_DEV)
+
+
+struct opdl_evdev {
+	struct rte_eventdev_data *data;
+
+	uint8_t started;
+
+	/* Max number of ports and queues*/
+	uint32_t max_port_nb;
+	uint32_t max_queue_nb;
+
+	/* slots in the opdl ring */
+	uint32_t nb_events_limit;
+
+	/*
+	 * Array holding all opdl for this device
+	 */
+	struct opdl_ring *opdl[OPDL_TUR_PER_DEV];
+	uint32_t nb_opdls;
+
+	struct opdl_queue_meta_data q_md[OPDL_MAX_QUEUES];
+	uint32_t nb_q_md;
+
+	/* Internal queues - one per logical queue */
+	struct opdl_queue
+		queue[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
+
+	uint32_t nb_queues;
+
+	struct opdl_stage_meta_data s_md[OPDL_PORTS_MAX];
+
+	/* Contains all ports - load balanced and directed */
+	struct opdl_port ports[OPDL_PORTS_MAX] __rte_cache_aligned;
+	uint32_t nb_ports;
+
+	uint8_t q_map_ex_to_in[OPDL_INVALID_QID];
+
+	/* Stats */
+	struct opdl_xstats_entry port_xstat[OPDL_MAX_PORT_XSTAT_NUM];
+
+	char service_name[OPDL_PMD_NAME_MAX];
+	int socket;
+	int do_validation;
+	int do_test;
+};
+
+
+static inline struct opdl_evdev *
+opdl_pmd_priv(const struct rte_eventdev *eventdev)
+{
+	return eventdev->data->dev_private;
+}
+
+static inline uint8_t
+opdl_pmd_dev_id(const struct opdl_evdev *opdl)
+{
+	return opdl->data->dev_id;
+}
+
+static inline const struct opdl_evdev *
+opdl_pmd_priv_const(const struct rte_eventdev *eventdev)
+{
+	return eventdev->data->dev_private;
+}
+
+uint16_t opdl_event_enqueue(void *port, const struct rte_event *ev);
+uint16_t opdl_event_enqueue_burst(void *port, const struct rte_event ev[],
+		uint16_t num);
+
+uint16_t opdl_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
+uint16_t opdl_event_dequeue_burst(void *port, struct rte_event *ev,
+		uint16_t num, uint64_t wait);
+void opdl_event_schedule(struct rte_eventdev *dev);
+
+void opdl_xstats_init(struct rte_eventdev *dev);
+int opdl_xstats_uninit(struct rte_eventdev *dev);
+int opdl_xstats_get_names(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+		struct rte_event_dev_xstats_name *xstats_names,
+		unsigned int *ids, unsigned int size);
+int opdl_xstats_get(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+		const unsigned int ids[], uint64_t values[], unsigned int n);
+uint64_t opdl_xstats_get_by_name(const struct rte_eventdev *dev,
+		const char *name, unsigned int *id);
+int opdl_xstats_reset(struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode,
+		int16_t queue_port_id,
+		const uint32_t ids[],
+		uint32_t nb_ids);
+
+int opdl_add_event_handlers(struct rte_eventdev *dev);
+int build_all_dependencies(struct rte_eventdev *dev);
+int check_queues_linked(struct rte_eventdev *dev);
+int create_queues_and_rings(struct rte_eventdev *dev);
+int initialise_all_other_ports(struct rte_eventdev *dev);
+int initialise_queue_zero_ports(struct rte_eventdev *dev);
+int assign_internal_queue_ids(struct rte_eventdev *dev);
+void destroy_queues_and_rings(struct rte_eventdev *dev);
+int opdl_selftest(void);
+
+#endif /* _OPDL_EVDEV_H_ */
diff --git a/drivers/event/opdl/opdl_evdev_init.c b/drivers/event/opdl/opdl_evdev_init.c
new file mode 100644
index 00000000..1454de53
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev_init.c
@@ -0,0 +1,940 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memzone.h>
+
+#include "opdl_evdev.h"
+#include "opdl_ring.h"
+#include "opdl_log.h"
+
+
+static __rte_always_inline uint32_t
+enqueue_check(struct opdl_port *p,
+		const struct rte_event ev[],
+		uint16_t num,
+		uint16_t num_events)
+{
+	uint16_t i;
+
+	if (p->opdl->do_validation) {
+
+		for (i = 0; i < num; i++) {
+			if (ev[i].queue_id != p->next_external_qid) {
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "ERROR - port:[%u] - event wants"
+					     " to enq to q_id[%u],"
+					     " but should be [%u]",
+					     opdl_pmd_dev_id(p->opdl),
+					     p->id,
+					     ev[i].queue_id,
+					     p->next_external_qid);
+				rte_errno = -EINVAL;
+				return 0;
+			}
+		}
+
+		/* Stats */
+		if (p->p_type == OPDL_PURE_RX_PORT ||
+				p->p_type == OPDL_ASYNC_PORT) {
+			if (num_events) {
+				p->port_stat[claim_pkts_requested] += num;
+				p->port_stat[claim_pkts_granted] += num_events;
+				p->port_stat[claim_non_empty]++;
+				p->start_cycles = rte_rdtsc();
+			} else {
+				p->port_stat[claim_empty]++;
+				p->start_cycles = 0;
+			}
+		} else {
+			if (p->start_cycles) {
+				uint64_t end_cycles = rte_rdtsc();
+				p->port_stat[total_cycles] +=
+					end_cycles - p->start_cycles;
+			}
+		}
+	} else {
+		if (num > 0 &&
+				ev[0].queue_id != p->next_external_qid) {
+			rte_errno = -EINVAL;
+			return 0;
+		}
+	}
+
+	return num;
+}
+
+static __rte_always_inline void
+update_on_dequeue(struct opdl_port *p,
+		struct rte_event ev[],
+		uint16_t num,
+		uint16_t num_events)
+{
+	if (p->opdl->do_validation) {
+		uint16_t i;
+		for (i = 0; i < num; i++)
+			ev[i].queue_id =
+				p->opdl->queue[p->queue_id].external_qid;
+
+		/* Stats */
+		if (num_events) {
+			p->port_stat[claim_pkts_requested] += num;
+			p->port_stat[claim_pkts_granted] += num_events;
+			p->port_stat[claim_non_empty]++;
+			p->start_cycles = rte_rdtsc();
+		} else {
+			p->port_stat[claim_empty]++;
+			p->start_cycles = 0;
+		}
+	} else {
+		if (num > 0)
+			ev[0].queue_id =
+				p->opdl->queue[p->queue_id].external_qid;
+	}
+}
+
+
+/*
+ * Error RX enqueue:
+ *
+ * Dummy enqueue handler assigned to ports that must not enqueue (pure TX
+ * ports); always fails with -ENOSPC.
+ */
+
+static uint16_t
+opdl_rx_error_enqueue(struct opdl_port *p,
+		const struct rte_event ev[],
+		uint16_t num)
+{
+	RTE_SET_USED(p);
+	RTE_SET_USED(ev);
+	RTE_SET_USED(num);
+
+	rte_errno = -ENOSPC;
+
+	return 0;
+}
+
+/*
+ * RX enqueue:
+ *
+ * This function handles enqueue for a single input stage_inst with
+ *	threadsafe disabled or enabled, e.g. one thread using a stage_inst or
+ *	multiple threads sharing a stage_inst
+ */
+
+static uint16_t
+opdl_rx_enqueue(struct opdl_port *p,
+		const struct rte_event ev[],
+		uint16_t num)
+{
+	uint16_t enqueued = 0;
+
+	enqueued = opdl_ring_input(opdl_stage_get_opdl_ring(p->enq_stage_inst),
+				   ev,
+				   num,
+				   false);
+	if (!enqueue_check(p, ev, num, enqueued))
+		return 0;
+
+
+	if (enqueued < num)
+		rte_errno = -ENOSPC;
+
+	return enqueued;
+}
+
+/*
+ * Error TX handler:
+ *
+ * Dummy dequeue handler assigned to ports that must not dequeue (pure RX
+ * ports); always fails with -ENOSPC.
+ */
+
+static uint16_t
+opdl_tx_error_dequeue(struct opdl_port *p,
+		struct rte_event ev[],
+		uint16_t num)
+{
+	RTE_SET_USED(p);
+	RTE_SET_USED(ev);
+	RTE_SET_USED(num);
+
+	rte_errno = -ENOSPC;
+
+	return 0;
+}
+
+/*
+ * TX single threaded claim
+ *
+ * This function handles dequeue for a single worker stage_inst with
+ *	threadsafe disabled, e.g. one thread using a stage_inst
+ */
+
+static uint16_t
+opdl_tx_dequeue_single_thread(struct opdl_port *p,
+			struct rte_event ev[],
+			uint16_t num)
+{
+	uint16_t returned;
+
+	struct opdl_ring  *ring;
+
+	ring = opdl_stage_get_opdl_ring(p->deq_stage_inst);
+
+	returned = opdl_ring_copy_to_burst(ring,
+					   p->deq_stage_inst,
+					   ev,
+					   num,
+					   false);
+
+	update_on_dequeue(p, ev, num, returned);
+
+	return returned;
+}
+
+/*
+ * TX multi threaded claim
+ *
+ * This function handles dequeue for multiple worker stage_inst with
+ *	threadsafe disabled, e.g. multiple stage_inst each with its own instance
+ */
+
+static uint16_t
+opdl_tx_dequeue_multi_inst(struct opdl_port *p,
+			struct rte_event ev[],
+			uint16_t num)
+{
+	uint32_t num_events = 0;
+
+	num_events = opdl_stage_claim(p->deq_stage_inst,
+				    (void *)ev,
+				    num,
+				    NULL,
+				    false,
+				    false);
+
+	update_on_dequeue(p, ev, num, num_events);
+
+	return opdl_stage_disclaim(p->deq_stage_inst, num_events, false);
+}
+
+
+/*
+ * Worker thread claim
+ */
+
+static uint16_t
+opdl_claim(struct opdl_port *p, struct rte_event ev[], uint16_t num)
+{
+	uint32_t num_events = 0;
+
+	if (unlikely(num > MAX_OPDL_CONS_Q_DEPTH)) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "Attempt to dequeue num of events larger than port (%d) max",
+			     opdl_pmd_dev_id(p->opdl),
+			     p->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+
+	num_events = opdl_stage_claim(p->deq_stage_inst,
+			(void *)ev,
+			num,
+			NULL,
+			false,
+			p->atomic_claim);
+
+
+	update_on_dequeue(p, ev, num, num_events);
+
+	return num_events;
+}
+
+/*
+ * Worker thread disclaim
+ */
+
+static uint16_t
+opdl_disclaim(struct opdl_port *p, const struct rte_event ev[], uint16_t num)
+{
+	uint16_t enqueued = 0;
+
+	uint32_t i = 0;
+
+	for (i = 0; i < num; i++)
+		opdl_ring_cas_slot(p->enq_stage_inst, &ev[i],
+				i, p->atomic_claim);
+
+	enqueued = opdl_stage_disclaim(p->enq_stage_inst,
+				       num,
+				       false);
+
+	return enqueue_check(p, ev, num, enqueued);
+}
+
+static __rte_always_inline struct opdl_stage *
+stage_for_port(struct opdl_queue *q, unsigned int i)
+{
+	if (q->q_pos == OPDL_Q_POS_START || q->q_pos == OPDL_Q_POS_MIDDLE)
+		return q->ports[i]->enq_stage_inst;
+	else
+		return q->ports[i]->deq_stage_inst;
+}
+
+static int opdl_add_deps(struct opdl_evdev *device,
+			 int q_id,
+			 int deps_q_id)
+{
+	unsigned int i, j;
+	int status;
+	struct opdl_ring  *ring;
+	struct opdl_queue *queue = &device->queue[q_id];
+	struct opdl_queue *queue_deps = &device->queue[deps_q_id];
+	struct opdl_stage *dep_stages[OPDL_PORTS_MAX];
+
+	/* sanity check that all stages are for same opdl ring */
+	for (i = 0; i < queue->nb_ports; i++) {
+		struct opdl_ring *r =
+			opdl_stage_get_opdl_ring(stage_for_port(queue, i));
+		for (j = 0; j < queue_deps->nb_ports; j++) {
+			struct opdl_ring *rj =
+				opdl_stage_get_opdl_ring(
+						stage_for_port(queue_deps, j));
+			if (r != rj) {
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "Stages and dependents"
+					     " are not for same opdl ring",
+					     opdl_pmd_dev_id(device));
+				uint32_t k;
+				for (k = 0; k < device->nb_opdls; k++) {
+					opdl_ring_dump(device->opdl[k],
+							stdout);
+				}
+				return -EINVAL;
+			}
+		}
+	}
+
+	/* Gather all stages instance in deps */
+	for (i = 0; i < queue_deps->nb_ports; i++)
+		dep_stages[i] = stage_for_port(queue_deps, i);
+
+
+	/* Add all deps for each port->stage_inst in this queue */
+	for (i = 0; i < queue->nb_ports; i++) {
+
+		ring = opdl_stage_get_opdl_ring(stage_for_port(queue, i));
+
+		status = opdl_stage_deps_add(ring,
+				stage_for_port(queue, i),
+				queue->ports[i]->num_instance,
+				queue->ports[i]->instance_id,
+				dep_stages,
+				queue_deps->nb_ports);
+		if (status < 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+int
+opdl_add_event_handlers(struct rte_eventdev *dev)
+{
+	int err = 0;
+
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+	unsigned int i;
+
+	for (i = 0; i < device->max_port_nb; i++) {
+
+		struct opdl_port *port = &device->ports[i];
+
+		if (port->configured) {
+			if (port->p_type == OPDL_PURE_RX_PORT) {
+				port->enq = opdl_rx_enqueue;
+				port->deq = opdl_tx_error_dequeue;
+
+			} else if (port->p_type == OPDL_PURE_TX_PORT) {
+
+				port->enq = opdl_rx_error_enqueue;
+
+				if (port->num_instance == 1)
+					port->deq =
+						opdl_tx_dequeue_single_thread;
+				else
+					port->deq = opdl_tx_dequeue_multi_inst;
+
+			} else if (port->p_type == OPDL_REGULAR_PORT) {
+
+				port->enq = opdl_disclaim;
+				port->deq = opdl_claim;
+
+			} else if (port->p_type == OPDL_ASYNC_PORT) {
+
+				port->enq = opdl_rx_enqueue;
+
+				/* Always single instance */
+				port->deq = opdl_tx_dequeue_single_thread;
+			} else {
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "port:[%u] has invalid port type - ",
+					     opdl_pmd_dev_id(port->opdl),
+					     port->id);
+				err = -EINVAL;
+				break;
+			}
+			port->initialized = 1;
+		}
+	}
+
+	if (!err)
+		fprintf(stdout, "Success - enqueue/dequeue handler(s) added\n");
+	return err;
+}
+
+int
+build_all_dependencies(struct rte_eventdev *dev)
+{
+	int err = 0;
+	unsigned int i;
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	uint8_t start_qid = 0;
+
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct opdl_queue *queue = &device->queue[i];
+		if (!queue->initialized)
+			break;
+
+		if (queue->q_pos == OPDL_Q_POS_START) {
+			start_qid = i;
+			continue;
+		}
+
+		if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
+			err = opdl_add_deps(device, i, i-1);
+			if (err < 0) {
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "dependency addition for queue:[%u] - FAILED",
+					     dev->data->dev_id,
+					     queue->external_qid);
+				break;
+			}
+		}
+
+		if (queue->q_pos == OPDL_Q_POS_END) {
+			/* Add this dependency */
+			err = opdl_add_deps(device, i, i-1);
+			if (err < 0) {
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "dependency addition for queue:[%u] - FAILED",
+					     dev->data->dev_id,
+					     queue->external_qid);
+				break;
+			}
+			/* Add dependency for rx on tx */
+			err = opdl_add_deps(device, start_qid, i);
+			if (err < 0) {
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "dependency addition for queue:[%u] - FAILED",
+					     dev->data->dev_id,
+					     queue->external_qid);
+				break;
+			}
+		}
+	}
+
+	if (!err)
+		fprintf(stdout, "Success - dependencies built\n");
+
+	return err;
+}
+
+int
+check_queues_linked(struct rte_eventdev *dev)
+{
+	int err = 0;
+	unsigned int i;
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+	uint32_t nb_iq = 0;
+
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct opdl_queue *queue = &device->queue[i];
+
+		if (!queue->initialized)
+			break;
+
+		if (queue->external_qid == OPDL_INVALID_QID)
+			nb_iq++;
+
+		if (queue->nb_ports == 0) {
+			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+				     "queue:[%u] has no associated ports",
+				     dev->data->dev_id,
+				     i);
+			err = -EINVAL;
+			break;
+		}
+	}
+	if (!err) {
+		if ((i - nb_iq) != device->max_queue_nb) {
+			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+				     "%u queues counted but should be %u",
+				     dev->data->dev_id,
+				     i - nb_iq,
+				     device->max_queue_nb);
+			err = -1;
+		}
+
+	}
+	return err;
+}
+
+void
+destroy_queues_and_rings(struct rte_eventdev *dev)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+	uint32_t i;
+
+	for (i = 0; i < device->nb_opdls; i++) {
+		if (device->opdl[i])
+			opdl_ring_free(device->opdl[i]);
+	}
+
+	memset(&device->queue,
+			0,
+			sizeof(struct opdl_queue)
+			* RTE_EVENT_MAX_QUEUES_PER_DEV);
+}
+
+#define OPDL_ID(d) ((d)->nb_opdls - 1)
+
+static __rte_always_inline void
+initialise_queue(struct opdl_evdev *device,
+		enum queue_pos pos,
+		int32_t i)
+{
+	struct opdl_queue *queue = &device->queue[device->nb_queues];
+
+	if (i == -1) {
+		queue->q_type = OPDL_Q_TYPE_ORDERED;
+		queue->external_qid = OPDL_INVALID_QID;
+	} else {
+		queue->q_type = device->q_md[i].type;
+		queue->external_qid = device->q_md[i].ext_id;
+		/* Add ex->in for queues setup */
+		device->q_map_ex_to_in[queue->external_qid] = device->nb_queues;
+	}
+	queue->opdl_id = OPDL_ID(device);
+	queue->q_pos = pos;
+	queue->nb_ports = 0;
+	queue->configured = 1;
+
+	device->nb_queues++;
+}
+
+
+static __rte_always_inline int
+create_opdl(struct opdl_evdev *device)
+{
+	int err = 0;
+
+	char name[RTE_MEMZONE_NAMESIZE];
+
+	snprintf(name, RTE_MEMZONE_NAMESIZE,
+			"%s_%u", device->service_name, device->nb_opdls);
+
+	device->opdl[device->nb_opdls] =
+		opdl_ring_create(name,
+				device->nb_events_limit,
+				sizeof(struct rte_event),
+				device->max_port_nb * 2,
+				device->socket);
+
+	if (!device->opdl[device->nb_opdls]) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "opdl ring %u creation - FAILED",
+			     opdl_pmd_dev_id(device),
+			     device->nb_opdls);
+		err = -EINVAL;
+	} else {
+		device->nb_opdls++;
+	}
+	return err;
+}
+
+static __rte_always_inline int
+create_link_opdl(struct opdl_evdev *device, uint32_t index)
+{
+	int err = 0;
+
+	if (device->q_md[index + 1].type !=
+			OPDL_Q_TYPE_SINGLE_LINK) {
+
+		/* async queue with regular
+		 * queue following it
+		 */
+
+		/* create a new opdl ring */
+		err = create_opdl(device);
+		if (!err) {
+			/* create an initial
+			 * dummy queue for new opdl
+			 */
+			initialise_queue(device,
+					OPDL_Q_POS_START,
+					-1);
+		} else {
+			err = -EINVAL;
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			     "queue %u, two consecutive"
+			     " SINGLE_LINK queues, not allowed",
+			     opdl_pmd_dev_id(device),
+			     index);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+int
+create_queues_and_rings(struct rte_eventdev *dev)
+{
+	int err = 0;
+
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	device->nb_queues = 0;
+
+	if (device->nb_ports != device->max_port_nb) {
+		PMD_DRV_LOG(ERR, "Number ports setup:%u NOT EQUAL to max port"
+				" number:%u for this device",
+				device->nb_ports,
+				device->max_port_nb);
+		err = -1;
+	}
+
+	if (!err) {
+		/* We will have at least one opdl so create it now */
+		err = create_opdl(device);
+	}
+
+	if (!err) {
+
+		/* Create 1st "dummy" queue */
+		initialise_queue(device,
+				 OPDL_Q_POS_START,
+				 -1);
+
+		uint32_t i;
+		for (i = 0; i < device->nb_q_md; i++) {
+
+			/* Check */
+			if (!device->q_md[i].setup) {
+
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "queue meta data slot %u"
+					     " not setup - FAILING",
+					     dev->data->dev_id,
+					     i);
+				err = -EINVAL;
+				break;
+			} else if (device->q_md[i].type !=
+					OPDL_Q_TYPE_SINGLE_LINK) {
+
+				if (!device->q_md[i + 1].setup) {
+					/* Create a simple ORDERED/ATOMIC
+					 * queue at the end
+					 */
+					initialise_queue(device,
+							OPDL_Q_POS_END,
+							i);
+
+				} else {
+					/* Create a simple ORDERED/ATOMIC
+					 * queue in the middle
+					 */
+					initialise_queue(device,
+							OPDL_Q_POS_MIDDLE,
+							i);
+				}
+			} else if (device->q_md[i].type ==
+					OPDL_Q_TYPE_SINGLE_LINK) {
+
+				/* create last queue for this opdl */
+				initialise_queue(device,
+						OPDL_Q_POS_END,
+						i);
+
+				err = create_link_opdl(device, i);
+
+				if (err)
+					break;
+
+
+			}
+		}
+	}
+	if (err)
+		destroy_queues_and_rings(dev);
+
+	return err;
+}
+
+
+int
+initialise_all_other_ports(struct rte_eventdev *dev)
+{
+	int err = 0;
+	struct opdl_stage *stage_inst = NULL;
+
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	uint32_t i;
+	for (i = 0; i < device->nb_ports; i++) {
+		struct opdl_port *port = &device->ports[i];
+		struct opdl_queue *queue = &device->queue[port->queue_id];
+
+		if (port->queue_id == 0) {
+			continue;
+		} else if (queue->q_type != OPDL_Q_TYPE_SINGLE_LINK) {
+
+			if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
+
+				/* Regular port with claim/disclaim */
+				stage_inst = opdl_stage_add(
+					device->opdl[queue->opdl_id],
+						false,
+						false);
+				port->deq_stage_inst = stage_inst;
+				port->enq_stage_inst = stage_inst;
+
+				if (queue->q_type == OPDL_Q_TYPE_ATOMIC)
+					port->atomic_claim = true;
+				else
+					port->atomic_claim = false;
+
+				port->p_type =  OPDL_REGULAR_PORT;
+
+				/* Add the port to the queue array of ports */
+				queue->ports[queue->nb_ports] = port;
+				port->instance_id = queue->nb_ports;
+				queue->nb_ports++;
+			} else if (queue->q_pos == OPDL_Q_POS_END) {
+
+				/* tx port  */
+				stage_inst = opdl_stage_add(
+					device->opdl[queue->opdl_id],
+						false,
+						false);
+				port->deq_stage_inst = stage_inst;
+				port->enq_stage_inst = NULL;
+				port->p_type = OPDL_PURE_TX_PORT;
+
+				/* Add the port to the queue array of ports */
+				queue->ports[queue->nb_ports] = port;
+				port->instance_id = queue->nb_ports;
+				queue->nb_ports++;
+			} else {
+
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "port %u:, linked incorrectly"
+					     " to a q_pos START/INVALID %u",
+					     opdl_pmd_dev_id(port->opdl),
+					     port->id,
+					     queue->q_pos);
+				err = -EINVAL;
+				break;
+			}
+
+		} else if (queue->q_type == OPDL_Q_TYPE_SINGLE_LINK) {
+
+			port->p_type = OPDL_ASYNC_PORT;
+
+			/* -- tx -- */
+			stage_inst = opdl_stage_add(
+				device->opdl[queue->opdl_id],
+					false,
+					false); /* First stage */
+			port->deq_stage_inst = stage_inst;
+
+			/* Add the port to the queue array of ports */
+			queue->ports[queue->nb_ports] = port;
+			port->instance_id = queue->nb_ports;
+			queue->nb_ports++;
+
+			if (queue->nb_ports > 1) {
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "queue %u:, setup as SINGLE_LINK"
+					     " but has more than one port linked",
+					     opdl_pmd_dev_id(port->opdl),
+					     queue->external_qid);
+				err = -EINVAL;
+				break;
+			}
+
+			/* -- single instance rx for next opdl -- */
+			uint8_t next_qid =
+				device->q_map_ex_to_in[queue->external_qid] + 1;
+			if (next_qid < RTE_EVENT_MAX_QUEUES_PER_DEV &&
+					device->queue[next_qid].configured) {
+
+				/* Remap the queue */
+				queue = &device->queue[next_qid];
+
+				stage_inst = opdl_stage_add(
+					device->opdl[queue->opdl_id],
+						false,
+						true);
+				port->enq_stage_inst = stage_inst;
+
+				/* Add the port to the queue array of ports */
+				queue->ports[queue->nb_ports] = port;
+				port->instance_id = queue->nb_ports;
+				queue->nb_ports++;
+				if (queue->nb_ports > 1) {
+					PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+						"dummy queue %u: for "
+						"port %u, "
+						"SINGLE_LINK but has more "
+						"than one port linked",
+						opdl_pmd_dev_id(port->opdl),
+						next_qid,
+						port->id);
+					err = -EINVAL;
+					break;
+				}
+				/* Set this queue to initialized as it is never
+				 * referenced by any ports
+				 */
+				queue->initialized = 1;
+			}
+		}
+	}
+
+	/* Now that all ports are initialised, set up the
+	 * remaining stage metadata
+	 */
+	if (!err) {
+		for (i = 0; i < device->nb_ports; i++) {
+			struct opdl_port *port = &device->ports[i];
+			struct opdl_queue *queue =
+				&device->queue[port->queue_id];
+
+			if (port->configured &&
+					(port->queue_id != OPDL_INVALID_QID)) {
+				if (queue->nb_ports == 0) {
+					PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+						"queue:[%u] has no ports"
+						" linked to it",
+						opdl_pmd_dev_id(port->opdl),
+						port->id);
+					err = -EINVAL;
+					break;
+				}
+
+				port->num_instance = queue->nb_ports;
+				port->initialized = 1;
+				queue->initialized = 1;
+			} else {
+				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					     "Port:[%u] not configured  invalid"
+					     " queue configuration",
+					     opdl_pmd_dev_id(port->opdl),
+					     port->id);
+				err = -EINVAL;
+				break;
+			}
+		}
+	}
+	return err;
+}
+
+int
+initialise_queue_zero_ports(struct rte_eventdev *dev)
+{
+	int err = 0;
+	uint8_t mt_rx = 0;
+	struct opdl_stage *stage_inst = NULL;
+	struct opdl_queue *queue = NULL;
+
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	/* Assign queue zero and figure out how many Q0 ports we have */
+	uint32_t i;
+	for (i = 0; i < device->nb_ports; i++) {
+		struct opdl_port *port = &device->ports[i];
+		if (port->queue_id == OPDL_INVALID_QID) {
+			port->queue_id = 0;
+			port->external_qid = OPDL_INVALID_QID;
+			port->p_type = OPDL_PURE_RX_PORT;
+			mt_rx++;
+		}
+	}
+
+	/* Create the stage */
+	stage_inst = opdl_stage_add(device->opdl[0],
+			(mt_rx > 1 ? true : false),
+			true);
+	if (stage_inst) {
+
+		/* Assign the new created input stage to all relevant ports */
+		for (i = 0; i < device->nb_ports; i++) {
+			struct opdl_port *port = &device->ports[i];
+			if (port->queue_id == 0) {
+				queue = &device->queue[port->queue_id];
+				port->enq_stage_inst = stage_inst;
+				port->deq_stage_inst = NULL;
+				port->configured = 1;
+				port->initialized = 1;
+
+				queue->ports[queue->nb_ports] = port;
+				port->instance_id = queue->nb_ports;
+				queue->nb_ports++;
+			}
+		}
+	} else {
+		err = -1;
+	}
+	return err;
+}
+
+int
+assign_internal_queue_ids(struct rte_eventdev *dev)
+{
+	int err = 0;
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+	uint32_t i;
+
+	for (i = 0; i < device->nb_ports; i++) {
+		struct opdl_port *port = &device->ports[i];
+		if (port->external_qid != OPDL_INVALID_QID) {
+			port->queue_id =
+				device->q_map_ex_to_in[port->external_qid];
+
+			/* Now do the external_qid of the next queue */
+			struct opdl_queue *queue =
+				&device->queue[port->queue_id];
+			if (queue->q_pos == OPDL_Q_POS_END)
+				port->next_external_qid =
+				device->queue[port->queue_id + 2].external_qid;
+			else
+				port->next_external_qid =
+				device->queue[port->queue_id + 1].external_qid;
+		}
+	}
+	return err;
+}
diff --git a/drivers/event/opdl/opdl_evdev_xstats.c b/drivers/event/opdl/opdl_evdev_xstats.c
new file mode 100644
index 00000000..0e6c6bd5
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev_xstats.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include "opdl_evdev.h"
+#include "opdl_log.h"
+
+static const char * const port_xstat_str[] = {
+	"claim_pkts_requested",
+	"claim_pkts_granted",
+	"claim_non_empty",
+	"claim_empty",
+	"total_cycles",
+};
+
+
+void
+opdl_xstats_init(struct rte_eventdev *dev)
+{
+	uint32_t i, j;
+
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return;
+
+	for (i = 0; i < device->max_port_nb; i++) {
+		struct opdl_port *port = &device->ports[i];
+
+		for (j = 0; j < max_num_port_xstat; j++) {
+			uint32_t index = (i * max_num_port_xstat) + j;
+
+			/* Name */
+			sprintf(device->port_xstat[index].stat.name,
+			       "port_%02u_%s",
+			       i,
+			       port_xstat_str[j]);
+
+			/* ID */
+			device->port_xstat[index].id = index;
+
+			/* Stats ptr */
+			device->port_xstat[index].value = &port->port_stat[j];
+		}
+	}
+}
+
+int
+opdl_xstats_uninit(struct rte_eventdev *dev)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return 0;
+
+	memset(device->port_xstat,
+	       0,
+	       sizeof(device->port_xstat));
+
+	return 0;
+}
+
+int
+opdl_xstats_get_names(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode,
+		uint8_t queue_port_id,
+		struct rte_event_dev_xstats_name *xstats_names,
+		unsigned int *ids, unsigned int size)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return -ENOTSUP;
+
+	if (mode == RTE_EVENT_DEV_XSTATS_DEVICE ||
+			mode == RTE_EVENT_DEV_XSTATS_QUEUE)
+		return -EINVAL;
+
+	if (queue_port_id >= device->max_port_nb)
+		return -EINVAL;
+
+	if (size < max_num_port_xstat)
+		return max_num_port_xstat;
+
+	uint32_t port_idx = queue_port_id * max_num_port_xstat;
+
+	uint32_t j;
+	for (j = 0; j < max_num_port_xstat; j++) {
+
+		strcpy(xstats_names[j].name,
+				device->port_xstat[j + port_idx].stat.name);
+		ids[j] = device->port_xstat[j + port_idx].id;
+	}
+
+	return max_num_port_xstat;
+}
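+/*
+ * Example (editor's sketch): these names are read back through the public
+ * xstats API, assuming the device was created with do_validation enabled:
+ *
+ *	struct rte_event_dev_xstats_name names[max_num_port_xstat];
+ *	unsigned int ids[max_num_port_xstat];
+ *	int n = rte_event_dev_xstats_names_get(dev_id,
+ *			RTE_EVENT_DEV_XSTATS_PORT, port_id,
+ *			names, ids, max_num_port_xstat);
+ */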
+
+int
+opdl_xstats_get(const struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode,
+		uint8_t queue_port_id,
+		const unsigned int ids[],
+		uint64_t values[], unsigned int n)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return -ENOTSUP;
+
+	if (mode == RTE_EVENT_DEV_XSTATS_DEVICE ||
+			mode == RTE_EVENT_DEV_XSTATS_QUEUE)
+		return -EINVAL;
+
+	if (queue_port_id >= device->max_port_nb)
+		return -EINVAL;
+
+	if (n > max_num_port_xstat)
+		return -EINVAL;
+
+	uint32_t p_start = queue_port_id * max_num_port_xstat;
+	uint32_t p_finish = p_start + max_num_port_xstat;
+
+	uint32_t i;
+	for (i = 0; i < n; i++) {
+		if (ids[i] < p_start || ids[i] >= p_finish)
+			return -EINVAL;
+
+		values[i] = *(device->port_xstat[ids[i]].value);
+	}
+
+	return n;
+}
+
+uint64_t
+opdl_xstats_get_by_name(const struct rte_eventdev *dev,
+		const char *name, unsigned int *id)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return -ENOTSUP;
+
+	uint32_t max_index = device->max_port_nb * max_num_port_xstat;
+
+	uint32_t i;
+	for (i = 0; i < max_index; i++) {
+
+		if (strncmp(name,
+			   device->port_xstat[i].stat.name,
+			   RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
+			if (id != NULL)
+				*id = i;
+			if (device->port_xstat[i].value)
+				return *(device->port_xstat[i].value);
+			break;
+		}
+	}
+	return -EINVAL;
+}
+
+int
+opdl_xstats_reset(struct rte_eventdev *dev,
+		enum rte_event_dev_xstats_mode mode,
+		int16_t queue_port_id, const uint32_t ids[],
+		uint32_t nb_ids)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	if (!device->do_validation)
+		return -ENOTSUP;
+
+	RTE_SET_USED(mode);
+	RTE_SET_USED(queue_port_id);
+	RTE_SET_USED(ids);
+	RTE_SET_USED(nb_ids);
+
+	return -ENOTSUP;
+}
diff --git a/drivers/event/opdl/opdl_log.h b/drivers/event/opdl/opdl_log.h
new file mode 100644
index 00000000..ae5221c1
--- /dev/null
+++ b/drivers/event/opdl/opdl_log.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _OPDL_LOGS_H_
+#define _OPDL_LOGS_H_
+
+#include <rte_log.h>
+
+extern int opdl_logtype_driver;
+
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, opdl_logtype_driver, "%s(): " fmt, \
+			__func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+
+
+#endif /* _OPDL_LOGS_H_ */
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
new file mode 100644
index 00000000..eca7712b
--- /dev/null
+++ b/drivers/event/opdl/opdl_ring.c
@@ -0,0 +1,1233 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal_memconfig.h>
+
+#include "opdl_ring.h"
+#include "opdl_log.h"
+
+#define LIB_NAME "opdl_ring"
+
+#define OPDL_NAME_SIZE 64
+
+
+#define OPDL_EVENT_MASK  (0xFFFF0000000FFFFFULL)
+
+int opdl_logtype_driver;
+
+/* Types of dependency between stages */
+enum dep_type {
+	DEP_NONE = 0,  /* no dependency */
+	DEP_DIRECT,  /* stage has direct dependency */
+	DEP_INDIRECT,  /* in-direct dependency through other stage(s) */
+	DEP_SELF,  /* stage dependency on itself, used to detect loops */
+};
+
+/* Shared section of stage state.
+ * Care is needed when accessing and the layout is important, especially to
+ * limit the adjacent cache-line HW prefetcher from impacting performance.
+ */
+struct shared_state {
+	/* Last known minimum sequence number of dependencies, used for multi
+	 * thread operation
+	 */
+	uint32_t available_seq;
+	char _pad1[RTE_CACHE_LINE_SIZE * 3];
+	uint32_t head;  /* Head sequence number (for multi thread operation) */
+	char _pad2[RTE_CACHE_LINE_SIZE * 3];
+	struct opdl_stage *stage;  /* back pointer */
+	uint32_t tail;  /* Tail sequence number */
+	char _pad3[RTE_CACHE_LINE_SIZE * 2];
+} __rte_cache_aligned;
+
+/* A structure to keep track of "unfinished" claims. This is only used for
+ * stages that are threadsafe. Each lcore accesses its own instance of this
+ * structure to record the entries it has claimed. This allows one lcore to make
+ * multiple claims without being blocked by another. When disclaiming it moves
+ * forward the shared tail when the shared tail matches the tail value recorded
+ * here.
+ */
+struct claim_manager {
+	uint32_t num_to_disclaim;
+	uint32_t num_claimed;
+	uint32_t mgr_head;
+	uint32_t mgr_tail;
+	struct {
+		uint32_t head;
+		uint32_t tail;
+	} claims[OPDL_DISCLAIMS_PER_LCORE];
+} __rte_cache_aligned;
+
+/* Context for each stage of opdl_ring.
+ * Calculations on sequence numbers need to be done with other uint32_t values
+ * so that results are modulus 2^32, and not undefined.
+ */
+struct opdl_stage {
+	struct opdl_ring *t;  /* back pointer, set at init */
+	uint32_t num_slots;  /* Number of slots for entries, set at init */
+	uint32_t index;  /* ID for this stage, set at init */
+	bool threadsafe;  /* Set to 1 if this stage supports threadsafe use */
+	/* Last known min seq number of dependencies, used for single thread
+	 * operation
+	 */
+	uint32_t available_seq;
+	uint32_t head;  /* Current head for single-thread operation */
+	uint32_t shadow_head;  /* Shadow head for single-thread operation */
+	uint32_t nb_instance;  /* Number of instances */
+	uint32_t instance_id;  /* ID of this stage instance */
+	uint16_t num_claimed;  /* Number of slots claimed */
+	uint16_t num_event;		/* Number of events */
+	uint32_t seq;			/* sequence number  */
+	uint32_t num_deps;  /* Number of direct dependencies */
+	/* Keep track of all dependencies, used during init only */
+	enum dep_type *dep_tracking;
+	/* Direct dependencies of this stage */
+	struct shared_state **deps;
+	/* Other stages read this! */
+	struct shared_state shared __rte_cache_aligned;
+	/* For managing disclaims in multi-threaded processing stages */
+	struct claim_manager pending_disclaims[RTE_MAX_LCORE]
+					       __rte_cache_aligned;
+} __rte_cache_aligned;
+
+/* Context for opdl_ring */
+struct opdl_ring {
+	char name[OPDL_NAME_SIZE];  /* OPDL queue instance name */
+	int socket;  /* NUMA socket that memory is allocated on */
+	uint32_t num_slots;  /* Number of slots for entries */
+	uint32_t mask;  /* Mask for sequence numbers (num_slots - 1) */
+	uint32_t slot_size;  /* Size of each slot in bytes */
+	uint32_t num_stages;  /* Number of stages that have been added */
+	uint32_t max_num_stages;  /* Max number of stages */
+	/* Stages indexed by ID */
+	struct opdl_stage *stages;
+	/* Memory for storing slot data */
+	uint8_t slots[0] __rte_cache_aligned;
+};
+
+
+/* Return input stage of an opdl_ring */
+static __rte_always_inline struct opdl_stage *
+input_stage(const struct opdl_ring *t)
+{
+	return &t->stages[0];
+}
+
+/* Check if a stage is the input stage */
+static __rte_always_inline bool
+is_input_stage(const struct opdl_stage *s)
+{
+	return s->index == 0;
+}
+
+/* Get slot pointer from sequence number */
+static __rte_always_inline void *
+get_slot(const struct opdl_ring *t, uint32_t n)
+{
+	return (void *)(uintptr_t)&t->slots[(n & t->mask) * t->slot_size];
+}
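+
+/* Worked example (illustrative values): with num_slots = 8 the mask is 7, so
+ * sequence number n = 10 maps to slot 10 & 7 = 2, i.e. byte offset
+ * 2 * slot_size. Sequence numbers thus wrap around the ring for free,
+ * provided num_slots is a power of 2.
+ */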
+
+/* Find how many entries are available for processing */
+static __rte_always_inline uint32_t
+available(const struct opdl_stage *s)
+{
+	if (s->threadsafe == true) {
+		uint32_t n = __atomic_load_n(&s->shared.available_seq,
+				__ATOMIC_ACQUIRE) -
+				__atomic_load_n(&s->shared.head,
+				__ATOMIC_ACQUIRE);
+
+		/* Return 0 if available_seq needs to be updated */
+		return (n <= s->num_slots) ? n : 0;
+	}
+
+	/* Single threaded */
+	return s->available_seq - s->head;
+}
+
+/* Read sequence number of dependencies and find minimum */
+static __rte_always_inline void
+update_available_seq(struct opdl_stage *s)
+{
+	uint32_t i;
+	uint32_t this_tail = s->shared.tail;
+	uint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);
+	/* The input stage's sequence numbers are greater than those of its
+	 * dependencies, so an offset of t->num_slots is needed when calculating
+	 * available slots, and the condition used to determine the
+	 * dependencies' minimum sequence number must be reversed.
+	 */
+	uint32_t wrap;
+
+	if (is_input_stage(s)) {
+		wrap = s->num_slots;
+		for (i = 1; i < s->num_deps; i++) {
+			uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
+					__ATOMIC_ACQUIRE);
+			if ((this_tail - seq) > (this_tail - min_seq))
+				min_seq = seq;
+		}
+	} else {
+		wrap = 0;
+		for (i = 1; i < s->num_deps; i++) {
+			uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
+					__ATOMIC_ACQUIRE);
+			if ((seq - this_tail) < (min_seq - this_tail))
+				min_seq = seq;
+		}
+	}
+
+	if (s->threadsafe == false)
+		s->available_seq = min_seq + wrap;
+	else
+		__atomic_store_n(&s->shared.available_seq, min_seq + wrap,
+				__ATOMIC_RELEASE);
+}
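+
+/* Worked example of the wrap-safe comparison above (hypothetical values): for
+ * a non-input stage with this_tail = 0xFFFFFFF0 and dependency tails of
+ * 0xFFFFFFF8 and 0x00000002, the distances (seq - this_tail) are 8 and 18, so
+ * min_seq resolves to 0xFFFFFFF8 even though 0x00000002 is numerically
+ * smaller. All arithmetic is modulo 2^32 on uint32_t values.
+ */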
+
+/* Wait until the number of available slots reaches the number requested */
+static __rte_always_inline void
+wait_for_available(struct opdl_stage *s, uint32_t n)
+{
+	while (available(s) < n) {
+		rte_pause();
+		update_available_seq(s);
+	}
+}
+
+/* Return number of slots to process based on number requested and mode */
+static __rte_always_inline uint32_t
+num_to_process(struct opdl_stage *s, uint32_t n, bool block)
+{
+	/* Don't read tail sequences of dependencies if not needed */
+	if (available(s) >= n)
+		return n;
+
+	update_available_seq(s);
+
+	if (block == false) {
+		uint32_t avail = available(s);
+
+		if (avail == 0) {
+			rte_pause();
+			return 0;
+		}
+		return (avail <= n) ? avail : n;
+	}
+
+	if (unlikely(n > s->num_slots)) {
+		PMD_DRV_LOG(ERR, "%u entries is more than max (%u)",
+				n, s->num_slots);
+		return 0;  /* Avoid infinite loop */
+	}
+	/* blocking */
+	wait_for_available(s, n);
+	return n;
+}
+
+/* Copy entries in to slots with wrap-around */
+static __rte_always_inline void
+copy_entries_in(struct opdl_ring *t, uint32_t start, const void *entries,
+		uint32_t num_entries)
+{
+	uint32_t slot_size = t->slot_size;
+	uint32_t slot_index = start & t->mask;
+
+	if (slot_index + num_entries <= t->num_slots) {
+		rte_memcpy(get_slot(t, start), entries,
+				num_entries * slot_size);
+	} else {
+		uint32_t split = t->num_slots - slot_index;
+
+		rte_memcpy(get_slot(t, start), entries, split * slot_size);
+		rte_memcpy(get_slot(t, 0),
+				RTE_PTR_ADD(entries, split * slot_size),
+				(num_entries - split) * slot_size);
+	}
+}
+
+/* Copy entries out from slots with wrap-around */
+static __rte_always_inline void
+copy_entries_out(struct opdl_ring *t, uint32_t start, void *entries,
+		uint32_t num_entries)
+{
+	uint32_t slot_size = t->slot_size;
+	uint32_t slot_index = start & t->mask;
+
+	if (slot_index + num_entries <= t->num_slots) {
+		rte_memcpy(entries, get_slot(t, start),
+				num_entries * slot_size);
+	} else {
+		uint32_t split = t->num_slots - slot_index;
+
+		rte_memcpy(entries, get_slot(t, start), split * slot_size);
+		rte_memcpy(RTE_PTR_ADD(entries, split * slot_size),
+				get_slot(t, 0),
+				(num_entries - split) * slot_size);
+	}
+}
+
+/* Input function optimised for single thread */
+static __rte_always_inline uint32_t
+opdl_ring_input_singlethread(struct opdl_ring *t, const void *entries,
+		uint32_t num_entries, bool block)
+{
+	struct opdl_stage *s = input_stage(t);
+	uint32_t head = s->head;
+
+	num_entries = num_to_process(s, num_entries, block);
+	if (num_entries == 0)
+		return 0;
+
+	copy_entries_in(t, head, entries, num_entries);
+
+	s->head += num_entries;
+	__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+
+	return num_entries;
+}
+
+/* Convert head and tail of claim_manager into valid index */
+static __rte_always_inline uint32_t
+claim_mgr_index(uint32_t n)
+{
+	return n & (OPDL_DISCLAIMS_PER_LCORE - 1);
+}
+
+/* Check if there are available slots in claim_manager */
+static __rte_always_inline bool
+claim_mgr_available(struct claim_manager *mgr)
+{
+	return mgr->mgr_head < (mgr->mgr_tail + OPDL_DISCLAIMS_PER_LCORE);
+}
+
+/* Record a new claim. Only use after first checking an entry is available */
+static __rte_always_inline void
+claim_mgr_add(struct claim_manager *mgr, uint32_t tail, uint32_t head)
+{
+	if ((mgr->mgr_head != mgr->mgr_tail) &&
+			(mgr->claims[claim_mgr_index(mgr->mgr_head - 1)].head ==
+			tail)) {
+		/* Combine with previous claim */
+		mgr->claims[claim_mgr_index(mgr->mgr_head - 1)].head = head;
+	} else {
+		mgr->claims[claim_mgr_index(mgr->mgr_head)].head = head;
+		mgr->claims[claim_mgr_index(mgr->mgr_head)].tail = tail;
+		mgr->mgr_head++;
+	}
+
+	mgr->num_claimed += (head - tail);
+}
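+
+/* Worked example (hypothetical sequence numbers): starting from an empty
+ * manager, claim_mgr_add(mgr, 0, 4) records claims[0] = {head 4, tail 0}. A
+ * following claim_mgr_add(mgr, 4, 7) finds the previous head (4) equal to the
+ * new tail, so it merges into claims[0] = {head 7, tail 0} and num_claimed
+ * becomes 7 without consuming another claims[] entry.
+ */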
+
+/* Read the oldest recorded claim */
+static __rte_always_inline bool
+claim_mgr_read(struct claim_manager *mgr, uint32_t *tail, uint32_t *head)
+{
+	if (mgr->mgr_head == mgr->mgr_tail)
+		return false;
+
+	*head = mgr->claims[claim_mgr_index(mgr->mgr_tail)].head;
+	*tail = mgr->claims[claim_mgr_index(mgr->mgr_tail)].tail;
+	return true;
+}
+
+/* Remove the oldest recorded claim. Only use after first reading the entry */
+static __rte_always_inline void
+claim_mgr_remove(struct claim_manager *mgr)
+{
+	mgr->num_claimed -= (mgr->claims[claim_mgr_index(mgr->mgr_tail)].head -
+			mgr->claims[claim_mgr_index(mgr->mgr_tail)].tail);
+	mgr->mgr_tail++;
+}
+
+/* Update tail in the oldest claim. Only use after first reading the entry */
+static __rte_always_inline void
+claim_mgr_move_tail(struct claim_manager *mgr, uint32_t num_entries)
+{
+	mgr->num_claimed -= num_entries;
+	mgr->claims[claim_mgr_index(mgr->mgr_tail)].tail += num_entries;
+}
+
+static __rte_always_inline void
+opdl_stage_disclaim_multithread_n(struct opdl_stage *s,
+		uint32_t num_entries, bool block)
+{
+	struct claim_manager *disclaims = &s->pending_disclaims[rte_lcore_id()];
+	uint32_t head;
+	uint32_t tail;
+
+	while (num_entries) {
+		bool ret = claim_mgr_read(disclaims, &tail, &head);
+
+		if (ret == false)
+			break;  /* nothing is claimed */
+		/* There should be no race condition here. If shared.tail
+		 * matches, no other core can update it until this one does.
+		 */
+		if (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==
+				tail) {
+			if (num_entries >= (head - tail)) {
+				claim_mgr_remove(disclaims);
+				__atomic_store_n(&s->shared.tail, head,
+						__ATOMIC_RELEASE);
+				num_entries -= (head - tail);
+			} else {
+				claim_mgr_move_tail(disclaims, num_entries);
+				__atomic_store_n(&s->shared.tail,
+						num_entries + tail,
+						__ATOMIC_RELEASE);
+				num_entries = 0;
+			}
+		} else if (block == false)
+			break;  /* blocked by other thread */
+		/* Keep going until num_entries are disclaimed. */
+		rte_pause();
+	}
+
+	disclaims->num_to_disclaim = num_entries;
+}
+
+/* Move head atomically, returning number of entries available to process and
+ * the original value of head. For non-input stages, the claim is recorded
+ * so that the tail can be updated later by opdl_stage_disclaim().
+ */
+static __rte_always_inline void
+move_head_atomically(struct opdl_stage *s, uint32_t *num_entries,
+		uint32_t *old_head, bool block, bool claim_func)
+{
+	uint32_t orig_num_entries = *num_entries;
+	uint32_t ret;
+	struct claim_manager *disclaims = &s->pending_disclaims[rte_lcore_id()];
+
+	/* Attempt to disclaim any outstanding claims */
+	opdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,
+			false);
+
+	*old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);
+	while (true) {
+		bool success;
+		/* If called by opdl_ring_input(), claim does not need to be
+		 * recorded, as there will be no disclaim.
+		 */
+		if (claim_func) {
+			/* Check that the claim can be recorded */
+			ret = claim_mgr_available(disclaims);
+			if (ret == false) {
+				/* exit out if claim can't be recorded */
+				*num_entries = 0;
+				return;
+			}
+		}
+
+		*num_entries = num_to_process(s, orig_num_entries, block);
+		if (*num_entries == 0)
+			return;
+
+		success = __atomic_compare_exchange_n(&s->shared.head, old_head,
+				*old_head + *num_entries,
+				true,  /* may fail spuriously */
+				__ATOMIC_RELEASE,  /* memory order on success */
+				__ATOMIC_ACQUIRE);  /* memory order on fail */
+		if (likely(success))
+			break;
+		rte_pause();
+	}
+
+	if (claim_func)
+		/* Store the claim record */
+		claim_mgr_add(disclaims, *old_head, *old_head + *num_entries);
+}
+
+/* Input function that supports multiple threads */
+static __rte_always_inline uint32_t
+opdl_ring_input_multithread(struct opdl_ring *t, const void *entries,
+		uint32_t num_entries, bool block)
+{
+	struct opdl_stage *s = input_stage(t);
+	uint32_t old_head;
+
+	move_head_atomically(s, &num_entries, &old_head, block, false);
+	if (num_entries == 0)
+		return 0;
+
+	copy_entries_in(t, old_head, entries, num_entries);
+
+	/* If another thread started inputting before this one, but hasn't
+	 * finished, we need to wait for it to complete to update the tail.
+	 */
+	while (unlikely(__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) !=
+			old_head))
+		rte_pause();
+
+	__atomic_store_n(&s->shared.tail, old_head + num_entries,
+			__ATOMIC_RELEASE);
+
+	return num_entries;
+}
+
+static __rte_always_inline uint32_t
+opdl_first_entry_id(uint32_t start_seq, uint8_t nb_p_lcores,
+		uint8_t this_lcore)
+{
+	return ((nb_p_lcores <= 1) ? 0 :
+			(nb_p_lcores - (start_seq % nb_p_lcores) + this_lcore) %
+			nb_p_lcores);
+}
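+
+/* Worked example (hypothetical values): with start_seq = 5, nb_p_lcores = 3
+ * and this_lcore = 1, the first entry ID is (3 - (5 % 3) + 1) % 3 = 2, so
+ * this instance starts at the third entry and then strides by nb_p_lcores.
+ * With a single lcore the result is always 0.
+ */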
+
+/* Claim slots to process, optimised for single-thread operation */
+static __rte_always_inline uint32_t
+opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
+		uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
+{
+	uint32_t i = 0, j = 0,  offset;
+	void *get_slots;
+	struct rte_event *ev;
+	RTE_SET_USED(seq);
+	struct opdl_ring *t = s->t;
+	uint8_t *entries_offset = (uint8_t *)entries;
+
+	if (!atomic) {
+
+		offset = opdl_first_entry_id(s->seq, s->nb_instance,
+				s->instance_id);
+
+		num_entries = s->nb_instance * num_entries;
+
+		num_entries = num_to_process(s, num_entries, block);
+
+		for (; offset < num_entries; offset += s->nb_instance) {
+			get_slots = get_slot(t, s->head + offset);
+			memcpy(entries_offset, get_slots, t->slot_size);
+			entries_offset += t->slot_size;
+			i++;
+		}
+	} else {
+		num_entries = num_to_process(s, num_entries, block);
+
+		for (j = 0; j < num_entries; j++) {
+			ev = (struct rte_event *)get_slot(t, s->head + j);
+			if ((ev->flow_id % s->nb_instance) == s->instance_id) {
+				memcpy(entries_offset, ev, t->slot_size);
+				entries_offset += t->slot_size;
+				i++;
+			}
+		}
+	}
+	s->shadow_head = s->head;
+	s->head += num_entries;
+	s->num_claimed = num_entries;
+	s->num_event = i;
+
+	/* Automatically disclaim entries if the number of rte_events is zero */
+	if (unlikely(i == 0))
+		opdl_stage_disclaim(s, 0, false);
+
+	return i;
+}
+
+/* Thread-safe version of function to claim slots for processing */
+static __rte_always_inline uint32_t
+opdl_stage_claim_multithread(struct opdl_stage *s, void *entries,
+		uint32_t num_entries, uint32_t *seq, bool block)
+{
+	uint32_t old_head;
+	struct opdl_ring *t = s->t;
+	uint32_t i = 0, offset;
+	uint8_t *entries_offset = (uint8_t *)entries;
+
+	if (seq == NULL) {
+		PMD_DRV_LOG(ERR, "Invalid seq PTR");
+		return 0;
+	}
+	offset = opdl_first_entry_id(*seq, s->nb_instance, s->instance_id);
+	num_entries = offset + (s->nb_instance * num_entries);
+
+	move_head_atomically(s, &num_entries, &old_head, block, true);
+
+	for (; offset < num_entries; offset += s->nb_instance) {
+		memcpy(entries_offset, get_slot(t, s->head + offset),
+			t->slot_size);
+		entries_offset += t->slot_size;
+		i++;
+	}
+
+	*seq = old_head;
+
+	return i;
+}
+
+/* Claim and copy slot pointers, optimised for single-thread operation */
+static __rte_always_inline uint32_t
+opdl_stage_claim_copy_singlethread(struct opdl_stage *s, void *entries,
+		uint32_t num_entries, uint32_t *seq, bool block)
+{
+	num_entries = num_to_process(s, num_entries, block);
+	if (num_entries == 0)
+		return 0;
+	copy_entries_out(s->t, s->head, entries, num_entries);
+	if (seq != NULL)
+		*seq = s->head;
+	s->head += num_entries;
+	return num_entries;
+}
+
+/* Thread-safe version of function to claim and copy pointers to slots */
+static __rte_always_inline uint32_t
+opdl_stage_claim_copy_multithread(struct opdl_stage *s, void *entries,
+		uint32_t num_entries, uint32_t *seq, bool block)
+{
+	uint32_t old_head;
+
+	move_head_atomically(s, &num_entries, &old_head, block, true);
+	if (num_entries == 0)
+		return 0;
+	copy_entries_out(s->t, old_head, entries, num_entries);
+	if (seq != NULL)
+		*seq = old_head;
+	return num_entries;
+}
+
+static __rte_always_inline void
+opdl_stage_disclaim_singlethread_n(struct opdl_stage *s,
+		uint32_t num_entries)
+{
+	uint32_t old_tail = s->shared.tail;
+
+	if (unlikely(num_entries > (s->head - old_tail))) {
+		PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
+				num_entries, s->head - old_tail);
+		num_entries = s->head - old_tail;
+	}
+	__atomic_store_n(&s->shared.tail, num_entries + old_tail,
+			__ATOMIC_RELEASE);
+}
+
+uint32_t
+opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
+		bool block)
+{
+	if (input_stage(t)->threadsafe == false)
+		return opdl_ring_input_singlethread(t, entries, num_entries,
+				block);
+	else
+		return opdl_ring_input_multithread(t, entries, num_entries,
+				block);
+}
+
+uint32_t
+opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
+		const void *entries, uint32_t num_entries, bool block)
+{
+	uint32_t head = s->head;
+
+	num_entries = num_to_process(s, num_entries, block);
+
+	if (num_entries == 0)
+		return 0;
+
+	copy_entries_in(t, head, entries, num_entries);
+
+	s->head += num_entries;
+	__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+
+	return num_entries;
+}
+
+uint32_t
+opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
+		void *entries, uint32_t num_entries, bool block)
+{
+	uint32_t head = s->head;
+
+	num_entries = num_to_process(s, num_entries, block);
+	if (num_entries == 0)
+		return 0;
+
+	copy_entries_out(t, head, entries, num_entries);
+
+	s->head += num_entries;
+	__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+
+	return num_entries;
+}
+
+uint32_t
+opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries)
+{
+	if (available(s) >= num_entries)
+		return num_entries;
+
+	update_available_seq(s);
+
+	uint32_t avail = available(s);
+
+	if (avail == 0) {
+		rte_pause();
+		return 0;
+	}
+	return (avail <= num_entries) ? avail : num_entries;
+}
+
+uint32_t
+opdl_stage_claim(struct opdl_stage *s, void *entries,
+		uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
+{
+	if (s->threadsafe == false)
+		return opdl_stage_claim_singlethread(s, entries, num_entries,
+				seq, block, atomic);
+	else
+		return opdl_stage_claim_multithread(s, entries, num_entries,
+				seq, block);
+}
+
+uint32_t
+opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
+		uint32_t num_entries, uint32_t *seq, bool block)
+{
+	if (s->threadsafe == false)
+		return opdl_stage_claim_copy_singlethread(s, entries,
+				num_entries, seq, block);
+	else
+		return opdl_stage_claim_copy_multithread(s, entries,
+				num_entries, seq, block);
+}
+
+void
+opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
+		bool block)
+{
+
+	if (s->threadsafe == false) {
+		opdl_stage_disclaim_singlethread_n(s, s->num_claimed);
+	} else {
+		struct claim_manager *disclaims =
+			&s->pending_disclaims[rte_lcore_id()];
+
+		if (unlikely(num_entries > s->num_slots)) {
+			PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
+					num_entries, disclaims->num_claimed);
+			num_entries = disclaims->num_claimed;
+		}
+
+		num_entries = RTE_MIN(num_entries + disclaims->num_to_disclaim,
+				disclaims->num_claimed);
+		opdl_stage_disclaim_multithread_n(s, num_entries, block);
+	}
+}
+
+int
+opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries, bool block)
+{
+	if (num_entries != s->num_event) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+	if (s->threadsafe == false) {
+		__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+		s->seq += s->num_claimed;
+		s->shadow_head = s->head;
+		s->num_claimed = 0;
+	} else {
+		struct claim_manager *disclaims =
+				&s->pending_disclaims[rte_lcore_id()];
+		opdl_stage_disclaim_multithread_n(s, disclaims->num_claimed,
+				block);
+	}
+	return num_entries;
+}
+
+uint32_t
+opdl_ring_available(struct opdl_ring *t)
+{
+	return opdl_stage_available(&t->stages[0]);
+}
+
+uint32_t
+opdl_stage_available(struct opdl_stage *s)
+{
+	update_available_seq(s);
+	return available(s);
+}
+
+void
+opdl_ring_flush(struct opdl_ring *t)
+{
+	struct opdl_stage *s = input_stage(t);
+
+	wait_for_available(s, s->num_slots);
+}
+
+/******************** Non performance sensitive functions ********************/
+
+/* Initial setup of a new stage's context */
+static int
+init_stage(struct opdl_ring *t, struct opdl_stage *s, bool threadsafe,
+		bool is_input)
+{
+	uint32_t available = (is_input) ? t->num_slots : 0;
+
+	s->t = t;
+	s->num_slots = t->num_slots;
+	s->index = t->num_stages;
+	s->threadsafe = threadsafe;
+	s->shared.stage = s;
+
+	/* Alloc memory for deps */
+	s->dep_tracking = rte_zmalloc_socket(LIB_NAME,
+			t->max_num_stages * sizeof(enum dep_type),
+			0, t->socket);
+	if (s->dep_tracking == NULL)
+		return -ENOMEM;
+
+	s->deps = rte_zmalloc_socket(LIB_NAME,
+			t->max_num_stages * sizeof(struct shared_state *),
+			0, t->socket);
+	if (s->deps == NULL) {
+		rte_free(s->dep_tracking);
+		return -ENOMEM;
+	}
+
+	s->dep_tracking[s->index] = DEP_SELF;
+
+	if (threadsafe == true)
+		s->shared.available_seq = available;
+	else
+		s->available_seq = available;
+
+	return 0;
+}
+
+/* Add direct or indirect dependencies between stages */
+static int
+add_dep(struct opdl_stage *dependent, const struct opdl_stage *dependency,
+		enum dep_type type)
+{
+	struct opdl_ring *t = dependent->t;
+	uint32_t i;
+
+	/* Add new direct dependency */
+	if ((type == DEP_DIRECT) &&
+			(dependent->dep_tracking[dependency->index] ==
+					DEP_NONE)) {
+		PMD_DRV_LOG(DEBUG, "%s:%u direct dependency on %u",
+				t->name, dependent->index, dependency->index);
+		dependent->dep_tracking[dependency->index] = DEP_DIRECT;
+	}
+
+	/* Add new indirect dependency or change direct to indirect */
+	if ((type == DEP_INDIRECT) &&
+			((dependent->dep_tracking[dependency->index] ==
+			DEP_NONE) ||
+			(dependent->dep_tracking[dependency->index] ==
+			DEP_DIRECT))) {
+		PMD_DRV_LOG(DEBUG, "%s:%u indirect dependency on %u",
+				t->name, dependent->index, dependency->index);
+		dependent->dep_tracking[dependency->index] = DEP_INDIRECT;
+	}
+
+	/* Shouldn't happen... */
+	if ((dependent->dep_tracking[dependency->index] == DEP_SELF) &&
+			(dependent != input_stage(t))) {
+		PMD_DRV_LOG(ERR, "Loop in dependency graph %s:%u",
+				t->name, dependent->index);
+		return -EINVAL;
+	}
+
+	/* Keep going to dependencies of the dependency, until input stage */
+	if (dependency != input_stage(t))
+		for (i = 0; i < dependency->num_deps; i++) {
+			int ret = add_dep(dependent, dependency->deps[i]->stage,
+					DEP_INDIRECT);
+
+			if (ret < 0)
+				return ret;
+		}
+
+	/* Make list of sequence numbers for direct dependencies only */
+	if (type == DEP_DIRECT)
+		for (i = 0, dependent->num_deps = 0; i < t->num_stages; i++)
+			if (dependent->dep_tracking[i] == DEP_DIRECT) {
+				if ((i == 0) && (dependent->num_deps > 1))
+					rte_panic("%s:%u depends on more than input",
+							t->name,
+							dependent->index);
+				dependent->deps[dependent->num_deps++] =
+						&t->stages[i].shared;
+			}
+
+	return 0;
+}
+
+struct opdl_ring *
+opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
+		uint32_t max_num_stages, int socket)
+{
+	struct opdl_ring *t;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	int mz_flags = 0;
+	struct opdl_stage *st = NULL;
+	const struct rte_memzone *mz = NULL;
+	size_t alloc_size = RTE_CACHE_LINE_ROUNDUP(sizeof(*t) +
+			(num_slots * slot_size));
+
+	/* Compile time checking */
+	RTE_BUILD_BUG_ON((sizeof(struct shared_state) & RTE_CACHE_LINE_MASK) !=
+			0);
+	RTE_BUILD_BUG_ON((offsetof(struct opdl_stage, shared) &
+			RTE_CACHE_LINE_MASK) != 0);
+	RTE_BUILD_BUG_ON((offsetof(struct opdl_ring, slots) &
+			RTE_CACHE_LINE_MASK) != 0);
+	RTE_BUILD_BUG_ON(!rte_is_power_of_2(OPDL_DISCLAIMS_PER_LCORE));
+
+	/* Parameter checking */
+	if (name == NULL) {
+		PMD_DRV_LOG(ERR, "name param is NULL");
+		return NULL;
+	}
+	if (!rte_is_power_of_2(num_slots)) {
+		PMD_DRV_LOG(ERR, "num_slots (%u) for %s is not power of 2",
+				num_slots, name);
+		return NULL;
+	}
+
+	/* Alloc memory for stages */
+	st = rte_zmalloc_socket(LIB_NAME,
+		max_num_stages * sizeof(struct opdl_stage),
+		RTE_CACHE_LINE_SIZE, socket);
+	if (st == NULL)
+		goto exit_fail;
+
+	snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, name);
+
+	/* Alloc memory for memzone */
+	mz = rte_memzone_reserve(mz_name, alloc_size, socket, mz_flags);
+	if (mz == NULL)
+		goto exit_fail;
+
+	t = mz->addr;
+
+	/* Initialise opdl_ring queue */
+	memset(t, 0, sizeof(*t));
+	snprintf(t->name, sizeof(t->name), "%s", name);
+	t->socket = socket;
+	t->num_slots = num_slots;
+	t->mask = num_slots - 1;
+	t->slot_size = slot_size;
+	t->max_num_stages = max_num_stages;
+	t->stages = st;
+
+	PMD_DRV_LOG(DEBUG, "Created %s at %p (num_slots=%u,socket=%i,slot_size=%u)",
+			t->name, t, num_slots, socket, slot_size);
+
+	return t;
+
+exit_fail:
+	PMD_DRV_LOG(ERR, "Cannot reserve memory");
+	rte_free(st);
+	rte_memzone_free(mz);
+
+	return NULL;
+}
+
+void *
+opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index)
+{
+	return get_slot(t, index);
+}
+
+bool
+opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+		uint32_t index, bool atomic)
+{
+	uint32_t i = 0, j = 0, offset;
+	struct opdl_ring *t = s->t;
+	struct rte_event *ev_orig = NULL;
+	bool ev_updated = false;
+	uint64_t  ev_temp = 0;
+
+	if (index > s->num_event) {
+		PMD_DRV_LOG(ERR, "index is overflow");
+		return ev_updated;
+	}
+
+	ev_temp = ev->event & OPDL_EVENT_MASK;
+
+	if (!atomic) {
+		offset = opdl_first_entry_id(s->seq, s->nb_instance,
+				s->instance_id);
+		offset += index * s->nb_instance;
+		ev_orig = get_slot(t, s->shadow_head + offset);
+		if ((ev_orig->event & OPDL_EVENT_MASK) != ev_temp) {
+			ev_orig->event = ev->event;
+			ev_updated = true;
+		}
+		if (ev_orig->u64 != ev->u64) {
+			ev_orig->u64 = ev->u64;
+			ev_updated = true;
+		}
+
+	} else {
+		for (i = 0; i < s->num_claimed; i++) {
+			ev_orig = (struct rte_event *)
+				get_slot(t, s->shadow_head + i);
+
+			if ((ev_orig->flow_id % s->nb_instance) ==
+					s->instance_id) {
+
+				if (j == index) {
+					if ((ev_orig->event & OPDL_EVENT_MASK)
+							!= ev_temp) {
+						ev_orig->event = ev->event;
+						ev_updated = true;
+					}
+					if (ev_orig->u64 != ev->u64) {
+						ev_orig->u64 = ev->u64;
+						ev_updated = true;
+					}
+
+					break;
+				}
+				j++;
+			}
+		}
+
+	}
+
+	return ev_updated;
+}
+
+int
+opdl_ring_get_socket(const struct opdl_ring *t)
+{
+	return t->socket;
+}
+
+uint32_t
+opdl_ring_get_num_slots(const struct opdl_ring *t)
+{
+	return t->num_slots;
+}
+
+const char *
+opdl_ring_get_name(const struct opdl_ring *t)
+{
+	return t->name;
+}
+
+/* Check dependency list is valid for a given opdl_ring */
+static int
+check_deps(struct opdl_ring *t, struct opdl_stage *deps[],
+		uint32_t num_deps)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_deps; ++i) {
+		if (!deps[i]) {
+			PMD_DRV_LOG(ERR, "deps[%u] is NULL", i);
+			return -EINVAL;
+		}
+		if (t != deps[i]->t) {
+			PMD_DRV_LOG(ERR, "deps[%u] is in opdl_ring %s, not %s",
+					i, deps[i]->t->name, t->name);
+			return -EINVAL;
+		}
+	}
+	if (num_deps > t->num_stages) {
+		PMD_DRV_LOG(ERR, "num_deps (%u) > number stages (%u)",
+				num_deps, t->num_stages);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+struct opdl_stage *
+opdl_stage_add(struct opdl_ring *t, bool threadsafe, bool is_input)
+{
+	struct opdl_stage *s;
+
+	/* Parameter checking */
+	if (!t) {
+		PMD_DRV_LOG(ERR, "opdl_ring is NULL");
+		return NULL;
+	}
+	if (t->num_stages == t->max_num_stages) {
+		PMD_DRV_LOG(ERR, "%s has max number of stages (%u)",
+				t->name, t->max_num_stages);
+		return NULL;
+	}
+
+	s = &t->stages[t->num_stages];
+
+	if (((uintptr_t)&s->shared & RTE_CACHE_LINE_MASK) != 0)
+		PMD_DRV_LOG(WARNING, "Tail seq num (%p) of %s stage not cache aligned",
+				&s->shared, t->name);
+
+	if (init_stage(t, s, threadsafe, is_input) < 0) {
+		PMD_DRV_LOG(ERR, "Cannot reserve memory");
+		return NULL;
+	}
+	t->num_stages++;
+
+	return s;
+}
+
+uint32_t
+opdl_stage_deps_add(struct opdl_ring *t, struct opdl_stage *s,
+		uint32_t nb_instance, uint32_t instance_id,
+		struct opdl_stage *deps[],
+		uint32_t num_deps)
+{
+	uint32_t i;
+	int ret = 0;
+
+	if ((num_deps > 0) && (!deps)) {
+		PMD_DRV_LOG(ERR, "%s stage has NULL dependencies", t->name);
+		return -1;
+	}
+	ret = check_deps(t, deps, num_deps);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < num_deps; i++) {
+		ret = add_dep(s, deps[i], DEP_DIRECT);
+		if (ret < 0)
+			return ret;
+	}
+
+	s->nb_instance = nb_instance;
+	s->instance_id = instance_id;
+
+	return ret;
+}
+
+struct opdl_stage *
+opdl_ring_get_input_stage(const struct opdl_ring *t)
+{
+	return input_stage(t);
+}
+
+int
+opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
+		uint32_t num_deps)
+{
+	unsigned int i;
+	int ret;
+
+	if ((num_deps == 0) || (!deps)) {
+		PMD_DRV_LOG(ERR, "cannot set NULL dependencies");
+		return -EINVAL;
+	}
+
+	ret = check_deps(s->t, deps, num_deps);
+	if (ret < 0)
+		return ret;
+
+	/* Update deps */
+	for (i = 0; i < num_deps; i++)
+		s->deps[i] = &deps[i]->shared;
+	s->num_deps = num_deps;
+
+	return 0;
+}
+
+struct opdl_ring *
+opdl_stage_get_opdl_ring(const struct opdl_stage *s)
+{
+	return s->t;
+}
+
+void
+opdl_ring_dump(const struct opdl_ring *t, FILE *f)
+{
+	uint32_t i;
+
+	if (t == NULL) {
+		fprintf(f, "NULL OPDL!\n");
+		return;
+	}
+	fprintf(f, "OPDL \"%s\": num_slots=%u; mask=%#x; slot_size=%u; num_stages=%u; socket=%i\n",
+			t->name, t->num_slots, t->mask, t->slot_size,
+			t->num_stages, t->socket);
+	for (i = 0; i < t->num_stages; i++) {
+		uint32_t j;
+		const struct opdl_stage *s = &t->stages[i];
+
+		fprintf(f, "  %s[%u]: threadsafe=%s; head=%u; available_seq=%u; tail=%u; deps=%u",
+				t->name, i, (s->threadsafe) ? "true" : "false",
+				(s->threadsafe) ? s->shared.head : s->head,
+				(s->threadsafe) ? s->shared.available_seq :
+				s->available_seq,
+				s->shared.tail, (s->num_deps > 0) ?
+				s->deps[0]->stage->index : 0);
+		for (j = 1; j < s->num_deps; j++)
+			fprintf(f, ",%u", s->deps[j]->stage->index);
+		fprintf(f, "\n");
+	}
+	fflush(f);
+}
+
+void
+opdl_ring_free(struct opdl_ring *t)
+{
+	uint32_t i;
+	const struct rte_memzone *mz;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+
+	if (t == NULL) {
+		PMD_DRV_LOG(DEBUG, "Freeing NULL OPDL Ring!");
+		return;
+	}
+
+	PMD_DRV_LOG(DEBUG, "Freeing %s opdl_ring at %p", t->name, t);
+
+	for (i = 0; i < t->num_stages; ++i) {
+		rte_free(t->stages[i].deps);
+		rte_free(t->stages[i].dep_tracking);
+	}
+
+	rte_free(t->stages);
+
+	snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, t->name);
+	mz = rte_memzone_lookup(mz_name);
+	if (rte_memzone_free(mz) != 0)
+		PMD_DRV_LOG(ERR, "Cannot free memzone for %s", t->name);
+}
+
+/* Search for an opdl_ring by its name */
+struct opdl_ring *
+opdl_ring_lookup(const char *name)
+{
+	const struct rte_memzone *mz;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+
+	snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, name);
+
+	mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL)
+		return NULL;
+
+	return mz->addr;
+}
+
+void
+opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe)
+{
+	s->threadsafe = threadsafe;
+}
diff --git a/drivers/event/opdl/opdl_ring.h b/drivers/event/opdl/opdl_ring.h
new file mode 100644
index 00000000..9e8c33e6
--- /dev/null
+++ b/drivers/event/opdl/opdl_ring.h
@@ -0,0 +1,600 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _OPDL_H_
+#define _OPDL_H_
+
+/**
+ * @file
+ * The "opdl_ring" is a data structure that contains a fixed number of slots,
+ * with each slot having the same, but configurable, size. Entries are input
+ * into the opdl_ring by copying into available slots. Once in the opdl_ring,
+ * an entry is processed by a number of stages, with the ordering of stage
+ * processing controlled by making stages dependent on one or more other stages.
+ * An entry is not available for a stage to process until it has been processed
+ * by that stage's dependencies. Entries are always made available for
+ * processing in the same order that they were input into the opdl_ring.
+ * Inputting is considered as a stage that depends on all other stages,
+ * and is also a dependency of all stages.
+ *
+ * Inputting and processing in a stage can support multi-threading. Note that
+ * multi-thread processing can also be done by making stages co-operate e.g. two
+ * stages where one processes the even packets and the other processes odd
+ * packets.
+ *
+ * An opdl_ring can be used as the basis for pipeline based applications. Instead
+ * of each stage in a pipeline dequeueing from a ring, processing and enqueueing
+ * to another ring, it can process entries in-place on the ring. If stages do
+ * not depend on each other, they can run in parallel.
+ *
+ * The opdl_ring works with entries of configurable size; these could be
+ * pointers to mbufs, pointers to mbufs with application specific meta-data,
+ * tasks etc.
+ */
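+
+/* A minimal single-threaded usage sketch (illustrative only; error handling
+ * is omitted and the slot payload is assumed to be struct rte_event). The
+ * input stage is stage 0, and making it depend on the last stage lets slots
+ * be recycled once they are fully processed:
+ *
+ *   struct opdl_ring *r = opdl_ring_create("example", 1024,
+ *           sizeof(struct rte_event), 2, SOCKET_ID_ANY);
+ *   struct opdl_stage *in_st = opdl_stage_add(r, false, true);
+ *   struct opdl_stage *work = opdl_stage_add(r, false, false);
+ *
+ *   opdl_stage_deps_add(r, work, 1, 0, &in_st, 1);
+ *   opdl_stage_deps_add(r, in_st, 1, 0, &work, 1);
+ *
+ *   struct rte_event in[32], out[32];
+ *   opdl_ring_input(r, in, 32, false);
+ *   uint32_t n = opdl_stage_claim(work, out, 32, NULL, false, false);
+ *   ...process the n claimed events in out[]...
+ *   opdl_stage_disclaim(work, n, false);
+ *   opdl_ring_free(r);
+ */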
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_eventdev.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef OPDL_DISCLAIMS_PER_LCORE
+/** Multi-threaded processing allows one thread to process multiple batches in a
+ * stage, while another thread is processing a single large batch. This number
+ * controls how many non-contiguous batches one stage can process before being
+ * blocked by the other stage.
+ */
+#define OPDL_DISCLAIMS_PER_LCORE 8
+#endif
+
+/** Opaque handle to an opdl_ring instance */
+struct opdl_ring;
+
+/** Opaque handle to a single stage in an opdl_ring */
+struct opdl_stage;
+
+/**
+ * Create a new instance of an opdl_ring.
+ *
+ * @param name
+ *   String containing the name to give the new opdl_ring instance.
+ * @param num_slots
+ *   How many slots the opdl_ring contains. Must be a power of 2.
+ * @param slot_size
+ *   How many bytes in each slot.
+ * @param max_num_stages
+ *   Maximum number of stages.
+ * @param socket
+ *   The NUMA socket (or SOCKET_ID_ANY) on which to allocate the memory used
+ *   for this opdl_ring instance.
+ *
+ * @return
+ *   A pointer to a new opdl_ring instance, or NULL on error.
+ */
+struct opdl_ring *
+opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
+		uint32_t max_num_stages, int socket);
+
+/**
+ * Get pointer to an individual slot in an opdl_ring.
+ *
+ * @param t
+ *   The opdl_ring.
+ * @param index
+ *   Index of slot. If greater than the number of slots it will be masked to
+ *   be within the correct range.
+ *
+ * @return
+ *   A pointer to that slot.
+ */
+void *
+opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index);
+
+/**
+ * Get NUMA socket used by an opdl_ring.
+ *
+ * @param t
+ *   The opdl_ring.
+ *
+ * @return
+ *   NUMA socket.
+ */
+int
+opdl_ring_get_socket(const struct opdl_ring *t);
+
+/**
+ * Get number of slots in an opdl_ring.
+ *
+ * @param t
+ *   The opdl_ring.
+ *
+ * @return
+ *   Number of slots.
+ */
+uint32_t
+opdl_ring_get_num_slots(const struct opdl_ring *t);
+
+/**
+ * Get name of an opdl_ring.
+ *
+ * @param t
+ *   The opdl_ring.
+ *
+ * @return
+ *   Name string.
+ */
+const char *
+opdl_ring_get_name(const struct opdl_ring *t);
+
+/**
+ * Adds a new processing stage to a specified opdl_ring instance. Adding a stage
+ * while there are entries in the opdl_ring being processed will cause undefined
+ * behaviour.
+ *
+ * @param t
+ *   The opdl_ring to add the stage to.
+ * @param threadsafe
+ *   Whether to support multiple threads processing this stage or not.
+ *   Enabling this may have a negative impact on performance if only one thread
+ *   will be processing this stage. Note that dependencies are set separately,
+ *   via opdl_stage_deps_add() or opdl_stage_set_deps().
+ * @param is_input
+ *   Indication to initialise the stage with all slots available or none.
+ *
+ * @return
+ *   A pointer to the new stage, or NULL on error.
+ */
+struct opdl_stage *
+opdl_stage_add(struct opdl_ring *t, bool threadsafe, bool is_input);
+
+/**
+ * Returns the input stage of an opdl_ring to be used by other API functions.
+ *
+ * @param t
+ *   The opdl_ring.
+ *
+ * @return
+ *   A pointer to the input stage.
+ */
+struct opdl_stage *
+opdl_ring_get_input_stage(const struct opdl_ring *t);
+
+/**
+ * Sets the dependencies for a stage (clears all the previous deps!). Changing
+ * dependencies while there are entries in the opdl_ring being processed will
+ * cause undefined behaviour.
+ *
+ * @param s
+ *   The stage to set the dependencies for.
+ * @param deps
+ *   An array of pointers to other stages that this stage will depend on. The
+ *   other stages must be part of the same opdl_ring!
+ * @param num_deps
+ *   The size of the deps array. This must be > 0.
+ *
+ * @return
+ *   0 on success, a negative value on error.
+ */
+int
+opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
+		uint32_t num_deps);
+
+/**
+ * Returns the opdl_ring that a stage belongs to.
+ *
+ * @param s
+ *   The stage
+ *
+ * @return
+ *   A pointer to the opdl_ring that the stage belongs to.
+ */
+struct opdl_ring *
+opdl_stage_get_opdl_ring(const struct opdl_stage *s);
+
+/**
+ * Inputs a new batch of entries into the opdl_ring. This function is only
+ * threadsafe (with the same opdl_ring parameter) if the input stage of the
+ * opdl_ring is threadsafe. For performance reasons, this function does not
+ * check input parameters.
+ *
+ * @param t
+ *   The opdl_ring to input entries in to.
+ * @param entries
+ *   An array of entries that will be copied in to the opdl_ring.
+ * @param num_entries
+ *   The size of the entries array.
+ * @param block
+ *   If this is true, the function blocks until enough slots are available to
+ *   input all the requested entries. If false, then the function inputs as
+ *   many entries as currently possible.
+ *
+ * @return
+ *   The number of entries successfully input.
+ */
+uint32_t
+opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
+		bool block);
+
+/**
+ * Inputs a new batch of entries into an opdl stage. This function is only
+ * threadsafe (with the same stage parameter) if the stage was created as
+ * threadsafe. For performance reasons, this function does not check input
+ * parameters.
+ *
+ * @param t
+ *   The opdl ring to input entries in to.
+ * @param s
+ *   The stage to copy entries to.
+ * @param entries
+ *   An array of entries that will be copied in to the opdl ring.
+ * @param num_entries
+ *   The size of the entries array.
+ * @param block
+ *   If this is true, the function blocks until enough slots are available to
+ *   input all the requested entries. If false, then the function inputs as
+ *   many entries as currently possible.
+ *
+ * @return
+ *   The number of entries successfully input.
+ */
+uint32_t
+opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
+			const void *entries, uint32_t num_entries, bool block);
+
+/**
+ * Copy a batch of entries from the opdl ring. This function is only
+ * threadsafe (with the same stage parameter) if the stage was created as
+ * threadsafe. For performance reasons, this function does not check input
+ * parameters.
+ *
+ * @param t
+ *   The opdl ring to copy entries from.
+ * @param s
+ *   The stage to copy entries from.
+ * @param entries
+ *   An array of entries that will be copied from the opdl ring.
+ * @param num_entries
+ *   The size of the entries array.
+ * @param block
+ *   If this is true, the function blocks until enough entries are available to
+ *   copy all the requested entries. If false, then the function copies as
+ *   many entries as currently possible.
+ *
+ * @return
+ *   The number of entries successfully copied.
+ */
+uint32_t
+opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
+		void *entries, uint32_t num_entries, bool block);
+
+/**
+ * Before processing a batch of entries, a stage must first claim them to get
+ * access. This function is threadsafe using the same opdl_stage parameter if
+ * the stage was created with threadsafe set to true; otherwise it is only
+ * threadsafe with a different opdl_stage per thread. For performance
+ * reasons, this function does not check input parameters.
+ *
+ * @param s
+ *   The opdl_ring stage to read entries in.
+ * @param entries
+ *   An array of pointers to entries that will be filled in by this function.
+ * @param num_entries
+ *   The number of entries to attempt to claim for processing (and the size of
+ *   the entries array).
+ * @param seq
+ *   If not NULL, this is set to the value of the internal stage sequence number
+ *   associated with the first entry returned.
+ * @param block
+ *   If this is true, the function blocks until num_entries slots are available
+ *   to process. If false, then the function claims as many entries as
+ *   currently possible.
+ *
+ * @param atomic
+ *   If this is true, the function will return events according to their
+ *   flow_id.
+ * @return
+ *   The number of pointers to entries filled in to the entries array.
+ */
+uint32_t
+opdl_stage_claim(struct opdl_stage *s, void *entries,
+		uint32_t num_entries, uint32_t *seq, bool block, bool atomic);
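+
+/* A sketch of a typical non-blocking worker loop around opdl_stage_claim()
+ * and opdl_stage_disclaim() (illustrative only; BATCH, running and
+ * process_event() are hypothetical application-side names):
+ *
+ *   struct rte_event evs[BATCH];
+ *   uint32_t i, n;
+ *
+ *   while (running) {
+ *           n = opdl_stage_claim(s, evs, BATCH, NULL, false, false);
+ *           for (i = 0; i < n; i++)
+ *                   process_event(&evs[i]);
+ *           if (n != 0)
+ *                   opdl_stage_disclaim(s, n, false);
+ *   }
+ */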
+
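+/**
+ * Add direct dependencies to a stage and set its instance information.
+ *
+ * @param t
+ *   The opdl_ring that the stage belongs to.
+ * @param s
+ *   The stage to add dependencies to.
+ * @param nb_instance
+ *   The number of instances (threads) that will process this stage.
+ * @param instance_id
+ *   The ID of this stage instance, in the range [0, nb_instance - 1].
+ * @param deps
+ *   An array of pointers to other stages that this stage depends on. The
+ *   other stages must be part of the same opdl_ring.
+ * @param num_deps
+ *   The size of the deps array.
+ *
+ * @return
+ *   0 on success, a negative value on error.
+ */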
+uint32_t
+opdl_stage_deps_add(struct opdl_ring *t, struct opdl_stage *s,
+		uint32_t nb_instance, uint32_t instance_id,
+		struct opdl_stage *deps[], uint32_t num_deps);
+
+/**
+ * A function to check how many entries are ready to be claimed.
+ *
+ * @param entries
+ *   An array of pointers to entries.
+ * @param num_entries
+ *   Number of entries in an array.
+ * @param arg
+ *   An opaque pointer to data passed to the claim function.
+ * @param block
+ *   When set to true, the function should wait until num_entries are ready to
+ *   be processed. Otherwise it should return immediately.
+ *
+ * @return
+ *   Number of entries ready to be claimed.
+ */
+typedef uint32_t (opdl_ring_check_entries_t)(void *entries[],
+		uint32_t num_entries, void *arg, bool block);
+
+/**
+ * Before processing a batch of entries, a stage must first claim them to get
+ * access. Each entry is checked by the passed check() function and depending
+ * on block value, it waits until num_entries are ready or returns immediately.
+ * This function is only threadsafe with a different opdl_stage per thread.
+ *
+ * @param s
+ *   The opdl_ring stage to read entries in.
+ * @param entries
+ *   An array of pointers to entries that will be filled in by this function.
+ * @param num_entries
+ *   The number of entries to attempt to claim for processing (and the size of
+ *   the entries array).
+ * @param seq
+ *   If not NULL, this is set to the value of the internal stage sequence number
+ *   associated with the first entry returned.
+ * @param block
+ *   If this is true, the function blocks until num_entries ready slots are
+ *   available to process. If false, then the function claims as many ready
+ *   entries as currently possible.
+ * @param check
+ *   Pointer to a function called to check entries.
+ * @param arg
+ *   Opaque data passed to check() function.
+ *
+ * @return
+ *   The number of pointers to ready entries filled in to the entries array.
+ */
+uint32_t
+opdl_stage_claim_check(struct opdl_stage *s, void **entries,
+		uint32_t num_entries, uint32_t *seq, bool block,
+		opdl_ring_check_entries_t *check, void *arg);
+
+/**
+ * Before processing a batch of entries, a stage must first claim them to get
+ * access. This function is threadsafe using the same opdl_stage parameter if
+ * the stage was created with threadsafe set to true; otherwise it is only
+ * threadsafe with a different opdl_stage per thread.
+ *
+ * The difference between this function and opdl_stage_claim() is that this
+ * function copies the entries from the opdl_ring. Note that any changes made to
+ * the copied entries will not be reflected back in to the entries in the
+ * opdl_ring, so this function probably only makes sense if the entries are
+ * pointers to other data. For performance reasons, this function does not check
+ * input parameters.
+ *
+ * @param s
+ *   The opdl_ring stage to read entries in.
+ * @param entries
+ *   An array of entries that will be filled in by this function.
+ * @param num_entries
+ *   The number of entries to attempt to claim for processing (and the size of
+ *   the entries array).
+ * @param seq
+ *   If not NULL, this is set to the value of the internal stage sequence number
+ *   associated with the first entry returned.
+ * @param block
+ *   If this is true, the function blocks until num_entries slots are available
+ *   to process. If false, then the function claims as many entries as
+ *   currently possible.
+ *
+ * @return
+ *   The number of entries copied in to the entries array.
+ */
+uint32_t
+opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
+		uint32_t num_entries, uint32_t *seq, bool block);
+
+/**
+ * This function must be called when a stage has finished its processing of
+ * entries, to make them available to any dependent stages. All entries that are
+ * claimed by the calling thread in the stage will be disclaimed. It is possible
+ * to claim multiple batches before disclaiming. For performance reasons, this
+ * function does not check input parameters.
+ *
+ * @param s
+ *   The opdl_ring stage in which to disclaim all claimed entries.
+ *
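+ * @param num_entries
+ *   The number of entries to disclaim. This must match the number of events
+ *   returned by the preceding claim on this stage; otherwise no entries are
+ *   disclaimed, rte_errno is set and 0 is returned.
+ *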
+ * @param block
+ *   Entries are always made available to a stage in the same order that they
+ *   were input in the stage. If a stage is multithread safe, this may mean that
+ *   full disclaiming of a batch of entries can not be considered complete until
+ *   all earlier threads in the stage have disclaimed. If this parameter is true
+ *   then the function blocks until all entries are fully disclaimed, otherwise
+ *   it disclaims as many as currently possible, with non fully disclaimed
+ *   batches stored until the next call to a claim or disclaim function for this
+ *   stage on this thread.
+ *
+ *   If a thread is not going to process any more entries in this stage, it
+ *   *must* first call this function with this parameter set to true to ensure
+ *   it does not block the entire opdl_ring.
+ *
+ *   In a single threaded stage, this parameter has no effect.
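+ *
+ * @return
+ *   On success, the number of entries disclaimed; 0 on error, with rte_errno
+ *   set.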
+ */
+int
+opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries,
+		bool block);
+
+/**
+ * This function can be called when a stage has finished its processing of
+ * entries, to make them available to any dependent stages. The difference
+ * between this function and opdl_stage_disclaim() is that here only a
+ * portion of entries are disclaimed, not all of them. For performance reasons,
+ * this function does not check input parameters.
+ *
+ * @param s
+ *   The opdl_ring stage in which to disclaim entries.
+ *
+ * @param num_entries
+ *   The number of entries to disclaim.
+ *
+ * @param block
+ *   Entries are always made available to a stage in the same order that they
+ *   were input in the stage. If a stage is multithread safe, this may mean that
+ *   full disclaiming of a batch of entries can not be considered complete until
+ *   all earlier threads in the stage have disclaimed. If this parameter is true
+ *   then the function blocks until the specified number of entries has been
+ *   disclaimed (or there are no more entries to disclaim). Otherwise it
+ *   disclaims as many claims as currently possible and an attempt to disclaim
+ *   them is made the next time a claim or disclaim function for this stage on
+ *   this thread is called.
+ *
+ *   In a single threaded stage, this parameter has no effect.
+ */
+void
+opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
+		bool block);
+
+/**
+ * Check how many entries can be input.
+ *
+ * @param t
+ *   The opdl_ring instance to check.
+ *
+ * @return
+ *   The number of new entries currently allowed to be input.
+ */
+uint32_t
+opdl_ring_available(struct opdl_ring *t);
+
+/**
+ * Check how many entries can be processed in a stage.
+ *
+ * @param s
+ *   The stage to check.
+ *
+ * @return
+ *   The number of entries currently available to be processed in this stage.
+ */
+uint32_t
+opdl_stage_available(struct opdl_stage *s);
+
+/**
+ * Check how many entries are available to be processed.
+ *
+ * NOTE : DOES NOT CHANGE ANY STATE WITHIN THE STAGE
+ *
+ * @param s
+ *   The stage to check.
+ *
+ * @param num_entries
+ *   The number of entries to check for availability.
+ *
+ * @return
+ *   The number of entries currently available to be processed in this stage.
+ */
+uint32_t
+opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries);
+
+/**
+ * Create an empty stage instance and return a pointer to it.
+ *
+ * @param t
+ *   Pointer to the opdl_ring.
+ *
+ * @param threadsafe
+ *   Whether to enable multi-threaded use of the stage.
+ * @return
+ *   A pointer to the new empty stage instance.
+ */
+struct opdl_stage *
+opdl_stage_create(struct opdl_ring *t,  bool threadsafe);
+
+/**
+ * Prints information on an opdl_ring instance and all its stages.
+ *
+ * @param t
+ *   The opdl_ring to print info on.
+ * @param f
+ *   Where to print the info.
+ */
+void
+opdl_ring_dump(const struct opdl_ring *t, FILE *f);
+
+/**
+ * Blocks until all entries in an opdl_ring have been processed by all stages.
+ *
+ * @param t
+ *   The opdl_ring instance to flush.
+ */
+void
+opdl_ring_flush(struct opdl_ring *t);
+
+/**
+ * Deallocates all resources used by an opdl_ring instance.
+ *
+ * @param t
+ *   The opdl_ring instance to free.
+ */
+void
+opdl_ring_free(struct opdl_ring *t);
+
+/**
+ * Search for an opdl_ring by its name.
+ *
+ * @param name
+ *   The name of the opdl_ring.
+ * @return
+ *   The pointer to the opdl_ring matching the name, or NULL if not found.
+ *
+ */
+struct opdl_ring *
+opdl_ring_lookup(const char *name);
+
+/**
+ * Set the threadsafe attribute of an opdl_stage.
+ *
+ * @param s
+ *   The opdl_stage.
+ * @param threadsafe
+ *   The threadsafe value to set.
+ */
+void
+opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
+
+
+/**
+ * Compare the event descriptor with the original version in the ring.
+ * If a key field of the event descriptor has been changed by the application,
+ * update the slot in the ring, otherwise do nothing.
+ * The key fields are flow_id, priority, mbuf and impl_opaque.
+ *
+ * @param s
+ *   The opdl_stage.
+ * @param ev
+ *   Pointer to the event descriptor.
+ * @param index
+ *   Index of the event descriptor.
+ * @param atomic
+ *   Queue type associated with the stage.
+ * @return
+ *   True if an event key field changed compared with the previous record.
+ */
+
+bool
+opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+		uint32_t index, bool atomic);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* _OPDL_H_ */
diff --git a/drivers/event/opdl/opdl_test.c b/drivers/event/opdl/opdl_test.c
new file mode 100644
index 00000000..5868ec1b
--- /dev/null
+++ b/drivers/event/opdl/opdl_test.c
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_pause.h>
+
+#include "opdl_evdev.h"
+#include "opdl_log.h"
+
+
+#define MAX_PORTS 16
+#define MAX_QIDS 16
+#define NUM_PACKETS (1<<18)
+#define NUM_EVENTS 256
+#define BURST_SIZE 32
+
+
+
+static int evdev;
+
+struct test {
+	struct rte_mempool *mbuf_pool;
+	uint8_t port[MAX_PORTS];
+	uint8_t qid[MAX_QIDS];
+	int nb_qids;
+};
+
+static struct rte_mempool *eventdev_func_mempool;
+
+static __rte_always_inline struct rte_mbuf *
+rte_gen_arp(int portid, struct rte_mempool *mp)
+{
+	/*
+	 * len = 14 + 46
+	 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
+	 */
+	static const uint8_t arp_request[] = {
+		/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
+		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
+		/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
+		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
+		/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
+		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00
+	};
+	struct rte_mbuf *m;
+	int pkt_len = sizeof(arp_request) - 1;
+
+	m = rte_pktmbuf_alloc(mp);
+	if (!m)
+		return 0;
+
+	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
+		arp_request, pkt_len);
+	rte_pktmbuf_pkt_len(m) = pkt_len;
+	rte_pktmbuf_data_len(m) = pkt_len;
+
+	RTE_SET_USED(portid);
+
+	return m;
+}
+
+/* initialization and config */
+static __rte_always_inline int
+init(struct test *t, int nb_queues, int nb_ports)
+{
+	struct rte_event_dev_config config = {
+			.nb_event_queues = nb_queues,
+			.nb_event_ports = nb_ports,
+			.nb_event_queue_flows = 1024,
+			.nb_events_limit = 4096,
+			.nb_event_port_dequeue_depth = 128,
+			.nb_event_port_enqueue_depth = 128,
+	};
+	int ret;
+
+	void *temp = t->mbuf_pool; /* save and restore mbuf pool */
+
+	memset(t, 0, sizeof(*t));
+	t->mbuf_pool = temp;
+
+	ret = rte_event_dev_configure(evdev, &config);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "%d: Error configuring device\n", __LINE__);
+	return ret;
+}
+
+static __rte_always_inline int
+create_ports(struct test *t, int num_ports)
+{
+	int i;
+	static const struct rte_event_port_conf conf = {
+			.new_event_threshold = 1024,
+			.dequeue_depth = 32,
+			.enqueue_depth = 32,
+	};
+	if (num_ports > MAX_PORTS)
+		return -1;
+
+	for (i = 0; i < num_ports; i++) {
+		if (rte_event_port_setup(evdev, i, &conf) < 0) {
+			PMD_DRV_LOG(ERR, "Error setting up port %d\n", i);
+			return -1;
+		}
+		t->port[i] = i;
+	}
+
+	return 0;
+}
+
+static __rte_always_inline int
+create_queues_type(struct test *t, int num_qids, enum queue_type flags)
+{
+	int i;
+	uint8_t type;
+
+	switch (flags) {
+	case OPDL_Q_TYPE_ORDERED:
+		type = RTE_SCHED_TYPE_ORDERED;
+		break;
+	case OPDL_Q_TYPE_ATOMIC:
+		type = RTE_SCHED_TYPE_ATOMIC;
+		break;
+	default:
+		type = 0;
+	}
+
+	/* Q creation */
+	const struct rte_event_queue_conf conf = {
+		.event_queue_cfg =
+		(flags == OPDL_Q_TYPE_SINGLE_LINK ?
+		 RTE_EVENT_QUEUE_CFG_SINGLE_LINK : 0),
+		.schedule_type = type,
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.nb_atomic_flows = 1024,
+		.nb_atomic_order_sequences = 1024,
+	};
+
+	for (i = t->nb_qids ; i < t->nb_qids + num_qids; i++) {
+		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+			PMD_DRV_LOG(ERR, "%d: error creating qid %d\n ",
+					__LINE__, i);
+			return -1;
+		}
+		t->qid[i] = i;
+	}
+
+	t->nb_qids += num_qids;
+
+	if (t->nb_qids > MAX_QIDS)
+		return -1;
+
+	return 0;
+}
+
+
+/* destruction */
+static __rte_always_inline int
+cleanup(struct test *t __rte_unused)
+{
+	rte_event_dev_stop(evdev);
+	rte_event_dev_close(evdev);
+	PMD_DRV_LOG(ERR, "clean up for test done\n");
+	return 0;
+}
+
+static int
+ordered_basic(struct test *t)
+{
+	const uint8_t rx_port = 0;
+	const uint8_t w1_port = 1;
+	const uint8_t w3_port = 3;
+	const uint8_t tx_port = 4;
+	int err;
+	uint32_t i;
+	uint32_t deq_pkts;
+	struct rte_mbuf *mbufs[3];
+
+	const uint32_t MAGIC_SEQN = 1234;
+
+	/* Create instance with 5 ports */
+	if (init(t, 2, tx_port+1) < 0 ||
+	    create_ports(t, tx_port+1) < 0 ||
+	    create_queues_type(t, 2, OPDL_Q_TYPE_ORDERED)) {
+		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/*
+	 * CQ mapping to QID
+	 * We need three ports, all mapped to the same ordered qid0. Then we'll
+	 * take a packet out to each port, re-enqueue in reverse order,
+	 * then make sure the reordering has taken place properly when we
+	 * dequeue from the tx_port.
+	 *
+	 * Simplified test setup diagram:
+	 *
+	 * rx_port        w1_port
+	 *        \     /         \
+	 *         qid0 - w2_port - qid1
+	 *              \         /     \
+	 *                w3_port        tx_port
+	 */
+	/* CQ mapping to QID for LB ports (directed mapped on create) */
+	for (i = w1_port; i <= w3_port; i++) {
+		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+				1);
+		if (err != 1) {
+			PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
+					__LINE__);
+			cleanup(t);
+			return -1;
+		}
+	}
+
+	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
+			1);
+	if (err != 1) {
+		PMD_DRV_LOG(ERR, "%d: error mapping TX  qid\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+	/* Enqueue 3 packets to the rx port */
+	for (i = 0; i < 3; i++) {
+		struct rte_event ev;
+		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+		if (!mbufs[i]) {
+			PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+
+		ev.queue_id = t->qid[0];
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.mbuf = mbufs[i];
+		mbufs[i]->seqn = MAGIC_SEQN + i;
+
+		/* generate pkt and enqueue */
+		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+		if (err != 1) {
+			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
+					__LINE__, i, err);
+			return -1;
+		}
+	}
+
+	/* use extra slot to make logic in loops easier */
+	struct rte_event deq_ev[w3_port + 1];
+
+	uint32_t  seq  = 0;
+
+	/* Dequeue the 3 packets, one from each worker port */
+	for (i = w1_port; i <= w3_port; i++) {
+		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+				&deq_ev[i], 1, 0);
+		if (deq_pkts != 1) {
+			PMD_DRV_LOG(ERR, "%d: Failed to deq\n", __LINE__);
+			rte_event_dev_dump(evdev, stdout);
+			return -1;
+		}
+		seq = deq_ev[i].mbuf->seqn  - MAGIC_SEQN;
+
+		if (seq != (i-1)) {
+			PMD_DRV_LOG(ERR, "seq test failed! seq is %u, "
+					"port number is %u\n", seq, i);
+			return -1;
+		}
+	}
+
+	/* Enqueue each packet in reverse order, flushing after each one */
+	for (i = w3_port; i >= w1_port; i--) {
+
+		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
+		deq_ev[i].queue_id = t->qid[1];
+		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
+		if (err != 1) {
+			PMD_DRV_LOG(ERR, "%d: Failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+
+	/* dequeue from the tx ports, we should get 3 packets */
+	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+			3, 0);
+
+	/* Check to see if we've got all 3 packets */
+	if (deq_pkts != 3) {
+		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
+			__LINE__, deq_pkts, tx_port);
+		rte_event_dev_dump(evdev, stdout);
+		return 1;
+	}
+
+	/* Destroy the instance */
+	cleanup(t);
+
+	return 0;
+}
+
+static int
+atomic_basic(struct test *t)
+{
+	const uint8_t rx_port = 0;
+	const uint8_t w1_port = 1;
+	const uint8_t w3_port = 3;
+	const uint8_t tx_port = 4;
+	int err;
+	int i;
+	uint32_t deq_pkts;
+	struct rte_mbuf *mbufs[3];
+	const uint32_t MAGIC_SEQN = 1234;
+
+	/* Create instance with 5 ports */
+	if (init(t, 2, tx_port+1) < 0 ||
+	    create_ports(t, tx_port+1) < 0 ||
+	    create_queues_type(t, 2, OPDL_Q_TYPE_ATOMIC) < 0) {
+		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/*
+	 * CQ mapping to QID
+	 * We need three ports, all mapped to the same atomic qid0. All three
+	 * events carry the same flow_id, so atomic scheduling must deliver
+	 * them all to a single worker port, which then forwards them to qid1
+	 * for the tx_port to collect.
+	 *
+	 * Simplified test setup diagram:
+	 *
+	 * rx_port        w1_port
+	 *        \     /         \
+	 *         qid0 - w2_port - qid1
+	 *              \         /     \
+	 *                w3_port        tx_port
+	 */
+	/* CQ mapping to QID for Atomic ports (directed mapped on create) */
+	for (i = w1_port; i <= w3_port; i++) {
+		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+				1);
+		if (err != 1) {
+			PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
+					__LINE__);
+			cleanup(t);
+			return -1;
+		}
+	}
+
+	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
+			1);
+	if (err != 1) {
+		PMD_DRV_LOG(ERR, "%d: error mapping TX  qid\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/* Enqueue 3 packets to the rx port */
+	for (i = 0; i < 3; i++) {
+		struct rte_event ev;
+		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+		if (!mbufs[i]) {
+			PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+
+		ev.queue_id = t->qid[0];
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.flow_id = 1;
+		ev.mbuf = mbufs[i];
+		mbufs[i]->seqn = MAGIC_SEQN + i;
+
+		/* enqueue the generated pkt */
+		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+		if (err != 1) {
+			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
+					__LINE__, i, err);
+			return -1;
+		}
+	}
+
+	/* use extra slot to make logic in loops easier */
+	struct rte_event deq_ev[w3_port + 1];
+
+	/* Atomic scheduling with a single flow: all 3 packets should be
+	 * delivered to one worker port (port 2); the other ports get none.
+	 */
+	for (i = w1_port; i <= w3_port; i++) {
+
+		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+				deq_ev, 3, 0);
+
+		if (t->port[i] != 2) {
+			if (deq_pkts != 0) {
+				PMD_DRV_LOG(ERR, "%d: deq non-zero on port %d!\n",
+						__LINE__, i);
+				rte_event_dev_dump(evdev, stdout);
+				return -1;
+			}
+		} else {
+			if (deq_pkts != 3) {
+				PMD_DRV_LOG(ERR, "%d: deq not equal to 3: %u!\n",
+						__LINE__, deq_pkts);
+				rte_event_dev_dump(evdev, stdout);
+				return -1;
+			}
+
+			int j;
+			for (j = 0; j < 3; j++) {
+				deq_ev[j].op = RTE_EVENT_OP_FORWARD;
+				deq_ev[j].queue_id = t->qid[1];
+			}
+
+			err = rte_event_enqueue_burst(evdev, t->port[i],
+					deq_ev, 3);
+			if (err != 3) {
+				PMD_DRV_LOG(ERR, "port %d: Failed to enqueue 3 pkts, "
+						"retval = %u\n",
+						t->port[i], err);
+				return -1;
+			}
+		}
+	}
+
+	/* Dequeue from the tx port; we should get 3 packets */
+	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+			3, 0);
+
+	/* Check to see if we've got all 3 packets */
+	if (deq_pkts != 3) {
+		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
+			__LINE__, deq_pkts, tx_port);
+		rte_event_dev_dump(evdev, stdout);
+		return 1;
+	}
+
+	cleanup(t);
+
+	return 0;
+}
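+
+/* Verify per-port xstats values against the event counts expected from the
+ * traffic pattern driven by single_link_w_stats(). The expected numbers
+ * below are specific to that test.
+ */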
+static __rte_always_inline int
+check_qid_stats(const uint64_t values[], int index)
+{
+	if (index == 0) {
+		if (values[0] != 3 || values[1] != 3
+				|| values[2] != 3)
+			return -1;
+	} else if (index == 1) {
+		if (values[0] != 5 || values[1] != 5
+				|| values[2] != 2)
+			return -1;
+	} else if (index == 2) {
+		if (values[0] != 3 || values[1] != 1
+				|| values[2] != 1)
+			return -1;
+	}
+
+	return 0;
+}
+
+static int
+check_statistics(void)
+{
+	int num_ports = 3; /* Hard-coded for this app */
+	int i;
+
+	for (i = 0; i < num_ports; i++) {
+		int num_stats, num_stats_returned;
+		int err;
+
+		num_stats = rte_event_dev_xstats_names_get(0,
+				RTE_EVENT_DEV_XSTATS_PORT,
+				i,
+				NULL,
+				NULL,
+				0);
+		if (num_stats <= 0)
+			return -1;
+
+		uint32_t id[num_stats];
+		struct rte_event_dev_xstats_name names[num_stats];
+		uint64_t values[num_stats];
+
+		num_stats_returned = rte_event_dev_xstats_names_get(0,
+				RTE_EVENT_DEV_XSTATS_PORT,
+				i,
+				names,
+				id,
+				num_stats);
+		if (num_stats_returned != num_stats)
+			return -1;
+
+		num_stats_returned = rte_event_dev_xstats_get(0,
+				RTE_EVENT_DEV_XSTATS_PORT,
+				i,
+				id,
+				values,
+				num_stats);
+		if (num_stats_returned != num_stats)
+			return -1;
+
+		err = check_qid_stats(values, i);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+#define OLD_NUM_PACKETS 3
+#define NEW_NUM_PACKETS 2
+static int
+single_link_w_stats(struct test *t)
+{
+	const uint8_t rx_port = 0;
+	const uint8_t w1_port = 1;
+	const uint8_t tx_port = 2;
+	int err;
+	int i;
+	uint32_t deq_pkts;
+	struct rte_mbuf *mbufs[3];
+
+	/* Create instance with 3 ports */
+	if (init(t, 2, tx_port + 1) < 0 ||
+	    create_ports(t, 3) < 0 || /* 0,1,2 */
+	    create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
+	    create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
+		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+
+	/*
+	 *
+	 * Simplified test setup diagram:
+	 *
+	 * rx_port(0)
+	 *           \
+	 *            qid0 - w1_port(1) - qid1
+	 *                                    \
+	 *                                     tx_port(2)
+	 */
+
+	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
+				  1);
+	if (err != 1) {
+		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
+		       __LINE__,
+		       t->port[1],
+		       t->qid[0]);
+		cleanup(t);
+		return -1;
+	}
+
+	err = rte_event_port_link(evdev, t->port[2], &t->qid[1], NULL,
+				  1);
+	if (err != 1) {
+		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
+		       __LINE__,
+		       t->port[2],
+		       t->qid[1]);
+		cleanup(t);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) != 0) {
+		PMD_DRV_LOG(ERR, "%d: failed to start device\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	/*
+	 * Enqueue 3 packets to the rx port
+	 */
+	for (i = 0; i < OLD_NUM_PACKETS; i++) {
+		struct rte_event ev;
+		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+		if (!mbufs[i]) {
+			PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+
+		ev.queue_id = t->qid[0];
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.mbuf = mbufs[i];
+		mbufs[i]->seqn = 1234 + i;
+
+		/* enqueue the generated pkt */
+		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+		if (err != 1) {
+			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
+			       __LINE__,
+			       t->port[rx_port],
+			       err);
+			return -1;
+		}
+	}
+
+	/* Dequeue the 3 packets, from SINGLE_LINK worker port */
+	struct rte_event deq_ev[3];
+
+	deq_pkts = rte_event_dequeue_burst(evdev,
+					   t->port[w1_port],
+					   deq_ev, 3, 0);
+
+	if (deq_pkts != 3) {
+		PMD_DRV_LOG(ERR, "%d: deq not 3 !\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	/* Just enqueue 2 onto new ring */
+	for (i = 0; i < NEW_NUM_PACKETS; i++)
+		deq_ev[i].queue_id = t->qid[1];
+
+	deq_pkts = rte_event_enqueue_burst(evdev,
+					   t->port[w1_port],
+					   deq_ev,
+					   NEW_NUM_PACKETS);
+
+	if (deq_pkts != 2) {
+		PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!\n", __LINE__, deq_pkts);
+		cleanup(t);
+		return -1;
+	}
+
+	/* Dequeue from the tx port; we should get 2 packets */
+	deq_pkts = rte_event_dequeue_burst(evdev,
+					   t->port[tx_port],
+					   deq_ev,
+					   3,
+					   0);
+
+	/* Check that we got both packets */
+	if (deq_pkts != 2) {
+		PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %d from port %d\n",
+			__LINE__, deq_pkts, tx_port);
+		cleanup(t);
+		return -1;
+	}
+
+	if (check_statistics() < 0) {
+		PMD_DRV_LOG(ERR, "%d: xstats check failed\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	cleanup(t);
+
+	return 0;
+}
+
+static int
+single_link(struct test *t)
+{
+	const uint8_t tx_port = 2;
+	int err;
+
+	/* Create instance with 3 ports */
+	if (init(t, 2, tx_port+1) < 0 ||
+	    create_ports(t, 3) < 0 || /* 0,1,2 */
+	    create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
+	    create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
+		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/*
+	 *
+	 * Simplified test setup diagram:
+	 *
+	 * rx_port(0)
+	 *           \
+	 *            qid0 - w1_port(1) - qid1
+	 *                                    \
+	 *                                     tx_port(2)
+	 */
+
+	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
+				  1);
+	if (err != 1) {
+		PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	err = rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL,
+				  1);
+	if (err != 1) {
+		PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
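+	/* A SINGLE_LINK queue may only be linked to one port, so starting
+	 * the device with two ports linked to qid0 must fail.
+	 */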
+	if (rte_event_dev_start(evdev) == 0) {
+		PMD_DRV_LOG(ERR, "%d: start DIDN'T FAIL with more than 1 "
+				"SINGLE_LINK PORT\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	cleanup(t);
+
+	return 0;
+}
+
+static __rte_always_inline void
+populate_event_burst(struct rte_event ev[],
+		     uint8_t qid,
+		     uint16_t num_events)
+{
+	uint16_t i;
+	for (i = 0; i < num_events; i++) {
+		ev[i].flow_id = 1;
+		ev[i].op = RTE_EVENT_OP_NEW;
+		ev[i].sched_type = RTE_SCHED_TYPE_ORDERED;
+		ev[i].queue_id = qid;
+		ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
+		ev[i].sub_event_type = 0;
+		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
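+		/* Dummy pointer: the test never dereferences the mbuf */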
+		ev[i].mbuf = (struct rte_mbuf *)0xdead0000;
+	}
+}
+
+#define NUM_QUEUES 3
+#define BATCH_SIZE 32
+
+static int
+qid_basic(struct test *t)
+{
+	int err = 0;
+
+	uint8_t q_id = 0;
+	uint8_t p_id = 0;
+
+	uint32_t num_events;
+	uint32_t i;
+
+	struct rte_event ev[BATCH_SIZE];
+
+	/* Create instance with 4 ports */
+	if (init(t, NUM_QUEUES, NUM_QUEUES+1) < 0 ||
+	    create_ports(t, NUM_QUEUES+1) < 0 ||
+	    create_queues_type(t, NUM_QUEUES, OPDL_Q_TYPE_ORDERED) < 0) {
+		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	for (i = 0; i < NUM_QUEUES; i++) {
+		int nb_linked;
+		q_id = i;
+
+		nb_linked = rte_event_port_link(evdev,
+				i+1, /* port = q_id + 1 */
+				&q_id,
+				NULL,
+				1);
+
+		if (nb_linked != 1) {
+
+			PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u\n",
+					__FILE__,
+					__LINE__,
+					i + 1,
+					q_id);
+
+			err = -1;
+			break;
+		}
+
+	}
+
+	/* Try to link the same port again */
+	if (!err) {
+		uint8_t t_qid = 0;
+		if (rte_event_port_link(evdev,
+					1,
+					&t_qid,
+					NULL,
+					1) > 0) {
+			PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail\n",
+					__FILE__,
+					__LINE__);
+			err = -1;
+		}
+
+		uint32_t test_num_events;
+
+		if (!err) {
+			test_num_events = rte_event_dequeue_burst(evdev,
+					p_id,
+					ev,
+					BATCH_SIZE,
+					0);
+			if (test_num_events != 0) {
+				PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing 0 packets from port %u on stopped device\n",
+						__FILE__,
+						__LINE__,
+						p_id);
+				err = -1;
+			}
+		}
+
+		if (!err) {
+			test_num_events = rte_event_enqueue_burst(evdev,
+					p_id,
+					ev,
+					BATCH_SIZE);
+			if (test_num_events != 0) {
+				PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing 0 packets to port %u on stopped device\n",
+						__FILE__,
+						__LINE__,
+						p_id);
+				err = -1;
+			}
+		}
+	}
+
+	/* Start the device */
+	if (!err) {
+		if (rte_event_dev_start(evdev) < 0) {
+			PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n",
+					__FILE__,
+					__LINE__);
+			err = -1;
+		}
+	}
+
+	/* Check we can't do any more links now that device is started. */
+	if (!err) {
+		uint8_t t_qid = 0;
+		if (rte_event_port_link(evdev,
+					1,
+					&t_qid,
+					NULL,
+					1) > 0) {
+			PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail\n",
+					__FILE__,
+					__LINE__);
+			err = -1;
+		}
+	}
+
+	if (!err) {
+
+		q_id = 0;
+
+		populate_event_burst(ev,
+				q_id,
+				BATCH_SIZE);
+
+		num_events = rte_event_enqueue_burst(evdev,
+				p_id,
+				ev,
+				BATCH_SIZE);
+		if (num_events != BATCH_SIZE) {
+			PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets\n",
+					__FILE__,
+					__LINE__);
+			err = -1;
+		}
+	}
+
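+	/* Forward the batch through each worker port/queue pair in turn */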
+	if (!err) {
+		while (++p_id < NUM_QUEUES) {
+
+			num_events = rte_event_dequeue_burst(evdev,
+					p_id,
+					ev,
+					BATCH_SIZE,
+					0);
+
+			if (num_events != BATCH_SIZE) {
+				PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u\n",
+						__FILE__,
+						__LINE__,
+						p_id);
+				err = -1;
+				break;
+			}
+
+			if (ev[0].queue_id != q_id) {
+				PMD_DRV_LOG(ERR, "%s:%d: Error event portid[%u] q_id:[%u] does not match expected:[%u]\n",
+						__FILE__,
+						__LINE__,
+						p_id,
+						ev[0].queue_id,
+						q_id);
+				err = -1;
+				break;
+			}
+
+			populate_event_burst(ev,
+					++q_id,
+					BATCH_SIZE);
+
+			num_events = rte_event_enqueue_burst(evdev,
+					p_id,
+					ev,
+					BATCH_SIZE);
+			if (num_events != BATCH_SIZE) {
+				PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u\n",
+						__FILE__,
+						__LINE__,
+						p_id,
+						q_id);
+				err = -1;
+				break;
+			}
+		}
+	}
+
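+	/* p_id now indexes the final (tx) port; drain the last queue */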
+	if (!err) {
+		num_events = rte_event_dequeue_burst(evdev,
+				p_id,
+				ev,
+				BATCH_SIZE,
+				0);
+		if (num_events != BATCH_SIZE) {
+			PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u\n",
+					__FILE__,
+					__LINE__,
+					p_id);
+			err = -1;
+		}
+	}
+
+	cleanup(t);
+
+	return err;
+}
+
+int
+opdl_selftest(void)
+{
+	struct test *t = malloc(sizeof(struct test));
+	int ret;
+
+	if (t == NULL)
+		return -1;
+
+	const char *eventdev_name = "event_opdl0";
+
+	evdev = rte_event_dev_get_dev_id(eventdev_name);
+
+	if (evdev < 0) {
+		PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.\n",
+				__LINE__, eventdev_name);
+		/* turn on stats by default */
+		if (rte_vdev_init(eventdev_name, "do_validation=1") < 0) {
+			PMD_DRV_LOG(ERR, "Error creating eventdev\n");
+			free(t);
+			return -1;
+		}
+		evdev = rte_event_dev_get_dev_id(eventdev_name);
+		if (evdev < 0) {
+			PMD_DRV_LOG(ERR, "Error finding newly created eventdev\n");
+			free(t);
+			return -1;
+		}
+	}
+
+	/* Only create mbuf pool once, reuse for each test run */
+	if (!eventdev_func_mempool) {
+		eventdev_func_mempool = rte_pktmbuf_pool_create(
+				"EVENTDEV_SW_SA_MBUF_POOL",
+				(1<<12), /* 4k buffers */
+				32 /*MBUF_CACHE_SIZE*/,
+				0,
+				512, /* use very small mbufs */
+				rte_socket_id());
+		if (!eventdev_func_mempool) {
+			PMD_DRV_LOG(ERR, "ERROR creating mempool\n");
+			free(t);
+			return -1;
+		}
+	}
+	t->mbuf_pool = eventdev_func_mempool;
+
+	PMD_DRV_LOG(ERR, "*** Running Ordered Basic test...\n");
+	ret = ordered_basic(t);
+
+	PMD_DRV_LOG(ERR, "*** Running Atomic Basic test...\n");
+	ret = atomic_basic(t);
+
+
+	PMD_DRV_LOG(ERR, "*** Running QID  Basic test...\n");
+	ret = qid_basic(t);
+
+	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test...\n");
+	ret = single_link(t);
+
+	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test...\n");
+	ret = single_link_w_stats(t);
+
+	/* Free test instance and mempool */
+	rte_mempool_free(t->mbuf_pool);
+	free(t);
+
+	return ret;
+}
diff --git a/drivers/event/opdl/rte_pmd_evdev_opdl_version.map b/drivers/event/opdl/rte_pmd_evdev_opdl_version.map
new file mode 100644
index 00000000..58b94270
--- /dev/null
+++ b/drivers/event/opdl/rte_pmd_evdev_opdl_version.map
@@ -0,0 +1,3 @@
+DPDK_18.02 {
+	local: *;
+};
diff --git a/drivers/event/skeleton/Makefile b/drivers/event/skeleton/Makefile
index a24738b1..0f7f07ea 100644
--- a/drivers/event/skeleton/Makefile
+++ b/drivers/event/skeleton/Makefile
@@ -1,33 +1,5 @@
-#   BSD LICENSE
-#
-#   Copyright(c) 2016 Cavium, Inc. All rights reserved.
-#   All rights reserved.
-#
-#   Redistribution and use in source and binary forms, with or without
-#   modification, are permitted provided that the following conditions
-#   are met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in
-#       the documentation and/or other materials provided with the
-#       distribution.
-#     * Neither the name of Cavium, Inc nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Cavium, Inc
 #
 
 include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/event/skeleton/meson.build b/drivers/event/skeleton/meson.build
new file mode 100644
index 00000000..acfe1565
--- /dev/null
+++ b/drivers/event/skeleton/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('skeleton_eventdev.c')
+deps += ['bus_pci', 'bus_vdev']
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index bb554c36..7f467568 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium, Inc. 2016.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
  */
 
 #include <assert.h>
@@ -237,6 +209,7 @@ skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->new_event_threshold = 32 * 1024;
 	port_conf->dequeue_depth = 16;
 	port_conf->enqueue_depth = 16;
+	port_conf->disable_implicit_release = 0;
 }
 
 static void
diff --git a/drivers/event/skeleton/skeleton_eventdev.h b/drivers/event/skeleton/skeleton_eventdev.h
index 32064721..ba64b8ae 100644
--- a/drivers/event/skeleton/skeleton_eventdev.h
+++ b/drivers/event/skeleton/skeleton_eventdev.h
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium, Inc. 2016.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
  */
 
 #ifndef __SKELETON_EVENTDEV_H__
diff --git a/drivers/event/sw/Makefile b/drivers/event/sw/Makefile
index 2f2b67ba..81236a39 100644
--- a/drivers/event/sw/Makefile
+++ b/drivers/event/sw/Makefile
@@ -1,32 +1,5 @@
-#   BSD LICENSE
-#
-#   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
-#
-#   Redistribution and use in source and binary forms, with or without
-#   modification, are permitted provided that the following conditions
-#   are met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in
-#       the documentation and/or other materials provided with the
-#       distribution.
-#     * Neither the name of Intel Corporation nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016-2017 Intel Corporation
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
@@ -34,6 +7,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
 LIB = librte_pmd_sw_event.a
 
 # build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 # for older GCC versions, allow us to initialize an event using
@@ -44,6 +18,7 @@ CFLAGS += -Wno-missing-field-initializers
 endif
 endif
 LDLIBS += -lrte_eal -lrte_eventdev -lrte_kvargs -lrte_ring
+LDLIBS += -lrte_mempool -lrte_mbuf
 LDLIBS += -lrte_bus_vdev
 
 # library version
@@ -57,6 +32,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_worker.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_scheduler.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_xstats.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_selftest.c
 
 # export include files
 SYMLINK-y-include +=
diff --git a/drivers/event/sw/event_ring.h b/drivers/event/sw/event_ring.h
index 734a3b4b..02308728 100644
--- a/drivers/event/sw/event_ring.h
+++ b/drivers/event/sw/event_ring.h
@@ -1,33 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 /*
diff --git a/drivers/event/sw/iq_chunk.h b/drivers/event/sw/iq_chunk.h
new file mode 100644
index 00000000..31d013ea
--- /dev/null
+++ b/drivers/event/sw/iq_chunk.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _IQ_CHUNK_H_
+#define _IQ_CHUNK_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <rte_eventdev.h>
+
+#define IQ_ROB_NAMESIZE 12
+
+struct sw_queue_chunk {
+	struct rte_event events[SW_EVS_PER_Q_CHUNK];
+	struct sw_queue_chunk *next;
+} __rte_cache_aligned;
+
+static __rte_always_inline bool
+iq_empty(struct sw_iq *iq)
+{
+	return (iq->count == 0);
+}
+
+static __rte_always_inline uint16_t
+iq_count(const struct sw_iq *iq)
+{
+	return iq->count;
+}
+
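+/* Pop a chunk from the device-wide free list. The list is sized in
+ * sw_dev_configure() so that allocation cannot fail while the IQs are
+ * in use, hence no NULL check here.
+ */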
+static __rte_always_inline struct sw_queue_chunk *
+iq_alloc_chunk(struct sw_evdev *sw)
+{
+	struct sw_queue_chunk *chunk = sw->chunk_list_head;
+	sw->chunk_list_head = chunk->next;
+	chunk->next = NULL;
+	return chunk;
+}
+
+static __rte_always_inline void
+iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
+{
+	chunk->next = sw->chunk_list_head;
+	sw->chunk_list_head = chunk;
+}
+
+static __rte_always_inline void
+iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
+{
+	while (head) {
+		struct sw_queue_chunk *next;
+		next = head->next;
+		iq_free_chunk(sw, head);
+		head = next;
+	}
+}
+
+static __rte_always_inline void
+iq_init(struct sw_evdev *sw, struct sw_iq *iq)
+{
+	iq->head = iq_alloc_chunk(sw);
+	iq->tail = iq->head;
+	iq->head_idx = 0;
+	iq->tail_idx = 0;
+	iq->count = 0;
+}
+
+static __rte_always_inline void
+iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
+{
+	iq->tail->events[iq->tail_idx++] = *ev;
+	iq->count++;
+
+	if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
+		/* The number of chunks is defined in relation to the total
+		 * number of inflight events and number of IQs such that
+		 * allocation will always succeed.
+		 */
+		struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);
+		iq->tail->next = chunk;
+		iq->tail = chunk;
+		iq->tail_idx = 0;
+	}
+}
+
+static __rte_always_inline void
+iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
+{
+	iq->head_idx++;
+	iq->count--;
+
+	if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
+		struct sw_queue_chunk *next = iq->head->next;
+		iq_free_chunk(sw, iq->head);
+		iq->head = next;
+		iq->head_idx = 0;
+	}
+}
+
+static __rte_always_inline const struct rte_event *
+iq_peek(struct sw_iq *iq)
+{
+	return &iq->head->events[iq->head_idx];
+}
+
+/* Note: the caller must ensure that count <= iq_count() */
+static __rte_always_inline uint16_t
+iq_dequeue_burst(struct sw_evdev *sw,
+		 struct sw_iq *iq,
+		 struct rte_event *ev,
+		 uint16_t count)
+{
+	struct sw_queue_chunk *current;
+	uint16_t total, index;
+
+	count = RTE_MIN(count, iq_count(iq));
+
+	current = iq->head;
+	index = iq->head_idx;
+	total = 0;
+
+	/* Loop over the chunks */
+	while (1) {
+		struct sw_queue_chunk *next;
+		for (; index < SW_EVS_PER_Q_CHUNK;) {
+			ev[total++] = current->events[index++];
+
+			if (unlikely(total == count))
+				goto done;
+		}
+
+		/* Move to the next chunk */
+		next = current->next;
+		iq_free_chunk(sw, current);
+		current = next;
+		index = 0;
+	}
+
+done:
+	if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
+		struct sw_queue_chunk *next = current->next;
+		iq_free_chunk(sw, current);
+		iq->head = next;
+		iq->head_idx = 0;
+	} else {
+		iq->head = current;
+		iq->head_idx = index;
+	}
+
+	iq->count -= total;
+
+	return total;
+}
+
+static __rte_always_inline void
+iq_put_back(struct sw_evdev *sw,
+	    struct sw_iq *iq,
+	    struct rte_event *ev,
+	    unsigned int count)
+{
+	/* Put back events that fit in the current head chunk. If necessary,
+	 * put back events in a new head chunk. The caller must ensure that
+	 * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is
+	 * needed.
+	 */
+	uint16_t avail_space = iq->head_idx;
+
+	if (avail_space >= count) {
+		const uint16_t idx = avail_space - count;
+		uint16_t i;
+
+		for (i = 0; i < count; i++)
+			iq->head->events[idx + i] = ev[i];
+
+		iq->head_idx = idx;
+	} else {
+		const uint16_t remaining = count - avail_space;
+		struct sw_queue_chunk *new_head;
+		uint16_t i;
+
+		for (i = 0; i < avail_space; i++)
+			iq->head->events[i] = ev[remaining + i];
+
+		new_head = iq_alloc_chunk(sw);
+		new_head->next = iq->head;
+		iq->head = new_head;
+		iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;
+
+		for (i = 0; i < remaining; i++)
+			iq->head->events[iq->head_idx + i] = ev[i];
+	}
+
+	iq->count += count;
+}
+
+#endif /* _IQ_CHUNK_H_ */
diff --git a/drivers/event/sw/iq_ring.h b/drivers/event/sw/iq_ring.h
deleted file mode 100644
index 64cf6784..00000000
--- a/drivers/event/sw/iq_ring.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Ring structure definitions used for the internal ring buffers of the
- * SW eventdev implementation. These are designed for single-core use only.
- */
-#ifndef _IQ_RING_
-#define _IQ_RING_
-
-#include <stdint.h>
-
-#include <rte_common.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_eventdev.h>
-
-#define IQ_RING_NAMESIZE 12
-#define QID_IQ_DEPTH 512
-#define QID_IQ_MASK (uint16_t)(QID_IQ_DEPTH - 1)
-
-struct iq_ring {
-	char name[IQ_RING_NAMESIZE] __rte_cache_aligned;
-	uint16_t write_idx;
-	uint16_t read_idx;
-
-	struct rte_event ring[QID_IQ_DEPTH];
-};
-
-static inline struct iq_ring *
-iq_ring_create(const char *name, unsigned int socket_id)
-{
-	struct iq_ring *retval;
-
-	retval = rte_malloc_socket(NULL, sizeof(*retval), 0, socket_id);
-	if (retval == NULL)
-		goto end;
-
-	snprintf(retval->name, sizeof(retval->name), "%s", name);
-	retval->write_idx = retval->read_idx = 0;
-end:
-	return retval;
-}
-
-static inline void
-iq_ring_destroy(struct iq_ring *r)
-{
-	rte_free(r);
-}
-
-static __rte_always_inline uint16_t
-iq_ring_count(const struct iq_ring *r)
-{
-	return r->write_idx - r->read_idx;
-}
-
-static __rte_always_inline uint16_t
-iq_ring_free_count(const struct iq_ring *r)
-{
-	return QID_IQ_MASK - iq_ring_count(r);
-}
-
-static __rte_always_inline uint16_t
-iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
-{
-	const uint16_t read = r->read_idx;
-	uint16_t write = r->write_idx;
-	const uint16_t space = read + QID_IQ_MASK - write;
-	uint16_t i;
-
-	if (space < nb_qes)
-		nb_qes = space;
-
-	for (i = 0; i < nb_qes; i++, write++)
-		r->ring[write & QID_IQ_MASK] = qes[i];
-
-	r->write_idx = write;
-
-	return nb_qes;
-}
-
-static __rte_always_inline uint16_t
-iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
-{
-	uint16_t read = r->read_idx;
-	const uint16_t write = r->write_idx;
-	const uint16_t items = write - read;
-	uint16_t i;
-
-	for (i = 0; i < nb_qes; i++, read++)
-		qes[i] = r->ring[read & QID_IQ_MASK];
-
-	if (items < nb_qes)
-		nb_qes = items;
-
-	r->read_idx += nb_qes;
-
-	return nb_qes;
-}
-
-/* assumes there is space, from a previous dequeue_burst */
-static __rte_always_inline uint16_t
-iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
-{
-	uint16_t i, read = r->read_idx;
-
-	for (i = nb_qes; i-- > 0; )
-		r->ring[--read & QID_IQ_MASK] = qes[i];
-
-	r->read_idx = read;
-	return nb_qes;
-}
-
-static __rte_always_inline const struct rte_event *
-iq_ring_peek(const struct iq_ring *r)
-{
-	return &r->ring[r->read_idx & QID_IQ_MASK];
-}
-
-static __rte_always_inline void
-iq_ring_pop(struct iq_ring *r)
-{
-	r->read_idx++;
-}
-
-static __rte_always_inline int
-iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)
-{
-	const uint16_t read = r->read_idx;
-	const uint16_t write = r->write_idx;
-	const uint16_t space = read + QID_IQ_MASK - write;
-
-	if (space == 0)
-		return -1;
-
-	r->ring[write & QID_IQ_MASK] = *qe;
-
-	r->write_idx = write + 1;
-
-	return 0;
-}
-
-#endif
diff --git a/drivers/event/sw/meson.build b/drivers/event/sw/meson.build
new file mode 100644
index 00000000..30d22164
--- /dev/null
+++ b/drivers/event/sw/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('sw_evdev_scheduler.c',
+	'sw_evdev_selftest.c',
+	'sw_evdev_worker.c',
+	'sw_evdev_xstats.c',
+	'sw_evdev.c'
+)
+deps += ['hash', 'bus_vdev']
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index fd110797..6672fd8e 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -1,33 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <inttypes.h>
@@ -41,7 +13,7 @@
 #include <rte_service_component.h>
 
 #include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
 
 #define EVENTDEV_NAME_SW_PMD event_sw
 #define NUMA_NODE_ARG "numa_node"
@@ -62,6 +34,7 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
 	RTE_SET_USED(priorities);
 	for (i = 0; i < num; i++) {
 		struct sw_qid *q = &sw->qids[queues[i]];
+		unsigned int j;
 
 		/* check for qid map overflow */
 		if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
@@ -74,6 +47,15 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
 			break;
 		}
 
+		for (j = 0; j < q->cq_num_mapped_cqs; j++) {
+			if (q->cq_map[j] == p->id)
+				break;
+		}
+
+		/* check if port is already linked */
+		if (j < q->cq_num_mapped_cqs)
+			continue;
+
 		if (q->type == SW_SCHED_TYPE_DIRECT) {
 			/* check directed qids only map to one port */
 			if (p->num_qids_mapped > 0) {
@@ -181,6 +163,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	}
 
 	p->inflight_max = conf->new_event_threshold;
+	p->implicit_release = !conf->disable_implicit_release;
 
 	/* check if ring exists, same as rx_worker above */
 	snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -231,18 +214,9 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	unsigned int i;
 	int dev_id = sw->data->dev_id;
 	int socket_id = sw->data->socket_id;
-	char buf[IQ_RING_NAMESIZE];
+	char buf[IQ_ROB_NAMESIZE];
 	struct sw_qid *qid = &sw->qids[idx];
 
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
-		qid->iq[i] = iq_ring_create(buf, socket_id);
-		if (!qid->iq[i]) {
-			SW_LOG_DBG("ring create failed");
-			goto cleanup;
-		}
-	}
-
 	/* Initialize the FID structures to no pinning (-1), and zero packets */
 	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
 	for (i = 0; i < RTE_DIM(qid->fids); i++)
@@ -320,11 +294,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	return 0;
 
 cleanup:
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		if (qid->iq[i])
-			iq_ring_destroy(qid->iq[i]);
-	}
-
 	if (qid->reorder_buffer) {
 		rte_free(qid->reorder_buffer);
 		qid->reorder_buffer = NULL;
@@ -338,6 +307,19 @@ cleanup:
 	return -EINVAL;
 }
 
+static void
+sw_queue_release(struct rte_eventdev *dev, uint8_t id)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	struct sw_qid *qid = &sw->qids[id];
+
+	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+		rte_free(qid->reorder_buffer);
+		rte_ring_free(qid->reorder_buffer_freelist);
+	}
+	memset(qid, 0, sizeof(*qid));
+}
+
 static int
 sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 		const struct rte_event_queue_conf *conf)
@@ -355,24 +337,46 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	}
 
 	struct sw_evdev *sw = sw_pmd_priv(dev);
+
+	if (sw->qids[queue_id].initialized)
+		sw_queue_release(dev, queue_id);
+
 	return qid_init(sw, queue_id, type, conf);
 }
 
 static void
-sw_queue_release(struct rte_eventdev *dev, uint8_t id)
+sw_init_qid_iqs(struct sw_evdev *sw)
 {
-	struct sw_evdev *sw = sw_pmd_priv(dev);
-	struct sw_qid *qid = &sw->qids[id];
-	uint32_t i;
+	int i, j;
 
-	for (i = 0; i < SW_IQS_MAX; i++)
-		iq_ring_destroy(qid->iq[i]);
+	/* Initialize the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
 
-	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
-		rte_free(qid->reorder_buffer);
-		rte_ring_free(qid->reorder_buffer_freelist);
+		if (!qid->initialized)
+			continue;
+
+		for (j = 0; j < SW_IQS_MAX; j++)
+			iq_init(sw, &qid->iq[j]);
+	}
+}
+
+static void
+sw_clean_qid_iqs(struct sw_evdev *sw)
+{
+	int i, j;
+
+	/* Release the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		for (j = 0; j < SW_IQS_MAX; j++) {
+			if (!qid->iq[j].head)
+				continue;
+			iq_free_chunk_list(sw, qid->iq[j].head);
+			qid->iq[j].head = NULL;
+		}
 	}
-	memset(qid, 0, sizeof(*qid));
 }
 
 static void
@@ -402,6 +406,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->new_event_threshold = 1024;
 	port_conf->dequeue_depth = 16;
 	port_conf->enqueue_depth = 16;
+	port_conf->disable_implicit_release = 0;
 }
 
 static int
@@ -410,12 +415,36 @@ sw_dev_configure(const struct rte_eventdev *dev)
 	struct sw_evdev *sw = sw_pmd_priv(dev);
 	const struct rte_eventdev_data *data = dev->data;
 	const struct rte_event_dev_config *conf = &data->dev_conf;
+	int num_chunks, i;
 
 	sw->qid_count = conf->nb_event_queues;
 	sw->port_count = conf->nb_event_ports;
 	sw->nb_events_limit = conf->nb_events_limit;
 	rte_atomic32_set(&sw->inflights, 0);
 
+	/* Number of chunks sized for worst-case spread of events across IQs:
+	 * enough to hold all inflight events, plus two chunks of slack per
+	 * IQ (head and tail may each be partially filled).
+	 */
+	num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
+			sw->qid_count*SW_IQS_MAX*2;
+
+	/* If this is a reconfiguration, free the previous IQ allocation. All
+	 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
+	 * will be reinitialized in sw_start().
+	 */
+	if (sw->chunks)
+		rte_free(sw->chunks);
+
+	sw->chunks = rte_malloc_socket(NULL,
+				       sizeof(struct sw_queue_chunk) *
+				       num_chunks,
+				       0,
+				       sw->data->socket_id);
+	if (!sw->chunks)
+		return -ENOMEM;
+
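+	/* Seed the free list by releasing every chunk of the new allocation */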
+	sw->chunk_list_head = NULL;
+	for (i = 0; i < num_chunks; i++)
+		iq_free_chunk(sw, &sw->chunks[i]);
+
 	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
 		return -ENOTSUP;
 
@@ -450,9 +479,14 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 			.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
 			.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
 			.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
-			.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
-					RTE_EVENT_DEV_CAP_BURST_MODE |
-					RTE_EVENT_DEV_CAP_EVENT_QOS),
+			.event_dev_cap = (
+				RTE_EVENT_DEV_CAP_QUEUE_QOS |
+				RTE_EVENT_DEV_CAP_BURST_MODE |
+				RTE_EVENT_DEV_CAP_EVENT_QOS |
+				RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
+				RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+				RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+				RTE_EVENT_DEV_CAP_NONSEQ_MODE),
 	};
 
 	*info = evdev_sw_info;
@@ -589,17 +623,16 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 		uint32_t iq;
 		uint32_t iq_printed = 0;
 		for (iq = 0; iq < SW_IQS_MAX; iq++) {
-			if (!qid->iq[iq]) {
+			if (!qid->iq[iq].head) {
 				fprintf(f, "\tiq %d is not initialized.\n", iq);
 				iq_printed = 1;
 				continue;
 			}
-			uint32_t used = iq_ring_count(qid->iq[iq]);
-			uint32_t free = iq_ring_free_count(qid->iq[iq]);
-			const char *col = (free == 0) ? COL_RED : COL_RESET;
+			uint32_t used = iq_count(&qid->iq[iq]);
+			const char *col = COL_RESET;
 			if (used > 0) {
-				fprintf(f, "\t%siq %d: Used %d\tFree %d"
-					COL_RESET"\n", col, iq, used, free);
+				fprintf(f, "\t%siq %d: Used %d"
+					COL_RESET"\n", col, iq, used);
 				iq_printed = 1;
 			}
 		}
@@ -632,8 +665,8 @@ sw_start(struct rte_eventdev *dev)
 
 	/* check all queues are configured and mapped to ports*/
 	for (i = 0; i < sw->qid_count; i++)
-		if (sw->qids[i].iq[0] == NULL ||
-				sw->qids[i].cq_num_mapped_cqs == 0) {
+		if (!sw->qids[i].initialized ||
+		    sw->qids[i].cq_num_mapped_cqs == 0) {
 			SW_LOG_ERR("Queue %d not configured\n", i);
 			return -ENOLINK;
 		}
@@ -654,6 +687,8 @@ sw_start(struct rte_eventdev *dev)
 		}
 	}
 
+	sw_init_qid_iqs(sw);
+
 	if (sw_xstats_init(sw) < 0)
 		return -EINVAL;
 
@@ -667,6 +702,7 @@ static void
 sw_stop(struct rte_eventdev *dev)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
+	sw_clean_qid_iqs(sw);
 	sw_xstats_uninit(sw);
 	sw->started = 0;
 	rte_smp_wmb();
@@ -759,6 +795,8 @@ sw_probe(struct rte_vdev_device *vdev)
 			.xstats_get_names = sw_xstats_get_names,
 			.xstats_get_by_name = sw_xstats_get_by_name,
 			.xstats_reset = sw_xstats_reset,
+
+			.dev_selftest = test_sw_eventdev,
 	};
 
 	static const char *const args[] = {
@@ -891,3 +929,15 @@ static struct rte_vdev_driver evdev_sw_pmd_drv = {
 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
 RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
 		SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>");
+
+/* declared extern in header, for access from other .c files */
+int eventdev_sw_log_level;
+
+RTE_INIT(evdev_sw_init_log);
+static void
+evdev_sw_init_log(void)
+{
+	eventdev_sw_log_level = rte_log_register("pmd.event.sw");
+	if (eventdev_sw_log_level >= 0)
+		rte_log_set_level(eventdev_sw_log_level, RTE_LOG_NOTICE);
+}
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index e0dec910..d90b96d4 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -1,38 +1,11 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #ifndef _SW_EVDEV_H_
 #define _SW_EVDEV_H_
 
+#include "sw_evdev_log.h"
 #include <rte_eventdev.h>
 #include <rte_eventdev_pmd_vdev.h>
 #include <rte_atomic.h>
@@ -49,6 +22,10 @@
 #define MAX_SW_PROD_Q_DEPTH 4096
 #define SW_FRAGMENTS_MAX 16
 
+/* Should be power-of-two minus one, to leave room for the next pointer */
+#define SW_EVS_PER_Q_CHUNK 255
+#define SW_Q_CHUNK_SIZE ((SW_EVS_PER_Q_CHUNK + 1) * sizeof(struct rte_event))
+
 /* report dequeue burst sizes in buckets */
 #define SW_DEQ_STAT_BUCKET_SHIFT 2
 /* how many packets pulled from port by sched */
@@ -88,26 +65,6 @@ static const uint8_t sw_qe_flag_map[] = {
 		QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
 };
 
-#ifdef RTE_LIBRTE_PMD_EVDEV_SW_DEBUG
-#define SW_LOG_INFO(fmt, args...) \
-	RTE_LOG(INFO, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
-			SW_PMD_NAME, \
-			__func__, __LINE__, ## args)
-
-#define SW_LOG_DBG(fmt, args...) \
-	RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
-			SW_PMD_NAME, \
-			__func__, __LINE__, ## args)
-#else
-#define SW_LOG_INFO(fmt, args...)
-#define SW_LOG_DBG(fmt, args...)
-#endif
-
-#define SW_LOG_ERR(fmt, args...) \
-	RTE_LOG(ERR, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
-			SW_PMD_NAME, \
-			__func__, __LINE__, ## args)
-
 /* Records basic event stats at a given point. Used in port and qid structs */
 struct sw_point_stats {
 	uint64_t rx_pkts;
@@ -130,6 +87,14 @@ struct reorder_buffer_entry {
 	struct rte_event fragments[SW_FRAGMENTS_MAX];
 };
 
+struct sw_iq {
+	struct sw_queue_chunk *head;
+	struct sw_queue_chunk *tail;
+	uint16_t head_idx;
+	uint16_t tail_idx;
+	uint16_t count;
+};
+
 struct sw_qid {
 	/* set when the QID has been initialized */
 	uint8_t initialized;
@@ -142,7 +107,7 @@ struct sw_qid {
 	struct sw_point_stats stats;
 
 	/* Internal priority rings for packets */
-	struct iq_ring *iq[SW_IQS_MAX];
+	struct sw_iq iq[SW_IQS_MAX];
 	uint32_t iq_pkt_mask; /* A mask to indicate packets in an IQ */
 	uint64_t iq_pkt_count[SW_IQS_MAX];
 
@@ -201,6 +166,7 @@ struct sw_port {
 	uint16_t outstanding_releases __rte_cache_aligned;
 	uint16_t inflight_max; /* app requested max inflights for this port */
 	uint16_t inflight_credits; /* num credits this port has right now */
+	uint8_t implicit_release; /* release events before dequeueing */
 
 	uint16_t last_dequeue_burst_sz; /* how big the burst was */
 	uint64_t last_dequeue_ticks; /* used to track burst processing time */
@@ -253,6 +219,8 @@ struct sw_evdev {
 
 	/* Internal queues - one per logical queue */
 	struct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
+	struct sw_queue_chunk *chunk_list_head;
+	struct sw_queue_chunk *chunks;
 
 	/* Cache how many packets are in each cq */
 	uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;
@@ -319,5 +287,6 @@ int sw_xstats_reset(struct rte_eventdev *dev,
 		const uint32_t ids[],
 		uint32_t nb_ids);
 
+int test_sw_eventdev(void);
 
 #endif /* _SW_EVDEV_H_ */
diff --git a/drivers/event/sw/sw_evdev_log.h b/drivers/event/sw/sw_evdev_log.h
new file mode 100644
index 00000000..f76825ab
--- /dev/null
+++ b/drivers/event/sw/sw_evdev_log.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _SW_EVDEV_LOG_H_
+#define _SW_EVDEV_LOG_H_
+
+extern int eventdev_sw_log_level;
+
+#define SW_LOG_IMPL(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, eventdev_sw_log_level, "%s" fmt "\n", \
+			__func__, ##args)
+
+#define SW_LOG_INFO(fmt, args...) \
+	SW_LOG_IMPL(INFO, fmt, ## args)
+
+#define SW_LOG_DBG(fmt, args...) \
+	SW_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define SW_LOG_ERR(fmt, args...) \
+	SW_LOG_IMPL(ERR, fmt, ## args)
+
+#endif /* _SW_EVDEV_LOG_H_ */
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index 8a2c9d4f..3106eb33 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -1,40 +1,12 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <rte_ring.h>
 #include <rte_hash_crc.h>
 #include <rte_event_ring.h>
 #include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
 
 #define SW_IQS_MASK (SW_IQS_MAX-1)
 
@@ -71,7 +43,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 	 */
 	uint32_t qid_id = qid->id;
 
-	iq_ring_dequeue_burst(qid->iq[iq_num], qes, count);
+	iq_dequeue_burst(sw, &qid->iq[iq_num], qes, count);
 	for (i = 0; i < count; i++) {
 		const struct rte_event *qe = &qes[i];
 		const uint16_t flow_id = SW_HASH_FLOWID(qes[i].flow_id);
@@ -130,7 +102,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 			p->cq_buf_count = 0;
 		}
 	}
-	iq_ring_put_back(qid->iq[iq_num], blocked_qes, nb_blocked);
+	iq_put_back(sw, &qid->iq[iq_num], blocked_qes, nb_blocked);
 
 	return count - nb_blocked;
 }
@@ -156,7 +128,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 				rte_ring_count(qid->reorder_buffer_freelist));
 
 	for (i = 0; i < count; i++) {
-		const struct rte_event *qe = iq_ring_peek(qid->iq[iq_num]);
+		const struct rte_event *qe = iq_peek(&qid->iq[iq_num]);
 		uint32_t cq_check_count = 0;
 		uint32_t cq;
 
@@ -193,7 +165,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 					(void *)&p->hist_list[head].rob_entry);
 
 		sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;
-		iq_ring_pop(qid->iq[iq_num]);
+		iq_pop(sw, &qid->iq[iq_num]);
 
 		rte_compiler_barrier();
 		p->inflights++;
@@ -218,8 +190,8 @@ sw_schedule_dir_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 		return 0;
 
 	/* burst dequeue from the QID IQ ring */
-	struct iq_ring *ring = qid->iq[iq_num];
-	uint32_t ret = iq_ring_dequeue_burst(ring,
+	struct sw_iq *iq = &qid->iq[iq_num];
+	uint32_t ret = iq_dequeue_burst(sw, iq,
 			&port->cq_buf[port->cq_buf_count], count_free);
 	port->cq_buf_count += ret;
 
@@ -252,7 +224,7 @@ sw_schedule_qid_to_cq(struct sw_evdev *sw)
 			continue;
 
 		uint32_t pkts_done = 0;
-		uint32_t count = iq_ring_count(qid->iq[iq_num]);
+		uint32_t count = iq_count(&qid->iq[iq_num]);
 
 		if (count > 0) {
 			if (type == SW_SCHED_TYPE_DIRECT)
@@ -324,22 +296,15 @@ sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
 					continue;
 				}
 
-				struct sw_qid *dest_qid_ptr =
-					&sw->qids[dest_qid];
-				const struct iq_ring *dest_iq_ptr =
-					dest_qid_ptr->iq[dest_iq];
-				if (iq_ring_free_count(dest_iq_ptr) == 0)
-					break;
-
 				pkts_iter++;
 
 				struct sw_qid *q = &sw->qids[dest_qid];
-				struct iq_ring *r = q->iq[dest_iq];
+				struct sw_iq *iq = &q->iq[dest_iq];
 
 				/* we checked for space above, so enqueue must
 				 * succeed
 				 */
-				iq_ring_enqueue(r, qe);
+				iq_enqueue(sw, iq, qe);
 				q->iq_pkt_mask |= (1 << (dest_iq));
 				q->iq_pkt_count[dest_iq]++;
 				q->stats.rx_pkts++;
@@ -404,10 +369,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
 		uint32_t iq_num = PRIO_TO_IQ(qe->priority);
 		struct sw_qid *qid = &sw->qids[qe->queue_id];
 
-		if ((flags & QE_FLAG_VALID) &&
-				iq_ring_free_count(qid->iq[iq_num]) == 0)
-			break;
-
 		/* now process based on flags. Note that for directed
 		 * queues, the enqueue_flush masks off all but the
 		 * valid flag. This makes FWD and PARTIAL enqueues just
@@ -471,7 +432,7 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
 			 */
 
 			qid->iq_pkt_mask |= (1 << (iq_num));
-			iq_ring_enqueue(qid->iq[iq_num], qe);
+			iq_enqueue(sw, &qid->iq[iq_num], qe);
 			qid->iq_pkt_count[iq_num]++;
 			qid->stats.rx_pkts++;
 			pkts_iter++;
@@ -516,10 +477,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
 
 		uint32_t iq_num = PRIO_TO_IQ(qe->priority);
 		struct sw_qid *qid = &sw->qids[qe->queue_id];
-		struct iq_ring *iq_ring = qid->iq[iq_num];
-
-		if (iq_ring_free_count(iq_ring) == 0)
-			break; /* move to next port */
+		struct sw_iq *iq = &qid->iq[iq_num];
 
 		port->stats.rx_pkts++;
 
@@ -527,7 +485,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
 		 * into the qid at the right priority
 		 */
 		qid->iq_pkt_mask |= (1 << (iq_num));
-		iq_ring_enqueue(iq_ring, qe);
+		iq_enqueue(sw, iq, qe);
 		qid->iq_pkt_count[iq_num]++;
 		qid->stats.rx_pkts++;
 		pkts_iter++;
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
new file mode 100644
index 00000000..78d30e07
--- /dev/null
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -0,0 +1,3245 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_pause.h>
+#include <rte_service.h>
+#include <rte_service_component.h>
+#include <rte_bus_vdev.h>
+
+#include "sw_evdev.h"
+
+#define MAX_PORTS 16
+#define MAX_QIDS 16
+#define NUM_PACKETS (1<<18)
+
+static int evdev;
+
+struct test {
+	struct rte_mempool *mbuf_pool;
+	uint8_t port[MAX_PORTS];
+	uint8_t qid[MAX_QIDS];
+	int nb_qids;
+	uint32_t service_id;
+};
+
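+/* release_ev is enqueued by tests below to drop an in-flight event; its op
+ * field must be RTE_EVENT_OP_RELEASE (zero-initialization alone would leave
+ * it as RTE_EVENT_OP_NEW, which is 0), so the test entry point is expected
+ * to set it up before any test runs.
+ */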
+static struct rte_event release_ev;
+
+static inline struct rte_mbuf *
+rte_gen_arp(int portid, struct rte_mempool *mp)
+{
+	/*
+	 * len = 14 + 46
+	 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
+	 */
+	static const uint8_t arp_request[] = {
+		/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
+		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
+		/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
+		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
+		/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
+		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00
+	};
+	struct rte_mbuf *m;
+	int pkt_len = sizeof(arp_request) - 1;
+
+	m = rte_pktmbuf_alloc(mp);
+	if (!m)
+		return 0;
+
+	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
+		arp_request, pkt_len);
+	rte_pktmbuf_pkt_len(m) = pkt_len;
+	rte_pktmbuf_data_len(m) = pkt_len;
+
+	RTE_SET_USED(portid);
+
+	return m;
+}
+
+static void
+xstats_print(void)
+{
+	const uint32_t XSTATS_MAX = 1024;
+	uint32_t i;
+	uint32_t ids[XSTATS_MAX];
+	uint64_t values[XSTATS_MAX];
+	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+	for (i = 0; i < XSTATS_MAX; i++)
+		ids[i] = i;
+
+	/* Device names / values */
+	int ret = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+					xstats_names, ids, XSTATS_MAX);
+	if (ret < 0) {
+		printf("%d: xstats names get() returned error\n",
+			__LINE__);
+		return;
+	}
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_DEVICE,
+					0, ids, values, ret);
+	if (ret > (signed int)XSTATS_MAX)
+		printf("%s %d: more xstats available than space\n",
+				__func__, __LINE__);
+	for (i = 0; (signed int)i < ret; i++) {
+		printf("%d : %s : %"PRIu64"\n",
+				i, xstats_names[i].name, values[i]);
+	}
+
+	/* Port names / values */
+	ret = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_PORT, 0,
+					xstats_names, ids, XSTATS_MAX);
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_PORT, 1,
+					ids, values, ret);
+	if (ret > (signed int)XSTATS_MAX)
+		printf("%s %d: more xstats available than space\n",
+				__func__, __LINE__);
+	for (i = 0; (signed int)i < ret; i++) {
+		printf("%d : %s : %"PRIu64"\n",
+				i, xstats_names[i].name, values[i]);
+	}
+
+	/* Queue names / values */
+	ret = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+					xstats_names, ids, XSTATS_MAX);
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_QUEUE,
+					1, ids, values, ret);
+	if (ret > (signed int)XSTATS_MAX)
+		printf("%s %d: more xstats available than space\n",
+				__func__, __LINE__);
+	for (i = 0; (signed int)i < ret; i++) {
+		printf("%d : %s : %"PRIu64"\n",
+				i, xstats_names[i].name, values[i]);
+	}
+}
+
+/* initialization and config */
+static inline int
+init(struct test *t, int nb_queues, int nb_ports)
+{
+	struct rte_event_dev_config config = {
+			.nb_event_queues = nb_queues,
+			.nb_event_ports = nb_ports,
+			.nb_event_queue_flows = 1024,
+			.nb_events_limit = 4096,
+			.nb_event_port_dequeue_depth = 128,
+			.nb_event_port_enqueue_depth = 128,
+	};
+	int ret;
+
+	void *temp = t->mbuf_pool; /* save and restore mbuf pool */
+
+	memset(t, 0, sizeof(*t));
+	t->mbuf_pool = temp;
+
+	ret = rte_event_dev_configure(evdev, &config);
+	if (ret < 0)
+		printf("%d: Error configuring device\n", __LINE__);
+	return ret;
+}
+
+static inline int
+create_ports(struct test *t, int num_ports)
+{
+	int i;
+	static const struct rte_event_port_conf conf = {
+			.new_event_threshold = 1024,
+			.dequeue_depth = 32,
+			.enqueue_depth = 64,
+			.disable_implicit_release = 0,
+	};
+	if (num_ports > MAX_PORTS)
+		return -1;
+
+	for (i = 0; i < num_ports; i++) {
+		if (rte_event_port_setup(evdev, i, &conf) < 0) {
+			printf("Error setting up port %d\n", i);
+			return -1;
+		}
+		t->port[i] = i;
+	}
+
+	return 0;
+}
+
+static inline int
+create_lb_qids(struct test *t, int num_qids, uint32_t flags)
+{
+	int i;
+
+	/* Q creation */
+	const struct rte_event_queue_conf conf = {
+			.schedule_type = flags,
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+			.nb_atomic_flows = 1024,
+			.nb_atomic_order_sequences = 1024,
+	};
+
+	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
+		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+			printf("%d: error creating qid %d\n", __LINE__, i);
+			return -1;
+		}
+		t->qid[i] = i;
+	}
+	t->nb_qids += num_qids;
+	if (t->nb_qids > MAX_QIDS)
+		return -1;
+
+	return 0;
+}
+
+static inline int
+create_atomic_qids(struct test *t, int num_qids)
+{
+	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
+}
+
+static inline int
+create_ordered_qids(struct test *t, int num_qids)
+{
+	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
+}
+
+
+static inline int
+create_unordered_qids(struct test *t, int num_qids)
+{
+	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
+}
+
+static inline int
+create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
+{
+	int i;
+
+	/* Q creation */
+	static const struct rte_event_queue_conf conf = {
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+	};
+
+	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
+		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+			printf("%d: error creating qid %d\n", __LINE__, i);
+			return -1;
+		}
+		t->qid[i] = i;
+
+		if (rte_event_port_link(evdev, ports[i - t->nb_qids],
+				&t->qid[i], NULL, 1) != 1) {
+			printf("%d: error creating link for qid %d\n",
+					__LINE__, i);
+			return -1;
+		}
+	}
+	t->nb_qids += num_qids;
+	if (t->nb_qids > MAX_QIDS)
+		return -1;
+
+	return 0;
+}
+
+/* destruction */
+static inline int
+cleanup(struct test *t __rte_unused)
+{
+	rte_event_dev_stop(evdev);
+	rte_event_dev_close(evdev);
+	return 0;
+}
+
+struct test_event_dev_stats {
+	uint64_t rx_pkts;       /**< Total packets received */
+	uint64_t rx_dropped;    /**< Total packets dropped (e.g. invalid QID) */
+	uint64_t tx_pkts;       /**< Total packets transmitted */
+
+	/** Packets received on this port */
+	uint64_t port_rx_pkts[MAX_PORTS];
+	/** Packets dropped on this port */
+	uint64_t port_rx_dropped[MAX_PORTS];
+	/** Packets inflight on this port */
+	uint64_t port_inflight[MAX_PORTS];
+	/** Packets transmitted on this port */
+	uint64_t port_tx_pkts[MAX_PORTS];
+	/** Packets received on this qid */
+	uint64_t qid_rx_pkts[MAX_QIDS];
+	/** Packets dropped on this qid */
+	uint64_t qid_rx_dropped[MAX_QIDS];
+	/** Packets transmitted on this qid */
+	uint64_t qid_tx_pkts[MAX_QIDS];
+};
+
+static inline int
+test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
+{
+	uint32_t i;
+	static uint32_t total_ids[3]; /* rx, tx and drop */
+	static uint32_t port_rx_pkts_ids[MAX_PORTS];
+	static uint32_t port_rx_dropped_ids[MAX_PORTS];
+	static uint32_t port_inflight_ids[MAX_PORTS];
+	static uint32_t port_tx_pkts_ids[MAX_PORTS];
+	static uint32_t qid_rx_pkts_ids[MAX_QIDS];
+	static uint32_t qid_rx_dropped_ids[MAX_QIDS];
+	static uint32_t qid_tx_pkts_ids[MAX_QIDS];
+
+
+	stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
+			"dev_rx", &total_ids[0]);
+	stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
+			"dev_drop", &total_ids[1]);
+	stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
+			"dev_tx", &total_ids[2]);
+	for (i = 0; i < MAX_PORTS; i++) {
+		char name[32];
+		snprintf(name, sizeof(name), "port_%u_rx", i);
+		stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
+				dev_id, name, &port_rx_pkts_ids[i]);
+		snprintf(name, sizeof(name), "port_%u_drop", i);
+		stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
+				dev_id, name, &port_rx_dropped_ids[i]);
+		snprintf(name, sizeof(name), "port_%u_inflight", i);
+		stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
+				dev_id, name, &port_inflight_ids[i]);
+		snprintf(name, sizeof(name), "port_%u_tx", i);
+		stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
+				dev_id, name, &port_tx_pkts_ids[i]);
+	}
+	for (i = 0; i < MAX_QIDS; i++) {
+		char name[32];
+		snprintf(name, sizeof(name), "qid_%u_rx", i);
+		stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
+				dev_id, name, &qid_rx_pkts_ids[i]);
+		snprintf(name, sizeof(name), "qid_%u_drop", i);
+		stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
+				dev_id, name, &qid_rx_dropped_ids[i]);
+		snprintf(name, sizeof(name), "qid_%u_tx", i);
+		stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
+				dev_id, name, &qid_tx_pkts_ids[i]);
+	}
+
+	return 0;
+}
+
+/* run_prio_packet_test
+ * This performs a basic packet priority check on the test instance passed in.
+ * It is factored out of the main priority tests as the same tests must be
+ * performed to ensure prioritization of each type of QID.
+ *
+ * Requirements:
+ *  - An initialized test structure, including mempool
+ *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
+ *  - t->qid[0] is the QID to be tested
+ *  - if LB QID, the CQ must be mapped to the QID.
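+ *
+ * A minimal caller, as a sketch (test_priority_atomic() below follows this
+ * pattern for a load-balanced QID):
+ *
+ *	if (init(t, 1, 1) < 0 || create_ports(t, 1) < 0 ||
+ *			create_atomic_qids(t, 1) < 0)
+ *		return -1;
+ *	rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
+ *	rte_event_dev_start(evdev);
+ *	return run_prio_packet_test(t);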
+ */
+static int
+run_prio_packet_test(struct test *t)
+{
+	int err;
+	const uint32_t MAGIC_SEQN[] = {4711, 1234};
+	const uint32_t PRIORITY[] = {
+		RTE_EVENT_DEV_PRIORITY_NORMAL,
+		RTE_EVENT_DEV_PRIORITY_HIGHEST
+	};
+	unsigned int i;
+	for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
+		/* generate pkt and enqueue */
+		struct rte_event ev;
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+		arp->seqn = MAGIC_SEQN[i];
+
+		ev = (struct rte_event){
+			.priority = PRIORITY[i],
+			.op = RTE_EVENT_OP_NEW,
+			.queue_id = t->qid[0],
+			.mbuf = arp
+		};
+		err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+		if (err < 0) {
+			printf("%d: error failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	struct test_event_dev_stats stats;
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: error failed to get stats\n", __LINE__);
+		return -1;
+	}
+
+	if (stats.port_rx_pkts[t->port[0]] != 2) {
+		printf("%d: error stats incorrect for directed port\n",
+				__LINE__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+
+	struct rte_event ev, ev2;
+	uint32_t deq_pkts;
+	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
+	if (deq_pkts != 1) {
+		printf("%d: error failed to deq\n", __LINE__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+	if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
+		printf("%d: first packet out not highest priority\n",
+				__LINE__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+	rte_pktmbuf_free(ev.mbuf);
+
+	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
+	if (deq_pkts != 1) {
+		printf("%d: error failed to deq\n", __LINE__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+	if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
+		printf("%d: second packet out not lower priority\n",
+				__LINE__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+	rte_pktmbuf_free(ev2.mbuf);
+
+	cleanup(t);
+	return 0;
+}
+
+static int
+test_single_directed_packet(struct test *t)
+{
+	const int rx_enq = 0;
+	const int wrk_enq = 2;
+	int err;
+
+	/* Create instance with 3 directed QIDs going to 3 ports */
+	if (init(t, 3, 3) < 0 ||
+			create_ports(t, 3) < 0 ||
+			create_directed_qids(t, 3, t->port) < 0)
+		return -1;
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/************** FORWARD ****************/
+	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+	struct rte_event ev = {
+			.op = RTE_EVENT_OP_NEW,
+			.queue_id = wrk_enq,
+			.mbuf = arp,
+	};
+
+	if (!arp) {
+		printf("%d: gen of pkt failed\n", __LINE__);
+		return -1;
+	}
+
+	const uint32_t MAGIC_SEQN = 4711;
+	arp->seqn = MAGIC_SEQN;
+
+	/* generate pkt and enqueue */
+	err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
+	if (err < 0) {
+		printf("%d: error failed to enqueue\n", __LINE__);
+		return -1;
+	}
+
+	/* Run schedule() as directed packets may need to be re-ordered */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	struct test_event_dev_stats stats;
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: error failed to get stats\n", __LINE__);
+		return -1;
+	}
+
+	if (stats.port_rx_pkts[rx_enq] != 1) {
+		printf("%d: error stats incorrect for directed port\n",
+				__LINE__);
+		return -1;
+	}
+
+	uint32_t deq_pkts;
+	deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
+	if (deq_pkts != 1) {
+		printf("%d: error failed to deq\n", __LINE__);
+		return -1;
+	}
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (stats.port_rx_pkts[wrk_enq] != 0 &&
+			stats.port_rx_pkts[wrk_enq] != 1) {
+		printf("%d: error directed stats post-dequeue\n", __LINE__);
+		return -1;
+	}
+
+	if (ev.mbuf->seqn != MAGIC_SEQN) {
+		printf("%d: error magic sequence number not dequeued\n",
+				__LINE__);
+		return -1;
+	}
+
+	rte_pktmbuf_free(ev.mbuf);
+	cleanup(t);
+	return 0;
+}
+
+static int
+test_directed_forward_credits(struct test *t)
+{
+	uint32_t i;
+	int32_t err;
+
+	if (init(t, 1, 1) < 0 ||
+			create_ports(t, 1) < 0 ||
+			create_directed_qids(t, 1, t->port) < 0)
+		return -1;
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	struct rte_event ev = {
+			.op = RTE_EVENT_OP_NEW,
+			.queue_id = 0,
+	};
+
+	for (i = 0; i < 1000; i++) {
+		err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+		if (err < 0) {
+			printf("%d: error failed to enqueue\n", __LINE__);
+			return -1;
+		}
+		rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+		uint32_t deq_pkts;
+		deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
+		if (deq_pkts != 1) {
+			printf("%d: error failed to deq\n", __LINE__);
+			return -1;
+		}
+
+		/* re-write event to be a forward, and continue looping it */
+		ev.op = RTE_EVENT_OP_FORWARD;
+	}
+
+	cleanup(t);
+	return 0;
+}
+
+
+static int
+test_priority_directed(struct test *t)
+{
+	if (init(t, 1, 1) < 0 ||
+			create_ports(t, 1) < 0 ||
+			create_directed_qids(t, 1, t->port) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	return run_prio_packet_test(t);
+}
+
+static int
+test_priority_atomic(struct test *t)
+{
+	if (init(t, 1, 1) < 0 ||
+			create_ports(t, 1) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* map the QID */
+	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+		printf("%d: error mapping qid to port\n", __LINE__);
+		return -1;
+	}
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	return run_prio_packet_test(t);
+}
+
+static int
+test_priority_ordered(struct test *t)
+{
+	if (init(t, 1, 1) < 0 ||
+			create_ports(t, 1) < 0 ||
+			create_ordered_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* map the QID */
+	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+		printf("%d: error mapping qid to port\n", __LINE__);
+		return -1;
+	}
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	return run_prio_packet_test(t);
+}
+
+static int
+test_priority_unordered(struct test *t)
+{
+	if (init(t, 1, 1) < 0 ||
+			create_ports(t, 1) < 0 ||
+			create_unordered_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* map the QID */
+	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+		printf("%d: error mapping qid to port\n", __LINE__);
+		return -1;
+	}
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	return run_prio_packet_test(t);
+}
+
+static int
+burst_packets(struct test *t)
+{
+	/************** CONFIG ****************/
+	uint32_t i;
+	int err;
+	int ret;
+
+	/* Create instance with 2 ports and 2 queues */
+	if (init(t, 2, 2) < 0 ||
+			create_ports(t, 2) < 0 ||
+			create_atomic_qids(t, 2) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* CQ mapping to QID */
+	ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
+	if (ret != 1) {
+		printf("%d: error mapping lb qid0\n", __LINE__);
+		return -1;
+	}
+	ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
+	if (ret != 1) {
+		printf("%d: error mapping lb qid1\n", __LINE__);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/************** FORWARD ****************/
+	const uint32_t rx_port = 0;
+	const uint32_t NUM_PKTS = 2;
+
+	for (i = 0; i < NUM_PKTS; i++) {
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+		if (!arp) {
+			printf("%d: error generating pkt\n", __LINE__);
+			return -1;
+		}
+
+		struct rte_event ev = {
+				.op = RTE_EVENT_OP_NEW,
+				.queue_id = i % 2,
+				.flow_id = i % 3,
+				.mbuf = arp,
+		};
+		/* generate pkt and enqueue */
+		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+		if (err < 0) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	/* Check stats for all NUM_PKTS arrived to sched core */
+	struct test_event_dev_stats stats;
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		return -1;
+	}
+	if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
+		printf("%d: Sched core didn't receive all %d pkts\n",
+				__LINE__, NUM_PKTS);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+
+	uint32_t deq_pkts;
+	int p;
+
+	deq_pkts = 0;
+	/******** DEQ QID 1 *******/
+	do {
+		struct rte_event ev;
+		p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
+		deq_pkts += p;
+		if (p) /* ev is not valid when nothing was dequeued */
+			rte_pktmbuf_free(ev.mbuf);
+	} while (p);
+
+	if (deq_pkts != NUM_PKTS/2) {
+		printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
+				__LINE__);
+		return -1;
+	}
+
+	/******** DEQ QID 2 *******/
+	deq_pkts = 0;
+	do {
+		struct rte_event ev;
+		p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
+		deq_pkts += p;
+		if (p) /* ev is not valid when nothing was dequeued */
+			rte_pktmbuf_free(ev.mbuf);
+	} while (p);
+	if (deq_pkts != NUM_PKTS/2) {
+		printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
+				__LINE__);
+		return -1;
+	}
+
+	cleanup(t);
+	return 0;
+}
+
+static int
+abuse_inflights(struct test *t)
+{
+	const int rx_enq = 0;
+	const int wrk_enq = 2;
+	int err;
+
+	/* Create instance with 4 ports */
+	if (init(t, 1, 4) < 0 ||
+			create_ports(t, 4) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* CQ mapping to QID */
+	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+	if (err != 1) {
+		printf("%d: error mapping lb qid\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/* Enqueue op only */
+	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
+	if (err < 0) {
+		printf("%d: Failed to enqueue\n", __LINE__);
+		return -1;
+	}
+
+	/* schedule */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	struct test_event_dev_stats stats;
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		return -1;
+	}
+
+	if (stats.rx_pkts != 0 ||
+			stats.tx_pkts != 0 ||
+			stats.port_inflight[wrk_enq] != 0) {
+		printf("%d: Sched core didn't handle pkt as expected\n",
+				__LINE__);
+		return -1;
+	}
+
+	cleanup(t);
+	return 0;
+}
+
+static int
+xstats_tests(struct test *t)
+{
+	const int wrk_enq = 2;
+	int err;
+
+	/* Create instance with 4 ports */
+	if (init(t, 1, 4) < 0 ||
+			create_ports(t, 4) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* CQ mapping to QID */
+	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+	if (err != 1) {
+		printf("%d: error mapping lb qid\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	const uint32_t XSTATS_MAX = 1024;
+
+	uint32_t i;
+	uint32_t ids[XSTATS_MAX];
+	uint64_t values[XSTATS_MAX];
+	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+	for (i = 0; i < XSTATS_MAX; i++)
+		ids[i] = i;
+
+	/* Device names / values */
+	int ret = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_DEVICE,
+					0, xstats_names, ids, XSTATS_MAX);
+	if (ret != 6) {
+		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+		return -1;
+	}
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_DEVICE,
+					0, ids, values, ret);
+	if (ret != 6) {
+		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+		return -1;
+	}
+
+	/* Port names / values */
+	ret = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_PORT, 0,
+					xstats_names, ids, XSTATS_MAX);
+	if (ret != 21) {
+		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+		return -1;
+	}
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_PORT, 0,
+					ids, values, ret);
+	if (ret != 21) {
+		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+		return -1;
+	}
+
+	/* Queue names / values */
+	ret = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_QUEUE,
+					0, xstats_names, ids, XSTATS_MAX);
+	if (ret != 16) {
+		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
+		return -1;
+	}
+
+	/* NEGATIVE TEST: with an invalid queue passed, -EINVAL is returned */
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_QUEUE,
+					1, ids, values, ret);
+	if (ret != -EINVAL) {
+		printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
+		return -1;
+	}
+
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_QUEUE,
+					0, ids, values, ret);
+	if (ret != 16) {
+		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
+		return -1;
+	}
+
+	/* enqueue packets to check values */
+	for (i = 0; i < 3; i++) {
+		struct rte_event ev;
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+		ev.queue_id = t->qid[i];
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.mbuf = arp;
+		ev.flow_id = 7;
+		arp->seqn = i;
+
+		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+		if (err != 1) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	/* Device names / values */
+	int num_stats = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+					xstats_names, ids, XSTATS_MAX);
+	if (num_stats < 0)
+		goto fail;
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_DEVICE,
+					0, ids, values, num_stats);
+	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
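+	/* expected order: dev_rx, dev_tx, dev_drop, dev_sched_calls,
+	 * dev_sched_no_iq_enq, dev_sched_no_cq_enq (the same names are
+	 * listed in dev_names[] in xstats_id_reset_tests() below)
+	 */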
+	for (i = 0; (signed int)i < ret; i++) {
+		if (expected[i] != values[i]) {
+			printf(
+				"%d Error xstat %d (id %d) %s : %"PRIu64
+				", expect %"PRIu64"\n",
+				__LINE__, i, ids[i], xstats_names[i].name,
+				values[i], expected[i]);
+			goto fail;
+		}
+	}
+
+	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
+					0, NULL, 0);
+
+	/* ensure reset statistics are zeroed */
+	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_DEVICE,
+					0, ids, values, num_stats);
+	for (i = 0; (signed int)i < ret; i++) {
+		if (expected_zero[i] != values[i]) {
+			printf(
+				"%d Error, xstat %d (id %d) %s : %"PRIu64
+				", expect %"PRIu64"\n",
+				__LINE__, i, ids[i], xstats_names[i].name,
+				values[i], expected_zero[i]);
+			goto fail;
+		}
+	}
+
+	/* port reset checks */
+	num_stats = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_PORT, 0,
+					xstats_names, ids, XSTATS_MAX);
+	if (num_stats < 0)
+		goto fail;
+	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
+					0, ids, values, num_stats);
+
+	static const uint64_t port_expected[] = {
+		3 /* rx */,
+		0 /* tx */,
+		0 /* drop */,
+		0 /* inflights */,
+		0 /* avg pkt cycles */,
+		29 /* credits */,
+		0 /* rx ring used */,
+		4096 /* rx ring free */,
+		0 /* cq ring used */,
+		32 /* cq ring free */,
+		0 /* dequeue calls */,
+		/* 10 dequeue burst buckets */
+		0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0,
+	};
+	if (ret != RTE_DIM(port_expected)) {
+		printf(
+			"%s %d: wrong number of port stats (%d), expected %zu\n",
+			__func__, __LINE__, ret, RTE_DIM(port_expected));
+	}
+
+	for (i = 0; (signed int)i < ret; i++) {
+		if (port_expected[i] != values[i]) {
+			printf(
+				"%s : %d: Error stat %s is %"PRIu64
+				", expected %"PRIu64"\n",
+				__func__, __LINE__, xstats_names[i].name,
+				values[i], port_expected[i]);
+			goto fail;
+		}
+	}
+
+	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
+					0, NULL, 0);
+
+	/* ensure reset statistics are zeroed */
+	static const uint64_t port_expected_zero[] = {
+		0 /* rx */,
+		0 /* tx */,
+		0 /* drop */,
+		0 /* inflights */,
+		0 /* avg pkt cycles */,
+		29 /* credits */,
+		0 /* rx ring used */,
+		4096 /* rx ring free */,
+		0 /* cq ring used */,
+		32 /* cq ring free */,
+		0 /* dequeue calls */,
+		/* 10 dequeue burst buckets */
+		0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0,
+	};
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_PORT,
+					0, ids, values, num_stats);
+	for (i = 0; (signed int)i < ret; i++) {
+		if (port_expected_zero[i] != values[i]) {
+			printf(
+				"%d, Error, xstat %d (id %d) %s : %"PRIu64
+				", expect %"PRIu64"\n",
+				__LINE__, i, ids[i], xstats_names[i].name,
+				values[i], port_expected_zero[i]);
+			goto fail;
+		}
+	}
+
+	/* QUEUE STATS TESTS */
+	num_stats = rte_event_dev_xstats_names_get(evdev,
+						RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+						xstats_names, ids, XSTATS_MAX);
+	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
+					0, ids, values, num_stats);
+	if (ret < 0) {
+		printf("xstats get returned %d\n", ret);
+		goto fail;
+	}
+	if ((unsigned int)ret > XSTATS_MAX)
+		printf("%s %d: more xstats available than space\n",
+				__func__, __LINE__);
+
+	static const uint64_t queue_expected[] = {
+		3 /* rx */,
+		3 /* tx */,
+		0 /* drop */,
+		3 /* inflights */,
+		0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
+		/* QID-to-Port: pinned_flows, packets */
+		0, 0,
+		0, 0,
+		1, 3,
+		0, 0,
+	};
+	for (i = 0; (signed int)i < ret; i++) {
+		if (queue_expected[i] != values[i]) {
+			printf(
+				"%d, Error, xstat %d (id %d) %s : %"PRIu64
+				", expect %"PRIu64"\n",
+				__LINE__, i, ids[i], xstats_names[i].name,
+				values[i], queue_expected[i]);
+			goto fail;
+		}
+	}
+
+	/* Reset the queue stats here */
+	ret = rte_event_dev_xstats_reset(evdev,
+					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+					NULL,
+					0);
+
+	/* Verify that the resettable stats are reset, and others are not */
+	static const uint64_t queue_expected_zero[] = {
+		0 /* rx */,
+		0 /* tx */,
+		0 /* drop */,
+		3 /* inflight */,
+		0, 0, 0, 0, /* 4 iq used */
+		/* QID-to-Port: pinned_flows, packets */
+		0, 0,
+		0, 0,
+		1, 0,
+		0, 0,
+	};
+
+	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+					ids, values, num_stats);
+	int fails = 0;
+	for (i = 0; (signed int)i < ret; i++) {
+		if (queue_expected_zero[i] != values[i]) {
+			printf(
+				"%d, Error, xstat %d (id %d) %s : %"PRIu64
+				", expect %"PRIu64"\n",
+				__LINE__, i, ids[i], xstats_names[i].name,
+				values[i], queue_expected_zero[i]);
+			fails++;
+		}
+	}
+	if (fails) {
+		printf("%d : %d of values were not as expected above\n",
+				__LINE__, fails);
+		goto fail;
+	}
+
+	cleanup(t);
+	return 0;
+
+fail:
+	rte_event_dev_dump(0, stdout);
+	cleanup(t);
+	return -1;
+}
+
+
+static int
+xstats_id_abuse_tests(struct test *t)
+{
+	int err;
+	const uint32_t XSTATS_MAX = 1024;
+	const uint32_t link_port = 2;
+
+	uint32_t ids[XSTATS_MAX];
+	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+	/* Create instance with 4 ports */
+	if (init(t, 1, 4) < 0 ||
+			create_ports(t, 4) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		goto fail;
+	}
+
+	err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
+	if (err != 1) {
+		printf("%d: error mapping lb qid\n", __LINE__);
+		goto fail;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		goto fail;
+	}
+
+	/* no test for device, as it ignores the port/q number */
+	int num_stats = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_PORT,
+					UINT8_MAX-1, xstats_names, ids,
+					XSTATS_MAX);
+	if (num_stats != 0) {
+		printf("%d: expected %d stats, got return %d\n", __LINE__,
+				0, num_stats);
+		goto fail;
+	}
+
+	num_stats = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_QUEUE,
+					UINT8_MAX-1, xstats_names, ids,
+					XSTATS_MAX);
+	if (num_stats != 0) {
+		printf("%d: expected %d stats, got return %d\n", __LINE__,
+				0, num_stats);
+		goto fail;
+	}
+
+	cleanup(t);
+	return 0;
+fail:
+	cleanup(t);
+	return -1;
+}
+
+static int
+port_reconfig_credits(struct test *t)
+{
+	if (init(t, 1, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	uint32_t i;
+	const uint32_t NUM_ITERS = 32;
+	for (i = 0; i < NUM_ITERS; i++) {
+		const struct rte_event_queue_conf conf = {
+			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+			.nb_atomic_flows = 1024,
+			.nb_atomic_order_sequences = 1024,
+		};
+		if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+			printf("%d: error creating qid\n", __LINE__);
+			return -1;
+		}
+		t->qid[0] = 0;
+
+		static const struct rte_event_port_conf port_conf = {
+				.new_event_threshold = 128,
+				.dequeue_depth = 32,
+				.enqueue_depth = 64,
+				.disable_implicit_release = 0,
+		};
+		if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
+			printf("%d Error setting up port\n", __LINE__);
+			return -1;
+		}
+
+		int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
+		if (links != 1) {
+			printf("%d: error mapping lb qid\n", __LINE__);
+			goto fail;
+		}
+
+		if (rte_event_dev_start(evdev) < 0) {
+			printf("%d: Error with start call\n", __LINE__);
+			goto fail;
+		}
+
+		const uint32_t NPKTS = 1;
+		uint32_t j;
+		for (j = 0; j < NPKTS; j++) {
+			struct rte_event ev;
+			struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+			if (!arp) {
+				printf("%d: gen of pkt failed\n", __LINE__);
+				goto fail;
+			}
+			ev.queue_id = t->qid[0];
+			ev.op = RTE_EVENT_OP_NEW;
+			ev.mbuf = arp;
+			int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+			if (err != 1) {
+				printf("%d: Failed to enqueue\n", __LINE__);
+				rte_event_dev_dump(0, stdout);
+				goto fail;
+			}
+		}
+
+		rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+		struct rte_event ev[NPKTS];
+		int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
+							NPKTS, 0);
+		if (deq != 1)
+			printf("%d error; no packet dequeued\n", __LINE__);
+
+		/* let cleanup below stop the device on last iter */
+		if (i != NUM_ITERS-1)
+			rte_event_dev_stop(evdev);
+	}
+
+	cleanup(t);
+	return 0;
+fail:
+	cleanup(t);
+	return -1;
+}
+
+static int
+port_single_lb_reconfig(struct test *t)
+{
+	if (init(t, 2, 2) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		goto fail;
+	}
+
+	static const struct rte_event_queue_conf conf_lb_atomic = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.nb_atomic_flows = 1024,
+		.nb_atomic_order_sequences = 1024,
+	};
+	if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
+		printf("%d: error creating qid\n", __LINE__);
+		goto fail;
+	}
+
+	static const struct rte_event_queue_conf conf_single_link = {
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+	};
+	if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
+		printf("%d: error creating qid\n", __LINE__);
+		goto fail;
+	}
+
+	struct rte_event_port_conf port_conf = {
+		.new_event_threshold = 128,
+		.dequeue_depth = 32,
+		.enqueue_depth = 64,
+		.disable_implicit_release = 0,
+	};
+	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
+		printf("%d Error setting up port\n", __LINE__);
+		goto fail;
+	}
+	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
+		printf("%d Error setting up port\n", __LINE__);
+		goto fail;
+	}
+
+	/* link port to lb queue */
+	uint8_t queue_id = 0;
+	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
+		printf("%d: error creating link for qid\n", __LINE__);
+		goto fail;
+	}
+
+	int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
+	if (ret != 1) {
+		printf("%d: Error unlinking lb port\n", __LINE__);
+		goto fail;
+	}
+
+	queue_id = 1;
+	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
+		printf("%d: error creating link for qid\n", __LINE__);
+		goto fail;
+	}
+
+	queue_id = 0;
+	int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
+	if (err != 1) {
+		printf("%d: error mapping lb qid\n", __LINE__);
+		goto fail;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		goto fail;
+	}
+
+	cleanup(t);
+	return 0;
+fail:
+	cleanup(t);
+	return -1;
+}
+
+static int
+xstats_brute_force(struct test *t)
+{
+	uint32_t i;
+	const uint32_t XSTATS_MAX = 1024;
+	uint32_t ids[XSTATS_MAX];
+	uint64_t values[XSTATS_MAX];
+	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+
+	/* Create instance with 4 ports */
+	if (init(t, 1, 4) < 0 ||
+			create_ports(t, 4) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+	if (err != 1) {
+		printf("%d: error mapping lb qid\n", __LINE__);
+		goto fail;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		goto fail;
+	}
+
+	for (i = 0; i < XSTATS_MAX; i++)
+		ids[i] = i;
+
+	for (i = 0; i < 3; i++) {
+		uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
+		uint32_t j;
+		for (j = 0; j < UINT8_MAX; j++) {
+			rte_event_dev_xstats_names_get(evdev, mode,
+				j, xstats_names, ids, XSTATS_MAX);
+
+			rte_event_dev_xstats_get(evdev, mode, j, ids,
+						 values, XSTATS_MAX);
+		}
+	}
+
+	cleanup(t);
+	return 0;
+fail:
+	cleanup(t);
+	return -1;
+}
+
+static int
+xstats_id_reset_tests(struct test *t)
+{
+	const int wrk_enq = 2;
+	int err;
+
+	/* Create instance with 4 ports */
+	if (init(t, 1, 4) < 0 ||
+			create_ports(t, 4) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* CQ mapping to QID */
+	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+	if (err != 1) {
+		printf("%d: error mapping lb qid\n", __LINE__);
+		goto fail;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		goto fail;
+	}
+
+#define XSTATS_MAX 1024
+	int ret;
+	uint32_t i;
+	uint32_t ids[XSTATS_MAX];
+	uint64_t values[XSTATS_MAX];
+	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+	for (i = 0; i < XSTATS_MAX; i++)
+		ids[i] = i;
+
+#define NUM_DEV_STATS 6
+	/* Device names / values */
+	int num_stats = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_DEVICE,
+					0, xstats_names, ids, XSTATS_MAX);
+	if (num_stats != NUM_DEV_STATS) {
+		printf("%d: expected %d stats, got return %d\n", __LINE__,
+				NUM_DEV_STATS, num_stats);
+		goto fail;
+	}
+	ret = rte_event_dev_xstats_get(evdev,
+					RTE_EVENT_DEV_XSTATS_DEVICE,
+					0, ids, values, num_stats);
+	if (ret != NUM_DEV_STATS) {
+		printf("%d: expected %d stats, got return %d\n", __LINE__,
+				NUM_DEV_STATS, ret);
+		goto fail;
+	}
+
+#define NPKTS 7
+	for (i = 0; i < NPKTS; i++) {
+		struct rte_event ev;
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			goto fail;
+		}
+		ev.queue_id = t->qid[i];
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.mbuf = arp;
+		arp->seqn = i;
+
+		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+		if (err != 1) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			goto fail;
+		}
+	}
+
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	static const char * const dev_names[] = {
+		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
+		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
+	};
+	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
+	for (i = 0; (int)i < ret; i++) {
+		unsigned int id;
+		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+								dev_names[i],
+								&id);
+		if (id != i) {
+			printf("%d: %s id incorrect, expected %d got %d\n",
+					__LINE__, dev_names[i], i, id);
+			goto fail;
+		}
+		if (val != dev_expected[i]) {
+			printf("%d: %s value incorrect, expected %"
+				PRIu64" got %d\n", __LINE__, dev_names[i],
+				dev_expected[i], id);
+			goto fail;
+		}
+		/* reset to zero */
+		int reset_ret = rte_event_dev_xstats_reset(evdev,
+						RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+						&id,
+						1);
+		if (reset_ret) {
+			printf("%d: failed to reset successfully\n", __LINE__);
+			goto fail;
+		}
+		dev_expected[i] = 0;
+		/* check value again */
+		val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
+		if (val != dev_expected[i]) {
+			printf("%d: %s value incorrect, expected %"PRIu64
+				" got %"PRIu64"\n", __LINE__, dev_names[i],
+				dev_expected[i], val);
+			goto fail;
+		}
+	}
+
+/* 48 is the stat offset from the start of the device's whole xstats.
+ * This WILL break every time we add a statistic to a port
+ * or the device, but there is no other way to test.
+ */
+#define PORT_OFF 48
+/* num stats for the tested port. CQ size adds more stats to a port */
+#define NUM_PORT_STATS 21
+/* the port to test. */
+#define PORT 2
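+/* Sanity check on the offset, derived from the counts above: ids 0-5 are
+ * the NUM_DEV_STATS device stats, then each of the 4 ports exposes
+ * NUM_PORT_STATS ids, so port 2's stats start at 6 + 2 * 21 = 48.
+ */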
+	num_stats = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_PORT, PORT,
+					xstats_names, ids, XSTATS_MAX);
+	if (num_stats != NUM_PORT_STATS) {
+		printf("%d: expected %d stats, got return %d\n",
+			__LINE__, NUM_PORT_STATS, num_stats);
+		goto fail;
+	}
+	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
+					ids, values, num_stats);
+
+	if (ret != NUM_PORT_STATS) {
+		printf("%d: expected %d stats, got return %d\n",
+				__LINE__, NUM_PORT_STATS, ret);
+		goto fail;
+	}
+	static const char * const port_names[] = {
+		"port_2_rx",
+		"port_2_tx",
+		"port_2_drop",
+		"port_2_inflight",
+		"port_2_avg_pkt_cycles",
+		"port_2_credits",
+		"port_2_rx_ring_used",
+		"port_2_rx_ring_free",
+		"port_2_cq_ring_used",
+		"port_2_cq_ring_free",
+		"port_2_dequeue_calls",
+		"port_2_dequeues_returning_0",
+		"port_2_dequeues_returning_1-4",
+		"port_2_dequeues_returning_5-8",
+		"port_2_dequeues_returning_9-12",
+		"port_2_dequeues_returning_13-16",
+		"port_2_dequeues_returning_17-20",
+		"port_2_dequeues_returning_21-24",
+		"port_2_dequeues_returning_25-28",
+		"port_2_dequeues_returning_29-32",
+		"port_2_dequeues_returning_33-36",
+	};
+	uint64_t port_expected[] = {
+		0, /* rx */
+		NPKTS, /* tx */
+		0, /* drop */
+		NPKTS, /* inflight */
+		0, /* avg pkt cycles */
+		0, /* credits */
+		0, /* rx ring used */
+		4096, /* rx ring free */
+		NPKTS,  /* cq ring used */
+		25, /* cq ring free */
+		0, /* dequeue zero calls */
+		0, 0, 0, 0, 0, /* 10 dequeue buckets */
+		0, 0, 0, 0, 0,
+	};
+	uint64_t port_expected_zero[] = {
+		0, /* rx */
+		0, /* tx */
+		0, /* drop */
+		NPKTS, /* inflight */
+		0, /* avg pkt cycles */
+		0, /* credits */
+		0, /* rx ring used */
+		4096, /* rx ring free */
+		NPKTS,  /* cq ring used */
+		25, /* cq ring free */
+		0, /* dequeue zero calls */
+		0, 0, 0, 0, 0, /* 10 dequeue buckets */
+		0, 0, 0, 0, 0,
+	};
+	if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
+			RTE_DIM(port_names) != NUM_PORT_STATS) {
+		printf("%d: port array of wrong size\n", __LINE__);
+		goto fail;
+	}
+
+	int failed = 0;
+	for (i = 0; (int)i < ret; i++) {
+		unsigned int id;
+		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+								port_names[i],
+								&id);
+		if (id != i + PORT_OFF) {
+			printf("%d: %s id incorrect, expected %d got %d\n",
+					__LINE__, port_names[i], i+PORT_OFF,
+					id);
+			failed = 1;
+		}
+		if (val != port_expected[i]) {
+			printf("%d: %s value incorrect, expected %"PRIu64
+				" got %d\n", __LINE__, port_names[i],
+				port_expected[i], id);
+			failed = 1;
+		}
+		/* reset to zero */
+		int reset_ret = rte_event_dev_xstats_reset(evdev,
+						RTE_EVENT_DEV_XSTATS_PORT, PORT,
+						&id,
+						1);
+		if (reset_ret) {
+			printf("%d: failed to reset successfully\n", __LINE__);
+			failed = 1;
+		}
+		/* check value again */
+		val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
+		if (val != port_expected_zero[i]) {
+			printf("%d: %s value incorrect, expected %"PRIu64
+				" got %"PRIu64"\n", __LINE__, port_names[i],
+				port_expected_zero[i], val);
+			failed = 1;
+		}
+	}
+	if (failed)
+		goto fail;
+
+/* num queue stats */
+#define NUM_Q_STATS 16
+/* queue offset from the start of the device's whole xstats.
+ * This will break every time we add a statistic to a device/port/queue.
+ */
+#define QUEUE_OFF 90
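+/* As above, derived from the stat counts: 6 device stats plus
+ * 4 ports * 21 port stats = 90, where queue 0's ids begin.
+ */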
+	const uint32_t queue = 0;
+	num_stats = rte_event_dev_xstats_names_get(evdev,
+					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
+					xstats_names, ids, XSTATS_MAX);
+	if (num_stats != NUM_Q_STATS) {
+		printf("%d: expected %d stats, got return %d\n",
+			__LINE__, NUM_Q_STATS, num_stats);
+		goto fail;
+	}
+	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
+					queue, ids, values, num_stats);
+	if (ret != NUM_Q_STATS) {
+		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+		goto fail;
+	}
+	static const char * const queue_names[] = {
+		"qid_0_rx",
+		"qid_0_tx",
+		"qid_0_drop",
+		"qid_0_inflight",
+		"qid_0_iq_0_used",
+		"qid_0_iq_1_used",
+		"qid_0_iq_2_used",
+		"qid_0_iq_3_used",
+		"qid_0_port_0_pinned_flows",
+		"qid_0_port_0_packets",
+		"qid_0_port_1_pinned_flows",
+		"qid_0_port_1_packets",
+		"qid_0_port_2_pinned_flows",
+		"qid_0_port_2_packets",
+		"qid_0_port_3_pinned_flows",
+		"qid_0_port_3_packets",
+	};
+	uint64_t queue_expected[] = {
+		7, /* rx */
+		7, /* tx */
+		0, /* drop */
+		7, /* inflight */
+		0, /* iq 0 used */
+		0, /* iq 1 used */
+		0, /* iq 2 used */
+		0, /* iq 3 used */
+		/* QID-to-Port: pinned_flows, packets */
+		0, 0,
+		0, 0,
+		1, 7,
+		0, 0,
+	};
+	uint64_t queue_expected_zero[] = {
+		0, /* rx */
+		0, /* tx */
+		0, /* drop */
+		7, /* inflight */
+		0, /* iq 0 used */
+		0, /* iq 1 used */
+		0, /* iq 2 used */
+		0, /* iq 3 used */
+		/* QID-to-Port: pinned_flows, packets */
+		0, 0,
+		0, 0,
+		1, 0,
+		0, 0,
+	};
+	if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
+			RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
+			RTE_DIM(queue_names) != NUM_Q_STATS) {
+		printf("%d : queue array of wrong size\n", __LINE__);
+		goto fail;
+	}
+
+	failed = 0;
+	for (i = 0; (int)i < ret; i++) {
+		unsigned int id;
+		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+								queue_names[i],
+								&id);
+		if (id != i + QUEUE_OFF) {
+			printf("%d: %s id incorrect, expected %d got %d\n",
+					__LINE__, queue_names[i], i+QUEUE_OFF,
+					id);
+			failed = 1;
+		}
+		if (val != queue_expected[i]) {
+			printf("%d: %d: %s value , expected %"PRIu64
+				" got %"PRIu64"\n", i, __LINE__,
+				queue_names[i], queue_expected[i], val);
+			failed = 1;
+		}
+		/* reset to zero */
+		int reset_ret = rte_event_dev_xstats_reset(evdev,
+						RTE_EVENT_DEV_XSTATS_QUEUE,
+						queue, &id, 1);
+		if (reset_ret) {
+			printf("%d: failed to reset successfully\n", __LINE__);
+			failed = 1;
+		}
+		/* check value again */
+		val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
+							0);
+		if (val != queue_expected_zero[i]) {
+			printf("%d: %s value incorrect, expected %"PRIu64
+				" got %"PRIu64"\n", __LINE__, queue_names[i],
+				queue_expected_zero[i], val);
+			failed = 1;
+		}
+	}
+
+	if (failed)
+		goto fail;
+
+	cleanup(t);
+	return 0;
+fail:
+	cleanup(t);
+	return -1;
+}
+
+static int
+ordered_reconfigure(struct test *t)
+{
+	if (init(t, 1, 1) < 0 ||
+			create_ports(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	const struct rte_event_queue_conf conf = {
+			.schedule_type = RTE_SCHED_TYPE_ORDERED,
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+			.nb_atomic_flows = 1024,
+			.nb_atomic_order_sequences = 1024,
+	};
+
+	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+		printf("%d: error creating qid\n", __LINE__);
+		goto failed;
+	}
+
+	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+		printf("%d: error creating qid, for 2nd time\n", __LINE__);
+		goto failed;
+	}
+
+	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	cleanup(t);
+	return 0;
+failed:
+	cleanup(t);
+	return -1;
+}
+
+static int
+qid_priorities(struct test *t)
+{
+	/* Test works by having a CQ with enough empty space for all packets,
+	 * and enqueueing 3 packets to 3 QIDs. They must return based on the
+	 * priority of the QID, not the ingress order, to pass the test
+	 */
+	unsigned int i;
+	/* Create instance with 1 port and 3 qids */
+	if (init(t, 3, 1) < 0 ||
+			create_ports(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	for (i = 0; i < 3; i++) {
+		/* Create QID */
+		const struct rte_event_queue_conf conf = {
+			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+			/* increase priority (0 == highest), as we go */
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
+			.nb_atomic_flows = 1024,
+			.nb_atomic_order_sequences = 1024,
+		};
+
+		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+			printf("%d: error creating qid %d\n", __LINE__, i);
+			return -1;
+		}
+		t->qid[i] = i;
+	}
+	t->nb_qids = i;
+	/* map all QIDs to port */
+	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/* enqueue 3 packets, setting seqn and QID to check priority */
+	for (i = 0; i < 3; i++) {
+		struct rte_event ev;
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+		ev.queue_id = t->qid[i];
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.mbuf = arp;
+		arp->seqn = i;
+
+		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+		if (err != 1) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	/* dequeue packets, verify priority was upheld */
+	struct rte_event ev[32];
+	uint32_t deq_pkts =
+		rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
+	if (deq_pkts != 3) {
+		printf("%d: failed to deq packets\n", __LINE__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+	for (i = 0; i < 3; i++) {
+		if (ev[i].mbuf->seqn != 2-i) {
+			printf(
+				"%d: qid priority test: seqn %d incorrectly prioritized\n",
+					__LINE__, i);
+			return -1;
+		}
+	}
+
+	cleanup(t);
+	return 0;
+}
+
+static int
+load_balancing(struct test *t)
+{
+	const int rx_enq = 0;
+	int err;
+	uint32_t i;
+
+	if (init(t, 1, 4) < 0 ||
+			create_ports(t, 4) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	for (i = 0; i < 3; i++) {
+		/* map port 1 - 3 inclusive */
+		if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
+				NULL, 1) != 1) {
+			printf("%d: error mapping qid to port %d\n",
+					__LINE__, i);
+			return -1;
+		}
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/************** FORWARD ****************/
+	/*
+	 * Create a set of flows that test the load-balancing operation of the
+	 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
+	 * with a new flow, which should be sent to the 3rd mapped CQ
+	 */
+	static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
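+	/* flow 0 appears 4 times, flow 1 twice and flow 2 three times,
+	 * matching the per-port inflight counts asserted below (4, 2, 3).
+	 */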
+
+	for (i = 0; i < RTE_DIM(flows); i++) {
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+
+		struct rte_event ev = {
+				.op = RTE_EVENT_OP_NEW,
+				.queue_id = t->qid[0],
+				.flow_id = flows[i],
+				.mbuf = arp,
+		};
+		/* generate pkt and enqueue */
+		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+		if (err < 0) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	struct test_event_dev_stats stats;
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		return -1;
+	}
+
+	if (stats.port_inflight[1] != 4) {
+		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
+				__func__);
+		return -1;
+	}
+	if (stats.port_inflight[2] != 2) {
+		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
+				__func__);
+		return -1;
+	}
+	if (stats.port_inflight[3] != 3) {
+		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
+				__func__);
+		return -1;
+	}
+
+	cleanup(t);
+	return 0;
+}
+
+static int
+load_balancing_history(struct test *t)
+{
+	struct test_event_dev_stats stats = {0};
+	const int rx_enq = 0;
+	int err;
+	uint32_t i;
+
+	/* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
+	if (init(t, 1, 4) < 0 ||
+			create_ports(t, 4) < 0 ||
+			create_atomic_qids(t, 1) < 0)
+		return -1;
+
+	/* CQ mapping to QID */
+	if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
+		printf("%d: error mapping port 1 qid\n", __LINE__);
+		return -1;
+	}
+	if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
+		printf("%d: error mapping port 2 qid\n", __LINE__);
+		return -1;
+	}
+	if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
+		printf("%d: error mapping port 3 qid\n", __LINE__);
+		return -1;
+	}
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Create a set of flows that test the load-balancing operation of the
+	 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
+	 * the packet from CQ 0, send in a new set of flows. Ensure that:
+	 *  1. The new flow 3 gets into the empty CQ0
+	 *  2. packets for existing flow gets added into CQ1
+	 *  3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
+	 *     more outstanding pkts
+	 *
+	 *  This test makes sure that when a flow ends (i.e. all packets
+	 *  have been completed for that flow), that the flow can be moved
+	 *  to a different CQ when new packets come in for that flow.
+	 */
+	static uint32_t flows1[] = {0, 1, 1, 2};
+
+	for (i = 0; i < RTE_DIM(flows1); i++) {
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+		struct rte_event ev = {
+				.flow_id = flows1[i],
+				.op = RTE_EVENT_OP_NEW,
+				.queue_id = t->qid[0],
+				.event_type = RTE_EVENT_TYPE_CPU,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.mbuf = arp
+		};
+
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+		arp->hash.rss = flows1[i];
+		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+		if (err < 0) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+
+	/* call the scheduler */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	/* Dequeue the flow 0 packet from port 1, so that we can then drop it */
+	struct rte_event ev;
+	if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
+		printf("%d: failed to dequeue\n", __LINE__);
+		return -1;
+	}
+	if (ev.mbuf->hash.rss != flows1[0]) {
+		printf("%d: unexpected flow received\n", __LINE__);
+		return -1;
+	}
+
+	/* drop the flow 0 packet from port 1 */
+	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
+
+	/* call the scheduler */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	/*
+	 * Set up the next set of flows, first a new flow to fill up
+	 * CQ 0, so that the next flow 0 packet should go to CQ2
+	 */
+	static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
+
+	for (i = 0; i < RTE_DIM(flows2); i++) {
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+		struct rte_event ev = {
+				.flow_id = flows2[i],
+				.op = RTE_EVENT_OP_NEW,
+				.queue_id = t->qid[0],
+				.event_type = RTE_EVENT_TYPE_CPU,
+				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+				.mbuf = arp
+		};
+
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+		arp->hash.rss = flows2[i];
+
+		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+		if (err < 0) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+
+	/* schedule */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d:failed to get stats\n", __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Now check the resulting inflights on each port.
+	 */
+	if (stats.port_inflight[1] != 3) {
+		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
+				__func__);
+		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+				(unsigned int)stats.port_inflight[1],
+				(unsigned int)stats.port_inflight[2],
+				(unsigned int)stats.port_inflight[3]);
+		return -1;
+	}
+	if (stats.port_inflight[2] != 4) {
+		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
+				__func__);
+		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+				(unsigned int)stats.port_inflight[1],
+				(unsigned int)stats.port_inflight[2],
+				(unsigned int)stats.port_inflight[3]);
+		return -1;
+	}
+	if (stats.port_inflight[3] != 2) {
+		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
+				__func__);
+		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+				(unsigned int)stats.port_inflight[1],
+				(unsigned int)stats.port_inflight[2],
+				(unsigned int)stats.port_inflight[3]);
+		return -1;
+	}
+
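+	/* drain and release any events still held on the worker ports */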
+	for (i = 1; i <= 3; i++) {
+		struct rte_event ev;
+		while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
+			rte_event_enqueue_burst(evdev, i, &release_ev, 1);
+	}
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	cleanup(t);
+	return 0;
+}
+
+static int
+invalid_qid(struct test *t)
+{
+	struct test_event_dev_stats stats;
+	const int rx_enq = 0;
+	int err;
+	uint32_t i;
+
+	if (init(t, 1, 4) < 0 ||
+			create_ports(t, 4) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* CQ mapping to QID */
+	for (i = 0; i < 4; i++) {
+		err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
+				NULL, 1);
+		if (err != 1) {
+			printf("%d: error mapping port 1 qid\n", __LINE__);
+			return -1;
+		}
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Send in a packet with an invalid qid to the scheduler.
+	 * We should see the packet enqueued OK, but the inflights for
+	 * that packet should not be incremented, and the rx_dropped
+	 * should be incremented.
+	 */
+	static uint32_t flows1[] = {20};
+
+	for (i = 0; i < RTE_DIM(flows1); i++) {
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+
+		struct rte_event ev = {
+				.op = RTE_EVENT_OP_NEW,
+				.queue_id = t->qid[0] + flows1[i],
+				.flow_id = i,
+				.mbuf = arp,
+		};
+		/* generate pkt and enqueue */
+		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+		if (err < 0) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+
+	/* call the scheduler */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		return -1;
+	}
+
+	/*
+	 * Now check the resulting inflights on the port, and the rx_dropped.
+	 */
+	if (stats.port_inflight[0] != 0) {
+		printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
+				__func__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+	if (stats.port_rx_dropped[0] != 1) {
+		printf("%d:%s: port 1 drops\n", __LINE__, __func__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+	/* each packet drop should only be counted in one place - port or dev */
+	if (stats.rx_dropped != 0) {
+		printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
+				__func__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+
+	cleanup(t);
+	return 0;
+}
+
+static int
+single_packet(struct test *t)
+{
+	const uint32_t MAGIC_SEQN = 7321;
+	struct rte_event ev;
+	struct test_event_dev_stats stats;
+	const int rx_enq = 0;
+	const int wrk_enq = 2;
+	int err;
+
+	/* Create instance with 4 ports */
+	if (init(t, 1, 4) < 0 ||
+			create_ports(t, 4) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* CQ mapping to QID */
+	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+	if (err != 1) {
+		printf("%d: error mapping lb qid\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/************** Gen pkt and enqueue ****************/
+	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+	if (!arp) {
+		printf("%d: gen of pkt failed\n", __LINE__);
+		return -1;
+	}
+
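+	/* build a NEW event and tag the mbuf with the magic sequence number */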
+	ev.op = RTE_EVENT_OP_NEW;
+	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+	ev.mbuf = arp;
+	ev.queue_id = 0;
+	ev.flow_id = 3;
+	arp->seqn = MAGIC_SEQN;
+
+	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+	if (err < 0) {
+		printf("%d: Failed to enqueue\n", __LINE__);
+		return -1;
+	}
+
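+	/* run the scheduler to deliver the event to the worker CQ */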
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		return -1;
+	}
+
+	if (stats.rx_pkts != 1 ||
+			stats.tx_pkts != 1 ||
+			stats.port_inflight[wrk_enq] != 1) {
+		printf("%d: Sched core didn't handle pkt as expected\n",
+				__LINE__);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+
+	uint32_t deq_pkts;
+
+	deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
+	if (deq_pkts < 1) {
+		printf("%d: Failed to deq\n", __LINE__);
+		return -1;
+	}
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		return -1;
+	}
+
+	if (ev.mbuf->seqn != MAGIC_SEQN) {
+		printf("%d: magic sequence number not dequeued\n", __LINE__);
+		return -1;
+	}
+
+	rte_pktmbuf_free(ev.mbuf);
+	err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
+	if (err < 0) {
+		printf("%d: Failed to enqueue\n", __LINE__);
+		return -1;
+	}
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		return -1;
+	}
+	if (stats.port_inflight[wrk_enq] != 0) {
+		printf("%d: port inflight not correct\n", __LINE__);
+		return -1;
+	}
+
+	cleanup(t);
+	return 0;
+}
+
+static int
+inflight_counts(struct test *t)
+{
+	struct rte_event ev;
+	struct test_event_dev_stats stats;
+	const int rx_enq = 0;
+	const int p1 = 1;
+	const int p2 = 2;
+	int err;
+	int i;
+
+	/* Create instance with 3 ports and 2 QIDs */
+	if (init(t, 2, 3) < 0 ||
+			create_ports(t, 3) < 0 ||
+			create_atomic_qids(t, 2) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* CQ mapping to QID */
+	err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
+	if (err != 1) {
+		printf("%d: error mapping lb qid\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+	err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
+	if (err != 1) {
+		printf("%d: error mapping lb qid\n", __LINE__);
+		cleanup(t);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/************** FORWARD ****************/
+#define QID1_NUM 5
+	for (i = 0; i < QID1_NUM; i++) {
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			goto err;
+		}
+
+		ev.queue_id = t->qid[0];
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.mbuf = arp;
+		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+		if (err != 1) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			goto err;
+		}
+	}
+#define QID2_NUM 3
+	for (i = 0; i < QID2_NUM; i++) {
+		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+
+		if (!arp) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			goto err;
+		}
+		ev.queue_id = t->qid[1];
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.mbuf = arp;
+		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+		if (err != 1) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			goto err;
+		}
+	}
+
+	/* schedule */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		goto err;
+	}
+
+	if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
+			stats.tx_pkts != QID1_NUM + QID2_NUM) {
+		printf("%d: Sched core didn't handle pkt as expected\n",
+				__LINE__);
+		goto err;
+	}
+
+	if (stats.port_inflight[p1] != QID1_NUM) {
+		printf("%d: %s port 1 inflight not correct\n", __LINE__,
+				__func__);
+		goto err;
+	}
+	if (stats.port_inflight[p2] != QID2_NUM) {
+		printf("%d: %s port 2 inflight not correct\n", __LINE__,
+				__func__);
+		goto err;
+	}
+
+	/************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
+	/* port 1 */
+	struct rte_event events[QID1_NUM + QID2_NUM];
+	uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
+			RTE_DIM(events), 0);
+
+	if (deq_pkts != QID1_NUM) {
+		printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
+		goto err;
+	}
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		goto err;
+	}
+	if (stats.port_inflight[p1] != QID1_NUM) {
+		printf("%d: port 1 inflight changed after DEQ\n",
+				__LINE__);
+		goto err;
+	}
+	for (i = 0; i < QID1_NUM; i++) {
+		err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
+				1);
+		if (err != 1) {
+			printf("%d: %s rte enqueue of inf release failed\n",
+				__LINE__, __func__);
+			goto err;
+		}
+	}
+
+	/*
+	 * The scheduler core decrements inflights, so it must run to
+	 * process the release events just enqueued
+	 */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (stats.port_inflight[p1] != 0) {
+		printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
+		goto err;
+	}
+
+	/* port2 */
+	deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
+			RTE_DIM(events), 0);
+	if (deq_pkts != QID2_NUM) {
+		printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
+		goto err;
+	}
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+		goto err;
+	}
+	if (stats.port_inflight[p2] != QID2_NUM) {
+		printf("%d: port 2 inflight changed after DEQ\n",
+				__LINE__);
+		goto err;
+	}
+	for (i = 0; i < QID2_NUM; i++) {
+		err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
+				1);
+		if (err != 1) {
+			printf("%d: %s rte enqueue of inf release failed\n",
+				__LINE__, __func__);
+			goto err;
+		}
+	}
+
+	/*
+	 * The scheduler core decrements inflights, so it must run to
+	 * process the release events just enqueued
+	 */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	err = test_event_dev_stats_get(evdev, &stats);
+	if (stats.port_inflight[p2] != 0) {
+		printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
+		goto err;
+	}
+	cleanup(t);
+	return 0;
+
+err:
+	rte_event_dev_dump(evdev, stdout);
+	cleanup(t);
+	return -1;
+}
+
+static int
+parallel_basic(struct test *t, int check_order)
+{
+	const uint8_t rx_port = 0;
+	const uint8_t w1_port = 1;
+	const uint8_t w3_port = 3;
+	const uint8_t tx_port = 4;
+	int err;
+	int i;
+	uint32_t deq_pkts, j;
+	struct rte_mbuf *mbufs[3];
+	const uint32_t MAGIC_SEQN = 1234;
+
+	/* Create instance with 5 ports (1 rx, 3 workers, 1 tx) and 2 QIDs */
+	if (init(t, 2, tx_port + 1) < 0 ||
+			create_ports(t, tx_port + 1) < 0 ||
+			(check_order ?  create_ordered_qids(t, 1) :
+				create_unordered_qids(t, 1)) < 0 ||
+			create_directed_qids(t, 1, &tx_port)) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/*
+	 * CQ mapping to QID
+	 * We need three ports, all mapped to the same ordered qid0. Then we'll
+	 * take a packet out to each port, re-enqueue in reverse order,
+	 * then make sure the reordering has taken place properly when we
+	 * dequeue from the tx_port.
+	 *
+	 * Simplified test setup diagram:
+	 *
+	 * rx_port        w1_port
+	 *        \     /         \
+	 *         qid0 - w2_port - qid1
+	 *              \         /     \
+	 *                w3_port        tx_port
+	 */
+	/* CQ mapping to QID for LB ports (directed mapped on create) */
+	for (i = w1_port; i <= w3_port; i++) {
+		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+				1);
+		if (err != 1) {
+			printf("%d: error mapping lb qid\n", __LINE__);
+			cleanup(t);
+			return -1;
+		}
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	/* Enqueue 3 packets to the rx port */
+	for (i = 0; i < 3; i++) {
+		struct rte_event ev;
+		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+		if (!mbufs[i]) {
+			printf("%d: gen of pkt failed\n", __LINE__);
+			return -1;
+		}
+
+		ev.queue_id = t->qid[0];
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.mbuf = mbufs[i];
+		mbufs[i]->seqn = MAGIC_SEQN + i;
+
+		/* generate pkt and enqueue */
+		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+		if (err != 1) {
+			printf("%d: Failed to enqueue pkt %u, retval = %u\n",
+					__LINE__, i, err);
+			return -1;
+		}
+	}
+
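+	/* run the scheduler to distribute the packets to the worker CQs */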
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	/* use extra slot to make logic in loops easier */
+	struct rte_event deq_ev[w3_port + 1];
+
+	/* Dequeue the 3 packets, one from each worker port */
+	for (i = w1_port; i <= w3_port; i++) {
+		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+				&deq_ev[i], 1, 0);
+		if (deq_pkts != 1) {
+			printf("%d: Failed to deq\n", __LINE__);
+			rte_event_dev_dump(evdev, stdout);
+			return -1;
+		}
+	}
+
+	/* Enqueue each packet in reverse order, flushing after each one */
+	for (i = w3_port; i >= w1_port; i--) {
+
+		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
+		deq_ev[i].queue_id = t->qid[1];
+		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
+		if (err != 1) {
+			printf("%d: Failed to enqueue\n", __LINE__);
+			return -1;
+		}
+	}
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	/* dequeue from the tx ports, we should get 3 packets */
+	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+			3, 0);
+
+	/* Check to see if we've got all 3 packets */
+	if (deq_pkts != 3) {
+		printf("%d: expected 3 pkts at tx port got %d from port %d\n",
+			__LINE__, deq_pkts, tx_port);
+		rte_event_dev_dump(evdev, stdout);
+		return -1;
+	}
+
+	/* Check to see if the sequence numbers are in expected order */
+	if (check_order) {
+		for (j = 0 ; j < deq_pkts ; j++) {
+			if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
+				printf(
+					"%d: Incorrect sequence number(%d) from port %d\n",
+					__LINE__, deq_ev[j].mbuf->seqn, tx_port);
+				return -1;
+			}
+		}
+	}
+
+	/* Destroy the instance */
+	cleanup(t);
+	return 0;
+}
+
+static int
+ordered_basic(struct test *t)
+{
+	return parallel_basic(t, 1);
+}
+
+static int
+unordered_basic(struct test *t)
+{
+	return parallel_basic(t, 0);
+}
+
+static int
+holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
+{
+	const struct rte_event new_ev = {
+			.op = RTE_EVENT_OP_NEW
+			/* all other fields zero */
+	};
+	struct rte_event ev = new_ev;
+	unsigned int rx_port = 0; /* port we get the first flow on */
+	char rx_port_used_stat[64];
+	char rx_port_free_stat[64];
+	char other_port_used_stat[64];
+
+	if (init(t, 1, 2) < 0 ||
+			create_ports(t, 2) < 0 ||
+			create_atomic_qids(t, 1) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+	int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
+	if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
+			nb_links != 1) {
+		printf("%d: Error links queue to ports\n", __LINE__);
+		goto err;
+	}
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		goto err;
+	}
+
+	/* send one packet and see where it goes, port 0 or 1 */
+	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+		printf("%d: Error doing first enqueue\n", __LINE__);
+		goto err;
+	}
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
+			!= 1)
+		rx_port = 1;
+
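+	/* build the xstat names for the CQ ring counters of each port */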
+	snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
+			"port_%u_cq_ring_used", rx_port);
+	snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
+			"port_%u_cq_ring_free", rx_port);
+	snprintf(other_port_used_stat, sizeof(other_port_used_stat),
+			"port_%u_cq_ring_used", rx_port ^ 1);
+	if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
+			!= 1) {
+		printf("%d: Error, first event not scheduled\n", __LINE__);
+		goto err;
+	}
+
+	/* now fill up the rx port's queue with one flow to cause HOLB */
+	do {
+		ev = new_ev;
+		if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+			printf("%d: Error with enqueue\n", __LINE__);
+			goto err;
+		}
+		rte_service_run_iter_on_app_lcore(t->service_id, 1);
+	} while (rte_event_dev_xstats_by_name_get(evdev,
+				rx_port_free_stat, NULL) != 0);
+
+	/* one more packet, which needs to stay in IQ - i.e. HOLB */
+	ev = new_ev;
+	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+		printf("%d: Error with enqueue\n", __LINE__);
+		goto err;
+	}
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	/* check that the other port still has an empty CQ */
+	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
+			!= 0) {
+		printf("%d: Error, second port CQ is not empty\n", __LINE__);
+		goto err;
+	}
+	/* check IQ now has one packet */
+	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
+			!= 1) {
+		printf("%d: Error, QID does not have exactly 1 packet\n",
+			__LINE__);
+		goto err;
+	}
+
+	/* send another flow, which should pass the other IQ entry */
+	ev = new_ev;
+	ev.flow_id = 1;
+	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+		printf("%d: Error with enqueue\n", __LINE__);
+		goto err;
+	}
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
+			!= 1) {
+		printf("%d: Error, second flow did not pass out first\n",
+			__LINE__);
+		goto err;
+	}
+
+	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
+			!= 1) {
+		printf("%d: Error, QID does not have exactly 1 packet\n",
+			__LINE__);
+		goto err;
+	}
+	cleanup(t);
+	return 0;
+err:
+	rte_event_dev_dump(evdev, stdout);
+	cleanup(t);
+	return -1;
+}
+
+static int
+worker_loopback_worker_fn(void *arg)
+{
+	struct test *t = arg;
+	uint8_t port = t->port[1];
+	int count = 0;
+	int enqd;
+
+	/*
+	 * Takes packets from the input port and then loops them back through
+	 * the Eventdev. Each packet is forwarded through QIDs 0-7 for 16
+	 * laps, so each packet is scheduled 8*16 = 128 times in total.
+	 */
+	printf("%d: \tWorker function started\n", __LINE__);
+	while (count < NUM_PACKETS) {
+#define BURST_SIZE 32
+		struct rte_event ev[BURST_SIZE];
+		uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
+				BURST_SIZE, 0);
+		if (nb_rx == 0) {
+			rte_pause();
+			continue;
+		}
+
+		for (i = 0; i < nb_rx; i++) {
+			ev[i].queue_id++;
+			if (ev[i].queue_id != 8) {
+				ev[i].op = RTE_EVENT_OP_FORWARD;
+				enqd = rte_event_enqueue_burst(evdev, port,
+						&ev[i], 1);
+				if (enqd != 1) {
+					printf("%d: Can't enqueue FWD!!\n",
+							__LINE__);
+					return -1;
+				}
+				continue;
+			}
+
+			ev[i].queue_id = 0;
+			ev[i].mbuf->udata64++;
+			if (ev[i].mbuf->udata64 != 16) {
+				ev[i].op = RTE_EVENT_OP_FORWARD;
+				enqd = rte_event_enqueue_burst(evdev, port,
+						&ev[i], 1);
+				if (enqd != 1) {
+					printf("%d: Can't enqueue FWD!!\n",
+							__LINE__);
+					return -1;
+				}
+				continue;
+			}
+			/* we have hit 16 iterations through system - drop */
+			rte_pktmbuf_free(ev[i].mbuf);
+			count++;
+			ev[i].op = RTE_EVENT_OP_RELEASE;
+			enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
+			if (enqd != 1) {
+				printf("%d drop enqueue failed\n", __LINE__);
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int
+worker_loopback_producer_fn(void *arg)
+{
+	struct test *t = arg;
+	uint8_t port = t->port[0];
+	uint64_t count = 0;
+
+	printf("%d: \tProducer function started\n", __LINE__);
+	while (count < NUM_PACKETS) {
+		struct rte_mbuf *m = NULL;
+		do {
+			m = rte_pktmbuf_alloc(t->mbuf_pool);
+		} while (m == NULL);
+
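+		/* udata64 counts completed passes through the QID chain */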
+		m->udata64 = 0;
+
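+		/* derive a pseudo-random flow id from the mbuf address */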
+		struct rte_event ev = {
+				.op = RTE_EVENT_OP_NEW,
+				.queue_id = t->qid[0],
+				.flow_id = (uintptr_t)m & 0xFFFF,
+				.mbuf = m,
+		};
+
+		while (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1)
+			rte_pause();
+
+		count++;
+	}
+
+	return 0;
+}
+
+static int
+worker_loopback(struct test *t, uint8_t disable_implicit_release)
+{
+	/* use a single producer core, and a worker core to see what happens
+	 * if the worker loops packets back multiple times
+	 */
+	struct test_event_dev_stats stats;
+	uint64_t print_cycles = 0, cycles = 0;
+	uint64_t tx_pkts = 0;
+	int err;
+	int w_lcore, p_lcore;
+
+	if (init(t, 8, 2) < 0 ||
+			create_atomic_qids(t, 8) < 0) {
+		printf("%d: Error initializing device\n", __LINE__);
+		return -1;
+	}
+
+	/* RX with low max events */
+	static struct rte_event_port_conf conf = {
+			.dequeue_depth = 32,
+			.enqueue_depth = 64,
+	};
+	/* beware: this cannot be initialized in the static above as it would
+	 * only be initialized once - and this needs to be set for multiple runs
+	 */
+	conf.new_event_threshold = 512;
+	conf.disable_implicit_release = disable_implicit_release;
+
+	if (rte_event_port_setup(evdev, 0, &conf) < 0) {
+		printf("Error setting up RX port\n");
+		return -1;
+	}
+	t->port[0] = 0;
+	/* TX with higher max events */
+	conf.new_event_threshold = 4096;
+	if (rte_event_port_setup(evdev, 1, &conf) < 0) {
+		printf("Error setting up TX port\n");
+		return -1;
+	}
+	t->port[1] = 1;
+
+	/* CQ mapping to QID */
+	err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
+	if (err != 8) { /* should have mapped all 8 queues */
+		printf("%d: error mapping port 1 to all qids\n", __LINE__);
+		return -1;
+	}
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	p_lcore = rte_get_next_lcore(
+			/* start core */ -1,
+			/* skip master */ 1,
+			/* wrap */ 0);
+	w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
+
+	rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
+	rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
+
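+	/* run the scheduler on this lcore and watch the workers for progress */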
+	print_cycles = cycles = rte_get_timer_cycles();
+	while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
+			rte_eal_get_lcore_state(w_lcore) != FINISHED) {
+
+		rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+		uint64_t new_cycles = rte_get_timer_cycles();
+
+		if (new_cycles - print_cycles > rte_get_timer_hz()) {
+			test_event_dev_stats_get(evdev, &stats);
+			printf(
+				"%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
+				__LINE__, stats.rx_pkts, stats.tx_pkts);
+
+			print_cycles = new_cycles;
+		}
+		if (new_cycles - cycles > rte_get_timer_hz() * 3) {
+			test_event_dev_stats_get(evdev, &stats);
+			if (stats.tx_pkts == tx_pkts) {
+				rte_event_dev_dump(evdev, stdout);
+				printf("Dumping xstats:\n");
+				xstats_print();
+				printf(
+					"%d: No schedules for seconds, deadlock\n",
+					__LINE__);
+				return -1;
+			}
+			tx_pkts = stats.tx_pkts;
+			cycles = new_cycles;
+		}
+	}
+	/* ensure all completions are flushed */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+	rte_eal_mp_wait_lcore();
+
+	cleanup(t);
+	return 0;
+}
+
+static struct rte_mempool *eventdev_func_mempool;
+
+int
+test_sw_eventdev(void)
+{
+	struct test *t;
+	int ret;
+
+	t = malloc(sizeof(struct test));
+	if (t == NULL)
+		return -1;
+	/* manually initialize the op; older gcc versions complain about
+	 * static initialization of struct members that are bitfields.
+	 */
+	release_ev.op = RTE_EVENT_OP_RELEASE;
+
+	const char *eventdev_name = "event_sw";
+	evdev = rte_event_dev_get_dev_id(eventdev_name);
+	if (evdev < 0) {
+		printf("%d: Eventdev %s not found - creating.\n",
+				__LINE__, eventdev_name);
+		if (rte_vdev_init(eventdev_name, NULL) < 0) {
+			printf("Error creating eventdev\n");
+			goto test_fail;
+		}
+		evdev = rte_event_dev_get_dev_id(eventdev_name);
+		if (evdev < 0) {
+			printf("Error finding newly created eventdev\n");
+			goto test_fail;
+		}
+	}
+
+	if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
+		printf("Failed to get service ID for software event dev\n");
+		goto test_fail;
+	}
+
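+	/* start the service and allow it to run on this unmapped app lcore */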
+	rte_service_runstate_set(t->service_id, 1);
+	rte_service_set_runstate_mapped_check(t->service_id, 0);
+
+	/* Only create mbuf pool once, reuse for each test run */
+	if (!eventdev_func_mempool) {
+		eventdev_func_mempool = rte_pktmbuf_pool_create(
+				"EVENTDEV_SW_SA_MBUF_POOL",
+				(1<<12), /* 4k buffers */
+				32 /*MBUF_CACHE_SIZE*/,
+				0,
+				512, /* use very small mbufs */
+				rte_socket_id());
+		if (!eventdev_func_mempool) {
+			printf("ERROR creating mempool\n");
+			goto test_fail;
+		}
+	}
+	t->mbuf_pool = eventdev_func_mempool;
+	printf("*** Running Single Directed Packet test...\n");
+	ret = test_single_directed_packet(t);
+	if (ret != 0) {
+		printf("ERROR - Single Directed Packet test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Directed Forward Credit test...\n");
+	ret = test_directed_forward_credits(t);
+	if (ret != 0) {
+		printf("ERROR - Directed Forward Credit test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Single Load Balanced Packet test...\n");
+	ret = single_packet(t);
+	if (ret != 0) {
+		printf("ERROR - Single Packet test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Unordered Basic test...\n");
+	ret = unordered_basic(t);
+	if (ret != 0) {
+		printf("ERROR -  Unordered Basic test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Ordered Basic test...\n");
+	ret = ordered_basic(t);
+	if (ret != 0) {
+		printf("ERROR -  Ordered Basic test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Burst Packets test...\n");
+	ret = burst_packets(t);
+	if (ret != 0) {
+		printf("ERROR - Burst Packets test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Load Balancing test...\n");
+	ret = load_balancing(t);
+	if (ret != 0) {
+		printf("ERROR - Load Balancing test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Prioritized Directed test...\n");
+	ret = test_priority_directed(t);
+	if (ret != 0) {
+		printf("ERROR - Prioritized Directed test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Prioritized Atomic test...\n");
+	ret = test_priority_atomic(t);
+	if (ret != 0) {
+		printf("ERROR - Prioritized Atomic test FAILED.\n");
+		goto test_fail;
+	}
+
+	printf("*** Running Prioritized Ordered test...\n");
+	ret = test_priority_ordered(t);
+	if (ret != 0) {
+		printf("ERROR - Prioritized Ordered test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Prioritized Unordered test...\n");
+	ret = test_priority_unordered(t);
+	if (ret != 0) {
+		printf("ERROR - Prioritized Unordered test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Invalid QID test...\n");
+	ret = invalid_qid(t);
+	if (ret != 0) {
+		printf("ERROR - Invalid QID test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Load Balancing History test...\n");
+	ret = load_balancing_history(t);
+	if (ret != 0) {
+		printf("ERROR - Load Balancing History test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Inflight Count test...\n");
+	ret = inflight_counts(t);
+	if (ret != 0) {
+		printf("ERROR - Inflight Count test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Abuse Inflights test...\n");
+	ret = abuse_inflights(t);
+	if (ret != 0) {
+		printf("ERROR - Abuse Inflights test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running XStats test...\n");
+	ret = xstats_tests(t);
+	if (ret != 0) {
+		printf("ERROR - XStats test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running XStats ID Reset test...\n");
+	ret = xstats_id_reset_tests(t);
+	if (ret != 0) {
+		printf("ERROR - XStats ID Reset test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running XStats Brute Force test...\n");
+	ret = xstats_brute_force(t);
+	if (ret != 0) {
+		printf("ERROR - XStats Brute Force test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running XStats ID Abuse test...\n");
+	ret = xstats_id_abuse_tests(t);
+	if (ret != 0) {
+		printf("ERROR - XStats ID Abuse test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running QID Priority test...\n");
+	ret = qid_priorities(t);
+	if (ret != 0) {
+		printf("ERROR - QID Priority test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Ordered Reconfigure test...\n");
+	ret = ordered_reconfigure(t);
+	if (ret != 0) {
+		printf("ERROR - Ordered Reconfigure test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Port LB Single Reconfig test...\n");
+	ret = port_single_lb_reconfig(t);
+	if (ret != 0) {
+		printf("ERROR - Port LB Single Reconfig test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Port Reconfig Credits test...\n");
+	ret = port_reconfig_credits(t);
+	if (ret != 0) {
+		printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Head-of-line-blocking test...\n");
+	ret = holb(t);
+	if (ret != 0) {
+		printf("ERROR - Head-of-line-blocking test FAILED.\n");
+		goto test_fail;
+	}
+	if (rte_lcore_count() >= 3) {
+		printf("*** Running Worker loopback test...\n");
+		ret = worker_loopback(t, 0);
+		if (ret != 0) {
+			printf("ERROR - Worker loopback test FAILED.\n");
+			goto test_fail;
+		}
+
+		printf("*** Running Worker loopback test (implicit release disabled)...\n");
+		ret = worker_loopback(t, 1);
+		if (ret != 0) {
+			printf("ERROR - Worker loopback test FAILED.\n");
+			goto test_fail;
+		}
+	} else {
+		printf("### Not enough cores for worker loopback tests.\n");
+		printf("### Need at least 3 cores for the tests.\n");
+	}
+
+	/*
+	 * Free the test instance but leave the mempool initialized; the
+	 * static eventdev_func_mempool pointer is re-used on re-runs.
+	 */
+	free(t);
+
+	printf("SW Eventdev Selftest Successful.\n");
+	return 0;
+test_fail:
+	free(t);
+	printf("SW Eventdev Selftest Failed.\n");
+	return -1;
+}
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index b3b3b17e..67151f77 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -1,33 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <rte_atomic.h>
@@ -85,6 +57,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 	struct sw_port *p = port;
 	struct sw_evdev *sw = (void *)p->sw;
 	uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
+	uint32_t credit_update_quanta = sw->credit_update_quanta;
 	int new = 0;
 
 	if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
@@ -98,7 +71,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 
 	if (p->inflight_credits < new) {
 		/* check if event enqueue brings port over max threshold */
-		uint32_t credit_update_quanta = sw->credit_update_quanta;
 		if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
 			return 0;
 
@@ -109,7 +81,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 			return 0;
 	}
 
-	uint32_t forwards = 0;
 	for (i = 0; i < num; i++) {
 		int op = ev[i].op;
 		int outstanding = p->outstanding_releases > 0;
@@ -118,7 +89,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 		p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
 		p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
 					outstanding;
-		forwards += (op == RTE_EVENT_OP_FORWARD);
 
 		new_ops[i] = sw_qe_flag_map[op];
 		new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
@@ -131,15 +101,12 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 			p->outstanding_releases--;
 
 		/* error case: branch to avoid touching p->stats */
-		if (unlikely(invalid_qid)) {
+		if (unlikely(invalid_qid && op != RTE_EVENT_OP_RELEASE)) {
 			p->stats.rx_dropped++;
 			p->inflight_credits++;
 		}
 	}
 
-	/* handle directed port forward credits */
-	p->inflight_credits -= forwards * p->is_directed;
-
 	/* returns number of events actually enqueued */
 	uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
 					     new_ops);
@@ -152,6 +119,13 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 		p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
 		p->last_dequeue_ticks = 0;
 	}
+
+	/* Replenish credits if enough releases are performed */
+	if (p->inflight_credits >= credit_update_quanta * 2) {
+		rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+		p->inflight_credits -= credit_update_quanta;
+	}
+
 	return enq;
 }
 
@@ -167,41 +141,39 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
 {
 	RTE_SET_USED(wait);
 	struct sw_port *p = (void *)port;
-	struct sw_evdev *sw = (void *)p->sw;
 	struct rte_event_ring *ring = p->cq_worker_ring;
-	uint32_t credit_update_quanta = sw->credit_update_quanta;
 
 	/* check that all previous dequeues have been released */
-	if (!p->is_directed) {
+	if (p->implicit_release) {
+		struct sw_evdev *sw = (void *)p->sw;
+		uint32_t credit_update_quanta = sw->credit_update_quanta;
 		uint16_t out_rels = p->outstanding_releases;
 		uint16_t i;
 		for (i = 0; i < out_rels; i++)
 			sw_event_release(p, i);
+
+		/* Replenish credits if enough releases are performed */
+		if (p->inflight_credits >= credit_update_quanta * 2) {
+			rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+			p->inflight_credits -= credit_update_quanta;
+		}
 	}
 
 	/* returns number of events actually dequeued */
 	uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
 	if (unlikely(ndeq == 0)) {
-		p->outstanding_releases = 0;
 		p->zero_polls++;
 		p->total_polls++;
 		goto end;
 	}
 
-	/* only add credits for directed ports - LB ports send RELEASEs */
-	p->inflight_credits += ndeq * p->is_directed;
-	p->outstanding_releases = ndeq;
+	p->outstanding_releases += ndeq;
 	p->last_dequeue_burst_sz = ndeq;
 	p->last_dequeue_ticks = rte_get_timer_cycles();
 	p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
 	p->total_polls++;
 
 end:
-	if (p->inflight_credits >= credit_update_quanta * 2 &&
-			p->inflight_credits > credit_update_quanta + ndeq) {
-		rte_atomic32_sub(&sw->inflights, credit_update_quanta);
-		p->inflight_credits -= credit_update_quanta;
-	}
 	return ndeq;
 }
 
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index 61a5c33b..7a6caa64 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -1,38 +1,10 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <rte_event_ring.h>
 #include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
 
 enum xstats_type {
 	/* common stats */
@@ -53,7 +25,6 @@ enum xstats_type {
 	pkt_cycles,
 	poll_return, /* for zero-count and used also for port bucket loop */
 	/* qid_specific */
-	iq_size,
 	iq_used,
 	/* qid port mapping specific */
 	pinned,
@@ -144,7 +115,6 @@ get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
 			return infl;
 		} while (0);
 		break;
-	case iq_size: return RTE_DIM(qid->iq[0]->ring);
 	default: return -1;
 	}
 }
@@ -157,7 +127,7 @@ get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
 	const int iq_idx = extra_arg;
 
 	switch (type) {
-	case iq_used: return iq_ring_count(qid->iq[iq_idx]);
+	case iq_used: return iq_count(&qid->iq[iq_idx]);
 	default: return -1;
 	}
 }
@@ -236,13 +206,13 @@ sw_xstats_init(struct sw_evdev *sw)
 	/* all bucket dequeues are allowed to be reset, handled in loop below */
 
 	static const char * const qid_stats[] = {"rx", "tx", "drop",
-			"inflight", "iq_size"
+			"inflight"
 	};
 	static const enum xstats_type qid_types[] = { rx, tx, dropped,
-			inflight, iq_size
+			inflight
 	};
 	static const uint8_t qid_reset_allowed[] = {1, 1, 1,
-			0, 0
+			0
 	};
 
 	static const char * const qid_iq_stats[] = { "used" };
-- 
cgit