Diffstat (limited to 'drivers/event')
-rw-r--r--  drivers/event/Makefile                               |   6
-rw-r--r--  drivers/event/dpaa2/Makefile                         |  60
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.c                 | 692
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.h                 | 114
-rw-r--r--  drivers/event/dpaa2/dpaa2_hw_dpcon.c                 | 139
-rw-r--r--  drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map  |   3
-rw-r--r--  drivers/event/octeontx/Makefile                      |   4
-rw-r--r--  drivers/event/octeontx/rte_pmd_octeontx_ssovf.h      |   4
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.c                 |  11
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.h                 |  10
-rw-r--r--  drivers/event/octeontx/ssovf_mbox.c                  |   6
-rw-r--r--  drivers/event/octeontx/ssovf_probe.c                 |   4
-rw-r--r--  drivers/event/octeontx/ssovf_worker.c                |  49
-rw-r--r--  drivers/event/octeontx/ssovf_worker.h                |  27
-rw-r--r--  drivers/event/skeleton/Makefile                      |   4
-rw-r--r--  drivers/event/skeleton/skeleton_eventdev.c           |  38
-rw-r--r--  drivers/event/skeleton/skeleton_eventdev.h           |   7
-rw-r--r--  drivers/event/sw/event_ring.h                        |  14
-rw-r--r--  drivers/event/sw/iq_ring.h                           |  20
-rw-r--r--  drivers/event/sw/sw_evdev.c                          | 103
-rw-r--r--  drivers/event/sw/sw_evdev.h                          |  10
-rw-r--r--  drivers/event/sw/sw_evdev_scheduler.c                |  24
-rw-r--r--  drivers/event/sw/sw_evdev_worker.c                   |  33
-rw-r--r--  drivers/event/sw/sw_evdev_xstats.c                   |  28
24 files changed, 1274 insertions(+), 136 deletions(-)
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index 1cf389e8..3f6b8988 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -39,5 +39,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
DEPDIRS-sw = $(core-libs) librte_kvargs librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
DEPDIRS-octeontx = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
+DEPDIRS-dpaa2 = $(core-libs) librte_bus_fslmc
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
new file mode 100644
index 00000000..3497d09d
--- /dev/null
+++ b/drivers/event/dpaa2/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright 2017 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_event_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
new file mode 100644
index 00000000..cf2d2741
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -0,0 +1,692 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/epoll.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_vdev.h>
+
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+#include "dpaa2_eventdev.h"
+#include <portal/dpaa2_hw_pvt.h>
+#include <mc/fsl_dpci.h>
+
+/* Clarifications
+ * Eventdev = SoC Instance
+ * Eventport = DPIO Instance
+ * Eventqueue = DPCON Instance
+ * 1 Eventdev can have N Eventqueues
+ * Soft Event Flow is DPCI Instance
+ */
+
+static uint16_t
+dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_eventdev *ev_dev =
+ ((struct dpaa2_io_portal_t *)port)->eventdev;
+ struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
+ uint32_t queue_id = ev[0].queue_id;
+ struct evq_info_t *evq_info = &priv->evq_info[queue_id];
+ uint32_t fqid;
+ struct qbman_swp *swp;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t loop, frames_to_send;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ uint16_t num_tx = 0;
+ int ret;
+
+ RTE_SET_USED(port);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (nb_events) {
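+		/* Send at most MAX_TX_RING_SLOTS frames per portal enqueue;
+		 * any smaller remainder goes out as the final batch.
+		 */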
+ frames_to_send = (nb_events >> 3) ?
+ MAX_TX_RING_SLOTS : nb_events;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ const struct rte_event *event = &ev[num_tx + loop];
+
+ if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
+ fqid = evq_info->dpci->queue[
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
+ else
+ fqid = evq_info->dpci->queue[
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
+ qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
+
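+			/* A non-zero impl_opaque holds (DQRR index + 1) of a
+			 * previously dequeued atomic event; enqueue with DCA
+			 * so the held DQRR entry is consumed as part of this
+			 * enqueue.
+			 */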
+ if (event->impl_opaque) {
+ uint8_t dqrr_index = event->impl_opaque - 1;
+
+ qbman_eq_desc_set_dca(&eqdesc[loop], 1,
+ dqrr_index, 0);
+ DPAA2_PER_LCORE_DPIO->dqrr_size--;
+ DPAA2_PER_LCORE_DPIO->dqrr_held &=
+ ~(1 << dqrr_index);
+ }
+
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+
+ /*
+ * todo - need to align with hw context data
+ * to avoid copy
+ */
+ struct rte_event *ev_temp = rte_malloc(NULL,
+ sizeof(struct rte_event), 0);
+ rte_memcpy(ev_temp, event, sizeof(struct rte_event));
+ DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
+ DPAA2_SET_FD_LEN((&fd_arr[loop]),
+ sizeof(struct rte_event));
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_enqueue_multiple_eqdesc(swp,
+ &eqdesc[loop], &fd_arr[loop],
+ frames_to_send - loop);
+ }
+ num_tx += frames_to_send;
+ nb_events -= frames_to_send;
+ }
+
+ return num_tx;
+}
+
+static uint16_t
+dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
+{
+ return dpaa2_eventdev_enqueue_burst(port, ev, 1);
+}
+
+static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
+{
+ struct epoll_event epoll_ev;
+ int ret, i = 0;
+
+ qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
+ QBMAN_SWP_INTERRUPT_DQRI);
+
+RETRY:
+ ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
+ &epoll_ev, 1, timeout_ticks);
+ if (ret < 1) {
+		/* epoll_wait() sometimes fails with errno set to EINTR due
+		 * to spurious interrupts; retry in that case so the dequeue
+		 * wait is not aborted prematurely.
+		 */
+ if (errno == EINTR) {
+ PMD_DRV_LOG(DEBUG, "epoll_wait fails\n");
+ if (i++ > 10)
+ PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
+ goto RETRY;
+ }
+ }
+}
+
+static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+
+static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
+
+ RTE_SET_USED(swp);
+
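+	/* Do not consume the DQRR entry here: hold it so the atomic context
+	 * is released via DCA on a later enqueue (impl_opaque records which
+	 * entry is held).
+	 */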
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+ ev->impl_opaque = dqrr_index + 1;
+ DPAA2_PER_LCORE_DPIO->dqrr_size++;
+ DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
+}
+
+static uint16_t
+dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ const struct qbman_result *dq;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct dpaa2_queue *rxq;
+ int num_pkts = 0, ret, i = 0;
+
+ RTE_SET_USED(port);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ /* Check if there are atomic contexts to be released */
+ while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
+ if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
+ dq = qbman_get_dqrr_from_idx(swp, i);
+ qbman_swp_dqrr_consume(swp, dq);
+ DPAA2_PER_LCORE_DPIO->dqrr_size--;
+ }
+ i++;
+ }
+ DPAA2_PER_LCORE_DPIO->dqrr_held = 0;
+
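+	/* Pull events from the channels linked to this portal */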
+ do {
+ dq = qbman_swp_dqrr_next(swp);
+ if (!dq) {
+ if (!num_pkts && timeout_ticks) {
+ dpaa2_eventdev_dequeue_wait(timeout_ticks);
+ timeout_ticks = 0;
+ continue;
+ }
+ return num_pkts;
+ }
+
+ fd = qbman_result_DQ_fd(dq);
+
+ rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
+ if (rxq) {
+ rxq->cb(swp, fd, dq, &ev[num_pkts]);
+ } else {
+ qbman_swp_dqrr_consume(swp, dq);
+ PMD_DRV_LOG(ERR, "Null Return VQ received\n");
+ return 0;
+ }
+
+ num_pkts++;
+ } while (num_pkts < nb_events);
+
+ return num_pkts;
+}
+
+static uint16_t
+dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks)
+{
+ return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
+}
+
+static void
+dpaa2_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+ dev_info->min_dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_dequeue_timeout_ns =
+ DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
+ dev_info->dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_event_queues = priv->max_event_queues;
+ dev_info->max_event_queue_flows =
+ DPAA2_EVENT_MAX_QUEUE_FLOWS;
+ dev_info->max_event_queue_priority_levels =
+ DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
+ dev_info->max_event_priority_levels =
+ DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
+ dev_info->max_event_ports = RTE_MAX_LCORE;
+ dev_info->max_event_port_dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ dev_info->max_event_port_enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_BURST_MODE;
+}
+
+static int
+dpaa2_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+
+ PMD_DRV_FUNC_TRACE();
+
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ priv->nb_event_queues = conf->nb_event_queues;
+ priv->nb_event_ports = conf->nb_event_ports;
+ priv->nb_event_queue_flows = conf->nb_event_queue_flows;
+ priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
+ priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
+ priv->event_dev_cfg = conf->event_dev_cfg;
+
+ PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
+ return 0;
+}
+
+static int
+dpaa2_eventdev_start(struct rte_eventdev *dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_stop(struct rte_eventdev *dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+}
+
+static int
+dpaa2_eventdev_close(struct rte_eventdev *dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+ RTE_SET_USED(queue_conf);
+
+ queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
+ RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct evq_info_t *evq_info =
+ &priv->evq_info[queue_id];
+
+ PMD_DRV_FUNC_TRACE();
+
+ evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+ RTE_SET_USED(port_conf);
+
+ port_conf->new_event_threshold =
+ DPAA2_EVENT_MAX_NUM_EVENTS;
+ port_conf->dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ port_conf->enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+}
+
+static void
+dpaa2_eventdev_port_release(void *port)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port);
+}
+
+static int
+dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port_conf);
+
+ if (!dpaa2_io_portal[port_id].dpio_dev) {
+		dpaa2_io_portal[port_id].dpio_dev =
+			dpaa2_get_qbman_swp(port_id);
+		/* Verify the portal is valid before taking a reference */
+		if (!dpaa2_io_portal[port_id].dpio_dev)
+			return -1;
+		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
+ }
+
+ dpaa2_io_portal[port_id].eventdev = dev;
+ dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
+ return 0;
+}
+
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_io_portal_t *dpaa2_portal = port;
+ struct evq_info_t *evq_info;
+ int i;
+
+ PMD_DRV_FUNC_TRACE();
+
+ for (i = 0; i < nb_unlinks; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
+ 0, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ evq_info->link = 0;
+ }
+
+ return (int)nb_unlinks;
+}
+
+static int
+dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_io_portal_t *dpaa2_portal = port;
+ struct evq_info_t *evq_info;
+ uint8_t channel_index;
+ int ret, i, n;
+
+ PMD_DRV_FUNC_TRACE();
+
+ for (i = 0; i < nb_links; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+ if (evq_info->link)
+ continue;
+
+ ret = dpio_add_static_dequeue_channel(
+ dpaa2_portal->dpio_dev->dpio,
+ CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id, &channel_index);
+ if (ret < 0) {
+ PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
+ ret);
+ goto err;
+ }
+
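+		/* Enable push dequeue on the portal for this channel */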
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ channel_index, 1);
+ evq_info->dpcon->channel_index = channel_index;
+ evq_info->link = 1;
+ }
+
+ RTE_SET_USED(priorities);
+
+ return (int)nb_links;
+err:
+ for (n = 0; n < i; n++) {
+ evq_info = &priv->evq_info[queues[n]];
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
+ 0, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ evq_info->link = 0;
+ }
+ return ret;
+}
+
+static int
+dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ uint32_t scale = 1;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ *timeout_ticks = ns * scale;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(f);
+}
+
+static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
+ .dev_infos_get = dpaa2_eventdev_info_get,
+ .dev_configure = dpaa2_eventdev_configure,
+ .dev_start = dpaa2_eventdev_start,
+ .dev_stop = dpaa2_eventdev_stop,
+ .dev_close = dpaa2_eventdev_close,
+ .queue_def_conf = dpaa2_eventdev_queue_def_conf,
+ .queue_setup = dpaa2_eventdev_queue_setup,
+ .queue_release = dpaa2_eventdev_queue_release,
+ .port_def_conf = dpaa2_eventdev_port_def_conf,
+ .port_setup = dpaa2_eventdev_port_setup,
+ .port_release = dpaa2_eventdev_port_release,
+ .port_link = dpaa2_eventdev_port_link,
+ .port_unlink = dpaa2_eventdev_port_unlink,
+ .timeout_ticks = dpaa2_eventdev_timeout_ticks,
+ .dump = dpaa2_eventdev_dump
+};
+
+static int
+dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
+ struct dpaa2_dpcon_dev *dpcon_dev)
+{
+ struct dpci_rx_queue_cfg rx_queue_cfg;
+ int ret, i;
+
+	/* Configure the DPCI Rx queues so received frames land on the
+	 * DPCON object
+	 */
+ rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
+ DPCI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
+ rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
+ rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;
+
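+	/* Parallel and atomic events arrive on separate DPCI Rx queues,
+	 * each with its own processing callback.
+	 */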
+ dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
+ dpaa2_eventdev_process_parallel;
+ dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
+ dpaa2_eventdev_process_atomic;
+
+ for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
+ rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
+ ret = dpci_set_rx_queue(&dpci_dev->dpci,
+ CMD_PRI_LOW,
+ dpci_dev->token, i,
+ &rx_queue_cfg);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "set_rx_q failed with err code: %d", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_create(const char *name)
+{
+ struct rte_eventdev *eventdev;
+ struct dpaa2_eventdev *priv;
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+ int ret;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct dpaa2_eventdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &dpaa2_eventdev_ops;
+ eventdev->schedule = NULL;
+ eventdev->enqueue = dpaa2_eventdev_enqueue;
+ eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->dequeue = dpaa2_eventdev_dequeue;
+ eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ priv = eventdev->data->dev_private;
+ priv->max_event_queues = 0;
+
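+	/* Claim as many DPCON + DPCI pairs as are available; each pair
+	 * backs one event queue.
+	 */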
+ do {
+ dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
+ if (!dpcon_dev)
+ break;
+ priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;
+
+ dpci_dev = rte_dpaa2_alloc_dpci_dev();
+ if (!dpci_dev) {
+ rte_dpaa2_free_dpcon_dev(dpcon_dev);
+ break;
+ }
+ priv->evq_info[priv->max_event_queues].dpci = dpci_dev;
+
+ ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "dpci setup failed with err code: %d", ret);
+ return ret;
+ }
+ priv->max_event_queues++;
+ } while (dpcon_dev && dpci_dev);
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Initializing %s", name);
+ return dpaa2_eventdev_create(name);
+}
+
+static int
+dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Closing %s", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
+ .probe = dpaa2_eventdev_probe,
+ .remove = dpaa2_eventdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
new file mode 100644
index 00000000..f79f78aa
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -0,0 +1,114 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA2_EVENTDEV_H__
+#define __DPAA2_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include <mc/fsl_dpcon.h>
+#include <mc/fsl_mc_sys.h>
+
+#define EVENTDEV_NAME_DPAA2_PMD event_dpaa2
+
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV_DEBUG
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#define PMD_DRV_FUNC_TRACE() do { } while (0)
+#endif
+
+#define PMD_DRV_ERR(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+#define DPAA2_EVENT_DEFAULT_DPCI_PRIO 0
+
+#define DPAA2_EVENT_MAX_QUEUES 16
+#define DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT 1
+#define DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
+#define DPAA2_EVENT_MAX_QUEUE_FLOWS 2048
+#define DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
+#define DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
+#define DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_NUM_EVENTS (INT32_MAX - 1)
+
+#define DPAA2_EVENT_QUEUE_ATOMIC_FLOWS 2048
+#define DPAA2_EVENT_QUEUE_ORDER_SEQUENCES 2048
+
+enum {
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE,
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE,
+ DPAA2_EVENT_DPCI_MAX_QUEUES
+};
+
+struct dpaa2_dpcon_dev {
+ TAILQ_ENTRY(dpaa2_dpcon_dev) next;
+ struct fsl_mc_io dpcon;
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpcon_id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+ uint8_t channel_index;
+};
+
+struct evq_info_t {
+ /* DPcon device */
+ struct dpaa2_dpcon_dev *dpcon;
+ /* Attached DPCI device */
+ struct dpaa2_dpci_dev *dpci;
+ /* Configuration provided by the user */
+ uint32_t event_queue_cfg;
+ uint8_t link;
+};
+
+struct dpaa2_eventdev {
+ struct evq_info_t evq_info[DPAA2_EVENT_MAX_QUEUES];
+ uint32_t dequeue_timeout_ns;
+ uint8_t max_event_queues;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint8_t resvd_1;
+ uint32_t nb_event_queue_flows;
+ uint32_t nb_event_port_dequeue_depth;
+ uint32_t nb_event_port_enqueue_depth;
+ uint32_t event_dev_cfg;
+};
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
+
+#endif /* __DPAA2_EVENTDEV_H__ */
diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
new file mode 100644
index 00000000..d3e73f90
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -0,0 +1,139 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_vfio.h>
+#include <mc/fsl_dpcon.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include "dpaa2_eventdev.h"
+
+TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev);
+static struct dpcon_dev_list dpcon_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpcon_dev_list); /*!< DPCON device list */
+
+static int
+rte_dpaa2_create_dpcon_device(struct fslmc_vfio_device *vdev __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpcon_id)
+{
+ struct dpaa2_dpcon_dev *dpcon_node;
+ struct dpcon_attr attr;
+ int ret;
+
+ /* Allocate DPAA2 dpcon handle */
+ dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0);
+ if (!dpcon_node) {
+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPCON Device");
+ return -1;
+ }
+
+ /* Open the dpcon object */
+ dpcon_node->dpcon.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpcon_open(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_id, &dpcon_node->token);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Resource alloc failure with err code: %d",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+ /* Get the device attributes */
+ ret = dpcon_get_attributes(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_node->token, &attr);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Reading device failed with err code: %d",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+	/* Update device-specific private information */
+ dpcon_node->qbman_ch_id = attr.qbman_ch_id;
+ dpcon_node->num_priorities = attr.num_priorities;
+ dpcon_node->dpcon_id = dpcon_id;
+ rte_atomic16_init(&dpcon_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next);
+
+ PMD_DRV_LOG(DEBUG, "DPAA2: Added [dpcon.%d]", dpcon_id);
+
+ return 0;
+}
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+	/* Take the first unused DPCON device from the list */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev && rte_atomic16_test_and_set(&dpcon_dev->in_use))
+ break;
+ }
+
+ return dpcon_dev;
+}
+
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+ /* Match DPCON handle and mark it free */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev == dpcon) {
+ rte_atomic16_dec(&dpcon_dev->in_use);
+ return;
+ }
+ }
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpcon_obj = {
+ .object_id = DPAA2_MC_DPCON_DEVID,
+ .create = rte_dpaa2_create_dpcon_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpcon, rte_dpaa2_dpcon_obj);
diff --git a/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map b/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
new file mode 100644
index 00000000..1c0b7559
--- /dev/null
+++ b/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
@@ -0,0 +1,3 @@
+DPDK_17.08 {
+ local: *;
+};
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index aca3d095..e5661ca8 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2017 Cavium Networks. All rights reserved.
+# Copyright(c) 2017 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium Networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
index 3da7cfdd..ba6d5142 100644
--- a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
+++ b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index c80a4437..d829b491 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,6 +30,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <inttypes.h>
+
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
@@ -156,6 +158,8 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
dev->schedule = NULL;
dev->enqueue = ssows_enq;
dev->enqueue_burst = ssows_enq_burst;
+ dev->enqueue_new_burst = ssows_enq_new_burst;
+ dev->enqueue_forward_burst = ssows_enq_fwd_burst;
dev->dequeue = ssows_deq;
dev->dequeue_burst = ssows_deq_burst;
@@ -170,6 +174,7 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
dev_info->max_event_queues = edev->max_event_queues;
@@ -194,6 +199,8 @@ ssovf_configure(const struct rte_eventdev *dev)
ssovf_func_trace();
deq_tmo_ns = conf->dequeue_timeout_ns;
+ if (deq_tmo_ns == 0)
+ deq_tmo_ns = edev->min_deq_timeout_ns;
if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
edev->is_timeout_deq = 1;
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 6e0a3521..1cdc8104 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -34,7 +34,7 @@
#define __SSOVF_EVDEV_H__
#include <rte_config.h>
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
#include <rte_io.h>
#include "rte_pmd_octeontx_ssovf.h"
@@ -190,6 +190,10 @@ ssovf_pmd_priv(const struct rte_eventdev *eventdev)
uint16_t ssows_enq(void *port, const struct rte_event *ev);
uint16_t ssows_enq_burst(void *port,
const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_new_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_fwd_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks);
diff --git a/drivers/event/octeontx/ssovf_mbox.c b/drivers/event/octeontx/ssovf_mbox.c
index 7394a3a9..764414b5 100644
--- a/drivers/event/octeontx/ssovf_mbox.c
+++ b/drivers/event/octeontx/ssovf_mbox.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -172,7 +172,7 @@ mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
error:
ssovf_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
- m->tag_own, rx_hdr.tag, hdr->msg, hdr->coproc, res,
+ m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
hdr->res_code);
return res;
}
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c
index b644ebde..e1c0c6d5 100644
--- a/drivers/event/octeontx/ssovf_probe.c
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index ad3fe684..5e17c7b8 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,7 +32,7 @@
#include "ssovf_worker.h"
-static force_inline void
+static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
const uint64_t event_ptr = ev->u64;
@@ -43,7 +43,7 @@ ssows_new_event(struct ssows *ws, const struct rte_event *ev)
ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
-static force_inline void
+static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
const uint8_t cur_tt = ws->cur_tt;
@@ -72,7 +72,7 @@ ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)
-static force_inline void
+static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
const uint64_t event_ptr = ev->u64;
@@ -95,7 +95,7 @@ ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
-static force_inline void
+static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
const uint8_t grp = ev->queue_id;
@@ -112,39 +112,39 @@ ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
ssows_fwd_group(ws, ev, grp);
}
-static force_inline void
+static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
ssows_swtag_untag(ws);
}
-force_inline uint16_t __hot
+__rte_always_inline uint16_t __hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
struct ssows *ws = port;
RTE_SET_USED(timeout_ticks);
- ssows_swtag_wait(ws);
if (ws->swtag_req) {
ws->swtag_req = 0;
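+		/* Wait for the pending SWTAG to complete only when one
+		 * was actually requested.
+		 */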
+ ssows_swtag_wait(ws);
return 1;
} else {
return ssows_get_work(ws, ev);
}
}
-force_inline uint16_t __hot
+__rte_always_inline uint16_t __hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
struct ssows *ws = port;
uint64_t iter;
uint16_t ret = 1;
- ssows_swtag_wait(ws);
if (ws->swtag_req) {
ws->swtag_req = 0;
+ ssows_swtag_wait(ws);
} else {
ret = ssows_get_work(ws, ev);
for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
@@ -171,7 +171,7 @@ ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
return ssows_deq_timeout(port, ev, timeout_ticks);
}
-force_inline uint16_t __hot
+__rte_always_inline uint16_t __hot
ssows_enq(void *port, const struct rte_event *ev)
{
struct ssows *ws = port;
@@ -179,6 +179,7 @@ ssows_enq(void *port, const struct rte_event *ev)
switch (ev->op) {
case RTE_EVENT_OP_NEW:
+ rte_smp_wmb();
ssows_new_event(ws, ev);
break;
case RTE_EVENT_OP_FORWARD:
@@ -200,6 +201,30 @@ ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
return ssows_enq(port, ev);
}
+uint16_t __hot
+ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ uint16_t i;
+ struct ssows *ws = port;
+
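+	/* Make stores to the events visible before adding work */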
+ rte_smp_wmb();
+ for (i = 0; i < nb_events; i++)
+ ssows_new_event(ws, &ev[i]);
+
+ return nb_events;
+}
+
+uint16_t __hot
+ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ struct ssows *ws = port;
+ RTE_SET_USED(nb_events);
+
+ ssows_forward_event(ws, ev);
+
+ return 1;
+}
+
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id)
{
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 300dfae8..55f72555 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,6 +32,7 @@
#include <rte_common.h>
+#include <rte_branch_prediction.h>
#include "ssovf_evdev.h"
@@ -42,17 +43,13 @@ enum {
SSO_SYNC_EMPTY
};
-#ifndef force_inline
-#define force_inline inline __attribute__((always_inline))
-#endif
-
#ifndef __hot
#define __hot __attribute__((hot))
#endif
/* SSO Operations */
-static force_inline uint16_t
+static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev)
{
uint64_t get_work0, get_work1;
@@ -70,7 +67,7 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
return !!get_work1;
}
-static force_inline void
+static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
const uint8_t new_tt, const uint8_t grp)
{
@@ -80,7 +77,7 @@ ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
const uint8_t new_tt, const uint8_t grp)
{
@@ -92,7 +89,7 @@ ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
SSOW_VHWS_OP_SWTAG_FULL0));
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
uint64_t val;
@@ -101,7 +98,7 @@ ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
uint64_t val;
@@ -110,27 +107,27 @@ ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
ws->cur_tt = SSO_SYNC_UNTAGGED;
}
-static force_inline void
+static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
SSOW_VHWS_OP_UPD_WQP_GRP0));
}
-static force_inline void
+static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
/* Wait for the SWTAG/SWTAG_FULL operation */
diff --git a/drivers/event/skeleton/Makefile b/drivers/event/skeleton/Makefile
index c2b2456b..e6d58711 100644
--- a/drivers/event/skeleton/Makefile
+++ b/drivers/event/skeleton/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium Networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium Networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 800bd76e..bcd20556 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -43,10 +43,9 @@
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
+#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
#include <rte_lcore.h>
#include <rte_vdev.h>
@@ -130,6 +129,7 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev,
dev_info->max_event_port_enqueue_depth = 16;
dev_info->max_num_events = (1ULL << 20);
dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_EVENT_QOS;
}
@@ -427,18 +427,28 @@ static const struct rte_pci_id pci_id_skeleton_map[] = {
},
};
-static struct rte_eventdev_driver pci_eventdev_skeleton_pmd = {
- .pci_drv = {
- .id_table = pci_id_skeleton_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_event_pmd_pci_probe,
- .remove = rte_event_pmd_pci_remove,
- },
- .eventdev_init = skeleton_eventdev_init,
- .dev_private_size = sizeof(struct skeleton_eventdev),
+static int
+event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_probe(pci_drv, pci_dev,
+ sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
+}
+
+static int
+event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
+ .id_table = pci_id_skeleton_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = event_skeleton_pci_probe,
+ .remove = event_skeleton_pci_remove,
};
-RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
/* VDEV based event device */
diff --git a/drivers/event/skeleton/skeleton_eventdev.h b/drivers/event/skeleton/skeleton_eventdev.h
index 1ce62da7..32064721 100644
--- a/drivers/event/skeleton/skeleton_eventdev.h
+++ b/drivers/event/skeleton/skeleton_eventdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -33,7 +33,8 @@
#ifndef __SKELETON_EVENTDEV_H__
#define __SKELETON_EVENTDEV_H__
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_pci.h>
+#include <rte_eventdev_pmd_vdev.h>
#ifdef RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG
#define PMD_DRV_LOG(level, fmt, args...) \
diff --git a/drivers/event/sw/event_ring.h b/drivers/event/sw/event_ring.h
index cdaee95d..734a3b4b 100644
--- a/drivers/event/sw/event_ring.h
+++ b/drivers/event/sw/event_ring.h
@@ -61,10 +61,6 @@ struct qe_ring {
struct rte_event ring[0] __rte_cache_aligned;
};
-#ifndef force_inline
-#define force_inline inline __attribute__((always_inline))
-#endif
-
static inline struct qe_ring *
qe_ring_create(const char *name, unsigned int size, unsigned int socket_id)
{
@@ -91,19 +87,19 @@ qe_ring_destroy(struct qe_ring *r)
rte_free(r);
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_count(const struct qe_ring *r)
{
return r->write_idx - r->read_idx;
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_free_count(const struct qe_ring *r)
{
return r->size - qe_ring_count(r);
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
unsigned int nb_qes, uint16_t *free_count)
{
@@ -130,7 +126,7 @@ qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
return nb_qes;
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
unsigned int nb_qes, uint8_t *ops)
{
@@ -157,7 +153,7 @@ qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
return nb_qes;
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_dequeue_burst(struct qe_ring *r, struct rte_event *qes,
unsigned int nb_qes)
{
diff --git a/drivers/event/sw/iq_ring.h b/drivers/event/sw/iq_ring.h
index d480d156..64cf6784 100644
--- a/drivers/event/sw/iq_ring.h
+++ b/drivers/event/sw/iq_ring.h
@@ -56,10 +56,6 @@ struct iq_ring {
struct rte_event ring[QID_IQ_DEPTH];
};
-#ifndef force_inline
-#define force_inline inline __attribute__((always_inline))
-#endif
-
static inline struct iq_ring *
iq_ring_create(const char *name, unsigned int socket_id)
{
@@ -81,19 +77,19 @@ iq_ring_destroy(struct iq_ring *r)
rte_free(r);
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_count(const struct iq_ring *r)
{
return r->write_idx - r->read_idx;
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_free_count(const struct iq_ring *r)
{
return QID_IQ_MASK - iq_ring_count(r);
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
const uint16_t read = r->read_idx;
@@ -112,7 +108,7 @@ iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
return nb_qes;
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
uint16_t read = r->read_idx;
@@ -132,7 +128,7 @@ iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
}
/* assumes there is space, from a previous dequeue_burst */
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
uint16_t i, read = r->read_idx;
@@ -144,19 +140,19 @@ iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
return nb_qes;
}
-static force_inline const struct rte_event *
+static __rte_always_inline const struct rte_event *
iq_ring_peek(const struct iq_ring *r)
{
return &r->ring[r->read_idx & QID_IQ_MASK];
}
-static force_inline void
+static __rte_always_inline void
iq_ring_pop(struct iq_ring *r)
{
r->read_idx++;
}
-static force_inline int
+static __rte_always_inline int
iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)
{
const uint16_t read = r->read_idx;
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a31aaa66..9c534b7f 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -30,6 +30,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <inttypes.h>
#include <string.h>
#include <rte_vdev.h>
@@ -37,10 +38,11 @@
#include <rte_kvargs.h>
#include <rte_ring.h>
#include <rte_errno.h>
+#include <rte_event_ring.h>
+#include <rte_service_component.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
@@ -90,7 +92,8 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
p->num_ordered_qids++;
p->num_qids_mapped++;
- } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
+ } else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
+ q->type == RTE_SCHED_TYPE_PARALLEL) {
p->num_qids_mapped++;
}
@@ -138,7 +141,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
{
struct sw_evdev *sw = sw_pmd_priv(dev);
struct sw_port *p = &sw->ports[port_id];
- char buf[QE_RING_NAMESIZE];
+ char buf[RTE_RING_NAMESIZE];
unsigned int i;
struct rte_event_dev_info info;
@@ -159,10 +162,19 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
p->id = port_id;
p->sw = sw;
- snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
- "rx_worker_ring");
- p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
- dev->data->socket_id);
+ /* check to see if the ring exists - port_setup() can be called
+ * multiple times legally (assuming the device is stopped). If the ring
+ * exists, free it so it gets re-created with the correct size
+ */
+ snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+ port_id, "rx_worker_ring");
+ struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
+ p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
if (p->rx_worker_ring == NULL) {
SW_LOG_ERR("Error creating RX worker ring for port %d\n",
port_id);
@@ -171,12 +183,18 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
p->inflight_max = conf->new_event_threshold;
- snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
- "cq_worker_ring");
- p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
- dev->data->socket_id);
+ /* check if ring exists, same as rx_worker above */
+ snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
+ port_id, "cq_worker_ring");
+ existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
+ p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
if (p->cq_worker_ring == NULL) {
- qe_ring_destroy(p->rx_worker_ring);
+ rte_event_ring_free(p->rx_worker_ring);
SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
port_id);
return -1;
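
The lookup-then-free step above is what makes repeated port_setup() calls safe: a named ring persists in the shared memzone namespace, so it must be released before a same-named ring of a different size can be created. A standalone sketch of the pattern, assuming the rte_event_ring API used by this patch (the name, count and flags are illustrative):

#include <rte_event_ring.h>

static struct rte_event_ring *
recreate_event_ring(const char *name, unsigned int count, int socket_id)
{
	/* release any ring left behind by a previous setup call */
	struct rte_event_ring *old = rte_event_ring_lookup(name);
	if (old != NULL)
		rte_event_ring_free(old);

	/* single producer/consumer, usable size exactly 'count' */
	return rte_event_ring_create(name, count, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
}
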
@@ -202,8 +220,8 @@ sw_port_release(void *port)
if (p == NULL)
return;
- qe_ring_destroy(p->rx_worker_ring);
- qe_ring_destroy(p->cq_worker_ring);
+ rte_event_ring_free(p->rx_worker_ring);
+ rte_event_ring_free(p->cq_worker_ring);
memset(p, 0, sizeof(*p));
}
@@ -435,6 +453,7 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_EVENT_QOS),
};
@@ -509,8 +528,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
fprintf(f, "\n");
if (p->rx_worker_ring) {
- uint64_t used = qe_ring_count(p->rx_worker_ring);
- uint64_t space = qe_ring_free_count(p->rx_worker_ring);
+ uint64_t used = rte_event_ring_count(p->rx_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->rx_worker_ring);
const char *col = (space == 0) ? COL_RED : COL_RESET;
fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
PRIu64 COL_RESET"\n", col, used, space);
@@ -518,8 +538,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
fprintf(f, "\trx ring not initialized.\n");
if (p->cq_worker_ring) {
- uint64_t used = qe_ring_count(p->cq_worker_ring);
- uint64_t space = qe_ring_free_count(p->cq_worker_ring);
+ uint64_t used = rte_event_ring_count(p->cq_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->cq_worker_ring);
const char *col = (space == 0) ? COL_RED : COL_RESET;
fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
PRIu64 COL_RESET"\n", col, used, space);
@@ -559,12 +580,13 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
inflights += qid->fids[flow].pcount;
}
- uint32_t cq;
- fprintf(f, "\tInflights: %u\tFlows pinned per port: ",
- inflights);
- for (cq = 0; cq < sw->port_count; cq++)
- fprintf(f, "%d ", affinities_per_port[cq]);
- fprintf(f, "\n");
+ uint32_t port;
+ fprintf(f, "\tPer Port Stats:\n");
+ for (port = 0; port < sw->port_count; port++) {
+ fprintf(f, "\t Port %d: Pkts: %"PRIu64, port,
+ qid->to_port[port]);
+ fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
+ }
uint32_t iq;
uint32_t iq_printed = 0;
@@ -593,6 +615,13 @@ sw_start(struct rte_eventdev *dev)
{
unsigned int i, j;
struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ /* check that a service core is mapped to this service */
+ struct rte_service_spec *s = rte_service_get_by_name(sw->service_name);
+ if (!rte_service_is_running(s))
+ SW_LOG_ERR("Warning: no service core enabled on service %s\n",
+ s->name);
+
/* check all ports are set up */
for (i = 0; i < sw->port_count; i++)
if (sw->ports[i].rx_worker_ring == NULL) {
@@ -695,6 +724,14 @@ set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
return 0;
}
+
+static int32_t sw_sched_service_func(void *args)
+{
+ struct rte_eventdev *dev = args;
+ sw_event_schedule(dev);
+ return 0;
+}
+
static int
sw_probe(struct rte_vdev_device *vdev)
{
@@ -792,6 +829,8 @@ sw_probe(struct rte_vdev_device *vdev)
dev->dev_ops = &evdev_sw_ops;
dev->enqueue = sw_event_enqueue;
dev->enqueue_burst = sw_event_enqueue_burst;
+ dev->enqueue_new_burst = sw_event_enqueue_burst;
+ dev->enqueue_forward_burst = sw_event_enqueue_burst;
dev->dequeue = sw_event_dequeue;
dev->dequeue_burst = sw_event_dequeue_burst;
dev->schedule = sw_event_schedule;
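
All three enqueue entry points can be wired to the same handler because sw_event_enqueue_burst() already branches on each event's op field; the specialized entry points exist so that applications with a uniform op type can avoid setting it per event. A hedged sketch of the application side (dev_id, port_id and the event array are placeholders, not part of the patch):

#include <rte_eventdev.h>

/* Illustrative wrapper: inject a burst of brand-new events. */
static uint16_t
inject_new_events(uint8_t dev_id, uint8_t port_id,
		struct rte_event *evs, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; i++)
		evs[i].op = RTE_EVENT_OP_NEW;
	/* for the sw PMD this lands in sw_event_enqueue_burst() */
	return rte_event_enqueue_new_burst(dev_id, port_id, evs, n);
}
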
@@ -806,6 +845,22 @@ sw_probe(struct rte_vdev_device *vdev)
sw->credit_update_quanta = credit_quanta;
sw->sched_quanta = sched_quanta;
+ /* register service with EAL */
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "%s_service", name);
+ snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
+ name);
+ service.socket_id = socket_id;
+ service.callback = sw_sched_service_func;
+ service.callback_userdata = (void *)dev;
+
+ int32_t ret = rte_service_register(&service);
+ if (ret) {
+ SW_LOG_ERR("service register() failed");
+ return -ENOEXEC;
+ }
+
return 0;
}
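
The PMD only registers the service and warns at start time; mapping a core to actually run sw_sched_service_func() is left to the application or to EAL arguments. A minimal check from the application side, reusing only the service calls this patch itself relies on (the exact header providing rte_service_get_by_name() is an assumption here):

#include <rte_service.h>

/* Returns non-zero when a service core is set up to run the named
 * service, 0 otherwise, and -1 if the service is not registered.
 * The name follows the "%s_service" snprintf in sw_probe() above. */
static int
sw_sched_service_running(const char *service_name)
{
	struct rte_service_spec *s = rte_service_get_by_name(service_name);

	if (s == NULL)
		return -1;
	return rte_service_is_running(s);
}
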
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 61c671d6..71de3c14 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -34,7 +34,7 @@
#define _SW_EVDEV_H_
#include <rte_eventdev.h>
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
#include <rte_atomic.h>
#define SW_DEFAULT_CREDIT_QUANTA 32
@@ -59,6 +59,7 @@
#define EVENTDEV_NAME_SW_PMD event_sw
#define SW_PMD_NAME RTE_STR(event_sw)
+#define SW_PMD_NAME_MAX 64
#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
@@ -149,6 +150,7 @@ struct sw_qid {
uint32_t cq_num_mapped_cqs;
uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
uint32_t cq_map[SW_PORTS_MAX];
+ uint64_t to_port[SW_PORTS_MAX];
/* Track flow ids for atomic load balancing */
struct sw_fid_t fids[SW_QID_NUM_FIDS];
@@ -189,9 +191,9 @@ struct sw_port {
int16_t num_ordered_qids;
/** Ring and buffer for pulling events from workers for scheduling */
- struct qe_ring *rx_worker_ring __rte_cache_aligned;
+ struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
/** Ring and buffer for pushing packets to workers after scheduling */
- struct qe_ring *cq_worker_ring;
+ struct rte_event_ring *cq_worker_ring;
/* hole */
@@ -275,6 +277,8 @@ struct sw_evdev {
/* store num stats and offset of the stats for each queue */
uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ char service_name[SW_PMD_NAME_MAX];
};
static inline struct sw_evdev *
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index a333a6f0..8a2c9d4f 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -32,9 +32,9 @@
#include <rte_ring.h>
#include <rte_hash_crc.h>
+#include <rte_event_ring.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
#define SW_IQS_MASK (SW_IQS_MAX-1)
@@ -119,11 +119,12 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
p->stats.tx_pkts++;
qid->stats.tx_pkts++;
+ qid->to_port[cq]++;
/* if we just filled in the last slot, flush the buffer */
if (sw->cq_ring_space[cq] == 0) {
- struct qe_ring *worker = p->cq_worker_ring;
- qe_ring_enqueue_burst(worker, p->cq_buf,
+ struct rte_event_ring *worker = p->cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, p->cq_buf,
p->cq_buf_count,
&sw->cq_ring_space[cq]);
p->cq_buf_count = 0;
@@ -170,7 +171,8 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
cq = qid->cq_map[cq_idx];
if (++cq_idx == qid->cq_num_mapped_cqs)
cq_idx = 0;
- } while (qe_ring_free_count(sw->ports[cq].cq_worker_ring) == 0 ||
+ } while (rte_event_ring_free_count(
+ sw->ports[cq].cq_worker_ring) == 0 ||
sw->ports[cq].inflights == SW_PORT_HIST_LIST);
struct sw_port *p = &sw->ports[cq];
@@ -362,17 +364,17 @@ sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
return pkts_iter;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
{
RTE_SET_USED(sw);
- struct qe_ring *worker = port->rx_worker_ring;
+ struct rte_event_ring *worker = port->rx_worker_ring;
port->pp_buf_start = 0;
- port->pp_buf_count = qe_ring_dequeue_burst(worker, port->pp_buf,
- RTE_DIM(port->pp_buf));
+ port->pp_buf_count = rte_event_ring_dequeue_burst(worker, port->pp_buf,
+ RTE_DIM(port->pp_buf), NULL);
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
__pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
{
static struct reorder_buffer_entry dummy_rob;
@@ -585,8 +587,8 @@ sw_event_schedule(struct rte_eventdev *dev)
* worker cores: aka, do the ring transfers batched.
*/
for (i = 0; i < sw->port_count; i++) {
- struct qe_ring *worker = sw->ports[i].cq_worker_ring;
- qe_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
+ struct rte_event_ring *worker = sw->ports[i].cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
sw->ports[i].cq_buf_count,
&sw->cq_ring_space[i]);
sw->ports[i].cq_buf_count = 0;
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index 9cb6bef5..d76d3d5c 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -32,9 +32,9 @@
#include <rte_atomic.h>
#include <rte_cycles.h>
+#include <rte_event_ring.h>
#include "sw_evdev.h"
-#include "event_ring.h"
#define PORT_ENQUEUE_MAX_BURST_SIZE 64
@@ -52,13 +52,31 @@ sw_event_release(struct sw_port *p, uint8_t index)
ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];
uint16_t free_count;
- qe_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
+ rte_event_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
/* each release returns one credit */
p->outstanding_releases--;
p->inflight_credits++;
}
+/*
+ * Special case of rte_event_ring enqueue, overriding the op member of each
+ * event written to the ring.
+ */
+static inline unsigned int
+enqueue_burst_with_ops(struct rte_event_ring *r, const struct rte_event *events,
+ unsigned int n, uint8_t *ops)
+{
+ struct rte_event tmp_evs[PORT_ENQUEUE_MAX_BURST_SIZE];
+ unsigned int i;
+
+ memcpy(tmp_evs, events, n * sizeof(events[0]));
+ for (i = 0; i < n; i++)
+ tmp_evs[i].op = ops[i];
+
+ return rte_event_ring_enqueue_burst(r, tmp_evs, n, NULL);
+}
+
uint16_t
sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
{
@@ -87,6 +105,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
return 0;
}
+ uint32_t forwards = 0;
for (i = 0; i < num; i++) {
int op = ev[i].op;
int outstanding = p->outstanding_releases > 0;
@@ -95,6 +114,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
outstanding;
+ forwards += (op == RTE_EVENT_OP_FORWARD);
new_ops[i] = sw_qe_flag_map[op];
new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
@@ -113,8 +133,11 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
}
}
+ /* handle directed port forward credits */
+ p->inflight_credits -= forwards * p->is_directed;
+
/* returns number of events actually enqueued */
- uint32_t enq = qe_ring_enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
+ uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
new_ops);
if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
uint64_t burst_ticks = rte_get_timer_cycles() -
@@ -141,7 +164,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
RTE_SET_USED(wait);
struct sw_port *p = (void *)port;
struct sw_evdev *sw = (void *)p->sw;
- struct qe_ring *ring = p->cq_worker_ring;
+ struct rte_event_ring *ring = p->cq_worker_ring;
uint32_t credit_update_quanta = sw->credit_update_quanta;
/* check that all previous dequeues have been released */
@@ -153,7 +176,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
}
/* returns number of events actually dequeued */
- uint16_t ndeq = qe_ring_dequeue_burst(ring, ev, num);
+ uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
if (unlikely(ndeq == 0)) {
p->outstanding_releases = 0;
p->zero_polls++;
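
Unlike qe_ring, whose enqueue took a mandatory free-count out-parameter, the rte_event_ring burst calls make that pointer optional: pass NULL to skip the report (as several hunks above do), or a uint16_t to receive the free space left after enqueue and the events still available after dequeue. A short sketch, assuming two already-created rings (names are illustrative):

#include <rte_common.h>
#include <rte_event_ring.h>

static void
drain_into(struct rte_event_ring *src, struct rte_event_ring *dst)
{
	struct rte_event burst[32];
	uint16_t space, avail;

	/* 'avail' reports how many events remain in 'src' afterwards */
	unsigned int n = rte_event_ring_dequeue_burst(src, burst,
			RTE_DIM(burst), &avail);
	/* 'space' reports the free slots left in 'dst' afterwards */
	rte_event_ring_enqueue_burst(dst, burst, n, &space);
}
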
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index c7b1abe8..8cb6d88d 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -30,9 +30,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_event_ring.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
enum xstats_type {
/* common stats */
@@ -57,6 +57,7 @@ enum xstats_type {
iq_used,
/* qid port mapping specific */
pinned,
+ pkts, /* note: qid-to-port pkts */
};
typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
@@ -104,10 +105,10 @@ get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
case calls: return p->total_polls;
case credits: return p->inflight_credits;
case poll_return: return p->zero_polls;
- case rx_used: return qe_ring_count(p->rx_worker_ring);
- case rx_free: return qe_ring_free_count(p->rx_worker_ring);
- case tx_used: return qe_ring_count(p->cq_worker_ring);
- case tx_free: return qe_ring_free_count(p->cq_worker_ring);
+ case rx_used: return rte_event_ring_count(p->rx_worker_ring);
+ case rx_free: return rte_event_ring_free_count(p->rx_worker_ring);
+ case tx_used: return rte_event_ring_count(p->cq_worker_ring);
+ case tx_free: return rte_event_ring_free_count(p->cq_worker_ring);
default: return -1;
}
}
@@ -179,6 +180,8 @@ get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
return pin;
} while (0);
break;
+ case pkts:
+ return qid->to_port[port];
default: return -1;
}
}
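
With the pkts case wired in, the new qid-to-port counter becomes visible through the standard eventdev xstats API under the per-queue mode. A hedged sketch of fetching it by name from an application (dev_id, qid and port are placeholders; the name format mirrors the "qid_%u_port_%u_%s" snprintf in sw_xstats_init() below):

#include <stdio.h>
#include <rte_eventdev.h>

/* Illustrative: read the new qid-to-port packet counter by name. */
static uint64_t
read_qid_port_pkts(uint8_t dev_id, unsigned int qid, unsigned int port)
{
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
	unsigned int id;

	snprintf(name, sizeof(name), "qid_%u_port_%u_packets", qid, port);
	return rte_event_dev_xstats_by_name_get(dev_id, name, &id);
}
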
@@ -246,8 +249,11 @@ sw_xstats_init(struct sw_evdev *sw)
static const enum xstats_type qid_iq_types[] = { iq_used };
/* reset allowed */
- static const char * const qid_port_stats[] = { "pinned_flows" };
- static const enum xstats_type qid_port_types[] = { pinned };
+ static const char * const qid_port_stats[] = { "pinned_flows",
+ "packets"
+ };
+ static const enum xstats_type qid_port_types[] = { pinned, pkts };
+ static const uint8_t qid_port_reset_allowed[] = {0, 1};
/* reset allowed */
/* ---- end of stat definitions ---- */
@@ -312,8 +318,9 @@ sw_xstats_init(struct sw_evdev *sw)
port, port_stats[i]);
}
- for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
- SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
+ for (bkt = 0; bkt < (rte_event_ring_get_capacity(
+ sw->ports[port].cq_worker_ring) >>
+ SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
sw->xstats[stat] = (struct sw_xstats_entry){
.fn = get_port_bucket_stat,
@@ -376,7 +383,8 @@ sw_xstats_init(struct sw_evdev *sw)
.stat = qid_port_types[i],
.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
.extra_arg = port,
- .reset_allowed = 0,
+ .reset_allowed =
+ qid_port_reset_allowed[i],
};
snprintf(sname, sizeof(sname),
"qid_%u_port_%u_%s",