Diffstat (limited to 'drivers/event')
-rw-r--r--  drivers/event/Makefile                         |   4
-rw-r--r--  drivers/event/dpaa/dpaa_eventdev.c             |   4
-rw-r--r--  drivers/event/dpaa/dpaa_eventdev.h             |   2
-rw-r--r--  drivers/event/dpaa/meson.build                 |  10
-rw-r--r--  drivers/event/dpaa2/Makefile                   |   6
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.c           |  86
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.h           |   1
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev_logs.h      |  10
-rw-r--r--  drivers/event/dpaa2/dpaa2_hw_dpcon.c           |  15
-rw-r--r--  drivers/event/dpaa2/meson.build                |  11
-rw-r--r--  drivers/event/meson.build                      |   2
-rw-r--r--  drivers/event/octeontx/Makefile                |  12
-rw-r--r--  drivers/event/octeontx/meson.build             |   9
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.c           |  59
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.h           |  17
-rw-r--r--  drivers/event/octeontx/ssovf_evdev_selftest.c  |  36
-rw-r--r--  drivers/event/octeontx/ssovf_probe.c           | 290
-rw-r--r--  drivers/event/octeontx/ssovf_worker.c          |  17
-rw-r--r--  drivers/event/octeontx/timvf_evdev.c           | 407
-rw-r--r--  drivers/event/octeontx/timvf_evdev.h           | 225
-rw-r--r--  drivers/event/octeontx/timvf_probe.c           | 148
-rw-r--r--  drivers/event/octeontx/timvf_worker.c          | 199
-rw-r--r--  drivers/event/octeontx/timvf_worker.h          | 443
-rw-r--r--  drivers/event/opdl/opdl_evdev.c                |   2
-rw-r--r--  drivers/event/opdl/opdl_evdev_init.c           |   3
-rw-r--r--  drivers/event/opdl/opdl_ring.c                 |  97
-rw-r--r--  drivers/event/opdl/opdl_ring.h                 |  16
-rw-r--r--  drivers/event/skeleton/skeleton_eventdev.c     |   2
-rw-r--r--  drivers/event/sw/sw_evdev.c                    |  33
-rw-r--r--  drivers/event/sw/sw_evdev_scheduler.c          |  17
-rw-r--r--  drivers/event/sw/sw_evdev_worker.c             |   6
31 files changed, 2055 insertions, 134 deletions
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index c3d89a15..f301d8dc 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -7,8 +7,12 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
+endif
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 00068015..5443ef56 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -29,7 +29,7 @@
#include <rte_event_eth_rx_adapter.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
-#include <rte_cycles_64.h>
+#include <rte_cycles.h>
#include <dpaa_ethdev.h>
#include "dpaa_eventdev.h"
@@ -571,7 +571,7 @@ dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
return 0;
}
-static const struct rte_eventdev_ops dpaa_eventdev_ops = {
+static struct rte_eventdev_ops dpaa_eventdev_ops = {
.dev_infos_get = dpaa_event_dev_info_get,
.dev_configure = dpaa_event_dev_configure,
.dev_start = dpaa_event_dev_start,
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
index 918fe35c..583e46ca 100644
--- a/drivers/event/dpaa/dpaa_eventdev.h
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -26,7 +26,7 @@
#define DPAA_EVENT_MAX_QUEUE_FLOWS 2048
#define DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
#define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
-#define DPAA_EVENT_MAX_EVENT_PORT RTE_MAX_LCORE
+#define DPAA_EVENT_MAX_EVENT_PORT RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
#define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100UL
#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID ((uint64_t)-1)
diff --git a/drivers/event/dpaa/meson.build b/drivers/event/dpaa/meson.build
new file mode 100644
index 00000000..0914f858
--- /dev/null
+++ b/drivers/event/dpaa/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['pmd_dpaa']
+sources = files('dpaa_eventdev.c')
+
+allow_experimental_apis = true
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index b26862cd..5e1a6320 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -18,7 +18,8 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
-LDLIBS += -lrte_eal -lrte_eventdev -lrte_bus_fslmc -lrte_pmd_dpaa2
+LDLIBS += -lrte_eal -lrte_eventdev
+LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2
LDLIBS += -lrte_bus_vdev
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
@@ -28,6 +29,9 @@ EXPORT_MAP := rte_pmd_dpaa2_event_version.map
LIBABIVER := 1
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
#
# all source are stored in SRCS-y
#
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index c3e6fbff..cd801bfb 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -72,7 +72,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
+ DPAA2_EVENTDEV_ERR("Failure in affining portal");
return 0;
}
}
@@ -87,10 +87,10 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
const struct rte_event *event = &ev[num_tx + loop];
if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
- fqid = evq_info->dpci->queue[
+ fqid = evq_info->dpci->rx_queue[
DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
else
- fqid = evq_info->dpci->queue[
+ fqid = evq_info->dpci->rx_queue[
DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
/* Prepare enqueue descriptor */
@@ -122,11 +122,12 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
if (!loop)
return num_tx;
frames_to_send = loop;
- DPAA2_EVENTDEV_ERR("Unable to allocate memory");
+ DPAA2_EVENTDEV_ERR(
+ "Unable to allocate event object");
goto send_partial;
}
rte_memcpy(ev_temp, event, sizeof(struct rte_event));
- DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
+ DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
DPAA2_SET_FD_LEN((&fd_arr[loop]),
sizeof(struct rte_event));
}
@@ -153,26 +154,12 @@ dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
struct epoll_event epoll_ev;
- int ret, i = 0;
qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
QBMAN_SWP_INTERRUPT_DQRI);
-RETRY:
- ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
+ epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
&epoll_ev, 1, timeout_ticks);
- if (ret < 1) {
- /* sometimes due to some spurious interrupts epoll_wait fails
- * with errno EINTR. so here we are retrying epoll_wait in such
- * case to avoid the problem.
- */
- if (errno == EINTR) {
- DPAA2_EVENTDEV_DEBUG("epoll_wait fails\n");
- if (i++ > 10)
- DPAA2_EVENTDEV_DEBUG("Dequeue burst Failed\n");
- goto RETRY;
- }
- }
}
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
@@ -182,7 +169,7 @@ static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
struct rte_event *ev)
{
struct rte_event *ev_temp =
- (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
RTE_SET_USED(rxq);
@@ -199,7 +186,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
struct rte_event *ev)
{
struct rte_event *ev_temp =
- (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
RTE_SET_USED(swp);
@@ -227,7 +214,7 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
+ DPAA2_EVENTDEV_ERR("Failure in affining portal");
return 0;
}
}
@@ -258,12 +245,12 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
qbman_swp_prefetch_dqrr_next(swp);
fd = qbman_result_DQ_fd(dq);
- rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
+ rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
if (rxq) {
rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
} else {
qbman_swp_dqrr_consume(swp, dq);
- DPAA2_EVENTDEV_ERR("Null Return VQ received\n");
+ DPAA2_EVENTDEV_ERR("Null Return VQ received");
return 0;
}
@@ -335,7 +322,7 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
priv->event_dev_cfg = conf->event_dev_cfg;
DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
- dev->data->dev_id);
+ dev->data->dev_id);
return 0;
}
@@ -473,7 +460,6 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
0, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id);
- evq_info->link = 0;
}
return (int)nb_unlinks;
@@ -494,23 +480,20 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
for (i = 0; i < nb_links; i++) {
evq_info = &priv->evq_info[queues[i]];
- if (evq_info->link)
- continue;
ret = dpio_add_static_dequeue_channel(
dpaa2_portal->dpio_dev->dpio,
CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id, &channel_index);
if (ret < 0) {
- DPAA2_EVENTDEV_ERR("Static dequeue cfg failed with ret: %d\n",
- ret);
+ DPAA2_EVENTDEV_ERR(
+ "Static dequeue config failed: err(%d)", ret);
goto err;
}
qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
channel_index, 1);
evq_info->dpcon->channel_index = channel_index;
- evq_info->link = 1;
}
RTE_SET_USED(priorities);
@@ -524,7 +507,6 @@ err:
dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
0, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id);
- evq_info->link = 0;
}
return ret;
}
@@ -587,8 +569,8 @@ dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_attach(eth_dev, i,
dpcon_id, queue_conf);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
- ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
goto fail;
}
}
@@ -620,7 +602,8 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
dpcon_id, queue_conf);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
return ret;
}
return 0;
@@ -639,8 +622,8 @@ dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
ret = dpaa2_eth_eventq_detach(eth_dev, i);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
- ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
return ret;
}
}
@@ -662,7 +645,8 @@ dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
return ret;
}
@@ -693,7 +677,7 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
return 0;
}
-static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
+static struct rte_eventdev_ops dpaa2_eventdev_ops = {
.dev_infos_get = dpaa2_eventdev_info_get,
.dev_configure = dpaa2_eventdev_configure,
.dev_start = dpaa2_eventdev_start,
@@ -730,20 +714,21 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;
- dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
dpaa2_eventdev_process_parallel;
- dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
dpaa2_eventdev_process_atomic;
for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
- rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
+ rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
ret = dpci_set_rx_queue(&dpci_dev->dpci,
CMD_PRI_LOW,
dpci_dev->token, i,
&rx_queue_cfg);
if (ret) {
DPAA2_EVENTDEV_ERR(
- "set_rx_q failed with err code: %d", ret);
+ "DPCI Rx queue setup failed: err(%d)",
+ ret);
return ret;
}
}
@@ -763,7 +748,7 @@ dpaa2_eventdev_create(const char *name)
sizeof(struct dpaa2_eventdev),
rte_socket_id());
if (eventdev == NULL) {
- DPAA2_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
+ DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
goto fail;
}
@@ -798,7 +783,7 @@ dpaa2_eventdev_create(const char *name)
ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
if (ret) {
DPAA2_EVENTDEV_ERR(
- "dpci setup failed with err code: %d", ret);
+ "DPCI setup failed: err(%d)", ret);
return ret;
}
priv->max_event_queues++;
@@ -836,3 +821,12 @@ static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
};
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
+
+RTE_INIT(dpaa2_eventdev_init_log);
+static void
+dpaa2_eventdev_init_log(void)
+{
+ dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
+ if (dpaa2_logtype_event >= 0)
+ rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
+}
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index 91c8f2a3..229f66af 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -63,7 +63,6 @@ struct evq_info_t {
struct dpaa2_dpci_dev *dpci;
/* Configuration provided by the user */
uint32_t event_queue_cfg;
- uint8_t link;
};
struct dpaa2_eventdev {
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
index 7d250c3f..48f1abd1 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -9,13 +9,15 @@
extern int dpaa2_logtype_event;
#define DPAA2_EVENTDEV_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "%s(): " fmt "\n", \
- __func__, ##args)
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "dpaa2_event: " \
+ fmt "\n", ##args)
+
+#define DPAA2_EVENTDEV_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_event, "dpaa2_event: %s(): " \
+ fmt "\n", __func__, ##args)
#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_LOG(DEBUG, " >>")
-#define DPAA2_EVENTDEV_DEBUG(fmt, args...) \
- DPAA2_EVENTDEV_LOG(DEBUG, fmt, ## args)
#define DPAA2_EVENTDEV_INFO(fmt, args...) \
DPAA2_EVENTDEV_LOG(INFO, fmt, ## args)
#define DPAA2_EVENTDEV_ERR(fmt, args...) \
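	DPAA2_EVENTDEV_LOG(ERR, fmt, ## args)

Note on the reworked log macros: only the DEBUG variant now prints the calling
function, while all other levels carry a fixed "dpaa2_event: " prefix. As a
minimal sketch (not part of the patch), assuming dpaa2_logtype_event is
registered as in the dpaa2_eventdev.c hunk above, the two macros expand roughly
as:

    /* DPAA2_EVENTDEV_ERR("Failure in affining portal"); */
    rte_log(RTE_LOG_ERR, dpaa2_logtype_event,
            "dpaa2_event: " "Failure in affining portal" "\n");

    /* DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d", dev_id); */
    rte_log(RTE_LOG_DEBUG, dpaa2_logtype_event,
            "dpaa2_event: %s(): " "Configured eventdev devid=%d" "\n",
            __func__, dev_id);
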
diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
index f2377b98..d64e588a 100644
--- a/drivers/event/dpaa2/dpaa2_hw_dpcon.c
+++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -20,11 +20,11 @@
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
-#include <fslmc_logs.h>
#include <rte_fslmc.h>
#include <mc/fsl_dpcon.h>
#include <portal/dpaa2_hw_pvt.h>
#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev);
static struct dpcon_dev_list dpcon_dev_list
@@ -42,7 +42,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
/* Allocate DPAA2 dpcon handle */
dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0);
if (!dpcon_node) {
- PMD_DRV_LOG(ERR, "Memory allocation failed for DPCON Device");
+ DPAA2_EVENTDEV_ERR(
+ "Memory allocation failed for dpcon device");
return -1;
}
@@ -51,8 +52,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
ret = dpcon_open(&dpcon_node->dpcon,
CMD_PRI_LOW, dpcon_id, &dpcon_node->token);
if (ret) {
- PMD_DRV_LOG(ERR, "Resource alloc failure with err code: %d",
- ret);
+ DPAA2_EVENTDEV_ERR("Unable to open dpcon device: err(%d)",
+ ret);
rte_free(dpcon_node);
return -1;
}
@@ -61,8 +62,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
ret = dpcon_get_attributes(&dpcon_node->dpcon,
CMD_PRI_LOW, dpcon_node->token, &attr);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "Reading device failed with err code: %d",
- ret);
+ DPAA2_EVENTDEV_ERR("dpcon attribute fetch failed: err(%d)",
+ ret);
rte_free(dpcon_node);
return -1;
}
@@ -75,8 +76,6 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next);
- RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpcon.%d]\n", dpcon_id);
-
return 0;
}
diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
new file mode 100644
index 00000000..de7a4615
--- /dev/null
+++ b/drivers/event/dpaa2/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['bus_vdev', 'pmd_dpaa2']
+sources = files('dpaa2_hw_dpcon.c',
+ 'dpaa2_eventdev.c')
+
+allow_experimental_apis = true
diff --git a/drivers/event/meson.build b/drivers/event/meson.build
index d7bc4854..e9511993 100644
--- a/drivers/event/meson.build
+++ b/drivers/event/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['skeleton', 'sw', 'octeontx']
+drivers = ['dpaa', 'dpaa2', 'octeontx', 'skeleton', 'sw']
std_deps = ['eventdev', 'kvargs']
config_flag_fmt = 'RTE_LIBRTE_@0@_EVENTDEV_PMD'
driver_name_fmt = 'rte_pmd_@0@_event'
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index 0e49efd8..90ad2217 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -10,10 +10,12 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_octeontx_ssovf.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
+CFLAGS += -DALLOW_EXPERIMENTAL_API
-LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx -lrte_pmd_octeontx
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_common_octeontx -lrte_pmd_octeontx
LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs
LDLIBS += -lrte_bus_vdev
@@ -27,18 +29,26 @@ LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev_selftest.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_probe.c
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
+CFLAGS_timvf_worker.o += -fno-prefetch-loop-arrays
ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
CFLAGS_ssovf_worker.o += -Ofast
+CFLAGS_timvf_worker.o += -Ofast
else
CFLAGS_ssovf_worker.o += -O3 -ffast-math
+CFLAGS_timvf_worker.o += -O3 -ffast-math
endif
else
CFLAGS_ssovf_worker.o += -Ofast
+CFLAGS_timvf_worker.o += -Ofast
endif
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/octeontx/meson.build b/drivers/event/octeontx/meson.build
index 358fc9fc..04185533 100644
--- a/drivers/event/octeontx/meson.build
+++ b/drivers/event/octeontx/meson.build
@@ -3,7 +3,12 @@
sources = files('ssovf_worker.c',
'ssovf_evdev.c',
- 'ssovf_evdev_selftest.c'
+ 'ssovf_evdev_selftest.c',
+ 'ssovf_probe.c',
+ 'timvf_worker.c',
+ 'timvf_evdev.c',
+ 'timvf_probe.c'
)
-deps += ['mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
+allow_experimental_apis = true
+deps += ['common_octeontx', 'mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index a1086077..2df70b52 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -18,8 +18,10 @@
#include <rte_bus_vdev.h>
#include "ssovf_evdev.h"
+#include "timvf_evdev.h"
int otx_logtype_ssovf;
+static uint8_t timvf_enable_stats;
RTE_INIT(otx_ssovf_init_log);
static void
@@ -49,7 +51,7 @@ ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
hdr.vfid = 0;
memset(info, 0, len);
- return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
+ return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}
struct ssovf_mbox_getwork_wait {
@@ -69,7 +71,7 @@ ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
hdr.vfid = 0;
tmo_set.wait_ns = timeout_ns;
- ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
+ ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
if (ret)
ssovf_log_err("Failed to set getwork timeout(%d)", ret);
@@ -99,7 +101,7 @@ ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
grp.affinity = 0xff;
grp.priority = prio / 32; /* Normalize to 0 to 7 */
- ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
+ ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
if (ret)
ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);
@@ -125,7 +127,7 @@ ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
memset(&ns2iter, 0, len);
ns2iter.wait_ns = ns;
- ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
+ ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
if (ret < 0 || (ret != len)) {
ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
return -EIO;
@@ -276,7 +278,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
return -ENOMEM;
}
- ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
+ ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
if (ws->base == NULL) {
rte_free(ws);
ssovf_log_err("Failed to get hws base addr port=%d", port_id);
@@ -290,7 +292,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
ws->port = port_id;
for (q = 0; q < edev->nb_event_queues; q++) {
- ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
+ ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
if (ws->grps[q] == NULL) {
rte_free(ws);
ssovf_log_err("Failed to get grp%d base addr", q);
@@ -529,9 +531,9 @@ ssovf_start(struct rte_eventdev *dev)
for (i = 0; i < edev->nb_event_queues; i++) {
/* Consume all the events through HWS0 */
- ssows_flush_events(dev->data->ports[0], i);
+ ssows_flush_events(dev->data->ports[0], i, NULL, NULL);
- base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
base += SSO_VHGRP_QCTL;
ssovf_write64(1, base); /* Enable SSO group */
}
@@ -541,6 +543,16 @@ ssovf_start(struct rte_eventdev *dev)
}
static void
+ssows_handle_event(void *arg, struct rte_event event)
+{
+ struct rte_eventdev *dev = arg;
+
+ if (dev->dev_ops->dev_stop_flush != NULL)
+ dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
+ dev->data->dev_stop_flush_arg);
+}
+
+static void
ssovf_stop(struct rte_eventdev *dev)
{
struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
@@ -557,9 +569,10 @@ ssovf_stop(struct rte_eventdev *dev)
for (i = 0; i < edev->nb_event_queues; i++) {
/* Consume all the events through HWS0 */
- ssows_flush_events(dev->data->ports[0], i);
+ ssows_flush_events(dev->data->ports[0], i,
+ ssows_handle_event, dev);
- base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
base += SSO_VHGRP_QCTL;
ssovf_write64(0, base); /* Disable SSO group */
}
@@ -590,8 +603,16 @@ ssovf_selftest(const char *key __rte_unused, const char *value,
return 0;
}
+static int
+ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+{
+ return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
+ timvf_enable_stats);
+}
+
/* Initialize and register event driver with DPDK Application */
-static const struct rte_eventdev_ops ssovf_ops = {
+static struct rte_eventdev_ops ssovf_ops = {
.dev_infos_get = ssovf_info_get,
.dev_configure = ssovf_configure,
.queue_def_conf = ssovf_queue_def_conf,
@@ -610,6 +631,8 @@ static const struct rte_eventdev_ops ssovf_ops = {
.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+ .timer_adapter_caps_get = ssovf_timvf_caps_get,
+
.dev_selftest = test_eventdev_octeontx,
.dump = ssovf_dump,
@@ -621,7 +644,7 @@ static const struct rte_eventdev_ops ssovf_ops = {
static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
- struct octeontx_ssovf_info oinfo;
+ struct ssovf_info oinfo;
struct ssovf_mbox_dev_info info;
struct ssovf_evdev *edev;
struct rte_eventdev *eventdev;
@@ -633,6 +656,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
static const char *const args[] = {
SSOVF_SELFTEST_ARG,
+ TIMVF_ENABLE_STATS_ARG,
NULL
};
@@ -660,6 +684,15 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
rte_kvargs_free(kvlist);
return ret;
}
+
+ ret = rte_kvargs_process(kvlist,
+ TIMVF_ENABLE_STATS_ARG,
+ ssovf_selftest, &timvf_enable_stats);
+ if (ret != 0) {
+ ssovf_log_err("%s: Error in timvf stats", name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
}
rte_kvargs_free(kvlist);
@@ -679,7 +712,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
return 0;
}
- ret = octeontx_ssovf_info(&oinfo);
+ ret = ssovf_info(&oinfo);
if (ret) {
ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
goto error;
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index d1825b4f..18293e96 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -119,6 +119,16 @@ do { \
} while (0)
#endif
+struct ssovf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_ssovfs; /* Total sso groups available in domain */
+ uint8_t total_ssowvfs;/* Total sso hws available in domain */
+};
+
+enum ssovf_type {
+ OCTEONTX_SSO_GROUP, /* SSO group vf */
+ OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
+};
struct ssovf_evdev {
uint8_t max_event_queues;
@@ -164,8 +174,13 @@ uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
uint64_t timeout_ticks);
uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks);
-void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
+
+typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
+ ssows_handle_event_t fn, void *arg);
void ssows_reset(struct ssows *ws);
+int ssovf_info(struct ssovf_info *info);
+void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
int test_eventdev_octeontx(void);
#endif /* __SSOVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c
index 5e012a95..239362fc 100644
--- a/drivers/event/octeontx/ssovf_evdev_selftest.c
+++ b/drivers/event/octeontx/ssovf_evdev_selftest.c
@@ -688,6 +688,40 @@ test_multi_queue_enq_multi_port_deq(void)
nr_ports, 0xff /* invalid */);
}
+static
+void flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+ unsigned int *count = arg;
+
+ RTE_SET_USED(dev_id);
+ if (event.event_type == RTE_EVENT_TYPE_CPU)
+ *count = *count + 1;
+
+}
+
+static int
+test_dev_stop_flush(void)
+{
+ unsigned int total_events = MAX_EVENTS, count = 0;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+ if (ret)
+ return -2;
+ rte_event_dev_stop(evdev);
+ ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+ if (ret)
+ return -3;
+ RTE_TEST_ASSERT_EQUAL(total_events, count,
+ "count mismatch total_events=%d count=%d",
+ total_events, count);
+ return 0;
+}
+
static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
struct rte_event *ev)
@@ -1415,6 +1449,8 @@ test_eventdev_octeontx(void)
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_multi_queue_enq_single_port_deq);
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_dev_stop_flush);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_multi_queue_enq_multi_port_deq);
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_queue_to_port_single_link);
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c
new file mode 100644
index 00000000..b3db596d
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include "octeontx_mbox.h"
+#include "ssovf_evdev.h"
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
+#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
+
+#define SSO_MAX_VHGRP (64)
+#define SSO_MAX_VHWS (32)
+
+#define SSO_VHGRP_AQ_THR (0x1E0ULL)
+
+struct ssovf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+};
+
+struct ssowvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct ssowvf_identify {
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct ssodev {
+ uint8_t total_ssovfs;
+ uint8_t total_ssowvfs;
+ struct ssovf_res grp[SSO_MAX_VHGRP];
+ struct ssowvf_res hws[SSO_MAX_VHWS];
+};
+
+static struct ssodev sdev;
+
+/* Interface functions */
+int
+ssovf_info(struct ssovf_info *info)
+{
+ uint8_t i;
+ uint16_t domain;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL)
+ return -EINVAL;
+
+ if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0)
+ return -ENODEV;
+
+ domain = sdev.grp[0].domain;
+ for (i = 0; i < sdev.total_ssovfs; i++) {
+ /* Check vfid's are contiguous and belong to same domain */
+ if (sdev.grp[i].vfid != i ||
+ sdev.grp[i].bar0 == NULL ||
+ sdev.grp[i].domain != domain) {
+ mbox_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.grp[i].vfid,
+ domain, sdev.grp[i].domain,
+ sdev.grp[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < sdev.total_ssowvfs; i++) {
+ /* Check vfid's are contiguous and belong to same domain */
+ if (sdev.hws[i].vfid != i ||
+ sdev.hws[i].bar0 == NULL ||
+ sdev.hws[i].domain != domain) {
+ mbox_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.hws[i].vfid,
+ domain, sdev.hws[i].domain,
+ sdev.hws[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ info->domain = domain;
+ info->total_ssovfs = sdev.total_ssovfs;
+ info->total_ssowvfs = sdev.total_ssowvfs;
+ return 0;
+}
+
+void*
+ssovf_bar(enum ssovf_type type, uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ type > OCTEONTX_SSO_HWS)
+ return NULL;
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ if (id >= sdev.total_ssovfs)
+ return NULL;
+ } else {
+ if (id >= sdev.total_ssowvfs)
+ return NULL;
+ }
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ switch (bar) {
+ case 0:
+ return sdev.grp[id].bar0;
+ case 2:
+ return sdev.grp[id].bar2;
+ default:
+ return NULL;
+ }
+ } else {
+ switch (bar) {
+ case 0:
+ return sdev.hws[id].bar0;
+ case 2:
+ return sdev.hws[id].bar2;
+ case 4:
+ return sdev.hws[id].bar4;
+ default:
+ return NULL;
+ }
+ }
+}
+
+/* SSOWVF pcie device aka event port probe */
+
+static int
+ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint16_t vfid;
+ struct ssowvf_res *res;
+ struct ssowvf_identify *id;
+ uint8_t *ram_mbox_base;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ mbox_log_err("Empty bars %p %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
+ if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
+ mbox_log_err("Bar4 len mismatch %d != %d",
+ SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
+ return -EINVAL;
+ }
+
+ id = pci_dev->mem_resource[4].addr;
+ vfid = id->vfid;
+ if (vfid >= SSO_MAX_VHWS) {
+ mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
+ return -EINVAL;
+ }
+
+ res = &sdev.hws[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = id->domain;
+
+ sdev.total_ssowvfs++;
+ if (vfid == 0) {
+ ram_mbox_base = ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
+ if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base)) {
+ mbox_log_err("Invalid Failed to set ram mbox base");
+ return -EINVAL;
+ }
+ }
+
+ rte_wmb();
+ mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
+ res->vfid, sdev.total_ssowvfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssowvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOWS_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssowvf = {
+ .id_table = pci_ssowvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssowvf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf);
+
+/* SSOVF pcie device aka event queue probe */
+
+static int
+ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ uint8_t *idreg;
+ struct ssovf_res *res;
+ uint8_t *reg;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL) {
+ mbox_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+ idreg += SSO_VHGRP_AQ_THR;
+ val = rte_read64(idreg);
+
+ /* Write back the default value of aq_thr */
+ rte_write64((1ULL << 33) - 1, idreg);
+ vfid = (val >> 16) & 0xffff;
+ if (vfid >= SSO_MAX_VHGRP) {
+ mbox_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
+ return -EINVAL;
+ }
+
+ res = &sdev.grp[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->domain = val & 0xffff;
+
+ sdev.total_ssovfs++;
+ if (vfid == 0) {
+ reg = ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
+ reg += SSO_VHGRP_PF_MBOX(1);
+ if (octeontx_mbox_set_reg(reg)) {
+ mbox_log_err("Invalid Failed to set mbox_reg");
+ return -EINVAL;
+ }
+ }
+
+ rte_wmb();
+ mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
+ res->vfid, sdev.total_ssovfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssovf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssovf = {
+ .id_table = pci_ssovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf);
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 753c1e9f..d8bbc714 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -198,13 +198,13 @@ ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
}
void
-ssows_flush_events(struct ssows *ws, uint8_t queue_id)
+ssows_flush_events(struct ssows *ws, uint8_t queue_id,
+ ssows_handle_event_t fn, void *arg)
{
uint32_t reg_off;
- uint64_t aq_cnt = 1;
- uint64_t cq_ds_cnt = 1;
- uint64_t enable, get_work0, get_work1;
- uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
+ struct rte_event ev;
+ uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
+ uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
enable = ssovf_read64(base + SSO_VHGRP_QCTL);
if (!enable)
@@ -219,11 +219,10 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id)
cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
/* Extract cq and ds count */
cq_ds_cnt &= 0x1FFF1FFF0000;
- ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
+ ssows_get_work(ws, &ev);
+ if (fn != NULL && ev.u64 != 0)
+ fn(arg, ev);
}
-
- RTE_SET_USED(get_work0);
- RTE_SET_USED(get_work1);
}
void
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
new file mode 100644
index 00000000..c4fbd2d8
--- /dev/null
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "timvf_evdev.h"
+
+int otx_logtype_timvf;
+
+RTE_INIT(otx_timvf_init_log);
+static void
+otx_timvf_init_log(void)
+{
+ otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
+ if (otx_logtype_timvf >= 0)
+ rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
+}
+
+struct __rte_packed timvf_mbox_dev_info {
+ uint64_t ring_active[4];
+ uint64_t clk_freq;
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+static int
+timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct timvf_mbox_dev_info);
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_GET_DEV_INFO;
+ hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */
+
+ memset(info, 0, len);
+ return octeontx_mbox_send(&hdr, NULL, 0, info, len);
+}
+
+static void
+timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer_adapter_info *adptr_info)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ adptr_info->max_tmo_ns = timr->max_tout;
+ adptr_info->min_resolution_ns = timr->tck_nsec;
+ rte_memcpy(&adptr_info->conf, &adptr->data->conf,
+ sizeof(struct rte_event_timer_adapter_conf));
+}
+
+static int
+timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct timvf_ctrl_reg);
+ int ret;
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_SET_RING_INFO;
+ hdr.vfid = ring_id;
+
+ ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
+ if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
+ return -EACCES;
+ return 0;
+}
+
+static int
+timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_RING_START_CYC_GET;
+ hdr.vfid = ring_id;
+ *now = 0;
+ return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
+}
+
+static int
+optimize_bucket_parameters(struct timvf_ring *timr)
+{
+ uint32_t hbkts;
+ uint32_t lbkts;
+ uint64_t tck_nsec;
+
+ hbkts = rte_align32pow2(timr->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);
+
+ if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
+ hbkts = 0;
+
+ lbkts = rte_align32prevpow2(timr->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);
+
+ if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
+ lbkts = 0;
+
+ if (!hbkts && !lbkts)
+ return 0;
+
+ if (!hbkts) {
+ timr->nb_bkts = lbkts;
+ goto end;
+ } else if (!lbkts) {
+ timr->nb_bkts = hbkts;
+ goto end;
+ }
+
+ timr->nb_bkts = (hbkts - timr->nb_bkts) <
+ (timr->nb_bkts - lbkts) ? hbkts : lbkts;
+end:
+ timr->get_target_bkt = bkt_and;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
+ (timr->nb_bkts - 1)), 10);
+ return 1;
+}
+
+static int
+timvf_ring_start(const struct rte_event_timer_adapter *adptr)
+{
+ int ret;
+ uint8_t use_fpa = 0;
+ uint64_t interval;
+ uintptr_t pool;
+ struct timvf_ctrl_reg rctrl;
+ struct timvf_mbox_dev_info dinfo;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_mbox_dev_info_get(&dinfo);
+ if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
+ return -EINVAL;
+
+ /* Calculate the interval cycles according to clock source. */
+ switch (timr->clk_src) {
+ case TIM_CLK_SRC_SCLK:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ case TIM_CLK_SRC_GPIO:
+ /* GPIO doesn't work on tck_nsec. */
+ interval = 0;
+ break;
+ case TIM_CLK_SRC_GTI:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ case TIM_CLK_SRC_PTP:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ default:
+ timvf_log_err("Unsupported clock source configured %d",
+ timr->clk_src);
+ return -EINVAL;
+ }
+
+ if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
+ use_fpa = 1;
+
+ /*CTRL0 register.*/
+ rctrl.rctrl0 = interval;
+
+ /*CTRL1 register.*/
+ rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
+ 1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
+ 1ull << 47 /* ENA */ |
+ 1ull << 44 /* ENA_LDWB */ |
+ (timr->nb_bkts - 1);
+
+ rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
+
+ if (use_fpa) {
+ pool = (uintptr_t)((struct rte_mempool *)
+ timr->chunk_pool)->pool_id;
+ ret = octeontx_fpa_bufpool_gpool(pool);
+ if (ret < 0) {
+ timvf_log_dbg("Unable to get gaura id");
+ ret = -ENOMEM;
+ goto error;
+ }
+ timvf_write64((uint64_t)ret,
+ (uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+ } else {
+ rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
+ }
+
+ timvf_write64((uintptr_t)timr->bkt,
+ (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
+ timvf_set_chunk_refill(timr, use_fpa);
+ if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
+ ret = -EACCES;
+ goto error;
+ }
+
+ if (timvf_get_start_cyc(&timr->ring_start_cyc,
+ timr->tim_ring_id) < 0) {
+ ret = -EACCES;
+ goto error;
+ }
+ timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
+ timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
+ timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
+ " maxtmo %"PRIu64"\n",
+ timr->nb_bkts, timr->tck_nsec, interval,
+ timr->max_tout);
+
+ return 0;
+error:
+ rte_free(timr->bkt);
+ rte_mempool_free(timr->chunk_pool);
+ return ret;
+}
+
+static int
+timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ struct timvf_ctrl_reg rctrl = {0};
+ rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
+ rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
+ rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
+ rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);
+
+ if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
+ return -EACCES;
+ return 0;
+}
+
+static int
+timvf_ring_create(struct rte_event_timer_adapter *adptr)
+{
+ char pool_name[25];
+ int ret;
+ uint64_t nb_timers;
+ struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
+ struct timvf_ring *timr;
+ struct timvf_info tinfo;
+ const char *mempool_ops;
+ unsigned int mp_flags = 0;
+
+ if (timvf_info(&tinfo) < 0)
+ return -ENODEV;
+
+ if (adptr->data->id >= tinfo.total_timvfs)
+ return -ENODEV;
+
+ timr = rte_zmalloc("octeontx_timvf_priv",
+ sizeof(struct timvf_ring), 0);
+ if (timr == NULL)
+ return -ENOMEM;
+
+ adptr->data->adapter_priv = timr;
+ /* Check config parameters. */
+ if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
+ (!rcfg->timer_tick_ns ||
+ rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
+ timvf_log_err("Too low timer ticks");
+ goto cfg_err;
+ }
+
+ timr->clk_src = (int) rcfg->clk_src;
+ timr->tim_ring_id = adptr->data->id;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
+ timr->max_tout = rcfg->max_tmo_ns;
+ timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
+ timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
+ timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
+ nb_timers = rcfg->nb_timers;
+ timr->get_target_bkt = bkt_mod;
+
+ timr->nb_chunks = nb_timers / nb_chunk_slots;
+
+ /* Try to optimize the bucket parameters. */
+ if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
+ && !rte_is_power_of_2(timr->nb_bkts)) {
+ if (optimize_bucket_parameters(timr)) {
+ timvf_log_info("Optimized configured values");
+ timvf_log_dbg("nb_bkts : %"PRIu32"", timr->nb_bkts);
+ timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
+ } else
+ timvf_log_info("Failed to Optimize configured values");
+ }
+
+ if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
+ mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+ timvf_log_info("Using single producer mode");
+ }
+
+ timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
+ (timr->nb_bkts) * sizeof(struct tim_mem_bucket),
+ 0);
+ if (timr->bkt == NULL)
+ goto mem_err;
+
+ snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
+ timr->tim_ring_id);
+ timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
+ timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
+ mp_flags);
+
+ if (!timr->chunk_pool) {
+ rte_free(timr->bkt);
+ timvf_log_err("Unable to create chunkpool.");
+ return -ENOMEM;
+ }
+
+ mempool_ops = rte_mbuf_best_mempool_ops();
+ ret = rte_mempool_set_ops_byname(timr->chunk_pool,
+ mempool_ops, NULL);
+
+ if (ret != 0) {
+ timvf_log_err("Unable to set chunkpool ops.");
+ goto mem_err;
+ }
+
+ ret = rte_mempool_populate_default(timr->chunk_pool);
+ if (ret < 0) {
+ timvf_log_err("Unable to set populate chunkpool.");
+ goto mem_err;
+ }
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
+ timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
+ timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);
+
+ return 0;
+mem_err:
+ rte_free(timr);
+ return -ENOMEM;
+cfg_err:
+ rte_free(timr);
+ return -EINVAL;
+}
+
+static int
+timvf_ring_free(struct rte_event_timer_adapter *adptr)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ rte_mempool_free(timr->chunk_pool);
+ rte_free(timr->bkt);
+ rte_free(adptr->data->adapter_priv);
+ return 0;
+}
+
+static int
+timvf_stats_get(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ struct timvf_ring *timr = adapter->data->adapter_priv;
+ uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
+
+ stats->evtim_exp_count = timr->tim_arm_cnt;
+ stats->ev_enq_count = timr->tim_arm_cnt;
+ stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
+ &timr->fast_div);
+ return 0;
+}
+
+static int
+timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
+{
+ struct timvf_ring *timr = adapter->data->adapter_priv;
+
+ timr->tim_arm_cnt = 0;
+ return 0;
+}
+
+static struct rte_event_timer_adapter_ops timvf_ops = {
+ .init = timvf_ring_create,
+ .uninit = timvf_ring_free,
+ .start = timvf_ring_start,
+ .stop = timvf_ring_stop,
+ .get_info = timvf_ring_info_get,
+};
+
+int
+timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
+ uint8_t enable_stats)
+{
+ RTE_SET_USED(dev);
+
+ if (enable_stats) {
+ timvf_ops.stats_get = timvf_stats_get;
+ timvf_ops.stats_reset = timvf_stats_reset;
+ }
+
+ if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
+ timvf_ops.arm_burst = enable_stats ?
+ timvf_timer_arm_burst_sp_stats :
+ timvf_timer_arm_burst_sp;
+ else
+ timvf_ops.arm_burst = enable_stats ?
+ timvf_timer_arm_burst_mp_stats :
+ timvf_timer_arm_burst_mp;
+
+ timvf_ops.arm_tmo_tick_burst = enable_stats ?
+ timvf_timer_arm_tmo_brst_stats :
+ timvf_timer_arm_tmo_brst;
+ timvf_ops.cancel_burst = timvf_timer_cancel_burst;
+ *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
+ *ops = &timvf_ops;
+ return 0;
+}
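
Note on usage: timvf_timer_adapter_caps_get() above reports
RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT and hands back the timvf ops table,
so applications drive this driver entirely through the generic event timer
adapter API. A minimal application-side sketch, with all numeric values assumed
purely for illustration; adapter create/start land in timvf_ring_create() and
timvf_ring_start() via the ops registered above:

    struct rte_event_timer_adapter *adptr;
    struct rte_event_timer_adapter_conf conf = {
            .event_dev_id = dev_id,            /* an already configured eventdev */
            .timer_adapter_id = 0,
            .socket_id = rte_socket_id(),
            .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
            .timer_tick_ns = 100000,           /* 100 us tick, assumed */
            .max_tmo_ns = 100000000,           /* 100 ms max timeout, assumed */
            .nb_timers = 1024,
            .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
    };
    struct rte_event_timer tim = {
            .ev.op = RTE_EVENT_OP_NEW,
            .ev.queue_id = 0,
            .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
            .ev.event_type = RTE_EVENT_TYPE_TIMER,
            .state = RTE_EVENT_TIMER_NOT_ARMED,
            .timeout_ticks = 30,               /* expire 30 ticks from now */
    };
    struct rte_event_timer *timp = &tim;

    adptr = rte_event_timer_adapter_create(&conf);
    rte_event_timer_adapter_start(adptr);
    rte_event_timer_arm_burst(adptr, &timp, 1); /* dispatched to a timvf arm op */
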
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
new file mode 100644
index 00000000..0185593f
--- /dev/null
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __TIMVF_EVDEV_H__
+#define __TIMVF_EVDEV_H__
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_eventdev.h>
+#include <rte_event_timer_adapter.h>
+#include <rte_event_timer_adapter_pmd.h>
+#include <rte_io.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_mempool.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_prefetch.h>
+#include <rte_reciprocal.h>
+
+#include <octeontx_mbox.h>
+#include <octeontx_fpavf.h>
+
+#define timvf_log(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \
+ "[%s] %s() " fmt "\n", \
+ RTE_STR(event_timer_octeontx), __func__, ## args)
+
+#define timvf_log_info(fmt, ...) timvf_log(INFO, fmt, ##__VA_ARGS__)
+#define timvf_log_dbg(fmt, ...) timvf_log(DEBUG, fmt, ##__VA_ARGS__)
+#define timvf_log_err(fmt, ...) timvf_log(ERR, fmt, ##__VA_ARGS__)
+#define timvf_func_trace timvf_log_dbg
+
+#define TIM_COPROC (8)
+#define TIM_GET_DEV_INFO (1)
+#define TIM_GET_RING_INFO (2)
+#define TIM_SET_RING_INFO (3)
+#define TIM_RING_START_CYC_GET (4)
+
+#define TIM_MAX_RINGS (64)
+#define TIM_DEV_PER_NODE (1)
+#define TIM_VF_PER_DEV (64)
+#define TIM_RING_PER_DEV (TIM_VF_PER_DEV)
+#define TIM_RING_NODE_SHIFT (6)
+#define TIM_RING_MASK ((TIM_RING_PER_DEV) - 1)
+#define TIM_RING_INVALID (-1)
+
+#define TIM_MIN_INTERVAL (1E3)
+#define TIM_MAX_INTERVAL ((1ull << 32) - 1)
+#define TIM_MAX_BUCKETS (1ull << 20)
+#define TIM_CHUNK_SIZE (4096)
+#define TIM_MAX_CHUNKS_PER_BUCKET (1ull << 32)
+
+#define TIMVF_MAX_BURST (8)
+
+/* TIM VF Control/Status registers (CSRs): */
+/* VF_BAR0: */
+#define TIM_VF_NRSPERR_INT (0x0)
+#define TIM_VF_NRSPERR_INT_W1S (0x8)
+#define TIM_VF_NRSPERR_ENA_W1C (0x10)
+#define TIM_VF_NRSPERR_ENA_W1S (0x18)
+#define TIM_VRING_FR_RN_CYCLES (0x20)
+#define TIM_VRING_FR_RN_GPIOS (0x28)
+#define TIM_VRING_FR_RN_GTI (0x30)
+#define TIM_VRING_FR_RN_PTP (0x38)
+#define TIM_VRING_CTL0 (0x40)
+#define TIM_VRING_CTL1 (0x50)
+#define TIM_VRING_CTL2 (0x60)
+#define TIM_VRING_BASE (0x100)
+#define TIM_VRING_AURA (0x108)
+#define TIM_VRING_REL (0x110)
+
+#define TIM_CTL1_W0_S_BUCKET 20
+#define TIM_CTL1_W0_M_BUCKET ((1ull << (40 - 20)) - 1)
+
+#define TIM_BUCKET_W1_S_NUM_ENTRIES (0) /*Shift*/
+#define TIM_BUCKET_W1_M_NUM_ENTRIES ((1ull << (32 - 0)) - 1)
+#define TIM_BUCKET_W1_S_SBT (32)
+#define TIM_BUCKET_W1_M_SBT ((1ull << (33 - 32)) - 1)
+#define TIM_BUCKET_W1_S_HBT (33)
+#define TIM_BUCKET_W1_M_HBT ((1ull << (34 - 33)) - 1)
+#define TIM_BUCKET_W1_S_BSK (34)
+#define TIM_BUCKET_W1_M_BSK ((1ull << (35 - 34)) - 1)
+#define TIM_BUCKET_W1_S_LOCK (40)
+#define TIM_BUCKET_W1_M_LOCK ((1ull << (48 - 40)) - 1)
+#define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48)
+#define TIM_BUCKET_W1_M_CHUNK_REMAINDER ((1ull << (64 - 48)) - 1)
+
+#define TIM_BUCKET_SEMA \
+ (TIM_BUCKET_CHUNK_REMAIN)
+
+#define TIM_BUCKET_CHUNK_REMAIN \
+ (TIM_BUCKET_W1_M_CHUNK_REMAINDER << TIM_BUCKET_W1_S_CHUNK_REMAINDER)
+
+#define TIM_BUCKET_LOCK \
+ (TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK)
+
+#define TIM_BUCKET_SEMA_WLOCK \
+ (TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
+
+#define NSEC_PER_SEC 1E9
+#define NSEC2CLK(__ns, __freq) (((__ns) * (__freq)) / NSEC_PER_SEC)
+#define CLK2NSEC(__clk, __freq) (((__clk) * NSEC_PER_SEC) / (__freq))
+
+#define timvf_read64 rte_read64_relaxed
+#define timvf_write64 rte_write64_relaxed
+
+#define TIMVF_ENABLE_STATS_ARG ("timvf_stats")
+
+extern int otx_logtype_timvf;
+static const uint16_t nb_chunk_slots = (TIM_CHUNK_SIZE / 16) - 1;
+
+struct timvf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_timvfs; /* Total timvf available in domain */
+};
+
+enum timvf_clk_src {
+ TIM_CLK_SRC_SCLK = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
+ TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
+ TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
+};
+
+/* TIM_MEM_BUCKET */
+struct tim_mem_bucket {
+ uint64_t first_chunk;
+ union {
+ uint64_t w1;
+ struct {
+ uint32_t nb_entry;
+ uint8_t sbt:1;
+ uint8_t hbt:1;
+ uint8_t bsk:1;
+ uint8_t rsvd:5;
+ uint8_t lock;
+ int16_t chunk_remainder;
+ };
+ };
+ uint64_t current_chunk;
+ uint64_t pad;
+} __rte_packed __rte_aligned(8);
+
+struct tim_mem_entry {
+ uint64_t w0;
+ uint64_t wqe;
+} __rte_packed;
+
+struct timvf_ctrl_reg {
+ uint64_t rctrl0;
+ uint64_t rctrl1;
+ uint64_t rctrl2;
+ uint8_t use_pmu;
+} __rte_packed;
+
+struct timvf_ring;
+
+typedef uint32_t (*bkt_id)(const uint32_t bkt_tcks, const uint32_t nb_bkts);
+typedef struct tim_mem_entry * (*refill_chunk)(
+ struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr);
+
+struct timvf_ring {
+ bkt_id get_target_bkt;
+ refill_chunk refill_chunk;
+ struct rte_reciprocal_u64 fast_div;
+ uint64_t ring_start_cyc;
+ uint32_t nb_bkts;
+ struct tim_mem_bucket *bkt;
+ void *chunk_pool;
+ uint64_t tck_int;
+ volatile uint64_t tim_arm_cnt;
+ uint64_t tck_nsec;
+ void *vbar0;
+ void *bkt_pos;
+ uint64_t max_tout;
+ uint64_t nb_chunks;
+ enum timvf_clk_src clk_src;
+ uint16_t tim_ring_id;
+} __rte_cache_aligned;
+
+static __rte_always_inline uint32_t
+bkt_mod(const uint32_t rel_bkt, const uint32_t nb_bkts)
+{
+ return rel_bkt % nb_bkts;
+}
+
+static __rte_always_inline uint32_t
+bkt_and(uint32_t rel_bkt, uint32_t nb_bkts)
+{
+ return rel_bkt & (nb_bkts - 1);
+}
+
+int timvf_info(struct timvf_info *tinfo);
+void *timvf_bar(uint8_t id, uint8_t bar);
+int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
+ uint8_t enable_stats);
+uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_sp_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_mp_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers);
+uint16_t timvf_timer_arm_tmo_brst_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers);
+void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa);
+
+#endif /* __TIMVF_EVDEV_H__ */
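
Note on the clock conversion helpers: NSEC2CLK()/CLK2NSEC() above translate
between a ring tick period in nanoseconds and TIM coprocessor clock cycles;
timvf_ring_start() feeds them the clk_freq returned by the TIM_GET_DEV_INFO
mailbox call. A quick numeric sketch, assuming a 700 MHz clock purely for
illustration:

    uint64_t clk_freq = 700000000ULL;                 /* assumed 700 MHz */
    uint64_t tck_nsec = 1000;                         /* 1 us ring tick */
    uint64_t cycles   = NSEC2CLK(tck_nsec, clk_freq); /* (1000 * 7e8) / 1e9 = 700 */
    uint64_t back_ns  = CLK2NSEC(cycles, clk_freq);   /* (700 * 1e9) / 7e8 = 1000 */
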
diff --git a/drivers/event/octeontx/timvf_probe.c b/drivers/event/octeontx/timvf_probe.c
new file mode 100644
index 00000000..08dbd2be
--- /dev/null
+++ b/drivers/event/octeontx/timvf_probe.c
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include <octeontx_mbox.h>
+
+#include "ssovf_evdev.h"
+#include "timvf_evdev.h"
+
+#ifndef PCI_VENDOR_ID_CAVIUM
+#define PCI_VENDOR_ID_CAVIUM (0x177D)
+#endif
+
+#define PCI_DEVICE_ID_OCTEONTX_TIM_VF (0xA051)
+#define TIM_MAX_RINGS (64)
+
+struct timvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct timdev {
+ uint8_t total_timvfs;
+ struct timvf_res rings[TIM_MAX_RINGS];
+};
+
+static struct timdev tdev;
+
+int
+timvf_info(struct timvf_info *tinfo)
+{
+ int i;
+ struct ssovf_info info;
+
+ if (tinfo == NULL)
+ return -EINVAL;
+
+ if (!tdev.total_timvfs)
+ return -ENODEV;
+
+ if (ssovf_info(&info) < 0)
+ return -EINVAL;
+
+ for (i = 0; i < tdev.total_timvfs; i++) {
+ if (info.domain != tdev.rings[i].domain) {
+ timvf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, tdev.rings[i].vfid,
+ info.domain, tdev.rings[i].domain,
+ tdev.rings[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ tinfo->total_timvfs = tdev.total_timvfs;
+ tinfo->domain = info.domain;
+ return 0;
+}
+
+void*
+timvf_bar(uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return NULL;
+
+ if (id > tdev.total_timvfs)
+ return NULL;
+
+ switch (bar) {
+ case 0:
+ return tdev.rings[id].bar0;
+ case 4:
+ return tdev.rings[id].bar4;
+ default:
+ return NULL;
+ }
+}
+
+static int
+timvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ struct timvf_res *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ timvf_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
+ val = rte_read64((uint8_t *)pci_dev->mem_resource[0].addr +
+ 0x100 /* TIM_VRINGX_BASE */);
+ vfid = (val >> 23) & 0xff;
+ if (vfid >= TIM_MAX_RINGS) {
+ timvf_log_err("Invalid vfid(%d/%d)", vfid, TIM_MAX_RINGS);
+ return -EINVAL;
+ }
+
+ res = &tdev.rings[tdev.total_timvfs];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = (val >> 7) & 0xffff;
+ tdev.total_timvfs++;
+ rte_wmb();
+
+ timvf_log_dbg("Domain=%d VFid=%d bar0 %p total_timvfs=%d", res->domain,
+ res->vfid, pci_dev->mem_resource[0].addr,
+ tdev.total_timvfs);
+ return 0;
+}
+
+
+static const struct rte_pci_id pci_timvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_TIM_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_timvf = {
+ .id_table = pci_timvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = timvf_probe,
+ .remove = NULL,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_timvf, pci_timvf);
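The probe above derives the ring's domain and VF id purely from the TIM_VRINGX_BASE word it reads at BAR0 + 0x100. A minimal, self-contained sketch of that decode; the bit positions come from the shifts in timvf_probe() and the helper name is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative decode of the TIM_VRINGX_BASE word: VF id in bits 23-30,
 * domain in bits 7-22, mirroring the shifts used by timvf_probe().
 */
static void
demo_decode_vring_base(uint64_t val, uint16_t *domain, uint16_t *vfid)
{
	*vfid = (val >> 23) & 0xff;
	*domain = (val >> 7) & 0xffff;
}

int
main(void)
{
	uint16_t domain, vfid;

	demo_decode_vring_base(0x1801880ULL, &domain, &vfid);
	printf("domain=%u vfid=%u\n", domain, vfid);
	return 0;
}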
diff --git a/drivers/event/octeontx/timvf_worker.c b/drivers/event/octeontx/timvf_worker.c
new file mode 100644
index 00000000..50790e19
--- /dev/null
+++ b/drivers/event/octeontx/timvf_worker.c
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "timvf_worker.h"
+
+static inline int
+timvf_timer_reg_checks(const struct timvf_ring * const timr,
+ struct rte_event_timer * const tim)
+{
+ if (unlikely(tim->state)) {
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ rte_errno = EALREADY;
+ goto fail;
+ }
+
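+	/* 0 ticks is too early; >= nb_bkts exceeds the ring span (too late). */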
+ if (unlikely(!tim->timeout_ticks ||
+ tim->timeout_ticks >= timr->nb_bkts)) {
+ tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
+ : RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ rte_errno = EINVAL;
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static inline void
+timvf_format_event(const struct rte_event_timer * const tim,
+ struct tim_mem_entry * const entry)
+{
+ entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
+ (tim->ev.event & 0xFFFFFFFFF);
+ entry->wqe = tim->ev.u64;
+}
+
+uint16_t
+timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ RTE_SET_USED(adptr);
+ int ret;
+ uint16_t index;
+
+ for (index = 0; index < nb_timers; index++) {
+ if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
+ rte_errno = EALREADY;
+ break;
+ }
+
+ if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EINVAL;
+ break;
+ }
+ ret = timvf_rem_entry(tim[index]);
+ if (ret) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t index;
+ struct tim_mem_entry entry;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ for (index = 0; index < nb_timers; index++) {
+ if (timvf_timer_reg_checks(timr, tim[index]))
+ break;
+
+ timvf_format_event(tim[index], &entry);
+ ret = timvf_add_entry_sp(timr, tim[index]->timeout_ticks,
+ tim[index], &entry);
+ if (unlikely(ret)) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_sp_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ uint16_t ret;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_timer_arm_burst_sp(adptr, tim, nb_timers);
+ timr->tim_arm_cnt += ret;
+
+ return ret;
+}
+
+uint16_t
+timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t index;
+ struct tim_mem_entry entry;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ for (index = 0; index < nb_timers; index++) {
+ if (timvf_timer_reg_checks(timr, tim[index]))
+ break;
+ timvf_format_event(tim[index], &entry);
+ ret = timvf_add_entry_mp(timr, tim[index]->timeout_ticks,
+ tim[index], &entry);
+ if (unlikely(ret)) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_mp_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ uint16_t ret;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_timer_arm_burst_mp(adptr, tim, nb_timers);
+ timr->tim_arm_cnt += ret;
+
+ return ret;
+}
+
+uint16_t
+timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t set_timers = 0;
+ uint16_t idx;
+ uint16_t arr_idx = 0;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ struct tim_mem_entry entry[TIMVF_MAX_BURST] __rte_cache_aligned;
+
+ if (unlikely(!timeout_tick || timeout_tick >= timr->nb_bkts)) {
+ const enum rte_event_timer_state state = timeout_tick ?
+ RTE_EVENT_TIMER_ERROR_TOOLATE :
+ RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ for (idx = 0; idx < nb_timers; idx++)
+ tim[idx]->state = state;
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ while (arr_idx < nb_timers) {
+ for (idx = 0; idx < TIMVF_MAX_BURST && (arr_idx < nb_timers);
+ idx++, arr_idx++) {
+ timvf_format_event(tim[arr_idx], &entry[idx]);
+ }
+ ret = timvf_add_entry_brst(timr, timeout_tick, &tim[set_timers],
+ entry, idx);
+ set_timers += ret;
+ if (ret != idx)
+ break;
+ }
+
+ return set_timers;
+}
+
+
+uint16_t
+timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers)
+{
+ uint16_t set_timers;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ set_timers = timvf_timer_arm_tmo_brst(adptr, tim, timeout_tick,
+ nb_timers);
+ timr->tim_arm_cnt += set_timers;
+
+ return set_timers;
+}
+
+void
+timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
+{
+ if (use_fpa)
+ timr->refill_chunk = timvf_refill_chunk_fpa;
+ else
+ timr->refill_chunk = timvf_refill_chunk_generic;
+}
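The burst handlers above are the PMD-side callbacks behind the generic event timer adapter; an application reaches them through rte_event_timer_arm_burst(). A rough caller-side sketch, assuming an adapter has already been created and started and that event queue/port setup is done elsewhere (note that in real code the rte_event_timer must stay valid until it expires or is cancelled; a stack variable is used here only for brevity):

#include <rte_errno.h>
#include <rte_eventdev.h>
#include <rte_event_timer_adapter.h>

/* Hedged sketch: arm one timer via the generic adapter API. "adapter" is
 * assumed to be a started rte_event_timer_adapter and "ev_queue_id" an
 * existing event queue; error handling is minimal.
 */
static int
demo_arm_one_timer(struct rte_event_timer_adapter *adapter, uint8_t ev_queue_id)
{
	struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = ev_queue_id,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.ev.u64 = 0xdeadbeef,	/* user cookie delivered on expiry */
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = 100,	/* relative to the adapter resolution */
	};
	struct rte_event_timer *tims[] = { &tim };

	if (rte_event_timer_arm_burst(adapter, tims, 1) != 1)
		return -rte_errno;
	return 0;
}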
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
new file mode 100644
index 00000000..dede1a4a
--- /dev/null
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_common.h>
+#include <rte_branch_prediction.h>
+
+#include "timvf_evdev.h"
+
+static inline int16_t
+timr_bkt_fetch_rem(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
+ TIM_BUCKET_W1_M_CHUNK_REMAINDER;
+}
+
+static inline int16_t
+timr_bkt_get_rem(struct tim_mem_bucket *bktp)
+{
+ return __atomic_load_n(&bktp->chunk_remainder,
+ __ATOMIC_ACQUIRE);
+}
+
+static inline void
+timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
+{
+ __atomic_store_n(&bktp->chunk_remainder, v,
+ __ATOMIC_RELEASE);
+}
+
+static inline void
+timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
+{
+ __atomic_fetch_sub(&bktp->chunk_remainder, v,
+ __ATOMIC_RELEASE);
+}
+
+static inline uint8_t
+timr_bkt_get_sbt(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT;
+}
+
+static inline uint64_t
+timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
+ return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
+ return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint8_t
+timr_bkt_get_shbt(uint64_t w1)
+{
+ return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) |
+ ((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT);
+}
+
+static inline uint8_t
+timr_bkt_get_hbt(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
+}
+
+static inline uint8_t
+timr_bkt_get_bsk(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
+}
+
+static inline uint64_t
+timr_bkt_clr_bsk(struct tim_mem_bucket *bktp)
+{
+	/* Clear everything except the lock. */
+ const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
+ return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
+{
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
+{
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
+ __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
+ return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline void
+timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
+{
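+	/* Adding 0xff to the 8-bit lock count wraps around, decrementing it by one. */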
+ __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+}
+
+static inline uint32_t
+timr_bkt_get_nent(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
+ TIM_BUCKET_W1_M_NUM_ENTRIES;
+}
+
+static inline void
+timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
+{
+ __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+}
+
+static inline void
+timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
+{
+ __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
+ TIM_BUCKET_W1_S_NUM_ENTRIES);
+ return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline struct tim_mem_entry *
+timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt)
+{
+ struct tim_mem_entry *chunk;
+ struct tim_mem_entry *pnext;
+ chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk);
+ chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0;
+
+ while (chunk) {
+ pnext = (struct tim_mem_entry *)(uintptr_t)
+ ((chunk + nb_chunk_slots)->w0);
+ rte_mempool_put(timr->chunk_pool, chunk);
+ chunk = pnext;
+ }
+ return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
+}
+
+static inline int
+timvf_rem_entry(struct rte_event_timer *tim)
+{
+ uint64_t lock_sema;
+ struct tim_mem_entry *entry;
+ struct tim_mem_bucket *bkt;
+ if (tim->impl_opaque[1] == 0 ||
+ tim->impl_opaque[0] == 0)
+ return -ENOENT;
+
+ entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0];
+ if (entry->wqe != tim->ev.u64) {
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return -ENOENT;
+ }
+ bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1];
+ lock_sema = timr_bkt_inc_lock(bkt);
+ if (timr_bkt_get_shbt(lock_sema)
+ || !timr_bkt_get_nent(lock_sema)) {
+ timr_bkt_dec_lock(bkt);
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return -ENOENT;
+ }
+
+ entry->w0 = entry->wqe = 0;
+ timr_bkt_dec_lock(bkt);
+
+ tim->state = RTE_EVENT_TIMER_CANCELED;
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return 0;
+}
+
+static inline struct tim_mem_entry *
+timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr)
+{
+ struct tim_mem_entry *chunk;
+
+ if (bkt->nb_entry || !bkt->first_chunk) {
+ if (unlikely(rte_mempool_get(timr->chunk_pool,
+ (void **)&chunk))) {
+ return NULL;
+ }
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+ bkt->current_chunk) +
+ nb_chunk_slots) =
+ (uintptr_t) chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t) chunk;
+ }
+ } else {
+ chunk = timr_clr_bkt(timr, bkt);
+ bkt->first_chunk = (uintptr_t)chunk;
+ }
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+
+ return chunk;
+}
+
+static inline struct tim_mem_entry *
+timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr)
+{
+ struct tim_mem_entry *chunk;
+
+ if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
+ return NULL;
+
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+ bkt->current_chunk) +
+ nb_chunk_slots) =
+ (uintptr_t) chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t) chunk;
+ }
+
+ return chunk;
+}
+
+static inline struct tim_mem_bucket *
+timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
+{
+ const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
+ const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
+ &timr->fast_div) + rel_bkt;
+ const uint32_t tbkt_id = timr->get_target_bkt(bucket,
+ timr->nb_bkts);
+ return &timr->bkt[tbkt_id];
+}
+
+/* Single producer functions. */
+static inline int
+timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct tim_mem_entry * const pent)
+{
+ int16_t rem;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+__retry:
+	/* Get bucket semaphore. */
+ lock_sema = timr_bkt_fetch_sema(bkt);
+ /* Bucket related checks. */
+ if (unlikely(timr_bkt_get_hbt(lock_sema)))
+ goto __retry;
+
+ /* Insert the work. */
+ rem = timr_bkt_fetch_rem(lock_sema);
+
+ if (!rem) {
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_set_rem(bkt, 0);
+ tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ bkt->current_chunk = (uintptr_t) chunk;
+ timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += nb_chunk_slots - rem;
+ }
+ /* Copy work entry. */
+ *chunk = *pent;
+ timr_bkt_inc_nent(bkt);
+
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+ return 0;
+}
+
+/* Multi producer functions. */
+static inline int
+timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct tim_mem_entry * const pent)
+{
+ int16_t rem;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+__retry:
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+ /* Bucket related checks. */
+	/* Get bucket semaphore with lock. */
+ lock_sema = timr_bkt_fetch_sema_lock(bkt);
+ if (unlikely(timr_bkt_get_shbt(lock_sema))) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ rem = timr_bkt_fetch_rem(lock_sema);
+
+ if (rem < 0) {
+		/* Remainder exhausted; retry, possibly on a different bucket. */
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ } else if (!rem) {
+		/* Only one thread can be here. */
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_set_rem(bkt, 0);
+ timr_bkt_dec_lock(bkt);
+ tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ bkt->current_chunk = (uintptr_t) chunk;
+ timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += nb_chunk_slots - rem;
+ }
+ /* Copy work entry. */
+ *chunk = *pent;
+ timr_bkt_inc_nent(bkt);
+ timr_bkt_dec_lock(bkt);
+
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+ return 0;
+}
+
+static inline uint16_t
+timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
+ struct tim_mem_entry *chunk,
+ struct rte_event_timer ** const tim,
+ const struct tim_mem_entry * const ents,
+ const struct tim_mem_bucket * const bkt)
+{
+ for (; index < cpy_lmt; index++) {
+ *chunk = *(ents + index);
+ tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
+ tim[index]->impl_opaque[1] = (uintptr_t)bkt;
+ tim[index]->state = RTE_EVENT_TIMER_ARMED;
+ }
+
+ return index;
+}
+
+/* Burst mode functions */
+static inline int
+timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt,
+ struct rte_event_timer ** const tim,
+ const struct tim_mem_entry *ents,
+ const uint16_t nb_timers)
+{
+ int16_t rem;
+ int16_t crem;
+ uint8_t lock_cnt;
+ uint16_t index = 0;
+ uint16_t chunk_remainder;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+__retry:
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+
+ /* Only one thread beyond this. */
+ lock_sema = timr_bkt_inc_lock(bkt);
+ lock_cnt = (uint8_t)
+ ((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);
+
+ if (lock_cnt) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ /* Bucket related checks. */
+ if (unlikely(timr_bkt_get_hbt(lock_sema))) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ chunk_remainder = timr_bkt_fetch_rem(lock_sema);
+ rem = chunk_remainder - nb_timers;
+ if (rem < 0) {
+ crem = nb_chunk_slots - chunk_remainder;
+ if (chunk_remainder && crem) {
+ chunk = ((struct tim_mem_entry *)
+ (uintptr_t)bkt->current_chunk) + crem;
+
+ index = timvf_cpy_wrk(index, chunk_remainder,
+ chunk, tim, ents, bkt);
+ timr_bkt_sub_rem(bkt, chunk_remainder);
+ timr_bkt_add_nent(bkt, chunk_remainder);
+ }
+ rem = nb_timers - chunk_remainder;
+ ents = ents + chunk_remainder;
+
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_dec_lock(bkt);
+ rte_errno = ENOMEM;
+ tim[index]->state = RTE_EVENT_TIMER_ERROR;
+ return crem;
+ }
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+ bkt->current_chunk = (uintptr_t) chunk;
+
+ index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
+ timr_bkt_set_rem(bkt, nb_chunk_slots - rem);
+ timr_bkt_add_nent(bkt, rem);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += (nb_chunk_slots - chunk_remainder);
+
+ index = timvf_cpy_wrk(index, nb_timers,
+ chunk, tim, ents, bkt);
+ timr_bkt_sub_rem(bkt, nb_timers);
+ timr_bkt_add_nent(bkt, nb_timers);
+ }
+
+ timr_bkt_dec_lock(bkt);
+ return nb_timers;
+}
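All of the helpers above lean on one idiom: a single __atomic fetch-and-modify on the packed 64-bit bucket word both takes a reference (the lock/semaphore count) and returns the previous flags and chunk remainder, so the caller can decide in one step whether to proceed or back off. A simplified, self-contained illustration of that idiom; the field layout below is invented for the demo and is not the TIM hardware layout:

#include <stdint.h>
#include <stdio.h>

#define DEMO_S_REM	0	/* bits  0-15: free slots in the current chunk */
#define DEMO_S_LOCK	16	/* bits 16-23: lock/reference count */
#define DEMO_S_BUSY	24	/* bit  24   : bucket is being drained */
#define DEMO_SEMA_WLOCK	(1ULL << DEMO_S_LOCK)

static uint64_t bucket_w1 = 8;	/* 8 free slots, unlocked, not busy */

/* Returns the remainder observed while taking the lock, or -1 on back-off. */
static int
demo_try_claim_slot(void)
{
	uint64_t old = __atomic_fetch_add(&bucket_w1, DEMO_SEMA_WLOCK,
					  __ATOMIC_ACQ_REL);

	if ((old >> DEMO_S_BUSY) & 1) {
		/* Bucket busy: drop our reference and let the caller retry. */
		__atomic_fetch_sub(&bucket_w1, DEMO_SEMA_WLOCK,
				   __ATOMIC_ACQ_REL);
		return -1;
	}
	return (int)((old >> DEMO_S_REM) & 0xffff);
}

int
main(void)
{
	printf("remainder seen while locking: %d\n", demo_try_claim_slot());
	return 0;
}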
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 77083691..ef9fb30c 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -607,7 +607,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
static int
opdl_probe(struct rte_vdev_device *vdev)
{
- static const struct rte_eventdev_ops evdev_opdl_ops = {
+ static struct rte_eventdev_ops evdev_opdl_ops = {
.dev_configure = opdl_dev_configure,
.dev_infos_get = opdl_info_get,
.dev_close = opdl_close,
diff --git a/drivers/event/opdl/opdl_evdev_init.c b/drivers/event/opdl/opdl_evdev_init.c
index 1454de53..582ad698 100644
--- a/drivers/event/opdl/opdl_evdev_init.c
+++ b/drivers/event/opdl/opdl_evdev_init.c
@@ -733,6 +733,9 @@ initialise_all_other_ports(struct rte_eventdev *dev)
queue->ports[queue->nb_ports] = port;
port->instance_id = queue->nb_ports;
queue->nb_ports++;
+ opdl_stage_set_queue_id(stage_inst,
+ port->queue_id);
+
} else if (queue->q_pos == OPDL_Q_POS_END) {
/* tx port */
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index eca7712b..8aca481c 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -25,7 +25,10 @@
#define OPDL_NAME_SIZE 64
-#define OPDL_EVENT_MASK (0xFFFF0000000FFFFFULL)
+#define OPDL_EVENT_MASK (0x00000000000FFFFFULL)
+#define OPDL_FLOWID_MASK (0xFFFFF)
+#define OPDL_OPA_MASK (0xFF)
+#define OPDL_OPA_OFFSET (0x38)
int opdl_logtype_driver;
@@ -86,7 +89,6 @@ struct opdl_stage {
*/
uint32_t available_seq;
uint32_t head; /* Current head for single-thread operation */
- uint32_t shadow_head; /* Shadow head for single-thread operation */
uint32_t nb_instance; /* Number of instances */
uint32_t instance_id; /* ID of this stage instance */
uint16_t num_claimed; /* Number of slots claimed */
@@ -102,6 +104,9 @@ struct opdl_stage {
/* For managing disclaims in multi-threaded processing stages */
struct claim_manager pending_disclaims[RTE_MAX_LCORE]
__rte_cache_aligned;
+ uint32_t shadow_head; /* Shadow head for single-thread operation */
+ uint32_t queue_id; /* ID of Queue which is assigned to this stage */
+ uint32_t pos; /* Atomic scan position */
} __rte_cache_aligned;
/* Context for opdl_ring */
@@ -494,6 +499,9 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
{
uint32_t i = 0, j = 0, offset;
+ uint32_t opa_id = 0;
+ uint32_t flow_id = 0;
+ uint64_t event = 0;
void *get_slots;
struct rte_event *ev;
RTE_SET_USED(seq);
@@ -520,7 +528,17 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
for (j = 0; j < num_entries; j++) {
ev = (struct rte_event *)get_slot(t, s->head+j);
- if ((ev->flow_id%s->nb_instance) == s->instance_id) {
+
+ event = __atomic_load_n(&(ev->event),
+ __ATOMIC_ACQUIRE);
+
+ opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
+ flow_id = OPDL_FLOWID_MASK & event;
+
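+			/* Slot already updated by this or a later queue; not new work here. */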
+ if (opa_id >= s->queue_id)
+ continue;
+
+ if ((flow_id % s->nb_instance) == s->instance_id) {
memcpy(entries_offset, ev, t->slot_size);
entries_offset += t->slot_size;
i++;
@@ -531,6 +549,7 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
s->head += num_entries;
s->num_claimed = num_entries;
s->num_event = i;
+ s->pos = 0;
/* automatically disclaim entries if number of rte_events is zero */
if (unlikely(i == 0))
@@ -953,21 +972,26 @@ opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index)
}
bool
-opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
uint32_t index, bool atomic)
{
- uint32_t i = 0, j = 0, offset;
+ uint32_t i = 0, offset;
struct opdl_ring *t = s->t;
struct rte_event *ev_orig = NULL;
bool ev_updated = false;
- uint64_t ev_temp = 0;
+ uint64_t ev_temp = 0;
+ uint64_t ev_update = 0;
+
+ uint32_t opa_id = 0;
+ uint32_t flow_id = 0;
+ uint64_t event = 0;
if (index > s->num_event) {
PMD_DRV_LOG(ERR, "index is overflow");
return ev_updated;
}
- ev_temp = ev->event&OPDL_EVENT_MASK;
+ ev_temp = ev->event & OPDL_EVENT_MASK;
if (!atomic) {
offset = opdl_first_entry_id(s->seq, s->nb_instance,
@@ -984,27 +1008,39 @@ opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
}
} else {
- for (i = 0; i < s->num_claimed; i++) {
+ for (i = s->pos; i < s->num_claimed; i++) {
ev_orig = (struct rte_event *)
get_slot(t, s->shadow_head+i);
- if ((ev_orig->flow_id%s->nb_instance) ==
- s->instance_id) {
-
- if (j == index) {
- if ((ev_orig->event&OPDL_EVENT_MASK) !=
- ev_temp) {
- ev_orig->event = ev->event;
- ev_updated = true;
- }
- if (ev_orig->u64 != ev->u64) {
- ev_orig->u64 = ev->u64;
- ev_updated = true;
- }
-
- break;
+ event = __atomic_load_n(&(ev_orig->event),
+ __ATOMIC_ACQUIRE);
+
+ opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
+ flow_id = OPDL_FLOWID_MASK & event;
+
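+			/* Slot already tagged by this or a later queue; skip it. */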
+ if (opa_id >= s->queue_id)
+ continue;
+
+ if ((flow_id % s->nb_instance) == s->instance_id) {
+ ev_update = s->queue_id;
+ ev_update = (ev_update << OPDL_OPA_OFFSET)
+ | ev->event;
+
+ s->pos = i + 1;
+
+ if ((event & OPDL_EVENT_MASK) !=
+ ev_temp) {
+ __atomic_store_n(&(ev_orig->event),
+ ev_update,
+ __ATOMIC_RELEASE);
+ ev_updated = true;
}
- j++;
+ if (ev_orig->u64 != ev->u64) {
+ ev_orig->u64 = ev->u64;
+ ev_updated = true;
+ }
+
+ break;
}
}
@@ -1049,11 +1085,7 @@ check_deps(struct opdl_ring *t, struct opdl_stage *deps[],
return -EINVAL;
}
}
- if (num_deps > t->num_stages) {
- PMD_DRV_LOG(ERR, "num_deps (%u) > number stages (%u)",
- num_deps, t->num_stages);
- return -EINVAL;
- }
+
return 0;
}
@@ -1154,6 +1186,13 @@ opdl_stage_get_opdl_ring(const struct opdl_stage *s)
}
void
+opdl_stage_set_queue_id(struct opdl_stage *s,
+ uint32_t queue_id)
+{
+ s->queue_id = queue_id;
+}
+
+void
opdl_ring_dump(const struct opdl_ring *t, FILE *f)
{
uint32_t i;
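The net effect of the opdl_ring.c changes is that an event slot now carries, in its top byte, the queue id of the stage that last updated it, and readers skip any slot whose stored id is at or past their own queue. A small standalone sketch of that pack/unpack, reusing the mask and offset values defined above:

#include <stdint.h>
#include <assert.h>

#define OPDL_EVENT_MASK		(0x00000000000FFFFFULL)
#define OPDL_FLOWID_MASK	(0xFFFFF)
#define OPDL_OPA_MASK		(0xFF)
#define OPDL_OPA_OFFSET		(0x38)

int
main(void)
{
	uint64_t event = 0x12345;	/* flow id lives in the low 20 bits */
	uint32_t queue_id = 3;

	/* Writer side: tag the event with the queue that updated it. */
	uint64_t tagged = ((uint64_t)queue_id << OPDL_OPA_OFFSET) | event;

	/* Reader side: recover both fields, as the claim/cas paths do. */
	uint32_t opa_id = OPDL_OPA_MASK & (tagged >> OPDL_OPA_OFFSET);
	uint32_t flow_id = OPDL_FLOWID_MASK & tagged;

	assert(opa_id == queue_id);
	assert(flow_id == (event & OPDL_EVENT_MASK));
	return 0;
}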
diff --git a/drivers/event/opdl/opdl_ring.h b/drivers/event/opdl/opdl_ring.h
index 9e8c33e6..751a59db 100644
--- a/drivers/event/opdl/opdl_ring.h
+++ b/drivers/event/opdl/opdl_ring.h
@@ -518,6 +518,20 @@ opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries);
struct opdl_stage *
opdl_stage_create(struct opdl_ring *t, bool threadsafe);
+
+/**
+ * Set the internal queue id for a stage instance.
+ *
+ * @param s
+ *   Pointer to the stage instance.
+ *
+ * @param queue_id
+ *   The internal queue id to assign to the stage.
+ */
+void
+opdl_stage_set_queue_id(struct opdl_stage *s,
+ uint32_t queue_id);
+
/**
* Prints information on opdl_ring instance and all its stages
*
@@ -590,7 +604,7 @@ opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
*/
bool
-opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
uint32_t index, bool atomic);
#ifdef __cplusplus
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 7f467568..c889220e 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -319,7 +319,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
/* Initialize and register event driver with DPDK Application */
-static const struct rte_eventdev_ops skeleton_eventdev_ops = {
+static struct rte_eventdev_ops skeleton_eventdev_ops = {
.dev_infos_get = skeleton_eventdev_info_get,
.dev_configure = skeleton_eventdev_configure,
.dev_start = skeleton_eventdev_start,
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 6672fd8e..10f0e1ad 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -464,6 +464,33 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
return 0;
}
+static int
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
+ uint64_t flags,
+ uint32_t *caps,
+ const struct rte_event_timer_adapter_ops **ops)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(flags);
+ *caps = 0;
+
+ /* Use default SW ops */
+ *ops = NULL;
+
+ return 0;
+}
+
+static int
+sw_crypto_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cdev);
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
+ return 0;
+}
+
static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
@@ -772,7 +799,7 @@ static int32_t sw_sched_service_func(void *args)
static int
sw_probe(struct rte_vdev_device *vdev)
{
- static const struct rte_eventdev_ops evdev_sw_ops = {
+ static struct rte_eventdev_ops evdev_sw_ops = {
.dev_configure = sw_dev_configure,
.dev_infos_get = sw_info_get,
.dev_close = sw_close,
@@ -791,6 +818,10 @@ sw_probe(struct rte_vdev_device *vdev)
.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
+ .timer_adapter_caps_get = sw_timer_adapter_caps_get,
+
+ .crypto_adapter_caps_get = sw_crypto_adapter_caps_get,
+
.xstats_get = sw_xstats_get,
.xstats_get_names = sw_xstats_get_names,
.xstats_get_by_name = sw_xstats_get_by_name,
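The two caps callbacks added here back the application-facing capability queries; the timer one reporting *ops = NULL tells the adapter layer to fall back to the software timer implementation. A rough caller-side sketch, assuming dev_id and cdev_id are valid ids in the running application; only one of the defined crypto capability bits is checked:

#include <stdio.h>
#include <rte_eventdev.h>

/* Hedged sketch: query timer and crypto adapter capabilities for an event
 * device, e.g. to decide whether a service core must run the adapter.
 */
static int
demo_query_adapter_caps(uint8_t dev_id, uint8_t cdev_id)
{
	uint32_t timer_caps = 0;
	uint32_t crypto_caps = 0;

	if (rte_event_timer_adapter_caps_get(dev_id, &timer_caps) < 0)
		return -1;
	if (rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &crypto_caps) < 0)
		return -1;

	if (crypto_caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)
		printf("crypto adapter enqueues OP_NEW events via an internal port\n");
	else
		printf("crypto adapter needs a SW service to transfer events\n");
	return 0;
}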
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index 3106eb33..e3a41e02 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -508,7 +508,7 @@ sw_event_schedule(struct rte_eventdev *dev)
uint32_t i;
sw->sched_called++;
- if (!sw->started)
+ if (unlikely(!sw->started))
return;
do {
@@ -532,8 +532,7 @@ sw_event_schedule(struct rte_eventdev *dev)
} while (in_pkts > 4 &&
(int)in_pkts_this_iteration < sched_quanta);
- out_pkts = 0;
- out_pkts += sw_schedule_qid_to_cq(sw);
+ out_pkts = sw_schedule_qid_to_cq(sw);
out_pkts_total += out_pkts;
in_pkts_total += in_pkts_this_iteration;
@@ -541,6 +540,12 @@ sw_event_schedule(struct rte_eventdev *dev)
break;
} while ((int)out_pkts_total < sched_quanta);
+ sw->stats.tx_pkts += out_pkts_total;
+ sw->stats.rx_pkts += in_pkts_total;
+
+ sw->sched_no_iq_enqueues += (in_pkts_total == 0);
+ sw->sched_no_cq_enqueues += (out_pkts_total == 0);
+
/* push all the internal buffered QEs in port->cq_ring to the
* worker cores: aka, do the ring transfers batched.
*/
@@ -552,10 +557,4 @@ sw_event_schedule(struct rte_eventdev *dev)
sw->ports[i].cq_buf_count = 0;
}
- sw->stats.tx_pkts += out_pkts_total;
- sw->stats.rx_pkts += in_pkts_total;
-
- sw->sched_no_iq_enqueues += (in_pkts_total == 0);
- sw->sched_no_cq_enqueues += (out_pkts_total == 0);
-
}
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index 67151f77..063b919c 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -77,8 +77,10 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
rte_atomic32_add(&sw->inflights, credit_update_quanta);
p->inflight_credits += (credit_update_quanta);
- if (p->inflight_credits < new)
- return 0;
+ /* If there are fewer inflight credits than new events, limit
+ * the number of enqueued events.
+ */
+ num = (p->inflight_credits < new) ? p->inflight_credits : new;
}
for (i = 0; i < num; i++) {