Diffstat (limited to 'drivers/event/octeontx')
-rw-r--r--  drivers/event/octeontx/Makefile                               70
-rw-r--r--  drivers/event/octeontx/rte_pmd_octeontx_ssovf.h               61
-rw-r--r--  drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map      9
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.c                         580
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.h                         203
-rw-r--r--  drivers/event/octeontx/ssovf_mbox.c                          232
-rw-r--r--  drivers/event/octeontx/ssovf_probe.c                         288
-rw-r--r--  drivers/event/octeontx/ssovf_worker.c                        252
-rw-r--r--  drivers/event/octeontx/ssovf_worker.h                        139
9 files changed, 1834 insertions, 0 deletions
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
new file mode 100644
index 00000000..aca3d095
--- /dev/null
+++ b/drivers/event/octeontx/Makefile
@@ -0,0 +1,70 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium Networks. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium Networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_octeontx_ssovf.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_mbox.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_ssovf_worker.o += -Ofast
+else
+CFLAGS_ssovf_worker.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_ssovf_worker.o += -Ofast
+endif
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)-include := rte_pmd_octeontx_ssovf.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
new file mode 100644
index 00000000..3da7cfdd
--- /dev/null
+++ b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
@@ -0,0 +1,61 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTE_PMD_OCTEONTX_SSOVF_H__
+#define __RTE_PMD_OCTEONTX_SSOVF_H__
+
+#include <rte_common.h>
+
+struct octeontx_ssovf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_ssovfs; /* Total sso groups available in domain */
+	uint8_t total_ssowvfs; /* Total sso hws available in domain */
+};
+
+enum octeontx_ssovf_type {
+ OCTEONTX_SSO_GROUP, /* SSO group vf */
+ OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
+};
+
+struct octeontx_mbox_hdr {
+ uint16_t vfid; /* VF index or pf resource index local to the domain */
+ uint8_t coproc; /* Coprocessor id */
+ uint8_t msg; /* Message id */
+ uint8_t res_code; /* Functional layer response code */
+};
+
+int octeontx_ssovf_info(struct octeontx_ssovf_info *info);
+void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar);
+int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr,
+ void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
+
+#endif /* __RTE_PMD_OCTEONTX_SSOVF_H__ */
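For context, the three prototypes above are the whole interface that co-operating OcteonTX drivers use to discover SSO resources and reach the PF mailbox. A minimal consumer sketch (illustrative only; the printed fields and error handling are assumptions, not part of this patch):

```c
#include <stdio.h>
#include "rte_pmd_octeontx_ssovf.h"

static int
example_query_sso(void)
{
	struct octeontx_ssovf_info info;

	/* Fails unless the SSO group and workslot VFs have been probed */
	if (octeontx_ssovf_info(&info) < 0)
		return -1;

	printf("domain=%u groups=%u workslots=%u\n",
	       info.domain, info.total_ssovfs, info.total_ssowvfs);

	/* BAR0 of the first SSO group VF in the domain */
	return octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0) ? 0 : -1;
}
```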
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map b/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
new file mode 100644
index 00000000..3810a03f
--- /dev/null
+++ b/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
@@ -0,0 +1,9 @@
+DPDK_17.05 {
+ global:
+
+ octeontx_ssovf_info;
+ octeontx_ssovf_bar;
+ octeontx_ssovf_mbox_send;
+
+ local: *;
+};
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
new file mode 100644
index 00000000..c80a4437
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -0,0 +1,580 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_vdev.h>
+
+#include "ssovf_evdev.h"
+
+/* SSOPF Mailbox messages */
+
+struct ssovf_mbox_dev_info {
+ uint64_t min_deq_timeout_ns;
+ uint64_t max_deq_timeout_ns;
+ uint32_t max_num_events;
+};
+
+static int
+ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct ssovf_mbox_dev_info);
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_GET_DEV_INFO;
+ hdr.vfid = 0;
+
+ memset(info, 0, len);
+ return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
+}
+
+struct ssovf_mbox_getwork_wait {
+ uint64_t wait_ns;
+};
+
+static int
+ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_getwork_wait tmo_set;
+ uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_SET_GETWORK_WAIT;
+ hdr.vfid = 0;
+
+ tmo_set.wait_ns = timeout_ns;
+ ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
+ if (ret)
+ ssovf_log_err("Failed to set getwork timeout(%d)", ret);
+
+ return ret;
+}
+
+struct ssovf_mbox_grp_pri {
+ uint8_t wgt_left; /* Read only */
+ uint8_t weight;
+ uint8_t affinity;
+ uint8_t priority;
+};
+
+static int
+ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_grp_pri grp;
+ uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_GRP_SET_PRIORITY;
+ hdr.vfid = queue;
+
+ grp.weight = 0xff;
+ grp.affinity = 0xff;
+	grp.priority = prio / 32; /* Normalize 0..255 eventdev range to 0..7 */
+
+ ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
+ if (ret)
+ ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);
+
+ return ret;
+}
+
+struct ssovf_mbox_convert_ns_getworks_iter {
+ uint64_t wait_ns;
+	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
+};
+
+static int
+ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
+ uint16_t len = sizeof(ns2iter);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
+ hdr.vfid = 0;
+
+ memset(&ns2iter, 0, len);
+ ns2iter.wait_ns = ns;
+ ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
+ if (ret < 0 || (ret != len)) {
+ ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
+ return -EIO;
+ }
+
+ *tmo_ticks = ns2iter.getwork_iter;
+ return 0;
+}
+
+static void
+ssovf_fastpath_fns_set(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev->schedule = NULL;
+ dev->enqueue = ssows_enq;
+ dev->enqueue_burst = ssows_enq_burst;
+ dev->dequeue = ssows_deq;
+ dev->dequeue_burst = ssows_deq_burst;
+
+ if (edev->is_timeout_deq) {
+ dev->dequeue = ssows_deq_timeout;
+ dev->dequeue_burst = ssows_deq_timeout_burst;
+ }
+}
+
+static void
+ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
+ dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
+ dev_info->max_event_queues = edev->max_event_queues;
+ dev_info->max_event_queue_flows = (1ULL << 20);
+ dev_info->max_event_queue_priority_levels = 8;
+ dev_info->max_event_priority_levels = 1;
+ dev_info->max_event_ports = edev->max_event_ports;
+ dev_info->max_event_port_dequeue_depth = 1;
+ dev_info->max_event_port_enqueue_depth = 1;
+ dev_info->max_num_events = edev->max_num_events;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
+}
+
+static int
+ssovf_configure(const struct rte_eventdev *dev)
+{
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint64_t deq_tmo_ns;
+
+ ssovf_func_trace();
+ deq_tmo_ns = conf->dequeue_timeout_ns;
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ edev->is_timeout_deq = 1;
+ deq_tmo_ns = edev->min_deq_timeout_ns;
+ }
+ edev->nb_event_queues = conf->nb_event_queues;
+ edev->nb_event_ports = conf->nb_event_ports;
+
+ return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
+}
+
+static void
+ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = (1ULL << 20);
+ queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ RTE_SET_USED(dev);
+ ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);
+
+ return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
+}
+
+static void
+ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ RTE_SET_USED(port_id);
+ port_conf->new_event_threshold = edev->max_num_events;
+ port_conf->dequeue_depth = 1;
+ port_conf->enqueue_depth = 1;
+}
+
+static void
+ssovf_port_release(void *port)
+{
+ rte_free(port);
+}
+
+static int
+ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct ssows *ws;
+ uint32_t reg_off;
+ uint8_t q;
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ ssovf_func_trace("port=%d", port_id);
+ RTE_SET_USED(port_conf);
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->ports[port_id] != NULL) {
+ ssovf_port_release(dev->data->ports[port_id]);
+ dev->data->ports[port_id] = NULL;
+ }
+
+ /* Allocate event port memory */
+ ws = rte_zmalloc_socket("eventdev ssows",
+ sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
+ dev->data->socket_id);
+ if (ws == NULL) {
+ ssovf_log_err("Failed to alloc memory for port=%d", port_id);
+ return -ENOMEM;
+ }
+
+ ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
+ if (ws->base == NULL) {
+ rte_free(ws);
+ ssovf_log_err("Failed to get hws base addr port=%d", port_id);
+ return -EINVAL;
+ }
+
+ reg_off = SSOW_VHWS_OP_GET_WORK0;
+ reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
+ reg_off |= 1 << 16; /* Wait */
+ ws->getwork = ws->base + reg_off;
+ ws->port = port_id;
+
+ for (q = 0; q < edev->nb_event_queues; q++) {
+ ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
+ if (ws->grps[q] == NULL) {
+ rte_free(ws);
+ ssovf_log_err("Failed to get grp%d base addr", q);
+ return -EINVAL;
+ }
+ }
+
+ dev->data->ports[port_id] = ws;
+ ssovf_log_dbg("port=%d ws=%p", port_id, ws);
+ return 0;
+}
+
+static int
+ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t nb_links)
+{
+ uint16_t link;
+ uint64_t val;
+ struct ssows *ws = port;
+
+ ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
+ RTE_SET_USED(dev);
+ RTE_SET_USED(priorities);
+
+ for (link = 0; link < nb_links; link++) {
+ val = queues[link];
+ val |= (1ULL << 24); /* Set membership */
+ ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
+ }
+ return (int)nb_links;
+}
+
+static int
+ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ uint16_t unlink;
+ uint64_t val;
+ struct ssows *ws = port;
+
+	ssovf_func_trace("port=%d nb_unlinks=%d", ws->port, nb_unlinks);
+ RTE_SET_USED(dev);
+
+ for (unlink = 0; unlink < nb_unlinks; unlink++) {
+ val = queues[unlink];
+ val &= ~(1ULL << 24); /* Clear membership */
+ ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
+ }
+ return (int)nb_unlinks;
+}
+
+static int
+ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
+{
+ RTE_SET_USED(dev);
+
+ return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
+}
+
+static void
+ssows_dump(struct ssows *ws, FILE *f)
+{
+ uint8_t *base = ws->base;
+ uint64_t val;
+
+ fprintf(f, "\t---------------port%d---------------\n", ws->port);
+ val = ssovf_read64(base + SSOW_VHWS_TAG);
+ fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
+ (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
+ (int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
+ (int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
+ (int)(val >> 63) & 0x1);
+
+ val = ssovf_read64(base + SSOW_VHWS_WQP);
+ fprintf(f, "\twqp=0x%"PRIx64"\n", val);
+
+ val = ssovf_read64(base + SSOW_VHWS_LINKS);
+ fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
+ (int)(val & 0x3ff), (int)(val >> 10) & 0x1,
+ (int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
+ (int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);
+
+ val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
+ fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
+ (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
+ (int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
+ (int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
+ (int)(val >> 63) & 0x1);
+
+ val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
+ fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
+}
+
+static void
+ssovf_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint8_t port;
+
+ /* Dump SSOWVF debug registers */
+ for (port = 0; port < edev->nb_event_ports; port++)
+ ssows_dump(dev->data->ports[port], f);
+}
+
+static int
+ssovf_start(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ struct ssows *ws;
+ uint8_t *base;
+ uint8_t i;
+
+ ssovf_func_trace();
+ for (i = 0; i < edev->nb_event_ports; i++) {
+ ws = dev->data->ports[i];
+ ssows_reset(ws);
+ ws->swtag_req = 0;
+ }
+
+ for (i = 0; i < edev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssows_flush_events(dev->data->ports[0], i);
+
+ base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base += SSO_VHGRP_QCTL;
+ ssovf_write64(1, base); /* Enable SSO group */
+ }
+
+ ssovf_fastpath_fns_set(dev);
+ return 0;
+}
+
+static void
+ssovf_stop(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ struct ssows *ws;
+ uint8_t *base;
+ uint8_t i;
+
+ ssovf_func_trace();
+ for (i = 0; i < edev->nb_event_ports; i++) {
+ ws = dev->data->ports[i];
+ ssows_reset(ws);
+ ws->swtag_req = 0;
+ }
+
+ for (i = 0; i < edev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssows_flush_events(dev->data->ports[0], i);
+
+ base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base += SSO_VHGRP_QCTL;
+ ssovf_write64(0, base); /* Disable SSO group */
+ }
+}
+
+static int
+ssovf_close(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t i;
+
+ for (i = 0; i < edev->nb_event_queues; i++)
+ all_queues[i] = i;
+
+ for (i = 0; i < edev->nb_event_ports; i++)
+ ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
+ edev->nb_event_queues);
+ return 0;
+}
+
+/* Initialize and register the event driver with the DPDK application */
+static const struct rte_eventdev_ops ssovf_ops = {
+ .dev_infos_get = ssovf_info_get,
+ .dev_configure = ssovf_configure,
+ .queue_def_conf = ssovf_queue_def_conf,
+ .queue_setup = ssovf_queue_setup,
+ .queue_release = ssovf_queue_release,
+ .port_def_conf = ssovf_port_def_conf,
+ .port_setup = ssovf_port_setup,
+ .port_release = ssovf_port_release,
+ .port_link = ssovf_port_link,
+ .port_unlink = ssovf_port_unlink,
+ .timeout_ticks = ssovf_timeout_ticks,
+ .dump = ssovf_dump,
+ .dev_start = ssovf_start,
+ .dev_stop = ssovf_stop,
+ .dev_close = ssovf_close
+};
+
+static int
+ssovf_vdev_probe(struct rte_vdev_device *vdev)
+{
+ struct octeontx_ssovf_info oinfo;
+ struct ssovf_mbox_dev_info info;
+ struct ssovf_evdev *edev;
+ struct rte_eventdev *eventdev;
+ static int ssovf_init_once;
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ /* More than one instance is not supported */
+ if (ssovf_init_once) {
+ ssovf_log_err("Request to create >1 %s instance", name);
+ return -EINVAL;
+ }
+
+ eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ ssovf_log_err("Failed to create eventdev vdev %s", name);
+ return -ENOMEM;
+ }
+ eventdev->dev_ops = &ssovf_ops;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ssovf_fastpath_fns_set(eventdev);
+ return 0;
+ }
+
+ ret = octeontx_ssovf_info(&oinfo);
+ if (ret) {
+ ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
+ goto error;
+ }
+
+ edev = ssovf_pmd_priv(eventdev);
+ edev->max_event_ports = oinfo.total_ssowvfs;
+ edev->max_event_queues = oinfo.total_ssovfs;
+ edev->is_timeout_deq = 0;
+
+ ret = ssovf_mbox_dev_info(&info);
+ if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
+ ssovf_log_err("Failed to get mbox devinfo %d", ret);
+ goto error;
+ }
+
+ edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
+ edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
+ edev->max_num_events = info.max_num_events;
+ ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
+ info.min_deq_timeout_ns, info.max_deq_timeout_ns,
+ info.max_num_events);
+
+ if (!edev->max_event_ports || !edev->max_event_queues) {
+ ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
+ edev->max_event_queues, edev->max_event_ports);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
+ name, oinfo.domain, edev->max_event_queues,
+ edev->max_event_ports);
+
+ ssovf_init_once = 1;
+ return 0;
+
+error:
+ rte_event_pmd_vdev_uninit(name);
+ return ret;
+}
+
+static int
+ssovf_vdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ ssovf_log_info("Closing %s", name);
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_ssovf_pmd = {
+ .probe = ssovf_vdev_probe,
+ .remove = ssovf_vdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
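The PMD registers itself as a virtual device, so an application instantiates it with --vdev="event_octeontx" and then drives it entirely through the common eventdev API. A minimal setup sketch under that assumption (default queue/port configs, all-queue links; not part of this patch):

```c
#include <rte_eventdev.h>

static int
example_eventdev_setup(void)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};
	uint8_t q, p;

	int dev_id = rte_event_dev_get_dev_id("event_octeontx");
	if (dev_id < 0)
		return -1;

	rte_event_dev_info_get(dev_id, &info);
	cfg.nb_event_queues = info.max_event_queues;
	cfg.nb_event_ports = info.max_event_ports;
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = 1; /* this PMD reports depth 1 */
	cfg.nb_event_port_enqueue_depth = 1;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	if (rte_event_dev_configure(dev_id, &cfg) < 0)
		return -1;

	for (q = 0; q < cfg.nb_event_queues; q++)
		rte_event_queue_setup(dev_id, q, NULL); /* default conf */
	for (p = 0; p < cfg.nb_event_ports; p++) {
		rte_event_port_setup(dev_id, p, NULL);  /* default conf */
		/* NULL queue list links the port to every queue */
		rte_event_port_link(dev_id, p, NULL, NULL, 0);
	}
	return rte_event_dev_start(dev_id);
}
```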
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
new file mode 100644
index 00000000..6e0a3521
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -0,0 +1,203 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SSOVF_EVDEV_H__
+#define __SSOVF_EVDEV_H__
+
+#include <rte_config.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_io.h>
+
+#include "rte_pmd_octeontx_ssovf.h"
+
+#define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
+
+#ifdef RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG
+#define ssovf_log_info(fmt, args...) \
+ RTE_LOG(INFO, EVENTDEV, "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+#define ssovf_log_dbg(fmt, args...) \
+ RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+#else
+#define ssovf_log_info(fmt, args...)
+#define ssovf_log_dbg(fmt, args...)
+#endif
+
+#define ssovf_func_trace ssovf_log_dbg
+#define ssovf_log_err(fmt, args...) \
+ RTE_LOG(ERR, EVENTDEV, "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
+#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
+
+#define SSO_MAX_VHGRP (64)
+#define SSO_MAX_VHWS (32)
+
+/* SSO VF register offsets */
+#define SSO_VHGRP_QCTL (0x010ULL)
+#define SSO_VHGRP_INT (0x100ULL)
+#define SSO_VHGRP_INT_W1S (0x108ULL)
+#define SSO_VHGRP_INT_ENA_W1S (0x110ULL)
+#define SSO_VHGRP_INT_ENA_W1C (0x118ULL)
+#define SSO_VHGRP_INT_THR (0x140ULL)
+#define SSO_VHGRP_INT_CNT (0x180ULL)
+#define SSO_VHGRP_XAQ_CNT (0x1B0ULL)
+#define SSO_VHGRP_AQ_CNT (0x1C0ULL)
+#define SSO_VHGRP_AQ_THR (0x1E0ULL)
+#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+
+/* BAR2 */
+#define SSO_VHGRP_OP_ADD_WORK0 (0x00ULL)
+#define SSO_VHGRP_OP_ADD_WORK1 (0x08ULL)
+
+/* SSOW VF register offsets (BAR0) */
+#define SSOW_VHWS_GRPMSK_CHGX(x) (0x080ULL | ((x) << 3))
+#define SSOW_VHWS_TAG (0x300ULL)
+#define SSOW_VHWS_WQP (0x308ULL)
+#define SSOW_VHWS_LINKS (0x310ULL)
+#define SSOW_VHWS_PENDTAG (0x340ULL)
+#define SSOW_VHWS_PENDWQP (0x348ULL)
+#define SSOW_VHWS_SWTP (0x400ULL)
+#define SSOW_VHWS_OP_ALLOC_WE (0x410ULL)
+#define SSOW_VHWS_OP_UPD_WQP_GRP0 (0x440ULL)
+#define SSOW_VHWS_OP_UPD_WQP_GRP1 (0x448ULL)
+#define SSOW_VHWS_OP_SWTAG_UNTAG (0x490ULL)
+#define SSOW_VHWS_OP_SWTAG_CLR (0x820ULL)
+#define SSOW_VHWS_OP_DESCHED (0x860ULL)
+#define SSOW_VHWS_OP_DESCHED_NOSCH (0x870ULL)
+#define SSOW_VHWS_OP_SWTAG_DESCHED (0x8C0ULL)
+#define SSOW_VHWS_OP_SWTAG_NOSCHED (0x8D0ULL)
+#define SSOW_VHWS_OP_SWTP_SET (0xC20ULL)
+#define SSOW_VHWS_OP_SWTAG_NORM (0xC80ULL)
+#define SSOW_VHWS_OP_SWTAG_FULL0	(0xCA0ULL)
+#define SSOW_VHWS_OP_SWTAG_FULL1 (0xCA8ULL)
+#define SSOW_VHWS_OP_CLR_NSCHED (0x10000ULL)
+#define SSOW_VHWS_OP_GET_WORK0 (0x80000ULL)
+#define SSOW_VHWS_OP_GET_WORK1 (0x80008ULL)
+
+#define SSOW_BAR4_LEN (64 * 1024)
+
+/* Mailbox message constants */
+#define SSO_COPROC 0x2
+
+#define SSO_GETDOMAINCFG 0x1
+#define SSO_IDENTIFY 0x2
+#define SSO_GET_DEV_INFO 0x3
+#define SSO_GET_GETWORK_WAIT 0x4
+#define SSO_SET_GETWORK_WAIT 0x5
+#define SSO_CONVERT_NS_GETWORK_ITER 0x6
+#define SSO_GRP_GET_PRIORITY 0x7
+#define SSO_GRP_SET_PRIORITY 0x8
+
+/*
+ * In the Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed versions of the IO operations
+ * are safe to use without any IO memory barriers.
+ */
+#define ssovf_read64 rte_read64_relaxed
+#define ssovf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define ssovf_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define ssovf_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Unoptimized fallbacks for building on non-arm64 architectures */
+
+#define ssovf_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define ssovf_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+
+struct ssovf_evdev {
+ uint8_t max_event_queues;
+ uint8_t max_event_ports;
+ uint8_t is_timeout_deq;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint32_t min_deq_timeout_ns;
+ uint32_t max_deq_timeout_ns;
+ int32_t max_num_events;
+} __rte_cache_aligned;
+
+/* Event port aka HWS */
+struct ssows {
+ uint8_t cur_tt;
+ uint8_t cur_grp;
+ uint8_t swtag_req;
+ uint8_t *base;
+ uint8_t *getwork;
+ uint8_t *grps[SSO_MAX_VHGRP];
+ uint8_t port;
+} __rte_cache_aligned;
+
+static inline struct ssovf_evdev *
+ssovf_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+uint16_t ssows_enq(void *port, const struct rte_event *ev);
+uint16_t ssows_enq_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
+uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
+void ssows_reset(struct ssows *ws);
+
+#endif /* __SSOVF_EVDEV_H__ */
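A note on the paired accessors above: GET_WORK and ADD_WORK exchange two 64-bit words with the SSO in a single access, which is why arm64 builds use ldp/stp rather than two separate loads or stores. A sketch of how a caller is expected to use them (illustrative only, not part of this patch):

```c
/* Illustrative caller of the paired accessors defined in ssovf_evdev.h */
static inline uint64_t
example_load_getwork(struct ssows *ws, uint64_t *wqe)
{
	uint64_t w0, w1;

	ssovf_load_pair(w0, w1, ws->getwork); /* single ldp on arm64 */
	*wqe = w1;	/* work queue entry pointer */
	return w0;	/* tag/tt/group word */
}
```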
diff --git a/drivers/event/octeontx/ssovf_mbox.c b/drivers/event/octeontx/ssovf_mbox.c
new file mode 100644
index 00000000..7394a3a9
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_mbox.c
@@ -0,0 +1,232 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_spinlock.h>
+
+#include "ssovf_evdev.h"
+
+/* Mbox operation timeout in seconds */
+#define MBOX_WAIT_TIME_SEC 3
+#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
+
+/* Mbox channel state */
+enum {
+ MBOX_CHAN_STATE_REQ = 1,
+ MBOX_CHAN_STATE_RES = 0,
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+struct mbox {
+ int init_once;
+ uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
+ uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
+ uint16_t tag_own; /* Last tag which was written to own channel */
+ rte_spinlock_t lock;
+};
+
+static struct mbox octeontx_mbox;
+
+/*
+ * Structure used for mbox synchronization.
+ * It sits at the beginning of the mbox RAM and is used as the main
+ * synchronization point for channel communication.
+ */
+struct mbox_ram_hdr {
+ union {
+ uint64_t u64;
+ struct {
+ uint8_t chan_state : 1;
+ uint8_t coproc : 7;
+ uint8_t msg;
+ uint8_t vfid;
+ uint8_t res_code;
+ uint16_t tag;
+ uint16_t len;
+ };
+ };
+};
+
+static inline void
+mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ const void *txmsg, uint16_t txsize)
+{
+ struct mbox_ram_hdr old_hdr;
+ struct mbox_ram_hdr new_hdr = { {0} };
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+	/*
+	 * Initialize the channel with the tag left by the last send.
+	 * On successful completion of a mbox send, the PF increments the
+	 * tag by one; the sender can validate the integrity of the PF
+	 * message with this scheme.
+	 */
+ old_hdr.u64 = rte_read64(ram_mbox_hdr);
+ m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
+
+ /* Copy msg body */
+ if (txmsg)
+ memcpy(ram_mbox_msg, txmsg, txsize);
+
+ /* Prepare new hdr */
+ new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
+ new_hdr.coproc = hdr->coproc;
+ new_hdr.msg = hdr->msg;
+ new_hdr.vfid = hdr->vfid;
+ new_hdr.tag = m->tag_own;
+ new_hdr.len = txsize;
+
+ /* Write the msg header */
+ rte_write64(new_hdr.u64, ram_mbox_hdr);
+ rte_io_wmb();
+ /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
+ rte_write64(0, m->reg);
+}
+
+static inline int
+mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ void *rxmsg, uint16_t rxsize)
+{
+ int res = 0, wait;
+ uint16_t len;
+ struct mbox_ram_hdr rx_hdr;
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+	/* Wait for response: poll every 100us, up to MBOX_WAIT_TIME_SEC */
+	wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
+ while (wait > 0) {
+ rte_delay_us(100);
+ rx_hdr.u64 = rte_read64(ram_mbox_hdr);
+ if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
+ break;
+ --wait;
+ }
+
+ hdr->res_code = rx_hdr.res_code;
+ m->tag_own++;
+
+ /* Timeout */
+ if (wait <= 0) {
+ res = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Tag mismatch */
+ if (m->tag_own != rx_hdr.tag) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ /* PF nacked the msg */
+ if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
+ res = -EBADMSG;
+ goto error;
+ }
+
+ len = RTE_MIN(rx_hdr.len, rxsize);
+ if (rxmsg)
+ memcpy(rxmsg, ram_mbox_msg, len);
+
+ return len;
+
+error:
+	ssovf_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
+			m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
+			hdr->res_code);
+ return res;
+}
+
+static inline int
+mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
+ uint16_t txsize, void *rxmsg, uint16_t rxsize)
+{
+ int res = -EINVAL;
+
+ if (m->init_once == 0 || hdr == NULL ||
+ txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
+ ssovf_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
+ m->init_once, hdr, txsize, rxsize);
+ return res;
+ }
+
+ rte_spinlock_lock(&m->lock);
+
+ mbox_send_request(m, hdr, txmsg, txsize);
+ res = mbox_wait_response(m, hdr, rxmsg, rxsize);
+
+ rte_spinlock_unlock(&m->lock);
+ return res;
+}
+
+static inline int
+mbox_setup(struct mbox *m)
+{
+ if (unlikely(m->init_once == 0)) {
+ rte_spinlock_init(&m->lock);
+ m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
+ m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
+ m->reg += SSO_VHGRP_PF_MBOX(1);
+
+ if (m->ram_mbox_base == NULL || m->reg == NULL) {
+ ssovf_log_err("Invalid ram_mbox_base=%p or reg=%p",
+ m->ram_mbox_base, m->reg);
+ return -EINVAL;
+ }
+ m->init_once = 1;
+ }
+ return 0;
+}
+
+int
+octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
+ uint16_t txlen, void *rxdata, uint16_t rxlen)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m))
+ return -EINVAL;
+
+ return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
+}
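To make the tag scheme above concrete: the VF claims the next even tag before each request, and a well-behaved PF replies with that tag plus one, which mbox_wait_response() verifies after its own increment. A worked example of the arithmetic (illustrative, not driver code):

```c
#include <assert.h>
#include <stdint.h>

static void
example_tag_handshake(void)
{
	uint16_t last_tag = 7;                    /* tag left in RAM header */
	uint16_t tag_own = (last_tag + 2) & ~0x1; /* 8: next even number */
	uint16_t pf_reply = tag_own + 1;          /* PF increments on success */

	/* mbox_wait_response() pre-increments tag_own before comparing */
	assert(++tag_own == pf_reply);
}
```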
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c
new file mode 100644
index 00000000..b644ebde
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -0,0 +1,288 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+
+#include "ssovf_evdev.h"
+
+struct ssovf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+};
+
+struct ssowvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct ssowvf_identify {
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct ssodev {
+ uint8_t total_ssovfs;
+ uint8_t total_ssowvfs;
+ struct ssovf_res grp[SSO_MAX_VHGRP];
+ struct ssowvf_res hws[SSO_MAX_VHWS];
+};
+
+static struct ssodev sdev;
+
+/* Interface functions */
+int
+octeontx_ssovf_info(struct octeontx_ssovf_info *info)
+{
+ uint8_t i;
+ uint16_t domain;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL)
+ return -EINVAL;
+
+ if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0)
+ return -ENODEV;
+
+ domain = sdev.grp[0].domain;
+ for (i = 0; i < sdev.total_ssovfs; i++) {
+		/* Check that vfids are contiguous and belong to the same domain */
+ if (sdev.grp[i].vfid != i ||
+ sdev.grp[i].bar0 == NULL ||
+ sdev.grp[i].domain != domain) {
+ ssovf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.grp[i].vfid,
+ domain, sdev.grp[i].domain,
+ sdev.grp[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < sdev.total_ssowvfs; i++) {
+		/* Check that vfids are contiguous and belong to the same domain */
+ if (sdev.hws[i].vfid != i ||
+ sdev.hws[i].bar0 == NULL ||
+ sdev.hws[i].domain != domain) {
+ ssovf_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.hws[i].vfid,
+ domain, sdev.hws[i].domain,
+ sdev.hws[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ info->domain = domain;
+ info->total_ssovfs = sdev.total_ssovfs;
+ info->total_ssowvfs = sdev.total_ssowvfs;
+ return 0;
+}
+
+void *
+octeontx_ssovf_bar(enum octeontx_ssovf_type type, uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ type > OCTEONTX_SSO_HWS)
+ return NULL;
+
+	if (type == OCTEONTX_SSO_GROUP) {
+		if (id >= sdev.total_ssovfs)
+			return NULL;
+		switch (bar) {
+		case 0:
+			return sdev.grp[id].bar0;
+		case 2:
+			return sdev.grp[id].bar2;
+		default:
+			return NULL;
+		}
+	} else {
+		if (id >= sdev.total_ssowvfs)
+			return NULL;
+		switch (bar) {
+		case 0:
+			return sdev.hws[id].bar0;
+		case 2:
+			return sdev.hws[id].bar2;
+		case 4:
+			return sdev.hws[id].bar4;
+		default:
+			return NULL;
+		}
+	}
+}
+
+/* SSOWVF PCIe device aka event port probe */
+
+static int
+ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint16_t vfid;
+ struct ssowvf_res *res;
+ struct ssowvf_identify *id;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ ssovf_log_err("Empty bars %p %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
+ if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
+ ssovf_log_err("Bar4 len mismatch %d != %d",
+ SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
+ return -EINVAL;
+ }
+
+ id = pci_dev->mem_resource[4].addr;
+ vfid = id->vfid;
+ if (vfid >= SSO_MAX_VHWS) {
+ ssovf_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
+ return -EINVAL;
+ }
+
+ res = &sdev.hws[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = id->domain;
+
+ sdev.total_ssowvfs++;
+ rte_wmb();
+ ssovf_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
+ res->vfid, sdev.total_ssowvfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssowvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOWS_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssowvf = {
+ .id_table = pci_ssowvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssowvf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf);
+
+/* SSOVF PCIe device aka event queue probe */
+
+static int
+ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ uint8_t *idreg;
+ struct ssovf_res *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL) {
+ ssovf_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+ idreg += SSO_VHGRP_AQ_THR;
+ val = rte_read64(idreg);
+
+ /* Write back the default value of aq_thr */
+ rte_write64((1ULL << 33) - 1, idreg);
+ vfid = (val >> 16) & 0xffff;
+ if (vfid >= SSO_MAX_VHGRP) {
+ ssovf_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
+ return -EINVAL;
+ }
+
+ res = &sdev.grp[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->domain = val & 0xffff;
+
+ sdev.total_ssovfs++;
+ rte_wmb();
+ ssovf_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
+ res->vfid, sdev.total_ssovfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssovf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssovf = {
+ .id_table = pci_ssovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf);
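The two probes recover the {domain, vfid} identity differently: ssowvf_probe() reads the struct ssowvf_identify that the PF left in BAR4, while ssovf_probe() decodes the SSO_VHGRP_AQ_THR register, where the PF stashes the domain in bits [15:0] and the VF id in bits [31:16]. A sketch of that decode with made-up values:

```c
#include <stdint.h>

static void
example_decode_aq_thr(void)
{
	uint64_t val = (3ULL << 16) | 2;      /* made-up: vfid=3, domain=2 */
	uint16_t vfid = (val >> 16) & 0xffff; /* -> 3 */
	uint16_t domain = val & 0xffff;       /* -> 2 */

	(void)vfid;
	(void)domain;
}
```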
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
new file mode 100644
index 00000000..ad3fe684
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -0,0 +1,252 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ssovf_worker.h"
+
+static force_inline void
+ssows_new_event(struct ssows *ws, const struct rte_event *ev)
+{
+ const uint64_t event_ptr = ev->u64;
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+ const uint8_t grp = ev->queue_id;
+
+ ssows_add_work(ws, event_ptr, tag, new_tt, grp);
+}
+
+static force_inline void
+ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
+{
+ const uint8_t cur_tt = ws->cur_tt;
+ const uint8_t new_tt = ev->sched_type;
+ const uint32_t tag = (uint32_t)ev->event;
+ /*
+ * cur_tt/new_tt SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
+ *
+ * SSO_SYNC_ORDERED norm norm untag
+ * SSO_SYNC_ATOMIC norm norm untag
+ * SSO_SYNC_UNTAGGED full full NOOP
+ */
+ if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
+ if (new_tt != SSO_SYNC_UNTAGGED) {
+ ssows_swtag_full(ws, ev->u64, tag,
+ new_tt, grp);
+ }
+ } else {
+ if (likely(new_tt != SSO_SYNC_UNTAGGED))
+ ssows_swtag_norm(ws, tag, new_tt);
+ else
+ ssows_swtag_untag(ws);
+ }
+ ws->swtag_req = 1;
+}
+
+#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)
+
+static force_inline void
+ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
+{
+ const uint64_t event_ptr = ev->u64;
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t cur_tt = ws->cur_tt;
+ const uint8_t new_tt = ev->sched_type;
+
+ if (cur_tt == SSO_SYNC_ORDERED) {
+ /* Create unique tag based on custom event type and new grp */
+ uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;
+
+ newtag |= grp << 20;
+ newtag |= tag;
+ ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
+ rte_smp_wmb();
+ ssows_swtag_wait(ws);
+ } else {
+ rte_smp_wmb();
+ }
+ ssows_add_work(ws, event_ptr, tag, new_tt, grp);
+}
+
+static force_inline void
+ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
+{
+ const uint8_t grp = ev->queue_id;
+
+	/* Group hasn't changed; use SWTAG to forward the event */
+ if (ws->cur_grp == grp)
+ ssows_fwd_swtag(ws, ev, grp);
+ else
+		/*
+		 * Group has changed; for group-based work pipelining,
+		 * use the deschedule/add_work operation to transfer the
+		 * event to the new group/core.
+		 */
+ ssows_fwd_group(ws, ev, grp);
+}
+
+static force_inline void
+ssows_release_event(struct ssows *ws)
+{
+ if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
+ ssows_swtag_untag(ws);
+}
+
+force_inline uint16_t __hot
+ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ struct ssows *ws = port;
+
+ RTE_SET_USED(timeout_ticks);
+
+ ssows_swtag_wait(ws);
+ if (ws->swtag_req) {
+ ws->swtag_req = 0;
+ return 1;
+ } else {
+ return ssows_get_work(ws, ev);
+ }
+}
+
+force_inline uint16_t __hot
+ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ struct ssows *ws = port;
+ uint64_t iter;
+ uint16_t ret = 1;
+
+ ssows_swtag_wait(ws);
+ if (ws->swtag_req) {
+ ws->swtag_req = 0;
+ } else {
+ ret = ssows_get_work(ws, ev);
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
+ ret = ssows_get_work(ws, ev);
+ }
+ return ret;
+}
+
+uint16_t __hot
+ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
+ uint64_t timeout_ticks)
+{
+ RTE_SET_USED(nb_events);
+
+ return ssows_deq(port, ev, timeout_ticks);
+}
+
+uint16_t __hot
+ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
+ uint64_t timeout_ticks)
+{
+ RTE_SET_USED(nb_events);
+
+ return ssows_deq_timeout(port, ev, timeout_ticks);
+}
+
+force_inline uint16_t __hot
+ssows_enq(void *port, const struct rte_event *ev)
+{
+ struct ssows *ws = port;
+ uint16_t ret = 1;
+
+ switch (ev->op) {
+ case RTE_EVENT_OP_NEW:
+ ssows_new_event(ws, ev);
+ break;
+ case RTE_EVENT_OP_FORWARD:
+ ssows_forward_event(ws, ev);
+ break;
+ case RTE_EVENT_OP_RELEASE:
+ ssows_release_event(ws);
+ break;
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+uint16_t __hot
+ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ RTE_SET_USED(nb_events);
+ return ssows_enq(port, ev);
+}
+
+void
+ssows_flush_events(struct ssows *ws, uint8_t queue_id)
+{
+ uint32_t reg_off;
+ uint64_t aq_cnt = 1;
+ uint64_t cq_ds_cnt = 1;
+ uint64_t enable, get_work0, get_work1;
+ uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
+
+ enable = ssovf_read64(base + SSO_VHGRP_QCTL);
+ if (!enable)
+ return;
+
+ reg_off = SSOW_VHWS_OP_GET_WORK0;
+ reg_off |= 1 << 17; /* Grouped */
+ reg_off |= 1 << 16; /* WAIT */
+ reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
+ while (aq_cnt || cq_ds_cnt) {
+ aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
+ cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
+ /* Extract cq and ds count */
+ cq_ds_cnt &= 0x1FFF1FFF0000;
+ ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
+ }
+
+ RTE_SET_USED(get_work0);
+ RTE_SET_USED(get_work1);
+}
+
+void
+ssows_reset(struct ssows *ws)
+{
+ uint64_t tag;
+ uint64_t pend_tag;
+ uint8_t pend_tt;
+ uint8_t tt;
+
+ tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
+ pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);
+
+ if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
+ pend_tt = (pend_tag >> 32) & 0x3;
+ if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
+ ssows_desched(ws);
+ } else {
+ tt = (tag >> 32) & 0x3;
+ if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
+ ssows_swtag_untag(ws);
+ }
+}
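On the fast path these handlers are reached through the eventdev burst API; since this PMD reports an enqueue/dequeue depth of one, each burst call maps to exactly one event. A minimal worker-loop sketch (the processing step and the FORWARD op are illustrative assumptions):

```c
#include <rte_eventdev.h>

static void
example_worker(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event ev;

	for (;;) {
		if (!rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0))
			continue;

		/* ... process ev.mbuf / ev.u64 here ... */

		ev.op = RTE_EVENT_OP_FORWARD; /* keep the flow's ordering */
		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
	}
}
```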
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
new file mode 100644
index 00000000..300dfae8
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -0,0 +1,139 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <rte_common.h>
+
+#include "ssovf_evdev.h"
+
+enum {
+ SSO_SYNC_ORDERED,
+ SSO_SYNC_ATOMIC,
+ SSO_SYNC_UNTAGGED,
+ SSO_SYNC_EMPTY
+};
+
+#ifndef force_inline
+#define force_inline inline __attribute__((always_inline))
+#endif
+
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
+/* SSO Operations */
+
+static force_inline uint16_t
+ssows_get_work(struct ssows *ws, struct rte_event *ev)
+{
+ uint64_t get_work0, get_work1;
+ uint64_t sched_type_queue;
+
+ ssovf_load_pair(get_work0, get_work1, ws->getwork);
+
+ sched_type_queue = (get_work0 >> 32) & 0xfff;
+ ws->cur_tt = sched_type_queue & 0x3;
+ ws->cur_grp = sched_type_queue >> 2;
+ sched_type_queue = sched_type_queue << 38;
+
+ ev->event = sched_type_queue | (get_work0 & 0xffffffff);
+ ev->u64 = get_work1;
+ return !!get_work1;
+}
+
+static force_inline void
+ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
+ const uint8_t new_tt, const uint8_t grp)
+{
+ uint64_t add_work0;
+
+ add_work0 = tag | ((uint64_t)(new_tt) << 32);
+ ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
+}
+
+static force_inline void
+ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
+ const uint8_t new_tt, const uint8_t grp)
+{
+ uint64_t swtag_full0;
+
+ swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
+ ((uint64_t)grp << 34);
+ ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
+ SSOW_VHWS_OP_SWTAG_FULL0));
+}
+
+static force_inline void
+ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
+ ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
+}
+
+static force_inline void
+ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32);
+ ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
+}
+
+static force_inline void
+ssows_swtag_untag(struct ssows *ws)
+{
+ ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
+ ws->cur_tt = SSO_SYNC_UNTAGGED;
+}
+
+static force_inline void
+ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
+{
+ ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
+ SSOW_VHWS_OP_UPD_WQP_GRP0));
+}
+
+static force_inline void
+ssows_desched(struct ssows *ws)
+{
+ ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
+}
+
+static force_inline void
+ssows_swtag_wait(struct ssows *ws)
+{
+ /* Wait for the SWTAG/SWTAG_FULL operation */
+ while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
+ ;
+		;
+}
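A closing note on ssows_get_work(): the shift by 38 lines the hardware's {tag type, group} pair up with the sched_type (bits [39:38]) and queue_id (bits [47:40]) fields of the rte_event word, assuming this release's rte_event bit layout. A worked example of the unpacking (illustrative only):

```c
#include <stdint.h>

static uint64_t
example_unpack_get_work0(uint64_t get_work0)
{
	/* Bits [43:32] of get_work0: tt in [1:0], group in [11:2] */
	uint64_t stq = (get_work0 >> 32) & 0xfff;
	uint8_t tt = stq & 0x3;   /* lands in rte_event.sched_type */
	uint16_t grp = stq >> 2;  /* lands in rte_event.queue_id */

	(void)tt;
	(void)grp;
	return (stq << 38) | (get_work0 & 0xffffffff);
}
```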