Diffstat (limited to 'drivers/event/octeontx')

 drivers/event/octeontx/Makefile                |  12
 drivers/event/octeontx/meson.build             |   9
 drivers/event/octeontx/ssovf_evdev.c           |  77
 drivers/event/octeontx/ssovf_evdev.h           |  17
 drivers/event/octeontx/ssovf_evdev_selftest.c  |  36
 drivers/event/octeontx/ssovf_probe.c           | 290
 drivers/event/octeontx/ssovf_worker.c          |  30
 drivers/event/octeontx/ssovf_worker.h          |   2
 drivers/event/octeontx/timvf_evdev.c           | 405
 drivers/event/octeontx/timvf_evdev.h           | 225
 drivers/event/octeontx/timvf_probe.c           | 148
 drivers/event/octeontx/timvf_worker.c          | 199
 drivers/event/octeontx/timvf_worker.h          | 443
 13 files changed, 1852 insertions(+), 41 deletions(-)
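
This patch extends the OcteonTx SSO eventdev PMD with a TIM VF backend for the event timer adapter, moves the SSO/TIM PCIe probing into the driver, and switches to the renamed common mailbox helpers (octeontx_ssovf_mbox_send() -> octeontx_mbox_send()). For orientation, here is a minimal, hedged sketch of how an application would exercise the new path through the public rte_event_timer_adapter API; the device id, queue id and timing values are illustrative assumptions, not part of this diff, and error handling is reduced to the essentials.

#include <rte_errno.h>
#include <rte_eventdev.h>
#include <rte_event_timer_adapter.h>

static struct rte_event_timer_adapter *
tim_adapter_setup(uint8_t evdev_id)
{
	/* Tick/timeout choices respect the driver checks below:
	 * timer_tick_ns >= TIM_MIN_INTERVAL (1E3 ns), and the ring gets
	 * max_tmo_ns / timer_tick_ns (here ~1000) buckets. */
	const struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev_id,
		.timer_adapter_id = 0,
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, /* TIM_CLK_SRC_SCLK */
		.timer_tick_ns = 10 * 1000,
		.max_tmo_ns = 10 * 1000 * 1000,
		.nb_timers = 64 * 1024,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};
	struct rte_event_timer_adapter *adptr;

	adptr = rte_event_timer_adapter_create(&conf);
	if (adptr == NULL)
		return NULL;
	if (rte_event_timer_adapter_start(adptr) < 0)
		return NULL;
	return adptr;
}

/* 'tim' must point to storage that stays valid until the timer fires
 * or is cancelled; the PMD keeps back-references in impl_opaque[]. */
static int
tim_arm_one(struct rte_event_timer_adapter *adptr,
	    struct rte_event_timer *tim, void *userdata)
{
	*tim = (struct rte_event_timer) {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,             /* assumed: queue 0 set up/linked */
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.ev.event_ptr = userdata,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = 100,         /* 100 * 10 us = 1 ms */
	};

	/* timvf_timer_reg_checks() below requires state == NOT_ARMED and
	 * 0 < timeout_ticks < nb_bkts. */
	if (rte_event_timer_arm_burst(adptr, &tim, 1) != 1)
		return -rte_errno;
	return 0;
}

The stats variants of the arm/cancel ops are only installed when the new devarg is passed, e.g. --vdev="event_octeontx,timvf_stats=1" (the vdev name is the existing PMD's and is assumed here; the devarg string itself, TIMVF_ENABLE_STATS_ARG, is defined in timvf_evdev.h below).
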
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile index 0e49efd8..90ad2217 100644 --- a/drivers/event/octeontx/Makefile +++ b/drivers/event/octeontx/Makefile @@ -10,10 +10,12 @@ include $(RTE_SDK)/mk/rte.vars.mk LIB = librte_pmd_octeontx_ssovf.a CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/ CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/ CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/ +CFLAGS += -DALLOW_EXPERIMENTAL_API -LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx -lrte_pmd_octeontx +LDLIBS += -lrte_eal -lrte_eventdev -lrte_common_octeontx -lrte_pmd_octeontx LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs LDLIBS += -lrte_bus_vdev @@ -27,18 +29,26 @@ LIBABIVER := 1 SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev_selftest.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_worker.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_evdev.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_probe.c ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays +CFLAGS_timvf_worker.o += -fno-prefetch-loop-arrays ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1) CFLAGS_ssovf_worker.o += -Ofast +CFLAGS_timvf_worker.o += -Ofast else CFLAGS_ssovf_worker.o += -O3 -ffast-math +CFLAGS_timvf_worker.o += -O3 -ffast-math endif else CFLAGS_ssovf_worker.o += -Ofast +CFLAGS_timvf_worker.o += -Ofast endif include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/event/octeontx/meson.build b/drivers/event/octeontx/meson.build index 358fc9fc..04185533 100644 --- a/drivers/event/octeontx/meson.build +++ b/drivers/event/octeontx/meson.build @@ -3,7 +3,12 @@ sources = files('ssovf_worker.c', 'ssovf_evdev.c', - 'ssovf_evdev_selftest.c' + 'ssovf_evdev_selftest.c', + 'ssovf_probe.c', + 'timvf_worker.c', + 'timvf_evdev.c', + 'timvf_probe.c' ) -deps += ['mempool_octeontx', 'bus_vdev', 'pmd_octeontx'] +allow_experimental_apis = true +deps += ['common_octeontx', 'mempool_octeontx', 'bus_vdev', 'pmd_octeontx'] diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c index a1086077..16a3a04b 100644 --- a/drivers/event/octeontx/ssovf_evdev.c +++ b/drivers/event/octeontx/ssovf_evdev.c @@ -18,12 +18,12 @@ #include <rte_bus_vdev.h> #include "ssovf_evdev.h" +#include "timvf_evdev.h" int otx_logtype_ssovf; +static uint8_t timvf_enable_stats; -RTE_INIT(otx_ssovf_init_log); -static void -otx_ssovf_init_log(void) +RTE_INIT(otx_ssovf_init_log) { otx_logtype_ssovf = rte_log_register("pmd.event.octeontx"); if (otx_logtype_ssovf >= 0) @@ -49,7 +49,7 @@ ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info) hdr.vfid = 0; memset(info, 0, len); - return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len); + return octeontx_mbox_send(&hdr, NULL, 0, info, len); } struct ssovf_mbox_getwork_wait { @@ -69,7 +69,7 @@ ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns) hdr.vfid = 0; tmo_set.wait_ns = timeout_ns; - ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0); + ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0); if (ret) ssovf_log_err("Failed to set getwork timeout(%d)", ret); @@ -99,7 +99,7 @@ ssovf_mbox_priority_set(uint8_t queue, uint8_t prio) grp.affinity = 0xff; grp.priority = prio / 32; /* Normalize to 0 to 7 */ - ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0); + 
ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0); if (ret) ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio); @@ -125,7 +125,7 @@ ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks) memset(&ns2iter, 0, len); ns2iter.wait_ns = ns; - ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len); + ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len); if (ret < 0 || (ret != len)) { ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns); return -EIO; @@ -276,7 +276,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id, return -ENOMEM; } - ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0); + ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0); if (ws->base == NULL) { rte_free(ws); ssovf_log_err("Failed to get hws base addr port=%d", port_id); @@ -290,7 +290,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id, ws->port = port_id; for (q = 0; q < edev->nb_event_queues; q++) { - ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2); + ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2); if (ws->grps[q] == NULL) { rte_free(ws); ssovf_log_err("Failed to get grp%d base addr", q); @@ -474,14 +474,9 @@ static int ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev) { - int ret; - const struct octeontx_nic *nic = eth_dev->data->dev_private; RTE_SET_USED(dev); + RTE_SET_USED(eth_dev); - ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); - if (ret) - return 0; - octeontx_pki_port_start(nic->port_id); return 0; } @@ -490,14 +485,9 @@ static int ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev) { - int ret; - const struct octeontx_nic *nic = eth_dev->data->dev_private; RTE_SET_USED(dev); + RTE_SET_USED(eth_dev); - ret = strncmp(eth_dev->data->name, "eth_octeontx", 12); - if (ret) - return 0; - octeontx_pki_port_stop(nic->port_id); return 0; } @@ -529,9 +519,9 @@ ssovf_start(struct rte_eventdev *dev) for (i = 0; i < edev->nb_event_queues; i++) { /* Consume all the events through HWS0 */ - ssows_flush_events(dev->data->ports[0], i); + ssows_flush_events(dev->data->ports[0], i, NULL, NULL); - base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); + base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); base += SSO_VHGRP_QCTL; ssovf_write64(1, base); /* Enable SSO group */ } @@ -541,6 +531,16 @@ ssovf_start(struct rte_eventdev *dev) } static void +ssows_handle_event(void *arg, struct rte_event event) +{ + struct rte_eventdev *dev = arg; + + if (dev->dev_ops->dev_stop_flush != NULL) + dev->dev_ops->dev_stop_flush(dev->data->dev_id, event, + dev->data->dev_stop_flush_arg); +} + +static void ssovf_stop(struct rte_eventdev *dev) { struct ssovf_evdev *edev = ssovf_pmd_priv(dev); @@ -557,9 +557,10 @@ ssovf_stop(struct rte_eventdev *dev) for (i = 0; i < edev->nb_event_queues; i++) { /* Consume all the events through HWS0 */ - ssows_flush_events(dev->data->ports[0], i); + ssows_flush_events(dev->data->ports[0], i, + ssows_handle_event, dev); - base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); + base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0); base += SSO_VHGRP_QCTL; ssovf_write64(0, base); /* Disable SSO group */ } @@ -590,8 +591,16 @@ ssovf_selftest(const char *key __rte_unused, const char *value, return 0; } +static int +ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags, + uint32_t *caps, const struct rte_event_timer_adapter_ops **ops) +{ + return timvf_timer_adapter_caps_get(dev, flags, caps, ops, + timvf_enable_stats); +} + /* Initialize and register event 
driver with DPDK Application */ -static const struct rte_eventdev_ops ssovf_ops = { +static struct rte_eventdev_ops ssovf_ops = { .dev_infos_get = ssovf_info_get, .dev_configure = ssovf_configure, .queue_def_conf = ssovf_queue_def_conf, @@ -610,6 +619,8 @@ static const struct rte_eventdev_ops ssovf_ops = { .eth_rx_adapter_start = ssovf_eth_rx_adapter_start, .eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop, + .timer_adapter_caps_get = ssovf_timvf_caps_get, + .dev_selftest = test_eventdev_octeontx, .dump = ssovf_dump, @@ -621,7 +632,7 @@ static const struct rte_eventdev_ops ssovf_ops = { static int ssovf_vdev_probe(struct rte_vdev_device *vdev) { - struct octeontx_ssovf_info oinfo; + struct ssovf_info oinfo; struct ssovf_mbox_dev_info info; struct ssovf_evdev *edev; struct rte_eventdev *eventdev; @@ -633,6 +644,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) static const char *const args[] = { SSOVF_SELFTEST_ARG, + TIMVF_ENABLE_STATS_ARG, NULL }; @@ -660,6 +672,15 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) rte_kvargs_free(kvlist); return ret; } + + ret = rte_kvargs_process(kvlist, + TIMVF_ENABLE_STATS_ARG, + ssovf_selftest, &timvf_enable_stats); + if (ret != 0) { + ssovf_log_err("%s: Error in timvf stats", name); + rte_kvargs_free(kvlist); + return ret; + } } rte_kvargs_free(kvlist); @@ -679,7 +700,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) return 0; } - ret = octeontx_ssovf_info(&oinfo); + ret = ssovf_info(&oinfo); if (ret) { ssovf_log_err("Failed to probe and validate ssovfs %d", ret); goto error; diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h index d1825b4f..18293e96 100644 --- a/drivers/event/octeontx/ssovf_evdev.h +++ b/drivers/event/octeontx/ssovf_evdev.h @@ -119,6 +119,16 @@ do { \ } while (0) #endif +struct ssovf_info { + uint16_t domain; /* Domain id */ + uint8_t total_ssovfs; /* Total sso groups available in domain */ + uint8_t total_ssowvfs;/* Total sso hws available in domain */ +}; + +enum ssovf_type { + OCTEONTX_SSO_GROUP, /* SSO group vf */ + OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */ +}; struct ssovf_evdev { uint8_t max_event_queues; @@ -164,8 +174,13 @@ uint16_t ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks); uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); -void ssows_flush_events(struct ssows *ws, uint8_t queue_id); + +typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev); +void ssows_flush_events(struct ssows *ws, uint8_t queue_id, + ssows_handle_event_t fn, void *arg); void ssows_reset(struct ssows *ws); +int ssovf_info(struct ssovf_info *info); +void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar); int test_eventdev_octeontx(void); #endif /* __SSOVF_EVDEV_H__ */ diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c index 5e012a95..239362fc 100644 --- a/drivers/event/octeontx/ssovf_evdev_selftest.c +++ b/drivers/event/octeontx/ssovf_evdev_selftest.c @@ -688,6 +688,40 @@ test_multi_queue_enq_multi_port_deq(void) nr_ports, 0xff /* invalid */); } +static +void flush(uint8_t dev_id, struct rte_event event, void *arg) +{ + unsigned int *count = arg; + + RTE_SET_USED(dev_id); + if (event.event_type == RTE_EVENT_TYPE_CPU) + *count = *count + 1; + +} + +static int +test_dev_stop_flush(void) +{ + unsigned int total_events = MAX_EVENTS, count = 0; + int ret; + + ret = generate_random_events(total_events); + if (ret) + return -1; + + ret = 
rte_event_dev_stop_flush_callback_register(evdev, flush, &count); + if (ret) + return -2; + rte_event_dev_stop(evdev); + ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL); + if (ret) + return -3; + RTE_TEST_ASSERT_EQUAL(total_events, count, + "count mismatch total_events=%d count=%d", + total_events, count); + return 0; +} + static int validate_queue_to_port_single_link(uint32_t index, uint8_t port, struct rte_event *ev) @@ -1415,6 +1449,8 @@ test_eventdev_octeontx(void) OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown, test_multi_queue_enq_single_port_deq); OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown, + test_dev_stop_flush); + OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown, test_multi_queue_enq_multi_port_deq); OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown, test_queue_to_port_single_link); diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c new file mode 100644 index 00000000..b3db596d --- /dev/null +++ b/drivers/event/octeontx/ssovf_probe.c @@ -0,0 +1,290 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include <rte_atomic.h> +#include <rte_common.h> +#include <rte_eal.h> +#include <rte_io.h> +#include <rte_pci.h> +#include <rte_bus_pci.h> + +#include "octeontx_mbox.h" +#include "ssovf_evdev.h" + +#define PCI_VENDOR_ID_CAVIUM 0x177D +#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B +#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D + +#define SSO_MAX_VHGRP (64) +#define SSO_MAX_VHWS (32) + +#define SSO_VHGRP_AQ_THR (0x1E0ULL) + +struct ssovf_res { + uint16_t domain; + uint16_t vfid; + void *bar0; + void *bar2; +}; + +struct ssowvf_res { + uint16_t domain; + uint16_t vfid; + void *bar0; + void *bar2; + void *bar4; +}; + +struct ssowvf_identify { + uint16_t domain; + uint16_t vfid; +}; + +struct ssodev { + uint8_t total_ssovfs; + uint8_t total_ssowvfs; + struct ssovf_res grp[SSO_MAX_VHGRP]; + struct ssowvf_res hws[SSO_MAX_VHWS]; +}; + +static struct ssodev sdev; + +/* Interface functions */ +int +ssovf_info(struct ssovf_info *info) +{ + uint8_t i; + uint16_t domain; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL) + return -EINVAL; + + if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0) + return -ENODEV; + + domain = sdev.grp[0].domain; + for (i = 0; i < sdev.total_ssovfs; i++) { + /* Check vfid's are contiguous and belong to same domain */ + if (sdev.grp[i].vfid != i || + sdev.grp[i].bar0 == NULL || + sdev.grp[i].domain != domain) { + mbox_log_err("GRP error, vfid=%d/%d domain=%d/%d %p", + i, sdev.grp[i].vfid, + domain, sdev.grp[i].domain, + sdev.grp[i].bar0); + return -EINVAL; + } + } + + for (i = 0; i < sdev.total_ssowvfs; i++) { + /* Check vfid's are contiguous and belong to same domain */ + if (sdev.hws[i].vfid != i || + sdev.hws[i].bar0 == NULL || + sdev.hws[i].domain != domain) { + mbox_log_err("HWS error, vfid=%d/%d domain=%d/%d %p", + i, sdev.hws[i].vfid, + domain, sdev.hws[i].domain, + sdev.hws[i].bar0); + return -EINVAL; + } + } + + info->domain = domain; + info->total_ssovfs = sdev.total_ssovfs; + info->total_ssowvfs = sdev.total_ssowvfs; + return 0; +} + +void* +ssovf_bar(enum ssovf_type type, uint8_t id, uint8_t bar) +{ + if (rte_eal_process_type() != RTE_PROC_PRIMARY || + type > OCTEONTX_SSO_HWS) + return NULL; + + if (type == OCTEONTX_SSO_GROUP) { + if (id >= sdev.total_ssovfs) + return NULL; + } else { + if (id >= sdev.total_ssowvfs) + return NULL; + } + + if (type == OCTEONTX_SSO_GROUP) { + switch (bar) { + case 0: + return 
sdev.grp[id].bar0; + case 2: + return sdev.grp[id].bar2; + default: + return NULL; + } + } else { + switch (bar) { + case 0: + return sdev.hws[id].bar0; + case 2: + return sdev.hws[id].bar2; + case 4: + return sdev.hws[id].bar4; + default: + return NULL; + } + } +} + +/* SSOWVF pcie device aka event port probe */ + +static int +ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + uint16_t vfid; + struct ssowvf_res *res; + struct ssowvf_identify *id; + uint8_t *ram_mbox_base; + + RTE_SET_USED(pci_drv); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev->mem_resource[0].addr == NULL || + pci_dev->mem_resource[2].addr == NULL || + pci_dev->mem_resource[4].addr == NULL) { + mbox_log_err("Empty bars %p %p %p", + pci_dev->mem_resource[0].addr, + pci_dev->mem_resource[2].addr, + pci_dev->mem_resource[4].addr); + return -ENODEV; + } + + if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) { + mbox_log_err("Bar4 len mismatch %d != %d", + SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len); + return -EINVAL; + } + + id = pci_dev->mem_resource[4].addr; + vfid = id->vfid; + if (vfid >= SSO_MAX_VHWS) { + mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS); + return -EINVAL; + } + + res = &sdev.hws[vfid]; + res->vfid = vfid; + res->bar0 = pci_dev->mem_resource[0].addr; + res->bar2 = pci_dev->mem_resource[2].addr; + res->bar4 = pci_dev->mem_resource[4].addr; + res->domain = id->domain; + + sdev.total_ssowvfs++; + if (vfid == 0) { + ram_mbox_base = ssovf_bar(OCTEONTX_SSO_HWS, 0, 4); + if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base)) { + mbox_log_err("Invalid Failed to set ram mbox base"); + return -EINVAL; + } + } + + rte_wmb(); + mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain, + res->vfid, sdev.total_ssowvfs); + return 0; +} + +static const struct rte_pci_id pci_ssowvf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX_SSOWS_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_ssowvf = { + .id_table = pci_ssowvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = ssowvf_probe, +}; + +RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf); + +/* SSOVF pcie device aka event queue probe */ + +static int +ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + uint64_t val; + uint16_t vfid; + uint8_t *idreg; + struct ssovf_res *res; + uint8_t *reg; + + RTE_SET_USED(pci_drv); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev->mem_resource[0].addr == NULL || + pci_dev->mem_resource[2].addr == NULL) { + mbox_log_err("Empty bars %p %p", + pci_dev->mem_resource[0].addr, + pci_dev->mem_resource[2].addr); + return -ENODEV; + } + idreg = pci_dev->mem_resource[0].addr; + idreg += SSO_VHGRP_AQ_THR; + val = rte_read64(idreg); + + /* Write back the default value of aq_thr */ + rte_write64((1ULL << 33) - 1, idreg); + vfid = (val >> 16) & 0xffff; + if (vfid >= SSO_MAX_VHGRP) { + mbox_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP); + return -EINVAL; + } + + res = &sdev.grp[vfid]; + res->vfid = vfid; + res->bar0 = pci_dev->mem_resource[0].addr; + res->bar2 = pci_dev->mem_resource[2].addr; + res->domain = val & 0xffff; + + sdev.total_ssovfs++; + if (vfid == 0) { + reg = ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0); + reg += SSO_VHGRP_PF_MBOX(1); + if (octeontx_mbox_set_reg(reg)) { + mbox_log_err("Invalid Failed to set 
mbox_reg"); + return -EINVAL; + } + } + + rte_wmb(); + mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain, + res->vfid, sdev.total_ssovfs); + return 0; +} + +static const struct rte_pci_id pci_ssovf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_ssovf = { + .id_table = pci_ssovf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = ssovf_probe, +}; + +RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf); diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c index 753c1e9f..fffa9024 100644 --- a/drivers/event/octeontx/ssovf_worker.c +++ b/drivers/event/octeontx/ssovf_worker.c @@ -198,13 +198,15 @@ ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events) } void -ssows_flush_events(struct ssows *ws, uint8_t queue_id) +ssows_flush_events(struct ssows *ws, uint8_t queue_id, + ssows_handle_event_t fn, void *arg) { uint32_t reg_off; - uint64_t aq_cnt = 1; - uint64_t cq_ds_cnt = 1; - uint64_t enable, get_work0, get_work1; - uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0); + struct rte_event ev; + uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1; + uint64_t get_work0, get_work1; + uint64_t sched_type_queue; + uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0); enable = ssovf_read64(base + SSO_VHGRP_QCTL); if (!enable) @@ -219,11 +221,23 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id) cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT); /* Extract cq and ds count */ cq_ds_cnt &= 0x1FFF1FFF0000; + ssovf_load_pair(get_work0, get_work1, ws->base + reg_off); - } - RTE_SET_USED(get_work0); - RTE_SET_USED(get_work1); + sched_type_queue = (get_work0 >> 32) & 0xfff; + ws->cur_tt = sched_type_queue & 0x3; + ws->cur_grp = sched_type_queue >> 2; + sched_type_queue = sched_type_queue << 38; + ev.event = sched_type_queue | (get_work0 & 0xffffffff); + if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV) + ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1, + (ev.event >> 20) & 0x7F); + else + ev.u64 = get_work1; + + if (fn != NULL && ev.u64 != 0) + fn(arg, ev); + } } void diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h index d55018a9..7c7306b5 100644 --- a/drivers/event/octeontx/ssovf_worker.h +++ b/drivers/event/octeontx/ssovf_worker.h @@ -28,11 +28,11 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info) { struct rte_mbuf *mbuf; octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work; - rte_prefetch_non_temporal(wqe); /* Get mbuf from wqe */ mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP); + rte_prefetch_non_temporal(mbuf); mbuf->packet_type = ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty]; mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr); diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c new file mode 100644 index 00000000..abbc9a77 --- /dev/null +++ b/drivers/event/octeontx/timvf_evdev.c @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include "timvf_evdev.h" + +int otx_logtype_timvf; + +RTE_INIT(otx_timvf_init_log) +{ + otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer"); + if (otx_logtype_timvf >= 0) + rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE); +} + +struct __rte_packed timvf_mbox_dev_info { + uint64_t ring_active[4]; + uint64_t clk_freq; +}; + +/* Response messages */ +enum { + 
MBOX_RET_SUCCESS, + MBOX_RET_INVALID, + MBOX_RET_INTERNAL_ERR, +}; + +static int +timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info) +{ + struct octeontx_mbox_hdr hdr = {0}; + uint16_t len = sizeof(struct timvf_mbox_dev_info); + + hdr.coproc = TIM_COPROC; + hdr.msg = TIM_GET_DEV_INFO; + hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */ + + memset(info, 0, len); + return octeontx_mbox_send(&hdr, NULL, 0, info, len); +} + +static void +timvf_ring_info_get(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer_adapter_info *adptr_info) +{ + struct timvf_ring *timr = adptr->data->adapter_priv; + adptr_info->max_tmo_ns = timr->max_tout; + adptr_info->min_resolution_ns = timr->tck_nsec; + rte_memcpy(&adptr_info->conf, &adptr->data->conf, + sizeof(struct rte_event_timer_adapter_conf)); +} + +static int +timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id) +{ + struct octeontx_mbox_hdr hdr = {0}; + uint16_t len = sizeof(struct timvf_ctrl_reg); + int ret; + + hdr.coproc = TIM_COPROC; + hdr.msg = TIM_SET_RING_INFO; + hdr.vfid = ring_id; + + ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0); + if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS) + return -EACCES; + return 0; +} + +static int +timvf_get_start_cyc(uint64_t *now, uint8_t ring_id) +{ + struct octeontx_mbox_hdr hdr = {0}; + + hdr.coproc = TIM_COPROC; + hdr.msg = TIM_RING_START_CYC_GET; + hdr.vfid = ring_id; + *now = 0; + return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t)); +} + +static int +optimize_bucket_parameters(struct timvf_ring *timr) +{ + uint32_t hbkts; + uint32_t lbkts; + uint64_t tck_nsec; + + hbkts = rte_align32pow2(timr->nb_bkts); + tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10); + + if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS)) + hbkts = 0; + + lbkts = rte_align32prevpow2(timr->nb_bkts); + tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10); + + if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS)) + lbkts = 0; + + if (!hbkts && !lbkts) + return 0; + + if (!hbkts) { + timr->nb_bkts = lbkts; + goto end; + } else if (!lbkts) { + timr->nb_bkts = hbkts; + goto end; + } + + timr->nb_bkts = (hbkts - timr->nb_bkts) < + (timr->nb_bkts - lbkts) ? hbkts : lbkts; +end: + timr->get_target_bkt = bkt_and; + timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / + (timr->nb_bkts - 1)), 10); + return 1; +} + +static int +timvf_ring_start(const struct rte_event_timer_adapter *adptr) +{ + int ret; + uint8_t use_fpa = 0; + uint64_t interval; + uintptr_t pool; + struct timvf_ctrl_reg rctrl; + struct timvf_mbox_dev_info dinfo; + struct timvf_ring *timr = adptr->data->adapter_priv; + + ret = timvf_mbox_dev_info_get(&dinfo); + if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info)) + return -EINVAL; + + /* Calculate the interval cycles according to clock source. */ + switch (timr->clk_src) { + case TIM_CLK_SRC_SCLK: + interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq); + break; + case TIM_CLK_SRC_GPIO: + /* GPIO doesn't work on tck_nsec. 
*/ + interval = 0; + break; + case TIM_CLK_SRC_GTI: + interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq); + break; + case TIM_CLK_SRC_PTP: + interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq); + break; + default: + timvf_log_err("Unsupported clock source configured %d", + timr->clk_src); + return -EINVAL; + } + + if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf")) + use_fpa = 1; + + /*CTRL0 register.*/ + rctrl.rctrl0 = interval; + + /*CTRL1 register.*/ + rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 | + 1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ | + 1ull << 47 /* ENA */ | + 1ull << 44 /* ENA_LDWB */ | + (timr->nb_bkts - 1); + + rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40; + + if (use_fpa) { + pool = (uintptr_t)((struct rte_mempool *) + timr->chunk_pool)->pool_id; + ret = octeontx_fpa_bufpool_gaura(pool); + if (ret < 0) { + timvf_log_dbg("Unable to get gaura id"); + ret = -ENOMEM; + goto error; + } + timvf_write64((uint64_t)ret, + (uint8_t *)timr->vbar0 + TIM_VRING_AURA); + } else { + rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */; + } + + timvf_write64((uintptr_t)timr->bkt, + (uint8_t *)timr->vbar0 + TIM_VRING_BASE); + timvf_set_chunk_refill(timr, use_fpa); + if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) { + ret = -EACCES; + goto error; + } + + if (timvf_get_start_cyc(&timr->ring_start_cyc, + timr->tim_ring_id) < 0) { + ret = -EACCES; + goto error; + } + timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz()); + timr->fast_div = rte_reciprocal_value_u64(timr->tck_int); + timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64"" + " maxtmo %"PRIu64"\n", + timr->nb_bkts, timr->tck_nsec, interval, + timr->max_tout); + + return 0; +error: + rte_free(timr->bkt); + rte_mempool_free(timr->chunk_pool); + return ret; +} + +static int +timvf_ring_stop(const struct rte_event_timer_adapter *adptr) +{ + struct timvf_ring *timr = adptr->data->adapter_priv; + struct timvf_ctrl_reg rctrl = {0}; + rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0); + rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1); + rctrl.rctrl1 &= ~(1ull << 47); /* Disable */ + rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2); + + if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) + return -EACCES; + return 0; +} + +static int +timvf_ring_create(struct rte_event_timer_adapter *adptr) +{ + char pool_name[25]; + int ret; + uint64_t nb_timers; + struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf; + struct timvf_ring *timr; + struct timvf_info tinfo; + const char *mempool_ops; + unsigned int mp_flags = 0; + + if (timvf_info(&tinfo) < 0) + return -ENODEV; + + if (adptr->data->id >= tinfo.total_timvfs) + return -ENODEV; + + timr = rte_zmalloc("octeontx_timvf_priv", + sizeof(struct timvf_ring), 0); + if (timr == NULL) + return -ENOMEM; + + adptr->data->adapter_priv = timr; + /* Check config parameters. 
*/ + if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) && + (!rcfg->timer_tick_ns || + rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) { + timvf_log_err("Too low timer ticks"); + goto cfg_err; + } + + timr->clk_src = (int) rcfg->clk_src; + timr->tim_ring_id = adptr->data->id; + timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10); + timr->max_tout = rcfg->max_tmo_ns; + timr->nb_bkts = (timr->max_tout / timr->tck_nsec); + timr->vbar0 = timvf_bar(timr->tim_ring_id, 0); + timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL; + nb_timers = rcfg->nb_timers; + timr->get_target_bkt = bkt_mod; + + timr->nb_chunks = nb_timers / nb_chunk_slots; + + /* Try to optimize the bucket parameters. */ + if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES) + && !rte_is_power_of_2(timr->nb_bkts)) { + if (optimize_bucket_parameters(timr)) { + timvf_log_info("Optimized configured values"); + timvf_log_dbg("nb_bkts : %"PRIu32"", timr->nb_bkts); + timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec); + } else + timvf_log_info("Failed to Optimize configured values"); + } + + if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) { + mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET; + timvf_log_info("Using single producer mode"); + } + + timr->bkt = rte_zmalloc("octeontx_timvf_bucket", + (timr->nb_bkts) * sizeof(struct tim_mem_bucket), + 0); + if (timr->bkt == NULL) + goto mem_err; + + snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d", + timr->tim_ring_id); + timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name, + timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(), + mp_flags); + + if (!timr->chunk_pool) { + rte_free(timr->bkt); + timvf_log_err("Unable to create chunkpool."); + return -ENOMEM; + } + + mempool_ops = rte_mbuf_best_mempool_ops(); + ret = rte_mempool_set_ops_byname(timr->chunk_pool, + mempool_ops, NULL); + + if (ret != 0) { + timvf_log_err("Unable to set chunkpool ops."); + goto mem_err; + } + + ret = rte_mempool_populate_default(timr->chunk_pool); + if (ret < 0) { + timvf_log_err("Unable to set populate chunkpool."); + goto mem_err; + } + timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE); + timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT); + timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S); + timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C); + timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S); + + return 0; +mem_err: + rte_free(timr); + return -ENOMEM; +cfg_err: + rte_free(timr); + return -EINVAL; +} + +static int +timvf_ring_free(struct rte_event_timer_adapter *adptr) +{ + struct timvf_ring *timr = adptr->data->adapter_priv; + rte_mempool_free(timr->chunk_pool); + rte_free(timr->bkt); + rte_free(adptr->data->adapter_priv); + return 0; +} + +static int +timvf_stats_get(const struct rte_event_timer_adapter *adapter, + struct rte_event_timer_adapter_stats *stats) +{ + struct timvf_ring *timr = adapter->data->adapter_priv; + uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc; + + stats->evtim_exp_count = timr->tim_arm_cnt; + stats->ev_enq_count = timr->tim_arm_cnt; + stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc, + &timr->fast_div); + return 0; +} + +static int +timvf_stats_reset(const struct rte_event_timer_adapter *adapter) +{ + struct timvf_ring *timr = adapter->data->adapter_priv; + + timr->tim_arm_cnt = 0; + return 0; +} + +static struct rte_event_timer_adapter_ops timvf_ops = { + .init = timvf_ring_create, + .uninit = timvf_ring_free, + .start = timvf_ring_start, + .stop = 
timvf_ring_stop, + .get_info = timvf_ring_info_get, +}; + +int +timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags, + uint32_t *caps, const struct rte_event_timer_adapter_ops **ops, + uint8_t enable_stats) +{ + RTE_SET_USED(dev); + + if (enable_stats) { + timvf_ops.stats_get = timvf_stats_get; + timvf_ops.stats_reset = timvf_stats_reset; + } + + if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) + timvf_ops.arm_burst = enable_stats ? + timvf_timer_arm_burst_sp_stats : + timvf_timer_arm_burst_sp; + else + timvf_ops.arm_burst = enable_stats ? + timvf_timer_arm_burst_mp_stats : + timvf_timer_arm_burst_mp; + + timvf_ops.arm_tmo_tick_burst = enable_stats ? + timvf_timer_arm_tmo_brst_stats : + timvf_timer_arm_tmo_brst; + timvf_ops.cancel_burst = timvf_timer_cancel_burst; + *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT; + *ops = &timvf_ops; + return 0; +} diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h new file mode 100644 index 00000000..0185593f --- /dev/null +++ b/drivers/event/octeontx/timvf_evdev.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __TIMVF_EVDEV_H__ +#define __TIMVF_EVDEV_H__ + +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_debug.h> +#include <rte_eal.h> +#include <rte_eventdev.h> +#include <rte_event_timer_adapter.h> +#include <rte_event_timer_adapter_pmd.h> +#include <rte_io.h> +#include <rte_lcore.h> +#include <rte_log.h> +#include <rte_malloc.h> +#include <rte_mbuf_pool_ops.h> +#include <rte_mempool.h> +#include <rte_memzone.h> +#include <rte_pci.h> +#include <rte_prefetch.h> +#include <rte_reciprocal.h> + +#include <octeontx_mbox.h> +#include <octeontx_fpavf.h> + +#define timvf_log(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \ + "[%s] %s() " fmt "\n", \ + RTE_STR(event_timer_octeontx), __func__, ## args) + +#define timvf_log_info(fmt, ...) timvf_log(INFO, fmt, ##__VA_ARGS__) +#define timvf_log_dbg(fmt, ...) timvf_log(DEBUG, fmt, ##__VA_ARGS__) +#define timvf_log_err(fmt, ...) 
timvf_log(ERR, fmt, ##__VA_ARGS__) +#define timvf_func_trace timvf_log_dbg + +#define TIM_COPROC (8) +#define TIM_GET_DEV_INFO (1) +#define TIM_GET_RING_INFO (2) +#define TIM_SET_RING_INFO (3) +#define TIM_RING_START_CYC_GET (4) + +#define TIM_MAX_RINGS (64) +#define TIM_DEV_PER_NODE (1) +#define TIM_VF_PER_DEV (64) +#define TIM_RING_PER_DEV (TIM_VF_PER_DEV) +#define TIM_RING_NODE_SHIFT (6) +#define TIM_RING_MASK ((TIM_RING_PER_DEV) - 1) +#define TIM_RING_INVALID (-1) + +#define TIM_MIN_INTERVAL (1E3) +#define TIM_MAX_INTERVAL ((1ull << 32) - 1) +#define TIM_MAX_BUCKETS (1ull << 20) +#define TIM_CHUNK_SIZE (4096) +#define TIM_MAX_CHUNKS_PER_BUCKET (1ull << 32) + +#define TIMVF_MAX_BURST (8) + +/* TIM VF Control/Status registers (CSRs): */ +/* VF_BAR0: */ +#define TIM_VF_NRSPERR_INT (0x0) +#define TIM_VF_NRSPERR_INT_W1S (0x8) +#define TIM_VF_NRSPERR_ENA_W1C (0x10) +#define TIM_VF_NRSPERR_ENA_W1S (0x18) +#define TIM_VRING_FR_RN_CYCLES (0x20) +#define TIM_VRING_FR_RN_GPIOS (0x28) +#define TIM_VRING_FR_RN_GTI (0x30) +#define TIM_VRING_FR_RN_PTP (0x38) +#define TIM_VRING_CTL0 (0x40) +#define TIM_VRING_CTL1 (0x50) +#define TIM_VRING_CTL2 (0x60) +#define TIM_VRING_BASE (0x100) +#define TIM_VRING_AURA (0x108) +#define TIM_VRING_REL (0x110) + +#define TIM_CTL1_W0_S_BUCKET 20 +#define TIM_CTL1_W0_M_BUCKET ((1ull << (40 - 20)) - 1) + +#define TIM_BUCKET_W1_S_NUM_ENTRIES (0) /*Shift*/ +#define TIM_BUCKET_W1_M_NUM_ENTRIES ((1ull << (32 - 0)) - 1) +#define TIM_BUCKET_W1_S_SBT (32) +#define TIM_BUCKET_W1_M_SBT ((1ull << (33 - 32)) - 1) +#define TIM_BUCKET_W1_S_HBT (33) +#define TIM_BUCKET_W1_M_HBT ((1ull << (34 - 33)) - 1) +#define TIM_BUCKET_W1_S_BSK (34) +#define TIM_BUCKET_W1_M_BSK ((1ull << (35 - 34)) - 1) +#define TIM_BUCKET_W1_S_LOCK (40) +#define TIM_BUCKET_W1_M_LOCK ((1ull << (48 - 40)) - 1) +#define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48) +#define TIM_BUCKET_W1_M_CHUNK_REMAINDER ((1ull << (64 - 48)) - 1) + +#define TIM_BUCKET_SEMA \ + (TIM_BUCKET_CHUNK_REMAIN) + +#define TIM_BUCKET_CHUNK_REMAIN \ + (TIM_BUCKET_W1_M_CHUNK_REMAINDER << TIM_BUCKET_W1_S_CHUNK_REMAINDER) + +#define TIM_BUCKET_LOCK \ + (TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK) + +#define TIM_BUCKET_SEMA_WLOCK \ + (TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK)) + +#define NSEC_PER_SEC 1E9 +#define NSEC2CLK(__ns, __freq) (((__ns) * (__freq)) / NSEC_PER_SEC) +#define CLK2NSEC(__clk, __freq) (((__clk) * NSEC_PER_SEC) / (__freq)) + +#define timvf_read64 rte_read64_relaxed +#define timvf_write64 rte_write64_relaxed + +#define TIMVF_ENABLE_STATS_ARG ("timvf_stats") + +extern int otx_logtype_timvf; +static const uint16_t nb_chunk_slots = (TIM_CHUNK_SIZE / 16) - 1; + +struct timvf_info { + uint16_t domain; /* Domain id */ + uint8_t total_timvfs; /* Total timvf available in domain */ +}; + +enum timvf_clk_src { + TIM_CLK_SRC_SCLK = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, + TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0, + TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1, + TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2, +}; + +/* TIM_MEM_BUCKET */ +struct tim_mem_bucket { + uint64_t first_chunk; + union { + uint64_t w1; + struct { + uint32_t nb_entry; + uint8_t sbt:1; + uint8_t hbt:1; + uint8_t bsk:1; + uint8_t rsvd:5; + uint8_t lock; + int16_t chunk_remainder; + }; + }; + uint64_t current_chunk; + uint64_t pad; +} __rte_packed __rte_aligned(8); + +struct tim_mem_entry { + uint64_t w0; + uint64_t wqe; +} __rte_packed; + +struct timvf_ctrl_reg { + uint64_t rctrl0; + uint64_t rctrl1; + uint64_t rctrl2; + uint8_t use_pmu; +} 
__rte_packed; + +struct timvf_ring; + +typedef uint32_t (*bkt_id)(const uint32_t bkt_tcks, const uint32_t nb_bkts); +typedef struct tim_mem_entry * (*refill_chunk)( + struct tim_mem_bucket * const bkt, + struct timvf_ring * const timr); + +struct timvf_ring { + bkt_id get_target_bkt; + refill_chunk refill_chunk; + struct rte_reciprocal_u64 fast_div; + uint64_t ring_start_cyc; + uint32_t nb_bkts; + struct tim_mem_bucket *bkt; + void *chunk_pool; + uint64_t tck_int; + volatile uint64_t tim_arm_cnt; + uint64_t tck_nsec; + void *vbar0; + void *bkt_pos; + uint64_t max_tout; + uint64_t nb_chunks; + enum timvf_clk_src clk_src; + uint16_t tim_ring_id; +} __rte_cache_aligned; + +static __rte_always_inline uint32_t +bkt_mod(const uint32_t rel_bkt, const uint32_t nb_bkts) +{ + return rel_bkt % nb_bkts; +} + +static __rte_always_inline uint32_t +bkt_and(uint32_t rel_bkt, uint32_t nb_bkts) +{ + return rel_bkt & (nb_bkts - 1); +} + +int timvf_info(struct timvf_info *tinfo); +void *timvf_bar(uint8_t id, uint8_t bar); +int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags, + uint32_t *caps, const struct rte_event_timer_adapter_ops **ops, + uint8_t enable_stats); +uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t timvf_timer_arm_burst_sp_stats( + const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t timvf_timer_arm_burst_mp_stats( + const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers); +uint16_t timvf_timer_arm_tmo_brst_stats( + const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers); +void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa); + +#endif /* __TIMVF_EVDEV_H__ */ diff --git a/drivers/event/octeontx/timvf_probe.c b/drivers/event/octeontx/timvf_probe.c new file mode 100644 index 00000000..08dbd2be --- /dev/null +++ b/drivers/event/octeontx/timvf_probe.c @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include <rte_eal.h> +#include <rte_io.h> +#include <rte_pci.h> +#include <rte_bus_pci.h> + +#include <octeontx_mbox.h> + +#include "ssovf_evdev.h" +#include "timvf_evdev.h" + +#ifndef PCI_VENDOR_ID_CAVIUM +#define PCI_VENDOR_ID_CAVIUM (0x177D) +#endif + +#define PCI_DEVICE_ID_OCTEONTX_TIM_VF (0xA051) +#define TIM_MAX_RINGS (64) + +struct timvf_res { + uint16_t domain; + uint16_t vfid; + void *bar0; + void *bar2; + void *bar4; +}; + +struct timdev { + uint8_t total_timvfs; + struct timvf_res rings[TIM_MAX_RINGS]; +}; + +static struct timdev tdev; + +int +timvf_info(struct timvf_info *tinfo) +{ + int i; + struct ssovf_info info; + + if (tinfo == NULL) + return -EINVAL; + + if (!tdev.total_timvfs) + return -ENODEV; + + if (ssovf_info(&info) < 0) + return -EINVAL; + + for (i = 0; i < tdev.total_timvfs; i++) { + if (info.domain != tdev.rings[i].domain) { + timvf_log_err("GRP error, 
vfid=%d/%d domain=%d/%d %p", + i, tdev.rings[i].vfid, + info.domain, tdev.rings[i].domain, + tdev.rings[i].bar0); + return -EINVAL; + } + } + + tinfo->total_timvfs = tdev.total_timvfs; + tinfo->domain = info.domain; + return 0; +} + +void* +timvf_bar(uint8_t id, uint8_t bar) +{ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return NULL; + + if (id > tdev.total_timvfs) + return NULL; + + switch (bar) { + case 0: + return tdev.rings[id].bar0; + case 4: + return tdev.rings[id].bar4; + default: + return NULL; + } +} + +static int +timvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + uint64_t val; + uint16_t vfid; + struct timvf_res *res; + + RTE_SET_USED(pci_drv); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev->mem_resource[0].addr == NULL || + pci_dev->mem_resource[4].addr == NULL) { + timvf_log_err("Empty bars %p %p", + pci_dev->mem_resource[0].addr, + pci_dev->mem_resource[4].addr); + return -ENODEV; + } + + val = rte_read64((uint8_t *)pci_dev->mem_resource[0].addr + + 0x100 /* TIM_VRINGX_BASE */); + vfid = (val >> 23) & 0xff; + if (vfid >= TIM_MAX_RINGS) { + timvf_log_err("Invalid vfid(%d/%d)", vfid, TIM_MAX_RINGS); + return -EINVAL; + } + + res = &tdev.rings[tdev.total_timvfs]; + res->vfid = vfid; + res->bar0 = pci_dev->mem_resource[0].addr; + res->bar2 = pci_dev->mem_resource[2].addr; + res->bar4 = pci_dev->mem_resource[4].addr; + res->domain = (val >> 7) & 0xffff; + tdev.total_timvfs++; + rte_wmb(); + + timvf_log_dbg("Domain=%d VFid=%d bar0 %p total_timvfs=%d", res->domain, + res->vfid, pci_dev->mem_resource[0].addr, + tdev.total_timvfs); + return 0; +} + + +static const struct rte_pci_id pci_timvf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX_TIM_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_timvf = { + .id_table = pci_timvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, + .probe = timvf_probe, + .remove = NULL, +}; + +RTE_PMD_REGISTER_PCI(octeontx_timvf, pci_timvf); diff --git a/drivers/event/octeontx/timvf_worker.c b/drivers/event/octeontx/timvf_worker.c new file mode 100644 index 00000000..50790e19 --- /dev/null +++ b/drivers/event/octeontx/timvf_worker.c @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include "timvf_worker.h" + +static inline int +timvf_timer_reg_checks(const struct timvf_ring * const timr, + struct rte_event_timer * const tim) +{ + if (unlikely(tim->state)) { + tim->state = RTE_EVENT_TIMER_ERROR; + rte_errno = EALREADY; + goto fail; + } + + if (unlikely(!tim->timeout_ticks || + tim->timeout_ticks >= timr->nb_bkts)) { + tim->state = tim->timeout_ticks ? 
RTE_EVENT_TIMER_ERROR_TOOLATE + : RTE_EVENT_TIMER_ERROR_TOOEARLY; + rte_errno = EINVAL; + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static inline void +timvf_format_event(const struct rte_event_timer * const tim, + struct tim_mem_entry * const entry) +{ + entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 | + (tim->ev.event & 0xFFFFFFFFF); + entry->wqe = tim->ev.u64; +} + +uint16_t +timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + RTE_SET_USED(adptr); + int ret; + uint16_t index; + + for (index = 0; index < nb_timers; index++) { + if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) { + rte_errno = EALREADY; + break; + } + + if (tim[index]->state != RTE_EVENT_TIMER_ARMED) { + rte_errno = EINVAL; + break; + } + ret = timvf_rem_entry(tim[index]); + if (ret) { + rte_errno = -ret; + break; + } + } + return index; +} + +uint16_t +timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + int ret; + uint16_t index; + struct tim_mem_entry entry; + struct timvf_ring *timr = adptr->data->adapter_priv; + for (index = 0; index < nb_timers; index++) { + if (timvf_timer_reg_checks(timr, tim[index])) + break; + + timvf_format_event(tim[index], &entry); + ret = timvf_add_entry_sp(timr, tim[index]->timeout_ticks, + tim[index], &entry); + if (unlikely(ret)) { + rte_errno = -ret; + break; + } + } + + return index; +} + +uint16_t +timvf_timer_arm_burst_sp_stats(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + uint16_t ret; + struct timvf_ring *timr = adptr->data->adapter_priv; + + ret = timvf_timer_arm_burst_sp(adptr, tim, nb_timers); + timr->tim_arm_cnt += ret; + + return ret; +} + +uint16_t +timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + int ret; + uint16_t index; + struct tim_mem_entry entry; + struct timvf_ring *timr = adptr->data->adapter_priv; + for (index = 0; index < nb_timers; index++) { + if (timvf_timer_reg_checks(timr, tim[index])) + break; + timvf_format_event(tim[index], &entry); + ret = timvf_add_entry_mp(timr, tim[index]->timeout_ticks, + tim[index], &entry); + if (unlikely(ret)) { + rte_errno = -ret; + break; + } + } + + return index; +} + +uint16_t +timvf_timer_arm_burst_mp_stats(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + uint16_t ret; + struct timvf_ring *timr = adptr->data->adapter_priv; + + ret = timvf_timer_arm_burst_mp(adptr, tim, nb_timers); + timr->tim_arm_cnt += ret; + + return ret; +} + +uint16_t +timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers) +{ + int ret; + uint16_t set_timers = 0; + uint16_t idx; + uint16_t arr_idx = 0; + struct timvf_ring *timr = adptr->data->adapter_priv; + struct tim_mem_entry entry[TIMVF_MAX_BURST] __rte_cache_aligned; + + if (unlikely(!timeout_tick || timeout_tick >= timr->nb_bkts)) { + const enum rte_event_timer_state state = timeout_tick ? 
+ RTE_EVENT_TIMER_ERROR_TOOLATE : + RTE_EVENT_TIMER_ERROR_TOOEARLY; + for (idx = 0; idx < nb_timers; idx++) + tim[idx]->state = state; + rte_errno = EINVAL; + return 0; + } + + while (arr_idx < nb_timers) { + for (idx = 0; idx < TIMVF_MAX_BURST && (arr_idx < nb_timers); + idx++, arr_idx++) { + timvf_format_event(tim[arr_idx], &entry[idx]); + } + ret = timvf_add_entry_brst(timr, timeout_tick, &tim[set_timers], + entry, idx); + set_timers += ret; + if (ret != idx) + break; + } + + return set_timers; +} + + +uint16_t +timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers) +{ + uint16_t set_timers; + struct timvf_ring *timr = adptr->data->adapter_priv; + + set_timers = timvf_timer_arm_tmo_brst(adptr, tim, timeout_tick, + nb_timers); + timr->tim_arm_cnt += set_timers; + + return set_timers; +} + +void +timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa) +{ + if (use_fpa) + timr->refill_chunk = timvf_refill_chunk_fpa; + else + timr->refill_chunk = timvf_refill_chunk_generic; +} diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h new file mode 100644 index 00000000..dede1a4a --- /dev/null +++ b/drivers/event/octeontx/timvf_worker.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include <rte_common.h> +#include <rte_branch_prediction.h> + +#include "timvf_evdev.h" + +static inline int16_t +timr_bkt_fetch_rem(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) & + TIM_BUCKET_W1_M_CHUNK_REMAINDER; +} + +static inline int16_t +timr_bkt_get_rem(struct tim_mem_bucket *bktp) +{ + return __atomic_load_n(&bktp->chunk_remainder, + __ATOMIC_ACQUIRE); +} + +static inline void +timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v) +{ + __atomic_store_n(&bktp->chunk_remainder, v, + __ATOMIC_RELEASE); +} + +static inline void +timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v) +{ + __atomic_fetch_sub(&bktp->chunk_remainder, v, + __ATOMIC_RELEASE); +} + +static inline uint8_t +timr_bkt_get_sbt(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT; +} + +static inline uint64_t +timr_bkt_set_sbt(struct tim_mem_bucket *bktp) +{ + const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT; + return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline uint64_t +timr_bkt_clr_sbt(struct tim_mem_bucket *bktp) +{ + const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT); + return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline uint8_t +timr_bkt_get_shbt(uint64_t w1) +{ + return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) | + ((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT); +} + +static inline uint8_t +timr_bkt_get_hbt(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT; +} + +static inline uint8_t +timr_bkt_get_bsk(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK; +} + +static inline uint64_t +timr_bkt_clr_bsk(struct tim_mem_bucket *bktp) +{ + /*Clear everything except lock. 
*/ + const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK; + return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline uint64_t +timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp) +{ + return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK, + __ATOMIC_ACQ_REL); +} + +static inline uint64_t +timr_bkt_fetch_sema(struct tim_mem_bucket *bktp) +{ + return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA, + __ATOMIC_RELAXED); +} + +static inline uint64_t +timr_bkt_inc_lock(struct tim_mem_bucket *bktp) +{ + const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK; + return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline void +timr_bkt_dec_lock(struct tim_mem_bucket *bktp) +{ + __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL); +} + +static inline uint32_t +timr_bkt_get_nent(uint64_t w1) +{ + return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) & + TIM_BUCKET_W1_M_NUM_ENTRIES; +} + +static inline void +timr_bkt_inc_nent(struct tim_mem_bucket *bktp) +{ + __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED); +} + +static inline void +timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v) +{ + __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED); +} + +static inline uint64_t +timr_bkt_clr_nent(struct tim_mem_bucket *bktp) +{ + const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES << + TIM_BUCKET_W1_S_NUM_ENTRIES); + return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL); +} + +static inline struct tim_mem_entry * +timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt) +{ + struct tim_mem_entry *chunk; + struct tim_mem_entry *pnext; + chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk); + chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0; + + while (chunk) { + pnext = (struct tim_mem_entry *)(uintptr_t) + ((chunk + nb_chunk_slots)->w0); + rte_mempool_put(timr->chunk_pool, chunk); + chunk = pnext; + } + return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk; +} + +static inline int +timvf_rem_entry(struct rte_event_timer *tim) +{ + uint64_t lock_sema; + struct tim_mem_entry *entry; + struct tim_mem_bucket *bkt; + if (tim->impl_opaque[1] == 0 || + tim->impl_opaque[0] == 0) + return -ENOENT; + + entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0]; + if (entry->wqe != tim->ev.u64) { + tim->impl_opaque[1] = tim->impl_opaque[0] = 0; + return -ENOENT; + } + bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1]; + lock_sema = timr_bkt_inc_lock(bkt); + if (timr_bkt_get_shbt(lock_sema) + || !timr_bkt_get_nent(lock_sema)) { + timr_bkt_dec_lock(bkt); + tim->impl_opaque[1] = tim->impl_opaque[0] = 0; + return -ENOENT; + } + + entry->w0 = entry->wqe = 0; + timr_bkt_dec_lock(bkt); + + tim->state = RTE_EVENT_TIMER_CANCELED; + tim->impl_opaque[1] = tim->impl_opaque[0] = 0; + return 0; +} + +static inline struct tim_mem_entry * +timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt, + struct timvf_ring * const timr) +{ + struct tim_mem_entry *chunk; + + if (bkt->nb_entry || !bkt->first_chunk) { + if (unlikely(rte_mempool_get(timr->chunk_pool, + (void **)&chunk))) { + return NULL; + } + if (bkt->nb_entry) { + *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t) + bkt->current_chunk) + + nb_chunk_slots) = + (uintptr_t) chunk; + } else { + bkt->first_chunk = (uintptr_t) chunk; + } + } else { + chunk = timr_clr_bkt(timr, bkt); + bkt->first_chunk = (uintptr_t)chunk; + } + *(uint64_t *)(chunk + nb_chunk_slots) = 0; + + return chunk; +} + +static inline struct tim_mem_entry * 
+timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt, + struct timvf_ring * const timr) +{ + struct tim_mem_entry *chunk; + + if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk))) + return NULL; + + *(uint64_t *)(chunk + nb_chunk_slots) = 0; + if (bkt->nb_entry) { + *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t) + bkt->current_chunk) + + nb_chunk_slots) = + (uintptr_t) chunk; + } else { + bkt->first_chunk = (uintptr_t) chunk; + } + + return chunk; +} + +static inline struct tim_mem_bucket * +timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt) +{ + const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc; + const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc, + &timr->fast_div) + rel_bkt; + const uint32_t tbkt_id = timr->get_target_bkt(bucket, + timr->nb_bkts); + return &timr->bkt[tbkt_id]; +} + +/* Single producer functions. */ +static inline int +timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt, + struct rte_event_timer * const tim, + const struct tim_mem_entry * const pent) +{ + int16_t rem; + uint64_t lock_sema; + struct tim_mem_bucket *bkt; + struct tim_mem_entry *chunk; + + + bkt = timvf_get_target_bucket(timr, rel_bkt); +__retry: + /*Get Bucket sema*/ + lock_sema = timr_bkt_fetch_sema(bkt); + /* Bucket related checks. */ + if (unlikely(timr_bkt_get_hbt(lock_sema))) + goto __retry; + + /* Insert the work. */ + rem = timr_bkt_fetch_rem(lock_sema); + + if (!rem) { + chunk = timr->refill_chunk(bkt, timr); + if (unlikely(chunk == NULL)) { + timr_bkt_set_rem(bkt, 0); + tim->impl_opaque[0] = tim->impl_opaque[1] = 0; + tim->state = RTE_EVENT_TIMER_ERROR; + return -ENOMEM; + } + bkt->current_chunk = (uintptr_t) chunk; + timr_bkt_set_rem(bkt, nb_chunk_slots - 1); + } else { + chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk; + chunk += nb_chunk_slots - rem; + } + /* Copy work entry. */ + *chunk = *pent; + timr_bkt_inc_nent(bkt); + + tim->impl_opaque[0] = (uintptr_t)chunk; + tim->impl_opaque[1] = (uintptr_t)bkt; + tim->state = RTE_EVENT_TIMER_ARMED; + return 0; +} + +/* Multi producer functions. */ +static inline int +timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt, + struct rte_event_timer * const tim, + const struct tim_mem_entry * const pent) +{ + int16_t rem; + uint64_t lock_sema; + struct tim_mem_bucket *bkt; + struct tim_mem_entry *chunk; + +__retry: + bkt = timvf_get_target_bucket(timr, rel_bkt); + /* Bucket related checks. */ + /*Get Bucket sema*/ + lock_sema = timr_bkt_fetch_sema_lock(bkt); + if (unlikely(timr_bkt_get_shbt(lock_sema))) { + timr_bkt_dec_lock(bkt); + goto __retry; + } + + rem = timr_bkt_fetch_rem(lock_sema); + + if (rem < 0) { + /* goto diff bucket. */ + timr_bkt_dec_lock(bkt); + goto __retry; + } else if (!rem) { + /*Only one thread can be here*/ + chunk = timr->refill_chunk(bkt, timr); + if (unlikely(chunk == NULL)) { + timr_bkt_set_rem(bkt, 0); + timr_bkt_dec_lock(bkt); + tim->impl_opaque[0] = tim->impl_opaque[1] = 0; + tim->state = RTE_EVENT_TIMER_ERROR; + return -ENOMEM; + } + bkt->current_chunk = (uintptr_t) chunk; + timr_bkt_set_rem(bkt, nb_chunk_slots - 1); + } else { + chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk; + chunk += nb_chunk_slots - rem; + } + /* Copy work entry. 
*/ + *chunk = *pent; + timr_bkt_inc_nent(bkt); + timr_bkt_dec_lock(bkt); + + tim->impl_opaque[0] = (uintptr_t)chunk; + tim->impl_opaque[1] = (uintptr_t)bkt; + tim->state = RTE_EVENT_TIMER_ARMED; + return 0; +} + +static inline uint16_t +timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt, + struct tim_mem_entry *chunk, + struct rte_event_timer ** const tim, + const struct tim_mem_entry * const ents, + const struct tim_mem_bucket * const bkt) +{ + for (; index < cpy_lmt; index++) { + *chunk = *(ents + index); + tim[index]->impl_opaque[0] = (uintptr_t)chunk++; + tim[index]->impl_opaque[1] = (uintptr_t)bkt; + tim[index]->state = RTE_EVENT_TIMER_ARMED; + } + + return index; +} + +/* Burst mode functions */ +static inline int +timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt, + struct rte_event_timer ** const tim, + const struct tim_mem_entry *ents, + const uint16_t nb_timers) +{ + int16_t rem; + int16_t crem; + uint8_t lock_cnt; + uint16_t index = 0; + uint16_t chunk_remainder; + uint64_t lock_sema; + struct tim_mem_bucket *bkt; + struct tim_mem_entry *chunk; + +__retry: + bkt = timvf_get_target_bucket(timr, rel_bkt); + + /* Only one thread beyond this. */ + lock_sema = timr_bkt_inc_lock(bkt); + lock_cnt = (uint8_t) + ((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK); + + if (lock_cnt) { + timr_bkt_dec_lock(bkt); + goto __retry; + } + + /* Bucket related checks. */ + if (unlikely(timr_bkt_get_hbt(lock_sema))) { + timr_bkt_dec_lock(bkt); + goto __retry; + } + + chunk_remainder = timr_bkt_fetch_rem(lock_sema); + rem = chunk_remainder - nb_timers; + if (rem < 0) { + crem = nb_chunk_slots - chunk_remainder; + if (chunk_remainder && crem) { + chunk = ((struct tim_mem_entry *) + (uintptr_t)bkt->current_chunk) + crem; + + index = timvf_cpy_wrk(index, chunk_remainder, + chunk, tim, ents, bkt); + timr_bkt_sub_rem(bkt, chunk_remainder); + timr_bkt_add_nent(bkt, chunk_remainder); + } + rem = nb_timers - chunk_remainder; + ents = ents + chunk_remainder; + + chunk = timr->refill_chunk(bkt, timr); + if (unlikely(chunk == NULL)) { + timr_bkt_dec_lock(bkt); + rte_errno = ENOMEM; + tim[index]->state = RTE_EVENT_TIMER_ERROR; + return crem; + } + *(uint64_t *)(chunk + nb_chunk_slots) = 0; + bkt->current_chunk = (uintptr_t) chunk; + + index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt); + timr_bkt_set_rem(bkt, nb_chunk_slots - rem); + timr_bkt_add_nent(bkt, rem); + } else { + chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk; + chunk += (nb_chunk_slots - chunk_remainder); + + index = timvf_cpy_wrk(index, nb_timers, + chunk, tim, ents, bkt); + timr_bkt_sub_rem(bkt, nb_timers); + timr_bkt_add_nent(bkt, nb_timers); + } + + timr_bkt_dec_lock(bkt); + return nb_timers; +} |
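
A closing note on the lock-free bucket accounting in timvf_worker.h: the bucket's 64-bit word w1 packs nb_entry, the sbt/hbt/bsk flags, an 8-bit lock count (bits 40-47) and a 16-bit chunk_remainder in the top bits (48-63), and the helpers update two fields with a single atomic. TIM_BUCKET_SEMA_WLOCK adds the all-ones mask of chunk_remainder plus a one in the lock field, so one fetch-add decrements the remainder (the borrow carries out past bit 63 and is discarded, which is why the remainder sits topmost) and takes the lock at the same time. Below is a self-contained sketch of that trick, using plain GCC atomics and field offsets copied from the TIM_BUCKET_W1_* macros above; it is demonstration code, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define W1_S_LOCK 40
#define W1_S_REM  48
#define W1_M_REM  ((1ull << 16) - 1)
/* Mirrors TIM_BUCKET_SEMA_WLOCK: remainder -1 and lock +1 in one add. */
#define SEMA_WLOCK ((W1_M_REM << W1_S_REM) | (1ull << W1_S_LOCK))

int main(void)
{
	uint64_t w1 = 3ull << W1_S_REM; /* 3 free chunk slots, unlocked */
	uint64_t old = __atomic_fetch_add(&w1, SEMA_WLOCK, __ATOMIC_ACQ_REL);

	printf("rem %llu -> %llu, lock %llu\n",
	       (unsigned long long)((old >> W1_S_REM) & W1_M_REM),
	       (unsigned long long)((w1 >> W1_S_REM) & W1_M_REM),
	       (unsigned long long)((w1 >> W1_S_LOCK) & 0xff));
	return 0; /* prints: rem 3 -> 2, lock 1 */
}

Dropping the lock cannot reuse the same trick on the full word, since adding 0xff at bit 40 would carry into chunk_remainder; that is why timr_bkt_dec_lock() instead adds 0xff to the one-byte lock field itself, wrapping modulo 256 to subtract one.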