author      Luca Boccassi <luca.boccassi@gmail.com>        2018-02-19 11:16:57 +0000
committer   Luca Boccassi <luca.boccassi@gmail.com>        2018-02-19 11:17:28 +0000
commit      ca33590b6af032bff57d9cc70455660466a654b2 (patch)
tree        0b68b090bd9b4a78a3614b62400b29279d76d553 /drivers/event/sw
parent      169a9de21e263aa6599cdc2d87a45ae158d9f509 (diff)
New upstream version 18.02 (upstream/18.02)
Change-Id: I89ed24cb2a49b78fe5be6970b99dd46c1499fcc3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/event/sw')
-rw-r--r--   drivers/event/sw/Makefile              |   34
-rw-r--r--   drivers/event/sw/event_ring.h          |   32
-rw-r--r--   drivers/event/sw/iq_chunk.h            |  196
-rw-r--r--   drivers/event/sw/iq_ring.h             |  172
-rw-r--r--   drivers/event/sw/meson.build           |   11
-rw-r--r--   drivers/event/sw/sw_evdev.c            |  184
-rw-r--r--   drivers/event/sw/sw_evdev.h            |   71
-rw-r--r--   drivers/event/sw/sw_evdev_log.h        |   23
-rw-r--r--   drivers/event/sw/sw_evdev_scheduler.c  |   72
-rw-r--r--   drivers/event/sw/sw_evdev_selftest.c   | 3245
-rw-r--r--   drivers/event/sw/sw_evdev_worker.c     |   70
-rw-r--r--   drivers/event/sw/sw_evdev_xstats.c     |   44
12 files changed, 3662 insertions, 492 deletions
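The bulk of this import, as the diffstat shows, replaces the software eventdev's fixed-depth internal queues (iq_ring.h, removed) with dynamically chunked queues (iq_chunk.h, added), converts the driver to SPDX license tags and dynamic logging (sw_evdev_log.h), and adds a selftest (sw_evdev_selftest.c). Below is a minimal, standalone sketch of the chunked-queue idea that the new iq_chunk.h implements: events live in fixed-size chunks linked into a list and recycled through a pre-allocated free list, so an IQ has no hard depth limit. The names here (chunk_pool, ciq, ...) are illustrative stand-ins rather than the driver's structures, and the chunk size is shrunk for readability (the driver uses SW_EVS_PER_Q_CHUNK = 255).

```c
/*
 * Sketch of a chunked internal queue (IQ) with a free list of chunks,
 * modelled on the layout of struct sw_iq (head/tail chunk, head_idx,
 * tail_idx, count). Illustrative only; not the driver's code.
 */
#include <stdio.h>
#include <string.h>

#define EVS_PER_CHUNK 4          /* driver uses 255: power of two minus one */

struct chunk {
	int events[EVS_PER_CHUNK];
	struct chunk *next;
};

struct chunk_pool {              /* free list of pre-allocated chunks */
	struct chunk *head;
};

struct ciq {                     /* chunked queue, mirrors struct sw_iq */
	struct chunk *head, *tail;
	unsigned int head_idx, tail_idx, count;
};

static struct chunk *pool_get(struct chunk_pool *p)
{
	struct chunk *c = p->head;
	p->head = c->next;
	c->next = NULL;
	return c;
}

static void pool_put(struct chunk_pool *p, struct chunk *c)
{
	c->next = p->head;
	p->head = c;
}

static void ciq_init(struct chunk_pool *p, struct ciq *q)
{
	memset(q, 0, sizeof(*q));
	q->head = q->tail = pool_get(p);
}

static void ciq_enqueue(struct chunk_pool *p, struct ciq *q, int ev)
{
	q->tail->events[q->tail_idx++] = ev;
	q->count++;
	if (q->tail_idx == EVS_PER_CHUNK) {   /* tail chunk full: link a new one */
		q->tail->next = pool_get(p);
		q->tail = q->tail->next;
		q->tail_idx = 0;
	}
}

static int ciq_dequeue(struct chunk_pool *p, struct ciq *q, int *ev)
{
	if (q->count == 0)
		return 0;
	*ev = q->head->events[q->head_idx++];
	q->count--;
	if (q->head_idx == EVS_PER_CHUNK) {   /* head chunk drained: recycle it */
		struct chunk *next = q->head->next;
		pool_put(p, q->head);
		q->head = next;
		q->head_idx = 0;
	}
	return 1;
}

int main(void)
{
	/* Pre-allocate all chunks up front, as sw_dev_configure() does. */
	enum { NUM_CHUNKS = 8 };
	static struct chunk chunks[NUM_CHUNKS];
	struct chunk_pool pool = { .head = NULL };
	struct ciq q;
	int i, ev;

	for (i = 0; i < NUM_CHUNKS; i++)
		pool_put(&pool, &chunks[i]);

	ciq_init(&pool, &q);
	for (i = 0; i < 10; i++)
		ciq_enqueue(&pool, &q, i);
	while (ciq_dequeue(&pool, &q, &ev))
		printf("%d ", ev);
	printf("\n");
	return 0;
}
```

In the real driver, sw_dev_configure() sizes the chunk pool as (SW_INFLIGHT_EVENTS_TOTAL / SW_EVS_PER_Q_CHUNK + 1) + qid_count * SW_IQS_MAX * 2 entries, the worst-case spread of inflight events across all IQs, which is why iq_alloc_chunk() and iq_enqueue() in the diff below carry no failure handling.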
diff --git a/drivers/event/sw/Makefile b/drivers/event/sw/Makefile index 2f2b67ba..81236a39 100644 --- a/drivers/event/sw/Makefile +++ b/drivers/event/sw/Makefile @@ -1,32 +1,5 @@ -# BSD LICENSE -# -# Copyright(c) 2016-2017 Intel Corporation. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2016-2017 Intel Corporation include $(RTE_SDK)/mk/rte.vars.mk @@ -34,6 +7,7 @@ include $(RTE_SDK)/mk/rte.vars.mk LIB = librte_pmd_sw_event.a # build flags +CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) # for older GCC versions, allow us to initialize an event using @@ -44,6 +18,7 @@ CFLAGS += -Wno-missing-field-initializers endif endif LDLIBS += -lrte_eal -lrte_eventdev -lrte_kvargs -lrte_ring +LDLIBS += -lrte_mempool -lrte_mbuf LDLIBS += -lrte_bus_vdev # library version @@ -57,6 +32,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_worker.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_scheduler.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_xstats.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_selftest.c # export include files SYMLINK-y-include += diff --git a/drivers/event/sw/event_ring.h b/drivers/event/sw/event_ring.h index 734a3b4b..02308728 100644 --- a/drivers/event/sw/event_ring.h +++ b/drivers/event/sw/event_ring.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ /* diff --git a/drivers/event/sw/iq_chunk.h b/drivers/event/sw/iq_chunk.h new file mode 100644 index 00000000..31d013ea --- /dev/null +++ b/drivers/event/sw/iq_chunk.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _IQ_CHUNK_H_ +#define _IQ_CHUNK_H_ + +#include <stdint.h> +#include <stdbool.h> +#include <rte_eventdev.h> + +#define IQ_ROB_NAMESIZE 12 + +struct sw_queue_chunk { + struct rte_event events[SW_EVS_PER_Q_CHUNK]; + struct sw_queue_chunk *next; +} __rte_cache_aligned; + +static __rte_always_inline bool +iq_empty(struct sw_iq *iq) +{ + return (iq->count == 0); +} + +static __rte_always_inline uint16_t +iq_count(const struct sw_iq *iq) +{ + return iq->count; +} + +static __rte_always_inline struct sw_queue_chunk * +iq_alloc_chunk(struct sw_evdev *sw) +{ + struct sw_queue_chunk *chunk = sw->chunk_list_head; + sw->chunk_list_head = chunk->next; + chunk->next = NULL; + return chunk; +} + +static __rte_always_inline void +iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk) +{ + chunk->next = sw->chunk_list_head; + sw->chunk_list_head = chunk; +} + +static __rte_always_inline void +iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head) +{ + while (head) { + struct sw_queue_chunk *next; + next = head->next; + iq_free_chunk(sw, head); + head = next; + } +} + +static __rte_always_inline void +iq_init(struct sw_evdev *sw, struct sw_iq *iq) +{ + iq->head = iq_alloc_chunk(sw); + iq->tail = iq->head; + iq->head_idx = 0; + iq->tail_idx = 0; + iq->count = 0; +} + +static __rte_always_inline void +iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev) +{ + iq->tail->events[iq->tail_idx++] = *ev; + iq->count++; + + if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) { + /* The number of chunks is defined in relation to the total + * number of inflight events and number of IQS such that + * allocation will always succeed. 
+ */ + struct sw_queue_chunk *chunk = iq_alloc_chunk(sw); + iq->tail->next = chunk; + iq->tail = chunk; + iq->tail_idx = 0; + } +} + +static __rte_always_inline void +iq_pop(struct sw_evdev *sw, struct sw_iq *iq) +{ + iq->head_idx++; + iq->count--; + + if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) { + struct sw_queue_chunk *next = iq->head->next; + iq_free_chunk(sw, iq->head); + iq->head = next; + iq->head_idx = 0; + } +} + +static __rte_always_inline const struct rte_event * +iq_peek(struct sw_iq *iq) +{ + return &iq->head->events[iq->head_idx]; +} + +/* Note: the caller must ensure that count <= iq_count() */ +static __rte_always_inline uint16_t +iq_dequeue_burst(struct sw_evdev *sw, + struct sw_iq *iq, + struct rte_event *ev, + uint16_t count) +{ + struct sw_queue_chunk *current; + uint16_t total, index; + + count = RTE_MIN(count, iq_count(iq)); + + current = iq->head; + index = iq->head_idx; + total = 0; + + /* Loop over the chunks */ + while (1) { + struct sw_queue_chunk *next; + for (; index < SW_EVS_PER_Q_CHUNK;) { + ev[total++] = current->events[index++]; + + if (unlikely(total == count)) + goto done; + } + + /* Move to the next chunk */ + next = current->next; + iq_free_chunk(sw, current); + current = next; + index = 0; + } + +done: + if (unlikely(index == SW_EVS_PER_Q_CHUNK)) { + struct sw_queue_chunk *next = current->next; + iq_free_chunk(sw, current); + iq->head = next; + iq->head_idx = 0; + } else { + iq->head = current; + iq->head_idx = index; + } + + iq->count -= total; + + return total; +} + +static __rte_always_inline void +iq_put_back(struct sw_evdev *sw, + struct sw_iq *iq, + struct rte_event *ev, + unsigned int count) +{ + /* Put back events that fit in the current head chunk. If necessary, + * put back events in a new head chunk. The caller must ensure that + * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is + * needed. + */ + uint16_t avail_space = iq->head_idx; + + if (avail_space >= count) { + const uint16_t idx = avail_space - count; + uint16_t i; + + for (i = 0; i < count; i++) + iq->head->events[idx + i] = ev[i]; + + iq->head_idx = idx; + } else if (avail_space < count) { + const uint16_t remaining = count - avail_space; + struct sw_queue_chunk *new_head; + uint16_t i; + + for (i = 0; i < avail_space; i++) + iq->head->events[i] = ev[remaining + i]; + + new_head = iq_alloc_chunk(sw); + new_head->next = iq->head; + iq->head = new_head; + iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining; + + for (i = 0; i < remaining; i++) + iq->head->events[iq->head_idx + i] = ev[i]; + } + + iq->count += count; +} + +#endif /* _IQ_CHUNK_H_ */ diff --git a/drivers/event/sw/iq_ring.h b/drivers/event/sw/iq_ring.h deleted file mode 100644 index 64cf6784..00000000 --- a/drivers/event/sw/iq_ring.h +++ /dev/null @@ -1,172 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Ring structure definitions used for the internal ring buffers of the - * SW eventdev implementation. These are designed for single-core use only. - */ -#ifndef _IQ_RING_ -#define _IQ_RING_ - -#include <stdint.h> - -#include <rte_common.h> -#include <rte_memory.h> -#include <rte_malloc.h> -#include <rte_eventdev.h> - -#define IQ_RING_NAMESIZE 12 -#define QID_IQ_DEPTH 512 -#define QID_IQ_MASK (uint16_t)(QID_IQ_DEPTH - 1) - -struct iq_ring { - char name[IQ_RING_NAMESIZE] __rte_cache_aligned; - uint16_t write_idx; - uint16_t read_idx; - - struct rte_event ring[QID_IQ_DEPTH]; -}; - -static inline struct iq_ring * -iq_ring_create(const char *name, unsigned int socket_id) -{ - struct iq_ring *retval; - - retval = rte_malloc_socket(NULL, sizeof(*retval), 0, socket_id); - if (retval == NULL) - goto end; - - snprintf(retval->name, sizeof(retval->name), "%s", name); - retval->write_idx = retval->read_idx = 0; -end: - return retval; -} - -static inline void -iq_ring_destroy(struct iq_ring *r) -{ - rte_free(r); -} - -static __rte_always_inline uint16_t -iq_ring_count(const struct iq_ring *r) -{ - return r->write_idx - r->read_idx; -} - -static __rte_always_inline uint16_t -iq_ring_free_count(const struct iq_ring *r) -{ - return QID_IQ_MASK - iq_ring_count(r); -} - -static __rte_always_inline uint16_t -iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes) -{ - const uint16_t read = r->read_idx; - uint16_t write = r->write_idx; - const uint16_t space = read + QID_IQ_MASK - write; - uint16_t i; - - if (space < nb_qes) - nb_qes = space; - - for (i = 0; i < nb_qes; i++, write++) - r->ring[write & QID_IQ_MASK] = qes[i]; - - r->write_idx = write; - - return nb_qes; -} - -static __rte_always_inline uint16_t -iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes) -{ - uint16_t read = r->read_idx; - const uint16_t write = r->write_idx; - const uint16_t items = write - read; - uint16_t i; - - for (i = 0; i < nb_qes; i++, read++) - qes[i] = r->ring[read & QID_IQ_MASK]; - - if (items < nb_qes) - nb_qes = items; - - r->read_idx += nb_qes; - - return nb_qes; -} - -/* assumes there is space, from a previous dequeue_burst */ -static __rte_always_inline uint16_t -iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes) -{ - uint16_t i, read = r->read_idx; - - for (i = nb_qes; i-- > 0; ) - r->ring[--read & QID_IQ_MASK] = qes[i]; - - r->read_idx = read; - return nb_qes; -} - -static __rte_always_inline const struct rte_event * -iq_ring_peek(const struct 
iq_ring *r) -{ - return &r->ring[r->read_idx & QID_IQ_MASK]; -} - -static __rte_always_inline void -iq_ring_pop(struct iq_ring *r) -{ - r->read_idx++; -} - -static __rte_always_inline int -iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe) -{ - const uint16_t read = r->read_idx; - const uint16_t write = r->write_idx; - const uint16_t space = read + QID_IQ_MASK - write; - - if (space == 0) - return -1; - - r->ring[write & QID_IQ_MASK] = *qe; - - r->write_idx = write + 1; - - return 0; -} - -#endif diff --git a/drivers/event/sw/meson.build b/drivers/event/sw/meson.build new file mode 100644 index 00000000..30d22164 --- /dev/null +++ b/drivers/event/sw/meson.build @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +allow_experimental_apis = true +sources = files('sw_evdev_scheduler.c', + 'sw_evdev_selftest.c', + 'sw_evdev_worker.c', + 'sw_evdev_xstats.c', + 'sw_evdev.c' +) +deps += ['hash', 'bus_vdev'] diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c index fd110797..6672fd8e 100644 --- a/drivers/event/sw/sw_evdev.c +++ b/drivers/event/sw/sw_evdev.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <inttypes.h> @@ -41,7 +13,7 @@ #include <rte_service_component.h> #include "sw_evdev.h" -#include "iq_ring.h" +#include "iq_chunk.h" #define EVENTDEV_NAME_SW_PMD event_sw #define NUMA_NODE_ARG "numa_node" @@ -62,6 +34,7 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], RTE_SET_USED(priorities); for (i = 0; i < num; i++) { struct sw_qid *q = &sw->qids[queues[i]]; + unsigned int j; /* check for qid map overflow */ if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) { @@ -74,6 +47,15 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], break; } + for (j = 0; j < q->cq_num_mapped_cqs; j++) { + if (q->cq_map[j] == p->id) + break; + } + + /* check if port is already linked */ + if (j < q->cq_num_mapped_cqs) + continue; + if (q->type == SW_SCHED_TYPE_DIRECT) { /* check directed qids only map to one port */ if (p->num_qids_mapped > 0) { @@ -181,6 +163,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id, } p->inflight_max = conf->new_event_threshold; + p->implicit_release = !conf->disable_implicit_release; /* check if ring exists, same as rx_worker above */ snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id, @@ -231,18 +214,9 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, unsigned int i; int dev_id = sw->data->dev_id; int socket_id = sw->data->socket_id; - char buf[IQ_RING_NAMESIZE]; + char buf[IQ_ROB_NAMESIZE]; struct sw_qid *qid = &sw->qids[idx]; - for (i = 0; i < SW_IQS_MAX; i++) { - snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i); - qid->iq[i] = iq_ring_create(buf, socket_id); - if (!qid->iq[i]) { - SW_LOG_DBG("ring create failed"); - goto cleanup; - } - } - /* Initialize the FID structures to no pinning (-1), and zero packets */ const struct sw_fid_t fid = {.cq = -1, .pcount = 0}; for (i = 0; i < RTE_DIM(qid->fids); i++) @@ -320,11 +294,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, return 0; cleanup: - for (i = 0; i < SW_IQS_MAX; i++) { - if (qid->iq[i]) - iq_ring_destroy(qid->iq[i]); - } - if (qid->reorder_buffer) { rte_free(qid->reorder_buffer); qid->reorder_buffer = NULL; @@ -338,6 +307,19 @@ cleanup: return -EINVAL; } +static void +sw_queue_release(struct rte_eventdev *dev, uint8_t id) +{ + struct sw_evdev *sw = sw_pmd_priv(dev); + struct sw_qid *qid = &sw->qids[id]; + + if (qid->type == RTE_SCHED_TYPE_ORDERED) { + rte_free(qid->reorder_buffer); + rte_ring_free(qid->reorder_buffer_freelist); + } + memset(qid, 0, sizeof(*qid)); +} + static int sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id, const struct rte_event_queue_conf *conf) @@ -355,24 +337,46 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id, } struct sw_evdev *sw = sw_pmd_priv(dev); + + if (sw->qids[queue_id].initialized) + sw_queue_release(dev, queue_id); + return qid_init(sw, queue_id, type, conf); } static void -sw_queue_release(struct rte_eventdev *dev, uint8_t id) +sw_init_qid_iqs(struct sw_evdev *sw) { - struct sw_evdev *sw = sw_pmd_priv(dev); - struct sw_qid *qid = &sw->qids[id]; - uint32_t i; + int i, j; - for (i = 0; i < SW_IQS_MAX; i++) - iq_ring_destroy(qid->iq[i]); + /* Initialize the IQ memory of all configured qids */ + for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) { + struct sw_qid *qid = &sw->qids[i]; - if (qid->type == RTE_SCHED_TYPE_ORDERED) { - rte_free(qid->reorder_buffer); - rte_ring_free(qid->reorder_buffer_freelist); + if (!qid->initialized) + continue; + + for (j = 0; j < SW_IQS_MAX; 
j++) + iq_init(sw, &qid->iq[j]); + } +} + +static void +sw_clean_qid_iqs(struct sw_evdev *sw) +{ + int i, j; + + /* Release the IQ memory of all configured qids */ + for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) { + struct sw_qid *qid = &sw->qids[i]; + + for (j = 0; j < SW_IQS_MAX; j++) { + if (!qid->iq[j].head) + continue; + iq_free_chunk_list(sw, qid->iq[j].head); + qid->iq[j].head = NULL; + } } - memset(qid, 0, sizeof(*qid)); } static void @@ -402,6 +406,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id, port_conf->new_event_threshold = 1024; port_conf->dequeue_depth = 16; port_conf->enqueue_depth = 16; + port_conf->disable_implicit_release = 0; } static int @@ -410,12 +415,36 @@ sw_dev_configure(const struct rte_eventdev *dev) struct sw_evdev *sw = sw_pmd_priv(dev); const struct rte_eventdev_data *data = dev->data; const struct rte_event_dev_config *conf = &data->dev_conf; + int num_chunks, i; sw->qid_count = conf->nb_event_queues; sw->port_count = conf->nb_event_ports; sw->nb_events_limit = conf->nb_events_limit; rte_atomic32_set(&sw->inflights, 0); + /* Number of chunks sized for worst-case spread of events across IQs */ + num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) + + sw->qid_count*SW_IQS_MAX*2; + + /* If this is a reconfiguration, free the previous IQ allocation. All + * IQ chunk references were cleaned out of the QIDs in sw_stop(), and + * will be reinitialized in sw_start(). + */ + if (sw->chunks) + rte_free(sw->chunks); + + sw->chunks = rte_malloc_socket(NULL, + sizeof(struct sw_queue_chunk) * + num_chunks, + 0, + sw->data->socket_id); + if (!sw->chunks) + return -ENOMEM; + + sw->chunk_list_head = NULL; + for (i = 0; i < num_chunks; i++) + iq_free_chunk(sw, &sw->chunks[i]); + if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) return -ENOTSUP; @@ -450,9 +479,14 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info) .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH, .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH, .max_num_events = SW_INFLIGHT_EVENTS_TOTAL, - .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS | - RTE_EVENT_DEV_CAP_BURST_MODE | - RTE_EVENT_DEV_CAP_EVENT_QOS), + .event_dev_cap = ( + RTE_EVENT_DEV_CAP_QUEUE_QOS | + RTE_EVENT_DEV_CAP_BURST_MODE | + RTE_EVENT_DEV_CAP_EVENT_QOS | + RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE| + RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK | + RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT | + RTE_EVENT_DEV_CAP_NONSEQ_MODE), }; *info = evdev_sw_info; @@ -589,17 +623,16 @@ sw_dump(struct rte_eventdev *dev, FILE *f) uint32_t iq; uint32_t iq_printed = 0; for (iq = 0; iq < SW_IQS_MAX; iq++) { - if (!qid->iq[iq]) { + if (!qid->iq[iq].head) { fprintf(f, "\tiq %d is not initialized.\n", iq); iq_printed = 1; continue; } - uint32_t used = iq_ring_count(qid->iq[iq]); - uint32_t free = iq_ring_free_count(qid->iq[iq]); - const char *col = (free == 0) ? 
COL_RED : COL_RESET; + uint32_t used = iq_count(&qid->iq[iq]); + const char *col = COL_RESET; if (used > 0) { - fprintf(f, "\t%siq %d: Used %d\tFree %d" - COL_RESET"\n", col, iq, used, free); + fprintf(f, "\t%siq %d: Used %d" + COL_RESET"\n", col, iq, used); iq_printed = 1; } } @@ -632,8 +665,8 @@ sw_start(struct rte_eventdev *dev) /* check all queues are configured and mapped to ports*/ for (i = 0; i < sw->qid_count; i++) - if (sw->qids[i].iq[0] == NULL || - sw->qids[i].cq_num_mapped_cqs == 0) { + if (!sw->qids[i].initialized || + sw->qids[i].cq_num_mapped_cqs == 0) { SW_LOG_ERR("Queue %d not configured\n", i); return -ENOLINK; } @@ -654,6 +687,8 @@ sw_start(struct rte_eventdev *dev) } } + sw_init_qid_iqs(sw); + if (sw_xstats_init(sw) < 0) return -EINVAL; @@ -667,6 +702,7 @@ static void sw_stop(struct rte_eventdev *dev) { struct sw_evdev *sw = sw_pmd_priv(dev); + sw_clean_qid_iqs(sw); sw_xstats_uninit(sw); sw->started = 0; rte_smp_wmb(); @@ -759,6 +795,8 @@ sw_probe(struct rte_vdev_device *vdev) .xstats_get_names = sw_xstats_get_names, .xstats_get_by_name = sw_xstats_get_by_name, .xstats_reset = sw_xstats_reset, + + .dev_selftest = test_sw_eventdev, }; static const char *const args[] = { @@ -891,3 +929,15 @@ static struct rte_vdev_driver evdev_sw_pmd_drv = { RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv); RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> " SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>"); + +/* declared extern in header, for access from other .c files */ +int eventdev_sw_log_level; + +RTE_INIT(evdev_sw_init_log); +static void +evdev_sw_init_log(void) +{ + eventdev_sw_log_level = rte_log_register("pmd.event.sw"); + if (eventdev_sw_log_level >= 0) + rte_log_set_level(eventdev_sw_log_level, RTE_LOG_NOTICE); +} diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h index e0dec910..d90b96d4 100644 --- a/drivers/event/sw/sw_evdev.h +++ b/drivers/event/sw/sw_evdev.h @@ -1,38 +1,11 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #ifndef _SW_EVDEV_H_ #define _SW_EVDEV_H_ +#include "sw_evdev_log.h" #include <rte_eventdev.h> #include <rte_eventdev_pmd_vdev.h> #include <rte_atomic.h> @@ -49,6 +22,10 @@ #define MAX_SW_PROD_Q_DEPTH 4096 #define SW_FRAGMENTS_MAX 16 +/* Should be power-of-two minus one, to leave room for the next pointer */ +#define SW_EVS_PER_Q_CHUNK 255 +#define SW_Q_CHUNK_SIZE ((SW_EVS_PER_Q_CHUNK + 1) * sizeof(struct rte_event)) + /* report dequeue burst sizes in buckets */ #define SW_DEQ_STAT_BUCKET_SHIFT 2 /* how many packets pulled from port by sched */ @@ -88,26 +65,6 @@ static const uint8_t sw_qe_flag_map[] = { QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP, }; -#ifdef RTE_LIBRTE_PMD_EVDEV_SW_DEBUG -#define SW_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \ - SW_PMD_NAME, \ - __func__, __LINE__, ## args) - -#define SW_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \ - SW_PMD_NAME, \ - __func__, __LINE__, ## args) -#else -#define SW_LOG_INFO(fmt, args...) -#define SW_LOG_DBG(fmt, args...) -#endif - -#define SW_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \ - SW_PMD_NAME, \ - __func__, __LINE__, ## args) - /* Records basic event stats at a given point. 
Used in port and qid structs */ struct sw_point_stats { uint64_t rx_pkts; @@ -130,6 +87,14 @@ struct reorder_buffer_entry { struct rte_event fragments[SW_FRAGMENTS_MAX]; }; +struct sw_iq { + struct sw_queue_chunk *head; + struct sw_queue_chunk *tail; + uint16_t head_idx; + uint16_t tail_idx; + uint16_t count; +}; + struct sw_qid { /* set when the QID has been initialized */ uint8_t initialized; @@ -142,7 +107,7 @@ struct sw_qid { struct sw_point_stats stats; /* Internal priority rings for packets */ - struct iq_ring *iq[SW_IQS_MAX]; + struct sw_iq iq[SW_IQS_MAX]; uint32_t iq_pkt_mask; /* A mask to indicate packets in an IQ */ uint64_t iq_pkt_count[SW_IQS_MAX]; @@ -201,6 +166,7 @@ struct sw_port { uint16_t outstanding_releases __rte_cache_aligned; uint16_t inflight_max; /* app requested max inflights for this port */ uint16_t inflight_credits; /* num credits this port has right now */ + uint8_t implicit_release; /* release events before dequeueing */ uint16_t last_dequeue_burst_sz; /* how big the burst was */ uint64_t last_dequeue_ticks; /* used to track burst processing time */ @@ -253,6 +219,8 @@ struct sw_evdev { /* Internal queues - one per logical queue */ struct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned; + struct sw_queue_chunk *chunk_list_head; + struct sw_queue_chunk *chunks; /* Cache how many packets are in each cq */ uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned; @@ -319,5 +287,6 @@ int sw_xstats_reset(struct rte_eventdev *dev, const uint32_t ids[], uint32_t nb_ids); +int test_sw_eventdev(void); #endif /* _SW_EVDEV_H_ */ diff --git a/drivers/event/sw/sw_evdev_log.h b/drivers/event/sw/sw_evdev_log.h new file mode 100644 index 00000000..f76825ab --- /dev/null +++ b/drivers/event/sw/sw_evdev_log.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _SW_EVDEV_LOG_H_ +#define _SW_EVDEV_LOG_H_ + +extern int eventdev_sw_log_level; + +#define SW_LOG_IMPL(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, eventdev_sw_log_level, "%s" fmt "\n", \ + __func__, ##args) + +#define SW_LOG_INFO(fmt, args...) \ + SW_LOG_IMPL(INFO, fmt, ## args) + +#define SW_LOG_DBG(fmt, args...) \ + SW_LOG_IMPL(DEBUG, fmt, ## args) + +#define SW_LOG_ERR(fmt, args...) \ + SW_LOG_IMPL(ERR, fmt, ## args) + +#endif /* _SW_EVDEV_LOG_H_ */ diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c index 8a2c9d4f..3106eb33 100644 --- a/drivers/event/sw/sw_evdev_scheduler.c +++ b/drivers/event/sw/sw_evdev_scheduler.c @@ -1,40 +1,12 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <rte_ring.h> #include <rte_hash_crc.h> #include <rte_event_ring.h> #include "sw_evdev.h" -#include "iq_ring.h" +#include "iq_chunk.h" #define SW_IQS_MASK (SW_IQS_MAX-1) @@ -71,7 +43,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, */ uint32_t qid_id = qid->id; - iq_ring_dequeue_burst(qid->iq[iq_num], qes, count); + iq_dequeue_burst(sw, &qid->iq[iq_num], qes, count); for (i = 0; i < count; i++) { const struct rte_event *qe = &qes[i]; const uint16_t flow_id = SW_HASH_FLOWID(qes[i].flow_id); @@ -130,7 +102,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, p->cq_buf_count = 0; } } - iq_ring_put_back(qid->iq[iq_num], blocked_qes, nb_blocked); + iq_put_back(sw, &qid->iq[iq_num], blocked_qes, nb_blocked); return count - nb_blocked; } @@ -156,7 +128,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, rte_ring_count(qid->reorder_buffer_freelist)); for (i = 0; i < count; i++) { - const struct rte_event *qe = iq_ring_peek(qid->iq[iq_num]); + const struct rte_event *qe = iq_peek(&qid->iq[iq_num]); uint32_t cq_check_count = 0; uint32_t cq; @@ -193,7 +165,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, (void *)&p->hist_list[head].rob_entry); sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe; - iq_ring_pop(qid->iq[iq_num]); + iq_pop(sw, &qid->iq[iq_num]); rte_compiler_barrier(); p->inflights++; @@ -218,8 +190,8 @@ sw_schedule_dir_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, return 0; /* burst dequeue from the QID IQ ring */ - struct iq_ring *ring = qid->iq[iq_num]; - uint32_t ret = iq_ring_dequeue_burst(ring, + struct sw_iq *iq = &qid->iq[iq_num]; + uint32_t ret = iq_dequeue_burst(sw, iq, &port->cq_buf[port->cq_buf_count], count_free); port->cq_buf_count += ret; @@ -252,7 +224,7 @@ sw_schedule_qid_to_cq(struct sw_evdev *sw) continue; uint32_t pkts_done = 0; - uint32_t count = iq_ring_count(qid->iq[iq_num]); + uint32_t count = iq_count(&qid->iq[iq_num]); if (count > 0) { if (type == SW_SCHED_TYPE_DIRECT) @@ -324,22 +296,15 @@ sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end) continue; } - struct sw_qid *dest_qid_ptr = - &sw->qids[dest_qid]; - const struct iq_ring *dest_iq_ptr = - dest_qid_ptr->iq[dest_iq]; - if (iq_ring_free_count(dest_iq_ptr) == 0) - break; - pkts_iter++; struct sw_qid *q = &sw->qids[dest_qid]; - struct iq_ring *r = q->iq[dest_iq]; + struct sw_iq *iq = &q->iq[dest_iq]; /* we checked for space above, so enqueue must * succeed */ - iq_ring_enqueue(r, qe); + iq_enqueue(sw, iq, qe); q->iq_pkt_mask |= (1 << (dest_iq)); q->iq_pkt_count[dest_iq]++; 
q->stats.rx_pkts++; @@ -404,10 +369,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder) uint32_t iq_num = PRIO_TO_IQ(qe->priority); struct sw_qid *qid = &sw->qids[qe->queue_id]; - if ((flags & QE_FLAG_VALID) && - iq_ring_free_count(qid->iq[iq_num]) == 0) - break; - /* now process based on flags. Note that for directed * queues, the enqueue_flush masks off all but the * valid flag. This makes FWD and PARTIAL enqueues just @@ -471,7 +432,7 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder) */ qid->iq_pkt_mask |= (1 << (iq_num)); - iq_ring_enqueue(qid->iq[iq_num], qe); + iq_enqueue(sw, &qid->iq[iq_num], qe); qid->iq_pkt_count[iq_num]++; qid->stats.rx_pkts++; pkts_iter++; @@ -516,10 +477,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id) uint32_t iq_num = PRIO_TO_IQ(qe->priority); struct sw_qid *qid = &sw->qids[qe->queue_id]; - struct iq_ring *iq_ring = qid->iq[iq_num]; - - if (iq_ring_free_count(iq_ring) == 0) - break; /* move to next port */ + struct sw_iq *iq = &qid->iq[iq_num]; port->stats.rx_pkts++; @@ -527,7 +485,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id) * into the qid at the right priority */ qid->iq_pkt_mask |= (1 << (iq_num)); - iq_ring_enqueue(iq_ring, qe); + iq_enqueue(sw, iq, qe); qid->iq_pkt_count[iq_num]++; qid->stats.rx_pkts++; pkts_iter++; diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c new file mode 100644 index 00000000..78d30e07 --- /dev/null +++ b/drivers/event/sw/sw_evdev_selftest.c @@ -0,0 +1,3245 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +#include <stdio.h> +#include <string.h> +#include <stdint.h> +#include <errno.h> +#include <unistd.h> +#include <sys/queue.h> + +#include <rte_memory.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_debug.h> +#include <rte_ethdev.h> +#include <rte_cycles.h> +#include <rte_eventdev.h> +#include <rte_pause.h> +#include <rte_service.h> +#include <rte_service_component.h> +#include <rte_bus_vdev.h> + +#include "sw_evdev.h" + +#define MAX_PORTS 16 +#define MAX_QIDS 16 +#define NUM_PACKETS (1<<18) + +static int evdev; + +struct test { + struct rte_mempool *mbuf_pool; + uint8_t port[MAX_PORTS]; + uint8_t qid[MAX_QIDS]; + int nb_qids; + uint32_t service_id; +}; + +static struct rte_event release_ev; + +static inline struct rte_mbuf * +rte_gen_arp(int portid, struct rte_mempool *mp) +{ + /* + * len = 14 + 46 + * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46 + */ + static const uint8_t arp_request[] = { + /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8, + 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01, + /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8, + 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01, + /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + }; + struct rte_mbuf *m; + int pkt_len = sizeof(arp_request) - 1; + + m = rte_pktmbuf_alloc(mp); + if (!m) + return 0; + + memcpy((void *)((uintptr_t)m->buf_addr + m->data_off), + arp_request, pkt_len); + rte_pktmbuf_pkt_len(m) = pkt_len; + rte_pktmbuf_data_len(m) = pkt_len; + + RTE_SET_USED(portid); + + return m; +} + +static void +xstats_print(void) +{ + const uint32_t XSTATS_MAX = 1024; + uint32_t i; + uint32_t ids[XSTATS_MAX]; + uint64_t values[XSTATS_MAX]; + 
struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; + + for (i = 0; i < XSTATS_MAX; i++) + ids[i] = i; + + /* Device names / values */ + int ret = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, 0, + xstats_names, ids, XSTATS_MAX); + if (ret < 0) { + printf("%d: xstats names get() returned error\n", + __LINE__); + return; + } + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, + 0, ids, values, ret); + if (ret > (signed int)XSTATS_MAX) + printf("%s %d: more xstats available than space\n", + __func__, __LINE__); + for (i = 0; (signed int)i < ret; i++) { + printf("%d : %s : %"PRIu64"\n", + i, xstats_names[i].name, values[i]); + } + + /* Port names / values */ + ret = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_PORT, 0, + xstats_names, ids, XSTATS_MAX); + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_PORT, 1, + ids, values, ret); + if (ret > (signed int)XSTATS_MAX) + printf("%s %d: more xstats available than space\n", + __func__, __LINE__); + for (i = 0; (signed int)i < ret; i++) { + printf("%d : %s : %"PRIu64"\n", + i, xstats_names[i].name, values[i]); + } + + /* Queue names / values */ + ret = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, 0, + xstats_names, ids, XSTATS_MAX); + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, + 1, ids, values, ret); + if (ret > (signed int)XSTATS_MAX) + printf("%s %d: more xstats available than space\n", + __func__, __LINE__); + for (i = 0; (signed int)i < ret; i++) { + printf("%d : %s : %"PRIu64"\n", + i, xstats_names[i].name, values[i]); + } +} + +/* initialization and config */ +static inline int +init(struct test *t, int nb_queues, int nb_ports) +{ + struct rte_event_dev_config config = { + .nb_event_queues = nb_queues, + .nb_event_ports = nb_ports, + .nb_event_queue_flows = 1024, + .nb_events_limit = 4096, + .nb_event_port_dequeue_depth = 128, + .nb_event_port_enqueue_depth = 128, + }; + int ret; + + void *temp = t->mbuf_pool; /* save and restore mbuf pool */ + + memset(t, 0, sizeof(*t)); + t->mbuf_pool = temp; + + ret = rte_event_dev_configure(evdev, &config); + if (ret < 0) + printf("%d: Error configuring device\n", __LINE__); + return ret; +}; + +static inline int +create_ports(struct test *t, int num_ports) +{ + int i; + static const struct rte_event_port_conf conf = { + .new_event_threshold = 1024, + .dequeue_depth = 32, + .enqueue_depth = 64, + .disable_implicit_release = 0, + }; + if (num_ports > MAX_PORTS) + return -1; + + for (i = 0; i < num_ports; i++) { + if (rte_event_port_setup(evdev, i, &conf) < 0) { + printf("Error setting up port %d\n", i); + return -1; + } + t->port[i] = i; + } + + return 0; +} + +static inline int +create_lb_qids(struct test *t, int num_qids, uint32_t flags) +{ + int i; + + /* Q creation */ + const struct rte_event_queue_conf conf = { + .schedule_type = flags, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + + for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) { + if (rte_event_queue_setup(evdev, i, &conf) < 0) { + printf("%d: error creating qid %d\n", __LINE__, i); + return -1; + } + t->qid[i] = i; + } + t->nb_qids += num_qids; + if (t->nb_qids > MAX_QIDS) + return -1; + + return 0; +} + +static inline int +create_atomic_qids(struct test *t, int num_qids) +{ + return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC); +} + +static inline int +create_ordered_qids(struct test *t, int num_qids) +{ + return create_lb_qids(t, num_qids, 
RTE_SCHED_TYPE_ORDERED); +} + + +static inline int +create_unordered_qids(struct test *t, int num_qids) +{ + return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL); +} + +static inline int +create_directed_qids(struct test *t, int num_qids, const uint8_t ports[]) +{ + int i; + + /* Q creation */ + static const struct rte_event_queue_conf conf = { + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK, + }; + + for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) { + if (rte_event_queue_setup(evdev, i, &conf) < 0) { + printf("%d: error creating qid %d\n", __LINE__, i); + return -1; + } + t->qid[i] = i; + + if (rte_event_port_link(evdev, ports[i - t->nb_qids], + &t->qid[i], NULL, 1) != 1) { + printf("%d: error creating link for qid %d\n", + __LINE__, i); + return -1; + } + } + t->nb_qids += num_qids; + if (t->nb_qids > MAX_QIDS) + return -1; + + return 0; +} + +/* destruction */ +static inline int +cleanup(struct test *t __rte_unused) +{ + rte_event_dev_stop(evdev); + rte_event_dev_close(evdev); + return 0; +}; + +struct test_event_dev_stats { + uint64_t rx_pkts; /**< Total packets received */ + uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */ + uint64_t tx_pkts; /**< Total packets transmitted */ + + /** Packets received on this port */ + uint64_t port_rx_pkts[MAX_PORTS]; + /** Packets dropped on this port */ + uint64_t port_rx_dropped[MAX_PORTS]; + /** Packets inflight on this port */ + uint64_t port_inflight[MAX_PORTS]; + /** Packets transmitted on this port */ + uint64_t port_tx_pkts[MAX_PORTS]; + /** Packets received on this qid */ + uint64_t qid_rx_pkts[MAX_QIDS]; + /** Packets dropped on this qid */ + uint64_t qid_rx_dropped[MAX_QIDS]; + /** Packets transmitted on this qid */ + uint64_t qid_tx_pkts[MAX_QIDS]; +}; + +static inline int +test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats) +{ + static uint32_t i; + static uint32_t total_ids[3]; /* rx, tx and drop */ + static uint32_t port_rx_pkts_ids[MAX_PORTS]; + static uint32_t port_rx_dropped_ids[MAX_PORTS]; + static uint32_t port_inflight_ids[MAX_PORTS]; + static uint32_t port_tx_pkts_ids[MAX_PORTS]; + static uint32_t qid_rx_pkts_ids[MAX_QIDS]; + static uint32_t qid_rx_dropped_ids[MAX_QIDS]; + static uint32_t qid_tx_pkts_ids[MAX_QIDS]; + + + stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id, + "dev_rx", &total_ids[0]); + stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id, + "dev_drop", &total_ids[1]); + stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id, + "dev_tx", &total_ids[2]); + for (i = 0; i < MAX_PORTS; i++) { + char name[32]; + snprintf(name, sizeof(name), "port_%u_rx", i); + stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get( + dev_id, name, &port_rx_pkts_ids[i]); + snprintf(name, sizeof(name), "port_%u_drop", i); + stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get( + dev_id, name, &port_rx_dropped_ids[i]); + snprintf(name, sizeof(name), "port_%u_inflight", i); + stats->port_inflight[i] = rte_event_dev_xstats_by_name_get( + dev_id, name, &port_inflight_ids[i]); + snprintf(name, sizeof(name), "port_%u_tx", i); + stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get( + dev_id, name, &port_tx_pkts_ids[i]); + } + for (i = 0; i < MAX_QIDS; i++) { + char name[32]; + snprintf(name, sizeof(name), "qid_%u_rx", i); + stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get( + dev_id, name, &qid_rx_pkts_ids[i]); + snprintf(name, sizeof(name), "qid_%u_drop", i); + stats->qid_rx_dropped[i] = 
rte_event_dev_xstats_by_name_get( + dev_id, name, &qid_rx_dropped_ids[i]); + snprintf(name, sizeof(name), "qid_%u_tx", i); + stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get( + dev_id, name, &qid_tx_pkts_ids[i]); + } + + return 0; +} + +/* run_prio_packet_test + * This performs a basic packet priority check on the test instance passed in. + * It is factored out of the main priority tests as the same tests must be + * performed to ensure prioritization of each type of QID. + * + * Requirements: + * - An initialized test structure, including mempool + * - t->port[0] is initialized for both Enq / Deq of packets to the QID + * - t->qid[0] is the QID to be tested + * - if LB QID, the CQ must be mapped to the QID. + */ +static int +run_prio_packet_test(struct test *t) +{ + int err; + const uint32_t MAGIC_SEQN[] = {4711, 1234}; + const uint32_t PRIORITY[] = { + RTE_EVENT_DEV_PRIORITY_NORMAL, + RTE_EVENT_DEV_PRIORITY_HIGHEST + }; + unsigned int i; + for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) { + /* generate pkt and enqueue */ + struct rte_event ev; + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + arp->seqn = MAGIC_SEQN[i]; + + ev = (struct rte_event){ + .priority = PRIORITY[i], + .op = RTE_EVENT_OP_NEW, + .queue_id = t->qid[0], + .mbuf = arp + }; + err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1); + if (err < 0) { + printf("%d: error failed to enqueue\n", __LINE__); + return -1; + } + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + struct test_event_dev_stats stats; + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: error failed to get stats\n", __LINE__); + return -1; + } + + if (stats.port_rx_pkts[t->port[0]] != 2) { + printf("%d: error stats incorrect for directed port\n", + __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + + struct rte_event ev, ev2; + uint32_t deq_pkts; + deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0); + if (deq_pkts != 1) { + printf("%d: error failed to deq\n", __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + if (ev.mbuf->seqn != MAGIC_SEQN[1]) { + printf("%d: first packet out not highest priority\n", + __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + rte_pktmbuf_free(ev.mbuf); + + deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0); + if (deq_pkts != 1) { + printf("%d: error failed to deq\n", __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + if (ev2.mbuf->seqn != MAGIC_SEQN[0]) { + printf("%d: second packet out not lower priority\n", + __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + rte_pktmbuf_free(ev2.mbuf); + + cleanup(t); + return 0; +} + +static int +test_single_directed_packet(struct test *t) +{ + const int rx_enq = 0; + const int wrk_enq = 2; + int err; + + /* Create instance with 3 directed QIDs going to 3 ports */ + if (init(t, 3, 3) < 0 || + create_ports(t, 3) < 0 || + create_directed_qids(t, 3, t->port) < 0) + return -1; + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /************** FORWARD ****************/ + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = wrk_enq, + .mbuf = arp, + }; + + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + + const uint32_t MAGIC_SEQN = 4711; + arp->seqn = MAGIC_SEQN; + + /* generate pkt and enqueue */ + err = 
rte_event_enqueue_burst(evdev, rx_enq, &ev, 1); + if (err < 0) { + printf("%d: error failed to enqueue\n", __LINE__); + return -1; + } + + /* Run schedule() as dir packets may need to be re-ordered */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + struct test_event_dev_stats stats; + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: error failed to get stats\n", __LINE__); + return -1; + } + + if (stats.port_rx_pkts[rx_enq] != 1) { + printf("%d: error stats incorrect for directed port\n", + __LINE__); + return -1; + } + + uint32_t deq_pkts; + deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0); + if (deq_pkts != 1) { + printf("%d: error failed to deq\n", __LINE__); + return -1; + } + + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_rx_pkts[wrk_enq] != 0 && + stats.port_rx_pkts[wrk_enq] != 1) { + printf("%d: error directed stats post-dequeue\n", __LINE__); + return -1; + } + + if (ev.mbuf->seqn != MAGIC_SEQN) { + printf("%d: error magic sequence number not dequeued\n", + __LINE__); + return -1; + } + + rte_pktmbuf_free(ev.mbuf); + cleanup(t); + return 0; +} + +static int +test_directed_forward_credits(struct test *t) +{ + uint32_t i; + int32_t err; + + if (init(t, 1, 1) < 0 || + create_ports(t, 1) < 0 || + create_directed_qids(t, 1, t->port) < 0) + return -1; + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = 0, + }; + + for (i = 0; i < 1000; i++) { + err = rte_event_enqueue_burst(evdev, 0, &ev, 1); + if (err < 0) { + printf("%d: error failed to enqueue\n", __LINE__); + return -1; + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + uint32_t deq_pkts; + deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0); + if (deq_pkts != 1) { + printf("%d: error failed to deq\n", __LINE__); + return -1; + } + + /* re-write event to be a forward, and continue looping it */ + ev.op = RTE_EVENT_OP_FORWARD; + } + + cleanup(t); + return 0; +} + + +static int +test_priority_directed(struct test *t) +{ + if (init(t, 1, 1) < 0 || + create_ports(t, 1) < 0 || + create_directed_qids(t, 1, t->port) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + return run_prio_packet_test(t); +} + +static int +test_priority_atomic(struct test *t) +{ + if (init(t, 1, 1) < 0 || + create_ports(t, 1) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* map the QID */ + if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) { + printf("%d: error mapping qid to port\n", __LINE__); + return -1; + } + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + return run_prio_packet_test(t); +} + +static int +test_priority_ordered(struct test *t) +{ + if (init(t, 1, 1) < 0 || + create_ports(t, 1) < 0 || + create_ordered_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* map the QID */ + if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) { + printf("%d: error mapping qid to port\n", __LINE__); + return -1; + } + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + return run_prio_packet_test(t); +} + +static int +test_priority_unordered(struct test 
*t) +{ + if (init(t, 1, 1) < 0 || + create_ports(t, 1) < 0 || + create_unordered_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* map the QID */ + if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) { + printf("%d: error mapping qid to port\n", __LINE__); + return -1; + } + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + return run_prio_packet_test(t); +} + +static int +burst_packets(struct test *t) +{ + /************** CONFIG ****************/ + uint32_t i; + int err; + int ret; + + /* Create instance with 2 ports and 2 queues */ + if (init(t, 2, 2) < 0 || + create_ports(t, 2) < 0 || + create_atomic_qids(t, 2) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1); + if (ret != 1) { + printf("%d: error mapping lb qid0\n", __LINE__); + return -1; + } + ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1); + if (ret != 1) { + printf("%d: error mapping lb qid1\n", __LINE__); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /************** FORWARD ****************/ + const uint32_t rx_port = 0; + const uint32_t NUM_PKTS = 2; + + for (i = 0; i < NUM_PKTS; i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: error generating pkt\n", __LINE__); + return -1; + } + + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = i % 2, + .flow_id = i % 3, + .mbuf = arp, + }; + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* Check stats for all NUM_PKTS arrived to sched core */ + struct test_event_dev_stats stats; + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) { + printf("%d: Sched core didn't receive all %d pkts\n", + __LINE__, NUM_PKTS); + rte_event_dev_dump(evdev, stdout); + return -1; + } + + uint32_t deq_pkts; + int p; + + deq_pkts = 0; + /******** DEQ QID 1 *******/ + do { + struct rte_event ev; + p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0); + deq_pkts += p; + rte_pktmbuf_free(ev.mbuf); + } while (p); + + if (deq_pkts != NUM_PKTS/2) { + printf("%d: Half of NUM_PKTS didn't arrive at port 1\n", + __LINE__); + return -1; + } + + /******** DEQ QID 2 *******/ + deq_pkts = 0; + do { + struct rte_event ev; + p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0); + deq_pkts += p; + rte_pktmbuf_free(ev.mbuf); + } while (p); + if (deq_pkts != NUM_PKTS/2) { + printf("%d: Half of NUM_PKTS didn't arrive at port 2\n", + __LINE__); + return -1; + } + + cleanup(t); + return 0; +} + +static int +abuse_inflights(struct test *t) +{ + const int rx_enq = 0; + const int wrk_enq = 2; + int err; + + /* Create instance with 4 ports */ + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + + if 
(rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /* Enqueue op only */ + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + + /* schedule */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + struct test_event_dev_stats stats; + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + + if (stats.rx_pkts != 0 || + stats.tx_pkts != 0 || + stats.port_inflight[wrk_enq] != 0) { + printf("%d: Sched core didn't handle pkt as expected\n", + __LINE__); + return -1; + } + + cleanup(t); + return 0; +} + +static int +xstats_tests(struct test *t) +{ + const int wrk_enq = 2; + int err; + + /* Create instance with 4 ports */ + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + const uint32_t XSTATS_MAX = 1024; + + uint32_t i; + uint32_t ids[XSTATS_MAX]; + uint64_t values[XSTATS_MAX]; + struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; + + for (i = 0; i < XSTATS_MAX; i++) + ids[i] = i; + + /* Device names / values */ + int ret = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, + 0, xstats_names, ids, XSTATS_MAX); + if (ret != 6) { + printf("%d: expected 6 stats, got return %d\n", __LINE__, ret); + return -1; + } + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, + 0, ids, values, ret); + if (ret != 6) { + printf("%d: expected 6 stats, got return %d\n", __LINE__, ret); + return -1; + } + + /* Port names / values */ + ret = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_PORT, 0, + xstats_names, ids, XSTATS_MAX); + if (ret != 21) { + printf("%d: expected 21 stats, got return %d\n", __LINE__, ret); + return -1; + } + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_PORT, 0, + ids, values, ret); + if (ret != 21) { + printf("%d: expected 21 stats, got return %d\n", __LINE__, ret); + return -1; + } + + /* Queue names / values */ + ret = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, + 0, xstats_names, ids, XSTATS_MAX); + if (ret != 16) { + printf("%d: expected 16 stats, got return %d\n", __LINE__, ret); + return -1; + } + + /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */ + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, + 1, ids, values, ret); + if (ret != -EINVAL) { + printf("%d: expected 0 stats, got return %d\n", __LINE__, ret); + return -1; + } + + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, + 0, ids, values, ret); + if (ret != 16) { + printf("%d: expected 16 stats, got return %d\n", __LINE__, ret); + return -1; + } + + /* enqueue packets to check values */ + for (i = 0; i < 3; i++) { + struct rte_event ev; + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + ev.queue_id = t->qid[i]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + ev.flow_id = 7; + arp->seqn = i; + + int err = rte_event_enqueue_burst(evdev, t->port[0], 
&ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* Device names / values */ + int num_stats = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, 0, + xstats_names, ids, XSTATS_MAX); + if (num_stats < 0) + goto fail; + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, + 0, ids, values, num_stats); + static const uint64_t expected[] = {3, 3, 0, 1, 0, 0}; + for (i = 0; (signed int)i < ret; i++) { + if (expected[i] != values[i]) { + printf( + "%d Error xstat %d (id %d) %s : %"PRIu64 + ", expect %"PRIu64"\n", + __LINE__, i, ids[i], xstats_names[i].name, + values[i], expected[i]); + goto fail; + } + } + + ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE, + 0, NULL, 0); + + /* ensure reset statistics are zero-ed */ + static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0}; + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, + 0, ids, values, num_stats); + for (i = 0; (signed int)i < ret; i++) { + if (expected_zero[i] != values[i]) { + printf( + "%d Error, xstat %d (id %d) %s : %"PRIu64 + ", expect %"PRIu64"\n", + __LINE__, i, ids[i], xstats_names[i].name, + values[i], expected_zero[i]); + goto fail; + } + } + + /* port reset checks */ + num_stats = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_PORT, 0, + xstats_names, ids, XSTATS_MAX); + if (num_stats < 0) + goto fail; + ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, + 0, ids, values, num_stats); + + static const uint64_t port_expected[] = { + 3 /* rx */, + 0 /* tx */, + 0 /* drop */, + 0 /* inflights */, + 0 /* avg pkt cycles */, + 29 /* credits */, + 0 /* rx ring used */, + 4096 /* rx ring free */, + 0 /* cq ring used */, + 32 /* cq ring free */, + 0 /* dequeue calls */, + /* 10 dequeue burst buckets */ + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + }; + if (ret != RTE_DIM(port_expected)) { + printf( + "%s %d: wrong number of port stats (%d), expected %zu\n", + __func__, __LINE__, ret, RTE_DIM(port_expected)); + } + + for (i = 0; (signed int)i < ret; i++) { + if (port_expected[i] != values[i]) { + printf( + "%s : %d: Error stat %s is %"PRIu64 + ", expected %"PRIu64"\n", + __func__, __LINE__, xstats_names[i].name, + values[i], port_expected[i]); + goto fail; + } + } + + ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT, + 0, NULL, 0); + + /* ensure reset statistics are zero-ed */ + static const uint64_t port_expected_zero[] = { + 0 /* rx */, + 0 /* tx */, + 0 /* drop */, + 0 /* inflights */, + 0 /* avg pkt cycles */, + 29 /* credits */, + 0 /* rx ring used */, + 4096 /* rx ring free */, + 0 /* cq ring used */, + 32 /* cq ring free */, + 0 /* dequeue calls */, + /* 10 dequeue burst buckets */ + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + }; + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_PORT, + 0, ids, values, num_stats); + for (i = 0; (signed int)i < ret; i++) { + if (port_expected_zero[i] != values[i]) { + printf( + "%d, Error, xstat %d (id %d) %s : %"PRIu64 + ", expect %"PRIu64"\n", + __LINE__, i, ids[i], xstats_names[i].name, + values[i], port_expected_zero[i]); + goto fail; + } + } + + /* QUEUE STATS TESTS */ + num_stats = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, 0, + xstats_names, ids, XSTATS_MAX); + ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, + 0, ids, values, num_stats); + if (ret < 0) { + printf("xstats get returned %d\n", ret); + goto fail; + } + if ((unsigned 
int)ret > XSTATS_MAX) + printf("%s %d: more xstats available than space\n", + __func__, __LINE__); + + static const uint64_t queue_expected[] = { + 3 /* rx */, + 3 /* tx */, + 0 /* drop */, + 3 /* inflights */, + 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */ + /* QID-to-Port: pinned_flows, packets */ + 0, 0, + 0, 0, + 1, 3, + 0, 0, + }; + for (i = 0; (signed int)i < ret; i++) { + if (queue_expected[i] != values[i]) { + printf( + "%d, Error, xstat %d (id %d) %s : %"PRIu64 + ", expect %"PRIu64"\n", + __LINE__, i, ids[i], xstats_names[i].name, + values[i], queue_expected[i]); + goto fail; + } + } + + /* Reset the queue stats here */ + ret = rte_event_dev_xstats_reset(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, 0, + NULL, + 0); + + /* Verify that the resetable stats are reset, and others are not */ + static const uint64_t queue_expected_zero[] = { + 0 /* rx */, + 0 /* tx */, + 0 /* drop */, + 3 /* inflight */, + 0, 0, 0, 0, /* 4 iq used */ + /* QID-to-Port: pinned_flows, packets */ + 0, 0, + 0, 0, + 1, 0, + 0, 0, + }; + + ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0, + ids, values, num_stats); + int fails = 0; + for (i = 0; (signed int)i < ret; i++) { + if (queue_expected_zero[i] != values[i]) { + printf( + "%d, Error, xstat %d (id %d) %s : %"PRIu64 + ", expect %"PRIu64"\n", + __LINE__, i, ids[i], xstats_names[i].name, + values[i], queue_expected_zero[i]); + fails++; + } + } + if (fails) { + printf("%d : %d of values were not as expected above\n", + __LINE__, fails); + goto fail; + } + + cleanup(t); + return 0; + +fail: + rte_event_dev_dump(0, stdout); + cleanup(t); + return -1; +} + + +static int +xstats_id_abuse_tests(struct test *t) +{ + int err; + const uint32_t XSTATS_MAX = 1024; + const uint32_t link_port = 2; + + uint32_t ids[XSTATS_MAX]; + struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; + + /* Create instance with 4 ports */ + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + goto fail; + } + + err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + goto fail; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + goto fail; + } + + /* no test for device, as it ignores the port/q number */ + int num_stats = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_PORT, + UINT8_MAX-1, xstats_names, ids, + XSTATS_MAX); + if (num_stats != 0) { + printf("%d: expected %d stats, got return %d\n", __LINE__, + 0, num_stats); + goto fail; + } + + num_stats = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, + UINT8_MAX-1, xstats_names, ids, + XSTATS_MAX); + if (num_stats != 0) { + printf("%d: expected %d stats, got return %d\n", __LINE__, + 0, num_stats); + goto fail; + } + + cleanup(t); + return 0; +fail: + cleanup(t); + return -1; +} + +static int +port_reconfig_credits(struct test *t) +{ + if (init(t, 1, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + uint32_t i; + const uint32_t NUM_ITERS = 32; + for (i = 0; i < NUM_ITERS; i++) { + const struct rte_event_queue_conf conf = { + .schedule_type = RTE_SCHED_TYPE_ATOMIC, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + if (rte_event_queue_setup(evdev, 0, &conf) < 0) { + printf("%d: error creating qid\n", __LINE__); + return -1; + } + t->qid[0] = 0; + + static const struct 
rte_event_port_conf port_conf = { + .new_event_threshold = 128, + .dequeue_depth = 32, + .enqueue_depth = 64, + .disable_implicit_release = 0, + }; + if (rte_event_port_setup(evdev, 0, &port_conf) < 0) { + printf("%d Error setting up port\n", __LINE__); + return -1; + } + + int links = rte_event_port_link(evdev, 0, NULL, NULL, 0); + if (links != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + goto fail; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + goto fail; + } + + const uint32_t NPKTS = 1; + uint32_t j; + for (j = 0; j < NPKTS; j++) { + struct rte_event ev; + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + goto fail; + } + ev.queue_id = t->qid[0]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + int err = rte_event_enqueue_burst(evdev, 0, &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + rte_event_dev_dump(0, stdout); + goto fail; + } + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + struct rte_event ev[NPKTS]; + int deq = rte_event_dequeue_burst(evdev, t->port[0], ev, + NPKTS, 0); + if (deq != 1) + printf("%d error; no packet dequeued\n", __LINE__); + + /* let cleanup below stop the device on last iter */ + if (i != NUM_ITERS-1) + rte_event_dev_stop(evdev); + } + + cleanup(t); + return 0; +fail: + cleanup(t); + return -1; +} + +static int +port_single_lb_reconfig(struct test *t) +{ + if (init(t, 2, 2) < 0) { + printf("%d: Error initializing device\n", __LINE__); + goto fail; + } + + static const struct rte_event_queue_conf conf_lb_atomic = { + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .schedule_type = RTE_SCHED_TYPE_ATOMIC, + .nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) { + printf("%d: error creating qid\n", __LINE__); + goto fail; + } + + static const struct rte_event_queue_conf conf_single_link = { + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK, + }; + if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) { + printf("%d: error creating qid\n", __LINE__); + goto fail; + } + + struct rte_event_port_conf port_conf = { + .new_event_threshold = 128, + .dequeue_depth = 32, + .enqueue_depth = 64, + .disable_implicit_release = 0, + }; + if (rte_event_port_setup(evdev, 0, &port_conf) < 0) { + printf("%d Error setting up port\n", __LINE__); + goto fail; + } + if (rte_event_port_setup(evdev, 1, &port_conf) < 0) { + printf("%d Error setting up port\n", __LINE__); + goto fail; + } + + /* link port to lb queue */ + uint8_t queue_id = 0; + if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) { + printf("%d: error creating link for qid\n", __LINE__); + goto fail; + } + + int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1); + if (ret != 1) { + printf("%d: Error unlinking lb port\n", __LINE__); + goto fail; + } + + queue_id = 1; + if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) { + printf("%d: error creating link for qid\n", __LINE__); + goto fail; + } + + queue_id = 0; + int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + goto fail; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + goto fail; + } + + cleanup(t); + return 0; +fail: + cleanup(t); + return -1; +} + +static int +xstats_brute_force(struct test *t) +{ + uint32_t i; + const uint32_t 
XSTATS_MAX = 1024; + uint32_t ids[XSTATS_MAX]; + uint64_t values[XSTATS_MAX]; + struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; + + + /* Create instance with 4 ports */ + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + goto fail; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + goto fail; + } + + for (i = 0; i < XSTATS_MAX; i++) + ids[i] = i; + + for (i = 0; i < 3; i++) { + uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i; + uint32_t j; + for (j = 0; j < UINT8_MAX; j++) { + rte_event_dev_xstats_names_get(evdev, mode, + j, xstats_names, ids, XSTATS_MAX); + + rte_event_dev_xstats_get(evdev, mode, j, ids, + values, XSTATS_MAX); + } + } + + cleanup(t); + return 0; +fail: + cleanup(t); + return -1; +} + +static int +xstats_id_reset_tests(struct test *t) +{ + const int wrk_enq = 2; + int err; + + /* Create instance with 4 ports */ + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + goto fail; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + goto fail; + } + +#define XSTATS_MAX 1024 + int ret; + uint32_t i; + uint32_t ids[XSTATS_MAX]; + uint64_t values[XSTATS_MAX]; + struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; + + for (i = 0; i < XSTATS_MAX; i++) + ids[i] = i; + +#define NUM_DEV_STATS 6 + /* Device names / values */ + int num_stats = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, + 0, xstats_names, ids, XSTATS_MAX); + if (num_stats != NUM_DEV_STATS) { + printf("%d: expected %d stats, got return %d\n", __LINE__, + NUM_DEV_STATS, num_stats); + goto fail; + } + ret = rte_event_dev_xstats_get(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, + 0, ids, values, num_stats); + if (ret != NUM_DEV_STATS) { + printf("%d: expected %d stats, got return %d\n", __LINE__, + NUM_DEV_STATS, ret); + goto fail; + } + +#define NPKTS 7 + for (i = 0; i < NPKTS; i++) { + struct rte_event ev; + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + goto fail; + } + ev.queue_id = t->qid[i]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + arp->seqn = i; + + int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + goto fail; + } + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + static const char * const dev_names[] = { + "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls", + "dev_sched_no_iq_enq", "dev_sched_no_cq_enq", + }; + uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0}; + for (i = 0; (int)i < ret; i++) { + unsigned int id; + uint64_t val = rte_event_dev_xstats_by_name_get(evdev, + dev_names[i], + &id); + if (id != i) { + printf("%d: %s id incorrect, expected %d got %d\n", + __LINE__, dev_names[i], i, id); + goto fail; + } + if (val != dev_expected[i]) { + printf("%d: %s value incorrect, expected %" + PRIu64" got %d\n", __LINE__, dev_names[i], + dev_expected[i], id); + goto fail; + } + /* reset to zero */ + int reset_ret = 
rte_event_dev_xstats_reset(evdev, + RTE_EVENT_DEV_XSTATS_DEVICE, 0, + &id, + 1); + if (reset_ret) { + printf("%d: failed to reset successfully\n", __LINE__); + goto fail; + } + dev_expected[i] = 0; + /* check value again */ + val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0); + if (val != dev_expected[i]) { + printf("%d: %s value incorrect, expected %"PRIu64 + " got %"PRIu64"\n", __LINE__, dev_names[i], + dev_expected[i], val); + goto fail; + } + }; + +/* 48 is stat offset from start of the devices whole xstats. + * This WILL break every time we add a statistic to a port + * or the device, but there is no other way to test + */ +#define PORT_OFF 48 +/* num stats for the tested port. CQ size adds more stats to a port */ +#define NUM_PORT_STATS 21 +/* the port to test. */ +#define PORT 2 + num_stats = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_PORT, PORT, + xstats_names, ids, XSTATS_MAX); + if (num_stats != NUM_PORT_STATS) { + printf("%d: expected %d stats, got return %d\n", + __LINE__, NUM_PORT_STATS, num_stats); + goto fail; + } + ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT, + ids, values, num_stats); + + if (ret != NUM_PORT_STATS) { + printf("%d: expected %d stats, got return %d\n", + __LINE__, NUM_PORT_STATS, ret); + goto fail; + } + static const char * const port_names[] = { + "port_2_rx", + "port_2_tx", + "port_2_drop", + "port_2_inflight", + "port_2_avg_pkt_cycles", + "port_2_credits", + "port_2_rx_ring_used", + "port_2_rx_ring_free", + "port_2_cq_ring_used", + "port_2_cq_ring_free", + "port_2_dequeue_calls", + "port_2_dequeues_returning_0", + "port_2_dequeues_returning_1-4", + "port_2_dequeues_returning_5-8", + "port_2_dequeues_returning_9-12", + "port_2_dequeues_returning_13-16", + "port_2_dequeues_returning_17-20", + "port_2_dequeues_returning_21-24", + "port_2_dequeues_returning_25-28", + "port_2_dequeues_returning_29-32", + "port_2_dequeues_returning_33-36", + }; + uint64_t port_expected[] = { + 0, /* rx */ + NPKTS, /* tx */ + 0, /* drop */ + NPKTS, /* inflight */ + 0, /* avg pkt cycles */ + 0, /* credits */ + 0, /* rx ring used */ + 4096, /* rx ring free */ + NPKTS, /* cq ring used */ + 25, /* cq ring free */ + 0, /* dequeue zero calls */ + 0, 0, 0, 0, 0, /* 10 dequeue buckets */ + 0, 0, 0, 0, 0, + }; + uint64_t port_expected_zero[] = { + 0, /* rx */ + 0, /* tx */ + 0, /* drop */ + NPKTS, /* inflight */ + 0, /* avg pkt cycles */ + 0, /* credits */ + 0, /* rx ring used */ + 4096, /* rx ring free */ + NPKTS, /* cq ring used */ + 25, /* cq ring free */ + 0, /* dequeue zero calls */ + 0, 0, 0, 0, 0, /* 10 dequeue buckets */ + 0, 0, 0, 0, 0, + }; + if (RTE_DIM(port_expected) != NUM_PORT_STATS || + RTE_DIM(port_names) != NUM_PORT_STATS) { + printf("%d: port array of wrong size\n", __LINE__); + goto fail; + } + + int failed = 0; + for (i = 0; (int)i < ret; i++) { + unsigned int id; + uint64_t val = rte_event_dev_xstats_by_name_get(evdev, + port_names[i], + &id); + if (id != i + PORT_OFF) { + printf("%d: %s id incorrect, expected %d got %d\n", + __LINE__, port_names[i], i+PORT_OFF, + id); + failed = 1; + } + if (val != port_expected[i]) { + printf("%d: %s value incorrect, expected %"PRIu64 + " got %d\n", __LINE__, port_names[i], + port_expected[i], id); + failed = 1; + } + /* reset to zero */ + int reset_ret = rte_event_dev_xstats_reset(evdev, + RTE_EVENT_DEV_XSTATS_PORT, PORT, + &id, + 1); + if (reset_ret) { + printf("%d: failed to reset successfully\n", __LINE__); + failed = 1; + } + /* check value again */ + val = 
rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0); + if (val != port_expected_zero[i]) { + printf("%d: %s value incorrect, expected %"PRIu64 + " got %"PRIu64"\n", __LINE__, port_names[i], + port_expected_zero[i], val); + failed = 1; + } + }; + if (failed) + goto fail; + +/* num queue stats */ +#define NUM_Q_STATS 16 +/* queue offset from start of the devices whole xstats. + * This will break every time we add a statistic to a device/port/queue + */ +#define QUEUE_OFF 90 + const uint32_t queue = 0; + num_stats = rte_event_dev_xstats_names_get(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, queue, + xstats_names, ids, XSTATS_MAX); + if (num_stats != NUM_Q_STATS) { + printf("%d: expected %d stats, got return %d\n", + __LINE__, NUM_Q_STATS, num_stats); + goto fail; + } + ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, + queue, ids, values, num_stats); + if (ret != NUM_Q_STATS) { + printf("%d: expected 21 stats, got return %d\n", __LINE__, ret); + goto fail; + } + static const char * const queue_names[] = { + "qid_0_rx", + "qid_0_tx", + "qid_0_drop", + "qid_0_inflight", + "qid_0_iq_0_used", + "qid_0_iq_1_used", + "qid_0_iq_2_used", + "qid_0_iq_3_used", + "qid_0_port_0_pinned_flows", + "qid_0_port_0_packets", + "qid_0_port_1_pinned_flows", + "qid_0_port_1_packets", + "qid_0_port_2_pinned_flows", + "qid_0_port_2_packets", + "qid_0_port_3_pinned_flows", + "qid_0_port_3_packets", + }; + uint64_t queue_expected[] = { + 7, /* rx */ + 7, /* tx */ + 0, /* drop */ + 7, /* inflight */ + 0, /* iq 0 used */ + 0, /* iq 1 used */ + 0, /* iq 2 used */ + 0, /* iq 3 used */ + /* QID-to-Port: pinned_flows, packets */ + 0, 0, + 0, 0, + 1, 7, + 0, 0, + }; + uint64_t queue_expected_zero[] = { + 0, /* rx */ + 0, /* tx */ + 0, /* drop */ + 7, /* inflight */ + 0, /* iq 0 used */ + 0, /* iq 1 used */ + 0, /* iq 2 used */ + 0, /* iq 3 used */ + /* QID-to-Port: pinned_flows, packets */ + 0, 0, + 0, 0, + 1, 0, + 0, 0, + }; + if (RTE_DIM(queue_expected) != NUM_Q_STATS || + RTE_DIM(queue_expected_zero) != NUM_Q_STATS || + RTE_DIM(queue_names) != NUM_Q_STATS) { + printf("%d : queue array of wrong size\n", __LINE__); + goto fail; + } + + failed = 0; + for (i = 0; (int)i < ret; i++) { + unsigned int id; + uint64_t val = rte_event_dev_xstats_by_name_get(evdev, + queue_names[i], + &id); + if (id != i + QUEUE_OFF) { + printf("%d: %s id incorrect, expected %d got %d\n", + __LINE__, queue_names[i], i+QUEUE_OFF, + id); + failed = 1; + } + if (val != queue_expected[i]) { + printf("%d: %d: %s value , expected %"PRIu64 + " got %"PRIu64"\n", i, __LINE__, + queue_names[i], queue_expected[i], val); + failed = 1; + } + /* reset to zero */ + int reset_ret = rte_event_dev_xstats_reset(evdev, + RTE_EVENT_DEV_XSTATS_QUEUE, + queue, &id, 1); + if (reset_ret) { + printf("%d: failed to reset successfully\n", __LINE__); + failed = 1; + } + /* check value again */ + val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i], + 0); + if (val != queue_expected_zero[i]) { + printf("%d: %s value incorrect, expected %"PRIu64 + " got %"PRIu64"\n", __LINE__, queue_names[i], + queue_expected_zero[i], val); + failed = 1; + } + }; + + if (failed) + goto fail; + + cleanup(t); + return 0; +fail: + cleanup(t); + return -1; +} + +static int +ordered_reconfigure(struct test *t) +{ + if (init(t, 1, 1) < 0 || + create_ports(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + const struct rte_event_queue_conf conf = { + .schedule_type = RTE_SCHED_TYPE_ORDERED, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + 
.nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + + if (rte_event_queue_setup(evdev, 0, &conf) < 0) { + printf("%d: error creating qid\n", __LINE__); + goto failed; + } + + if (rte_event_queue_setup(evdev, 0, &conf) < 0) { + printf("%d: error creating qid, for 2nd time\n", __LINE__); + goto failed; + } + + rte_event_port_link(evdev, t->port[0], NULL, NULL, 0); + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + cleanup(t); + return 0; +failed: + cleanup(t); + return -1; +} + +static int +qid_priorities(struct test *t) +{ + /* Test works by having a CQ with enough empty space for all packets, + * and enqueueing 3 packets to 3 QIDs. They must return based on the + * priority of the QID, not the ingress order, to pass the test + */ + unsigned int i; + /* Create instance with 1 ports, and 3 qids */ + if (init(t, 3, 1) < 0 || + create_ports(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + for (i = 0; i < 3; i++) { + /* Create QID */ + const struct rte_event_queue_conf conf = { + .schedule_type = RTE_SCHED_TYPE_ATOMIC, + /* increase priority (0 == highest), as we go */ + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i, + .nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + + if (rte_event_queue_setup(evdev, i, &conf) < 0) { + printf("%d: error creating qid %d\n", __LINE__, i); + return -1; + } + t->qid[i] = i; + } + t->nb_qids = i; + /* map all QIDs to port */ + rte_event_port_link(evdev, t->port[0], NULL, NULL, 0); + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /* enqueue 3 packets, setting seqn and QID to check priority */ + for (i = 0; i < 3; i++) { + struct rte_event ev; + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + ev.queue_id = t->qid[i]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + arp->seqn = i; + + int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* dequeue packets, verify priority was upheld */ + struct rte_event ev[32]; + uint32_t deq_pkts = + rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0); + if (deq_pkts != 3) { + printf("%d: failed to deq packets\n", __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + for (i = 0; i < 3; i++) { + if (ev[i].mbuf->seqn != 2-i) { + printf( + "%d: qid priority test: seqn %d incorrectly prioritized\n", + __LINE__, i); + } + } + + cleanup(t); + return 0; +} + +static int +load_balancing(struct test *t) +{ + const int rx_enq = 0; + int err; + uint32_t i; + + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + for (i = 0; i < 3; i++) { + /* map port 1 - 3 inclusive */ + if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0], + NULL, 1) != 1) { + printf("%d: error mapping qid to port %d\n", + __LINE__, i); + return -1; + } + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /************** FORWARD ****************/ + /* + * Create a set of flows that test the load-balancing operation of the + * implementation. 
Fill CQ 0 and 1 with flows 0 and 1, and test + * with a new flow, which should be sent to the 3rd mapped CQ + */ + static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2}; + + for (i = 0; i < RTE_DIM(flows); i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = t->qid[0], + .flow_id = flows[i], + .mbuf = arp, + }; + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + struct test_event_dev_stats stats; + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + + if (stats.port_inflight[1] != 4) { + printf("%d:%s: port 1 inflight not correct\n", __LINE__, + __func__); + return -1; + } + if (stats.port_inflight[2] != 2) { + printf("%d:%s: port 2 inflight not correct\n", __LINE__, + __func__); + return -1; + } + if (stats.port_inflight[3] != 3) { + printf("%d:%s: port 3 inflight not correct\n", __LINE__, + __func__); + return -1; + } + + cleanup(t); + return 0; +} + +static int +load_balancing_history(struct test *t) +{ + struct test_event_dev_stats stats = {0}; + const int rx_enq = 0; + int err; + uint32_t i; + + /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */ + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) + return -1; + + /* CQ mapping to QID */ + if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) { + printf("%d: error mapping port 1 qid\n", __LINE__); + return -1; + } + if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) { + printf("%d: error mapping port 2 qid\n", __LINE__); + return -1; + } + if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) { + printf("%d: error mapping port 3 qid\n", __LINE__); + return -1; + } + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /* + * Create a set of flows that test the load-balancing operation of the + * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop + * the packet from CQ 0, send in a new set of flows. Ensure that: + * 1. The new flow 3 gets into the empty CQ0 + * 2. packets for existing flow gets added into CQ1 + * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain + * more outstanding pkts + * + * This test makes sure that when a flow ends (i.e. all packets + * have been completed for that flow), that the flow can be moved + * to a different CQ when new packets come in for that flow. 
+ */ + static uint32_t flows1[] = {0, 1, 1, 2}; + + for (i = 0; i < RTE_DIM(flows1); i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + struct rte_event ev = { + .flow_id = flows1[i], + .op = RTE_EVENT_OP_NEW, + .queue_id = t->qid[0], + .event_type = RTE_EVENT_TYPE_CPU, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .mbuf = arp + }; + + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + arp->hash.rss = flows1[i]; + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + + /* call the scheduler */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* Dequeue the flow 0 packet from port 1, so that we can then drop */ + struct rte_event ev; + if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) { + printf("%d: failed to dequeue\n", __LINE__); + return -1; + } + if (ev.mbuf->hash.rss != flows1[0]) { + printf("%d: unexpected flow received\n", __LINE__); + return -1; + } + + /* drop the flow 0 packet from port 1 */ + rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1); + + /* call the scheduler */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* + * Set up the next set of flows, first a new flow to fill up + * CQ 0, so that the next flow 0 packet should go to CQ2 + */ + static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 }; + + for (i = 0; i < RTE_DIM(flows2); i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + struct rte_event ev = { + .flow_id = flows2[i], + .op = RTE_EVENT_OP_NEW, + .queue_id = t->qid[0], + .event_type = RTE_EVENT_TYPE_CPU, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .mbuf = arp + }; + + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + arp->hash.rss = flows2[i]; + + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + + /* schedule */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d:failed to get stats\n", __LINE__); + return -1; + } + + /* + * Now check the resulting inflights on each port. 
+ */ + if (stats.port_inflight[1] != 3) { + printf("%d:%s: port 1 inflight not correct\n", __LINE__, + __func__); + printf("Inflights, ports 1, 2, 3: %u, %u, %u\n", + (unsigned int)stats.port_inflight[1], + (unsigned int)stats.port_inflight[2], + (unsigned int)stats.port_inflight[3]); + return -1; + } + if (stats.port_inflight[2] != 4) { + printf("%d:%s: port 2 inflight not correct\n", __LINE__, + __func__); + printf("Inflights, ports 1, 2, 3: %u, %u, %u\n", + (unsigned int)stats.port_inflight[1], + (unsigned int)stats.port_inflight[2], + (unsigned int)stats.port_inflight[3]); + return -1; + } + if (stats.port_inflight[3] != 2) { + printf("%d:%s: port 3 inflight not correct\n", __LINE__, + __func__); + printf("Inflights, ports 1, 2, 3: %u, %u, %u\n", + (unsigned int)stats.port_inflight[1], + (unsigned int)stats.port_inflight[2], + (unsigned int)stats.port_inflight[3]); + return -1; + } + + for (i = 1; i <= 3; i++) { + struct rte_event ev; + while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0)) + rte_event_enqueue_burst(evdev, i, &release_ev, 1); + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + cleanup(t); + return 0; +} + +static int +invalid_qid(struct test *t) +{ + struct test_event_dev_stats stats; + const int rx_enq = 0; + int err; + uint32_t i; + + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + for (i = 0; i < 4; i++) { + err = rte_event_port_link(evdev, t->port[i], &t->qid[0], + NULL, 1); + if (err != 1) { + printf("%d: error mapping port 1 qid\n", __LINE__); + return -1; + } + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /* + * Send in a packet with an invalid qid to the scheduler. + * We should see the packed enqueued OK, but the inflights for + * that packet should not be incremented, and the rx_dropped + * should be incremented. + */ + static uint32_t flows1[] = {20}; + + for (i = 0; i < RTE_DIM(flows1); i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = t->qid[0] + flows1[i], + .flow_id = i, + .mbuf = arp, + }; + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + + /* call the scheduler */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + + /* + * Now check the resulting inflights on the port, and the rx_dropped. 
+ */ + if (stats.port_inflight[0] != 0) { + printf("%d:%s: port 1 inflight count not correct\n", __LINE__, + __func__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + if (stats.port_rx_dropped[0] != 1) { + printf("%d:%s: port 1 drops\n", __LINE__, __func__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + /* each packet drop should only be counted in one place - port or dev */ + if (stats.rx_dropped != 0) { + printf("%d:%s: port 1 dropped count not correct\n", __LINE__, + __func__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + + cleanup(t); + return 0; +} + +static int +single_packet(struct test *t) +{ + const uint32_t MAGIC_SEQN = 7321; + struct rte_event ev; + struct test_event_dev_stats stats; + const int rx_enq = 0; + const int wrk_enq = 2; + int err; + + /* Create instance with 4 ports */ + if (init(t, 1, 4) < 0 || + create_ports(t, 4) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /************** Gen pkt and enqueue ****************/ + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + + ev.op = RTE_EVENT_OP_NEW; + ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; + ev.mbuf = arp; + ev.queue_id = 0; + ev.flow_id = 3; + arp->seqn = MAGIC_SEQN; + + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + + if (stats.rx_pkts != 1 || + stats.tx_pkts != 1 || + stats.port_inflight[wrk_enq] != 1) { + printf("%d: Sched core didn't handle pkt as expected\n", + __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + + uint32_t deq_pkts; + + deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0); + if (deq_pkts < 1) { + printf("%d: Failed to deq\n", __LINE__); + return -1; + } + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + return -1; + } + + err = test_event_dev_stats_get(evdev, &stats); + if (ev.mbuf->seqn != MAGIC_SEQN) { + printf("%d: magic sequence number not dequeued\n", __LINE__); + return -1; + } + + rte_pktmbuf_free(ev.mbuf); + err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1); + if (err < 0) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[wrk_enq] != 0) { + printf("%d: port inflight not correct\n", __LINE__); + return -1; + } + + cleanup(t); + return 0; +} + +static int +inflight_counts(struct test *t) +{ + struct rte_event ev; + struct test_event_dev_stats stats; + const int rx_enq = 0; + const int p1 = 1; + const int p2 = 2; + int err; + int i; + + /* Create instance with 4 ports */ + if (init(t, 2, 3) < 0 || + create_ports(t, 3) < 0 || + create_atomic_qids(t, 2) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* CQ mapping to QID */ + err = 
rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /************** FORWARD ****************/ +#define QID1_NUM 5 + for (i = 0; i < QID1_NUM; i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + goto err; + } + + ev.queue_id = t->qid[0]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + goto err; + } + } +#define QID2_NUM 3 + for (i = 0; i < QID2_NUM; i++) { + struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); + + if (!arp) { + printf("%d: gen of pkt failed\n", __LINE__); + goto err; + } + ev.queue_id = t->qid[1]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + goto err; + } + } + + /* schedule */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + err = test_event_dev_stats_get(evdev, &stats); + if (err) { + printf("%d: failed to get stats\n", __LINE__); + goto err; + } + + if (stats.rx_pkts != QID1_NUM + QID2_NUM || + stats.tx_pkts != QID1_NUM + QID2_NUM) { + printf("%d: Sched core didn't handle pkt as expected\n", + __LINE__); + goto err; + } + + if (stats.port_inflight[p1] != QID1_NUM) { + printf("%d: %s port 1 inflight not correct\n", __LINE__, + __func__); + goto err; + } + if (stats.port_inflight[p2] != QID2_NUM) { + printf("%d: %s port 2 inflight not correct\n", __LINE__, + __func__); + goto err; + } + + /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/ + /* port 1 */ + struct rte_event events[QID1_NUM + QID2_NUM]; + uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events, + RTE_DIM(events), 0); + + if (deq_pkts != QID1_NUM) { + printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__); + goto err; + } + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[p1] != QID1_NUM) { + printf("%d: port 1 inflight decrement after DEQ != 0\n", + __LINE__); + goto err; + } + for (i = 0; i < QID1_NUM; i++) { + err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev, + 1); + if (err != 1) { + printf("%d: %s rte enqueue of inf release failed\n", + __LINE__, __func__); + goto err; + } + } + + /* + * As the scheduler core decrements inflights, it needs to run to + * process packets to act on the drop messages + */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[p1] != 0) { + printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__); + goto err; + } + + /* port2 */ + deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events, + RTE_DIM(events), 0); + if (deq_pkts != QID2_NUM) { + printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__); + goto err; + } + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[p2] != QID2_NUM) { + printf("%d: port 1 inflight decrement after DEQ != 0\n", + __LINE__); + goto err; + } + for (i = 0; i < QID2_NUM; i++) { + err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev, + 1); + 
if (err != 1) { + printf("%d: %s rte enqueue of inf release failed\n", + __LINE__, __func__); + goto err; + } + } + + /* + * As the scheduler core decrements inflights, it needs to run to + * process packets to act on the drop messages + */ + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + err = test_event_dev_stats_get(evdev, &stats); + if (stats.port_inflight[p2] != 0) { + printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__); + goto err; + } + cleanup(t); + return 0; + +err: + rte_event_dev_dump(evdev, stdout); + cleanup(t); + return -1; +} + +static int +parallel_basic(struct test *t, int check_order) +{ + const uint8_t rx_port = 0; + const uint8_t w1_port = 1; + const uint8_t w3_port = 3; + const uint8_t tx_port = 4; + int err; + int i; + uint32_t deq_pkts, j; + struct rte_mbuf *mbufs[3]; + struct rte_mbuf *mbufs_out[3] = { 0 }; + const uint32_t MAGIC_SEQN = 1234; + + /* Create instance with 4 ports */ + if (init(t, 2, tx_port + 1) < 0 || + create_ports(t, tx_port + 1) < 0 || + (check_order ? create_ordered_qids(t, 1) : + create_unordered_qids(t, 1)) < 0 || + create_directed_qids(t, 1, &tx_port)) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* + * CQ mapping to QID + * We need three ports, all mapped to the same ordered qid0. Then we'll + * take a packet out to each port, re-enqueue in reverse order, + * then make sure the reordering has taken place properly when we + * dequeue from the tx_port. + * + * Simplified test setup diagram: + * + * rx_port w1_port + * \ / \ + * qid0 - w2_port - qid1 + * \ / \ + * w3_port tx_port + */ + /* CQ mapping to QID for LB ports (directed mapped on create) */ + for (i = w1_port; i <= w3_port; i++) { + err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL, + 1); + if (err != 1) { + printf("%d: error mapping lb qid\n", __LINE__); + cleanup(t); + return -1; + } + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + /* Enqueue 3 packets to the rx port */ + for (i = 0; i < 3; i++) { + struct rte_event ev; + mbufs[i] = rte_gen_arp(0, t->mbuf_pool); + if (!mbufs[i]) { + printf("%d: gen of pkt failed\n", __LINE__); + return -1; + } + + ev.queue_id = t->qid[0]; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = mbufs[i]; + mbufs[i]->seqn = MAGIC_SEQN + i; + + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1); + if (err != 1) { + printf("%d: Failed to enqueue pkt %u, retval = %u\n", + __LINE__, i, err); + return -1; + } + } + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* use extra slot to make logic in loops easier */ + struct rte_event deq_ev[w3_port + 1]; + + /* Dequeue the 3 packets, one from each worker port */ + for (i = w1_port; i <= w3_port; i++) { + deq_pkts = rte_event_dequeue_burst(evdev, t->port[i], + &deq_ev[i], 1, 0); + if (deq_pkts != 1) { + printf("%d: Failed to deq\n", __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } + } + + /* Enqueue each packet in reverse order, flushing after each one */ + for (i = w3_port; i >= w1_port; i--) { + + deq_ev[i].op = RTE_EVENT_OP_FORWARD; + deq_ev[i].queue_id = t->qid[1]; + err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1); + if (err != 1) { + printf("%d: Failed to enqueue\n", __LINE__); + return -1; + } + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* dequeue from the tx ports, we should get 3 packets */ + deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev, + 3, 0); + + /* Check 
to see if we've got all 3 packets */ + if (deq_pkts != 3) { + printf("%d: expected 3 pkts at tx port got %d from port %d\n", + __LINE__, deq_pkts, tx_port); + rte_event_dev_dump(evdev, stdout); + return 1; + } + + /* Check to see if the sequence numbers are in expected order */ + if (check_order) { + for (j = 0 ; j < deq_pkts ; j++) { + if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) { + printf( + "%d: Incorrect sequence number(%d) from port %d\n", + __LINE__, mbufs_out[j]->seqn, tx_port); + return -1; + } + } + } + + /* Destroy the instance */ + cleanup(t); + return 0; +} + +static int +ordered_basic(struct test *t) +{ + return parallel_basic(t, 1); +} + +static int +unordered_basic(struct test *t) +{ + return parallel_basic(t, 0); +} + +static int +holb(struct test *t) /* test to check we avoid basic head-of-line blocking */ +{ + const struct rte_event new_ev = { + .op = RTE_EVENT_OP_NEW + /* all other fields zero */ + }; + struct rte_event ev = new_ev; + unsigned int rx_port = 0; /* port we get the first flow on */ + char rx_port_used_stat[64]; + char rx_port_free_stat[64]; + char other_port_used_stat[64]; + + if (init(t, 1, 2) < 0 || + create_ports(t, 2) < 0 || + create_atomic_qids(t, 1) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0); + if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 || + nb_links != 1) { + printf("%d: Error links queue to ports\n", __LINE__); + goto err; + } + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + goto err; + } + + /* send one packet and see where it goes, port 0 or 1 */ + if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) { + printf("%d: Error doing first enqueue\n", __LINE__); + goto err; + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL) + != 1) + rx_port = 1; + + snprintf(rx_port_used_stat, sizeof(rx_port_used_stat), + "port_%u_cq_ring_used", rx_port); + snprintf(rx_port_free_stat, sizeof(rx_port_free_stat), + "port_%u_cq_ring_free", rx_port); + snprintf(other_port_used_stat, sizeof(other_port_used_stat), + "port_%u_cq_ring_used", rx_port ^ 1); + if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL) + != 1) { + printf("%d: Error, first event not scheduled\n", __LINE__); + goto err; + } + + /* now fill up the rx port's queue with one flow to cause HOLB */ + do { + ev = new_ev; + if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) { + printf("%d: Error with enqueue\n", __LINE__); + goto err; + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + } while (rte_event_dev_xstats_by_name_get(evdev, + rx_port_free_stat, NULL) != 0); + + /* one more packet, which needs to stay in IQ - i.e. 
HOLB */ + ev = new_ev; + if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) { + printf("%d: Error with enqueue\n", __LINE__); + goto err; + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + /* check that the other port still has an empty CQ */ + if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL) + != 0) { + printf("%d: Error, second port CQ is not empty\n", __LINE__); + goto err; + } + /* check IQ now has one packet */ + if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL) + != 1) { + printf("%d: Error, QID does not have exactly 1 packet\n", + __LINE__); + goto err; + } + + /* send another flow, which should pass the other IQ entry */ + ev = new_ev; + ev.flow_id = 1; + if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) { + printf("%d: Error with enqueue\n", __LINE__); + goto err; + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL) + != 1) { + printf("%d: Error, second flow did not pass out first\n", + __LINE__); + goto err; + } + + if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL) + != 1) { + printf("%d: Error, QID does not have exactly 1 packet\n", + __LINE__); + goto err; + } + cleanup(t); + return 0; +err: + rte_event_dev_dump(evdev, stdout); + cleanup(t); + return -1; +} + +static int +worker_loopback_worker_fn(void *arg) +{ + struct test *t = arg; + uint8_t port = t->port[1]; + int count = 0; + int enqd; + + /* + * Takes packets from the input port and then loops them back through + * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times + * so each packet goes through 8*16 = 128 times. + */ + printf("%d: \tWorker function started\n", __LINE__); + while (count < NUM_PACKETS) { +#define BURST_SIZE 32 + struct rte_event ev[BURST_SIZE]; + uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev, + BURST_SIZE, 0); + if (nb_rx == 0) { + rte_pause(); + continue; + } + + for (i = 0; i < nb_rx; i++) { + ev[i].queue_id++; + if (ev[i].queue_id != 8) { + ev[i].op = RTE_EVENT_OP_FORWARD; + enqd = rte_event_enqueue_burst(evdev, port, + &ev[i], 1); + if (enqd != 1) { + printf("%d: Can't enqueue FWD!!\n", + __LINE__); + return -1; + } + continue; + } + + ev[i].queue_id = 0; + ev[i].mbuf->udata64++; + if (ev[i].mbuf->udata64 != 16) { + ev[i].op = RTE_EVENT_OP_FORWARD; + enqd = rte_event_enqueue_burst(evdev, port, + &ev[i], 1); + if (enqd != 1) { + printf("%d: Can't enqueue FWD!!\n", + __LINE__); + return -1; + } + continue; + } + /* we have hit 16 iterations through system - drop */ + rte_pktmbuf_free(ev[i].mbuf); + count++; + ev[i].op = RTE_EVENT_OP_RELEASE; + enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1); + if (enqd != 1) { + printf("%d drop enqueue failed\n", __LINE__); + return -1; + } + } + } + + return 0; +} + +static int +worker_loopback_producer_fn(void *arg) +{ + struct test *t = arg; + uint8_t port = t->port[0]; + uint64_t count = 0; + + printf("%d: \tProducer function started\n", __LINE__); + while (count < NUM_PACKETS) { + struct rte_mbuf *m = 0; + do { + m = rte_pktmbuf_alloc(t->mbuf_pool); + } while (m == NULL); + + m->udata64 = 0; + + struct rte_event ev = { + .op = RTE_EVENT_OP_NEW, + .queue_id = t->qid[0], + .flow_id = (uintptr_t)m & 0xFFFF, + .mbuf = m, + }; + + if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) { + while (rte_event_enqueue_burst(evdev, port, &ev, 1) != + 1) + rte_pause(); + } + + count++; + } + + return 0; +} + +static int +worker_loopback(struct test *t, uint8_t 
disable_implicit_release) +{ + /* use a single producer core, and a worker core to see what happens + * if the worker loops packets back multiple times + */ + struct test_event_dev_stats stats; + uint64_t print_cycles = 0, cycles = 0; + uint64_t tx_pkts = 0; + int err; + int w_lcore, p_lcore; + + if (init(t, 8, 2) < 0 || + create_atomic_qids(t, 8) < 0) { + printf("%d: Error initializing device\n", __LINE__); + return -1; + } + + /* RX with low max events */ + static struct rte_event_port_conf conf = { + .dequeue_depth = 32, + .enqueue_depth = 64, + }; + /* beware: this cannot be initialized in the static above as it would + * only be initialized once - and this needs to be set for multiple runs + */ + conf.new_event_threshold = 512; + conf.disable_implicit_release = disable_implicit_release; + + if (rte_event_port_setup(evdev, 0, &conf) < 0) { + printf("Error setting up RX port\n"); + return -1; + } + t->port[0] = 0; + /* TX with higher max events */ + conf.new_event_threshold = 4096; + if (rte_event_port_setup(evdev, 1, &conf) < 0) { + printf("Error setting up TX port\n"); + return -1; + } + t->port[1] = 1; + + /* CQ mapping to QID */ + err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0); + if (err != 8) { /* should have mapped all queues*/ + printf("%d: error mapping port 2 to all qids\n", __LINE__); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { + printf("%d: Error with start call\n", __LINE__); + return -1; + } + + p_lcore = rte_get_next_lcore( + /* start core */ -1, + /* skip master */ 1, + /* wrap */ 0); + w_lcore = rte_get_next_lcore(p_lcore, 1, 0); + + rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore); + rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore); + + print_cycles = cycles = rte_get_timer_cycles(); + while (rte_eal_get_lcore_state(p_lcore) != FINISHED || + rte_eal_get_lcore_state(w_lcore) != FINISHED) { + + rte_service_run_iter_on_app_lcore(t->service_id, 1); + + uint64_t new_cycles = rte_get_timer_cycles(); + + if (new_cycles - print_cycles > rte_get_timer_hz()) { + test_event_dev_stats_get(evdev, &stats); + printf( + "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n", + __LINE__, stats.rx_pkts, stats.tx_pkts); + + print_cycles = new_cycles; + } + if (new_cycles - cycles > rte_get_timer_hz() * 3) { + test_event_dev_stats_get(evdev, &stats); + if (stats.tx_pkts == tx_pkts) { + rte_event_dev_dump(evdev, stdout); + printf("Dumping xstats:\n"); + xstats_print(); + printf( + "%d: No schedules for seconds, deadlock\n", + __LINE__); + return -1; + } + tx_pkts = stats.tx_pkts; + cycles = new_cycles; + } + } + rte_service_run_iter_on_app_lcore(t->service_id, 1); + /* ensure all completions are flushed */ + + rte_eal_mp_wait_lcore(); + + cleanup(t); + return 0; +} + +static struct rte_mempool *eventdev_func_mempool; + +int +test_sw_eventdev(void) +{ + struct test *t; + int ret; + + t = malloc(sizeof(struct test)); + if (t == NULL) + return -1; + /* manually initialize the op, older gcc's complain on static + * initialization of struct elements that are a bitfield. 
+ */
+	release_ev.op = RTE_EVENT_OP_RELEASE;
+
+	const char *eventdev_name = "event_sw";
+	evdev = rte_event_dev_get_dev_id(eventdev_name);
+	if (evdev < 0) {
+		printf("%d: Eventdev %s not found - creating.\n",
+				__LINE__, eventdev_name);
+		if (rte_vdev_init(eventdev_name, NULL) < 0) {
+			printf("Error creating eventdev\n");
+			goto test_fail;
+		}
+		evdev = rte_event_dev_get_dev_id(eventdev_name);
+		if (evdev < 0) {
+			printf("Error finding newly created eventdev\n");
+			goto test_fail;
+		}
+	}
+
+	if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
+		printf("Failed to get service ID for software event dev\n");
+		goto test_fail;
+	}
+
+	rte_service_runstate_set(t->service_id, 1);
+	rte_service_set_runstate_mapped_check(t->service_id, 0);
+
+	/* Only create mbuf pool once, reuse for each test run */
+	if (!eventdev_func_mempool) {
+		eventdev_func_mempool = rte_pktmbuf_pool_create(
+				"EVENTDEV_SW_SA_MBUF_POOL",
+				(1<<12), /* 4k buffers */
+				32 /*MBUF_CACHE_SIZE*/,
+				0,
+				512, /* use very small mbufs */
+				rte_socket_id());
+		if (!eventdev_func_mempool) {
+			printf("ERROR creating mempool\n");
+			goto test_fail;
+		}
+	}
+	t->mbuf_pool = eventdev_func_mempool;
+	printf("*** Running Single Directed Packet test...\n");
+	ret = test_single_directed_packet(t);
+	if (ret != 0) {
+		printf("ERROR - Single Directed Packet test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Directed Forward Credit test...\n");
+	ret = test_directed_forward_credits(t);
+	if (ret != 0) {
+		printf("ERROR - Directed Forward Credit test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Single Load Balanced Packet test...\n");
+	ret = single_packet(t);
+	if (ret != 0) {
+		printf("ERROR - Single Packet test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Unordered Basic test...\n");
+	ret = unordered_basic(t);
+	if (ret != 0) {
+		printf("ERROR - Unordered Basic test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Ordered Basic test...\n");
+	ret = ordered_basic(t);
+	if (ret != 0) {
+		printf("ERROR - Ordered Basic test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Burst Packets test...\n");
+	ret = burst_packets(t);
+	if (ret != 0) {
+		printf("ERROR - Burst Packets test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Load Balancing test...\n");
+	ret = load_balancing(t);
+	if (ret != 0) {
+		printf("ERROR - Load Balancing test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Prioritized Directed test...\n");
+	ret = test_priority_directed(t);
+	if (ret != 0) {
+		printf("ERROR - Prioritized Directed test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Prioritized Atomic test...\n");
+	ret = test_priority_atomic(t);
+	if (ret != 0) {
+		printf("ERROR - Prioritized Atomic test FAILED.\n");
+		goto test_fail;
+	}
+
+	printf("*** Running Prioritized Ordered test...\n");
+	ret = test_priority_ordered(t);
+	if (ret != 0) {
+		printf("ERROR - Prioritized Ordered test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Prioritized Unordered test...\n");
+	ret = test_priority_unordered(t);
+	if (ret != 0) {
+		printf("ERROR - Prioritized Unordered test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Invalid QID test...\n");
+	ret = invalid_qid(t);
+	if (ret != 0) {
+		printf("ERROR - Invalid QID test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Load Balancing History test...\n");
+	ret = load_balancing_history(t);
+	if (ret != 0) {
+		printf("ERROR - Load Balancing History test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Inflight Count test...\n");
+	ret = inflight_counts(t);
+	if (ret != 0) {
+		printf("ERROR - Inflight Count test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Abuse Inflights test...\n");
+	ret = abuse_inflights(t);
+	if (ret != 0) {
+		printf("ERROR - Abuse Inflights test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running XStats test...\n");
+	ret = xstats_tests(t);
+	if (ret != 0) {
+		printf("ERROR - XStats test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running XStats ID Reset test...\n");
+	ret = xstats_id_reset_tests(t);
+	if (ret != 0) {
+		printf("ERROR - XStats ID Reset test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running XStats Brute Force test...\n");
+	ret = xstats_brute_force(t);
+	if (ret != 0) {
+		printf("ERROR - XStats Brute Force test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running XStats ID Abuse test...\n");
+	ret = xstats_id_abuse_tests(t);
+	if (ret != 0) {
+		printf("ERROR - XStats ID Abuse test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running QID Priority test...\n");
+	ret = qid_priorities(t);
+	if (ret != 0) {
+		printf("ERROR - QID Priority test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Ordered Reconfigure test...\n");
+	ret = ordered_reconfigure(t);
+	if (ret != 0) {
+		printf("ERROR - Ordered Reconfigure test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Port LB Single Reconfig test...\n");
+	ret = port_single_lb_reconfig(t);
+	if (ret != 0) {
+		printf("ERROR - Port LB Single Reconfig test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Port Reconfig Credits test...\n");
+	ret = port_reconfig_credits(t);
+	if (ret != 0) {
+		printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
+		goto test_fail;
+	}
+	printf("*** Running Head-of-line-blocking test...\n");
+	ret = holb(t);
+	if (ret != 0) {
+		printf("ERROR - Head-of-line-blocking test FAILED.\n");
+		goto test_fail;
+	}
+	if (rte_lcore_count() >= 3) {
+		printf("*** Running Worker loopback test...\n");
+		ret = worker_loopback(t, 0);
+		if (ret != 0) {
+			printf("ERROR - Worker loopback test FAILED.\n");
+			return ret;
+		}
+
+		printf("*** Running Worker loopback test (implicit release disabled)...\n");
+		ret = worker_loopback(t, 1);
+		if (ret != 0) {
+			printf("ERROR - Worker loopback test FAILED.\n");
+			goto test_fail;
+		}
+	} else {
+		printf("### Not enough cores for worker loopback tests.\n");
+		printf("### Need at least 3 cores for the tests.\n");
+	}
+
+	/*
+	 * Free test instance, leaving mempool initialized, and a pointer to it
+	 * in static eventdev_func_mempool, as it is re-used on re-runs
+	 */
+	free(t);
+
+	printf("SW Eventdev Selftest Successful.\n");
+	return 0;
+test_fail:
+	free(t);
+	printf("SW Eventdev Selftest Failed.\n");
+	return -1;
+}
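The routine above is the self-test entry point that the new sw_evdev_selftest.c unit registers with the eventdev layer. As a usage sketch only (not part of this patch), an application built with ALLOW_EXPERIMENTAL_API could drive the same test through the public eventdev API; the vdev name and error handling below are illustrative assumptions:

#include <rte_eventdev.h>
#include <rte_bus_vdev.h>

/* Sketch: create the SW event PMD if it does not exist yet and run its
 * self-test. rte_event_dev_selftest() dispatches to the driver's
 * dev_selftest callback, i.e. the function shown in the diff above. */
static int
run_sw_eventdev_selftest(void)
{
	const char *name = "event_sw0";	/* assumed vdev name */
	int dev_id = rte_event_dev_get_dev_id(name);

	if (dev_id < 0) {
		if (rte_vdev_init(name, NULL) < 0)
			return -1;
		dev_id = rte_event_dev_get_dev_id(name);
		if (dev_id < 0)
			return -1;
	}
	return rte_event_dev_selftest(dev_id);
}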
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index b3b3b17e..67151f77 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <rte_atomic.h>
@@ -85,6 +57,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 	struct sw_port *p = port;
 	struct sw_evdev *sw = (void *)p->sw;
 	uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
+	uint32_t credit_update_quanta = sw->credit_update_quanta;
 	int new = 0;
 
 	if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
@@ -98,7 +71,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 
 	if (p->inflight_credits < new) {
 		/* check if event enqueue brings port over max threshold */
-		uint32_t credit_update_quanta = sw->credit_update_quanta;
 		if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
 			return 0;
 
@@ -109,7 +81,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 			return 0;
 	}
 
-	uint32_t forwards = 0;
 	for (i = 0; i < num; i++) {
 		int op = ev[i].op;
 		int outstanding = p->outstanding_releases > 0;
@@ -118,7 +89,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 
 		p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
 		p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) * outstanding;
-		forwards += (op == RTE_EVENT_OP_FORWARD);
 
 		new_ops[i] = sw_qe_flag_map[op];
 		new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
@@ -131,15 +101,12 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 			p->outstanding_releases--;
 
 		/* error case: branch to avoid touching p->stats */
-		if (unlikely(invalid_qid)) {
+		if (unlikely(invalid_qid && op != RTE_EVENT_OP_RELEASE)) {
 			p->stats.rx_dropped++;
 			p->inflight_credits++;
 		}
 	}
 
-	/* handle directed port forward credits */
-	p->inflight_credits -= forwards * p->is_directed;
-
 	/* returns number of events actually enqueued */
 	uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
 			new_ops);
@@ -152,6 +119,13 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
 		p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
 		p->last_dequeue_ticks = 0;
 	}
+
+	/* Replenish credits if enough releases are performed */
+	if (p->inflight_credits >= credit_update_quanta * 2) {
+		rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+		p->inflight_credits -= credit_update_quanta;
+	}
+
 	return enq;
 }
 
@@ -167,41 +141,39 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
 {
 	RTE_SET_USED(wait);
 	struct sw_port *p = (void *)port;
-	struct sw_evdev *sw = (void *)p->sw;
 	struct rte_event_ring *ring = p->cq_worker_ring;
-	uint32_t credit_update_quanta = sw->credit_update_quanta;
 
 	/* check that all previous dequeues have been released */
-	if (!p->is_directed) {
+	if (p->implicit_release) {
+		struct sw_evdev *sw = (void *)p->sw;
+		uint32_t credit_update_quanta = sw->credit_update_quanta;
 		uint16_t out_rels = p->outstanding_releases;
 		uint16_t i;
 		for (i = 0; i < out_rels; i++)
 			sw_event_release(p, i);
+
+		/* Replenish credits if enough releases are performed */
+		if (p->inflight_credits >= credit_update_quanta * 2) {
+			rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+			p->inflight_credits -= credit_update_quanta;
+		}
 	}
 
 	/* returns number of events actually dequeued */
 	uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
 	if (unlikely(ndeq == 0)) {
-		p->outstanding_releases = 0;
 		p->zero_polls++;
 		p->total_polls++;
 		goto end;
 	}
 
-	/* only add credits for directed ports - LB ports send RELEASEs */
-	p->inflight_credits += ndeq * p->is_directed;
-	p->outstanding_releases = ndeq;
+	p->outstanding_releases += ndeq;
 	p->last_dequeue_burst_sz = ndeq;
 	p->last_dequeue_ticks = rte_get_timer_cycles();
 	p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
 	p->total_polls++;
 
 end:
-	if (p->inflight_credits >= credit_update_quanta * 2 &&
-			p->inflight_credits > credit_update_quanta + ndeq) {
-		rte_atomic32_sub(&sw->inflights, credit_update_quanta);
-		p->inflight_credits -= credit_update_quanta;
-	}
 	return ndeq;
 }
 
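In the reworked worker paths above, credit handling no longer special-cases directed ports: enqueue and dequeue both hand credits back to the device only in credit_update_quanta batches, and automatic releases happen only while the port's implicit_release flag is set. That flag is derived from the port configuration; a minimal, hedged sketch of how an application would opt out of implicit release follows (the device/port ids, and the assumption that the device is already configured, are illustrative):

#include <rte_eventdev.h>

/* Sketch: set up a port with implicit release disabled, so the worker
 * must enqueue RTE_EVENT_OP_RELEASE (or RTE_EVENT_OP_FORWARD) for every
 * event it dequeued, instead of relying on the automatic release in
 * sw_event_dequeue_burst() shown above. */
static int
setup_explicit_release_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf conf;

	if (rte_event_port_default_conf_get(dev_id, port_id, &conf) < 0)
		return -1;

	conf.disable_implicit_release = 1;	/* field added in 18.02 */
	return rte_event_port_setup(dev_id, port_id, &conf);
}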
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index 61a5c33b..7a6caa64 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -1,38 +1,10 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <rte_event_ring.h>
 #include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
 
 enum xstats_type {
 	/* common stats */
@@ -53,7 +25,6 @@ enum xstats_type {
 	pkt_cycles,
 	poll_return, /* for zero-count and used also for port bucket loop */
 	/* qid_specific */
-	iq_size,
 	iq_used,
 	/* qid port mapping specific */
 	pinned,
@@ -144,7 +115,6 @@ get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
 			return infl;
 		} while (0);
 		break;
-	case iq_size: return RTE_DIM(qid->iq[0]->ring);
 	default: return -1;
 	}
 }
@@ -157,7 +127,7 @@ get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
 	const int iq_idx = extra_arg;
 
 	switch (type) {
-	case iq_used: return iq_ring_count(qid->iq[iq_idx]);
+	case iq_used: return iq_count(&qid->iq[iq_idx]);
 	default: return -1;
 	}
 }
@@ -236,13 +206,13 @@ sw_xstats_init(struct sw_evdev *sw)
 	/* all bucket dequeues are allowed to be reset, handled in loop below */
 
 	static const char * const qid_stats[] = {"rx", "tx", "drop",
-			"inflight", "iq_size"
+			"inflight"
 	};
 	static const enum xstats_type qid_types[] = { rx, tx, dropped,
-			inflight, iq_size
+			inflight
 	};
 	static const uint8_t qid_reset_allowed[] = {1, 1, 1,
-			0, 0
+			0
 	};
 
 	static const char * const qid_iq_stats[] = { "used" };
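With the internal queues now built from iq_chunk allocations instead of fixed-size iq_rings, the per-queue "iq_size" xstat no longer has a fixed value to report and is dropped; only the per-IQ "used" counter remains. A hedged sketch of reading that counter through the generic xstats API follows; the formatted stat name is assumed from the sw PMD's qid_N_iq_N_<stat> naming convention and is not stated in this patch:

#include <inttypes.h>
#include <stdio.h>
#include <rte_eventdev.h>

/* Sketch: print how many events sit in queue 0's priority-0 IQ. */
static void
print_iq_depth(uint8_t dev_id)
{
	uint64_t used = rte_event_dev_xstats_by_name_get(dev_id,
			"qid_0_iq_0_used", NULL);
	printf("qid 0, iq 0: %" PRIu64 " events queued\n", used);
}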