author	Luca Boccassi <luca.boccassi@gmail.com>	2017-08-16 18:42:05 +0100
committer	Luca Boccassi <luca.boccassi@gmail.com>	2017-08-16 18:46:04 +0100
commit	f239aed5e674965691846e8ce3f187dd47523689 (patch)
tree	a153a3125c6e183c73871a8ecaa4b285fed5fbd5	/drivers/event/sw/sw_evdev_scheduler.c
parent	bf7567fd2a5b0b28ab724046143c24561d38d015 (diff)
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/event/sw/sw_evdev_scheduler.c')
-rw-r--r--	drivers/event/sw/sw_evdev_scheduler.c	24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index a333a6f0..8a2c9d4f 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -32,9 +32,9 @@
#include <rte_ring.h>
#include <rte_hash_crc.h>
+#include <rte_event_ring.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
#define SW_IQS_MASK (SW_IQS_MAX-1)
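This hunk drops the driver-private event_ring.h (qe_ring) in favour of the public rte_event_ring API that librte_eventdev gains in 17.08. A minimal sketch of creating such a ring follows; the ring name, size and flags are illustrative assumptions and do not come from this driver:

#include <rte_event_ring.h>
#include <rte_lcore.h>

/* Illustrative only: allocate a single-producer/single-consumer event ring.
 * The name "demo_ring" and the 4096-slot size are arbitrary choices. */
static struct rte_event_ring *
create_demo_event_ring(void)
{
	return rte_event_ring_create("demo_ring", 4096, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}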
@@ -119,11 +119,12 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
p->stats.tx_pkts++;
qid->stats.tx_pkts++;
+ qid->to_port[cq]++;
/* if we just filled in the last slot, flush the buffer */
if (sw->cq_ring_space[cq] == 0) {
- struct qe_ring *worker = p->cq_worker_ring;
- qe_ring_enqueue_burst(worker, p->cq_buf,
+ struct rte_event_ring *worker = p->cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, p->cq_buf,
p->cq_buf_count,
&sw->cq_ring_space[cq]);
p->cq_buf_count = 0;
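In the atomic path above, events are staged in the per-port cq_buf while sw->cq_ring_space[cq] counts how much room the worker ring is believed to have; once that counter reaches zero the buffer is flushed in one burst, and the fourth argument of rte_event_ring_enqueue_burst writes the ring's actual remaining free space back into the counter. A hedged sketch of that buffer-and-flush pattern, with hypothetical names (flush_if_full, buf, buf_count, ring_space are not the driver's):

#include <rte_event_ring.h>

/* Hypothetical helper mirroring the pattern above: *ring_space tracks how
 * many more events we believe the ring can take; when it hits zero the
 * staged events in buf[] are enqueued in one burst and the counter is
 * refreshed from the ring's real free space. */
static void
flush_if_full(struct rte_event_ring *ring, const struct rte_event *buf,
		uint16_t *buf_count, uint16_t *ring_space)
{
	if (*ring_space == 0) {
		rte_event_ring_enqueue_burst(ring, buf, *buf_count,
				ring_space);
		*buf_count = 0;
	}
}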
@@ -170,7 +171,8 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
cq = qid->cq_map[cq_idx];
if (++cq_idx == qid->cq_num_mapped_cqs)
cq_idx = 0;
- } while (qe_ring_free_count(sw->ports[cq].cq_worker_ring) == 0 ||
+ } while (rte_event_ring_free_count(
+ sw->ports[cq].cq_worker_ring) == 0 ||
sw->ports[cq].inflights == SW_PORT_HIST_LIST);
struct sw_port *p = &sw->ports[cq];
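The parallel path keeps rotating through the queue's mapped CQs until it finds a port whose worker ring still has room and whose in-flight history list is not full; qe_ring_free_count simply becomes rte_event_ring_free_count. A tiny illustrative check (ring_has_room is a hypothetical name):

#include <stdbool.h>
#include <rte_event_ring.h>

/* Hypothetical predicate: can this worker ring accept at least one event? */
static inline bool
ring_has_room(const struct rte_event_ring *ring)
{
	return rte_event_ring_free_count(ring) > 0;
}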
@@ -362,17 +364,17 @@ sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
return pkts_iter;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
{
RTE_SET_USED(sw);
- struct qe_ring *worker = port->rx_worker_ring;
+ struct rte_event_ring *worker = port->rx_worker_ring;
port->pp_buf_start = 0;
- port->pp_buf_count = qe_ring_dequeue_burst(worker, port->pp_buf,
- RTE_DIM(port->pp_buf));
+ port->pp_buf_count = rte_event_ring_dequeue_burst(worker, port->pp_buf,
+ RTE_DIM(port->pp_buf), NULL);
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
__pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
{
static struct reorder_buffer_entry dummy_rob;
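sw_refill_pp_buf now dequeues straight into the port's pre-pull buffer with rte_event_ring_dequeue_burst; the new trailing NULL is the optional out-parameter that would report how many events remain in the ring, which this path does not need. The same hunk also swaps the GCC-specific __attribute__((always_inline)) spelling for the portable __rte_always_inline macro. A small sketch of the dequeue-into-a-local-buffer idea, with hypothetical names and a 32-event buffer chosen only for illustration:

#include <rte_common.h>		/* __rte_always_inline */
#include <rte_event_ring.h>

/* Hypothetical refill helper: pull up to 32 events into buf[] in one burst.
 * Passing NULL as the last argument means we do not ask how many events
 * are still left in the ring afterwards. */
static __rte_always_inline unsigned int
refill_buf(struct rte_event_ring *ring, struct rte_event buf[32])
{
	return rte_event_ring_dequeue_burst(ring, buf, 32, NULL);
}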
@@ -585,8 +587,8 @@ sw_event_schedule(struct rte_eventdev *dev)
* worker cores: aka, do the ring transfers batched.
*/
for (i = 0; i < sw->port_count; i++) {
- struct qe_ring *worker = sw->ports[i].cq_worker_ring;
- qe_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
+ struct rte_event_ring *worker = sw->ports[i].cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
sw->ports[i].cq_buf_count,
&sw->cq_ring_space[i]);
sw->ports[i].cq_buf_count = 0;
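The final hunk applies the same conversion to the end-of-schedule flush in sw_event_schedule(): each port's staged cq_buf is pushed to its worker ring in a single burst, and the free space the enqueue reports back into sw->cq_ring_space[i] bounds how many events can be staged for that port before the next flush.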