Diffstat (limited to 'drivers/event/sw/sw_evdev_scheduler.c')
-rw-r--r--  drivers/event/sw/sw_evdev_scheduler.c | 72
1 file changed, 15 insertions(+), 57 deletions(-)
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index 8a2c9d4f..3106eb33 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -1,40 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_ring.h>
#include <rte_hash_crc.h>
#include <rte_event_ring.h>
#include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
#define SW_IQS_MASK (SW_IQS_MAX-1)
@@ -71,7 +43,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
*/
uint32_t qid_id = qid->id;
- iq_ring_dequeue_burst(qid->iq[iq_num], qes, count);
+ iq_dequeue_burst(sw, &qid->iq[iq_num], qes, count);
for (i = 0; i < count; i++) {
const struct rte_event *qe = &qes[i];
const uint16_t flow_id = SW_HASH_FLOWID(qes[i].flow_id);
@@ -130,7 +102,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
p->cq_buf_count = 0;
}
}
- iq_ring_put_back(qid->iq[iq_num], blocked_qes, nb_blocked);
+ iq_put_back(sw, &qid->iq[iq_num], blocked_qes, nb_blocked);
return count - nb_blocked;
}
@@ -156,7 +128,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
rte_ring_count(qid->reorder_buffer_freelist));
for (i = 0; i < count; i++) {
- const struct rte_event *qe = iq_ring_peek(qid->iq[iq_num]);
+ const struct rte_event *qe = iq_peek(&qid->iq[iq_num]);
uint32_t cq_check_count = 0;
uint32_t cq;
@@ -193,7 +165,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
(void *)&p->hist_list[head].rob_entry);
sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;
- iq_ring_pop(qid->iq[iq_num]);
+ iq_pop(sw, &qid->iq[iq_num]);
rte_compiler_barrier();
p->inflights++;
@@ -218,8 +190,8 @@ sw_schedule_dir_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
return 0;
/* burst dequeue from the QID IQ ring */
- struct iq_ring *ring = qid->iq[iq_num];
- uint32_t ret = iq_ring_dequeue_burst(ring,
+ struct sw_iq *iq = &qid->iq[iq_num];
+ uint32_t ret = iq_dequeue_burst(sw, iq,
&port->cq_buf[port->cq_buf_count], count_free);
port->cq_buf_count += ret;
@@ -252,7 +224,7 @@ sw_schedule_qid_to_cq(struct sw_evdev *sw)
continue;
uint32_t pkts_done = 0;
- uint32_t count = iq_ring_count(qid->iq[iq_num]);
+ uint32_t count = iq_count(&qid->iq[iq_num]);
if (count > 0) {
if (type == SW_SCHED_TYPE_DIRECT)
@@ -324,22 +296,15 @@ sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
continue;
}
- struct sw_qid *dest_qid_ptr =
- &sw->qids[dest_qid];
- const struct iq_ring *dest_iq_ptr =
- dest_qid_ptr->iq[dest_iq];
- if (iq_ring_free_count(dest_iq_ptr) == 0)
- break;
-
pkts_iter++;
struct sw_qid *q = &sw->qids[dest_qid];
- struct iq_ring *r = q->iq[dest_iq];
+ struct sw_iq *iq = &q->iq[dest_iq];
/* we checked for space above, so enqueue must
* succeed
*/
- iq_ring_enqueue(r, qe);
+ iq_enqueue(sw, iq, qe);
q->iq_pkt_mask |= (1 << (dest_iq));
q->iq_pkt_count[dest_iq]++;
q->stats.rx_pkts++;
@@ -404,10 +369,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
uint32_t iq_num = PRIO_TO_IQ(qe->priority);
struct sw_qid *qid = &sw->qids[qe->queue_id];
- if ((flags & QE_FLAG_VALID) &&
- iq_ring_free_count(qid->iq[iq_num]) == 0)
- break;
-
/* now process based on flags. Note that for directed
* queues, the enqueue_flush masks off all but the
* valid flag. This makes FWD and PARTIAL enqueues just
@@ -471,7 +432,7 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
*/
qid->iq_pkt_mask |= (1 << (iq_num));
- iq_ring_enqueue(qid->iq[iq_num], qe);
+ iq_enqueue(sw, &qid->iq[iq_num], qe);
qid->iq_pkt_count[iq_num]++;
qid->stats.rx_pkts++;
pkts_iter++;
@@ -516,10 +477,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
uint32_t iq_num = PRIO_TO_IQ(qe->priority);
struct sw_qid *qid = &sw->qids[qe->queue_id];
- struct iq_ring *iq_ring = qid->iq[iq_num];
-
- if (iq_ring_free_count(iq_ring) == 0)
- break; /* move to next port */
+ struct sw_iq *iq = &qid->iq[iq_num];
port->stats.rx_pkts++;
@@ -527,7 +485,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
* into the qid at the right priority
*/
qid->iq_pkt_mask |= (1 << (iq_num));
- iq_ring_enqueue(iq_ring, qe);
+ iq_enqueue(sw, iq, qe);
qid->iq_pkt_count[iq_num]++;
qid->stats.rx_pkts++;
pkts_iter++;
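
The hunks above migrate every internal-queue access from the ring-based iq_ring_* API (iq_ring.h) to the chunked iq_* API declared in iq_chunk.h. Each call now also receives the sw_evdev instance, presumably so the IQ can draw and return chunks via state held in the device structure, and the iq_ring_free_count() guards before enqueue are deleted, since a chunked IQ can grow instead of overflowing a fixed ring. Below is a minimal sketch of the resulting call pattern; it is not part of the patch. Only the iq_* calls and struct sw_iq come from the hunks above; the consumer logic, SKETCH_BURST and iq_drain_sketch() are illustrative assumptions.

/* Sketch only: mirrors the two chunked-IQ access patterns used in this
 * patch.  Consumer behaviour and the burst size are illustrative.
 */
#include <rte_eventdev.h>
#include "sw_evdev.h"
#include "iq_chunk.h"

#define SKETCH_BURST 32			/* illustrative burst size */

static uint32_t
iq_drain_sketch(struct sw_evdev *sw, struct sw_qid *qid, uint32_t iq_num)
{
	struct sw_iq *iq = &qid->iq[iq_num];
	struct rte_event qes[SKETCH_BURST];
	uint32_t done = 0;

	/* Burst path, as in sw_schedule_atomic_to_cq(): dequeue a burst,
	 * then hand back whatever could not be placed.
	 */
	uint32_t n = iq_dequeue_burst(sw, iq, qes, SKETCH_BURST);
	uint32_t accepted = n;		/* a real consumer may accept fewer */
	if (accepted < n)
		iq_put_back(sw, iq, &qes[accepted], n - accepted);
	done += accepted;

	/* Single-event path, as in sw_schedule_parallel_to_cq(): peek at
	 * the head and only pop once the event has been placed.
	 */
	while (iq_count(iq) > 0) {
		const struct rte_event *qe = iq_peek(iq);
		(void)qe;		/* placement of *qe would go here */
		iq_pop(sw, iq);
		done++;
	}

	return done;
}

Note that no free-count check precedes iq_enqueue(sw, iq, qe) in the new code paths: the three "break when the destination IQ is full" blocks are simply removed by this patch, consistent with the IQ being dynamically sized rather than a fixed-capacity ring.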