Diffstat (limited to 'app/test-eventdev/test_pipeline_queue.c')
-rw-r--r--  app/test-eventdev/test_pipeline_queue.c | 260
1 file changed, 138 insertions(+), 122 deletions(-)
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index ca5f4578..25217008 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -15,10 +15,10 @@ pipeline_queue_nb_event_queues(struct evt_options *opt)
return (eth_count * opt->nb_stages) + eth_count;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_single_stage_tx(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_INIT;
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -29,7 +29,7 @@ pipeline_queue_worker_single_stage_tx(void *arg)
}
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
- pipeline_tx_pkt(ev.mbuf);
+ pipeline_event_tx(dev, port, &ev);
w->processed_pkts++;
} else {
ev.queue_id++;
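[Editor's note: the first functional change above replaces the direct pipeline_tx_pkt() call on the mbuf with the test-local pipeline_event_tx() helper, which transmits through the Tx adapter. A minimal sketch of such a helper, assuming the four-argument rte_event_eth_tx_adapter_enqueue() signature of this DPDK generation (a flags argument was added in later releases); event_tx_retry is a hypothetical name, the real helper lives in the test's common header:

#include <rte_event_eth_tx_adapter.h>
#include <rte_pause.h>

/* Illustrative stand-in for pipeline_event_tx(): retry until the
 * Tx adapter accepts the single event.
 */
static __rte_always_inline void
event_tx_retry(uint8_t dev, uint8_t port, struct rte_event *ev)
{
	rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0); /* ethdev Tx queue 0 */
	while (rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1) != 1)
		rte_pause(); /* back off until the enqueue succeeds */
}
]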
@@ -41,11 +41,11 @@ pipeline_queue_worker_single_stage_tx(void *arg)
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_single_stage_fwd(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_INIT;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -55,7 +55,8 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
continue;
}
- ev.queue_id = tx_queue;
+ ev.queue_id = tx_queue[ev.mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
pipeline_event_enqueue(dev, port, &ev);
w->processed_pkts++;
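[Editor's note: without an internal Tx port the worker cannot transmit itself, so the fwd variant above retargets the event at the per-ethdev Tx event queue, looked up by the packet's ingress port, and stamps the destination ethdev Tx queue into the mbuf. A sketch of that hand-off; fwd_to_tx_queue is a hypothetical name, and the real pipeline_fwd_event() also sets the event type:

static __rte_always_inline void
fwd_to_tx_queue(struct rte_event *ev, const uint8_t *tx_evqueue_id)
{
	/* Tx event queue consumed by the adapter's service event port */
	ev->queue_id = tx_evqueue_id[ev->mbuf->port];
	/* this test always transmits on ethdev Tx queue 0 */
	rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC; /* preserve per-flow order */
}
]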
@@ -64,10 +65,10 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT;
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -81,8 +82,7 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg)
for (i = 0; i < nb_rx; i++) {
rte_prefetch0(ev[i + 1].mbuf);
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
-
- pipeline_tx_pkt(ev[i].mbuf);
+ pipeline_event_tx(dev, port, &ev[i]);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
} else {
@@ -98,11 +98,11 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg)
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_single_stage_burst_fwd(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -115,23 +115,24 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
for (i = 0; i < nb_rx; i++) {
rte_prefetch0(ev[i + 1].mbuf);
- ev[i].queue_id = tx_queue;
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
- w->processed_pkts++;
}
pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ w->processed_pkts += nb_rx;
}
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_multi_stage_tx(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_INIT;
- const uint8_t nb_stages = t->opt->nb_stages + 1;
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -143,31 +144,27 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
cq_id = ev.queue_id % nb_stages;
- if (cq_id >= last_queue) {
- if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
-
- pipeline_tx_pkt(ev.mbuf);
- w->processed_pkts++;
- continue;
- }
- ev.queue_id += (cq_id == last_queue) ? 1 : 0;
- pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- } else {
- ev.queue_id++;
- pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+ if (ev.queue_id == tx_queue[ev.mbuf->port]) {
+ pipeline_event_tx(dev, port, &ev);
+ w->processed_pkts++;
+ continue;
}
+ ev.queue_id++;
+ pipeline_fwd_event(&ev, cq_id != last_queue ?
+ sched_type_list[cq_id] :
+ RTE_SCHED_TYPE_ATOMIC);
pipeline_event_enqueue(dev, port, &ev);
}
+
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_multi_stage_fwd(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_INIT;
- const uint8_t nb_stages = t->opt->nb_stages + 1;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
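[Editor's note: the rewritten multi-stage Tx worker above no longer infers the transmit point from sched_type; an event is transmitted exactly when its queue id equals the ingress port's Tx event queue, and otherwise advances one stage. With --stages=2 the macro-provided values are nb_stages = 3 and last_queue = 1, so for eth0 (queues q0, q1, q2) an event on q1 has cq_id 1 == last_queue and is forwarded to q2 as ATOMIC, then transmitted on the next dequeue. Condensed, the per-event dispatch is:

	cq_id = ev.queue_id % nb_stages;
	if (ev.queue_id == tx_queue[ev.mbuf->port]) {
		pipeline_event_tx(dev, port, &ev);	/* reached the Tx queue */
		w->processed_pkts++;
	} else {
		ev.queue_id++;				/* next stage */
		pipeline_fwd_event(&ev, cq_id != last_queue ?
				sched_type_list[cq_id] :
				RTE_SCHED_TYPE_ATOMIC);
		pipeline_event_enqueue(dev, port, &ev);
	}
]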
@@ -180,7 +177,8 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
cq_id = ev.queue_id % nb_stages;
if (cq_id == last_queue) {
- ev.queue_id = tx_queue;
+ ev.queue_id = tx_queue[ev.mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
w->processed_pkts++;
} else {
@@ -190,14 +188,15 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
pipeline_event_enqueue(dev, port, &ev);
}
+
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_tx(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_BURST_INIT;
- const uint8_t nb_stages = t->opt->nb_stages + 1;
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -212,37 +211,30 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
rte_prefetch0(ev[i + 1].mbuf);
cq_id = ev[i].queue_id % nb_stages;
- if (cq_id >= last_queue) {
- if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
-
- pipeline_tx_pkt(ev[i].mbuf);
- ev[i].op = RTE_EVENT_OP_RELEASE;
- w->processed_pkts++;
- continue;
- }
-
- ev[i].queue_id += (cq_id == last_queue) ? 1 : 0;
- pipeline_fwd_event(&ev[i],
- RTE_SCHED_TYPE_ATOMIC);
- } else {
- ev[i].queue_id++;
- pipeline_fwd_event(&ev[i],
- sched_type_list[cq_id]);
+ if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
+ pipeline_event_tx(dev, port, &ev[i]);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ continue;
}
+ ev[i].queue_id++;
+ pipeline_fwd_event(&ev[i], cq_id != last_queue ?
+ sched_type_list[cq_id] :
+ RTE_SCHED_TYPE_ATOMIC);
}
pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
}
+
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_BURST_INIT;
- const uint8_t nb_stages = t->opt->nb_stages + 1;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -258,7 +250,8 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
cq_id = ev[i].queue_id % nb_stages;
if (cq_id == last_queue) {
- ev[i].queue_id = tx_queue;
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
pipeline_fwd_event(&ev[i],
RTE_SCHED_TYPE_ATOMIC);
w->processed_pkts++;
@@ -271,6 +264,7 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
}
+
return 0;
}
@@ -280,28 +274,28 @@ worker_wrapper(void *arg)
struct worker_data *w = arg;
struct evt_options *opt = w->t->opt;
const bool burst = evt_has_burst_mode(w->dev_id);
- const bool mt_safe = !w->t->mt_unsafe;
+ const bool internal_port = w->t->internal_port;
const uint8_t nb_stages = opt->nb_stages;
RTE_SET_USED(opt);
if (nb_stages == 1) {
- if (!burst && mt_safe)
+ if (!burst && internal_port)
return pipeline_queue_worker_single_stage_tx(arg);
- else if (!burst && !mt_safe)
+ else if (!burst && !internal_port)
return pipeline_queue_worker_single_stage_fwd(arg);
- else if (burst && mt_safe)
+ else if (burst && internal_port)
return pipeline_queue_worker_single_stage_burst_tx(arg);
- else if (burst && !mt_safe)
+ else if (burst && !internal_port)
return pipeline_queue_worker_single_stage_burst_fwd(
arg);
} else {
- if (!burst && mt_safe)
+ if (!burst && internal_port)
return pipeline_queue_worker_multi_stage_tx(arg);
- else if (!burst && !mt_safe)
+ else if (!burst && !internal_port)
return pipeline_queue_worker_multi_stage_fwd(arg);
- else if (burst && mt_safe)
+ else if (burst && internal_port)
return pipeline_queue_worker_multi_stage_burst_tx(arg);
- else if (burst && !mt_safe)
+ else if (burst && !internal_port)
return pipeline_queue_worker_multi_stage_burst_fwd(arg);
}
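[Editor's note: worker selection now keys on the device's burst capability and on whether every ethdev offers an internal Tx event port, replacing the old mt_unsafe test. t->internal_port is set elsewhere in the test; a plausible probe for it, using the Tx adapter capability API (all_ports_internal is a hypothetical helper):

#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_event_eth_tx_adapter.h>

/* True only if every ethdev can transmit through an internal event
 * port, i.e. workers may enqueue to the Tx adapter directly instead
 * of forwarding to a SINGLE_LINK Tx queue.
 */
static bool
all_ports_internal(uint8_t dev_id)
{
	uint32_t caps;
	uint16_t port;

	RTE_ETH_FOREACH_DEV(port) {
		if (rte_event_eth_tx_adapter_caps_get(dev_id, port, &caps) ||
		    !(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			return false;
	}
	return true;
}
]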
@@ -311,10 +305,6 @@ worker_wrapper(void *arg)
static int
pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
- struct test_pipeline *t = evt_test_priv(test);
-
- if (t->mt_unsafe)
- rte_service_component_runstate_set(t->tx_service.service_id, 1);
return pipeline_launch_lcores(test, opt, worker_wrapper);
}
@@ -326,25 +316,24 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
int nb_queues;
int nb_stages = opt->nb_stages;
uint8_t queue;
- struct rte_event_dev_info info;
- struct test_pipeline *t = evt_test_priv(test);
- uint8_t tx_evqueue_id = 0;
+ uint8_t tx_evport_id = 0;
+ uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint8_t nb_worker_queues = 0;
+ uint16_t prod = 0;
+ struct rte_event_dev_info info;
+ struct test_pipeline *t = evt_test_priv(test);
nb_ports = evt_nr_active_lcores(opt->wlcores);
nb_queues = rte_eth_dev_count_avail() * (nb_stages);
- /* Extra port for Tx service. */
- if (t->mt_unsafe) {
- tx_evqueue_id = nb_queues;
- nb_ports++;
- nb_queues++;
- } else
- nb_queues += rte_eth_dev_count_avail();
+ /* One queue for Tx adapter per port */
+ nb_queues += rte_eth_dev_count_avail();
- rte_event_dev_info_get(opt->dev_id, &info);
+ memset(tx_evqueue_id, 0, sizeof(uint8_t) * RTE_MAX_ETHPORTS);
+ memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ rte_event_dev_info_get(opt->dev_id, &info);
const struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
@@ -370,24 +359,19 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
for (queue = 0; queue < nb_queues; queue++) {
uint8_t slot;
- if (!t->mt_unsafe) {
- slot = queue % (nb_stages + 1);
- q_conf.schedule_type = slot == nb_stages ?
- RTE_SCHED_TYPE_ATOMIC :
- opt->sched_type_list[slot];
- } else {
- slot = queue % nb_stages;
-
- if (queue == tx_evqueue_id) {
- q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+ q_conf.event_queue_cfg = 0;
+ slot = queue % (nb_stages + 1);
+ if (slot == nb_stages) {
+ q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+ if (!t->internal_port) {
q_conf.event_queue_cfg =
RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
- } else {
- q_conf.schedule_type =
- opt->sched_type_list[slot];
- queue_arr[nb_worker_queues] = queue;
- nb_worker_queues++;
}
+ tx_evqueue_id[prod++] = queue;
+ } else {
+ q_conf.schedule_type = opt->sched_type_list[slot];
+ queue_arr[nb_worker_queues] = queue;
+ nb_worker_queues++;
}
ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
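[Editor's note: a worked example of the numbering this loop produces: with two ethdevs and --stages=2, nb_queues = 2 * 2 + 2 = 6 and the stride is nb_stages + 1 = 3, giving:

	queue 0, 1: slot 0, 1 -> eth0 stage 0, stage 1 (worker queues)
	queue 2:    slot 2    -> eth0 Tx queue, tx_evqueue_id[0] = 2
	queue 3, 4: slot 0, 1 -> eth1 stage 0, stage 1 (worker queues)
	queue 5:    slot 2    -> eth1 Tx queue, tx_evqueue_id[1] = 5

The Tx queues are always ATOMIC; SINGLE_LINK is added only when there is no internal port, since the Tx adapter's service event port is then their sole consumer.]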
@@ -407,19 +391,11 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
.new_event_threshold = info.max_num_events,
};
- /*
- * If tx is multi thread safe then allow workers to do Tx else use Tx
- * service to Tx packets.
- */
- if (t->mt_unsafe) {
+ if (!t->internal_port) {
ret = pipeline_event_port_setup(test, opt, queue_arr,
nb_worker_queues, p_conf);
if (ret)
return ret;
-
- ret = pipeline_event_tx_service_setup(test, opt, tx_evqueue_id,
- nb_ports - 1, p_conf);
-
} else
ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
p_conf);
@@ -431,7 +407,6 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
*
* eth_dev_count = 2, nb_stages = 2.
*
- * Multi thread safe :
* queues = 6
* stride = 3
*
@@ -439,21 +414,14 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
* eth0 -> q0 -> q1 -> (q2->tx)
* eth1 -> q3 -> q4 -> (q5->tx)
*
- * q2, q5 configured as ATOMIC
- *
- * Multi thread unsafe :
- * queues = 5
- * stride = 2
- *
- * event queue pipelines:
- * eth0 -> q0 -> q1
- * } (q4->tx) Tx service
- * eth1 -> q2 -> q3
+ * q2, q5 configured as ATOMIC | SINGLE_LINK
*
- * q4 configured as SINGLE_LINK|ATOMIC
*/
- ret = pipeline_event_rx_adapter_setup(opt,
- t->mt_unsafe ? nb_stages : nb_stages + 1, p_conf);
+ ret = pipeline_event_rx_adapter_setup(opt, nb_stages + 1, p_conf);
+ if (ret)
+ return ret;
+
+ ret = pipeline_event_tx_adapter_setup(opt, p_conf);
if (ret)
return ret;
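[Editor's note: pipeline_event_tx_adapter_setup() is a helper in the test's common code; in terms of the public API, configuring a Tx adapter per ethdev reduces to roughly the following sketch (reusing the ethdev port id as the adapter id is an assumption of this sketch):

	uint16_t port;

	RTE_ETH_FOREACH_DEV(port) {
		/* one adapter per ethdev */
		ret = rte_event_eth_tx_adapter_create(port, opt->dev_id,
				&p_conf);
		if (ret)
			return ret;
		/* queue id -1 adds all Tx queues of the ethdev */
		ret = rte_event_eth_tx_adapter_queue_add(port, port, -1);
		if (ret)
			return ret;
	}
]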
@@ -467,12 +435,60 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
}
+ /* Connect the tx_evqueue_id to the Tx adapter port */
+ if (!t->internal_port) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_tx_adapter_event_port_get(prod,
+ &tx_evport_id);
+ if (ret) {
+ evt_err("Unable to get Tx adptr[%d] evprt[%d]",
+ prod, tx_evport_id);
+ return ret;
+ }
+
+ if (rte_event_port_link(opt->dev_id, tx_evport_id,
+ &tx_evqueue_id[prod],
+ NULL, 1) != 1) {
+ evt_err("Unable to link Tx adptr[%d] evprt[%d]",
+ prod, tx_evport_id);
+ return ret;
+ }
+ }
+ }
+
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_eth_dev_start(prod);
+ if (ret) {
+ evt_err("Ethernet dev [%d] failed to start."
+ " Using synthetic producer", prod);
+ return ret;
+ }
+
+ }
+
ret = rte_event_dev_start(opt->dev_id);
if (ret) {
evt_err("failed to start eventdev %d", opt->dev_id);
return ret;
}
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_rx_adapter_start(prod);
+ if (ret) {
+ evt_err("Rx adapter[%d] start failed", prod);
+ return ret;
+ }
+
+ ret = rte_event_eth_tx_adapter_start(prod);
+ if (ret) {
+ evt_err("Tx adapter[%d] start failed", prod);
+ return ret;
+ }
+ }
+
+ memcpy(t->tx_evqueue_id, tx_evqueue_id, sizeof(uint8_t) *
+ RTE_MAX_ETHPORTS);
+
return 0;
}
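[Editor's note: the bring-up order above is ethdevs first, then the event device, then the Rx and Tx adapters. When the PMD lacks an internal Tx port, the adapter created here is service-backed and only makes progress once a service lcore runs it; a sketch of that mapping, with s_lcore standing in for whatever spare lcore the framework picks:

	uint32_t service_id;

	ret = rte_event_eth_tx_adapter_service_id_get(prod, &service_id);
	if (ret == 0) {	/* -ESRCH would mean no service is needed */
		rte_service_runstate_set(service_id, 1);
		rte_service_map_lcore_set(service_id, s_lcore, 1);
		rte_service_lcore_start(s_lcore);
	}
]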