author     Luca Boccassi <luca.boccassi@gmail.com>   2018-08-14 18:52:30 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>   2018-08-14 18:53:17 +0100
commit     b63264c8342e6a1b6971c79550d2af2024b6a4de
tree       83114aac64286fe616506c0b3dfaec2ab86ef835 /examples/eventdev_pipeline
parent     ca33590b6af032bff57d9cc70455660466a654b2

New upstream version 18.08 (upstream/18.08)

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'examples/eventdev_pipeline')
 -rw-r--r--  examples/eventdev_pipeline/main.c                      | 38
 -rw-r--r--  examples/eventdev_pipeline/pipeline_worker_generic.c   |  7
 -rw-r--r--  examples/eventdev_pipeline/pipeline_worker_tx.c        |  6
 3 files changed, 29 insertions(+), 22 deletions(-)
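
The recurring change across all three files is the DPDK 18.08 ethdev API migration:
counting and bounds-checking ports with the deprecated rte_eth_dev_count() gives way
to rte_eth_dev_count_avail(), per-port loops use the RTE_ETH_FOREACH_DEV() iterator,
single port ids are validated with rte_eth_dev_is_valid_port(), and port ids are
carried as uint16_t. A minimal sketch of the new iteration pattern, not part of the
patch (the loop body is left as a placeholder for the example's own port_init()):

    #include <rte_ethdev.h>

    static int
    init_all_ports(void)
    {
            uint16_t portid;

            /* rte_eth_dev_count_avail() replaces the deprecated rte_eth_dev_count() */
            if (rte_eth_dev_count_avail() == 0)
                    return -1;

            /* RTE_ETH_FOREACH_DEV() visits only valid, attached ports, so no
             * "portid < port count" bounds check is needed here */
            RTE_ETH_FOREACH_DEV(portid) {
                    /* per-port configuration (the example's port_init()) goes here */
            }
            return 0;
    }
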
diff --git a/examples/eventdev_pipeline/main.c b/examples/eventdev_pipeline/main.c
index 2422c184..700bc696 100644
--- a/examples/eventdev_pipeline/main.c
+++ b/examples/eventdev_pipeline/main.c
@@ -267,7 +267,6 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
@@ -285,7 +284,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf txconf;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rte_eth_dev_info_get(port, &dev_info);
@@ -293,6 +292,17 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf_default.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ port,
+ port_conf_default.rx_adv_conf.rss_conf.rss_hf,
+ port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval != 0)
@@ -307,7 +317,6 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf_default.txmode.offloads;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
@@ -339,10 +348,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
}
static int
-init_ports(unsigned int num_ports)
+init_ports(uint16_t num_ports)
{
- uint8_t portid;
- unsigned int i;
+ uint16_t portid, i;
if (!cdata.num_mbuf)
cdata.num_mbuf = 16384 * num_ports;
@@ -354,12 +362,12 @@ init_ports(unsigned int num_ports)
/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
rte_socket_id());
- for (portid = 0; portid < num_ports; portid++)
+ RTE_ETH_FOREACH_DEV(portid)
if (port_init(portid, mp) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
- for (i = 0; i < num_ports; i++) {
+ RTE_ETH_FOREACH_DEV(i) {
void *userdata = (void *)(uintptr_t) i;
fdata->tx_buf[i] =
rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
@@ -375,13 +383,13 @@ init_ports(unsigned int num_ports)
}
static void
-do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
+do_capability_setup(uint8_t eventdev_id)
{
- int i;
+ uint16_t i;
uint8_t mt_unsafe = 0;
uint8_t burst = 0;
- for (i = 0; i < nb_ethdev; i++) {
+ RTE_ETH_FOREACH_DEV(i) {
struct rte_eth_dev_info dev_info;
memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
@@ -430,7 +438,7 @@ int
main(int argc, char **argv)
{
struct worker_data *worker_data;
- unsigned int num_ports;
+ uint16_t num_ports;
int lcore_id;
int err;
@@ -452,7 +460,7 @@ main(int argc, char **argv)
/* Parse cli options*/
parse_app_args(argc, argv);
- num_ports = rte_eth_dev_count();
+ num_ports = rte_eth_dev_count_avail();
if (num_ports == 0)
rte_panic("No ethernet ports found\n");
@@ -483,7 +491,7 @@ main(int argc, char **argv)
fprintf(stderr, "Warning: More than one eventdev, using idx 0");
- do_capability_setup(num_ports, 0);
+ do_capability_setup(0);
fdata->cap.check_opt();
worker_data = rte_calloc(0, cdata.num_workers,
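
The new RSS hunk in port_init() masks the requested hash types against what the port
actually supports before rte_eth_dev_configure() is called, and reports when the mask
changes. A minimal standalone sketch of that clamping; clamp_rss() and its message
wording are illustrative, not the example's own code:

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static void
    clamp_rss(uint16_t port, struct rte_eth_conf *conf)
    {
            struct rte_eth_dev_info dev_info;
            uint64_t requested = conf->rx_adv_conf.rss_conf.rss_hf;

            rte_eth_dev_info_get(port, &dev_info);
            /* keep only the hash types the hardware can compute */
            conf->rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
            if (conf->rx_adv_conf.rss_conf.rss_hf != requested)
                    printf("port %u: rss_hf clamped %#"PRIx64" -> %#"PRIx64"\n",
                           port, requested, conf->rx_adv_conf.rss_conf.rss_hf);
    }
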
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index c673160f..2215e9eb 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -138,7 +138,7 @@ consumer(void)
&packet, 1, 0);
if (n == 0) {
- for (i = 0; i < rte_eth_dev_count(); i++)
+ RTE_ETH_FOREACH_DEV(i)
rte_eth_tx_buffer_flush(i, 0, fdata->tx_buf[i]);
return 0;
}
@@ -196,14 +196,13 @@ consumer_burst(void)
unsigned int i, j;
uint8_t dev_id = cons_data.dev_id;
uint8_t port_id = cons_data.port_id;
- uint16_t nb_ports = rte_eth_dev_count();
do {
uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
packets, RTE_DIM(packets), 0);
if (n == 0) {
- for (j = 0; j < nb_ports; j++)
+ RTE_ETH_FOREACH_DEV(j)
rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
return 0;
}
@@ -521,7 +520,7 @@ generic_opt_check(void)
rte_exit(EXIT_FAILURE,
"Event dev doesn't support all type queues\n");
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
rte_exit(EXIT_FAILURE,
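
Both consumer paths above now flush the per-port TX buffers by walking
RTE_ETH_FOREACH_DEV() instead of caching rte_eth_dev_count() in nb_ports, so only
currently valid ports are touched. A minimal sketch of that flush loop;
flush_all_tx_buffers() is hypothetical and bufs[] stands in for the example's
fdata->tx_buf[], indexed by port id:

    #include <rte_ethdev.h>

    static void
    flush_all_tx_buffers(struct rte_eth_tx_buffer *bufs[])
    {
            uint16_t port;

            /* queue 0 only, matching the single TX queue set up in port_init() */
            RTE_ETH_FOREACH_DEV(port)
                    rte_eth_tx_buffer_flush(port, 0, bufs[port]);
    }
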
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index b254b03f..3dbde92d 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -422,7 +422,7 @@ setup_eventdev_worker_tx(struct cons_data *cons_data,
const uint8_t dev_id = 0;
const uint8_t nb_ports = cdata.num_workers;
uint8_t nb_slots = 0;
- uint8_t nb_queues = rte_eth_dev_count();
+ uint8_t nb_queues = rte_eth_dev_count_avail();
/*
* In case where all type queues are not enabled, use queues equal to
@@ -431,7 +431,7 @@ setup_eventdev_worker_tx(struct cons_data *cons_data,
*/
if (!atq) {
nb_queues *= cdata.num_stages;
- nb_queues += rte_eth_dev_count();
+ nb_queues += rte_eth_dev_count_avail();
}
struct rte_event_dev_config config = {
@@ -735,7 +735,7 @@ worker_tx_opt_check(void)
rte_exit(EXIT_FAILURE,
"Event dev doesn't support all type queues\n");
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
rte_exit(EXIT_FAILURE,
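
setup_eventdev_worker_tx() now sizes its event queues from rte_eth_dev_count_avail().
A small sketch of that queue-count arithmetic, with num_stages and atq standing in for
cdata.num_stages and the all-type-queue capability flag; the per-port/per-stage reading
in the comments is my interpretation of the source comment, not upstream wording:

    #include <rte_ethdev.h>

    static uint8_t
    worker_tx_nb_queues(unsigned int num_stages, int atq)
    {
            uint16_t nb_eth = rte_eth_dev_count_avail();
            uint8_t nb_queues = nb_eth;   /* all-type queues: one queue per ethdev */

            if (!atq) {
                    /* otherwise one queue per port per stage, plus one TX queue
                     * per port; uint8_t as in the example, so very large
                     * port/stage counts would wrap */
                    nb_queues = nb_eth * num_stages + nb_eth;
            }
            return nb_queues;
    }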