| author    | Luca Boccassi <luca.boccassi@gmail.com>                       | 2018-11-01 11:59:50 +0000 |
|-----------|---------------------------------------------------------------|---------------------------|
| committer | Luca Boccassi <luca.boccassi@gmail.com>                       | 2018-11-01 12:00:19 +0000 |
| commit    | 8d01b9cd70a67cdafd5b965a70420c3bd7fb3f82 (patch)              |                           |
| tree      | 208e3bc33c220854d89d010e3abf720a2e62e546 /drivers/net/bonding |                           |
| parent    | b63264c8342e6a1b6971c79550d2af2024b6a4de (diff)               |                           |
New upstream version 18.11-rc1 (tag: upstream/18.11-rc1)
Change-Id: Iaa71986dd6332e878d8f4bf493101b2bbc6313bb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/bonding')
| -rw-r--r-- | drivers/net/bonding/Makefile                      |   1 |
| -rw-r--r-- | drivers/net/bonding/meson.build                   |   1 |
| -rw-r--r-- | drivers/net/bonding/rte_eth_bond_8023ad.c         |  48 |
| -rw-r--r-- | drivers/net/bonding/rte_eth_bond_8023ad_private.h |   2 |
| -rw-r--r-- | drivers/net/bonding/rte_eth_bond_api.c            | 215 |
| -rw-r--r-- | drivers/net/bonding/rte_eth_bond_flow.c           |  31 |
| -rw-r--r-- | drivers/net/bonding/rte_eth_bond_pmd.c            | 275 |
| -rw-r--r-- | drivers/net/bonding/rte_eth_bond_private.h        |  10 |
8 files changed, 376 insertions, 207 deletions
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index acad16a1..1893e3ca 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -8,6 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
 #
 LIB = librte_pmd_bond.a
 
+CFLAGS += -DALLOW_EXPERIMENTAL_API
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
diff --git a/drivers/net/bonding/meson.build b/drivers/net/bonding/meson.build
index 602d2880..00374edb 100644
--- a/drivers/net/bonding/meson.build
+++ b/drivers/net/bonding/meson.build
@@ -3,6 +3,7 @@
 
 name = 'bond' #, james bond :-)
 version = 2
+allow_experimental_apis = true
 sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', 'rte_eth_bond_flow.c',
 	'rte_eth_bond_args.c', 'rte_eth_bond_8023ad.c', 'rte_eth_bond_alb.c')
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index f8cea4b6..dd847c6f 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -130,7 +130,7 @@ static const struct ether_addr lacp_mac_addr = {
 	.addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
 };
 
-struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
+struct port bond_mode_8023ad_ports[RTE_MAX_ETHPORTS];
 
 static void
 timer_cancel(uint64_t *timer)
@@ -187,7 +187,7 @@ set_warning_flags(struct port *port, uint16_t flags)
 static void
 show_warnings(uint16_t slave_id)
 {
-	struct port *port = &mode_8023ad_ports[slave_id];
+	struct port *port = &bond_mode_8023ad_ports[slave_id];
 	uint8_t warnings;
 
 	do {
@@ -260,7 +260,7 @@ static void
 rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
 		struct lacpdu *lacp)
 {
-	struct port *agg, *port = &mode_8023ad_ports[slave_id];
+	struct port *agg, *port = &bond_mode_8023ad_ports[slave_id];
 	uint64_t timeout;
 
 	if (SM_FLAG(port, BEGIN)) {
@@ -319,7 +319,7 @@ rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
 		ACTOR_STATE_CLR(port, DEFAULTED);
 
 		/* If LACP partner params match this port actor params */
-		agg = &mode_8023ad_ports[port->aggregator_port_id];
+		agg = &bond_mode_8023ad_ports[port->aggregator_port_id];
 		bool match = port->actor.system_priority ==
 			lacp->partner.port_params.system_priority &&
 			is_same_ether_addr(&agg->actor.system,
@@ -380,7 +380,7 @@ rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
 static void
 periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
 {
-	struct port *port = &mode_8023ad_ports[slave_id];
+	struct port *port = &bond_mode_8023ad_ports[slave_id];
 	/* Calculate if either site is LACP enabled */
 	uint64_t timeout;
 	uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
@@ -442,7 +442,7 @@ periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
 static void
 mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
 {
-	struct port *port = &mode_8023ad_ports[slave_id];
+	struct port *port = &bond_mode_8023ad_ports[slave_id];
 
 	/* Save current state for later use */
 	const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
@@ -545,7 +545,7 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
 static void
 tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
 {
-	struct port *agg, *port = &mode_8023ad_ports[slave_id];
+	struct port *agg, *port = &bond_mode_8023ad_ports[slave_id];
 
 	struct rte_mbuf *lacp_pkt = NULL;
 	struct lacpdu_header *hdr;
@@ -591,7 +591,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
 	lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
 	memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
 			sizeof(port->actor));
-	agg = &mode_8023ad_ports[port->aggregator_port_id];
+	agg = &bond_mode_8023ad_ports[port->aggregator_port_id];
 	ether_addr_copy(&agg->actor.system,
 			&hdr->lacpdu.actor.port_params.system);
 	lacpdu->actor.state = port->actor_state;
@@ -677,11 +677,11 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
 	slaves = internals->active_slaves;
 	slaves_count = internals->active_slave_count;
-	port = &mode_8023ad_ports[slave_id];
+	port = &bond_mode_8023ad_ports[slave_id];
 
 	/* Search for aggregator suitable for this port */
 	for (i = 0; i < slaves_count; ++i) {
-		agg = &mode_8023ad_ports[slaves[i]];
+		agg = &bond_mode_8023ad_ports[slaves[i]];
 		/* Skip ports that are not aggregators */
 		if (agg->aggregator_port_id != slaves[i])
 			continue;
@@ -824,7 +824,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 		} else
 			key = 0;
 
-		port = &mode_8023ad_ports[slave_id];
+		port = &bond_mode_8023ad_ports[slave_id];
 
 		key = rte_cpu_to_be_16(key);
 		if (key != port->actor.key) {
@@ -844,7 +844,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 
 	for (i = 0; i < internals->active_slave_count; i++) {
 		slave_id = internals->active_slaves[i];
-		port = &mode_8023ad_ports[slave_id];
+		port = &bond_mode_8023ad_ports[slave_id];
 
 		if ((port->actor.key &
 				rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
@@ -907,7 +907,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
 {
 	struct bond_dev_private *internals = bond_dev->data->dev_private;
 
-	struct port *port = &mode_8023ad_ports[slave_id];
+	struct port *port = &bond_mode_8023ad_ports[slave_id];
 	struct port_params initial = {
 			.system = { { 0 } },
 			.system_priority = rte_cpu_to_be_16(0xFFFF),
@@ -1008,7 +1008,7 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused,
 	struct port *port = NULL;
 	uint8_t old_partner_state;
 
-	port = &mode_8023ad_ports[slave_id];
+	port = &bond_mode_8023ad_ports[slave_id];
 
 	ACTOR_STATE_CLR(port, AGGREGATION);
 	port->selected = UNSELECTED;
@@ -1045,7 +1045,7 @@ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
 
 	for (i = 0; i < internals->active_slave_count; i++) {
 		slave_id = internals->active_slaves[i];
-		slave = &mode_8023ad_ports[slave_id];
+		slave = &bond_mode_8023ad_ports[slave_id];
 		rte_eth_macaddr_get(slave_id, &slave_addr);
 
 		if (is_same_ether_addr(&slave_addr, &slave->actor.system))
@@ -1058,7 +1058,7 @@ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
 			continue;
 
 		for (j = 0; j < internals->active_slave_count; j++) {
-			agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
+			agg_slave = &bond_mode_8023ad_ports[internals->active_slaves[j]];
 			if (agg_slave->aggregator_port_id == slave_id)
 				SM_FLAG_SET(agg_slave, NTT);
 		}
@@ -1191,7 +1191,7 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
 		uint16_t slave_id, struct rte_mbuf *pkt)
 {
 	struct mode8023ad_private *mode4 = &internals->mode4;
-	struct port *port = &mode_8023ad_ports[slave_id];
+	struct port *port = &bond_mode_8023ad_ports[slave_id];
 	struct marker_header *m_hdr;
 	uint64_t marker_timer, old_marker_timer;
 	int retval;
@@ -1395,7 +1395,7 @@ rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
 			internals->active_slave_count)
 		return -EINVAL;
 
-	port = &mode_8023ad_ports[slave_id];
+	port = &bond_mode_8023ad_ports[slave_id];
 	info->selected = port->selected;
 
 	info->actor_state = port->actor_state;
@@ -1447,7 +1447,7 @@ rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
 	if (res != 0)
 		return res;
 
-	port = &mode_8023ad_ports[slave_id];
+	port = &bond_mode_8023ad_ports[slave_id];
 
 	if (enabled)
 		ACTOR_STATE_SET(port, COLLECTING);
@@ -1468,7 +1468,7 @@ rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
 	if (res != 0)
 		return res;
 
-	port = &mode_8023ad_ports[slave_id];
+	port = &bond_mode_8023ad_ports[slave_id];
 
 	if (enabled)
 		ACTOR_STATE_SET(port, DISTRIBUTING);
@@ -1488,7 +1488,7 @@ rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id)
 	if (err != 0)
 		return err;
 
-	port = &mode_8023ad_ports[slave_id];
+	port = &bond_mode_8023ad_ports[slave_id];
 	return ACTOR_STATE(port, DISTRIBUTING);
 }
 
@@ -1502,7 +1502,7 @@ rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id)
 	if (err != 0)
 		return err;
 
-	port = &mode_8023ad_ports[slave_id];
+	port = &bond_mode_8023ad_ports[slave_id];
 	return ACTOR_STATE(port, COLLECTING);
 }
 
@@ -1517,7 +1517,7 @@ rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
 	if (res != 0)
 		return res;
 
-	port = &mode_8023ad_ports[slave_id];
+	port = &bond_mode_8023ad_ports[slave_id];
 
 	if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
 		return -EINVAL;
@@ -1546,7 +1546,7 @@ bond_mode_8023ad_ext_periodic_cb(void *arg)
 
 	for (i = 0; i < internals->active_slave_count; i++) {
 		slave_id = internals->active_slaves[i];
-		port = &mode_8023ad_ports[slave_id];
+		port = &bond_mode_8023ad_ports[slave_id];
 
 		if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
 			struct rte_mbuf *lacp_pkt = pkt;
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index 0f490a51..c51426b8 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -174,7 +174,7 @@ struct mode8023ad_private {
  * The pool of *port* structures. The size of the pool
  * is configured at compile-time in the <rte_eth_bond_8023ad.c> file.
  */
-extern struct port mode_8023ad_ports[];
+extern struct port bond_mode_8023ad_ports[];
 
 /* Forward declaration */
 struct bond_dev_private;
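The rename from `mode_8023ad_ports` to `bond_mode_8023ad_ports` is purely a namespacing fix for this exported global, but the functions touched above are the external mode-4 control path. As a reminder of how that path is driven, here is a hedged sketch using the public 802.3ad ext API (which indexes the renamed pool internally); `bond_port` and `slave_port` are assumptions for the example:

```c
#include <rte_eth_bond_8023ad.h>

/* Illustrative only: an application that runs the LACP state machines
 * externally can gate a slave's COLLECTING/DISTRIBUTING actor-state
 * bits before taking the slave out of service. */
static int
quiesce_slave(uint16_t bond_port, uint16_t slave_port)
{
	int ret;

	/* Stop handing new frames to this slave... */
	ret = rte_eth_bond_8023ad_ext_distrib(bond_port, slave_port, 0);
	if (ret != 0)
		return ret;
	/* ...and stop accepting traffic from it. */
	return rte_eth_bond_8023ad_ext_collect(bond_port, slave_port, 0);
}
```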
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 8bc04cfd..21bcd504 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -245,9 +245,9 @@ slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
 	}
 	TAILQ_FOREACH(flow, &internals->flow_list, next) {
 		flow->flows[slave_id] = rte_flow_create(slave_port_id,
-						&flow->fd->attr,
-						flow->fd->items,
-						flow->fd->actions,
+						flow->rule.attr,
+						flow->rule.pattern,
+						flow->rule.actions,
 						&ferror);
 		if (flow->flows[slave_id] == NULL) {
 			RTE_BOND_LOG(ERR, "Cannot create flow for slave"
@@ -269,6 +269,173 @@ slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
 	return 0;
 }
 
+static void
+eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals,
+					 const struct rte_eth_dev_info *di)
+{
+	struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
+
+	internals->reta_size = di->reta_size;
+
+	/* Inherit Rx offload capabilities from the first slave device */
+	internals->rx_offload_capa = di->rx_offload_capa;
+	internals->rx_queue_offload_capa = di->rx_queue_offload_capa;
+	internals->flow_type_rss_offloads = di->flow_type_rss_offloads;
+
+	/* Inherit maximum Rx packet size from the first slave device */
+	internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
+
+	/* Inherit default Rx queue settings from the first slave device */
+	memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i));
+
+	/*
+	 * Turn off descriptor prefetch and writeback by default for all
+	 * slave devices. Applications may tweak this setting if need be.
+	 */
+	rxconf_i->rx_thresh.pthresh = 0;
+	rxconf_i->rx_thresh.hthresh = 0;
+	rxconf_i->rx_thresh.wthresh = 0;
+
+	/* Setting this to zero should effectively enable default values */
+	rxconf_i->rx_free_thresh = 0;
+
+	/* Disable deferred start by default for all slave devices */
+	rxconf_i->rx_deferred_start = 0;
+}
+
+static void
+eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals,
+					 const struct rte_eth_dev_info *di)
+{
+	struct rte_eth_txconf *txconf_i = &internals->default_txconf;
+
+	/* Inherit Tx offload capabilities from the first slave device */
+	internals->tx_offload_capa = di->tx_offload_capa;
+	internals->tx_queue_offload_capa = di->tx_queue_offload_capa;
+
+	/* Inherit default Tx queue settings from the first slave device */
+	memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i));
+
+	/*
+	 * Turn off descriptor prefetch and writeback by default for all
+	 * slave devices. Applications may tweak this setting if need be.
+	 */
+	txconf_i->tx_thresh.pthresh = 0;
+	txconf_i->tx_thresh.hthresh = 0;
+	txconf_i->tx_thresh.wthresh = 0;
+
+	/*
+	 * Setting these parameters to zero assumes that default
+	 * values will be configured implicitly by slave devices.
+	 */
+	txconf_i->tx_free_thresh = 0;
+	txconf_i->tx_rs_thresh = 0;
+
+	/* Disable deferred start by default for all slave devices */
+	txconf_i->tx_deferred_start = 0;
+}
+
+static void
+eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,
+					const struct rte_eth_dev_info *di)
+{
+	struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
+	const struct rte_eth_rxconf *rxconf = &di->default_rxconf;
+
+	internals->rx_offload_capa &= di->rx_offload_capa;
+	internals->rx_queue_offload_capa &= di->rx_queue_offload_capa;
+	internals->flow_type_rss_offloads &= di->flow_type_rss_offloads;
+
+	/*
+	 * If at least one slave device suggests enabling this
+	 * setting by default, enable it for all slave devices
+	 * since disabling it may not be necessarily supported.
+	 */
+	if (rxconf->rx_drop_en == 1)
+		rxconf_i->rx_drop_en = 1;
+
+	/*
+	 * Adding a new slave device may cause some of previously inherited
+	 * offloads to be withdrawn from the internal rx_queue_offload_capa
+	 * value. Thus, the new internal value of default Rx queue offloads
+	 * has to be masked by rx_queue_offload_capa to make sure that only
+	 * commonly supported offloads are preserved from both the previous
+	 * value and the value being inherited from the new slave device.
+	 */
+	rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
+			     internals->rx_queue_offload_capa;
+
+	/*
+	 * RETA size is the GCD of all slaves' RETA sizes, so, if all sizes
+	 * are powers of 2, the lower one is the GCD
+	 */
+	if (internals->reta_size > di->reta_size)
+		internals->reta_size = di->reta_size;
+
+	if (!internals->max_rx_pktlen &&
+	    di->max_rx_pktlen < internals->candidate_max_rx_pktlen)
+		internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
+}
+
+static void
+eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,
+					const struct rte_eth_dev_info *di)
+{
+	struct rte_eth_txconf *txconf_i = &internals->default_txconf;
+	const struct rte_eth_txconf *txconf = &di->default_txconf;
+
+	internals->tx_offload_capa &= di->tx_offload_capa;
+	internals->tx_queue_offload_capa &= di->tx_queue_offload_capa;
+
+	/*
+	 * Adding a new slave device may cause some of previously inherited
+	 * offloads to be withdrawn from the internal tx_queue_offload_capa
+	 * value. Thus, the new internal value of default Tx queue offloads
+	 * has to be masked by tx_queue_offload_capa to make sure that only
+	 * commonly supported offloads are preserved from both the previous
+	 * value and the value being inherited from the new slave device.
+	 */
+	txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
+			     internals->tx_queue_offload_capa;
+}
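The re-masking described in the comments above is easiest to see with concrete bit values. A minimal, self-contained illustration (the `QCAPA_*` bits are made up for the example, not DPDK flags):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-queue offload capability bits, for illustration only */
#define QCAPA_CKSUM (1ULL << 0)
#define QCAPA_VLAN  (1ULL << 1)
#define QCAPA_TSO   (1ULL << 2)

int main(void)
{
	/* Bond state after inheriting from the first slave */
	uint64_t queue_capa = QCAPA_CKSUM | QCAPA_VLAN | QCAPA_TSO;
	uint64_t def_offloads = QCAPA_CKSUM | QCAPA_TSO;

	/* A second slave that lacks TSO joins the bond */
	uint64_t slave_capa = QCAPA_CKSUM | QCAPA_VLAN;
	uint64_t slave_def = QCAPA_CKSUM | QCAPA_VLAN;

	/* Capabilities shrink to the intersection... */
	queue_capa &= slave_capa;
	/* ...so the default set must be re-masked: TSO is withdrawn even
	 * though it was a default before, while VLAN is merged in from
	 * the new slave's defaults. */
	def_offloads = (def_offloads | slave_def) & queue_capa;

	printf("capa=0x%" PRIx64 " defaults=0x%" PRIx64 "\n",
	       queue_capa, def_offloads); /* capa=0x3 defaults=0x3 */
	return 0;
}
```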
+
+static void
+eth_bond_slave_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim,
+		const struct rte_eth_desc_lim *slave_desc_lim)
+{
+	memcpy(bond_desc_lim, slave_desc_lim, sizeof(*bond_desc_lim));
+}
+
+static int
+eth_bond_slave_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim,
+		const struct rte_eth_desc_lim *slave_desc_lim)
+{
+	bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max,
+					slave_desc_lim->nb_max);
+	bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min,
+					slave_desc_lim->nb_min);
+	bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align,
+					  slave_desc_lim->nb_align);
+
+	if (bond_desc_lim->nb_min > bond_desc_lim->nb_max ||
+	    bond_desc_lim->nb_align > bond_desc_lim->nb_max) {
+		RTE_BOND_LOG(ERR, "Failed to inherit descriptor limits");
+		return -EINVAL;
+	}
+
+	/* Treat maximum number of segments equal to 0 as unspecified */
+	if (slave_desc_lim->nb_seg_max != 0 &&
+	    (bond_desc_lim->nb_seg_max == 0 ||
+	     slave_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max))
+		bond_desc_lim->nb_seg_max = slave_desc_lim->nb_seg_max;
+
+	if (slave_desc_lim->nb_mtu_seg_max != 0 &&
+	    (bond_desc_lim->nb_mtu_seg_max == 0 ||
+	     slave_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max))
+		bond_desc_lim->nb_mtu_seg_max = slave_desc_lim->nb_mtu_seg_max;
+
+	return 0;
+}
+
 static int
 __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
 {
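The descriptor-limit merge keeps the intersection of all slave descriptor windows: the maximum shrinks, the minimum and alignment grow. A self-contained toy example of that min/max/align merge (the local struct is a stand-in mirroring a few fields of `rte_eth_desc_lim`, for illustration only):

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the relevant rte_eth_desc_lim fields */
struct desc_lim {
	uint16_t nb_max, nb_min, nb_align;
};

int main(void)
{
	/* Slave A accepts 64..4096 descriptors, aligned to 8;
	 * slave B accepts 128..2048, aligned to 32. */
	struct desc_lim a = { .nb_max = 4096, .nb_min = 64, .nb_align = 8 };
	struct desc_lim b = { .nb_max = 2048, .nb_min = 128, .nb_align = 32 };
	struct desc_lim bond = a;

	/* The merged window must satisfy both devices */
	bond.nb_max = bond.nb_max < b.nb_max ? bond.nb_max : b.nb_max;
	bond.nb_min = bond.nb_min > b.nb_min ? bond.nb_min : b.nb_min;
	bond.nb_align = bond.nb_align > b.nb_align ? bond.nb_align : b.nb_align;

	/* An empty window (nb_min > nb_max) would mean the slaves are
	 * incompatible; the driver rejects such a slave with -EINVAL. */
	printf("bond: max=%u min=%u align=%u\n",
	       bond.nb_max, bond.nb_min, bond.nb_align); /* 2048 128 32 */
	return 0;
}
```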
@@ -326,34 +493,28 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
 		internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
 		internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
 
-		internals->reta_size = dev_info.reta_size;
+		eth_bond_slave_inherit_dev_info_rx_first(internals, &dev_info);
+		eth_bond_slave_inherit_dev_info_tx_first(internals, &dev_info);
 
-		/* Take the first dev's offload capabilities */
-		internals->rx_offload_capa = dev_info.rx_offload_capa;
-		internals->tx_offload_capa = dev_info.tx_offload_capa;
-		internals->rx_queue_offload_capa = dev_info.rx_queue_offload_capa;
-		internals->tx_queue_offload_capa = dev_info.tx_queue_offload_capa;
-		internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;
+		eth_bond_slave_inherit_desc_lim_first(&internals->rx_desc_lim,
+						      &dev_info.rx_desc_lim);
+		eth_bond_slave_inherit_desc_lim_first(&internals->tx_desc_lim,
+						      &dev_info.tx_desc_lim);
+	} else {
+		int ret;
 
-		/* Inherit first slave's max rx packet size */
-		internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
+		eth_bond_slave_inherit_dev_info_rx_next(internals, &dev_info);
+		eth_bond_slave_inherit_dev_info_tx_next(internals, &dev_info);
 
-	} else {
-		internals->rx_offload_capa &= dev_info.rx_offload_capa;
-		internals->tx_offload_capa &= dev_info.tx_offload_capa;
-		internals->rx_queue_offload_capa &= dev_info.rx_queue_offload_capa;
-		internals->tx_queue_offload_capa &= dev_info.tx_queue_offload_capa;
-		internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
-
-		/* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
-		 * the power of 2, the lower one is GCD
-		 */
-		if (internals->reta_size > dev_info.reta_size)
-			internals->reta_size = dev_info.reta_size;
+		ret = eth_bond_slave_inherit_desc_lim_next(
+				&internals->rx_desc_lim, &dev_info.rx_desc_lim);
+		if (ret != 0)
+			return ret;
 
-		if (!internals->max_rx_pktlen &&
-		    dev_info.max_rx_pktlen < internals->candidate_max_rx_pktlen)
-			internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
+		ret = eth_bond_slave_inherit_desc_lim_next(
+				&internals->tx_desc_lim, &dev_info.tx_desc_lim);
+		if (ret != 0)
+			return ret;
 	}
 
 	bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
diff --git a/drivers/net/bonding/rte_eth_bond_flow.c b/drivers/net/bonding/rte_eth_bond_flow.c
index 31e4bcae..f94d46ca 100644
--- a/drivers/net/bonding/rte_eth_bond_flow.c
+++ b/drivers/net/bonding/rte_eth_bond_flow.c
@@ -2,8 +2,11 @@
  * Copyright 2018 Mellanox Technologies, Ltd
  */
 
+#include <stddef.h>
+#include <string.h>
 #include <sys/queue.h>
 
+#include <rte_errno.h>
 #include <rte_malloc.h>
 #include <rte_tailq.h>
 #include <rte_flow.h>
@@ -16,19 +19,33 @@ bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr,
 		const struct rte_flow_action *actions)
 {
 	struct rte_flow *flow;
-	size_t fdsz;
+	const struct rte_flow_conv_rule rule = {
+		.attr_ro = attr,
+		.pattern_ro = items,
+		.actions_ro = actions,
+	};
+	struct rte_flow_error error;
+	int ret;
 
-	fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
-	flow = rte_zmalloc_socket(NULL, sizeof(struct rte_flow) + fdsz,
+	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
+	if (ret < 0) {
+		RTE_BOND_LOG(ERR, "Unable to process flow rule (%s): %s",
+			     error.message ? error.message : "unspecified",
+			     strerror(rte_errno));
+		return NULL;
+	}
+	flow = rte_zmalloc_socket(NULL, offsetof(struct rte_flow, rule) + ret,
 				  RTE_CACHE_LINE_SIZE, numa_node);
 	if (unlikely(flow == NULL)) {
 		RTE_BOND_LOG(ERR, "Could not allocate new flow");
 		return NULL;
 	}
-	flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
-	if (unlikely(rte_flow_copy(flow->fd, fdsz, attr, items, actions) !=
-		     fdsz)) {
-		RTE_BOND_LOG(ERR, "Failed to copy flow description");
+	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule,
+			    &error);
+	if (ret < 0) {
+		RTE_BOND_LOG(ERR, "Failed to copy flow rule (%s): %s",
+			     error.message ? error.message : "unspecified",
+			     strerror(rte_errno));
 		rte_free(flow);
 		return NULL;
 	}
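The switch from `rte_flow_copy()` to `rte_flow_conv()` keeps the same measure-then-fill idiom: a first call with a NULL destination returns the byte count needed, a second call deep-copies the rule into one flat buffer. A hedged sketch of that idiom under the 18.11 API (`rule_dup()` and plain `malloc()` are illustrative choices, not the driver's code; error handling is trimmed):

```c
#include <stdlib.h>

#include <rte_flow.h>

static struct rte_flow_conv_rule *
rule_dup(const struct rte_flow_attr *attr,
	 const struct rte_flow_item *pattern,
	 const struct rte_flow_action *actions)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct rte_flow_error error;
	struct rte_flow_conv_rule *copy;
	int len;

	/* Pass 1: measure how much room the flattened rule needs. */
	len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
	if (len < 0)
		return NULL;
	copy = malloc(len);
	if (copy == NULL)
		return NULL;
	/* Pass 2: deep-copy attr, pattern and actions into one buffer. */
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, len, &rule,
			  &error) < 0) {
		free(copy);
		return NULL;
	}
	return copy;
}
```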
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 58f7377c..156f31c6 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -37,7 +37,8 @@ get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
 {
 	size_t vlan_offset = 0;
 
-	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto ||
+	    rte_cpu_to_be_16(ETHER_TYPE_QINQ) == *proto) {
 		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
 
 		vlan_offset = sizeof(struct vlan_hdr);
@@ -57,28 +58,34 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	struct bond_dev_private *internals;
 
-	uint16_t num_rx_slave = 0;
 	uint16_t num_rx_total = 0;
-
+	uint16_t slave_count;
+	uint16_t active_slave;
 	int i;
 
 	/* Cast to structure, containing bonded device's port id and queue id */
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
-
 	internals = bd_rx_q->dev_private;
+	slave_count = internals->active_slave_count;
+	active_slave = internals->active_slave;
+
+	for (i = 0; i < slave_count && nb_pkts; i++) {
+		uint16_t num_rx_slave;
 
-	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
 		/* Offset of pointer to *bufs increases as packets are received
 		 * from other slaves */
-		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
-				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
-		if (num_rx_slave) {
-			num_rx_total += num_rx_slave;
-			nb_pkts -= num_rx_slave;
-		}
+		num_rx_slave =
+			rte_eth_rx_burst(internals->active_slaves[active_slave],
+					 bd_rx_q->queue_id,
+					 bufs + num_rx_total, nb_pkts);
+		num_rx_total += num_rx_slave;
+		nb_pkts -= num_rx_slave;
+		if (++active_slave == slave_count)
+			active_slave = 0;
 	}
 
+	if (++internals->active_slave == slave_count)
+		internals->active_slave = 0;
 	return num_rx_total;
 }
 
@@ -257,25 +264,32 @@ bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
 	uint16_t num_rx_total = 0;	/* Total number of received packets */
 	uint16_t slaves[RTE_MAX_ETHPORTS];
 	uint16_t slave_count;
-
-	uint16_t i, idx;
+	uint16_t active_slave;
+	uint16_t i;
 
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
 	slave_count = internals->active_slave_count;
+	active_slave = internals->active_slave;
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * slave_count);
 
-	for (i = 0, idx = internals->active_slave;
-			i < slave_count && num_rx_total < nb_pkts; i++, idx++) {
-		idx = idx % slave_count;
+	for (i = 0; i < slave_count && nb_pkts; i++) {
+		uint16_t num_rx_slave;
 
 		/* Read packets from this slave */
-		num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
-				&bufs[num_rx_total], nb_pkts - num_rx_total);
+		num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
+						bd_rx_q->queue_id,
+						bufs + num_rx_total, nb_pkts);
+		num_rx_total += num_rx_slave;
+		nb_pkts -= num_rx_slave;
+
+		if (++active_slave == slave_count)
+			active_slave = 0;
 	}
 
-	internals->active_slave = idx;
+	if (++internals->active_slave == slave_count)
+		internals->active_slave = 0;
 	return num_rx_total;
 }
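Both Rx rewrites above persist the polling start position across bursts, so a slave with heavy traffic can no longer starve the others out of being polled first. A self-contained model of the rotation (`poll_fn` stands in for `rte_eth_rx_burst()` on one slave; this is an illustration, not the PMD code itself):

```c
#include <stdint.h>

typedef uint16_t (*poll_fn)(uint16_t slave, void **bufs, uint16_t room);

struct rr_state {
	uint16_t active_slave;	/* where the next burst starts polling */
	uint16_t slave_count;
};

static uint16_t
rr_rx_burst(struct rr_state *s, poll_fn poll, void **bufs, uint16_t nb_pkts)
{
	uint16_t total = 0;
	uint16_t slave = s->active_slave;
	uint16_t i;

	for (i = 0; i < s->slave_count && nb_pkts; i++) {
		uint16_t n = poll(slave, bufs + total, nb_pkts);

		total += n;
		nb_pkts -= n;
		if (++slave == s->slave_count)	/* wrap around */
			slave = 0;
	}
	/* Advance the starting point by one slave per call so every
	 * slave eventually gets polled first. */
	if (++s->active_slave == s->slave_count)
		s->active_slave = 0;
	return total;
}
```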
@@ -300,10 +314,10 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
 	/* Mapping array generated by hash function to map mbufs to slaves */
 	uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
 
-	uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+	uint16_t slave_tx_count;
 	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
 
-	uint16_t i, j;
+	uint16_t i;
 
 	if (unlikely(nb_bufs == 0))
 		return 0;
@@ -320,7 +334,7 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
 	dist_slave_count = 0;
 	for (i = 0; i < slave_count; i++) {
-		struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
 
 		if (ACTOR_STATE(port, DISTRIBUTING))
 			dist_slave_port_ids[dist_slave_count++] =
@@ -358,34 +372,12 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
 
 		/* If tx burst fails move packets to end of bufs */
 		if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-			slave_tx_fail_count[i] = slave_nb_bufs[i] -
+			int slave_tx_fail_count = slave_nb_bufs[i] -
 					slave_tx_count;
-			total_tx_fail_count += slave_tx_fail_count[i];
-
-			/*
-			 * Shift bufs to beginning of array to allow reordering
-			 * later
-			 */
-			for (j = 0; j < slave_tx_fail_count[i]; j++) {
-				slave_bufs[i][j] =
-					slave_bufs[i][(slave_tx_count - 1) + j];
-			}
-		}
-	}
-
-	/*
-	 * If there are tx burst failures we move packets to end of bufs to
-	 * preserve expected PMD behaviour of all failed transmitted being
-	 * at the end of the input mbuf array
-	 */
-	if (unlikely(total_tx_fail_count > 0)) {
-		int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
-		for (i = 0; i < slave_count; i++) {
-			if (slave_tx_fail_count[i] > 0) {
-				for (j = 0; j < slave_tx_fail_count[i]; j++)
-					bufs[bufs_idx++] = slave_bufs[i][j];
-			}
+			total_tx_fail_count += slave_tx_fail_count;
+			memcpy(&bufs[nb_bufs - total_tx_fail_count],
+			       &slave_bufs[i][slave_tx_count],
+			       slave_tx_fail_count * sizeof(bufs[0]));
 		}
 	}
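The memcpy-based repacking relies on the ethdev contract that untransmitted mbufs remain owned by the caller at the tail of the input array; it replaces per-slave fail counters and two copy loops with one copy per failing slave (and, incidentally, the old shift read from `slave_tx_count - 1`, which looks off by one, while the memcpy starts at `slave_tx_count`). A standalone model of the same invariant:

```c
#include <stdint.h>
#include <string.h>

/* Illustrative model: copy each slave's unsent packet pointers straight
 * into the tail of the caller's bufs[] array, growing the tail as more
 * slaves fail, so all failures end up contiguous at the end. */
static uint16_t
repack_tx(void **bufs, uint16_t nb_bufs,
	  void **slave_bufs[], const uint16_t slave_nb[],
	  const uint16_t slave_sent[], uint16_t slave_count)
{
	uint16_t total_fail = 0;
	uint16_t i;

	for (i = 0; i < slave_count; i++) {
		uint16_t fail = slave_nb[i] - slave_sent[i];

		if (fail == 0)
			continue;
		total_fail += fail;
		/* Unsent entries land at bufs[nb_bufs - total_fail ...] */
		memcpy(&bufs[nb_bufs - total_fail],
		       &slave_bufs[i][slave_sent[i]],
		       fail * sizeof(bufs[0]));
	}
	return nb_bufs - total_fail;	/* number actually transmitted */
}
```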
@@ -400,8 +392,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	/* Cast to structure, containing bonded device's port id and queue id */
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
 	struct bond_dev_private *internals = bd_rx_q->dev_private;
-	struct ether_addr bond_mac;
-
+	struct rte_eth_dev *bonded_eth_dev =
+					&rte_eth_devices[internals->port_id];
+	struct ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
 	struct ether_hdr *hdr;
 
 	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
@@ -414,7 +407,6 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	uint8_t i, j, k;
 	uint8_t subtype;
 
-	rte_eth_macaddr_get(internals->port_id, &bond_mac);
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
 	slave_count = internals->active_slave_count;
@@ -428,7 +420,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	}
 	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
 		j = num_rx_total;
-		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
+		collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
 				COLLECTING);
 
 		/* Read packets from this slave */
@@ -457,9 +449,11 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 			 * in collecting state or bonding interface is not in promiscuous
 			 * mode and packet address does not match.
 			 */
 			if (unlikely(is_lacp_packets(hdr->ether_type, subtype,
-				bufs[j]) || !collecting || (!promisc &&
-					!is_multicast_ether_addr(&hdr->d_addr) &&
-					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
+				bufs[j]) ||
+				!collecting ||
+				(!promisc &&
+				 !is_multicast_ether_addr(&hdr->d_addr) &&
+				 !is_same_ether_addr(bond_mac,
+						     &hdr->d_addr)))) {
 
 				if (hdr->ether_type == ether_type_slow_be) {
 					bond_mode_8023ad_handle_slow_pkt(
@@ -480,7 +474,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 			idx = 0;
 	}
 
-	internals->active_slave = idx;
+	if (++internals->active_slave == slave_count)
+		internals->active_slave = 0;
+
 	return num_rx_total;
 }
 
@@ -715,8 +711,8 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 				tx_fail_total += tx_fail_slave;
 
 				memcpy(&bufs[nb_pkts - tx_fail_total],
-					&slave_bufs[i][num_tx_slave],
-					tx_fail_slave * sizeof(bufs[0]));
+				       &slave_bufs[i][num_tx_slave],
+				       tx_fail_slave * sizeof(bufs[0]));
 			}
 			num_tx_total += num_tx_slave;
 		}
@@ -1221,10 +1217,10 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	/* Mapping array generated by hash function to map mbufs to slaves */
 	uint16_t bufs_slave_port_idxs[nb_bufs];
 
-	uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+	uint16_t slave_tx_count;
 	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
 
-	uint16_t i, j;
+	uint16_t i;
 
 	if (unlikely(nb_bufs == 0))
 		return 0;
@@ -1265,34 +1261,12 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 
 		/* If tx burst fails move packets to end of bufs */
 		if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-			slave_tx_fail_count[i] = slave_nb_bufs[i] -
+			int slave_tx_fail_count = slave_nb_bufs[i] -
 					slave_tx_count;
-			total_tx_fail_count += slave_tx_fail_count[i];
-
-			/*
-			 * Shift bufs to beginning of array to allow reordering
-			 * later
-			 */
-			for (j = 0; j < slave_tx_fail_count[i]; j++) {
-				slave_bufs[i][j] =
-					slave_bufs[i][(slave_tx_count - 1) + j];
-			}
-		}
-	}
-
-	/*
-	 * If there are tx burst failures we move packets to end of bufs to
-	 * preserve expected PMD behaviour of all failed transmitted being
-	 * at the end of the input mbuf array
-	 */
-	if (unlikely(total_tx_fail_count > 0)) {
-		int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
-		for (i = 0; i < slave_count; i++) {
-			if (slave_tx_fail_count[i] > 0) {
-				for (j = 0; j < slave_tx_fail_count[i]; j++)
-					bufs[bufs_idx++] = slave_bufs[i][j];
-			}
+			total_tx_fail_count += slave_tx_fail_count;
+			memcpy(&bufs[nb_bufs - total_tx_fail_count],
+			       &slave_bufs[i][slave_tx_count],
+			       slave_tx_fail_count * sizeof(bufs[0]));
 		}
 	}
 
@@ -1319,10 +1293,10 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	/* Mapping array generated by hash function to map mbufs to slaves */
 	uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
 
-	uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+	uint16_t slave_tx_count;
 	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
 
-	uint16_t i, j;
+	uint16_t i;
 
 	if (unlikely(nb_bufs == 0))
 		return 0;
@@ -1338,7 +1312,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	dist_slave_count = 0;
 	for (i = 0; i < slave_count; i++) {
-		struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
 
 		if (ACTOR_STATE(port, DISTRIBUTING))
 			dist_slave_port_ids[dist_slave_count++] =
@@ -1380,46 +1354,20 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 
 			/* If tx burst fails move packets to end of bufs */
 			if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-				slave_tx_fail_count[i] = slave_nb_bufs[i] -
+				int slave_tx_fail_count = slave_nb_bufs[i] -
 						slave_tx_count;
-				total_tx_fail_count += slave_tx_fail_count[i];
-
-				/*
-				 * Shift bufs to beginning of array to allow
-				 * reordering later
-				 */
-				for (j = 0; j < slave_tx_fail_count[i]; j++)
-					slave_bufs[i][j] =
-						slave_bufs[i]
-							[(slave_tx_count - 1)
-							+ j];
-			}
-		}
+				total_tx_fail_count += slave_tx_fail_count;
 
-		/*
-		 * If there are tx burst failures we move packets to end of
-		 * bufs to preserve expected PMD behaviour of all failed
-		 * transmitted being at the end of the input mbuf array
-		 */
-		if (unlikely(total_tx_fail_count > 0)) {
-			int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
-			for (i = 0; i < slave_count; i++) {
-				if (slave_tx_fail_count[i] > 0) {
-					for (j = 0;
-					     j < slave_tx_fail_count[i];
-					     j++) {
-						bufs[bufs_idx++] =
-							slave_bufs[i][j];
-					}
-				}
+				memcpy(&bufs[nb_bufs - total_tx_fail_count],
+				       &slave_bufs[i][slave_tx_count],
+				       slave_tx_fail_count * sizeof(bufs[0]));
 			}
 		}
 	}
 
 	/* Check for LACP control packets and send if available */
 	for (i = 0; i < slave_count; i++) {
-		struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
 		struct rte_mbuf *ctrl_pkt = NULL;
 
 		if (likely(rte_ring_empty(port->tx_ring)))
@@ -1770,7 +1718,7 @@ slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
 	int errval = 0;
 	struct bond_dev_private *internals = (struct bond_dev_private *)
 		bonded_eth_dev->data->dev_private;
-	struct port *port = &mode_8023ad_ports[slave_eth_dev->data->port_id];
+	struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
 
 	if (port->slow_pool == NULL) {
 		char mem_name[256];
@@ -1847,12 +1795,11 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	/* If RSS is enabled for bonding, try to enable it for slaves  */
 	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
-				!= 0) {
+		if (internals->rss_key_len != 0) {
 			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
-					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+					internals->rss_key_len;
 			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
-					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+					internals->rss_key;
 		} else {
 			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
 		}
@@ -2210,7 +2157,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 
 		/* Discard all messages to/from mode 4 state machines */
 		for (i = 0; i < internals->active_slave_count; i++) {
-			port = &mode_8023ad_ports[internals->active_slaves[i]];
+			port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
 
 			RTE_ASSERT(port->rx_ring != NULL);
 			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
@@ -2229,12 +2176,15 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}
 
-	internals->link_status_polling_enabled = 0;
-	for (i = 0; i < internals->slave_count; i++)
-		internals->slaves[i].last_link_status = 0;
-
 	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
+
+	internals->link_status_polling_enabled = 0;
+	for (i = 0; i < internals->slave_count; i++) {
+		internals->slaves[i].last_link_status = 0;
+		rte_eth_dev_stop(internals->slaves[i].port_id);
+		deactivate_slave(eth_dev, internals->slaves[i].port_id);
+	}
 }
 
 void
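With the change above, stopping the bonded device now also stops and deactivates every slave port, so applications no longer have to walk the slave list themselves. A hedged sketch of the resulting teardown order (the port ids and helper name are hypothetical):

```c
#include <rte_eth_bond.h>
#include <rte_ethdev.h>

static void
bond_teardown(uint16_t bond_port_id, uint16_t slave_port_id)
{
	rte_eth_dev_stop(bond_port_id);	/* stops + deactivates slaves too */
	(void)rte_eth_bond_slave_remove(bond_port_id, slave_port_id);
	rte_eth_dev_close(bond_port_id);
}
```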
@@ -2303,6 +2253,16 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = max_nb_rx_queues;
 	dev_info->max_tx_queues = max_nb_tx_queues;
 
+	memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
+	       sizeof(dev_info->default_rxconf));
+	memcpy(&dev_info->default_txconf, &internals->default_txconf,
+	       sizeof(dev_info->default_txconf));
+
+	memcpy(&dev_info->rx_desc_lim, &internals->rx_desc_lim,
+	       sizeof(dev_info->rx_desc_lim));
+	memcpy(&dev_info->tx_desc_lim, &internals->tx_desc_lim,
+	       sizeof(dev_info->tx_desc_lim));
+
 	/**
 	 * If dedicated hw queues enabled for link bonding device in LACP mode
 	 * then we need to reduce the maximum number of data path queues by 1.
@@ -3123,6 +3083,14 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	/* Initially allow to choose any offload type */
 	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
 
+	memset(&internals->default_rxconf, 0,
+	       sizeof(internals->default_rxconf));
+	memset(&internals->default_txconf, 0,
+	       sizeof(internals->default_txconf));
+
+	memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
+	memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
+
 	memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
 	memset(internals->slaves, 0, sizeof(internals->slaves));
 
@@ -3162,10 +3130,9 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 
 err:
 	rte_free(internals);
-	if (eth_dev != NULL) {
-		rte_free(eth_dev->data->mac_addrs);
-		rte_eth_dev_release_port(eth_dev);
-	}
+	if (eth_dev != NULL)
+		eth_dev->data->dev_private = NULL;
+	rte_eth_dev_release_port(eth_dev);
 	return -1;
 }
 
@@ -3186,8 +3153,7 @@ bond_probe(struct rte_vdev_device *dev)
 	name = rte_vdev_device_name(dev);
 	RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
 
-	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
-	    strlen(rte_vdev_device_args(dev)) == 0) {
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		eth_dev = rte_eth_dev_attach_secondary(name);
 		if (!eth_dev) {
 			RTE_BOND_LOG(ERR, "Failed to probe %s", name);
@@ -3302,6 +3268,9 @@ bond_remove(struct rte_vdev_device *dev)
 	if (eth_dev == NULL)
 		return -ENODEV;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return rte_eth_dev_release_port(eth_dev);
+
 	RTE_ASSERT(eth_dev->device == &dev->device);
 
 	internals = eth_dev->data->dev_private;
@@ -3324,8 +3293,6 @@ bond_remove(struct rte_vdev_device *dev)
 	rte_mempool_free(internals->mode6.mempool);
 	rte_bitmap_free(internals->vlan_filter_bmp);
 	rte_free(internals->vlan_filter_bmpmem);
-	rte_free(eth_dev->data->dev_private);
-	rte_free(eth_dev->data->mac_addrs);
 
 	rte_eth_dev_release_port(eth_dev);
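With the configure-side change below, an application-supplied RSS key is retained (along with its real length) and later pushed to every slave, instead of being overwritten by the 40-byte built-in default. A sketch of supplying such a key (queue counts and the helper name are assumptions for the example; passing `rss_key = NULL` triggers the default-key fallback):

```c
#include <rte_ethdev.h>

static int
configure_bond_rss(uint16_t bond_port_id, uint8_t *key, uint8_t key_len)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf.rss_conf = {
			.rss_key = key,		/* NULL => default key */
			.rss_key_len = key_len,
			.rss_hf = ETH_RSS_IP,
		},
	};

	return rte_eth_dev_configure(bond_port_id, 4 /* rxq */, 4 /* txq */,
				     &conf);
}
```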
@@ -3353,16 +3320,30 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 
 	unsigned i, j;
 
-	/* If RSS is enabled, fill table and key with default values */
+	/*
+	 * If RSS is enabled, fill table with default values and
+	 * set key to the value specified in port RSS configuration.
+	 * Fall back to default RSS key if the key is not specified
+	 */
 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
-		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
-		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
-		memcpy(internals->rss_key, default_rss_key, 40);
+		if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
+			internals->rss_key_len =
+				dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+			memcpy(internals->rss_key,
+			       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
+			       internals->rss_key_len);
+		} else {
+			internals->rss_key_len = sizeof(default_rss_key);
+			memcpy(internals->rss_key, default_rss_key,
+			       internals->rss_key_len);
+		}
 
 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
 			internals->reta_conf[i].mask = ~0LL;
 			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
-				internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
+				internals->reta_conf[i].reta[j] =
+					(i * RTE_RETA_GROUP_SIZE + j) %
+					dev->data->nb_rx_queues;
 		}
 	}
 
@@ -3618,7 +3599,7 @@ int bond_logtype;
 
 RTE_INIT(bond_init_log)
 {
-	bond_logtype = rte_log_register("pmd.net.bon");
+	bond_logtype = rte_log_register("pmd.net.bond");
 	if (bond_logtype >= 0)
 		rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
 }
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 43e0e448..3ea5d686 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -5,9 +5,11 @@
 #ifndef _RTE_ETH_BOND_PRIVATE_H_
 #define _RTE_ETH_BOND_PRIVATE_H_
 
+#include <stdint.h>
 #include <sys/queue.h>
 
 #include <rte_ethdev_driver.h>
+#include <rte_flow.h>
 #include <rte_spinlock.h>
 #include <rte_bitmap.h>
 #include <rte_flow_driver.h>
@@ -93,7 +95,8 @@ struct rte_flow {
 	/* Slaves flows */
 	struct rte_flow *flows[RTE_MAX_ETHPORTS];
 	/* Flow description for synchronization */
-	struct rte_flow_desc *fd;
+	struct rte_flow_conv_rule rule;
+	uint8_t rule_data[];
 };
 
 typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
@@ -160,6 +163,11 @@ struct bond_dev_private {
 	/** Bit mask of RSS offloads, the bit offset also means flow type */
 	uint64_t flow_type_rss_offloads;
 
+	struct rte_eth_rxconf default_rxconf;	/**< Default RxQ conf. */
+	struct rte_eth_txconf default_txconf;	/**< Default TxQ conf. */
+	struct rte_eth_desc_lim rx_desc_lim;	/**< Rx descriptor limits */
+	struct rte_eth_desc_lim tx_desc_lim;	/**< Tx descriptor limits */
+
 	uint16_t reta_size;
 	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
 			RTE_RETA_GROUP_SIZE];
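The RETA fix above swaps `j % nb_rx_queues` for a modulo over the global entry index. A toy program showing why it matters: with 3 Rx queues, the old per-group formula restarts its 0,1,2,... cycle at every 64-entry group boundary, double-weighting queue 0, while the global index keeps the cycle continuous:

```c
#include <stdio.h>

#define RETA_GROUP_SIZE 64	/* matches RTE_RETA_GROUP_SIZE */

int main(void)
{
	int nb_rx_queues = 3;	/* deliberately not a divisor of 64 */
	int i, j;

	for (i = 0; i < 2; i++) {	/* two 64-entry RETA groups */
		for (j = 0; j < RETA_GROUP_SIZE; j++) {
			int entry = i * RETA_GROUP_SIZE + j;
			int old = j % nb_rx_queues;	 /* per-group */
			int new = entry % nb_rx_queues;	 /* global index */

			/* Print only the group boundary to show the skew:
			 * old maps entries 63 and 64 both to queue 0. */
			if (entry == 63 || entry == 64)
				printf("entry %3d: old=q%d new=q%d\n",
				       entry, old, new);
		}
	}
	return 0;	/* entry 63: old=q0 new=q0; entry 64: old=q0 new=q1 */
}
```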