author    Luca Boccassi <luca.boccassi@gmail.com>  2018-02-19 11:16:57 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>  2018-02-19 11:17:28 +0000
commit    ca33590b6af032bff57d9cc70455660466a654b2
tree      0b68b090bd9b4a78a3614b62400b29279d76d553
parent    169a9de21e263aa6599cdc2d87a45ae158d9f509
New upstream version 18.02 (upstream/18.02)

Change-Id: I89ed24cb2a49b78fe5be6970b99dd46c1499fcc3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/mlx5/mlx5_ethdev.c')
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 386 +-
1 file changed, 224 insertions(+), 162 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index a3cef689..66650769 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#define _GNU_SOURCE
@@ -55,7 +27,7 @@
#include <sys/un.h>
#include <rte_atomic.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_common.h>
@@ -64,6 +36,7 @@
#include <rte_malloc.h>
#include "mlx5.h"
+#include "mlx5_glue.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
@@ -119,33 +92,6 @@ struct ethtool_link_settings {
#endif
/**
- * Return private structure associated with an Ethernet device.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * Pointer to private structure.
- */
-struct priv *
-mlx5_get_priv(struct rte_eth_dev *dev)
-{
- return dev->data->dev_private;
-}
-
-/**
- * Check if running as a secondary process.
- *
- * @return
- * Nonzero if running as a secondary process.
- */
-inline int
-mlx5_is_secondary(void)
-{
- return rte_eal_process_type() == RTE_PROC_SECONDARY;
-}
-
-/**
* Get interface name from private structure.
*
* @param[in] priv
@@ -577,8 +523,26 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int j;
unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
- !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
-
+ !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t supp_rx_offloads =
+ (mlx5_priv_get_rx_port_offloads(priv) |
+ mlx5_priv_get_rx_queue_offloads(priv));
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+
+ if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+ ERROR("Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, supp_tx_offloads);
+ return ENOTSUP;
+ }
+ if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
+ ERROR("Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, supp_rx_offloads);
+ return ENOTSUP;
+ }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
@@ -606,7 +570,7 @@ dev_configure(struct rte_eth_dev *dev)
(void *)dev, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
- if (rxqs_n > priv->ind_table_max_size) {
+ if (rxqs_n > priv->config.ind_table_max_size) {
ERROR("cannot handle this many RX queues (%u)", rxqs_n);
return EINVAL;
}
@@ -619,7 +583,7 @@ dev_configure(struct rte_eth_dev *dev)
* maximum indirection table size for better balancing.
* The result is always rounded to the next power of two. */
reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
- priv->ind_table_max_size :
+ priv->config.ind_table_max_size :
rxqs_n));
if (priv_rss_reta_index_resize(priv, reta_idx_n))
return ENOMEM;
@@ -649,9 +613,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
int ret;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
priv_lock(priv);
ret = dev_configure(dev);
assert(ret >= 0);
@@ -670,7 +631,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
unsigned int max;
char ifname[IF_NAMESIZE];
@@ -692,34 +654,18 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa =
- (priv->hw_csum ?
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM) :
- 0) |
- (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
- DEV_RX_OFFLOAD_TIMESTAMP;
-
- if (!priv->mps)
- info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
- if (priv->hw_csum)
- info->tx_offload_capa |=
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
- if (priv->tso)
- info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
- if (priv->tunnel_en)
- info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ info->rx_queue_offload_capa =
+ mlx5_priv_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) |
+ info->rx_queue_offload_capa);
+ info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
- priv->reta_idx_n : priv->ind_table_max_size;
+ priv->reta_idx_n : config->ind_table_max_size;
info->hash_key_size = priv->rss_conf.rss_key_len;
info->speed_capa = priv->link_speed_capa;
+ info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
priv_unlock(priv);
}
@@ -761,7 +707,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
};
@@ -827,7 +773,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
struct ifreq ifr;
struct rte_eth_link dev_link;
@@ -913,25 +859,131 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
}
/**
- * DPDK callback to retrieve physical link information.
+ * Enable receiving and transmitting traffic.
*
- * @param dev
- * Pointer to Ethernet device structure.
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_link_start(struct priv *priv)
+{
+ struct rte_eth_dev *dev = priv->dev;
+ int err;
+
+ dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
+ dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
+ err = priv_dev_traffic_enable(priv, dev);
+ if (err)
+ ERROR("%p: error occurred while configuring control flows: %s",
+ (void *)priv, strerror(err));
+ err = priv_flow_start(priv, &priv->flows);
+ if (err)
+ ERROR("%p: error occurred while configuring flows: %s",
+ (void *)priv, strerror(err));
+}
+
+/**
+ * Disable receiving and transmitting traffic.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_link_stop(struct priv *priv)
+{
+ struct rte_eth_dev *dev = priv->dev;
+
+ priv_flow_stop(priv, &priv->flows);
+ priv_dev_traffic_disable(priv, dev);
+ dev->rx_pkt_burst = removed_rx_burst;
+ dev->tx_pkt_burst = removed_tx_burst;
+}
+
+/**
+ * Retrieve physical link information and update rx/tx_pkt_burst callbacks
+ * accordingly.
+ *
+ * @param priv
+ * Pointer to private structure.
* @param wait_to_complete
* Wait for request completion (ignored).
*/
int
-mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+priv_link_update(struct priv *priv, int wait_to_complete)
{
+ struct rte_eth_dev *dev = priv->dev;
struct utsname utsname;
int ver[3];
+ int ret;
+ struct rte_eth_link dev_link = dev->data->dev_link;
if (uname(&utsname) == -1 ||
sscanf(utsname.release, "%d.%d.%d",
&ver[0], &ver[1], &ver[2]) != 3 ||
KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
- return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
- return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+ ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+ else
+ ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+ /* If lsc interrupt is disabled, should always be ready for traffic. */
+ if (!dev->data->dev_conf.intr_conf.lsc) {
+ priv_link_start(priv);
+ return ret;
+ }
+ /* Re-select burst callbacks only if link status has been changed. */
+ if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
+ if (dev->data->dev_link.link_status == ETH_LINK_UP)
+ priv_link_start(priv);
+ else
+ priv_link_stop(priv);
+ }
+ return ret;
+}
+
+/**
+ * Querying the link status till it changes to the desired state.
+ * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param status
+ * Link desired status.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+priv_force_link_status_change(struct priv *priv, int status)
+{
+ int try = 0;
+
+ while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) {
+ priv_link_update(priv, 0);
+ if (priv->dev->data->dev_link.link_status == status)
+ return 0;
+ try++;
+ sleep(1);
+ }
+ return -EAGAIN;
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion (ignored).
+ */
+int
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ priv_lock(priv);
+ ret = priv_link_update(priv, wait_to_complete);
+ priv_unlock(priv);
+ return ret;
}
/**
@@ -952,9 +1004,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
uint16_t kern_mtu;
int ret = 0;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
priv_lock(priv);
ret = priv_get_mtu(priv, &kern_mtu);
if (ret)
@@ -1002,9 +1051,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
};
int ret;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
ifr.ifr_data = (void *)&ethpause;
priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
@@ -1053,9 +1099,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
};
int ret;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
ifr.ifr_data = (void *)&ethpause;
ethpause.autoneg = fc_conf->autoneg;
if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
@@ -1150,7 +1193,7 @@ priv_link_status_update(struct priv *priv)
{
struct rte_eth_link *link = &priv->dev->data->dev_link;
- mlx5_link_update(priv->dev, 0);
+ priv_link_update(priv, 0);
if (((link->link_speed == 0) && link->link_status) ||
((link->link_speed != 0) && !link->link_status)) {
/*
@@ -1191,7 +1234,7 @@ priv_dev_status_handler(struct priv *priv)
/* Read all message and acknowledge them. */
for (;;) {
- if (ibv_get_async_event(priv->ctx, &event))
+ if (mlx5_glue->get_async_event(priv->ctx, &event))
break;
if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
event.event_type == IBV_EVENT_PORT_ERR) &&
@@ -1203,7 +1246,7 @@ priv_dev_status_handler(struct priv *priv)
else
DEBUG("event type %d on port %d not handled",
event.event_type, event.element.port_num);
- ibv_ack_async_event(&event);
+ mlx5_glue->ack_async_event(&event);
}
if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
if (priv_link_status_update(priv))
@@ -1224,14 +1267,17 @@ mlx5_dev_link_status_handler(void *arg)
struct priv *priv = dev->data->dev_private;
int ret;
- priv_lock(priv);
- assert(priv->pending_alarm == 1);
+ while (!priv_trylock(priv)) {
+ /* Alarm is being canceled. */
+ if (priv->pending_alarm == 0)
+ return;
+ rte_pause();
+ }
priv->pending_alarm = 0;
ret = priv_link_status_update(priv);
priv_unlock(priv);
if (!ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
- NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
/**
@@ -1253,11 +1299,9 @@ mlx5_dev_interrupt_handler(void *cb_arg)
events = priv_dev_status_handler(priv);
priv_unlock(priv);
if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
- NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL,
- NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
}
/**
@@ -1295,9 +1339,10 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
if (priv->primary_socket)
rte_intr_callback_unregister(&priv->intr_handle_socket,
mlx5_dev_handler_socket, dev);
- if (priv->pending_alarm)
+ if (priv->pending_alarm) {
+ priv->pending_alarm = 0;
rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
- priv->pending_alarm = 0;
+ }
priv->intr_handle.fd = 0;
priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
priv->intr_handle_socket.fd = 0;
@@ -1317,7 +1362,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
{
int rc, flags;
- assert(!mlx5_is_secondary());
assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
@@ -1348,8 +1392,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
*
* @param priv
* Pointer to private data structure.
- * @param dev
- * Pointer to rte_eth_dev structure.
* @param up
* Nonzero for link up, otherwise link down.
*
@@ -1357,24 +1399,9 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
* 0 on success, errno value on failure.
*/
static int
-priv_dev_set_link(struct priv *priv, struct rte_eth_dev *dev, int up)
+priv_dev_set_link(struct priv *priv, int up)
{
- int err;
-
- if (up) {
- err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
- if (err)
- return err;
- priv_dev_select_tx_function(priv, dev);
- priv_dev_select_rx_function(priv, dev);
- } else {
- err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
- if (err)
- return err;
- dev->rx_pkt_burst = removed_rx_burst;
- dev->tx_pkt_burst = removed_tx_burst;
- }
- return 0;
+ return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP);
}
/**
@@ -1393,7 +1420,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
int err;
priv_lock(priv);
- err = priv_dev_set_link(priv, dev, 0);
+ err = priv_dev_set_link(priv, 0);
priv_unlock(priv);
return err;
}
@@ -1414,7 +1441,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
int err;
priv_lock(priv);
- err = priv_dev_set_link(priv, dev, 1);
+ err = priv_dev_set_link(priv, 1);
priv_unlock(priv);
return err;
}
@@ -1426,32 +1453,44 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
* Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * Pointer to selected Tx burst function.
*/
-void
-priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+eth_tx_burst_t
+priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
{
+ eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+ struct mlx5_dev_config *config = &priv->config;
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO));
+ int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
+
assert(priv != NULL);
- assert(dev != NULL);
- dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
- if (priv_check_vec_tx_support(priv) > 0) {
- if (priv_check_raw_vec_tx_support(priv) > 0)
- dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
+ if (vlan_insert || tso)
+ return tx_pkt_burst;
+ if (config->mps == MLX5_MPW_ENHANCED) {
+ if (priv_check_vec_tx_support(priv, dev) > 0) {
+ if (priv_check_raw_vec_tx_support(priv, dev) > 0)
+ tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
- dev->tx_pkt_burst = mlx5_tx_burst_vec;
+ tx_pkt_burst = mlx5_tx_burst_vec;
DEBUG("selected Enhanced MPW TX vectorized function");
} else {
- dev->tx_pkt_burst = mlx5_tx_burst_empw;
+ tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
- } else if (priv->mps && priv->txq_inline) {
- dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+ } else if (config->mps && (config->txq_inline > 0)) {
+ tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
- } else if (priv->mps) {
- dev->tx_pkt_burst = mlx5_tx_burst_mpw;
+ } else if (config->mps) {
+ tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
+ return tx_pkt_burst;
}
/**
@@ -1461,16 +1500,39 @@ priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
* Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * Pointer to selected Rx burst function.
*/
-void
-priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev)
+eth_rx_burst_t
+priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
{
+ eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
+
assert(priv != NULL);
- assert(dev != NULL);
if (priv_check_vec_rx_support(priv) > 0) {
- dev->rx_pkt_burst = mlx5_rx_burst_vec;
+ rx_pkt_burst = mlx5_rx_burst_vec;
DEBUG("selected RX vectorized function");
- } else {
- dev->rx_pkt_burst = mlx5_rx_burst;
}
+ return rx_pkt_burst;
+}
+
+/**
+ * Check if mlx5 device was removed.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 1 when device is removed, otherwise 0.
+ */
+int
+mlx5_is_removed(struct rte_eth_dev *dev)
+{
+ struct ibv_device_attr device_attr;
+ struct priv *priv = dev->data->dev_private;
+
+ if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
+ return 1;
+ return 0;
}