author     Luca Boccassi <luca.boccassi@gmail.com>  2018-09-03 10:46:47 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>  2018-09-03 10:47:29 +0100
commit     6e7cbd63706f3435b9d9a2057a37db1da01db9a7 (patch)
tree       16c34356813477e4703a15f943b8ed665a39fb5f /drivers/net/mlx5
parent     e4df4d55df003957fc5afd7440e3d3192d7ce218 (diff)

New upstream version 17.11.4 (tag: upstream/17.11.4)

Change-Id: Icb6b9664e7c4adb85c087844abe6e54d6ec32db6
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/mlx5')
-rw-r--r--  drivers/net/mlx5/Makefile              | 47
-rw-r--r--  drivers/net/mlx5/mlx5.c                |  6
-rw-r--r--  drivers/net/mlx5/mlx5_defs.h           | 10
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c         | 26
-rw-r--r--  drivers/net/mlx5/mlx5_flow.c           | 40
-rw-r--r--  drivers/net/mlx5/mlx5_rxmode.c         | 16
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c           | 11
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h           |  2
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.h       |  4
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_neon.h  |  8
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_sse.h   |  8
-rw-r--r--  drivers/net/mlx5/mlx5_socket.c         |  6
-rw-r--r--  drivers/net/mlx5/mlx5_trigger.c        | 37
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c            |  2
14 files changed, 179 insertions(+), 44 deletions(-)
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index a3984eb9..c62ad118 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -145,7 +145,52 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q sh -- '$<' '$@' \
HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \
infiniband/verbs.h \
- enum IBV_FLOW_SPEC_ACTION_COUNT \
+ type 'struct ibv_counter_set_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_STATIC_ASSERT \
+ /usr/include/assert.h \
+ define static_assert \
$(AUTOCONF_OUTPUT)
# Create mlx5_autoconf.h or update it in case it differs from the new one.
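Each $Q sh block above runs buildtools/auto-config-h.sh, which probes the named header for a symbol (a type, a define, or an enum value) and emits a HAVE_* macro into mlx5_autoconf.h when the probe compiles. A hypothetical excerpt of the generated header on a host with recent ethtool headers but an OFED too old for counter sets (the exact text the script emits is an assumption):

/* mlx5_autoconf.h -- hypothetical generated content (format assumed). */
#define HAVE_SUPPORTED_40000baseKR4_Full 1
#define HAVE_SUPPORTED_40000baseCR4_Full 1
#define HAVE_SUPPORTED_40000baseSR4_Full 1
#define HAVE_SUPPORTED_40000baseLR4_Full 1
#define HAVE_SUPPORTED_56000baseKR4_Full 1
#define HAVE_SUPPORTED_56000baseCR4_Full 1
#define HAVE_SUPPORTED_56000baseSR4_Full 1
#define HAVE_SUPPORTED_56000baseLR4_Full 1
#define HAVE_STATIC_ASSERT 1
/* HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT absent: probe did not compile. */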
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 10ce3359..36f3a056 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -345,6 +345,10 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.dev_set_link_down = mlx5_set_link_down,
.dev_set_link_up = mlx5_set_link_up,
.dev_close = mlx5_dev_close,
+ .promiscuous_enable = mlx5_promiscuous_enable,
+ .promiscuous_disable = mlx5_promiscuous_disable,
+ .allmulticast_enable = mlx5_allmulticast_enable,
+ .allmulticast_disable = mlx5_allmulticast_disable,
.link_update = mlx5_link_update,
.stats_get = mlx5_stats_get,
.stats_reset = mlx5_stats_reset,
@@ -680,7 +684,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
int i;
struct mlx5dv_context attrs_out;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- struct ibv_counter_set_description cs_desc;
+ struct ibv_counter_set_description cs_desc = { .counter_type = 0 };
#endif
assert(pci_drv == &mlx5_driver);
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index d7063576..9c64bb33 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -92,10 +92,11 @@
#define MLX5_VPMD_MIN_TXQS 4
/* Threshold of buffer replenishment for vectorized Rx. */
-#define MLX5_VPMD_RXQ_RPLNSH_THRESH 64U
+#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
+ (RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))
/* Maximum size of burst for vectorized Rx. */
-#define MLX5_VPMD_RX_MAX_BURST MLX5_VPMD_RXQ_RPLNSH_THRESH
+#define MLX5_VPMD_RX_MAX_BURST 64U
/*
* Maximum size of burst for vectorized Tx. This is related to the maximum size
@@ -123,4 +124,9 @@
*/
#define MLX5_UAR_OFFSET (1ULL << 32)
+/* Definition of static_assert found in /usr/include/assert.h */
+#ifndef HAVE_STATIC_ASSERT
+#define static_assert _Static_assert
+#endif
+
#endif /* RTE_PMD_MLX5_DEFS_H_ */
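The replenishment threshold is no longer a fixed 64: it now scales with the Rx ring, taking a quarter of the ring size capped at MLX5_VPMD_RX_MAX_BURST, so small rings are no longer held to a 64-buffer minimum. A minimal standalone sketch of the arithmetic (a simplified RTE_MIN stand-in is included so it compiles outside DPDK):

#include <stdio.h>

#define RTE_MIN(a, b) ((a) < (b) ? (a) : (b))  /* simplified stand-in */
#define MLX5_VPMD_RX_MAX_BURST 64U
#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
	(RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))

int main(void)
{
	/* 1024-entry ring: 1024 / 4 = 256, capped at 64. */
	printf("%u\n", MLX5_VPMD_RXQ_RPLNSH_THRESH(1024)); /* 64 */
	/* 128-entry ring: 128 / 4 = 32, below the cap. */
	printf("%u\n", MLX5_VPMD_RXQ_RPLNSH_THRESH(128));  /* 32 */
	return 0;
}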
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5edc7511..e441483a 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -66,6 +66,32 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/* Supported speed values found in /usr/include/linux/ethtool.h */
+#ifndef HAVE_SUPPORTED_40000baseKR4_Full
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseCR4_Full
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseSR4_Full
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseLR4_Full
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseKR4_Full
+#define SUPPORTED_56000baseKR4_Full (1 << 27)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseCR4_Full
+#define SUPPORTED_56000baseCR4_Full (1 << 28)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseSR4_Full
+#define SUPPORTED_56000baseSR4_Full (1 << 29)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseLR4_Full
+#define SUPPORTED_56000baseLR4_Full (1 << 30)
+#endif
+
/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
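These fallbacks only matter when building against old kernel headers; the bit positions match /usr/include/linux/ethtool.h. A hedged sketch of how such capability bits are typically folded into link-speed flags when parsing an ethtool "supported" word (this fragment is illustrative, not the driver's exact code, and the LINK_SPEED_* flags are local stand-ins for DPDK's ETH_LINK_SPEED_* values):

#include <stdint.h>
#include <stdio.h>

/* Bit values as in /usr/include/linux/ethtool.h (and the fallback above). */
#define SUPPORTED_40000baseKR4_Full (1 << 23)
#define SUPPORTED_40000baseCR4_Full (1 << 24)
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#define SUPPORTED_40000baseLR4_Full (1 << 26)
#define SUPPORTED_56000baseKR4_Full (1 << 27)
#define SUPPORTED_56000baseCR4_Full (1 << 28)
#define SUPPORTED_56000baseSR4_Full (1 << 29)
#define SUPPORTED_56000baseLR4_Full (1 << 30)

/* Stand-ins for DPDK's ETH_LINK_SPEED_* flags (values illustrative). */
#define LINK_SPEED_40G (1u << 0)
#define LINK_SPEED_56G (1u << 1)

/* Illustrative: map an ethtool capability word to speed capabilities. */
static uint32_t
speed_capa_from_ethtool(uint32_t supported)
{
	uint32_t capa = 0;

	if (supported & (SUPPORTED_40000baseKR4_Full |
			 SUPPORTED_40000baseCR4_Full |
			 SUPPORTED_40000baseSR4_Full |
			 SUPPORTED_40000baseLR4_Full))
		capa |= LINK_SPEED_40G;
	if (supported & (SUPPORTED_56000baseKR4_Full |
			 SUPPORTED_56000baseCR4_Full |
			 SUPPORTED_56000baseSR4_Full |
			 SUPPORTED_56000baseLR4_Full))
		capa |= LINK_SPEED_56G;
	return capa;
}

int main(void)
{
	printf("0x%x\n", speed_capa_from_ethtool(SUPPORTED_40000baseCR4_Full));
	return 0;
}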
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 57b654c3..1822c2bd 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -63,12 +63,6 @@
#define MLX5_IPV6 6
#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-struct ibv_counter_set_init_attr {
- int dummy;
-};
-struct ibv_flow_spec_counter_action {
- int dummy;
-};
struct ibv_counter_set {
int dummy;
};
@@ -885,10 +879,17 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
sizeof(struct ibv_flow_spec_action_tag);
}
if (parser->count) {
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
for (i = 0; i != hash_rxq_init_n; ++i)
parser->queue[i].offset += size;
+#else
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ items,
+ "Count action supported only on "
+ "MLNX_OFED_4.2 and above");
+#endif
}
return 0;
exit_item_not_supported:
@@ -2959,10 +2960,21 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
struct ibv_spec_header *flow_h;
void *flow_spec;
unsigned int specs_n;
- unsigned int queue_id = parser.drop ? HASH_RXQ_ETH :
- parser.layer;
+ unsigned int queue_id;
- attr = parser.queue[queue_id].ibv_attr;
+ /*
+ * Search for a non-empty ibv_attr. There should be only one
+ * because no RSS action is allowed for FDIR. This should have
+ * been referenced directly by parser.layer but due to a bug in
+ * mlx5_flow_convert() as of v17.11.4, parser.layer isn't
+ * correct. This bug will have to be addressed later.
+ */
+ for (queue_id = 0; queue_id != hash_rxq_init_n; ++queue_id) {
+ attr = parser.queue[queue_id].ibv_attr;
+ if (attr)
+ break;
+ }
+ assert(!parser.drop || queue_id == HASH_RXQ_ETH);
flow_attr = flow->frxq[queue_id].ibv_attr;
/* Compare first the attributes. */
if (!flow_attr ||
@@ -2991,16 +3003,20 @@ wrong_flow:
/* The flow does not match. */
continue;
}
- ret = rte_errno; /* Save rte_errno before cleanup. */
if (flow)
mlx5_flow_list_destroy(dev, &priv->flows, flow);
exit:
+ if (ret)
+ ret = rte_errno; /* Save rte_errno before cleanup. */
for (i = 0; i != hash_rxq_init_n; ++i) {
if (parser.queue[i].ibv_attr)
rte_free(parser.queue[i].ibv_attr);
}
- rte_errno = ret; /* Restore rte_errno. */
- return -rte_errno;
+ if (ret) {
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
+ }
+ return 0;
}
/**
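The reworked exit path above follows the PMD's errno convention: rte_errno is captured before the cleanup loop (cleanup calls may clobber it) and restored afterwards, and the success path now returns 0 instead of propagating a stale value. A standalone sketch of the idiom, assuming hypothetical do_op() and cleanup_resources() helpers, with rte_errno emulated by a plain global:

#include <errno.h>
#include <stdio.h>

static int rte_errno;              /* stand-in for DPDK's per-lcore rte_errno */

static int do_op(void)             /* hypothetical operation that may fail */
{
	rte_errno = EINVAL;
	return -1;
}

static void cleanup_resources(void) /* hypothetical; may clobber rte_errno */
{
	rte_errno = 0;
}

static int do_op_with_cleanup(void)
{
	int ret = do_op();

	if (ret)
		ret = rte_errno;    /* save before cleanup */
	cleanup_resources();        /* cleanup may overwrite rte_errno */
	if (ret) {
		rte_errno = ret;    /* restore */
		return -rte_errno;  /* negative errno convention */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", do_op_with_cleanup()); /* -22 (EINVAL preserved) */
	return 0;
}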
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 23eae7c1..617138c1 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -60,9 +60,17 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret;
dev->data->promiscuous = 1;
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable promiscuous mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
ret = mlx5_traffic_restart(dev);
if (ret)
DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s",
@@ -96,9 +104,17 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret;
dev->data->all_multicast = 1;
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable allmulticast mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
ret = mlx5_traffic_restart(dev);
if (ret)
DRV_LOG(ERR, "port %u cannot enable allmulticast mode: %s",
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2e003aea..1bbce3b7 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -740,6 +740,8 @@ next_wqe:
/* Check whether completion threshold has been reached. */
comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
last_wqe->ctrl2 = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
@@ -951,6 +953,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (comp >= MLX5_TX_COMP_THRESH) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
@@ -1243,6 +1247,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
if (comp >= MLX5_TX_COMP_THRESH) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
@@ -1370,8 +1376,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
@@ -1584,13 +1588,14 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
(1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
txq->mpw_comp = txq->wqe_ci;
- txq->cq_pi++;
} else {
txq->elts_comp += j;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 29019f79..dac3b39f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -185,7 +185,9 @@ struct mlx5_txq_data {
uint16_t elts_comp; /* Counter since last completion request. */
uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
+#ifndef NDEBUG
uint16_t cq_pi; /* Producer index for completion queue. */
+#endif
uint16_t wqe_ci; /* Consumer index for work queue. */
uint16_t wqe_pi; /* Producer index for work queue. */
uint16_t elts_n:4; /* (*elts)[] length (in log2). */
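With this change cq_pi exists only in debug builds: every update to it happens inside an assert() (the cq_pi++ sites in mlx5_rxtx.c and the vectorized Tx paths), and assert() compiles to nothing under NDEBUG, so the field and its maintenance cost vanish together in release builds. A reduced standalone sketch of the pattern (simplified struct, not the driver's actual layout):

#include <assert.h>
#include <stdint.h>

struct txq_sketch {
	uint16_t cqe_n; /* log2 of CQ size */
	uint16_t cq_ci; /* consumer index */
#ifndef NDEBUG
	uint16_t cq_pi; /* producer index, tracked only for this check */
#endif
};

static void request_completion(struct txq_sketch *txq)
{
	/*
	 * Debug builds: verify a CQE slot is free, then advance cq_pi.
	 * Release builds (NDEBUG): the whole expression, including the
	 * cq_pi++ side effect, disappears along with the field.
	 */
	assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
}

int main(void)
{
	struct txq_sketch q = { .cqe_n = 6, .cq_ci = 0 };

	request_completion(&q); /* passes while CQEs remain outstanding */
	return 0;
}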
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 1f08ed0b..d504e2ae 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -106,9 +106,9 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
unsigned int i;
- assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+ assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
- assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+ assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index cf424778..e748615e 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -202,10 +202,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
- ++txq->cq_pi;
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -304,9 +305,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
txq->elts_comp += pkts_n;
} else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request a completion. */
txq->elts_comp = 0;
- ++txq->cq_pi;
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -754,7 +756,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 79314292..7e8c9b88 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -203,10 +203,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
- ++txq->cq_pi;
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -305,9 +306,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
txq->elts_comp += pkts_n;
} else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request a completion. */
txq->elts_comp = 0;
- ++txq->cq_pi;
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -735,7 +737,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 7ab31000..97f90c07 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -62,6 +62,12 @@ mlx5_socket_init(struct rte_eth_dev *dev)
int flags;
/*
+ * Close the last socket that was used to communicate
+ * with the secondary process
+ */
+ if (priv->primary_socket)
+ mlx5_socket_uninit(dev);
+ /*
* Initialise the socket to communicate with the secondary
* process.
*/
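Guarding mlx5_socket_init() against an already-open priv->primary_socket makes re-initialization idempotent: a leftover descriptor from a previous start is closed before a new one is bound. A generic sketch of the pattern, where the field and uninit call follow the hunk above and the rest is illustrative:

#include <unistd.h>

struct priv_sketch {
	int primary_socket;            /* 0 when closed, as in the PMD */
};

static void socket_uninit(struct priv_sketch *priv)
{
	close(priv->primary_socket);
	priv->primary_socket = 0;
}

static int socket_init(struct priv_sketch *priv)
{
	/* Close the socket left over from a previous init, if any. */
	if (priv->primary_socket)
		socket_uninit(priv);
	/* ... create, bind and listen on a fresh UNIX socket here ... */
	return 0;
}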
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 214543f8..9a1d6f95 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -73,7 +73,6 @@ mlx5_txq_start(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- /* Add memory regions to Tx queues. */
for (i = 0; i != priv->txqs_n; ++i) {
unsigned int idx = 0;
struct mlx5_mr *mr;
@@ -94,12 +93,17 @@ mlx5_txq_start(struct rte_eth_dev *dev)
}
}
ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
- if (ret)
+ if (ret) {
+ /* Adjust index for rollback. */
+ i = priv->txqs_n - 1;
goto error;
+ }
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_txq_stop(dev);
+ do {
+ mlx5_txq_release(dev, i);
+ } while (i-- != 0);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -151,7 +155,9 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_rxq_stop(dev);
+ do {
+ mlx5_rxq_release(dev, i);
+ } while (i-- != 0);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
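The rollback loops above release queue i (the one that failed, or the last one started) down to queue 0 inclusive: the post-decrement tests i before decrementing, so index 0 is still released and the loop then stops. A standalone sketch of the iteration order, with release() standing in for mlx5_txq_release()/mlx5_rxq_release():

#include <stdio.h>

static void release(unsigned int idx)  /* placeholder for the release call */
{
	printf("release %u\n", idx);
}

int main(void)
{
	unsigned int i = 3;            /* index reached when the error occurred */

	/* Prints 3, 2, 1, 0: the test runs before the decrement. */
	do {
		release(i);
	} while (i-- != 0);
	return 0;
}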
@@ -174,28 +180,28 @@ mlx5_dev_start(struct rte_eth_dev *dev)
struct mlx5_mr *mr = NULL;
int ret;
- dev->data->dev_started = 1;
+ DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
ret = mlx5_flow_create_drop_queue(dev);
if (ret) {
DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
goto error;
}
- DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues",
- dev->data->port_id);
rte_mempool_walk(mlx5_mp2mr_iter, priv);
ret = mlx5_txq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
- goto error;
+ return -rte_errno;
}
ret = mlx5_rxq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
- goto error;
+ mlx5_txq_stop(dev);
+ return -rte_errno;
}
+ dev->data->dev_started = 1;
ret = mlx5_rx_intr_vec_enable(dev);
if (ret) {
DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
@@ -254,8 +260,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
dev->tx_pkt_burst = removed_tx_burst;
rte_wmb();
usleep(1000 * priv->rxqs_n);
- DRV_LOG(DEBUG, "port %u cleaning up and destroying hash Rx queues",
- dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
mlx5_flow_stop(dev, &priv->flows);
mlx5_traffic_disable(dev);
mlx5_rx_intr_vec_disable(dev);
@@ -336,9 +341,8 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
struct rte_flow_item_vlan vlan_spec = {
.tci = rte_cpu_to_be_16(vlan),
};
- struct rte_flow_item_vlan vlan_mask = {
- .tci = 0xffff,
- };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
&vlan_spec, &vlan_mask);
@@ -375,9 +379,8 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
struct rte_flow_item_vlan vlan_spec = {
.tci = rte_cpu_to_be_16(vlan),
};
- struct rte_flow_item_vlan vlan_mask = {
- .tci = 0xffff,
- };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
ret = mlx5_ctrl_flow_vlan(dev, &unicast,
&unicast_mask,
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index a5c6b585..760ac92d 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -467,7 +467,9 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
(volatile struct mlx5_cqe (*)[])
(uintptr_t)cq_info.buf;
txq_data->cq_ci = 0;
+#ifndef NDEBUG
txq_data->cq_pi = 0;
+#endif
txq_data->wqe_ci = 0;
txq_data->wqe_pi = 0;
txq_ibv->qp = tmpl.qp;