Diffstat (limited to 'drivers/net')
 drivers/net/bnx2x/bnx2x.c                    |  25
 drivers/net/bnx2x/bnx2x_logs.h               |  35
 drivers/net/bonding/rte_eth_bond_pmd.c       |  11
 drivers/net/cxgbe/cxgbe_filter.c             |   7
 drivers/net/cxgbe/cxgbe_flow.c               |  16
 drivers/net/cxgbe/cxgbe_flow.h               |   5
 drivers/net/cxgbe/cxgbe_main.c               |  18
 drivers/net/cxgbe/cxgbevf_ethdev.c           |  12
 drivers/net/cxgbe/cxgbevf_main.c             |   6
 drivers/net/cxgbe/mps_tcam.c                 |   4
 drivers/net/e1000/base/e1000_i210.c          |   1
 drivers/net/ena/ena_ethdev.c                 |  11
 drivers/net/enic/enic_rxtx.c                 |  19
 drivers/net/fm10k/fm10k_ethdev.c             |  11
 drivers/net/i40e/i40e_ethdev.c               |   5
 drivers/net/ixgbe/ixgbe_ethdev.c             |  37
 drivers/net/mlx5/Makefile                    |   4
 drivers/net/mlx5/mlx5.c                      |   7
 drivers/net/mlx5/mlx5_flow.c                 |  19
 drivers/net/mlx5/mlx5_flow_dv.c              | 270
 drivers/net/mlx5/mlx5_flow_tcf.c             | 100
 drivers/net/mlx5/mlx5_flow_verbs.c           |  25
 drivers/net/mlx5/mlx5_utils.h                |  10
 drivers/net/octeontx/base/octeontx_pki_var.h |  13
 drivers/net/octeontx/octeontx_ethdev.c       |   3
 drivers/net/qede/qede_rxtx.c                 |  30
 drivers/net/qede/qede_rxtx.h                 |   5
 drivers/net/sfc/base/efx.h                   |   3
 drivers/net/tap/rte_eth_tap.c                |   3
 drivers/net/tap/tap_netlink.c                |   3
 drivers/net/thunderx/nicvf_rxtx.c            |  10
 drivers/net/virtio/virtio_pci.c              |  10
 drivers/net/virtio/virtio_user_ethdev.c      |   2
 drivers/net/vmxnet3/vmxnet3_ethdev.c         |   9
 34 files changed, 445 insertions(+), 304 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 27975936..a6d2687a 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -199,8 +199,12 @@ static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
uint32_t hw_lock_control_reg;
int cnt;
+#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if (resource)
PMD_INIT_FUNC_TRACE(sc);
+#else
+ PMD_INIT_FUNC_TRACE(sc);
+#endif
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -248,8 +252,12 @@ static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
int func = SC_FUNC(sc);
uint32_t hw_lock_control_reg;
+#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if (resource)
PMD_INIT_FUNC_TRACE(sc);
+#else
+ PMD_INIT_FUNC_TRACE(sc);
+#endif
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -7041,7 +7049,7 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc)
}
bnx2x_link_report(sc);
} else {
- bnx2x_link_report(sc);
+ bnx2x_link_report_locked(sc);
bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
}
}
@@ -9388,6 +9396,8 @@ static int bnx2x_prev_unload(struct bnx2x_softc *sc)
uint32_t fw, hw_lock_reg, hw_lock_val;
uint32_t rc = 0;
+ PMD_INIT_FUNC_TRACE(sc);
+
/*
* Clear HW from errors which may have resulted from an interrupted
* DMAE transaction.
@@ -9395,22 +9405,23 @@ static int bnx2x_prev_unload(struct bnx2x_softc *sc)
bnx2x_prev_interrupted_dmae(sc);
/* Release previously held locks */
- if (SC_FUNC(sc) <= 5)
- hw_lock_reg = (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8);
- else
- hw_lock_reg =
- (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
+ hw_lock_reg = (SC_FUNC(sc) <= 5) ?
+ (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
+ (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
hw_lock_val = (REG_RD(sc, hw_lock_reg));
if (hw_lock_val) {
if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
+ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held NVRAM lock\n");
REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
(MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
}
+ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held HW lock\n");
REG_WR(sc, hw_lock_reg, 0xffffffff);
}
if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
+ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held ALR\n");
REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
}
@@ -9740,6 +9751,8 @@ int bnx2x_attach(struct bnx2x_softc *sc)
sc->fw_seq =
(SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
+ PMD_DRV_LOG(DEBUG, sc, "prev unload fw_seq 0x%04x",
+ sc->fw_seq);
bnx2x_prev_unload(sc);
}
diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h
index 753bccdf..f0cf69c1 100644
--- a/drivers/net/bnx2x/bnx2x_logs.h
+++ b/drivers/net/bnx2x/bnx2x_logs.h
@@ -10,43 +10,40 @@
extern int bnx2x_logtype_init;
#define PMD_INIT_LOG(level, sc, fmt, args...) \
- RTE_LOG(level, PMD, \
+ rte_log(RTE_LOG_ ## level, bnx2x_logtype_init, \
"[bnx2x_pmd: %s] %s() " fmt "\n", (sc)->devinfo.name, __func__, ##args)
#define PMD_INIT_FUNC_TRACE(sc) PMD_INIT_LOG(DEBUG, sc, " >>")
+extern int bnx2x_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, sc, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \
+ "[%s:%d(%s)] " fmt, __func__, __LINE__, \
+ (sc)->devinfo.name ? (sc)->devinfo.name : "", ## args)
+
+#define PMD_DRV_LOG(level, sc, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, sc, fmt "\n", ## args)
+
#ifdef RTE_LIBRTE_BNX2X_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
#endif
#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
#endif
-#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX_FREE
-#define PMD_TX_FREE_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
-#endif
-
-extern int bnx2x_logtype_driver;
-#define PMD_DRV_LOG_RAW(level, sc, fmt, args...) \
- RTE_LOG(level, PMD, "[%s:%d(%s)] " fmt, __func__, __LINE__, \
- (sc)->devinfo.name ? (sc)->devinfo.name : "", ## args)
-
-#define PMD_DRV_LOG(level, sc, fmt, args...) \
- PMD_DRV_LOG_RAW(level, sc, fmt "\n", ## args)
-
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
#define PMD_DEBUG_PERIODIC_LOG(level, sc, fmt, args...) \
- RTE_LOG(level, PMD, "%s(%s): " fmt "\n", __func__, \
+ rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \
+ "%s(%s): " fmt "\n", __func__, \
(sc)->devinfo.name ? (sc)->devinfo.name : "", ## args)
#else
#define PMD_DEBUG_PERIODIC_LOG(level, sc, fmt, args...) do { } while (0)
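These macros switch from the static PMD log type to per-driver dynamic log types. A minimal sketch of how such types are usually registered by a PMD, assuming the common RTE_INIT constructor pattern (the log-type name strings below are illustrative, not taken from this patch):

	#include <rte_log.h>

	int bnx2x_logtype_init;
	int bnx2x_logtype_driver;

	RTE_INIT(bnx2x_register_logs)
	{
		/* Register named log types so verbosity can be tuned at
		 * runtime (e.g. --log-level=pmd.net.bnx2x.*:debug). */
		bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init");
		if (bnx2x_logtype_init >= 0)
			rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
		bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver");
		if (bnx2x_logtype_driver >= 0)
			rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);
	}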
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 1a6d8e4d..2661620a 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -2181,9 +2181,14 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
internals->link_status_polling_enabled = 0;
for (i = 0; i < internals->slave_count; i++) {
- internals->slaves[i].last_link_status = 0;
- rte_eth_dev_stop(internals->slaves[i].port_id);
- deactivate_slave(eth_dev, internals->slaves[i].port_id);
+ uint16_t slave_id = internals->slaves[i].port_id;
+ if (find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) !=
+ internals->active_slave_count) {
+ internals->slaves[i].last_link_status = 0;
+ rte_eth_dev_stop(slave_id);
+ deactivate_slave(eth_dev, slave_id);
+ }
}
}
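The guard above relies on the convention that find_slave_by_id() returns the array length when the id is absent, so inequality means "still active". A minimal sketch of that contract (the helper name mirrors the one used above; the body is illustrative):

	static inline uint16_t
	find_slave_by_id(const uint16_t *slaves, uint16_t count, uint16_t slave_id)
	{
		uint16_t i;

		for (i = 0; i < count; i++)
			if (slaves[i] == slave_id)
				break;
		return i; /* == count when slave_id is not in the array */
	}

With this contract, only slaves that are currently active get stopped and deactivated, instead of unconditionally stopping every configured slave.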
diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index ef1102be..3a7912e4 100644
--- a/drivers/net/cxgbe/cxgbe_filter.c
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -263,8 +263,8 @@ static u64 hash_filter_ntuple(const struct filter_entry *f)
u64 ntuple = 0;
u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
- if (tp->port_shift >= 0)
- ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
+ if (tp->port_shift >= 0 && f->fs.mask.iport)
+ ntuple |= (u64)f->fs.val.iport << tp->port_shift;
if (tp->protocol_shift >= 0) {
if (!f->fs.val.proto)
@@ -278,9 +278,6 @@ static u64 hash_filter_ntuple(const struct filter_entry *f)
if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
- if (ntuple != tp->hash_filter_mask)
- return 0;
-
return ntuple;
}
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 54ec7e59..4deaff8f 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -7,14 +7,12 @@
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
- if (!((fs)->val.elem || (fs)->mask.elem)) { \
- (fs)->val.elem = (__v); \
- (fs)->mask.elem = (__m); \
- } else { \
+ if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
- NULL, "a filter can be specified" \
- " only once"); \
- } \
+ NULL, "Redefined match item with" \
+ " different values found"); \
+ (fs)->val.elem = (__v); \
+ (fs)->mask.elem = (__m); \
} while (0)
#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
@@ -799,7 +797,7 @@ static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
/* Poll the FW for reply */
err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
- CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_MS,
CXGBE_FLOW_POLL_CNT,
&ctx.completion);
if (err) {
@@ -885,7 +883,7 @@ static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
/* Poll the FW for reply */
err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
- CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_MS,
CXGBE_FLOW_POLL_CNT,
&ctx.completion);
if (err) {
diff --git a/drivers/net/cxgbe/cxgbe_flow.h b/drivers/net/cxgbe/cxgbe_flow.h
index 718bf3d0..ec8e47ae 100644
--- a/drivers/net/cxgbe/cxgbe_flow.h
+++ b/drivers/net/cxgbe/cxgbe_flow.h
@@ -10,8 +10,9 @@
#include "mps_tcam.h"
#include "cxgbe.h"
-#define CXGBE_FLOW_POLL_US 10
-#define CXGBE_FLOW_POLL_CNT 10
+/* Max poll time is 100 * 100msec = 10 sec */
+#define CXGBE_FLOW_POLL_MS 100 /* 100 milliseconds */
+#define CXGBE_FLOW_POLL_CNT 100 /* Max number of times to poll */
struct chrte_fparse {
int (*fptr)(const void *mask, /* currently supported mask */
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 88dc851f..ec080e5d 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -157,18 +157,18 @@ out:
/**
* cxgbe_poll_for_completion: Poll rxq for completion
* @q: rxq to poll
- * @us: microseconds to delay
+ * @ms: milliseconds to delay
* @cnt: number of times to poll
* @c: completion to check for 'done' status
*
 * Polls the rxq for replies until completion is done or the count
* expires.
*/
-int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int ms,
unsigned int cnt, struct t4_completion *c)
{
unsigned int i;
- unsigned int work_done, budget = 4;
+ unsigned int work_done, budget = 32;
if (!c)
return -EINVAL;
@@ -181,7 +181,7 @@ int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
return 0;
}
t4_os_unlock(&c->lock);
- udelay(us);
+ rte_delay_ms(ms);
}
return -ETIMEDOUT;
}
@@ -1339,18 +1339,22 @@ inline bool force_linkup(struct adapter *adap)
int link_start(struct port_info *pi)
{
struct adapter *adapter = pi->adapter;
- int ret;
+ u64 conf_offloads;
unsigned int mtu;
+ int ret;
mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
(ETHER_HDR_LEN + ETHER_CRC_LEN);
+ conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;
+
/*
* We do not set address filters and promiscuity here, the stack does
* that step explicitly.
*/
- ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
- -1, 1, true);
+ ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
+ !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+ true);
if (ret == 0) {
ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
(u8 *)&pi->eth_dev->data->mac_addrs[0]);
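With the switch from udelay() to rte_delay_ms(), the completion wait becomes a millisecond-granularity poll bounded by CXGBE_FLOW_POLL_MS * CXGBE_FLOW_POLL_CNT = 100 ms * 100 = 10 s. A stripped-down sketch of the loop shape, assuming only a done flag (the real function also drains the rxq and takes the completion lock):

	#include <errno.h>
	#include <rte_cycles.h>

	#define CXGBE_FLOW_POLL_MS  100 /* values mirror the patch */
	#define CXGBE_FLOW_POLL_CNT 100

	static int poll_for_done(volatile const int *done)
	{
		unsigned int i;

		for (i = 0; i < CXGBE_FLOW_POLL_CNT; i++) {
			if (*done)
				return 0;
			rte_delay_ms(CXGBE_FLOW_POLL_MS);
		}
		return -ETIMEDOUT;
	}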
diff --git a/drivers/net/cxgbe/cxgbevf_ethdev.c b/drivers/net/cxgbe/cxgbevf_ethdev.c
index 3b32ca9d..a6458d53 100644
--- a/drivers/net/cxgbe/cxgbevf_ethdev.c
+++ b/drivers/net/cxgbe/cxgbevf_ethdev.c
@@ -177,6 +177,16 @@ out_free_adapter:
return err;
}
+static int eth_cxgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ /* Free up other ports and all resources */
+ cxgbe_close(adap);
+ return 0;
+}
+
static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
@@ -186,7 +196,7 @@ static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbevf_dev_uninit);
}
static struct rte_pci_driver rte_cxgbevf_pmd = {
diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c
index 6223e125..61bd8519 100644
--- a/drivers/net/cxgbe/cxgbevf_main.c
+++ b/drivers/net/cxgbe/cxgbevf_main.c
@@ -11,6 +11,7 @@
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"
+#include "mps_tcam.h"
/*
* Figure out how many Ports and Queue Sets we can support. This depends on
@@ -271,6 +272,11 @@ allocate_mac:
print_adapter_info(adapter);
print_port_info(adapter);
+ adapter->mpstcam = t4_init_mpstcam(adapter);
+ if (!adapter->mpstcam)
+ dev_warn(adapter,
+ "VF could not allocate mps tcam table. Continuing\n");
+
err = init_rss(adapter);
if (err)
goto out_free;
diff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c
index 02ec69a9..71c8070b 100644
--- a/drivers/net/cxgbe/mps_tcam.c
+++ b/drivers/net/cxgbe/mps_tcam.c
@@ -236,8 +236,6 @@ struct mpstcam_table *t4_init_mpstcam(struct adapter *adap)
void t4_cleanup_mpstcam(struct adapter *adap)
{
- if (adap->mpstcam) {
- t4_os_free(adap->mpstcam->entry);
+ if (adap->mpstcam)
t4_os_free(adap->mpstcam);
- }
}
diff --git a/drivers/net/e1000/base/e1000_i210.c b/drivers/net/e1000/base/e1000_i210.c
index 277331c4..c2abb43f 100644
--- a/drivers/net/e1000/base/e1000_i210.c
+++ b/drivers/net/e1000/base/e1000_i210.c
@@ -941,6 +941,7 @@ STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
if (ret_val != E1000_SUCCESS)
nvm_word = E1000_INVM_DEFAULT_AL;
tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+ phy_word = E1000_PHY_PLL_UNCONF;
for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
/* check current state directly from internal PHY */
e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index abe1e7bd..05a4fbe0 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -530,11 +530,6 @@ static void ena_close(struct rte_eth_dev *dev)
adapter);
/*
- * Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
- /*
* MAC is not allocated dynamically. Setting NULL should prevent from
* release of the resource in the rte_eth_dev_release_port().
*/
@@ -1666,6 +1661,12 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
(struct ether_addr *)adapter->mac_addr);
+ /*
+ * Pass the information to the rte_eth_dev_close() that it should also
+ * release the private port resources.
+ */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
adapter->drv_stats = rte_zmalloc("adapter stats",
sizeof(*adapter->drv_stats),
RTE_CACHE_LINE_SIZE);
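This hunk, like the mlx5 and vmxnet3 changes later in this set, moves RTE_ETH_DEV_CLOSE_REMOVE from the close callback to init time: rte_eth_dev_close() consults the flag, so setting it only inside the driver's own close handler is too late for ports torn down through other paths. A minimal sketch of the pattern, with a hypothetical init function:

	#include <rte_ethdev.h>

	static int my_eth_dev_init(struct rte_eth_dev *eth_dev)
	{
		/* Set at probe/init time: rte_eth_dev_close() will then
		 * also call rte_eth_dev_release_port() for this port. */
		eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
		return 0;
	}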
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 5189ee63..0aadd342 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -393,11 +393,22 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (i = 0; i != nb_pkts; i++) {
m = tx_pkts[i];
- if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
- rte_errno = EINVAL;
- return i;
- }
ol_flags = m->ol_flags;
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ } else {
+ uint16_t header_len;
+
+ header_len = m->l2_len + m->l3_len + m->l4_len;
+ if (m->tso_segsz + header_len > ENIC_TX_MAX_PKT_SIZE) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ }
+
if (ol_flags & wq->tx_offload_notsup_mask) {
rte_errno = ENOTSUP;
return i;
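For TSO, the aggregate pkt_len may legitimately exceed ENIC_TX_MAX_PKT_SIZE because the NIC resegments it; what must fit is one wire segment, i.e. tso_segsz plus the replicated headers. A worked sketch of the bound checked above (header sizes assumed for illustration):

	#include <stdint.h>
	#include <rte_mbuf.h>

	static int enic_tso_segment_fits(const struct rte_mbuf *m, uint32_t max_pkt)
	{
		/* e.g. 14 (l2) + 20 (l3) + 20 (l4) = 54 header bytes with
		 * tso_segsz = 1460 gives 1514-byte wire segments, no matter
		 * how large the aggregate pkt_len is. */
		uint16_t header_len = m->l2_len + m->l3_len + m->l4_len;

		return m->tso_segsz + header_len <= max_pkt;
	}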
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index c852022d..85fb6c5c 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -464,11 +464,6 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
return 0;
}
-/* fls = find last set bit = 32 minus the number of leading zeros */
-#ifndef fls
-#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
-#endif
-
static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
@@ -1030,8 +1025,8 @@ fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
nb_queue_pools = macvlan->nb_queue_pools;
- pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
- rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
+ pool_len = nb_queue_pools ? rte_fls_u32(nb_queue_pools - 1) : 0;
+ rss_len = rte_fls_u32(dev->data->nb_rx_queues - 1) - pool_len;
/* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
@@ -1042,7 +1037,7 @@ fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
/* Flow Director configurations, only queue number is valid. */
- dglortdec = fls(dev->data->nb_rx_queues - 1);
+ dglortdec = rte_fls_u32(dev->data->nb_rx_queues - 1);
dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
(hw->mac.dglort_map + GLORT_FD_Q_BASE);
FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
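rte_fls_u32() (rte_common.h) has the same contract as the removed local fls() macro: the 1-based position of the most significant set bit, or 0 for an input of 0. A few spot checks of the contract the dglort math above depends on:

	#include <assert.h>
	#include <rte_common.h>

	static void fls_contract(void)
	{
		assert(rte_fls_u32(0) == 0);
		assert(rte_fls_u32(1) == 1);
		assert(rte_fls_u32(7) == 3); /* 8 queues: fls(8 - 1) = 3 index bits */
		assert(rte_fls_u32(8) == 4);
	}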
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 1c779068..790ecc3c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -12552,13 +12552,16 @@ i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
if (in->key_len > RTE_DIM(out->key) ||
in->queue_num > RTE_DIM(out->queue))
return -EINVAL;
+ if (!in->key && in->key_len)
+ return -EINVAL;
+ if (in->key)
+ out->conf.key = memcpy(out->key, in->key, in->key_len);
out->conf = (struct rte_flow_action_rss){
.func = in->func,
.level = in->level,
.types = in->types,
.key_len = in->key_len,
.queue_num = in->queue_num,
- .key = memcpy(out->key, in->key, in->key_len),
.queue = memcpy(out->queue, in->queue,
sizeof(*in->queue) * in->queue_num),
};
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c9e82d51..91ba6201 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2549,6 +2549,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
return -EINVAL;
}
+ /* Stop the link setup handler before resetting the HW. */
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
@@ -2731,8 +2734,6 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
if (err)
goto error;
- ixgbe_dev_link_update(dev, 0);
-
skip_link_setup:
if (rte_intr_allow_others(intr_handle)) {
@@ -2768,6 +2769,12 @@ skip_link_setup:
"please call hierarchy_commit() "
"before starting the port");
+ /*
+ * Update link status right before return, because it may
+ * start link configuration process in a separate thread.
+ */
+ ixgbe_dev_link_update(dev, 0);
+
return 0;
error:
@@ -3873,11 +3880,6 @@ static int
ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
int *link_up, int wait_to_complete)
{
- /**
- * for a quick link status checking, wait_to_compelet == 0,
- * skip PF link status checking
- */
- bool no_pflink_check = wait_to_complete == 0;
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
uint32_t links_reg, in_msg;
@@ -3938,14 +3940,6 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_UNKNOWN;
}
- if (no_pflink_check) {
- if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
- mac->get_link_status = true;
- else
- mac->get_link_status = false;
-
- goto out;
- }
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
@@ -3955,7 +3949,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
/* msg is not CTS and is NACK we must have lost CTS status */
if (in_msg & IXGBE_VT_MSGTYPE_NACK)
- ret_val = -1;
+ mac->get_link_status = false;
goto out;
}
@@ -5061,6 +5055,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ /* Stop the link setup handler before resetting the HW. */
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
@@ -5096,8 +5093,6 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
ixgbevf_dev_rxtx_start(dev);
- ixgbevf_dev_link_update(dev, 0);
-
/* check and configure queue intr-vector mapping */
if (rte_intr_cap_multiple(intr_handle) &&
dev->data->dev_conf.intr_conf.rxq) {
@@ -5135,6 +5130,12 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
/* Re-enable interrupt for VF */
ixgbevf_intr_enable(dev);
+ /*
+ * Update link status right before return, because it may
+ * start link configuration process in a separate thread.
+ */
+ ixgbevf_dev_link_update(dev, 0);
+
return 0;
}
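Both hunks follow the same discipline for the deferred link-setup handler: cancel the alarm before resetting the HW, and sample the link only right before returning, since the update may kick off configuration in a separate alarm context. A minimal sketch of the rte_alarm set/cancel pairing involved (the callback body is illustrative; only the pairing is the point):

	#include <rte_alarm.h>
	#include <rte_ethdev.h>

	static void setup_link_alarm(void *arg)
	{
		struct rte_eth_dev *dev = arg;

		/* ... negotiate link; re-arm while not done ... */
		rte_eal_alarm_set(50 * 1000 /* us */, setup_link_alarm, dev);
	}

	static void stop_link_setup(struct rte_eth_dev *dev)
	{
		/* Must run before reset_hw(): removes any pending callback. */
		rte_eal_alarm_cancel(setup_link_alarm, dev);
	}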
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 7a50bccd..895cdfee 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -51,7 +51,7 @@ CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -Wno-strict-prototypes
-CFLAGS += $(shell pkg-config --cflags libmnl)
+CFLAGS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --cflags libmnl)
ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
CFLAGS += -DMLX5_GLUE='"$(LIB_GLUE)"'
CFLAGS += -DMLX5_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
@@ -60,7 +60,7 @@ LDLIBS += -ldl
else
LDLIBS += -libverbs -lmlx5
endif
-LDLIBS += $(shell pkg-config --libs libmnl)
+LDLIBS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --libs libmnl || echo "-lmnl")
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ed1fcfc7..9e5cab16 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -347,11 +347,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
memset(priv, 0, sizeof(*priv));
priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
/*
- * flag to rte_eth_dev_close() that it should release the port resources
- * (calling rte_eth_dev_release_port()) in addition to closing it.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
- /*
* Reset mac_addrs to NULL such that it is not freed as part of
* rte_eth_dev_release_port(). mac_addrs is part of dev_private so
* it is freed when dev_private is freed.
@@ -1114,6 +1109,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOMEM;
goto error;
}
+ /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
if (priv->representor) {
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
eth_dev->data->representor_id = priv->representor_id;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3c2ac4b3..5ad3a11a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1178,6 +1178,12 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
"L3 cannot follow an L4 layer.");
if (!mask)
mask = &rte_flow_item_ipv4_mask;
+ else if (mask->hdr.next_proto_id != 0 &&
+ mask->hdr.next_proto_id != 0xff)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "partial mask is not supported"
+ " for protocol");
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_ipv4),
@@ -1234,17 +1240,6 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an L4 layer.");
- /*
- * IPv6 is not recognised by the NIC inside a GRE tunnel.
- * Such support has to be disabled as the rule will be
- * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and
- * Mellanox OFED 4.4-1.0.0.0.
- */
- if (tunnel && item_flags & MLX5_FLOW_LAYER_GRE)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "IPv6 inside a GRE tunnel is"
- " not recognised.");
if (!mask)
mask = &rte_flow_item_ipv6_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -2657,7 +2652,7 @@ flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
FLOW_FDIR_CMP(f1, f2, l3_mask) ||
FLOW_FDIR_CMP(f1, f2, l4) ||
FLOW_FDIR_CMP(f1, f2, l4_mask) ||
- FLOW_FDIR_CMP(f1, f2, actions[0]))
+ FLOW_FDIR_CMP(f1, f2, actions[0].type))
return 1;
if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
FLOW_FDIR_CMP(f1, f2, queue))
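The IPv4 validation hunk above, together with the matching dv/verbs hunks below, enforces an all-or-nothing mask on the next-protocol field and derives the effective value as spec & mask, resetting to 0xff ("any") when no mask is given, e.g. for an inner layer. A compact sketch of that rule:

	#include <errno.h>
	#include <stdint.h>

	static int next_proto_from_item(uint8_t spec, uint8_t mask, uint8_t *proto)
	{
		if (mask != 0x00 && mask != 0xff)
			return -EINVAL; /* partial protocol mask unsupported */
		*proto = mask ? (uint8_t)(spec & mask) : 0xff; /* 0xff == any */
		return 0;
	}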
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 79096153..a2edd168 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -814,10 +814,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id)
+ items->mask)->hdr.next_proto_id) {
next_protocol =
((const struct rte_flow_item_ipv4 *)
(items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
@@ -828,10 +835,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto)
+ items->mask)->hdr.proto) {
next_protocol =
((const struct rte_flow_item_ipv6 *)
items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
@@ -1041,6 +1055,39 @@ flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
return flow;
}
+#ifndef NDEBUG
+/**
+ * Sanity check for match mask and value. Similar to check_valid_spec() in
+ * kernel driver. If unmasked bit is present in value, it returns failure.
+ *
+ * @param match_mask
+ * pointer to match mask buffer.
+ * @param match_value
+ * pointer to match value buffer.
+ *
+ * @return
+ * 0 if valid, -EINVAL otherwise.
+ */
+static int
+flow_dv_check_valid_spec(void *match_mask, void *match_value)
+{
+ uint8_t *m = match_mask;
+ uint8_t *v = match_value;
+ unsigned int i;
+
+ for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
+ if (v[i] & ~m[i]) {
+ DRV_LOG(ERR,
+ "match_value differs from match_criteria"
+ " %p[%u] != %p[%u]",
+ match_value, i, match_mask, i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+#endif
+
/**
* Add Ethernet item to matcher and to the value.
*
@@ -1750,114 +1797,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = priv->config.flow_prio - 1;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- void *match_mask = matcher.mask.buf;
- void *match_value = dev_flow->dv.value.buf;
-
- switch (items->type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- flow_dv_translate_item_eth(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- flow_dv_translate_item_vlan(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
- item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
- MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 |
- MLX5_FLOW_LAYER_OUTER_VLAN);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
- MLX5_IPV4_LAYER_TYPES,
- MLX5_IPV4_IBV_RX_HASH);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
- MLX5_IPV6_LAYER_TYPES,
- MLX5_IPV6_IBV_RX_HASH);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- flow_dv_translate_item_tcp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_TCP,
- IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- flow_dv_translate_item_udp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->verbs.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_UDP,
- IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
- break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- flow_dv_translate_item_gre(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_GRE;
- break;
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- flow_dv_translate_item_nvgre(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_GRE;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- flow_dv_translate_item_vxlan(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_VXLAN;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- flow_dv_translate_item_vxlan(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
- break;
- case RTE_FLOW_ITEM_TYPE_META:
- flow_dv_translate_item_meta(match_mask, match_value,
- items);
- item_flags |= MLX5_FLOW_ITEM_METADATA;
- break;
- default:
- break;
- }
- }
- dev_flow->layers = item_flags;
- /* Register matcher. */
- matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
- matcher.mask.size);
- matcher.priority = mlx5_flow_adjust_priority(dev, priority,
- matcher.priority);
- matcher.egress = attr->egress;
- if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
- return -rte_errno;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
@@ -1991,6 +1930,116 @@ flow_dv_translate(struct rte_eth_dev *dev,
}
dev_flow->dv.actions_n = actions_n;
flow->actions = action_flags;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ void *match_mask = matcher.mask.buf;
+ void *match_value = dev_flow->dv.value.buf;
+
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ flow_dv_translate_item_eth(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L2;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ flow_dv_translate_item_vlan(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L2;
+ item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+ MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ flow_dv_translate_item_ipv4(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L3;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel,
+ MLX5_IPV4_LAYER_TYPES,
+ MLX5_IPV4_IBV_RX_HASH);
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ flow_dv_translate_item_ipv6(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L3;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel,
+ MLX5_IPV6_LAYER_TYPES,
+ MLX5_IPV6_IBV_RX_HASH);
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ flow_dv_translate_item_tcp(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel, ETH_RSS_TCP,
+ IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP);
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ flow_dv_translate_item_udp(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel, ETH_RSS_UDP,
+ IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP);
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ flow_dv_translate_item_gre(match_mask, match_value,
+ items, tunnel);
+ item_flags |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ flow_dv_translate_item_nvgre(match_mask, match_value,
+ items, tunnel);
+ item_flags |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ flow_dv_translate_item_vxlan(match_mask, match_value,
+ items, tunnel);
+ item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ flow_dv_translate_item_vxlan(match_mask, match_value,
+ items, tunnel);
+ item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ flow_dv_translate_item_meta(match_mask, match_value,
+ items);
+ item_flags |= MLX5_FLOW_ITEM_METADATA;
+ break;
+ default:
+ break;
+ }
+ }
+ assert(!flow_dv_check_valid_spec(matcher.mask.buf,
+ dev_flow->dv.value.buf));
+ dev_flow->layers = item_flags;
+ /* Register matcher. */
+ matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+ matcher.mask.size);
+ matcher.priority = mlx5_flow_adjust_priority(dev, priority,
+ matcher.priority);
+ matcher.egress = attr->egress;
+ if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
+ return -rte_errno;
return 0;
}
@@ -2034,6 +2083,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
} else if (flow->actions &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
+
hrxq = mlx5_hrxq_get(dev, flow->key,
MLX5_RSS_HASH_KEY_LEN,
dv->hash_fields,
diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c
index fb817b23..97d2a54c 100644
--- a/drivers/net/mlx5/mlx5_flow_tcf.c
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c
@@ -3847,30 +3847,6 @@ flow_tcf_alloc_nlcmd(struct tcf_nlcb_context *ctx, uint32_t size)
}
/**
- * Set NLM_F_ACK flags in the last netlink command in buffer.
- * Only last command in the buffer will be acked by system.
- *
- * @param[in, out] buf
- * Pointer to buffer with netlink commands.
- */
-static void
-flow_tcf_setack_nlcmd(struct tcf_nlcb_buf *buf)
-{
- struct nlmsghdr *nlh;
- uint32_t size = 0;
-
- assert(buf->size);
- do {
- nlh = (struct nlmsghdr *)&buf->msg[size];
- size += NLMSG_ALIGN(nlh->nlmsg_len);
- if (size >= buf->size) {
- nlh->nlmsg_flags |= NLM_F_ACK;
- break;
- }
- } while (true);
-}
-
-/**
* Send the buffers with prepared netlink commands. Scans the list and
* sends all found buffers. Buffers are sent and freed anyway in order
 * to prevent memory leakage even if some message in the received packet
 * fails.
@@ -3888,21 +3864,35 @@ static int
flow_tcf_send_nlcmd(struct mlx5_flow_tcf_context *tcf,
struct tcf_nlcb_context *ctx)
{
- struct tcf_nlcb_buf *bc, *bn;
- struct nlmsghdr *nlh;
+ struct tcf_nlcb_buf *bc = LIST_FIRST(&ctx->nlbuf);
int ret = 0;
- bc = LIST_FIRST(&ctx->nlbuf);
while (bc) {
+ struct tcf_nlcb_buf *bn = LIST_NEXT(bc, next);
+ struct nlmsghdr *nlh;
+ uint32_t msg = 0;
int rc;
- bn = LIST_NEXT(bc, next);
- if (bc->size) {
- flow_tcf_setack_nlcmd(bc);
- nlh = (struct nlmsghdr *)&bc->msg;
- rc = flow_tcf_nl_ack(tcf, nlh, bc->size, NULL, NULL);
- if (rc && !ret)
- ret = rc;
+ while (msg < bc->size) {
+ /*
+ * Send Netlink commands from buffer in one by one
+ * fashion. If we send multiple rule deletion commands
+ * in one Netlink message and some error occurs it may
+ * cause multiple ACK error messages and break sequence
+ * numbers of Netlink communication, because we expect
+ * the only one ACK reply.
+ */
+ assert((bc->size - msg) >= sizeof(struct nlmsghdr));
+ nlh = (struct nlmsghdr *)&bc->msg[msg];
+ assert((bc->size - msg) >= nlh->nlmsg_len);
+ msg += nlh->nlmsg_len;
+ rc = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+ if (rc) {
+ DRV_LOG(WARNING,
+ "netlink: cleanup error %d", rc);
+ if (!ret)
+ ret = rc;
+ }
}
rte_free(bc);
bc = bn;
@@ -3935,6 +3925,7 @@ flow_tcf_collect_local_cb(const struct nlmsghdr *nlh, void *arg)
struct nlattr *na_local = NULL;
struct nlattr *na_peer = NULL;
unsigned char family;
+ uint32_t size;
if (nlh->nlmsg_type != RTM_NEWADDR) {
rte_errno = EINVAL;
@@ -3962,11 +3953,11 @@ flow_tcf_collect_local_cb(const struct nlmsghdr *nlh, void *arg)
if (!na_local || !na_peer)
return 1;
/* Local rule found with scope link, permanent and assigned peer. */
- cmd = flow_tcf_alloc_nlcmd(ctx, MNL_ALIGN(sizeof(struct nlmsghdr)) +
- MNL_ALIGN(sizeof(struct ifaddrmsg)) +
- (family == AF_INET6
- ? 2 * SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
- : 2 * SZ_NLATTR_TYPE_OF(uint32_t)));
+ size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
+ MNL_ALIGN(sizeof(struct ifaddrmsg)) +
+ (family == AF_INET6 ? 2 * SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
+ : 2 * SZ_NLATTR_TYPE_OF(uint32_t));
+ cmd = flow_tcf_alloc_nlcmd(ctx, size);
if (!cmd) {
rte_errno = ENOMEM;
return -rte_errno;
@@ -3991,6 +3982,7 @@ flow_tcf_collect_local_cb(const struct nlmsghdr *nlh, void *arg)
mnl_attr_put(cmd, IFA_ADDRESS, IPV6_ADDR_LEN,
mnl_attr_get_payload(na_peer));
}
+ assert(size == cmd->nlmsg_len);
return 1;
}
@@ -4059,6 +4051,7 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
struct nlattr *na_ip = NULL;
struct nlattr *na_mac = NULL;
unsigned char family;
+ uint32_t size;
if (nlh->nlmsg_type != RTM_NEWNEIGH) {
rte_errno = EINVAL;
@@ -4085,12 +4078,12 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
if (!na_mac || !na_ip)
return 1;
/* Neigh rule with permanent attribute found. */
- cmd = flow_tcf_alloc_nlcmd(ctx, MNL_ALIGN(sizeof(struct nlmsghdr)) +
- MNL_ALIGN(sizeof(struct ndmsg)) +
- SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) +
- (family == AF_INET6
- ? SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
- : SZ_NLATTR_TYPE_OF(uint32_t)));
+ size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
+ MNL_ALIGN(sizeof(struct ndmsg)) +
+ SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) +
+ (family == AF_INET6 ? SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
+ : SZ_NLATTR_TYPE_OF(uint32_t));
+ cmd = flow_tcf_alloc_nlcmd(ctx, size);
if (!cmd) {
rte_errno = ENOMEM;
return -rte_errno;
@@ -4113,6 +4106,7 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
}
mnl_attr_put(cmd, NDA_LLADDR, ETHER_ADDR_LEN,
mnl_attr_get_payload(na_mac));
+ assert(size == cmd->nlmsg_len);
return 1;
}
@@ -4179,6 +4173,7 @@ flow_tcf_collect_vxlan_cb(const struct nlmsghdr *nlh, void *arg)
struct nlattr *na_vxlan = NULL;
bool found = false;
unsigned int vxindex;
+ uint32_t size;
if (nlh->nlmsg_type != RTM_NEWLINK) {
rte_errno = EINVAL;
@@ -4224,9 +4219,10 @@ flow_tcf_collect_vxlan_cb(const struct nlmsghdr *nlh, void *arg)
return 1;
/* Attached VXLAN device found, store the command to delete. */
vxindex = ifm->ifi_index;
- cmd = flow_tcf_alloc_nlcmd(ctx, MNL_ALIGN(sizeof(struct nlmsghdr)) +
- MNL_ALIGN(sizeof(struct ifinfomsg)));
- if (!nlh) {
+ size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
+ MNL_ALIGN(sizeof(struct ifinfomsg));
+ cmd = flow_tcf_alloc_nlcmd(ctx, size);
+ if (!cmd) {
rte_errno = ENOMEM;
return -rte_errno;
}
@@ -4236,6 +4232,7 @@ flow_tcf_collect_vxlan_cb(const struct nlmsghdr *nlh, void *arg)
ifm = mnl_nlmsg_put_extra_header(cmd, sizeof(*ifm));
ifm->ifi_family = AF_UNSPEC;
ifm->ifi_index = vxindex;
+ assert(size == cmd->nlmsg_len);
return 1;
}
@@ -5127,6 +5124,13 @@ flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
dev_flow->tcf.applied = 1;
return 0;
}
+ if (dev_flow->tcf.tunnel) {
+ /* Rollback the VTEP configuration if rule apply failed. */
+ assert(dev_flow->tcf.tunnel->vtep);
+ flow_tcf_vtep_release(ctx, dev_flow->tcf.tunnel->vtep,
+ dev_flow);
+ dev_flow->tcf.tunnel->vtep = NULL;
+ }
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"netlink: failed to create TC flow rule");
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 699cc88c..d6d95db5 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1058,10 +1058,17 @@ flow_verbs_validate(struct rte_eth_dev *dev,
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id)
+ items->mask)->hdr.next_proto_id) {
next_protocol =
((const struct rte_flow_item_ipv4 *)
(items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
@@ -1072,10 +1079,17 @@ flow_verbs_validate(struct rte_eth_dev *dev,
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto)
+ items->mask)->hdr.proto) {
next_protocol =
((const struct rte_flow_item_ipv6 *)
items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_UDP:
ret = mlx5_flow_validate_item_udp(items, item_flags,
@@ -1125,13 +1139,6 @@ flow_verbs_validate(struct rte_eth_dev *dev,
error);
if (ret < 0)
return ret;
- if (next_protocol != 0xff &&
- next_protocol != IPPROTO_MPLS)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, items,
- "protocol filtering not compatible"
- " with MPLS layer");
item_flags |= MLX5_FLOW_LAYER_MPLS;
break;
default:
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 886f60e6..97092c74 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -15,6 +15,16 @@
#include "mlx5_defs.h"
+/*
+ * Compilation workaround for PPC64 when AltiVec is fully enabled, e.g. std=c11.
+ * Otherwise there would be a type conflict between stdbool and altivec.
+ */
+#if defined(__PPC64__) && !defined(__APPLE_ALTIVEC__)
+#undef bool
+/* redefine as in stdbool.h */
+#define bool _Bool
+#endif
+
/* Bit-field manipulation. */
#define BITFIELD_DECLARE(bf, type, size) \
type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
diff --git a/drivers/net/octeontx/base/octeontx_pki_var.h b/drivers/net/octeontx/base/octeontx_pki_var.h
index c793b655..f4661d24 100644
--- a/drivers/net/octeontx/base/octeontx_pki_var.h
+++ b/drivers/net/octeontx/base/octeontx_pki_var.h
@@ -7,8 +7,17 @@
#include <rte_byteorder.h>
-#define OCTTX_PACKET_WQE_SKIP 128
-#define OCTTX_PACKET_FIRST_SKIP 240
+#define OCTTX_PACKET_WQE_SKIP 128
+#define OCTTX_PACKET_FIRST_SKIP_MAXREGVAL 496
+#define OCTTX_PACKET_FIRST_SKIP_MAXLEN 512
+#define OCTTX_PACKET_FIRST_SKIP_ADJUST(x) \
+ (RTE_MIN(x, OCTTX_PACKET_FIRST_SKIP_MAXREGVAL))
+#define OCTTX_PACKET_FIRST_SKIP_SUM(p) \
+ (OCTTX_PACKET_WQE_SKIP \
+ + rte_pktmbuf_priv_size(p) \
+ + RTE_PKTMBUF_HEADROOM)
+#define OCTTX_PACKET_FIRST_SKIP(p) \
+ OCTTX_PACKET_FIRST_SKIP_ADJUST(OCTTX_PACKET_FIRST_SKIP_SUM(p))
#define OCTTX_PACKET_LATER_SKIP 128
/* WQE descriptor */
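A worked example of the new first_skip computation, with pool parameters assumed for illustration: a pool with no private area and the default 128-byte headroom gives 128 + 0 + 128 = 256, which is below the 496-byte register maximum and is used as-is; a pool with a 256-byte private area gives 512 and is clamped to 496:

	#include <rte_common.h>
	#include <rte_mbuf.h>

	static uint32_t first_skip_example(uint32_t priv_size)
	{
		uint32_t sum = OCTTX_PACKET_WQE_SKIP + priv_size +
			       RTE_PKTMBUF_HEADROOM;

		/* priv 0:   128 + 0 + 128   = 256, used as-is;
		 * priv 256: 128 + 256 + 128 = 512, clamped to 496. */
		return RTE_MIN(sum, (uint32_t)OCTTX_PACKET_FIRST_SKIP_MAXREGVAL);
	}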
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 06814862..a3063be4 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -844,10 +844,11 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
pktbuf_conf.mmask.f_cache_mode = 1;
pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP;
- pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP;
+ pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP(mb_pool);
pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP;
pktbuf_conf.mbuff_size = (mb_pool->elt_size -
RTE_PKTMBUF_HEADROOM -
+ rte_pktmbuf_priv_size(mb_pool) -
sizeof(struct rte_mbuf));
pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT;
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 8a4772f4..0e33be1a 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -235,12 +235,13 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
void qede_rx_queue_release(void *rx_queue)
{
struct qede_rx_queue *rxq = rx_queue;
- struct qede_dev *qdev = rxq->qdev;
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- PMD_INIT_FUNC_TRACE(edev);
+ struct qede_dev *qdev;
+ struct ecore_dev *edev;
if (rxq) {
+ qdev = rxq->qdev;
+ edev = QEDE_INIT_EDEV(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
qede_rx_queue_release_mbufs(rxq);
qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
@@ -399,12 +400,13 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
void qede_tx_queue_release(void *tx_queue)
{
struct qede_tx_queue *txq = tx_queue;
- struct qede_dev *qdev = txq->qdev;
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- PMD_INIT_FUNC_TRACE(edev);
+ struct qede_dev *qdev;
+ struct ecore_dev *edev;
if (txq) {
+ qdev = txq->qdev;
+ edev = QEDE_INIT_EDEV(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
qede_tx_queue_release_mbufs(txq);
qdev->ops->common->chain_free(edev, &txq->tx_pbl);
rte_free(txq->sw_tx_ring);
@@ -1759,6 +1761,18 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
}
}
if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
+ /* We support only limited tunnel protocols */
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ uint64_t temp;
+
+ temp = ol_flags & PKT_TX_TUNNEL_MASK;
+ if (temp == PKT_TX_TUNNEL_VXLAN ||
+ temp == PKT_TX_TUNNEL_GENEVE ||
+ temp == PKT_TX_TUNNEL_MPLSINUDP ||
+ temp == PKT_TX_TUNNEL_GRE)
+ break;
+ }
+
rte_errno = -ENOTSUP;
break;
}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index d3a41e92..0afadd8d 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -153,10 +153,7 @@
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
PKT_TX_VLAN_PKT | \
- PKT_TX_TUNNEL_VXLAN | \
- PKT_TX_TUNNEL_GENEVE | \
- PKT_TX_TUNNEL_MPLSINUDP | \
- PKT_TX_TUNNEL_GRE)
+ PKT_TX_TUNNEL_MASK)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h
index 8e10e893..2e847b6c 100644
--- a/drivers/net/sfc/base/efx.h
+++ b/drivers/net/sfc/base/efx.h
@@ -2878,6 +2878,8 @@ typedef struct efx_filter_spec_s {
efx_filter_flags_t efs_flags;
uint16_t efs_dmaq_id;
uint32_t efs_rss_context;
+ uint32_t efs_mark;
+ /* Fields below here are hashed for software filter lookup */
uint16_t efs_outer_vid;
uint16_t efs_inner_vid;
uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
@@ -2891,7 +2893,6 @@ typedef struct efx_filter_spec_s {
efx_oword_t efs_loc_host;
uint8_t efs_vni_or_vsid[EFX_VNI_OR_VSID_LEN];
uint8_t efs_ifrm_loc_mac[EFX_MAC_ADDR_LEN];
- uint32_t efs_mark;
} efx_filter_spec_t;
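Moving efs_mark above the new "hashed for software filter lookup" boundary lets a software table hash and compare only the tail of the spec, keeping lookup-irrelevant state out of the key. A sketch of a comparison keyed on that layout (the helper is illustrative; offsets follow the structure above):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	static int efx_spec_key_equal(const efx_filter_spec_t *a,
				      const efx_filter_spec_t *b)
	{
		/* Compare from the first hashed member to the end. */
		const size_t off = offsetof(efx_filter_spec_t, efs_outer_vid);

		return memcmp((const uint8_t *)a + off, (const uint8_t *)b + off,
			      sizeof(*a) - off) == 0;
	}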
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index e7817e89..49afd38d 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -248,7 +248,7 @@ tun_alloc(struct pmd_internals *pmd, int is_keepalive)
return fd;
error:
- if (fd > 0)
+ if (fd >= 0)
close(fd);
return -1;
}
@@ -1848,6 +1848,7 @@ disable_rte_flow:
TAP_LOG(ERR, "Remote feature requires flow support.");
goto error_exit;
}
+ rte_eth_dev_probing_finish(dev);
return 0;
error_remote:
diff --git a/drivers/net/tap/tap_netlink.c b/drivers/net/tap/tap_netlink.c
index 6cb51009..14bbbec7 100644
--- a/drivers/net/tap/tap_netlink.c
+++ b/drivers/net/tap/tap_netlink.c
@@ -51,14 +51,17 @@ tap_nl_init(uint32_t nl_groups)
}
if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int))) {
TAP_LOG(ERR, "Unable to set socket buffer send size");
+ close(fd);
return -1;
}
if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int))) {
TAP_LOG(ERR, "Unable to set socket buffer receive size");
+ close(fd);
return -1;
}
if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) {
TAP_LOG(ERR, "Unable to bind to the netlink socket");
+ close(fd);
return -1;
}
return fd;
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 247c3568..1c428743 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -61,6 +61,14 @@ fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
entry->buff[0] = sqe.buff[0];
}
+static inline void __hot
+fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
+ struct rte_mbuf *pkt)
+{
+ fill_sq_desc_header(entry, pkt);
+ entry->buff[1] = 0ULL;
+}
+
void __hot
nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
@@ -204,7 +212,7 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
used_bufs += nb_segs;
txbuffs[tail] = NULL;
- fill_sq_desc_header(desc_ptr + tail, pkt);
+ fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
tail = (tail + 1) & qlen_mask;
txbuffs[tail] = pkt;
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 21110cd6..c8883c32 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -614,9 +614,15 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
hw->common_cfg = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_NOTIFY_CFG:
- rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ ret = rte_pci_read_config(dev,
+ &hw->notify_off_multiplier,
4, pos + sizeof(cap));
- hw->notify_base = get_cfg_addr(dev, &cap);
+ if (ret != 4)
+ PMD_INIT_LOG(DEBUG,
+ "failed to read notify_off_multiplier, ret %d",
+ ret);
+ else
+ hw->notify_base = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
hw->dev_cfg = get_cfg_addr(dev, &cap);
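rte_pci_read_config() returns the number of bytes actually read (or a negative value on failure), so the result has to be compared against the requested length; after a short read the destination is undefined and notify_base must not be derived from it. A minimal sketch of that check in isolation:

	#include <stdint.h>
	#include <rte_bus_pci.h>

	static int read_notify_multiplier(const struct rte_pci_device *dev,
					  off_t off, uint32_t *mult)
	{
		int ret = rte_pci_read_config(dev, mult, 4, off);

		return ret == 4 ? 0 : -1; /* short read: treat as failure */
	}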
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 61b7c0a3..f8791391 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -484,7 +484,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
} else {
PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
- VIRTIO_USER_ARG_QUEUE_SIZE);
+ VIRTIO_USER_ARG_PATH);
goto end;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 84acd9db..93e5de9a 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -318,6 +318,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+ /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
/* Put device in Quiesce Mode */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
@@ -876,12 +879,6 @@ vmxnet3_dev_close(struct rte_eth_dev *dev)
vmxnet3_dev_stop(dev);
vmxnet3_free_queues(dev);
-
- /*
- * flag to rte_eth_dev_close() that it should release the port resources
- * (calling rte_eth_dev_release_port()) in addition to closing it.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
}
static void