Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bnx2x/bnx2x.c  24
-rw-r--r--  drivers/net/bnx2x/bnx2x.h  1
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.c  214
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.h  3
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c  35
-rw-r--r--  drivers/net/bnxt/bnxt_filter.c  6
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c  51
-rw-r--r--  drivers/net/bnxt/bnxt_stats.c  3
-rw-r--r--  drivers/net/bnxt/bnxt_txr.c  51
-rw-r--r--  drivers/net/bnxt/bnxt_txr.h  10
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.c  5
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.h  6
-rw-r--r--  drivers/net/bonding/rte_eth_bond_api.c  14
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c  24
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c  97
-rw-r--r--  drivers/net/cxgbe/base/t4fw_interface.h  8
-rw-r--r--  drivers/net/cxgbe/cxgbe_compat.h  9
-rw-r--r--  drivers/net/cxgbe/sge.c  15
-rw-r--r--  drivers/net/dpaa2/mc/dpni.c  2
-rw-r--r--  drivers/net/ena/base/ena_plat_dpdk.h  34
-rw-r--r--  drivers/net/ena/ena_ethdev.c  4
-rw-r--r--  drivers/net/enic/enic.h  1
-rw-r--r--  drivers/net/enic/enic_ethdev.c  67
-rw-r--r--  drivers/net/enic/enic_main.c  34
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c  149
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.c  1
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.h  5
-rw-r--r--  drivers/net/ixgbe/ixgbe_fdir.c  30
-rw-r--r--  drivers/net/ixgbe/ixgbe_flow.c  12
-rw-r--r--  drivers/net/mlx4/mlx4.c  44
-rw-r--r--  drivers/net/mlx4/mlx4.h  1
-rw-r--r--  drivers/net/mlx4/mlx4_rxq.c  6
-rw-r--r--  drivers/net/mlx5/Makefile  47
-rw-r--r--  drivers/net/mlx5/mlx5.c  6
-rw-r--r--  drivers/net/mlx5/mlx5_defs.h  10
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c  26
-rw-r--r--  drivers/net/mlx5/mlx5_flow.c  40
-rw-r--r--  drivers/net/mlx5/mlx5_rxmode.c  16
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c  11
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h  2
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.h  4
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_neon.h  8
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_sse.h  8
-rw-r--r--  drivers/net/mlx5/mlx5_socket.c  6
-rw-r--r--  drivers/net/mlx5/mlx5_trigger.c  37
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c  2
-rw-r--r--  drivers/net/mrvl/mrvl_ethdev.c  5
-rw-r--r--  drivers/net/nfp/nfp_net.c  26
-rw-r--r--  drivers/net/octeontx/octeontx_ethdev.c  6
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c  86
-rw-r--r--  drivers/net/qede/base/ecore_dev.c  10
-rw-r--r--  drivers/net/qede/base/ecore_int.c  14
-rw-r--r--  drivers/net/qede/base/ecore_sriov.c  44
-rw-r--r--  drivers/net/qede/base/ecore_vf.c  33
-rw-r--r--  drivers/net/qede/base/ecore_vf.h  9
-rw-r--r--  drivers/net/qede/base/ecore_vfpf_if.h  16
-rw-r--r--  drivers/net/qede/qede_ethdev.c  228
-rw-r--r--  drivers/net/qede/qede_ethdev.h  1
-rw-r--r--  drivers/net/qede/qede_fdir.c  3
-rw-r--r--  drivers/net/qede/qede_main.c  7
-rw-r--r--  drivers/net/sfc/sfc_ef10_rx.c  2
-rw-r--r--  drivers/net/sfc/sfc_ethdev.c  2
-rw-r--r--  drivers/net/sfc/sfc_flow.c  3
-rw-r--r--  drivers/net/tap/tap_flow.c  18
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.c  5
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c  24
66 files changed, 1270 insertions, 461 deletions
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 98b08d11..e58684d6 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -125,7 +125,6 @@ int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc);
static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
uint8_t storm, uint16_t index, uint8_t op,
uint8_t update);
@@ -1971,9 +1970,6 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
PMD_DRV_LOG(DEBUG, "Starting NIC unload...");
- /* stop the periodic callout */
- bnx2x_periodic_stop(sc);
-
/* mark driver as unloaded in shmem2 */
if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
@@ -4492,6 +4488,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
+ PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index);
+
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
@@ -4508,7 +4506,7 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
}
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
- le16toh(fp->fp_hc_idx), IGU_INT_DISABLE, 1);
+ le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}
/*
@@ -6999,16 +6997,6 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc)
}
}
-static void bnx2x_periodic_start(struct bnx2x_softc *sc)
-{
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
-}
-
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc)
-{
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
-}
-
static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
{
int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc);
@@ -7043,10 +7031,6 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
bnx2x_link_report(sc);
}
- if (!CHIP_REV_IS_SLOW(sc)) {
- bnx2x_periodic_start(sc);
- }
-
sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
return rc;
}
@@ -7078,7 +7062,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
{
if ((sc->state != BNX2X_STATE_OPEN) ||
(atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
- PMD_DRV_LOG(WARNING, "periodic callout exit (state=0x%x)",
+ PMD_DRV_LOG(INFO, "periodic callout exit (state=0x%x)",
sc->state);
return;
}
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 17075d38..c93f148b 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1930,6 +1930,7 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc);
int bnx2x_complete_sp(struct bnx2x_softc *sc);
int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
void bnx2x_periodic_callout(struct bnx2x_softc *sc);
+void bnx2x_periodic_stop(void *param);
int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count);
void bnx2x_vf_close(struct bnx2x_softc *sc);
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 95861a06..650d6ce6 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -13,6 +13,8 @@
#include <rte_dev.h>
#include <rte_ethdev_pci.h>
+#include <rte_alarm.h>
+#include <rte_atomic.h>
/*
* The set of PCI devices this driver supports
@@ -78,26 +80,87 @@ static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
};
-static void
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to read from.
+ * - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+bnx2x_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &dev->data->dev_link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to read from.
+ * - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+bnx2x_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static int
bnx2x_link_update(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
+ struct rte_eth_link orig;
+ struct rte_eth_link link;
PMD_INIT_FUNC_TRACE();
+
bnx2x_link_status_update(sc);
+ memset(&orig, 0, sizeof(orig));
+ memset(&link, 0, sizeof(link));
+ bnx2x_dev_atomic_read_link_status(dev, &orig);
mb();
- dev->data->dev_link.link_speed = sc->link_vars.line_speed;
+ link.link_speed = sc->link_vars.line_speed;
switch (sc->link_vars.duplex) {
case DUPLEX_FULL:
- dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case DUPLEX_HALF:
- dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
break;
}
- dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- dev->data->dev_link.link_status = sc->link_vars.link_up;
+ link.link_status = sc->link_vars.link_up;
+ bnx2x_dev_atomic_write_link_status(dev, &link);
+
+ return (link.link_status == orig.link_status) ? -1 : 0;
}
static void
@@ -106,8 +169,6 @@ bnx2x_interrupt_action(struct rte_eth_dev *dev)
struct bnx2x_softc *sc = dev->data->dev_private;
uint32_t link_status;
- PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
-
bnx2x_intr_legacy(sc, 0);
if (sc->periodic_flags & PERIODIC_GO)
@@ -125,15 +186,74 @@ bnx2x_interrupt_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct bnx2x_softc *sc = dev->data->dev_private;
+ PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
+
bnx2x_interrupt_action(dev);
rte_intr_enable(&sc->pci_dev->intr_handle);
}
+static void bnx2x_periodic_start(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
+
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
+ bnx2x_interrupt_action(dev);
+ if (IS_PF(sc)) {
+ ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+ bnx2x_periodic_start, (void *)dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to start periodic"
+ " timer rc %d", ret);
+ assert(false && "Unable to start periodic timer");
+ }
+ }
+}
+
+void bnx2x_periodic_stop(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
+
+ rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
+}
+
/*
* Devops - helper functions can be called from user application
*/
static int
+bnx2x_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return bnx2x_link_update(dev);
+}
+
+static int
+bnx2xvf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
+
+ ret = bnx2x_link_update(dev);
+
+ bnx2x_check_bull(sc);
+ if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
+ PMD_DRV_LOG(ERR, "PF indicated channel is down."
+ "VF device is no longer operational");
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ }
+
+ return ret;
+}
+
+static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
@@ -182,6 +302,10 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ /* start the periodic callout */
+ if (sc->periodic_flags & PERIODIC_STOP)
+ bnx2x_periodic_start(dev);
+
ret = bnx2x_init(sc);
if (ret) {
PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
@@ -222,12 +346,21 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
bnx2x_interrupt_handler, (void *)dev);
}
+ /* stop the periodic callout */
+ bnx2x_periodic_stop(dev);
+
ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
if (ret) {
PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
return;
}
+ /* Update device link status */
+ if (IS_PF(sc))
+ bnx2x_dev_link_update(dev, 0);
+ else
+ bnx2xvf_dev_link_update(dev, 0);
+
return;
}
@@ -300,36 +433,6 @@ bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
}
static int
-bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
-{
- PMD_INIT_FUNC_TRACE();
-
- int old_link_status = dev->data->dev_link.link_status;
-
- bnx2x_link_update(dev);
-
- return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
-}
-
-static int
-bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
-{
- int old_link_status = dev->data->dev_link.link_status;
- struct bnx2x_softc *sc = dev->data->dev_private;
-
- bnx2x_link_update(dev);
-
- bnx2x_check_bull(sc);
- if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
- PMD_DRV_LOG(ERR, "PF indicated channel is down."
- "VF device is no longer operational");
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
- }
-
- return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
-}
-
-static int
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct bnx2x_softc *sc = dev->data->dev_private;
@@ -580,6 +683,17 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
return ret;
}
+ /* schedule periodic poll for slowpath link events */
+ if (IS_PF(sc)) {
+ ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+ bnx2x_periodic_start, (void *)eth_dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to start periodic"
+ " timer rc %d", ret);
+ return -EINVAL;
+ }
+ }
+
eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
@@ -594,18 +708,20 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
if (IS_VF(sc)) {
rte_spinlock_init(&sc->vf2pf_lock);
- if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
- &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
- RTE_CACHE_LINE_SIZE) != 0)
- return -ENOMEM;
+ ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
+ &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
+ RTE_CACHE_LINE_SIZE);
+ if (ret)
+ goto out;
sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
sc->vf2pf_mbox_mapping.vaddr;
- if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
- &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
- RTE_CACHE_LINE_SIZE) != 0)
- return -ENOMEM;
+ ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
+ &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
+ RTE_CACHE_LINE_SIZE);
+ if (ret)
+ goto out;
sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
sc->pf2vf_bulletin_mapping.vaddr;
@@ -613,10 +729,14 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
sc->max_rx_queues);
if (ret)
- return ret;
+ goto out;
}
return 0;
+
+out:
+ bnx2x_periodic_stop(eth_dev);
+ return ret;
}
static int
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.h b/drivers/net/bnx2x/bnx2x_ethdev.h
index 967d6dc5..a53d437d 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.h
+++ b/drivers/net/bnx2x/bnx2x_ethdev.h
@@ -58,7 +58,6 @@
#define wmb() rte_wmb()
#define rmb() rte_rmb()
-
#define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF)
#define BNX2X_MIN_RX_BUF_SIZE 1024
@@ -72,6 +71,8 @@
/* Maximum number of Rx packets to process at a time */
#define BNX2X_RX_BUDGET 0xffffffff
+#define BNX2X_SP_TIMER_PERIOD US_PER_S /* 1 second */
+
#endif
/* MAC address operations */
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 52c511ee..7466a642 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -248,6 +248,17 @@ static int bnxt_init_chip(struct bnxt *bp)
/* VNIC configuration */
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+
+ vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
+ if (!vnic->fw_grp_ids) {
+ RTE_LOG(ERR, PMD,
+ "Failed to alloc %d bytes for group ids\n",
+ size);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ memset(vnic->fw_grp_ids, -1, size);
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
@@ -429,7 +440,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
- dev_info->reta_size = bp->max_rsscos_ctx;
+ dev_info->reta_size = HW_HASH_INDEX_SIZE;
dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;
@@ -1268,9 +1279,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
struct bnxt_vnic_info *vnic;
unsigned int i;
int rc = 0;
- uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
- HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
- uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+ uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
/* Cycle through all VNICs */
for (i = 0; i < bp->nr_vnics; i++) {
@@ -1317,8 +1328,8 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
memcpy(new_filter->l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
/* MAC + VLAN ID filter */
- new_filter->l2_ovlan = vlan_id;
- new_filter->l2_ovlan_mask = 0xF000;
+ new_filter->l2_ivlan = vlan_id;
+ new_filter->l2_ivlan_mask = 0xF000;
new_filter->enables |= en;
rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,
@@ -1541,6 +1552,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint16_t size = 0;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
@@ -1548,9 +1560,14 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
if (rc)
break;
- rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
- if (rc)
- return rc;
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ if (size < new_mtu) {
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ return rc;
+ }
}
return rc;
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 8d3ddf1d..67daec41 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -1053,9 +1053,13 @@ bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
sizeof(nf->dst_ipaddr_mask))) {
if (mf->dst_id == nf->dst_id)
return -EEXIST;
- /* Same Flow, Different queue
+ /*
+ * Same Flow, Different queue
* Clear the old ntuple filter
+ * Reuse the matching L2 filter
+ * ID for the new filter
*/
+ nf->fw_l2_filter_id = mf->fw_l2_filter_id;
if (nf->filter_type == HWRM_CFA_EM_FILTER)
bnxt_hwrm_clear_em_filter(bp, mf);
if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 22f092f1..db3222f4 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -197,6 +197,10 @@ err_ret:
RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
__func__, rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
if (resp->error_code) { \
@@ -218,6 +222,10 @@ err_ret:
"%s error %d\n", __func__, rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
} while (0)
@@ -406,13 +414,13 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
req.l2_ovlan = filter->l2_ovlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
- req.l2_ovlan = filter->l2_ivlan;
+ req.l2_ivlan = filter->l2_ivlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ovlan_mask;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
- req.l2_ovlan_mask = filter->l2_ivlan_mask;
+ req.l2_ivlan_mask = filter->l2_ivlan_mask;
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
req.src_id = rte_cpu_to_le_32(filter->src_id);
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
@@ -1092,8 +1100,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
/* map ring groups to this vnic */
RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
- for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
+ for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+
vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
@@ -1385,6 +1394,11 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t size;
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
HWRM_PREP(req, VNIC_PLCMODES_CFG);
req.flags = rte_cpu_to_le_32(
@@ -1798,6 +1812,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
else
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
//if (rc)
//break;
}
@@ -1885,6 +1900,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
bnxt_hwrm_vnic_free(bp, vnic);
+
+ rte_free(vnic->fw_grp_ids);
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);
@@ -3097,13 +3114,12 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
-
if (rc == 0)
memcpy(data, buf, len > buflen ? buflen : len);
rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -3135,12 +3151,13 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
req.offset = rte_cpu_to_le_32(offset);
req.len = rte_cpu_to_le_32(length);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
if (rc == 0)
memcpy(data, buf, length);
rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
return rc;
}
@@ -3171,14 +3188,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
rte_iova_t dma_handle;
uint8_t *buf;
- HWRM_PREP(req, NVM_WRITE);
-
- req.dir_type = rte_cpu_to_le_16(dir_type);
- req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
- req.dir_ext = rte_cpu_to_le_16(dir_ext);
- req.dir_attr = rte_cpu_to_le_16(dir_attr);
- req.dir_data_length = rte_cpu_to_le_32(data_len);
-
buf = rte_malloc("nvm_write", data_len, 0);
rte_mem_lock_page(buf);
if (!buf)
@@ -3191,14 +3200,22 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
return -ENOMEM;
}
memcpy(buf, data, data_len);
+
+ HWRM_PREP(req, NVM_WRITE);
+
+ req.dir_type = rte_cpu_to_le_16(dir_type);
+ req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
+ req.dir_ext = rte_cpu_to_le_16(dir_ext);
+ req.dir_attr = rte_cpu_to_le_16(dir_attr);
+ req.dir_data_length = rte_cpu_to_le_32(data_len);
req.host_src_addr = rte_cpu_to_le_64(dma_handle);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rte_free(buf);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- rte_free(buf);
return rc;
}
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index fe83d370..f8bb4ed9 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -296,6 +296,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
count = 0;
for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)rx_stats +
bnxt_rx_stats_strings[i].offset));
@@ -304,6 +305,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)tx_stats +
bnxt_tx_stats_strings[i].offset));
@@ -311,6 +313,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
}
/* The Tx drop pkts aka the Anti spoof coounter */
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
count++;
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 03d2652c..f5ed03f1 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -161,7 +161,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
+ PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
+ PKT_TX_TUNNEL_GENEVE))
long_bd = true;
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
@@ -222,16 +224,46 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_UDP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
PKT_TX_IIP_TCP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
+ PKT_TX_IIP_UDP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
+ PKT_TX_IIP_TCP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
PKT_TX_OIP_TCP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
+ PKT_TX_OIP_UDP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
+ PKT_TX_OIP_TCP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
PKT_TX_OIP_IIP_CKSUM) {
/* Outer IP, Inner IP CSO */
@@ -242,11 +274,23 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
+ PKT_TX_TCP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
+ PKT_TX_UDP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
+ PKT_TX_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
+ PKT_TX_OUTER_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
txbd1->mss = 0;
@@ -270,6 +314,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
}
txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
+ txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 2feac51d..0bc217f0 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -71,10 +71,20 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)
#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 5bac2605..293f9604 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -67,7 +67,7 @@ void bnxt_init_vnics(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
uint16_t max_vnics;
- int i, j;
+ int i;
max_vnics = bp->max_vnics;
STAILQ_INIT(&bp->free_vnic_list);
@@ -78,9 +78,6 @@ void bnxt_init_vnics(struct bnxt *bp)
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
- for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
- vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;
-
prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
STAILQ_INIT(&vnic->filter);
STAILQ_INIT(&vnic->flow_list);
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 875dc3c1..f18fb0a4 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -43,13 +43,9 @@ struct bnxt_vnic_info {
uint16_t fw_vnic_id; /* returned by Chimp during alloc */
uint16_t rss_rule;
-#define MAX_NUM_TRAFFIC_CLASSES 8
-#define MAX_NUM_RSS_QUEUES_PER_VNIC 16
-#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \
- MAX_NUM_TRAFFIC_CLASSES)
uint16_t start_grp_id;
uint16_t end_grp_id;
- uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC];
+ uint16_t *fw_grp_ids;
uint16_t dflt_ring_grp;
uint16_t mru;
uint16_t hash_type;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 8fd90ae9..e80338aa 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -675,9 +675,21 @@ rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
internals->user_defined_mac = 0;
if (internals->slave_count > 0) {
+ int slave_port;
+ /* Get the primary slave location based on the primary port
+ * number as, while slave_add(), we will keep the primary
+ * slave based on slave_count,but not based on the primary port.
+ */
+ for (slave_port = 0; slave_port < internals->slave_count;
+ slave_port++) {
+ if (internals->slaves[slave_port].port_id ==
+ internals->primary_port)
+ break;
+ }
+
/* Set MAC Address of Bonded Device */
if (mac_address_set(bonded_eth_dev,
- &internals->slaves[internals->primary_port].persisted_mac_addr)
+ &internals->slaves[slave_port].persisted_mac_addr)
!= 0) {
RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
return -1;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index e19a4a3e..8880231e 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1932,10 +1932,6 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
}
}
- /* Update all slave devices MACs*/
- if (mac_address_slaves_update(eth_dev) != 0)
- goto out_err;
-
/* If bonded device is configure in promiscuous mode then re-apply config */
if (internals->promiscuous_en)
bond_ethdev_promiscuous_enable(eth_dev);
@@ -1976,6 +1972,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
(void *)&rte_eth_devices[internals->port_id]);
}
+ /* Update all slave devices MACs*/
+ if (mac_address_slaves_update(eth_dev) != 0)
+ goto out_err;
+
if (internals->user_defined_primary_port)
bond_ethdev_primary_set(internals, internals->primary_port);
@@ -2048,7 +2048,6 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
- internals->active_slave_count = 0;
internals->link_status_polling_enabled = 0;
for (i = 0; i < internals->slave_count; i++)
internals->slaves[i].last_link_status = 0;
@@ -2535,10 +2534,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
rte_eth_link_get_nowait(port_id, &link);
if (link.link_status) {
- if (active_pos < internals->active_slave_count) {
- rte_spinlock_unlock(&internals->lsc_lock);
- return rc;
- }
+ if (active_pos < internals->active_slave_count)
+ goto link_update;
/* if no active slave ports then set this port to be primary port */
if (internals->active_slave_count < 1) {
@@ -2557,10 +2554,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
internals->primary_port == port_id)
bond_ethdev_primary_set(internals, port_id);
} else {
- if (active_pos == internals->active_slave_count) {
- rte_spinlock_unlock(&internals->lsc_lock);
- return rc;
- }
+ if (active_pos == internals->active_slave_count)
+ goto link_update;
/* Remove from active slave list */
deactivate_slave(bonded_eth_dev, port_id);
@@ -2579,6 +2574,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
}
}
+link_update:
/**
* Update bonded device link properties after any change to active
* slaves
@@ -2616,7 +2612,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
rte_spinlock_unlock(&internals->lsc_lock);
- return 0;
+ return rc;
}
static int
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 282e2e62..c3d46e02 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -4242,9 +4242,8 @@ struct flash_desc {
int t4_get_flash_params(struct adapter *adapter)
{
/*
- * Table for non-Numonix supported flash parts. Numonix parts are left
- * to the preexisting well-tested code. All flash parts have 64KB
- * sectors.
+ * Table for non-standard supported Flash parts. Note, all Flash
+ * parts must have 64KB sectors.
*/
static struct flash_desc supported_flash[] = {
{ 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
@@ -4253,7 +4252,7 @@ int t4_get_flash_params(struct adapter *adapter)
int ret;
u32 flashid = 0;
unsigned int part, manufacturer;
- unsigned int density, size;
+ unsigned int density, size = 0;
/**
* Issue a Read ID Command to the Flash part. We decode supported
@@ -4268,6 +4267,9 @@ int t4_get_flash_params(struct adapter *adapter)
if (ret < 0)
return ret;
+ /**
+ * Check to see if it's one of our non-standard supported Flash parts.
+ */
for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
if (supported_flash[part].vendor_and_model_id == flashid) {
adapter->params.sf_size =
@@ -4278,6 +4280,15 @@ int t4_get_flash_params(struct adapter *adapter)
}
}
+ /**
+ * Decode Flash part size. The code below looks repetative with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JADEC ID command. The only thing that
+ * we're guaranteed by the JADEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
manufacturer = flashid & 0xff;
switch (manufacturer) {
case 0x20: { /* Micron/Numonix */
@@ -4314,20 +4325,80 @@ int t4_get_flash_params(struct adapter *adapter)
case 0x22:
size = 1 << 28; /* 256MB */
break;
- default:
- dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
+ break;
+ }
- adapter->params.sf_size = size;
- adapter->params.sf_nsec = size / SF_SEC_SIZE;
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /**
+ * This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x17:
+ size = 1 << 26; /* 64MB */
+ break;
+ }
break;
}
- default:
- dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
- return -EINVAL;
+
+ case 0xc2: { /* Macronix */
+ /**
+ * This Density -> Size decoding table is taken from Macronix
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+
+ case 0xef: { /* Winbond */
+ /**
+ * This Density -> Size decoding table is taken from Winbond
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
}
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ dev_warn(adapter,
+ "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /**
+ * Store decoded Flash size and fall through into vetting code.
+ */
+ adapter->params.sf_size = size;
+ adapter->params.sf_nsec = size / SF_SEC_SIZE;
found:
/*
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 6ca4f318..f0566e93 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -473,6 +473,11 @@ enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
};
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_NIC = 1,
+ FW_IQ_IQTYPE_OFLD,
+};
+
struct fw_iq_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
@@ -606,6 +611,9 @@ struct fw_iq_cmd {
(((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN)
#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U)
+#define S_FW_IQ_CMD_IQTYPE 24
+#define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE)
+
#define S_FW_IQ_CMD_FL0CNGCHMAP 20
#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf
#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP)
diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h
index 03bba9fe..c1cc936d 100644
--- a/drivers/net/cxgbe/cxgbe_compat.h
+++ b/drivers/net/cxgbe/cxgbe_compat.h
@@ -226,15 +226,6 @@ static inline int cxgbe_fls(int x)
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
-/**
- * cxgbe_ffs - find first bit set
- * @x: the word to search
- */
-static inline int cxgbe_ffs(int x)
-{
- return x ? __builtin_ffs(x) : 0;
-}
-
static inline unsigned long ilog2(unsigned long n)
{
unsigned int e = 0;
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index fc10d958..51800845 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1689,6 +1689,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
unsigned int nb_refill;
+ u8 pciechan;
/* Size needs to be multiple of 16, including status entry. */
iq->size = cxgbe_roundup(iq->size, 16);
@@ -1708,6 +1709,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC |
V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
+
+ pciechan = pi->tx_chan;
+
c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
(sizeof(c) / 16));
c.type_to_iqandstindex =
@@ -1719,16 +1723,19 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize =
- htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :
- pi->tx_chan) |
+ htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
if (cong >= 0)
- c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
- F_FW_IQ_CMD_IQRO);
+ c.iqns_to_fl0congen =
+ htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ V_FW_IQ_CMD_IQTYPE(cong ?
+ FW_IQ_IQTYPE_NIC :
+ FW_IQ_IQTYPE_OFLD) |
+ F_FW_IQ_CMD_IQRO);
if (fl) {
struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 6f671fe0..fe2cb52e 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -231,7 +231,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
- for (i = 0; i < DPNI_MAX_DPBP; i++) {
+ for (i = 0; i < cmd_params->num_dpbp; i++) {
cmd_params->pool[i].dpbp_id =
cpu_to_le16(cfg->pools[i].dpbp_id);
cmd_params->pool[i].priority_mask =
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index accecf51..ea78e8d7 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -116,11 +116,13 @@ typedef uint64_t dma_addr_t;
#define ENA_MIN16(x, y) RTE_MIN((x), (y))
#define ENA_MIN8(x, y) RTE_MIN((x), (y))
+#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
#define U64_C(x) x ## ULL
#define BIT(nr) (1UL << (nr))
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#ifdef RTE_LIBRTE_ENA_COM_DEBUG
#define ena_trc_dbg(format, arg...) \
@@ -189,10 +191,15 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
- phys = mz->iova; \
handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
} while (0)
#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
({ ENA_TOUCH(size); ENA_TOUCH(phys); \
@@ -207,21 +214,20 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, node, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
- phys = mz->iova; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
} while (0)
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
do { \
- const struct rte_memzone *mz; \
- char z_name[RTE_MEMZONE_NAMESIZE]; \
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
- snprintf(z_name, sizeof(z_name), \
- "ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, node, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
+ virt = rte_zmalloc_socket(NULL, size, 0, node); \
} while (0)
#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index aa24ef36..4e526567 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -709,7 +709,7 @@ static int ena_link_update(struct rte_eth_dev *dev,
struct rte_eth_link *link = &dev->data->dev_link;
link->link_status = 1;
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = ETH_SPEED_NUM_NONE;
link->link_duplex = ETH_LINK_FULL_DUPLEX;
return 0;
@@ -907,7 +907,7 @@ static int ena_start(struct rte_eth_dev *dev)
return rc;
if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
- ETH_MQ_RX_RSS_FLAG) {
+ ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
rc = ena_rss_init_default(adapter);
if (rc)
return rc;
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a43fddc5..8bbff18c 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -139,6 +139,7 @@ struct enic {
u8 adv_filters;
u32 flow_filter_mode;
u8 filter_tags;
+ uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
unsigned int flags;
unsigned int priv_flags;
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 98391b00..46ea2cf4 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -40,6 +40,7 @@
#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
#include <rte_string_fns.h>
#include "vnic_intr.h"
@@ -66,6 +67,8 @@ static const struct rte_pci_id pci_id_enic_map[] = {
{.vendor_id = 0, /* sentinel */},
};
+#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
+
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
enum rte_filter_op filter_op, void *arg)
@@ -644,6 +647,64 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.filter_ctrl = enicpmd_dev_filter_ctrl,
};
+static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "trunk") == 0) {
+ /* Trunk mode: always tag */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
+ } else if (strcmp(value, "untag") == 0) {
+ /* Untag default VLAN mode: untag if VLAN = default VLAN */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "priority") == 0) {
+ /*
+ * Priority-tag default VLAN mode: priority tag (VLAN header
+ * with ID=0) if VLAN = default
+ */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "pass") == 0) {
+ /* Pass through mode: do not touch tags */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
+ ": expected=trunk|untag|priority|pass given=%s\n",
+ value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int enic_check_devargs(struct rte_eth_dev *dev)
+{
+ static const char *const valid_keys[] = {
+ ENIC_DEVARG_IG_VLAN_REWRITE,
+ NULL};
+ struct enic *enic = pmd_priv(dev);
+ struct rte_kvargs *kvlist;
+
+ ENICPMD_FUNC_TRACE();
+
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+ if (!dev->device->devargs)
+ return 0;
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+ if (rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
+ enic_parse_ig_vlan_rewrite, enic) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
* It returns 0 on success.
@@ -653,6 +714,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pdev;
struct rte_pci_addr *addr;
struct enic *enic = pmd_priv(eth_dev);
+ int err;
ENICPMD_FUNC_TRACE();
@@ -670,6 +732,9 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
addr->domain, addr->bus, addr->devid, addr->function);
+ err = enic_check_devargs(eth_dev);
+ if (err)
+ return err;
return enic_probe(enic);
}
@@ -695,3 +760,5 @@ static struct rte_pci_driver rte_enic_pmd = {
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_enic,
+ ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 6356c10c..f946abf2 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -782,25 +782,23 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
static int instance;
wq->socket_id = socket_id;
- if (nb_desc) {
- if (nb_desc > enic->config.wq_desc_count) {
- dev_warning(enic,
- "WQ %d - number of tx desc in cmd line (%d)"\
- "is greater than that in the UCSM/CIMC adapter"\
- "policy. Applying the value in the adapter "\
- "policy (%d)\n",
- queue_idx, nb_desc, enic->config.wq_desc_count);
- } else if (nb_desc != enic->config.wq_desc_count) {
- enic->config.wq_desc_count = nb_desc;
- dev_info(enic,
- "TX Queues - effective number of descs:%d\n",
- nb_desc);
- }
+ if (nb_desc > enic->config.wq_desc_count) {
+ dev_warning(enic,
+ "WQ %d - number of tx desc in cmd line (%d) "
+ "is greater than that in the UCSM/CIMC adapter "
+ "policy. Applying the value in the adapter "
+ "policy (%d)\n",
+ queue_idx, nb_desc, enic->config.wq_desc_count);
+ nb_desc = enic->config.wq_desc_count;
+ } else if (nb_desc != enic->config.wq_desc_count) {
+ dev_info(enic,
+ "TX Queues - effective number of descs:%d\n",
+ nb_desc);
}
/* Allocate queue resources */
err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
- enic->config.wq_desc_count,
+ nb_desc,
sizeof(struct wq_enet_desc));
if (err) {
dev_err(enic, "error in allocation of wq\n");
@@ -808,7 +806,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
}
err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
- socket_id, enic->config.wq_desc_count,
+ socket_id, nb_desc,
sizeof(struct cq_enet_wq_desc));
if (err) {
vnic_wq_free(wq);
@@ -1402,8 +1400,10 @@ int enic_probe(struct enic *enic)
}
/* Set ingress vlan rewrite mode before vnic initialization */
+ dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
+ enic->ig_vlan_rewrite_mode);
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
- IG_VLAN_REWRITE_MODE_PASS_THRU);
+ enic->ig_vlan_rewrite_mode);
if (err) {
dev_err(enic,
"Failed to set ingress vlan rewrite mode, aborting.\n");
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 85baff9e..711c6e7b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1211,6 +1211,13 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->bus.func = pci_dev->addr.function;
hw->adapter_stopped = 0;
+ /*
+ * Switch Tag value should not be identical to either the First Tag
+ * or Second Tag values. So set something other than common Ethertype
+ * for internal switching.
+ */
+ hw->switch_tag = 0xffff;
+
/* Check if need to support multi-driver */
i40e_support_multi_driver(dev);
@@ -1976,27 +1983,40 @@ i40e_phy_conf_link(struct i40e_hw *hw,
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
enum i40e_aq_phy_type cnt;
+ uint8_t avail_speed;
uint32_t phy_type_mask = 0;
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_LOW_POWER;
- const uint8_t advt = I40E_LINK_SPEED_40GB |
- I40E_LINK_SPEED_25GB |
- I40E_LINK_SPEED_10GB |
- I40E_LINK_SPEED_1GB |
- I40E_LINK_SPEED_100MB;
int ret = -ENOTSUP;
+ /* To get phy capabilities of available speeds. */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
+ status);
+ return ret;
+ }
+ avail_speed = phy_ab.link_speed;
+ /* To get the current phy config. */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
NULL);
- if (status)
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
+ status);
return ret;
+ }
- /* If link already up, no need to set up again */
- if (is_up && phy_ab.phy_type != 0)
+ /* If link needs to go up and it is in autoneg mode the speed is OK,
+ * no need to set up again.
+ */
+ if (is_up && phy_ab.phy_type != 0 &&
+ abilities & I40E_AQ_PHY_AN_ENABLED &&
+ phy_ab.link_speed != 0)
return I40E_SUCCESS;
memset(&phy_conf, 0, sizeof(phy_conf));
@@ -2005,18 +2025,20 @@ i40e_phy_conf_link(struct i40e_hw *hw,
abilities &= ~mask;
abilities |= phy_ab.abilities & mask;
- /* update ablities and speed */
- if (abilities & I40E_AQ_PHY_AN_ENABLED)
- phy_conf.link_speed = advt;
- else
- phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
-
phy_conf.abilities = abilities;
+ /* If link needs to go up, but the force speed is not supported,
+ * Warn users and config the default available speeds.
+ */
+ if (is_up && !(force_speed & avail_speed)) {
+ PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
+ phy_conf.link_speed = avail_speed;
+ } else {
+ phy_conf.link_speed = is_up ? force_speed : avail_speed;
+ }
-
- /* To enable link, phy_type mask needs to include each type */
- for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ /* PHY type mask needs to include each type except PHY type extension */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
phy_type_mask |= 1 << cnt;
/* use get_phy_abilities_resp value for the rest */
@@ -2049,11 +2071,18 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *conf = &dev->data->dev_conf;
+ if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_100M;
+ }
speed = i40e_parse_link_speeds(conf->link_speeds);
- abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- abilities |= I40E_AQ_PHY_LINK_ENABLED;
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
+ I40E_AQ_PHY_AN_ENABLED |
+ I40E_AQ_PHY_LINK_ENABLED;
return i40e_phy_conf_link(hw, abilities, speed, true);
}
@@ -2160,13 +2189,6 @@ i40e_dev_start(struct rte_eth_dev *dev)
}
/* Apply link configure */
- if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G)) {
- PMD_DRV_LOG(ERR, "Invalid link setting");
- goto err_up;
- }
ret = i40e_apply_link_speed(dev);
if (I40E_SUCCESS != ret) {
PMD_DRV_LOG(ERR, "Fail to apply link setting");
@@ -9768,6 +9790,60 @@ i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+/*
+ * GL_SWR_PM_UP_THR:
+ * The value is not impacted from the link speed, its value is set according
+ * to the total number of ports for a better pipe-monitor configuration.
+ */
+static bool
+i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
+{
+#define I40E_GL_SWR_PM_EF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
+
+#define I40E_GL_SWR_PM_SF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
+
+ static const struct {
+ uint16_t device_id;
+ uint32_t val;
+ } swr_pm_table[] = {
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
+
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
+ };
+ uint32_t i;
+
+ if (value == NULL) {
+ PMD_DRV_LOG(ERR, "value is NULL");
+ return false;
+ }
+
+ for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
+ if (hw->device_id == swr_pm_table[i].device_id) {
+ *value = swr_pm_table[i].val;
+
+ PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
+ "value - 0x%08x",
+ hw->device_id, *value);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int
i40e_dev_sync_phy_type(struct i40e_hw *hw)
{
@@ -9832,13 +9908,16 @@ i40e_configure_registers(struct i40e_hw *hw)
}
if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
- I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_SF_VALUE;
- else /* For X710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_EF_VALUE;
+ uint32_t cfg_val;
+
+ if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
+ PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
+ "GL_SWR_PM_UP_THR value fixup",
+ hw->device_id);
+ continue;
+ }
+
+ reg_table[i].val = cfg_val;
}
ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index ab1163cc..2d25873d 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1688,6 +1688,7 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
else if (is_exist == 3)
PMD_DRV_LOG(ERR, "Profile of different group already exists");
+ i40e_update_customized_info(dev, buff, size, op);
rte_free(profile_info_sec);
return -EEXIST;
}
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 51ddcfd4..762f1ad3 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -123,6 +123,11 @@
#define IXGBE_5TUPLE_MAX_PRI 7
#define IXGBE_5TUPLE_MIN_PRI 1
+/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros */
+#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000
+/* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros */
+#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0
+
#define IXGBE_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index c1176472..02adfa43 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -423,9 +423,12 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- mac_mask = info->mask.mac_addr_byte_mask;
- fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
- & IXGBE_FDIRIP6M_INNER_MAC;
+ fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
+ mac_mask = info->mask.mac_addr_byte_mask &
+ (IXGBE_FDIRIP6M_INNER_MAC >>
+ IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
+ fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
+ IXGBE_FDIRIP6M_INNER_MAC);
switch (info->mask.tunnel_type_mask) {
case 0:
@@ -800,10 +803,19 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
input->formatted.inner_mac,
fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
sizeof(input->formatted.inner_mac));
- input->formatted.tunnel_type =
- fdir_filter->input.flow.tunnel_flow.tunnel_type;
+ if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_VXLAN)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
+ else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
+ else
+ PMD_DRV_LOG(ERR, "invalid tunnel type argument.");
+
input->formatted.tni_vni =
- fdir_filter->input.flow.tunnel_flow.tunnel_id;
+ fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
}
return 0;
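A minimal sketch (not part of the patch) of the VNI packing that the new ">> 8" above assumes: the 24-bit VXLAN VNI sits in the upper 24 bits of tunnel_id, so shifting right by 8 recovers it. The value below is illustrative only, and the exact byte order expected by the rte_eth_fdir API is not restated here.
    uint32_t vni = 0xABCDEF;            /* 24-bit VNI from the VXLAN header        */
    uint32_t tunnel_id = vni << 8;      /* as carried in tunnel_flow.tunnel_id     */
    uint32_t tni_vni = tunnel_id >> 8;  /* value programmed by the driver: 0xABCDEF */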
@@ -1030,8 +1042,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
} else {
/* tunnel mode */
- if (input->formatted.tunnel_type !=
- RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ if (input->formatted.tunnel_type)
tunnel_type = 0x80000000;
tunnel_type |= addr_high;
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
@@ -1039,6 +1050,9 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
input->formatted.tni_vni);
}
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
}
/* record vlan (little-endian) and flex_bytes(big-endian) */
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 07abb343..e60ecce7 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1665,7 +1665,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
} else {
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2370,7 +2371,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Get the VxLAN info */
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_VXLAN;
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
/* Only care about VNI, others should be masked. */
if (!item->mask) {
@@ -2422,17 +2423,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
vxlan_spec = (const struct rte_flow_item_vxlan *)
item->spec;
rte_memcpy(((uint8_t *)
- &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ &rule->ixgbe_fdir.formatted.tni_vni),
vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
- rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
- rule->ixgbe_fdir.formatted.tni_vni);
}
}
/* Get the NVGRE info */
if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_NVGRE;
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
/**
* Only care about flags0, flags1, protocol and TNI,
@@ -2524,7 +2523,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* tni is a 24-bits bit field */
rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
- rule->ixgbe_fdir.formatted.tni_vni <<= 8;
}
}
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 4d7bd5f0..d39ad3e0 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -434,6 +434,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
int err = 0;
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr device_attr;
+ struct ibv_device_attr_ex device_attr_ex;
struct mlx4_conf conf = {
.ports.present = 0,
};
@@ -494,19 +495,24 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_dev = list[i];
DEBUG("device opened");
if (ibv_query_device(attr_ctx, &device_attr)) {
- rte_errno = ENODEV;
+ err = ENODEV;
goto error;
}
INFO("%u port(s) detected", device_attr.phys_port_cnt);
conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
if (mlx4_args(pci_dev->device.devargs, &conf)) {
ERROR("failed to process device arguments");
- rte_errno = EINVAL;
+ err = EINVAL;
goto error;
}
/* Use all ports when none are defined */
if (!conf.ports.enabled)
conf.ports.enabled = conf.ports.present;
+ /* Retrieve extended device attributes. */
+ if (ibv_query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
+ err = ENODEV;
+ goto error;
+ }
for (i = 0; i < device_attr.phys_port_cnt; i++) {
uint32_t port = i + 1; /* ports are indexed from one */
struct ibv_context *ctx = NULL;
@@ -522,18 +528,18 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("using port %u", port);
ctx = ibv_open_device(ibv_dev);
if (ctx == NULL) {
- rte_errno = ENODEV;
+ err = ENODEV;
goto port_error;
}
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
if (err) {
- rte_errno = err;
- ERROR("port query failed: %s", strerror(rte_errno));
+ err = ENODEV;
+ ERROR("port query failed: %s", strerror(err));
goto port_error;
}
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
- rte_errno = ENOTSUP;
+ err = ENOTSUP;
ERROR("port %d is not configured in Ethernet mode",
port);
goto port_error;
@@ -543,15 +549,16 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
port, ibv_port_state_str(port_attr.state),
port_attr.state);
/* Make asynchronous FD non-blocking to handle interrupts. */
- if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
+ err = mlx4_fd_set_non_blocking(ctx->async_fd);
+ if (err) {
ERROR("cannot make asynchronous FD non-blocking: %s",
- strerror(rte_errno));
+ strerror(err));
goto port_error;
}
/* Allocate protection domain. */
pd = ibv_alloc_pd(ctx);
if (pd == NULL) {
- rte_errno = ENOMEM;
+ err = ENOMEM;
ERROR("PD allocation failure");
goto port_error;
}
@@ -560,7 +567,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
sizeof(*priv),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- rte_errno = ENOMEM;
+ err = ENOMEM;
ERROR("priv allocation failure");
goto port_error;
}
@@ -581,10 +588,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
DEBUG("L2 tunnel checksum offloads are %ssupported",
(priv->hw_csum_l2tun ? "" : "not "));
+ priv->hw_rss_max_qps =
+ device_attr_ex.rss_caps.max_rwq_indirection_table_size;
+ DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
/* Configure the first MAC address by default. */
- if (mlx4_get_mac(priv, &mac.addr_bytes)) {
+ err = mlx4_get_mac(priv, &mac.addr_bytes);
+ if (err) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
- " (rte_errno: %s)", strerror(rte_errno));
+ " (error: %s)", strerror(err));
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
@@ -617,8 +628,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev = rte_eth_dev_allocate(name);
}
if (eth_dev == NULL) {
+ err = ENOMEM;
ERROR("can not allocate rte ethdev");
- rte_errno = ENOMEM;
goto port_error;
}
eth_dev->data->dev_private = priv;
@@ -663,8 +674,6 @@ port_error:
rte_eth_dev_release_port(eth_dev);
break;
}
- if (i == device_attr.phys_port_cnt)
- return 0;
/*
* XXX if something went wrong in the loop above, there is a resource
* leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
@@ -676,8 +685,9 @@ error:
claim_zero(ibv_close_device(attr_ctx));
if (list)
ibv_free_device_list(list);
- assert(rte_errno >= 0);
- return -rte_errno;
+ if (err)
+ rte_errno = err;
+ return -err;
}
static const struct rte_pci_id mlx4_pci_id_map[] = {
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 41d652ba..30f12eba 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -129,6 +129,7 @@ struct priv {
uint32_t rss_init:1; /**< Common RSS context is initialized. */
uint32_t hw_csum:1; /* Checksum offload is supported. */
uint32_t hw_csum_l2tun:1; /* Checksum support for L2 tunnels. */
+ uint32_t hw_rss_max_qps; /**< Max Rx Queues supported by RSS. */
struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 06030c2c..b3f70d6c 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -365,6 +365,12 @@ mlx4_rss_init(struct priv *priv)
if (priv->rss_init)
return 0;
+ if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
+ ERROR("RSS does not support more than %d queues",
+ priv->hw_rss_max_qps);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
/* Prepare range for RSS contexts before creating the first WQ. */
ret = mlx4dv_set_context_attr(priv->ctx,
MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index a3984eb9..c62ad118 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -145,7 +145,52 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q sh -- '$<' '$@' \
HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \
infiniband/verbs.h \
- enum IBV_FLOW_SPEC_ACTION_COUNT \
+ type 'struct ibv_counter_set_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_STATIC_ASSERT \
+ /usr/include/assert.h \
+ define static_assert \
$(AUTOCONF_OUTPUT)
# Create mlx5_autoconf.h or update it in case it differs from the new one.
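For context, each probe added above is expected to emit one HAVE_* macro into mlx5_autoconf.h when the corresponding symbol is found in the system headers; a hedged sketch of the generated entries (the exact output format of auto-config-h.sh is an assumption):
    /* mlx5_autoconf.h (generated, sketch only) */
    #define HAVE_SUPPORTED_40000baseKR4_Full 1
    #define HAVE_STATIC_ASSERT 1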
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 10ce3359..36f3a056 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -345,6 +345,10 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.dev_set_link_down = mlx5_set_link_down,
.dev_set_link_up = mlx5_set_link_up,
.dev_close = mlx5_dev_close,
+ .promiscuous_enable = mlx5_promiscuous_enable,
+ .promiscuous_disable = mlx5_promiscuous_disable,
+ .allmulticast_enable = mlx5_allmulticast_enable,
+ .allmulticast_disable = mlx5_allmulticast_disable,
.link_update = mlx5_link_update,
.stats_get = mlx5_stats_get,
.stats_reset = mlx5_stats_reset,
@@ -680,7 +684,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
int i;
struct mlx5dv_context attrs_out;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- struct ibv_counter_set_description cs_desc;
+ struct ibv_counter_set_description cs_desc = { .counter_type = 0 };
#endif
assert(pci_drv == &mlx5_driver);
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index d7063576..9c64bb33 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -92,10 +92,11 @@
#define MLX5_VPMD_MIN_TXQS 4
/* Threshold of buffer replenishment for vectorized Rx. */
-#define MLX5_VPMD_RXQ_RPLNSH_THRESH 64U
+#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
+ (RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))
/* Maximum size of burst for vectorized Rx. */
-#define MLX5_VPMD_RX_MAX_BURST MLX5_VPMD_RXQ_RPLNSH_THRESH
+#define MLX5_VPMD_RX_MAX_BURST 64U
/*
* Maximum size of burst for vectorized Tx. This is related to the maximum size
@@ -123,4 +124,9 @@
*/
#define MLX5_UAR_OFFSET (1ULL << 32)
+/* Definition of static_assert found in /usr/include/assert.h */
+#ifndef HAVE_STATIC_ASSERT
+#define static_assert _Static_assert
+#endif
+
#endif /* RTE_PMD_MLX5_DEFS_H_ */
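A quick worked example of the new size-dependent replenishment threshold defined above, MLX5_VPMD_RXQ_RPLNSH_THRESH(n) = min(MLX5_VPMD_RX_MAX_BURST, n / 4), with illustrative ring sizes:
    /* n = 512 descriptors -> min(64, 128) = 64 (same as the previous fixed 64U) */
    /* n =  64 descriptors -> min(64,  16) = 16 (small rings replenish earlier)  */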
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5edc7511..e441483a 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -66,6 +66,32 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/* Supported speed values found in /usr/include/linux/ethtool.h */
+#ifndef HAVE_SUPPORTED_40000baseKR4_Full
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseCR4_Full
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseSR4_Full
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseLR4_Full
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseKR4_Full
+#define SUPPORTED_56000baseKR4_Full (1 << 27)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseCR4_Full
+#define SUPPORTED_56000baseCR4_Full (1 << 28)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseSR4_Full
+#define SUPPORTED_56000baseSR4_Full (1 << 29)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseLR4_Full
+#define SUPPORTED_56000baseLR4_Full (1 << 30)
+#endif
+
/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 57b654c3..1822c2bd 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -63,12 +63,6 @@
#define MLX5_IPV6 6
#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-struct ibv_counter_set_init_attr {
- int dummy;
-};
-struct ibv_flow_spec_counter_action {
- int dummy;
-};
struct ibv_counter_set {
int dummy;
};
@@ -885,10 +879,17 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
sizeof(struct ibv_flow_spec_action_tag);
}
if (parser->count) {
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
for (i = 0; i != hash_rxq_init_n; ++i)
parser->queue[i].offset += size;
+#else
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ items,
+ "Count action supported only on "
+ "MLNX_OFED_4.2 and above");
+#endif
}
return 0;
exit_item_not_supported:
@@ -2959,10 +2960,21 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
struct ibv_spec_header *flow_h;
void *flow_spec;
unsigned int specs_n;
- unsigned int queue_id = parser.drop ? HASH_RXQ_ETH :
- parser.layer;
+ unsigned int queue_id;
- attr = parser.queue[queue_id].ibv_attr;
+ /*
+ * Search for a non-empty ibv_attr. There should be only one
+ * because no RSS action is allowed for FDIR. This should have
+ * been referenced directly by parser.layer but due to a bug in
+ * mlx5_flow_convert() as of v17.11.4, parser.layer isn't
+ * correct. This bug will have to be addressed later.
+ */
+ for (queue_id = 0; queue_id != hash_rxq_init_n; ++queue_id) {
+ attr = parser.queue[queue_id].ibv_attr;
+ if (attr)
+ break;
+ }
+ assert(!parser.drop || queue_id == HASH_RXQ_ETH);
flow_attr = flow->frxq[queue_id].ibv_attr;
/* Compare first the attributes. */
if (!flow_attr ||
@@ -2991,16 +3003,20 @@ wrong_flow:
/* The flow does not match. */
continue;
}
- ret = rte_errno; /* Save rte_errno before cleanup. */
if (flow)
mlx5_flow_list_destroy(dev, &priv->flows, flow);
exit:
+ if (ret)
+ ret = rte_errno; /* Save rte_errno before cleanup. */
for (i = 0; i != hash_rxq_init_n; ++i) {
if (parser.queue[i].ibv_attr)
rte_free(parser.queue[i].ibv_attr);
}
- rte_errno = ret; /* Restore rte_errno. */
- return -rte_errno;
+ if (ret) {
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
+ }
+ return 0;
}
/**
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 23eae7c1..617138c1 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -60,9 +60,17 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret;
dev->data->promiscuous = 1;
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable promiscuous mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
ret = mlx5_traffic_restart(dev);
if (ret)
DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s",
@@ -96,9 +104,17 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret;
dev->data->all_multicast = 1;
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable allmulticast mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
ret = mlx5_traffic_restart(dev);
if (ret)
DRV_LOG(ERR, "port %u cannot enable allmulicast mode: %s",
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2e003aea..1bbce3b7 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -740,6 +740,8 @@ next_wqe:
/* Check whether completion threshold has been reached. */
comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
last_wqe->ctrl2 = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
@@ -951,6 +953,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (comp >= MLX5_TX_COMP_THRESH) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
@@ -1243,6 +1247,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
if (comp >= MLX5_TX_COMP_THRESH) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
@@ -1370,8 +1376,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
@@ -1584,13 +1588,14 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
(1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
txq->mpw_comp = txq->wqe_ci;
- txq->cq_pi++;
} else {
txq->elts_comp += j;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 29019f79..dac3b39f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -185,7 +185,9 @@ struct mlx5_txq_data {
uint16_t elts_comp; /* Counter since last completion request. */
uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
+#ifndef NDEBUG
uint16_t cq_pi; /* Producer index for completion queue. */
+#endif
uint16_t wqe_ci; /* Consumer index for work queue. */
uint16_t wqe_pi; /* Producer index for work queue. */
uint16_t elts_n:4; /* (*elts)[] length (in log2). */
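The assertions added in mlx5_rxtx.c and in the vectorized Tx paths all rely on the same free-slot arithmetic on this now debug-only cq_pi field; a self-contained sketch, with the helper name chosen here only for illustration:
    #include <assert.h>
    #include <stdint.h>
    /* cqe_n is log2 of the CQE ring size; cq_pi/cq_ci are free-running
     * producer/consumer indexes, so their uint16_t difference is the number
     * of completions still outstanding.
     */
    static inline void request_completion(uint16_t cqe_n, uint16_t *cq_pi, uint16_t cq_ci)
    {
        /* A CQE slot must always be available before requesting one more. */
        assert((1u << cqe_n) - (uint16_t)(*cq_pi - cq_ci));
        (*cq_pi)++;
    }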
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 1f08ed0b..d504e2ae 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -106,9 +106,9 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
unsigned int i;
- assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+ assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
- assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+ assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index cf424778..e748615e 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -202,10 +202,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
- ++txq->cq_pi;
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -304,9 +305,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
txq->elts_comp += pkts_n;
} else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request a completion. */
txq->elts_comp = 0;
- ++txq->cq_pi;
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -754,7 +756,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 79314292..7e8c9b88 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -203,10 +203,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
- ++txq->cq_pi;
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -305,9 +306,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
txq->elts_comp += pkts_n;
} else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request a completion. */
txq->elts_comp = 0;
- ++txq->cq_pi;
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -735,7 +737,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 7ab31000..97f90c07 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -62,6 +62,12 @@ mlx5_socket_init(struct rte_eth_dev *dev)
int flags;
/*
+ * Close the last socket that was used to communicate
+ * with the secondary process.
+ */
+ if (priv->primary_socket)
+ mlx5_socket_uninit(dev);
+ /*
* Initialise the socket to communicate with the secondary
* process.
*/
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 214543f8..9a1d6f95 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -73,7 +73,6 @@ mlx5_txq_start(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- /* Add memory regions to Tx queues. */
for (i = 0; i != priv->txqs_n; ++i) {
unsigned int idx = 0;
struct mlx5_mr *mr;
@@ -94,12 +93,17 @@ mlx5_txq_start(struct rte_eth_dev *dev)
}
}
ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
- if (ret)
+ if (ret) {
+ /* Adjust index for rollback. */
+ i = priv->txqs_n - 1;
goto error;
+ }
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_txq_stop(dev);
+ do {
+ mlx5_txq_release(dev, i);
+ } while (i-- != 0);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -151,7 +155,9 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_rxq_stop(dev);
+ do {
+ mlx5_rxq_release(dev, i);
+ } while (i-- != 0);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -174,28 +180,28 @@ mlx5_dev_start(struct rte_eth_dev *dev)
struct mlx5_mr *mr = NULL;
int ret;
- dev->data->dev_started = 1;
+ DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
ret = mlx5_flow_create_drop_queue(dev);
if (ret) {
DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
goto error;
}
- DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues",
- dev->data->port_id);
rte_mempool_walk(mlx5_mp2mr_iter, priv);
ret = mlx5_txq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
- goto error;
+ return -rte_errno;
}
ret = mlx5_rxq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
- goto error;
+ mlx5_txq_stop(dev);
+ return -rte_errno;
}
+ dev->data->dev_started = 1;
ret = mlx5_rx_intr_vec_enable(dev);
if (ret) {
DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
@@ -254,8 +260,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
dev->tx_pkt_burst = removed_tx_burst;
rte_wmb();
usleep(1000 * priv->rxqs_n);
- DRV_LOG(DEBUG, "port %u cleaning up and destroying hash Rx queues",
- dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
mlx5_flow_stop(dev, &priv->flows);
mlx5_traffic_disable(dev);
mlx5_rx_intr_vec_disable(dev);
@@ -336,9 +341,8 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
struct rte_flow_item_vlan vlan_spec = {
.tci = rte_cpu_to_be_16(vlan),
};
- struct rte_flow_item_vlan vlan_mask = {
- .tci = 0xffff,
- };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
&vlan_spec, &vlan_mask);
@@ -375,9 +379,8 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
struct rte_flow_item_vlan vlan_spec = {
.tci = rte_cpu_to_be_16(vlan),
};
- struct rte_flow_item_vlan vlan_mask = {
- .tci = 0xffff,
- };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
ret = mlx5_ctrl_flow_vlan(dev, &unicast,
&unicast_mask,
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index a5c6b585..760ac92d 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -467,7 +467,9 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
(volatile struct mlx5_cqe (*)[])
(uintptr_t)cq_info.buf;
txq_data->cq_ci = 0;
+#ifndef NDEBUG
txq_data->cq_pi = 0;
+#endif
txq_data->wqe_ci = 0;
txq_data->wqe_pi = 0;
txq_ibv->qp = tmpl.qp;
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c
index d7e9cefa..2fc24785 100644
--- a/drivers/net/mrvl/mrvl_ethdev.c
+++ b/drivers/net/mrvl/mrvl_ethdev.c
@@ -1375,9 +1375,12 @@ mrvl_rx_queue_release(void *rxq)
if (core_id == LCORE_ID_ANY)
core_id = 0;
+ if (!q)
+ return;
+
hif = mrvl_get_hif(q->priv, core_id);
- if (!q || !hif)
+ if (!hif)
return;
tc = q->priv->rxq_map[q->queue_id].tc;
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index d9cd0473..8ab28dde 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -2292,11 +2292,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->wr_p = 0;
pkt_size -= dma_size;
- if (!pkt_size)
- /* End of packet */
- txds->offset_eop |= PCIE_DESC_TX_EOP;
+
+ /*
+ * Set EOP on the last segment; single-segment packets
+ * are the common (likely) case.
+ */
+ if (likely(!pkt_size))
+ txds->offset_eop = PCIE_DESC_TX_EOP;
else
- txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
+ txds->offset_eop = 0;
pkt = pkt->next;
/* Referencing next free TX descriptor */
@@ -2649,6 +2653,14 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ /* NFP cannot handle DMA addresses requiring more than 40 bits */
+ if (rte_eal_check_dma_mask(40) < 0) {
+ RTE_LOG(INFO, PMD, "device %s cannot be used:",
+ pci_dev->device.name);
+ RTE_LOG(INFO, PMD, "\trestricted dma mask to 40 bits!\n");
+ return -ENODEV;
+ }
+
if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
(pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
port = get_pf_port_number(eth_dev->data->name);
@@ -3045,14 +3057,16 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_nfp_net_pf_pmd = {
.id_table = pci_id_nfp_pf_net_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = nfp_pf_pci_probe,
.remove = eth_nfp_pci_remove,
};
static struct rte_pci_driver rte_nfp_net_vf_pmd = {
.id_table = pci_id_nfp_vf_net_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_nfp_pci_probe,
.remove = eth_nfp_pci_remove,
};
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 33c6e78e..13dfbbc0 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -377,6 +377,9 @@ octeontx_dev_close(struct rte_eth_dev *dev)
rte_free(txq);
}
+
+ dev->tx_pkt_burst = NULL;
+ dev->rx_pkt_burst = NULL;
}
static int
@@ -470,9 +473,6 @@ octeontx_dev_stop(struct rte_eth_dev *dev)
ret);
return;
}
-
- dev->tx_pkt_burst = NULL;
- dev->rx_pkt_burst = NULL;
}
static void
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 39947105..f71ba001 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -687,19 +687,19 @@ static const struct eth_dev_ops ops = {
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
- return -1;
+ if (rx->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
+ return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = pcap_filename;
- rx->queue[i].type = key;
- }
+ rx->queue[rx->num_of_queue].pcap = pcap;
+ rx->queue[rx->num_of_queue].name = pcap_filename;
+ rx->queue[rx->num_of_queue].type = key;
+ rx->num_of_queue++;
return 0;
}
@@ -711,19 +711,19 @@ open_rx_pcap(const char *key, const char *value, void *extra_args)
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *dumpers = extra_args;
pcap_dumper_t *dumper;
- for (i = 0; i < dumpers->num_of_queue; i++) {
- if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
- return -1;
+ if (dumpers->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
+ return -1;
- dumpers->queue[i].dumper = dumper;
- dumpers->queue[i].name = pcap_filename;
- dumpers->queue[i].type = key;
- }
+ dumpers->queue[dumpers->num_of_queue].dumper = dumper;
+ dumpers->queue[dumpers->num_of_queue].name = pcap_filename;
+ dumpers->queue[dumpers->num_of_queue].type = key;
+ dumpers->num_of_queue++;
return 0;
}
@@ -754,18 +754,18 @@ open_rx_tx_iface(const char *key, const char *value, void *extra_args)
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *iface = value;
struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = iface;
- rx->queue[i].type = key;
- }
+ if (rx->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ rx->queue[rx->num_of_queue].pcap = pcap;
+ rx->queue[rx->num_of_queue].name = iface;
+ rx->queue[rx->num_of_queue].type = key;
+ rx->num_of_queue++;
return 0;
}
@@ -776,18 +776,18 @@ open_rx_iface(const char *key, const char *value, void *extra_args)
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *iface = value;
struct pmd_devargs *tx = extra_args;
pcap_t *pcap;
- for (i = 0; i < tx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- tx->queue[i].pcap = pcap;
- tx->queue[i].name = iface;
- tx->queue[i].type = key;
- }
+ if (tx->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ tx->queue[tx->num_of_queue].pcap = pcap;
+ tx->queue[tx->num_of_queue].name = iface;
+ tx->queue[tx->num_of_queue].type = key;
+ tx->num_of_queue++;
return 0;
}
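With the per-occurrence handling above, every rx_pcap/rx_iface/tx_pcap/tx_iface key now opens exactly one queue, capped at RTE_PMD_PCAP_MAX_QUEUES. A hedged usage sketch (vdev name and file paths are placeholders):
    --vdev 'net_pcap0,rx_pcap=/tmp/in0.pcap,rx_pcap=/tmp/in1.pcap,tx_iface=eth0'
Here the two rx_pcap occurrences create two Rx queues and the single tx_iface occurrence creates one Tx queue.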
@@ -977,15 +977,8 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
* We check whether we want to open a RX stream from a real NIC or a
* pcap file
*/
- pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
- if (pcaps.num_of_queue)
- is_rx_pcap = 1;
- else
- pcaps.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_RX_IFACE_ARG);
-
- if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
+ pcaps.num_of_queue = 0;
if (is_rx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
@@ -1001,15 +994,8 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
* We check whether we want to open a TX stream to a real NIC or a
* pcap file
*/
- dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
- if (dumpers.num_of_queue)
- is_tx_pcap = 1;
- else
- dumpers.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_IFACE_ARG);
-
- if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
+ dumpers.num_of_queue = 0;
if (is_tx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 9affcbc9..092606be 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -2423,9 +2423,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
}
- /* Log and clean previous pglue_b errors if such exist */
+ /* Log and clear previous pglue_b errors if such exist */
ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
- ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
/* Enable the PF's internal FID_enable in the PXP */
rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
@@ -2433,6 +2432,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
goto load_err;
+ /* Clear the pglue_b was_error indication.
+ * In E4 it must be done after the BME and the internal
+ * FID_enable for the PF are set, since VDMs may cause the
+ * indication to be set again.
+ */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index e6cef85b..61e36a43 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -231,15 +231,19 @@ static const char *grc_timeout_attn_master_to_str(u8 master)
static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
u32 tmp, tmp2;
/* We've already cleared the timeout interrupt register, so we learn
- * of interrupts via the validity register
+ * of interrupts via the validity register.
+ * Any attention which is not for a timeout event is treated as fatal.
*/
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
- if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
+ if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) {
+ rc = ECORE_INVAL;
goto out;
+ }
/* Read the GRC timeout information */
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -263,11 +267,11 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
ECORE_GRC_ATTENTION_VF_SHIFT);
-out:
- /* Regardles of anything else, clean the validity bit */
+ /* Clean the validity bit */
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
- return ECORE_SUCCESS;
+out:
+ return rc;
}
#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 68f40f8a..1c885e1c 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -61,6 +61,8 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_COALESCE_UPDATE",
"CHANNEL_TLV_QID",
"CHANNEL_TLV_COALESCE_READ",
+ "CHANNEL_TLV_BULLETIN_UPDATE_MAC",
+ "CHANNEL_TLV_UPDATE_MTU",
"CHANNEL_TLV_MAX"
};
@@ -2854,6 +2856,45 @@ out:
length, status);
}
+static enum _ecore_status_t
+ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct ecore_sp_vport_update_params params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_mtu_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Validate PF can send such a request */
+ if (!p_vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing MTU update\n",
+ p_vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto send_status;
+ }
+
+ p_req = &mbx->req_virt->update_mtu;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.mtu = p_req->mtu;
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+send_status:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ CHANNEL_TLV_UPDATE_MTU,
+ sizeof(struct pfvf_def_resp_tlv),
+ status);
+ return rc;
+}
+
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type)
{
@@ -4136,6 +4177,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_READ:
ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_MTU:
+ ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
/* If we've received a message from a VF we consider malicious
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 8a08911a..334db6b9 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -1628,6 +1628,39 @@ exit:
return rc;
}
+enum _ecore_status_t
+ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_mtu_tlv *p_req;
+ struct pfvf_def_resp_tlv *p_resp;
+ enum _ecore_status_t rc;
+
+ if (!mtu)
+ return ECORE_INVAL;
+
+ /* clear mailbox and prep header tlv */
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU,
+ sizeof(*p_req));
+ p_req->mtu = mtu;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requesting MTU update to %d\n", mtu);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED)
+ rc = ECORE_INVAL;
+
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id)
{
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index de2758cb..e5555bb1 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -319,5 +319,14 @@ void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
enum BAR_ID bar_id);
+
+/**
+ * @brief ecore_vf_pf_update_mtu - Update MTU for VF.
+ *
+ * @param p_hwfn
+ * @param mtu
+ */
+enum _ecore_status_t
+ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu);
#endif
#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index c6af9caf..08b1f248 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -531,6 +531,18 @@ struct pfvf_read_coal_resp_tlv {
u8 padding[6];
};
+struct vfpf_bulletin_update_mac_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
+struct vfpf_update_mtu_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 mtu;
+ u8 padding[6];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -545,6 +557,8 @@ union vfpf_tlvs {
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
struct vfpf_read_coal_req_tlv read_coal_req;
+ struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
+ struct vfpf_update_mtu_tlv update_mtu;
struct tlv_buffer_size tlv_buf_size;
};
@@ -675,6 +689,8 @@ enum {
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_COALESCE_READ,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ CHANNEL_TLV_UPDATE_MTU,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 7462f1ad..4a5e4857 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -336,6 +336,24 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
}
static void
+qede_interrupt_handler_intx(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ u64 status;
+
+ /* Check if our device actually raised an interrupt */
+ status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
+ if (status & 0x1) {
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+
+ if (rte_intr_enable(eth_dev->intr_handle))
+ DP_ERR(edev, "rte_intr_enable failed\n");
+ }
+}
+
+static void
qede_interrupt_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
@@ -516,12 +534,9 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
params.update_vport_active_tx_flg = 1;
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
- if (!qdev->enable_tx_switching) {
- if (IS_VF(edev)) {
- params.update_tx_switching_flg = 1;
- params.tx_switching_flg = !flg;
- DP_INFO(edev, "VF tx-switching is disabled\n");
- }
+ if (~qdev->enable_tx_switching & flg) {
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = !flg;
}
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
@@ -597,37 +612,6 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
return 0;
}
-/* Update MTU via vport-update without doing port restart.
- * The vport must be deactivated before calling this API.
- */
-int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_sp_vport_update_params params;
- struct ecore_hwfn *p_hwfn;
- int rc;
- int i;
-
- memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
- params.vport_id = 0;
- params.mtu = mtu;
- params.vport_id = 0;
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- params.opaque_fid = p_hwfn->hw_info.opaque_fid;
- rc = ecore_sp_vport_update(p_hwfn, &params,
- ECORE_SPQ_MODE_EBLOCK, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to update MTU\n");
- return -1;
- }
- }
- DP_INFO(edev, "MTU updated to %u\n", mtu);
-
- return 0;
-}
-
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
memset(ucast, 0, sizeof(struct ecore_filter_ucast));
@@ -861,7 +845,10 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
if (rc == 0)
rc = ecore_filter_ucast_cmd(edev, ucast,
ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS)
+ /* Indicate an error only for the add filter operation.
+ * Failures of delete filter operations are not severe.
+ */
+ if ((rc != ECORE_SUCCESS) && add)
DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
rc, add);
@@ -875,7 +862,11 @@ qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
struct ecore_filter_ucast ucast;
int re;
+ if (!is_valid_assigned_ether_addr(mac_addr))
+ return -EINVAL;
+
qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_ADD;
ucast.type = ECORE_FILTER_MAC;
ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
@@ -897,6 +888,9 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
return;
}
+ if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+ return;
+
qede_set_ucast_cmn_params(&ucast);
ucast.opcode = ECORE_FILTER_REMOVE;
ucast.type = ECORE_FILTER_MAC;
@@ -922,6 +916,7 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
return;
}
+ qede_mac_addr_remove(eth_dev, 0);
qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
@@ -1182,6 +1177,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE(edev);
+ /* Update MTU only if it has changed */
+ if (eth_dev->data->mtu != qdev->mtu) {
+ if (qede_update_mtu(eth_dev, qdev->mtu))
+ goto err;
+ }
+
/* Configure TPA parameters */
if (rxmode->enable_lro) {
if (qede_enable_tpa(eth_dev, true))
@@ -1245,16 +1246,13 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
/* Disable traffic */
ecore_hw_stop_fastpath(edev); /* TBD - loop */
- if (IS_PF(edev))
- qede_mac_addr_remove(eth_dev, 0);
-
DP_INFO(edev, "Device is stopped\n");
}
-#define QEDE_TX_SWITCHING "vf_txswitch"
+#define QEDE_VF_TX_SWITCHING "vf_tx_switching"
const char *valid_args[] = {
- QEDE_TX_SWITCHING,
+ QEDE_VF_TX_SWITCHING,
NULL,
};
@@ -1264,9 +1262,7 @@ static int qede_args_check(const char *key, const char *val, void *opaque)
int ret = 0;
struct rte_eth_dev *eth_dev = opaque;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-#endif
errno = 0;
tmp = strtoul(val, NULL, 0);
@@ -1275,8 +1271,10 @@ static int qede_args_check(const char *key, const char *val, void *opaque)
return errno;
}
- if (strcmp(QEDE_TX_SWITCHING, key) == 0)
+ if (strcmp(QEDE_VF_TX_SWITCHING, key) == 0 && IS_VF(edev)) {
qdev->enable_tx_switching = !!tmp;
+ DP_INFO(edev, "VF tx-switching %s\n", tmp ? "enabled" : "disabled");
+ }
return ret;
}
@@ -1351,7 +1349,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
/* Parse devargs and fix up rxmode */
if (qede_args(eth_dev))
- return -ENOTSUP;
+ DP_NOTICE(edev, false,
+ "Invalid devargs supplied, requested change will not take effect\n");
/* Sanity checks and throw warnings */
if (rxmode->enable_scatter)
@@ -1392,8 +1391,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
/* Enable VLAN offloads by default */
ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK);
if (ret)
return ret;
@@ -1491,7 +1489,7 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- uint16_t link_duplex;
+ uint16_t link_duplex, old_link_status;
struct qed_link_output link;
struct rte_eth_link *curr = &eth_dev->data->dev_link;
@@ -1516,6 +1514,7 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
curr->link_duplex = link_duplex;
/* Link Status */
+ old_link_status = curr->link_status;
curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
/* AN */
@@ -1527,7 +1526,7 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
curr->link_autoneg, curr->link_status);
/* return 0 means link status changed, -1 means not changed */
- return ((curr->link_status == link.link_up) ? -1 : 0);
+ return ((curr->link_status == old_link_status) ? -1 : 0);
}
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
@@ -1613,8 +1612,20 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- qede_interrupt_handler, (void *)eth_dev);
+
+ switch (pci_dev->intr_handle.type) {
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ qede_interrupt_handler_intx,
+ (void *)eth_dev);
+ break;
+ default:
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ qede_interrupt_handler,
+ (void *)eth_dev);
+ }
+
if (ECORE_IS_CMT(edev))
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}
@@ -1932,6 +1943,70 @@ qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}
+/* Update MTU via vport-update without doing port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ if (IS_PF(edev)) {
+ struct ecore_sp_vport_update_params params;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ params.vport_id = 0;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+ } else {
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
+ if (rc == ECORE_INVAL) {
+ DP_INFO(edev, "VF MTU Update TLV not supported\n");
+ /* Recreate vport */
+ rc = qede_start_vport(qdev, mtu);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Restore config lost due to vport stop */
+ if (eth_dev->data->promiscuous)
+ qede_promiscuous_enable(eth_dev);
+ else
+ qede_promiscuous_disable(eth_dev);
+
+ if (eth_dev->data->all_multicast)
+ qede_allmulticast_enable(eth_dev);
+ else
+ qede_allmulticast_disable(eth_dev);
+
+ qede_vlan_offload_set(eth_dev,
+ qdev->vlan_offload_mask);
+ } else if (rc != ECORE_SUCCESS) {
+ goto err;
+ }
+ }
+ }
+ DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
+
+ return 0;
+
+err:
+ DP_ERR(edev, "Failed to update MTU\n");
+ return -1;
+}
+
static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf)
{
@@ -2332,12 +2407,8 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
dev->data->dev_started = 0;
qede_dev_stop(dev);
restart = true;
- } else {
- if (IS_PF(edev))
- qede_mac_addr_remove(dev, 0);
}
rte_delay_ms(1000);
- qede_start_vport(qdev, mtu); /* Recreate vport */
qdev->mtu = mtu;
/* Fix up RX buf size for all queues of the port */
@@ -2361,22 +2432,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
- /* Restore config lost due to vport stop */
- if (IS_PF(edev))
- qede_mac_addr_set(dev, &qdev->primary_mac);
-
- if (dev->data->promiscuous)
- qede_promiscuous_enable(dev);
- else
- qede_promiscuous_disable(dev);
-
- if (dev->data->all_multicast)
- qede_allmulticast_enable(dev);
- else
- qede_allmulticast_disable(dev);
-
- qede_vlan_offload_set(dev, qdev->vlan_offload_mask);
-
if (!dev->data->dev_started && restart) {
qede_dev_start(dev);
dev->data->dev_started = 1;
@@ -2792,6 +2847,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.mtu_set = qede_set_mtu,
.udp_tunnel_port_add = qede_udp_dst_port_add,
.udp_tunnel_port_del = qede_udp_dst_port_del,
+ .mac_addr_add = qede_mac_addr_add,
+ .mac_addr_remove = qede_mac_addr_remove,
+ .mac_addr_set = qede_mac_addr_set,
};
static void qede_update_pf_params(struct ecore_dev *edev)
@@ -2820,6 +2878,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+ uint32_t int_mode;
int rc;
/* Extract key data structures */
@@ -2864,8 +2923,22 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
return -ENODEV;
}
qede_update_pf_params(edev);
- rte_intr_callback_register(&pci_dev->intr_handle,
- qede_interrupt_handler, (void *)eth_dev);
+
+ switch (pci_dev->intr_handle.type) {
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ int_mode = ECORE_INT_MODE_INTA;
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ qede_interrupt_handler_intx,
+ (void *)eth_dev);
+ break;
+ default:
+ int_mode = ECORE_INT_MODE_MSIX;
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ qede_interrupt_handler,
+ (void *)eth_dev);
+ }
+
if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
@@ -2873,7 +2946,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Start the Slowpath-process */
memset(&params, 0, sizeof(struct qed_slowpath_params));
- params.int_mode = ECORE_INT_MODE_MSIX;
+
+ params.int_mode = int_mode;
params.drv_major = QEDE_PMD_VERSION_MAJOR;
params.drv_minor = QEDE_PMD_VERSION_MINOR;
params.drv_rev = QEDE_PMD_VERSION_REVISION;
@@ -2956,7 +3030,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
ECORE_LEADING_HWFN(edev),
vf_mac,
&is_mac_forced);
- if (is_mac_exist && is_mac_forced) {
+ if (is_mac_exist) {
DP_INFO(edev, "VF macaddr received from PF\n");
ether_addr_copy((struct ether_addr *)&vf_mac,
&eth_dev->data->mac_addrs[0]);
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 2145aa6d..cc1a409f 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -34,6 +34,7 @@
#include "base/nvm_cfg.h"
#include "base/ecore_sp_commands.h"
#include "base/ecore_l2.h"
+#include "base/ecore_vf.h"
#include "qede_logs.h"
#include "qede_if.h"
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
index 153ef964..05152566 100644
--- a/drivers/net/qede/qede_fdir.c
+++ b/drivers/net/qede/qede_fdir.c
@@ -465,5 +465,8 @@ int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
udpv4_flow->src_port = ntuple->src_port;
udpv4_flow->dst_port = ntuple->dst_port;
}
+
+ fdir_entry.action.rx_queue = ntuple->queue;
+
return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}
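The qede_fdir.c change propagates the ntuple filter's queue index into the flow-director entry, so matching traffic is actually steered to the requested Rx queue. A usage sketch through the legacy filter API; the addresses, ports and queue number are example values, and qede may restrict which flag combinations it accepts.

#include <netinet/in.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ip.h>

static int steer_udp_flow(uint16_t port_id)
{
	struct rte_eth_ntuple_filter f = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
		.dst_ip_mask = UINT32_MAX,
		.dst_port = rte_cpu_to_be_16(4789),
		.dst_port_mask = UINT16_MAX,
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX,
		.priority = 1,
		.queue = 3,	/* honoured now that action.rx_queue is set */
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &f);
}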
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index ae187321..95b4cd91 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -279,7 +279,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
/* Start the slowpath */
memset(&hw_init_params, 0, sizeof(hw_init_params));
hw_init_params.b_hw_start = true;
- hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
+ hw_init_params.int_mode = params->int_mode;
hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data;
@@ -633,8 +633,11 @@ void qed_link_update(struct ecore_hwfn *hwfn)
{
struct ecore_dev *edev = hwfn->p_dev;
struct qede_dev *qdev = (struct qede_dev *)edev;
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
- qede_link_update((struct rte_eth_dev *)qdev->ethdev, 0);
+ if (!qede_link_update(dev, 0))
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
static int qed_drain(struct ecore_dev *edev)
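With qed_link_update() now raising RTE_ETH_EVENT_INTR_LSC on a real status change, applications that registered an LSC callback get notified. A minimal sketch of such a callback; the function names on the application side are illustrative.

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link %s\n", port_id,
	       link.link_status ? "up" : "down");
	return 0;
}

/* At init time:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				 lsc_event_cb, NULL);
 */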
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 4c76f747..bd2cbd09 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -296,6 +296,8 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
/* Override Layer 2 packet type */
l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
break;
+ case ESE_DZ_L3_CLASS_UNKNOWN:
+ break;
default:
/* Unexpected Layer 3 class */
SFC_ASSERT(false);
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index fabcc329..1b160b5e 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -1030,7 +1030,7 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
if (rc != 0)
sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
- SFC_ASSERT(rc > 0);
+ SFC_ASSERT(rc >= 0);
return -rc;
}
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index fddc6706..90ef5bf2 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -330,7 +330,8 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
* the outer tag and the next matches the inner tag.
*/
if (mask->tci == supp_mask.tci) {
- vid = rte_bswap16(spec->tci);
+ /* Apply mask to keep VID only */
+ vid = rte_bswap16(spec->tci & mask->tci);
if (!(efx_spec->efs_match_flags &
EFX_FILTER_MATCH_OUTER_VID)) {
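After this fix, stray PCP/DEI bits in the VLAN spec no longer leak into the extracted VID because the spec is ANDed with the mask. A sketch of a rte_flow VLAN item matching VID 100 only, assuming the usual TCI layout (12-bit VID in the low bits); only the tci field is shown.

#include <rte_byteorder.h>
#include <rte_flow.h>

static const struct rte_flow_item_vlan vlan_spec = {
	.tci = RTE_BE16(100),		/* VID 100 */
};
static const struct rte_flow_item_vlan vlan_mask = {
	.tci = RTE_BE16(0x0fff),	/* match VID bits only */
};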
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index ffc0b85b..f10bbf60 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -464,7 +464,7 @@ tap_flow_create_eth(const struct rte_flow_item *item, void *data)
if (!flow)
return 0;
msg = &flow->msg;
- if (!is_zero_ether_addr(&spec->dst)) {
+ if (!is_zero_ether_addr(&mask->dst)) {
nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
&spec->dst.addr_bytes);
nlattr_add(&msg->nh,
@@ -570,13 +570,13 @@ tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
info->eth_type = htons(ETH_P_IP);
if (!spec)
return 0;
- if (spec->hdr.dst_addr) {
+ if (mask->hdr.dst_addr) {
nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
spec->hdr.dst_addr);
nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
mask->hdr.dst_addr);
}
- if (spec->hdr.src_addr) {
+ if (mask->hdr.src_addr) {
nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
spec->hdr.src_addr);
nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
@@ -626,13 +626,13 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
info->eth_type = htons(ETH_P_IPV6);
if (!spec)
return 0;
- if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
}
- if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
@@ -680,10 +680,10 @@ tap_flow_create_udp(const struct rte_flow_item *item, void *data)
nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
spec->hdr.src_port);
return 0;
@@ -726,10 +726,10 @@ tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
spec->hdr.src_port);
return 0;
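The tap_flow.c hunks switch all these checks from the spec value to the mask, which is the semantically correct test: a field is matched when its mask is non-zero, regardless of the spec value. A small sketch of a case that only works with the new logic, matching UDP destination port 0 explicitly; field values are examples.

#include <rte_flow.h>

/* spec value 0 with a full mask: "match dst_port == 0" rather than
 * "don't match on dst_port", which the old spec-based check conflated. */
static const struct rte_flow_item_udp udp_spec = {
	.hdr = { .dst_port = 0 },
};
static const struct rte_flow_item_udp udp_mask = {
	.hdr = { .dst_port = 0xffff },
};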
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index c62371cb..3843084b 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -897,7 +897,7 @@ nicvf_dev_tx_queue_release(void *sq)
static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
- struct nicvf_txq *txq;
+ struct nicvf_txq *txq = NULL;
size_t i;
bool multiseg = false;
@@ -918,6 +918,9 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
dev->tx_pkt_burst = nicvf_xmit_pkts;
}
+ if (!txq)
+ return;
+
if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
else
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 9d69cf4e..06cbc463 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -190,12 +190,14 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
free_desc -= TX_DESC_PER_PKT;
}
- sq->tail = tail;
- sq->xmit_bufs += i;
- rte_wmb();
+ if (likely(i)) {
+ sq->tail = tail;
+ sq->xmit_bufs += i;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ }
return i;
}
@@ -246,12 +248,14 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
}
}
- sq->tail = tail;
- sq->xmit_bufs += used_bufs;
- rte_wmb();
+ if (likely(used_desc)) {
+ sq->tail = tail;
+ sq->xmit_bufs += used_bufs;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, used_desc);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, used_desc);
+ }
return i;
}
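Both nicvf Tx paths now skip the tail update, write barrier and doorbell write when no descriptors were consumed, so an empty burst no longer touches hardware. On the application side this pairs with the usual handling of a partial or zero return from rte_eth_tx_burst(); a simple retry sketch (queue 0, busy-waiting, example only):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
send_all(uint16_t port_id, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	/* tx_burst may return 0 when the SQ is full; just retry. */
	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, 0,
					 pkts + sent, nb_pkts - sent);
}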