author    Luca Boccassi <luca.boccassi@gmail.com>  2018-08-29 18:59:44 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>  2018-08-29 19:00:12 +0100
commit    8e6d9d118f6105a3627b64a7949e1fb0b145879e (patch)
tree      6494692bc19c7dd9cae2b16cf6e0ed7e90aa1da8 /drivers/net
parent    43192222b329b3c984687235b0081c7fbfe484ba (diff)

New upstream version 16.11.8 (tag: upstream/16.11.8)

Change-Id: I3d0a7da377a86fe41f3516c5a3c458746abc33fb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bnx2x/bnx2x.c               |   4
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c          |  18
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c            |  12
-rw-r--r--  drivers/net/bnxt/bnxt_txr.c             |  59
-rw-r--r--  drivers/net/bnxt/bnxt_txr.h             |  10
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.c            |   5
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.h            |   6
-rw-r--r--  drivers/net/bonding/rte_eth_bond_api.c  |  14
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c  |   9
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c          | 185
-rw-r--r--  drivers/net/ena/base/ena_plat_dpdk.h    |  32
-rw-r--r--  drivers/net/ena/ena_ethdev.c            |   4
-rw-r--r--  drivers/net/enic/enic_main.c            |  30
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c          | 155
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c       |   1
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.h        |   5
-rw-r--r--  drivers/net/ixgbe/ixgbe_fdir.c          |  31
-rw-r--r--  drivers/net/nfp/nfp_net.c               |  14
-rw-r--r--  drivers/net/null/rte_eth_null.c         |   7
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c         |  86
-rw-r--r--  drivers/net/qede/base/ecore_int.c       |  14
-rw-r--r--  drivers/net/qede/qede_ethdev.c          |   8
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.c     |   5
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c       |  24
24 files changed, 527 insertions, 211 deletions
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index c2842e3a..f1de3362 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -4492,6 +4492,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
+ PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index);
+
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
@@ -4508,7 +4510,7 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
}
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
- le16toh(fp->fp_hc_idx), IGU_INT_DISABLE, 1);
+ le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}
/*
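
Note: the bnx2x fix above changes the status-block ack in the fastpath task
queue from IGU_INT_DISABLE to IGU_INT_ENABLE, so the interrupt is re-armed once
the deferred work is done. A minimal standalone sketch of the ack-and-re-arm
pattern, with hypothetical names rather than the driver's API:

    #include <stdint.h>

    enum igu_int_mode { IGU_INT_DISABLE = 0, IGU_INT_ENABLE = 1 };

    /* Hypothetical device handle and ack primitive. */
    struct dev { volatile uint32_t *igu_ack_reg; };

    static void ack_sb(struct dev *d, uint16_t sb_idx, enum igu_int_mode mode)
    {
        /* Writing the consumer index together with the enable bit both
         * acknowledges the status block and re-arms the interrupt. */
        *d->igu_ack_reg = ((uint32_t)mode << 16) | sb_idx;
    }

    static void handle_fp_work(struct dev *d, uint16_t sb_idx)
    {
        /* ... drain RX/TX completions here ... */

        /* Re-enable on the way out; acking with IGU_INT_DISABLE (the old
         * behaviour) would leave the queue deaf to further events. */
        ack_sb(d, sb_idx, IGU_INT_ENABLE);
    }

    int main(void)
    {
        uint32_t reg = 0;
        struct dev d = { .igu_ack_reg = &reg };

        handle_fp_work(&d, 42);
        return reg == ((1u << 16) | 42) ? 0 : 1;
    }
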
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 3a0b1ce4..44bf6ba9 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -137,6 +137,7 @@ static struct rte_pci_id bnxt_pci_id_map[] = {
ETH_RSS_NONFRAG_IPV6_UDP)
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
+static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
/***********************/
@@ -222,6 +223,17 @@ static int bnxt_init_chip(struct bnxt *bp)
/* VNIC configuration */
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+
+ vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
+ if (!vnic->fw_grp_ids) {
+ RTE_LOG(ERR, PMD,
+ "Failed to alloc %d bytes for group ids\n",
+ size);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ memset(vnic->fw_grp_ids, -1, size);
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
@@ -347,12 +359,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->max_rx_queues = bp->pf.max_rx_rings;
dev_info->max_tx_queues = bp->pf.max_tx_rings;
dev_info->max_vfs = bp->pf.active_vfs;
- dev_info->reta_size = bp->pf.max_rsscos_ctx;
+ dev_info->reta_size = HW_HASH_INDEX_SIZE;
max_vnics = bp->pf.max_vnics;
} else {
dev_info->max_rx_queues = bp->vf.max_rx_rings;
dev_info->max_tx_queues = bp->vf.max_tx_rings;
- dev_info->reta_size = bp->vf.max_rsscos_ctx;
+ dev_info->reta_size = HW_HASH_INDEX_SIZE;
max_vnics = bp->vf.max_vnics;
}
@@ -575,6 +587,8 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
rte_free(bp->grp_info);
bp->grp_info = NULL;
}
+
+ bnxt_dev_uninit(eth_dev);
}
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
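
Note: bnxt_init_chip() now allocates fw_grp_ids per VNIC, sized by
max_ring_grps, and presets every entry to the invalid-id sentinel via
memset(-1). A minimal standalone sketch of why that works for a uint16_t array
(malloc() stands in for rte_zmalloc(), and 0xFFFF is assumed to be what
(uint16_t)HWRM_NA_SIGNATURE truncates to):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define INVALID_FW_GRP_ID 0xFFFF /* assumed (uint16_t)HWRM_NA_SIGNATURE */

    int main(void)
    {
        unsigned int max_ring_grps = 4; /* placeholder; the driver reads it from FW */
        size_t size = sizeof(uint16_t) * max_ring_grps;
        uint16_t *fw_grp_ids = malloc(size); /* rte_zmalloc() in the driver */

        if (fw_grp_ids == NULL)
            return -1;

        /* memset with -1 writes 0xFF into every byte, so each 16-bit slot
         * reads back as 0xFFFF, i.e. "no ring group assigned yet". */
        memset(fw_grp_ids, -1, size);

        for (unsigned int i = 0; i < max_ring_grps; i++)
            printf("grp[%u] = %#x (invalid: %d)\n", i, fw_grp_ids[i],
                   fw_grp_ids[i] == INVALID_FW_GRP_ID);

        free(fw_grp_ids);
        return 0;
    }
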
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d790b991..d65e28be 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -134,11 +134,19 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
if (rc) { \
RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
__func__, rc); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
if (resp->error_code) { \
rc = rte_le_to_cpu_16(resp->error_code); \
RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
}
@@ -837,7 +845,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
/* map ring groups to this vnic */
- for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
+ for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++) {
if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
RTE_LOG(ERR, PMD,
"Not enough ring groups avail:%x req:%x\n", j,
@@ -1271,6 +1279,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_hwrm_vnic_ctx_free(bp, vnic);
bnxt_hwrm_vnic_free(bp, vnic);
+
+ rte_free(vnic->fw_grp_ids);
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);
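
Note: the HWRM_CHECK_RESULT change above converts positive firmware status
codes into negative errno values before returning to callers. A sketch of that
mapping as a plain helper (the driver does it inline in the macro; the
status-code value here is an assumption taken from the usual HSI headers):

    #include <errno.h>
    #include <stdio.h>

    /* Assumed value; the real definition lives in the bnxt HSI headers. */
    #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x4

    static int hwrm_status_to_errno(int rc)
    {
        if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED)
            return -EACCES;   /* permission problem, e.g. a VF asking too much */
        if (rc > 0)
            return -EINVAL;   /* any other firmware error code */
        return rc;            /* 0 or an already-negative errno */
    }

    int main(void)
    {
        printf("%d %d %d\n", hwrm_status_to_errno(0),
               hwrm_status_to_errno(HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED),
               hwrm_status_to_errno(9)); /* prints: 0 -13 -22 */
        return 0;
    }
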
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 9cd44a93..ff24e232 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -161,7 +161,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
+ PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
+ PKT_TX_TUNNEL_GENEVE))
long_bd = true;
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
@@ -177,11 +179,11 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
txbd->opaque = txr->tx_prod;
txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
txbd->len = tx_pkt->data_len;
- if (txbd->len >= 2014)
+ if (tx_pkt->pkt_len >= 2014)
txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
else
- txbd->flags_type |= lhint_arr[txbd->len >> 9];
- txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf));
+ txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
+ txbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf));
if (long_bd) {
txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
@@ -221,16 +223,46 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_UDP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
PKT_TX_IIP_TCP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
+ PKT_TX_IIP_UDP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
+ PKT_TX_IIP_TCP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
PKT_TX_OIP_TCP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
+ PKT_TX_OIP_UDP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
+ PKT_TX_OIP_TCP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
PKT_TX_OIP_IIP_CKSUM) {
/* Outer IP, Inner IP CSO */
@@ -241,11 +273,23 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
+ PKT_TX_TCP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
+ PKT_TX_UDP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
+ PKT_TX_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
+ PKT_TX_OUTER_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
txbd1->mss = 0;
@@ -261,7 +305,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
txbd = &txr->tx_desc_ring[txr->tx_prod];
- txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(m_seg));
+ txbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m_seg));
txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
txbd->len = m_seg->data_len;
@@ -269,6 +313,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
}
txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
+ txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
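
Note: every new checksum branch above tests (ol_flags & MASK) == MASK, which
matches only when all bits of the combined mask are set, and the branches run
from most to least specific. A standalone sketch of that exact-match cascade
with hypothetical flag bits (not the real PKT_TX_* values):

    #include <stdint.h>
    #include <stdio.h>

    #define F_IP_CKSUM   (1u << 0)
    #define F_TCP_CKSUM  (1u << 1)
    #define F_OUTER_IP   (1u << 2)

    #define F_OIP_IIP_TCP (F_OUTER_IP | F_IP_CKSUM | F_TCP_CKSUM)
    #define F_IIP_TCP     (F_IP_CKSUM | F_TCP_CKSUM)

    static const char *classify(uint32_t flags)
    {
        /* Test most-specific combinations first; (flags & m) == m requires
         * every bit of m, unlike a plain (flags & m) truthiness test. */
        if ((flags & F_OIP_IIP_TCP) == F_OIP_IIP_TCP)
            return "outer IP + inner IP + TCP csum";
        if ((flags & F_IIP_TCP) == F_IIP_TCP)
            return "inner IP + TCP csum";
        if ((flags & F_TCP_CKSUM) == F_TCP_CKSUM)
            return "TCP csum only";
        return "no csum offload";
    }

    int main(void)
    {
        printf("%s\n", classify(F_IP_CKSUM | F_TCP_CKSUM)); /* inner IP + TCP */
        printf("%s\n", classify(F_TCP_CKSUM));              /* TCP only */
        return 0;
    }
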
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index cb961f1b..d9013991 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -71,10 +71,20 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)
#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 33fdde2f..58fa4bee 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -67,7 +67,7 @@ void bnxt_init_vnics(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
uint16_t max_vnics;
- int i, j;
+ int i;
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -85,9 +85,6 @@ void bnxt_init_vnics(struct bnxt *bp)
vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
- for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
- vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;
-
prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
STAILQ_INIT(&vnic->filter);
STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 9671ba42..1f9e1938 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -44,13 +44,9 @@ struct bnxt_vnic_info {
uint16_t fw_vnic_id; /* returned by Chimp during alloc */
uint16_t fw_rss_cos_lb_ctx;
uint16_t ctx_is_rss_cos_lb;
-#define MAX_NUM_TRAFFIC_CLASSES 8
-#define MAX_NUM_RSS_QUEUES_PER_VNIC 16
-#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \
- MAX_NUM_TRAFFIC_CLASSES)
uint16_t start_grp_id;
uint16_t end_grp_id;
- uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC];
+ uint16_t *fw_grp_ids;
uint16_t hash_type;
phys_addr_t rss_table_dma_addr;
uint16_t *rss_table;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 7bb2fe1e..fa6f9582 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -780,9 +780,21 @@ rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
internals->user_defined_mac = 0;
if (internals->slave_count > 0) {
+ int slave_port;
+ /* Find the primary slave's slot from its port number: slave_add()
+ * orders the slaves array by slave_count, so the primary port
+ * number is not a valid index into it.
+ */
+ for (slave_port = 0; slave_port < internals->slave_count;
+ slave_port++) {
+ if (internals->slaves[slave_port].port_id ==
+ internals->primary_port)
+ break;
+ }
+
/* Set MAC Address of Bonded Device */
if (mac_address_set(bonded_eth_dev,
- &internals->slaves[internals->primary_port].persisted_mac_addr)
+ &internals->slaves[slave_port].persisted_mac_addr)
!= 0) {
RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
return -1;
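
Note: internals->primary_port is a port id, not an index into slaves[], so the
fix above searches the array for the matching port id before reading
persisted_mac_addr. A minimal sketch of that lookup:

    #include <stdint.h>
    #include <stdio.h>

    struct slave { uint16_t port_id; };

    /* Find the array slot whose port_id matches the primary port number,
     * instead of indexing slaves[] by the port number itself. */
    static int find_primary_slot(const struct slave *slaves, int count,
                                 uint16_t primary_port)
    {
        for (int i = 0; i < count; i++)
            if (slaves[i].port_id == primary_port)
                return i;
        return -1; /* not found */
    }

    int main(void)
    {
        struct slave slaves[] = { { .port_id = 7 }, { .port_id = 3 } };

        /* Port 3 lives in slot 1; indexing slaves[3] would be out of bounds. */
        printf("slot = %d\n", find_primary_slot(slaves, 2, 3));
        return 0;
    }
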
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 6f8931ef..6081918e 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1545,10 +1545,6 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
}
}
- /* Update all slave devices MACs*/
- if (mac_address_slaves_update(eth_dev) != 0)
- goto out_err;
-
/* If bonded device is configure in promiscuous mode then re-apply config */
if (internals->promiscuous_en)
bond_ethdev_promiscuous_enable(eth_dev);
@@ -1577,6 +1573,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
(void *)&rte_eth_devices[internals->port_id]);
}
+ /* Update all slave devices' MACs */
+ if (mac_address_slaves_update(eth_dev) != 0)
+ goto out_err;
+
if (internals->user_defined_primary_port)
bond_ethdev_primary_set(internals, internals->primary_port);
@@ -1649,7 +1649,6 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
- internals->active_slave_count = 0;
internals->link_status_polling_enabled = 0;
for (i = 0; i < internals->slave_count; i++)
internals->slaves[i].last_link_status = 0;
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 565a6959..6b9f7e8e 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -3315,8 +3315,12 @@ static int t4_wait_dev_ready(struct adapter *adapter)
msleep(500);
whoami = t4_read_reg(adapter, A_PL_WHOAMI);
- return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
- ? 0 : -EIO);
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ whoami);
+ return -EIO;
}
struct flash_desc {
@@ -3327,52 +3331,172 @@ struct flash_desc {
int t4_get_flash_params(struct adapter *adapter)
{
/*
- * Table for non-Numonix supported flash parts. Numonix parts are left
- * to the preexisting well-tested code. All flash parts have 64KB
- * sectors.
+ * Table for non-standard supported Flash parts. Note, all Flash
+ * parts must have 64KB sectors.
*/
static struct flash_desc supported_flash[] = {
- { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
};
int ret;
- unsigned int i;
- u32 info = 0;
-
+ u32 flashid = 0;
+ unsigned int part, manufacturer;
+ unsigned int density, size = 0;
+
+ /**
+ * Issue a Read ID Command to the Flash part. We decode supported
+ * Flash parts and their sizes from this. There's a newer Query
+ * Command which can retrieve detailed geometry information but
+ * many Flash parts don't support it.
+ */
ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
if (!ret)
- ret = sf1_read(adapter, 3, 0, 1, &info);
+ ret = sf1_read(adapter, 3, 0, 1, &flashid);
t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
if (ret < 0)
return ret;
- for (i = 0; i < ARRAY_SIZE(supported_flash); ++i)
- if (supported_flash[i].vendor_and_model_id == info) {
- adapter->params.sf_size = supported_flash[i].size_mb;
+ /**
+ * Check to see if it's one of our non-standard supported Flash parts.
+ */
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ adapter->params.sf_size =
+ supported_flash[part].size_mb;
adapter->params.sf_nsec =
adapter->params.sf_size / SF_SEC_SIZE;
- return 0;
+ goto found;
}
+ }
- if ((info & 0xff) != 0x20) /* not a Numonix flash */
- return -EINVAL;
- info >>= 16; /* log2 of size */
- if (info >= 0x14 && info < 0x18)
- adapter->params.sf_nsec = 1 << (info - 16);
- else if (info == 0x18)
- adapter->params.sf_nsec = 64;
- else
- return -EINVAL;
- adapter->params.sf_size = 1 << info;
+ /**
+ * Decode Flash part size. The code below looks repetitive with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /**
+ * This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14:
+ size = 1 << 20; /* 1MB */
+ break;
+ case 0x15:
+ size = 1 << 21; /* 2MB */
+ break;
+ case 0x16:
+ size = 1 << 22; /* 4MB */
+ break;
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ case 0x19:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x20:
+ size = 1 << 26; /* 64MB */
+ break;
+ case 0x21:
+ size = 1 << 27; /* 128MB */
+ break;
+ case 0x22:
+ size = 1 << 28; /* 256MB */
+ break;
+ }
+ break;
+ }
+
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /**
+ * This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x17:
+ size = 1 << 26; /* 64MB */
+ break;
+ }
+ break;
+ }
+
+ case 0xc2: { /* Macronix */
+ /**
+ * This Density -> Size decoding table is taken from Macronix
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+
+ case 0xef: { /* Winbond */
+ /**
+ * This Density -> Size decoding table is taken from Winbond
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ dev_warn(adapter,
+ "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /**
+ * Store decoded Flash size and fall through into vetting code.
+ */
+ adapter->params.sf_size = size;
+ adapter->params.sf_nsec = size / SF_SEC_SIZE;
+found:
/*
* We should reject adapters with FLASHes which are too small. So, emit
* a warning.
*/
- if (adapter->params.sf_size < FLASH_MIN_SIZE) {
- dev_warn(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
- adapter->params.sf_size, FLASH_MIN_SIZE);
- }
+ if (adapter->params.sf_size < FLASH_MIN_SIZE)
+ dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
@@ -3439,8 +3563,11 @@ int t4_prep_adapter(struct adapter *adapter)
t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
ret = t4_get_flash_params(adapter);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
+ -ret);
return ret;
+ }
adapter->params.cim_la_size = CIMLA_SIZE;
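
Note: a worked example of the Read ID decode introduced above. The
manufacturer ID is the low byte of the three-byte response and the density
code the third byte; for the Micron 0x14..0x19 run, the size is simply
1 << density. This sketch handles only that run and leaves everything else to
the 4MB fallback:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t flash_size(uint32_t flashid)
    {
        uint8_t manufacturer = flashid & 0xff;
        uint8_t density = (flashid >> 16) & 0xff;

        if (manufacturer == 0x20 && density >= 0x14 && density <= 0x19)
            return 1u << density; /* 0x14 -> 1MB ... 0x19 -> 32MB */
        return 0; /* unknown: the driver falls back to the 4MB minimum */
    }

    int main(void)
    {
        printf("size = %u bytes\n", flash_size(0x00180020)); /* Micron 16MB */
        return 0;
    }
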
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 87c3bf13..791b44ff 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -115,11 +115,13 @@ typedef uint64_t dma_addr_t;
#define ENA_MIN16(x, y) RTE_MIN((x), (y))
#define ENA_MIN8(x, y) RTE_MIN((x), (y))
+#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
#define U64_C(x) x ## ULL
#define BIT(nr) (1UL << (nr))
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#ifdef RTE_LIBRTE_ENA_COM_DEBUG
#define ena_trc_dbg(format, arg...) \
@@ -188,10 +190,15 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
- phys = mz->phys_addr; \
handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->phys_addr; \
+ } \
} while (0)
#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
({ ENA_TOUCH(size); ENA_TOUCH(phys); \
@@ -206,19 +213,20 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, node, 0); \
- virt = mz->addr; \
- phys = mz->phys_addr; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->phys_addr; \
+ } \
} while (0)
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
do { \
- const struct rte_memzone *mz; \
- char z_name[RTE_MEMZONE_NAMESIZE]; \
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
- snprintf(z_name, sizeof(z_name), \
- "ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, node, 0); \
- virt = mz->addr; \
+ virt = rte_zmalloc_socket(NULL, size, 0, node); \
} while (0)
#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
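
Note: the old GENMASK_ULL(h, l) computed ((1ULL << (h - l + 1)) - 1) << l,
which shifts by 64 (undefined behaviour) for GENMASK_ULL(63, 0). The
replacement never shifts by more than 63 for valid arguments. A compilable
check of the new form:

    #include <stdio.h>

    #define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)

    /* New form from the patch: no shift reaches 64 for 0 <= l <= h <= 63. */
    #define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
                               (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

    int main(void)
    {
        printf("%#llx\n", GENMASK_ULL(7, 4));  /* 0xf0 */
        printf("%#llx\n", GENMASK_ULL(63, 0)); /* 0xffffffffffffffff */
        return 0;
    }
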
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 63c42362..1511c6a0 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -697,7 +697,7 @@ static int ena_link_update(struct rte_eth_dev *dev,
struct rte_eth_link *link = &dev->data->dev_link;
link->link_status = 1;
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = ETH_SPEED_NUM_NONE;
link->link_duplex = ETH_LINK_FULL_DUPLEX;
return 0;
@@ -894,7 +894,7 @@ static int ena_start(struct rte_eth_dev *dev)
return rc;
if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
- ETH_MQ_RX_RSS_FLAG) {
+ ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
rc = ena_rss_init_default(adapter);
if (rc)
return rc;
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index ef5ecd44..17606238 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -787,25 +787,23 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
static int instance;
wq->socket_id = socket_id;
- if (nb_desc) {
- if (nb_desc > enic->config.wq_desc_count) {
- dev_warning(enic,
- "WQ %d - number of tx desc in cmd line (%d)"\
- "is greater than that in the UCSM/CIMC adapter"\
- "policy. Applying the value in the adapter "\
- "policy (%d)\n",
- queue_idx, nb_desc, enic->config.wq_desc_count);
- } else if (nb_desc != enic->config.wq_desc_count) {
- enic->config.wq_desc_count = nb_desc;
- dev_info(enic,
- "TX Queues - effective number of descs:%d\n",
- nb_desc);
- }
+ if (nb_desc > enic->config.wq_desc_count) {
+ dev_warning(enic,
+ "WQ %d - number of tx desc in cmd line (%d) "
+ "is greater than that in the UCSM/CIMC adapter "
+ "policy. Applying the value in the adapter "
+ "policy (%d)\n",
+ queue_idx, nb_desc, enic->config.wq_desc_count);
+ nb_desc = enic->config.wq_desc_count;
+ } else if (nb_desc != enic->config.wq_desc_count) {
+ dev_info(enic,
+ "TX Queues - effective number of descs:%d\n",
+ nb_desc);
}
/* Allocate queue resources */
err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
- enic->config.wq_desc_count,
+ nb_desc,
sizeof(struct wq_enet_desc));
if (err) {
dev_err(enic, "error in allocation of wq\n");
@@ -813,7 +811,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
}
err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
- socket_id, enic->config.wq_desc_count,
+ socket_id, nb_desc,
sizeof(struct cq_enet_wq_desc));
if (err) {
vnic_wq_free(wq);
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7e93c3e1..30612963 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1708,27 +1708,40 @@ i40e_phy_conf_link(struct i40e_hw *hw,
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
enum i40e_aq_phy_type cnt;
+ uint8_t avail_speed;
uint32_t phy_type_mask = 0;
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_LOW_POWER;
- const uint8_t advt = I40E_LINK_SPEED_40GB |
- I40E_LINK_SPEED_25GB |
- I40E_LINK_SPEED_10GB |
- I40E_LINK_SPEED_1GB |
- I40E_LINK_SPEED_100MB;
int ret = -ENOTSUP;
+ /* To get phy capabilities of available speeds. */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
+ status);
+ return ret;
+ }
+ avail_speed = phy_ab.link_speed;
+ /* To get the current phy config. */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
NULL);
- if (status)
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
+ status);
return ret;
+ }
- /* If link already up, no need to set up again */
- if (is_up && phy_ab.phy_type != 0)
+ /* If link needs to go up, it is in autoneg mode, and the speed is OK,
+ * no need to set it up again.
+ */
+ if (is_up && phy_ab.phy_type != 0 &&
+ abilities & I40E_AQ_PHY_AN_ENABLED &&
+ phy_ab.link_speed != 0)
return I40E_SUCCESS;
memset(&phy_conf, 0, sizeof(phy_conf));
@@ -1737,18 +1750,20 @@ i40e_phy_conf_link(struct i40e_hw *hw,
abilities &= ~mask;
abilities |= phy_ab.abilities & mask;
- /* update ablities and speed */
- if (abilities & I40E_AQ_PHY_AN_ENABLED)
- phy_conf.link_speed = advt;
- else
- phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
-
phy_conf.abilities = abilities;
+ /* If link needs to go up but the forced speed is not supported,
+ * warn users and configure the default available speeds.
+ */
+ if (is_up && !(force_speed & avail_speed)) {
+ PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
+ phy_conf.link_speed = avail_speed;
+ } else {
+ phy_conf.link_speed = is_up ? force_speed : avail_speed;
+ }
-
- /* To enable link, phy_type mask needs to include each type */
- for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ /* PHY type mask needs to include each type except PHY type extension */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
phy_type_mask |= 1 << cnt;
/* use get_phy_abilities_resp value for the rest */
@@ -1781,11 +1796,18 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *conf = &dev->data->dev_conf;
+ if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_100M;
+ }
speed = i40e_parse_link_speeds(conf->link_speeds);
- abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- abilities |= I40E_AQ_PHY_LINK_ENABLED;
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
+ I40E_AQ_PHY_AN_ENABLED |
+ I40E_AQ_PHY_LINK_ENABLED;
return i40e_phy_conf_link(hw, abilities, speed, true);
}
@@ -1876,13 +1898,6 @@ i40e_dev_start(struct rte_eth_dev *dev)
}
/* Apply link configure */
- if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G)) {
- PMD_DRV_LOG(ERR, "Invalid link setting");
- goto err_up;
- }
ret = i40e_apply_link_speed(dev);
if (I40E_SUCCESS != ret) {
PMD_DRV_LOG(ERR, "Fail to apply link setting");
@@ -2563,14 +2578,13 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* call read registers - updates values, now write them to struct */
i40e_read_stats_registers(pf, hw);
- stats->ipackets = ns->eth.rx_unicast +
- ns->eth.rx_multicast +
- ns->eth.rx_broadcast -
- ns->eth.rx_discards -
+ stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
+ pf->main_vsi->eth_stats.rx_multicast +
+ pf->main_vsi->eth_stats.rx_broadcast -
pf->main_vsi->eth_stats.rx_discards;
- stats->opackets = ns->eth.tx_unicast +
- ns->eth.tx_multicast +
- ns->eth.tx_broadcast;
+ stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
+ pf->main_vsi->eth_stats.tx_multicast +
+ pf->main_vsi->eth_stats.tx_broadcast;
stats->ibytes = ns->eth.rx_bytes;
stats->obytes = ns->eth.tx_bytes;
stats->oerrors = ns->eth.tx_errors +
@@ -8669,6 +8683,60 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+/*
+ * GL_SWR_PM_UP_THR:
+ * The value is not affected by the link speed; it is set according
+ * to the total number of ports for a better pipe-monitor configuration.
+ */
+static bool
+i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
+{
+#define I40E_GL_SWR_PM_EF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
+
+#define I40E_GL_SWR_PM_SF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
+
+ static const struct {
+ uint16_t device_id;
+ uint32_t val;
+ } swr_pm_table[] = {
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
+
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
+ };
+ uint32_t i;
+
+ if (value == NULL) {
+ PMD_DRV_LOG(ERR, "value is NULL");
+ return false;
+ }
+
+ for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
+ if (hw->device_id == swr_pm_table[i].device_id) {
+ *value = swr_pm_table[i].val;
+
+ PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
+ "value - 0x%08x",
+ hw->device_id, *value);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int
i40e_dev_sync_phy_type(struct i40e_hw *hw)
{
@@ -8724,13 +8792,16 @@ i40e_configure_registers(struct i40e_hw *hw)
}
if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
- I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_SF_VALUE;
- else /* For X710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_EF_VALUE;
+ uint32_t cfg_val;
+
+ if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
+ PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
+ "GL_SWR_PM_UP_THR value fixup",
+ hw->device_id);
+ continue;
+ }
+
+ reg_table[i].val = cfg_val;
}
ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
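
Note: the GL_SWR_PM_UP_THR fixup above becomes table-driven, and devices
missing from swr_pm_table now skip the register write instead of receiving a
guessed value. A standalone sketch of the lookup with hypothetical device ids
(the SF value mirrors the constant above; the EF value is an assumption):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEV_ID_A 0x1572 /* hypothetical ids; the real ones live in the */
    #define DEV_ID_B 0x1583 /* i40e headers */

    static const struct { uint16_t device_id; uint32_t val; } swr_pm_table[] = {
        { DEV_ID_A, 0x03030303 }, /* assumed EF value */
        { DEV_ID_B, 0x06060606 }, /* SF value from the patch */
    };

    /* Unknown devices get no fixup rather than a guessed value. */
    static bool get_swr_pm_cfg(uint16_t device_id, uint32_t *value)
    {
        for (size_t i = 0; i < sizeof(swr_pm_table) / sizeof(swr_pm_table[0]); i++) {
            if (device_id == swr_pm_table[i].device_id) {
                *value = swr_pm_table[i].val;
                return true;
            }
        }
        return false; /* caller skips the register write */
    }

    int main(void)
    {
        uint32_t v;
        if (get_swr_pm_cfg(DEV_ID_A, &v))
            printf("fixup value %#x\n", v);
        return 0;
    }
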
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b19224d9..0ffd9f3a 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2313,7 +2313,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- memset(dev_info, 0, sizeof(*dev_info));
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index a4e2996a..e1a97ea3 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -113,6 +113,11 @@
#define IXGBE_5TUPLE_MAX_PRI 7
#define IXGBE_5TUPLE_MIN_PRI 1
+/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/
+#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000
+/* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros*/
+#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0
+
#define IXGBE_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 4b81ee37..2a6f754b 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -433,10 +433,12 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- mac_mask = input_mask->mac_addr_byte_mask;
- fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
- & IXGBE_FDIRIP6M_INNER_MAC;
- info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+ fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
+ mac_mask = input_mask->mac_addr_byte_mask &
+ (IXGBE_FDIRIP6M_INNER_MAC >>
+ IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
+ fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
+ IXGBE_FDIRIP6M_INNER_MAC);
switch (input_mask->tunnel_type_mask) {
case 0:
@@ -720,10 +722,19 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
input->formatted.inner_mac,
fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
sizeof(input->formatted.inner_mac));
- input->formatted.tunnel_type =
- fdir_filter->input.flow.tunnel_flow.tunnel_type;
+ if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_VXLAN)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
+ else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
+ else
+ PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");
+
input->formatted.tni_vni =
- fdir_filter->input.flow.tunnel_flow.tunnel_id;
+ fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
}
return 0;
@@ -950,8 +961,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
} else {
/* tunnel mode */
- if (input->formatted.tunnel_type !=
- RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ if (input->formatted.tunnel_type)
tunnel_type = 0x80000000;
tunnel_type |= addr_high;
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
@@ -959,6 +969,9 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
input->formatted.tni_vni);
}
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
}
/* record vlan (little-endian) and flex_bytes(big-endian) */
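
Note: the conversion above maps the generic fdir tunnel type to the hardware
encoding (VXLAN sets the high bit of the 16-bit field, NVGRE stays 0) and
shifts tunnel_id right by 8, which assumes the API stores the 24-bit VNI/TNI
in the upper bits. A small sketch of that conversion:

    #include <stdint.h>
    #include <stdio.h>

    #define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000 /* tunnel-type bit set */
    #define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0

    enum tunnel { TUNNEL_VXLAN, TUNNEL_NVGRE };

    int main(void)
    {
        enum tunnel t = TUNNEL_VXLAN;
        uint32_t tunnel_id = 0x123456u << 8; /* 24-bit VNI in the upper bits */

        uint16_t tunnel_type = (t == TUNNEL_VXLAN) ?
            IXGBE_FDIR_VXLAN_TUNNEL_TYPE : IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
        uint32_t tni_vni = tunnel_id >> 8; /* raw VNI for the hardware field */

        printf("type=%#x vni=%#x\n", tunnel_type, tni_vni);
        return 0;
    }
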
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 45811498..0f0589e0 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -2023,11 +2023,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->tail = 0;
pkt_size -= dma_size;
- if (!pkt_size) {
- /* End of packet */
- txds->offset_eop |= PCIE_DESC_TX_EOP;
- } else {
- txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
+
+ /*
+ * Set EOP here: packets with just one segment are
+ * the common, prioritized case
+ */
+ if (likely(!pkt_size))
+ txds->offset_eop = PCIE_DESC_TX_EOP;
+ else {
+ txds->offset_eop = 0;
pkt = pkt->next;
}
/* Referencing next free TX descriptor */
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 067ee293..620f09a5 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -462,6 +462,12 @@ eth_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+static void
+eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct ether_addr *addr)
+{
+}
+
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
@@ -472,6 +478,7 @@ static const struct eth_dev_ops ops = {
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
+ .mac_addr_set = eth_mac_address_set,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
.reta_update = eth_rss_reta_update,
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 27590d39..deca553e 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -687,19 +687,19 @@ static const struct eth_dev_ops ops = {
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
- return -1;
+ if (rx->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
+ return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = pcap_filename;
- rx->queue[i].type = key;
- }
+ rx->queue[rx->num_of_queue].pcap = pcap;
+ rx->queue[rx->num_of_queue].name = pcap_filename;
+ rx->queue[rx->num_of_queue].type = key;
+ rx->num_of_queue++;
return 0;
}
@@ -711,19 +711,19 @@ open_rx_pcap(const char *key, const char *value, void *extra_args)
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *dumpers = extra_args;
pcap_dumper_t *dumper;
- for (i = 0; i < dumpers->num_of_queue; i++) {
- if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
- return -1;
+ if (dumpers->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
+ return -1;
- dumpers->queue[i].dumper = dumper;
- dumpers->queue[i].name = pcap_filename;
- dumpers->queue[i].type = key;
- }
+ dumpers->queue[dumpers->num_of_queue].dumper = dumper;
+ dumpers->queue[dumpers->num_of_queue].name = pcap_filename;
+ dumpers->queue[dumpers->num_of_queue].type = key;
+ dumpers->num_of_queue++;
return 0;
}
@@ -754,18 +754,18 @@ open_rx_tx_iface(const char *key, const char *value, void *extra_args)
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *iface = value;
struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = iface;
- rx->queue[i].type = key;
- }
+ if (rx->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ rx->queue[rx->num_of_queue].pcap = pcap;
+ rx->queue[rx->num_of_queue].name = iface;
+ rx->queue[rx->num_of_queue].type = key;
+ rx->num_of_queue++;
return 0;
}
@@ -776,18 +776,18 @@ open_rx_iface(const char *key, const char *value, void *extra_args)
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *iface = value;
struct pmd_devargs *tx = extra_args;
pcap_t *pcap;
- for (i = 0; i < tx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- tx->queue[i].pcap = pcap;
- tx->queue[i].name = iface;
- tx->queue[i].type = key;
- }
+ if (tx->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ tx->queue[tx->num_of_queue].pcap = pcap;
+ tx->queue[tx->num_of_queue].name = iface;
+ tx->queue[tx->num_of_queue].type = key;
+ tx->num_of_queue++;
return 0;
}
@@ -983,15 +983,8 @@ pmd_pcap_probe(const char *name, const char *params)
* We check whether we want to open a RX stream from a real NIC or a
* pcap file
*/
- pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
- if (pcaps.num_of_queue)
- is_rx_pcap = 1;
- else
- pcaps.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_RX_IFACE_ARG);
-
- if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
+ pcaps.num_of_queue = 0;
if (is_rx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
@@ -1007,15 +1000,8 @@ pmd_pcap_probe(const char *name, const char *params)
* We check whether we want to open a TX stream to a real NIC or a
* pcap file
*/
- dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
- if (dumpers.num_of_queue)
- is_tx_pcap = 1;
- else
- dumpers.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_IFACE_ARG);
-
- if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
+ dumpers.num_of_queue = 0;
if (is_tx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
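
Note: each pcap kvargs callback above now appends exactly one queue per
invocation and bounds-checks against RTE_PMD_PCAP_MAX_QUEUES, instead of
pre-counting args and looping. A minimal sketch of the per-value append
(rte_kvargs_process() would call it once per matching key):

    #include <stdio.h>

    #define MAX_QUEUES 16 /* stands in for RTE_PMD_PCAP_MAX_QUEUES */

    struct pmd_devargs {
        unsigned int num_of_queue;
        const char *name[MAX_QUEUES];
    };

    /* Each call appends one queue, so duplicate devargs each get their own
     * slot and the count can no longer run past the array. */
    static int open_one(const char *value, void *extra_args)
    {
        struct pmd_devargs *q = extra_args;

        if (q->num_of_queue >= MAX_QUEUES)
            return -1;
        q->name[q->num_of_queue] = value;
        q->num_of_queue++;
        return 0;
    }

    int main(void)
    {
        struct pmd_devargs rx = { 0 };

        open_one("eth0.pcap", &rx);
        open_one("eth1.pcap", &rx);
        printf("queues = %u\n", rx.num_of_queue);
        return 0;
    }
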
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 207b01b9..8ddfdeef 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -229,15 +229,19 @@ static const char *grc_timeout_attn_master_to_str(u8 master)
static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
u32 tmp, tmp2;
/* We've already cleared the timeout interrupt register, so we learn
- * of interrupts via the validity register
+ * of interrupts via the validity register.
+ * Any attention which is not for a timeout event is treated as fatal.
*/
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
- if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
+ if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) {
+ rc = ECORE_INVAL;
goto out;
+ }
/* Read the GRC timeout information */
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -261,11 +265,11 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
ECORE_GRC_ATTENTION_VF_SHIFT);
-out:
- /* Regardles of anything else, clean the validity bit */
+ /* Clean the validity bit */
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
- return ECORE_SUCCESS;
+out:
+ return rc;
}
#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index ce3a0936..679599f8 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -441,7 +441,10 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
if (rc == 0)
rc = ecore_filter_ucast_cmd(edev, ucast,
ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS) {
+ /* Indicate error only for add filter operation.
+ * Delete filter operations are not severe.
+ */
+ if ((rc != ECORE_SUCCESS) && add) {
DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
rc, add);
}
@@ -797,8 +800,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
/* Enable VLAN offloads by default */
qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK);
qdev->state = QEDE_DEV_CONFIG;
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index d0f0d52d..82106a78 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -879,7 +879,7 @@ nicvf_dev_tx_queue_release(void *sq)
static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
- struct nicvf_txq *txq;
+ struct nicvf_txq *txq = NULL;
size_t i;
bool multiseg = false;
@@ -900,6 +900,9 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
dev->tx_pkt_burst = nicvf_xmit_pkts;
}
+ if (!txq)
+ return;
+
if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
else
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 275adb31..4b9b2932 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -190,12 +190,14 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
free_desc -= TX_DESC_PER_PKT;
}
- sq->tail = tail;
- sq->xmit_bufs += i;
- rte_wmb();
+ if (likely(i)) {
+ sq->tail = tail;
+ sq->xmit_bufs += i;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ }
return i;
}
@@ -246,12 +248,14 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
}
}
- sq->tail = tail;
- sq->xmit_bufs += used_bufs;
- rte_wmb();
+ if (likely(used_desc)) {
+ sq->tail = tail;
+ sq->xmit_bufs += used_bufs;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, used_desc);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, used_desc);
+ }
return i;
}
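
Note: both thunderx transmit paths above now touch the tail pointer, the write
barrier, and the doorbell only when at least one packet was actually queued,
since a zero-length doorbell write is wasted work at best. A minimal sketch of
the guarded kick (__atomic_thread_fence stands in for rte_wmb()):

    #include <stdint.h>
    #include <stdio.h>

    /* Skip both the barrier and the doorbell write when nothing was queued
     * (e.g. the ring was full and zero packets fit). */
    static void kick_hw(volatile uint64_t *door, unsigned int queued,
                        unsigned int desc_per_pkt)
    {
        if (queued == 0)
            return;

        __atomic_thread_fence(__ATOMIC_RELEASE); /* rte_wmb() in the driver */
        *door = (uint64_t)queued * desc_per_pkt;
    }

    int main(void)
    {
        uint64_t doorbell = 0;

        kick_hw(&doorbell, 0, 3); /* no-op: nothing was queued */
        kick_hw(&doorbell, 4, 3); /* rings the doorbell with 12 descriptors */
        printf("doorbell = %llu\n", (unsigned long long)doorbell);
        return 0;
    }
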