author    Luca Boccassi <luca.boccassi@gmail.com>    2018-04-23 14:16:57 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>    2018-04-23 14:17:34 +0100
commit    39157ec04095ab012d11db23c462844634bfbb8f (patch)
tree      643f83dc46445aa7834fe271ce2c21a5cb278cee /drivers/net
parent    47d9763a1dd3103d732da9eec350cfc1cd784717 (diff)

New upstream version 16.11.5 (upstream/16.11.5)

Change-Id: I47171042629a57c6958d50251351e668ca5f3d8b
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/af_packet/rte_eth_af_packet.c      |   2
-rw-r--r--  drivers/net/bnxt/bnxt.h                        |   1
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c                 |  34
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c                   |  58
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.h                   |   4
-rw-r--r--  drivers/net/bnxt/bnxt_ring.c                   |  24
-rw-r--r--  drivers/net/bnxt/bnxt_ring.h                   |   3
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.c                    |   7
-rw-r--r--  drivers/net/bnxt/bnxt_txr.c                    |  17
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad.c      |   3
-rw-r--r--  drivers/net/bonding/rte_eth_bond_api.c         |  11
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c         |  10
-rw-r--r--  drivers/net/e1000/em_ethdev.c                  |   2
-rw-r--r--  drivers/net/e1000/igb_ethdev.c                 |  20
-rw-r--r--  drivers/net/ena/ena_ethdev.c                   |  10
-rw-r--r--  drivers/net/enic/enic.h                        |  26
-rw-r--r--  drivers/net/enic/enic_ethdev.c                 |  18
-rw-r--r--  drivers/net/enic/enic_main.c                   |  43
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c               |   4
-rw-r--r--  drivers/net/i40e/Makefile                      |   2
-rw-r--r--  drivers/net/i40e/base/i40e_adminq.c            |  23
-rw-r--r--  drivers/net/i40e/base/i40e_common.c            |   8
-rw-r--r--  drivers/net/i40e/base/i40e_nvm.c               |   3
-rw-r--r--  drivers/net/i40e/base/i40e_type.h              |   1
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c                 | 475
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h                 |  63
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c              |  13
-rw-r--r--  drivers/net/i40e/i40e_fdir.c                   |   8
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c                   |   1
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_altivec.c       | 654
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82599.c           |   7
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_api.c             |   2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.c          |  10
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_mbx.c             |  22
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_type.h            |   4
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c               | 167
-rw-r--r--  drivers/net/mlx5/mlx5.h                        |  16
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c                 |  18
-rw-r--r--  drivers/net/nfp/nfp_net.c                      |  19
-rw-r--r--  drivers/net/null/rte_eth_null.c                |   2
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c                |   6
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.c             |   7
-rw-r--r--  drivers/net/qede/base/ecore_vf.c               |   6
-rw-r--r--  drivers/net/qede/base/ecore_vfpf_if.h          |   2
-rw-r--r--  drivers/net/qede/qede_ethdev.c                 | 160
-rw-r--r--  drivers/net/qede/qede_rxtx.c                   |  57
-rw-r--r--  drivers/net/qede/qede_rxtx.h                   |  15
-rw-r--r--  drivers/net/ring/rte_eth_ring.c                |   2
-rw-r--r--  drivers/net/szedata2/rte_eth_szedata2.c        |   4
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.c            |   2
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c              |   2
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.c              |   4
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c             |  32
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c               |  40
-rw-r--r--  drivers/net/virtio/virtio_rxtx.h               |   3
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.c        |  30
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.h        |   2
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c |   4
-rw-r--r--  drivers/net/virtio/virtqueue.c                 |  61
-rw-r--r--  drivers/net/virtio/virtqueue.h                 |  15
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c           |   2
-rw-r--r--  drivers/net/xenvirt/virtqueue.h                |   2
62 files changed, 1817 insertions(+), 456 deletions(-)
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 45c6519f..6d73f127 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -121,7 +121,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_AUTONEG
+ .link_autoneg = ETH_LINK_AUTONEG
};
static uint16_t
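Note on the constant swap above: in this DPDK generation, ETH_LINK_SPEED_AUTONEG belongs to the link-speed capability bitmap (value 0), while ETH_LINK_AUTONEG (value 1) is the boolean that struct rte_eth_link's link_autoneg field expects, so the old initializer silently reported a fixed link. A minimal sketch of the distinction, with stand-in names since the real definitions live in rte_ethdev.h:

#include <stdint.h>

#define LINK_SPEED_AUTONEG 0u  /* capability-bitmap value ("any speed") */
#define LINK_AUTONEG       1u  /* boolean value for the link_autoneg field */

struct link_status {           /* stand-in for struct rte_eth_link */
    uint32_t link_speed;
    uint16_t link_autoneg;     /* 0 = fixed, 1 = autonegotiated */
};

struct link_status pmd_link = {
    .link_speed   = 10000,
    .link_autoneg = LINK_AUTONEG, /* the fix: the boolean, not the bitmap bit */
};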
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index ff3c2406..b934605e 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -110,6 +110,7 @@ struct bnxt_link_info {
uint16_t link_speed;
uint16_t support_speeds;
uint16_t auto_link_speed;
+ uint16_t force_link_speed;
uint16_t auto_link_speed_mask;
uint32_t preemphasis;
uint8_t phy_type;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index b6feb5ad..359a95d4 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -63,19 +63,34 @@ static const char bnxt_version[] =
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
+#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
+#define BROADCOM_DEV_ID_57311 0x16ce
+#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
+#define BROADCOM_DEV_ID_57412 0x16d6
+#define BROADCOM_DEV_ID_57414 0x16d7
+#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
+#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
+#define BROADCOM_DEV_ID_57412_MF 0x16de
+#define BROADCOM_DEV_ID_57314 0x16df
+#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
+#define BROADCOM_DEV_ID_57417_SFP 0x16e2
+#define BROADCOM_DEV_ID_57416_SFP 0x16e3
+#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
+#define BROADCOM_DEV_ID_57414_MF 0x16ec
+#define BROADCOM_DEV_ID_57416_MF 0x16ee
static struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
@@ -95,6 +110,21 @@ static struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -285,7 +315,9 @@ static int bnxt_init_nic(struct bnxt *bp)
{
int rc;
- bnxt_init_ring_grps(bp);
+ rc = bnxt_init_ring_grps(bp);
+ if (rc)
+ return rc;
bnxt_init_vnics(bp);
bnxt_init_filters(bp);
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 619bc979..8ff4c15d 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -174,9 +174,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
* by ethtool.
*/
if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
- mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
- mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
req.mask = rte_cpu_to_le_32(mask);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -504,7 +504,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
}
/* AutoNeg - Advertise speeds specified. */
- if (conf->auto_link_speed_mask) {
+ if (conf->auto_link_speed_mask &&
+ !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
req.auto_link_speed_mask =
@@ -566,6 +567,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+ link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
link_info->phy_ver[0] = resp->phy_maj;
link_info->phy_ver[1] = resp->phy_min;
link_info->phy_ver[2] = resp->phy_bld;
@@ -604,7 +606,7 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
- uint32_t stats_ctx_id)
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
int rc = 0;
struct hwrm_ring_alloc_input req = {.req_type = 0 };
@@ -625,11 +627,12 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
/* FALLTHROUGH */
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
- req.cmpl_ring_id =
- rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
+ req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
req.length = rte_cpu_to_le_32(ring->ring_size);
req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
- req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
+ if (stats_ctx_id != INVALID_STATS_CTX_ID)
+ req.enables =
+ rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
break;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
@@ -796,7 +799,9 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
HWRM_CHECK_RESULT;
cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
- bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+ //Tx rings don't need grp_info entry. It is a Rx only attribute.
+ if (idx)
+ bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
return rc;
}
@@ -818,7 +823,9 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
HWRM_CHECK_RESULT;
cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
- bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+ //Tx rings don't have a grp_info entry. It is a Rx only attribute.
+ if (idx)
+ bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
return rc;
}
@@ -1025,10 +1032,13 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
unsigned int idx = i + 1;
- if (i >= bp->rx_cp_nr_rings)
+ if (i >= bp->rx_cp_nr_rings) {
cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
- else
+ //Tx rings don't have a grp_info entry.
+ idx = 0;
+ } else {
cpr = bp->rx_queues[i]->cp_ring;
+ }
if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
if (rc)
@@ -1052,6 +1062,8 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
if (i >= bp->rx_cp_nr_rings) {
txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
cpr = txq->cp_ring;
+ //Tx rings don't need grp_info entry.
+ idx = 0;
} else {
rxq = bp->rx_queues[i];
cpr = rxq->cp_ring;
@@ -1089,14 +1101,13 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
}
static void bnxt_free_cp_ring(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr, unsigned int idx)
+ struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
bnxt_hwrm_ring_free(bp, cp_ring,
HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
cp_ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
@@ -1112,7 +1123,6 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
- unsigned int idx = bp->rx_cp_nr_rings + i + 1;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
@@ -1128,7 +1138,7 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
txr->tx_cons = 0;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
- bnxt_free_cp_ring(bp, cpr, idx);
+ bnxt_free_cp_ring(bp, cpr);
}
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
@@ -1152,7 +1162,8 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
rxr->rx_prod = 0;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
- bnxt_free_cp_ring(bp, cpr, idx);
+ bnxt_free_cp_ring(bp, cpr);
+ bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
}
/* Default completion ring */
@@ -1160,7 +1171,8 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
- bnxt_free_cp_ring(bp, cpr, 0);
+ bnxt_free_cp_ring(bp, cpr);
+ bp->grp_info[0].cp_fw_ring_id = INVALID_HW_RING_ID;
}
return rc;
@@ -1511,7 +1523,9 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
- if (autoneg == 1) {
+ /* Autoneg can be done only when the FW allows */
+ if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
+ bp->link_info.force_link_speed)) {
link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
link_req.auto_link_speed_mask =
@@ -1529,7 +1543,13 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
}
link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
- link_req.link_speed = speed;
+ /* If user wants a particular speed try that first. */
+ if (speed)
+ link_req.link_speed = speed;
+ else if (bp->link_info.force_link_speed)
+ link_req.link_speed = bp->link_info.force_link_speed;
+ else
+ link_req.link_speed = bp->link_info.auto_link_speed;
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
link_req.auto_pause = bp->link_info.auto_pause;
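The first hunk above fixes a classic accumulate-vs-overwrite bug: plain assignment to mask discards any receive-mask bits set earlier in the function, so enabling promiscuous mode silently cancelled all-multicast and vice versa. A minimal, self-contained sketch (flag values illustrative):

#include <assert.h>
#include <stdint.h>

#define MASK_ALL_MCAST 0x2u
#define MASK_PROMISC   0x10u

int main(void)
{
    uint32_t mask = MASK_ALL_MCAST; /* set earlier in the function */

    mask |= MASK_PROMISC;           /* fix: add the bit, keep ALL_MCAST */
    assert(mask == (MASK_ALL_MCAST | MASK_PROMISC));
    return 0;
}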
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 6519ef21..32c74c8b 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -65,7 +65,7 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
- uint32_t stats_ctx_id);
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id);
int bnxt_hwrm_ring_free(struct bnxt *bp,
struct bnxt_ring *ring, uint32_t ring_type);
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
@@ -101,5 +101,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp);
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
int bnxt_hwrm_func_qcfg(struct bnxt *bp);
+#define HWRM_RING_ALLOC_INPUT_EN_STAT_CTX_ID_VALID \
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID
#endif
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 3f81ffcc..2bceb4db 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -61,13 +61,19 @@ void bnxt_free_ring(struct bnxt_ring *ring)
* Ring groups
*/
-void bnxt_init_ring_grps(struct bnxt *bp)
+int bnxt_init_ring_grps(struct bnxt *bp)
{
unsigned int i;
+ //One slot is still consumed by Default ring.
+ if (bp->max_ring_grps < 1 + bp->rx_cp_nr_rings)
+ return -ENOMEM;
+
for (i = 0; i < bp->max_ring_grps; i++)
memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
sizeof(struct bnxt_ring_grp_info));
+
+ return 0;
}
/*
@@ -219,7 +225,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
- 0, HWRM_NA_SIGNATURE);
+ 0, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
cpr->cp_doorbell =
@@ -239,7 +246,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
/* Rx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
- idx, HWRM_NA_SIGNATURE);
+ idx, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
cpr->cp_doorbell =
@@ -251,7 +259,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
/* Rx ring */
rc = bnxt_hwrm_ring_alloc(bp, ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
- idx, cpr->hw_stats_ctx_id);
+ idx, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
if (rc)
goto err_out;
rxr->rx_prod = 0;
@@ -279,20 +288,21 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
/* Tx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
- idx, HWRM_NA_SIGNATURE);
+ idx, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
cpr->cp_doorbell =
(char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
idx * 0x80;
- bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
/* Tx ring */
rc = bnxt_hwrm_ring_alloc(bp, ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
- idx, cpr->hw_stats_ctx_id);
+ idx, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
if (rc)
goto err_out;
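bnxt_init_ring_grps() now fails up front when the firmware exposes fewer ring groups than the configured Rx rings need (one slot stays reserved for the default ring), and bnxt_init_nic() in bnxt_ethdev.c propagates that error instead of ignoring a void call. A condensed sketch of the contract, assuming the counts come from firmware capabilities:

#include <errno.h>

static int init_ring_grps(unsigned int max_ring_grps, unsigned int rx_rings)
{
    if (max_ring_grps < 1 + rx_rings) /* +1: default ring consumes a slot */
        return -ENOMEM;
    /* ... reset each group entry to HWRM_NA_SIGNATURE ... */
    return 0;
}

static int init_nic(unsigned int max_ring_grps, unsigned int rx_rings)
{
    int rc = init_ring_grps(max_ring_grps, rx_rings);

    if (rc)
        return rc; /* propagate; do not continue with a short group table */
    /* ... init vnics and filters ... */
    return 0;
}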
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 8656549a..22a56eb1 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -65,6 +65,7 @@
#define MAX_CP_DESC_CNT (16 * 1024)
#define INVALID_HW_RING_ID ((uint16_t)-1)
+#define INVALID_STATS_CTX_ID ((uint16_t)-1)
struct bnxt_ring {
void *bd;
@@ -92,7 +93,7 @@ struct bnxt_tx_ring_info;
struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
void bnxt_free_ring(struct bnxt_ring *ring);
-void bnxt_init_ring_grps(struct bnxt *bp);
+int bnxt_init_ring_grps(struct bnxt *bp);
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
struct bnxt_tx_ring_info *tx_ring_info,
struct bnxt_rx_ring_info *rx_ring_info,
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 980f3ec5..5698f02f 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -72,7 +72,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
rx_buf->mbuf = data;
- rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+ rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(data));
return 0;
}
@@ -126,6 +126,7 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf = rx_buf->mbuf;
rte_prefetch0(mbuf);
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
mbuf->next = NULL;
mbuf->pkt_len = rxcmp->len;
@@ -152,12 +153,12 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
else
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_NONE;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
else
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
/* Re-install the mbuf back to the rx ring */
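The ol_flags change above matters to applications: PKT_RX_*_CKSUM_NONE means "hardware did not check", while PKT_RX_*_CKSUM_BAD means "hardware checked and the checksum failed", so reporting BAD lets the application drop the packet instead of re-verifying it in software. A sketch of the consumer-side logic (bit positions illustrative; the real flags are in rte_mbuf.h):

#include <stdint.h>
#include <stdio.h>

#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7) /* illustrative bit positions */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)

static void classify_rx(uint64_t ol_flags)
{
    if (ol_flags & PKT_RX_IP_CKSUM_BAD)
        puts("drop: hardware verified the IP checksum and it failed");
    else if (ol_flags & PKT_RX_IP_CKSUM_GOOD)
        puts("accept: hardware verified the IP checksum");
    else
        puts("verify in software: checksum status unknown");
}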
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index c2f9ae78..9cd44a93 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -101,7 +101,7 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
if (ring == NULL)
return -ENOMEM;
txr->tx_ring_struct = ring;
- ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+ ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)txr->tx_desc_ring;
ring->bd_dma = txr->tx_desc_mapping;
@@ -216,23 +216,28 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
tx_pkt->l4_len;
txbd1->mss = tx_pkt->tso_segsz;
- } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
+ PKT_TX_IIP_TCP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
+ PKT_TX_OIP_TCP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
+ PKT_TX_OIP_IIP_CKSUM) {
/* Outer IP, Inner IP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
+ PKT_TX_TCP_UDP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
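Each PKT_TX_*_CKSUM name above is a multi-bit combination, so the old test "flags & MASK" fired when any one constituent bit was set and could select the wrong checksum-offload descriptor flags; requiring "(flags & MASK) == MASK" matches only the exact combination. The pattern in isolation:

#include <assert.h>
#include <stdint.h>

#define FLAG_A  0x1u
#define FLAG_B  0x2u
#define MASK_AB (FLAG_A | FLAG_B)

int main(void)
{
    uint64_t flags = FLAG_A;                /* only half the combination */

    assert((flags & MASK_AB) != 0);         /* old test: false positive */
    assert((flags & MASK_AB) != MASK_AB);   /* new test: correctly rejects */
    return 0;
}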
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 8081981c..eea6ccc5 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -1109,7 +1109,8 @@ bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
uint8_t i;
for (i = 0; i < internals->active_slave_count; i++)
- bond_mode_8023ad_activate_slave(bond_dev, i);
+ bond_mode_8023ad_activate_slave(bond_dev,
+ internals->active_slaves[i]);
return 0;
}
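The fix above distinguishes array positions from port ids: active_slaves[] holds port ids, which need not be contiguous or equal to the loop index, so activating slave "i" could target the wrong port or a nonexistent one. A self-contained illustration:

#include <stdint.h>
#include <stdio.h>

static void activate_slave(uint8_t port_id)
{
    printf("activate 802.3ad on port %u\n", port_id);
}

int main(void)
{
    const uint8_t active_slaves[] = { 3, 7 }; /* port ids, not 0 and 1 */

    for (unsigned int i = 0; i < 2; i++)
        activate_slave(active_slaves[i]); /* fix: pass the id, not i */
    return 0;
}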
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 2a3893a1..4b6f1471 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -412,8 +412,13 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
if (internals->slave_count < 1) {
/* if MAC is not user defined then use MAC of first slave add to
* bonded device */
- if (!internals->user_defined_mac)
- mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);
+ if (!internals->user_defined_mac) {
+ if (mac_address_set(bonded_eth_dev,
+ slave_eth_dev->data->mac_addrs)) {
+ RTE_BOND_LOG(ERR, "Failed to set MAC address");
+ return -1;
+ }
+ }
/* Inherit eth dev link properties from first slave */
link_properties_set(bonded_eth_dev,
@@ -565,7 +570,7 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
&rte_eth_devices[bonded_port_id].data->port_id);
/* Restore original MAC address of slave device */
- mac_address_set(&rte_eth_devices[slave_port_id],
+ rte_eth_dev_default_mac_addr_set(slave_port_id,
&(internals->slaves[slave_idx].persisted_mac_addr));
slave_eth_dev = &rte_eth_devices[slave_port_id];
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 7811a5ac..c1ec3aa4 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1217,7 +1217,8 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
case BONDING_MODE_BALANCE:
case BONDING_MODE_BROADCAST:
for (i = 0; i < internals->slave_count; i++) {
- if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
+ if (rte_eth_dev_default_mac_addr_set(
+ internals->slaves[i].port_id,
bonded_eth_dev->data->mac_addrs)) {
RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
internals->slaves[i].port_id);
@@ -1235,15 +1236,16 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
for (i = 0; i < internals->slave_count; i++) {
if (internals->slaves[i].port_id ==
internals->current_primary_port) {
- if (mac_address_set(&rte_eth_devices[internals->primary_port],
+ if (rte_eth_dev_default_mac_addr_set(
+ internals->primary_port,
bonded_eth_dev->data->mac_addrs)) {
RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
internals->current_primary_port);
return -1;
}
} else {
- if (mac_address_set(
- &rte_eth_devices[internals->slaves[i].port_id],
+ if (rte_eth_dev_default_mac_addr_set(
+ internals->slaves[i].port_id,
&internals->slaves[i].persisted_mac_addr)) {
RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
internals->slaves[i].port_id);
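These hunks replace the bonding-internal mac_address_set() with the public rte_eth_dev_default_mac_addr_set(), which dispatches through the slave's own ethdev ops so its PMD can reprogram the hardware MAC filter, whereas the internal helper only rewrote software state (this rationale is inferred from the diff, not stated in it). Minimal usage, assuming the 16.11-era uint8_t port id:

#include <rte_ethdev.h>

/* hypothetical helper: push the bonded device's MAC down to one slave */
static int push_mac_to_slave(uint8_t slave_port, struct ether_addr *mac)
{
    return rte_eth_dev_default_mac_addr_set(slave_port, mac);
}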
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index aee3d340..58495a50 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1145,7 +1145,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_speed = 0;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ link.link_autoneg = ETH_LINK_FIXED;
}
rte_em_dev_atomic_write_link_status(dev, &link);
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 407021df..5108ff35 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1121,7 +1121,7 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
uint16_t nb_rx_q = dev->data->nb_rx_queues;
- uint16_t nb_tx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
tx_mq_mode == ETH_MQ_TX_DCB ||
@@ -2226,7 +2226,7 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_speed = 0;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ link.link_autoneg = ETH_LINK_FIXED;
}
rte_igb_dev_atomic_write_link_status(dev, &link);
@@ -2757,12 +2757,17 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
struct e1000_mbx_info *mbx = &hw->mbx;
u32 in_msg = 0;
- if (mbx->ops.read(hw, &in_msg, 1, 0))
- return;
+ /* peek the message first */
+ in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));
/* PF reset VF event */
- if (in_msg == E1000_PF_CONTROL_MSG)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ if (in_msg == E1000_PF_CONTROL_MSG) {
+ /* dummy mbx read to ack pf */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ return;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
}
static int
@@ -3085,7 +3090,8 @@ igbvf_dev_start(struct rte_eth_dev *dev)
}
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
intr_vector = dev->data->nb_rx_queues;
ret = rte_intr_efd_enable(intr_handle, intr_vector);
if (ret)
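The igbvf mailbox change above splits "peek" from "ack": mbx->ops.read() consumes the message and acknowledges the PF as a side effect, so the VF now inspects the mailbox memory first and performs the acking read only for a genuine PF reset notification. A behavior-level sketch with stand-in types:

#include <stdint.h>

#define PF_CONTROL_MSG 0x0100u /* illustrative message value */

static void process_mbx(volatile const uint32_t *mbx_mem,
                        int (*acking_read)(uint32_t *out))
{
    uint32_t msg = *mbx_mem;      /* peek: no side effects on the PF */

    if (msg == PF_CONTROL_MSG) {
        uint32_t dummy;

        if (acking_read(&dummy))  /* dummy read solely to ack the PF */
            return;
        /* ... raise the RTE_ETH_EVENT_INTR_RESET callback ... */
    }
}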
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 6efe0c3c..63c42362 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -248,16 +248,17 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
struct ena_com_rx_ctx *ena_rx_ctx)
{
uint64_t ol_flags = 0;
+ uint32_t packet_type = 0;
if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
- ol_flags |= PKT_TX_TCP_CKSUM;
+ packet_type |= RTE_PTYPE_L4_TCP;
else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
- ol_flags |= PKT_TX_UDP_CKSUM;
+ packet_type |= RTE_PTYPE_L4_UDP;
if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
- ol_flags |= PKT_TX_IPV4;
+ packet_type |= RTE_PTYPE_L3_IPV4;
else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
- ol_flags |= PKT_TX_IPV6;
+ packet_type |= RTE_PTYPE_L3_IPV6;
if (unlikely(ena_rx_ctx->l4_csum_err))
ol_flags |= PKT_RX_L4_CKSUM_BAD;
@@ -265,6 +266,7 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags = ol_flags;
+ mbuf->packet_type = packet_type;
}
static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
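The ena fix above corrects a semantic mix-up: PKT_TX_* values request transmit offloads and carry no meaning on received packets, while parsed protocol information belongs in mbuf->packet_type as RTE_PTYPE_* values; ol_flags on Rx is reserved for status bits such as the checksum-error flags. A sketch of the corrected split (ptype values illustrative):

#include <stdint.h>

#define PTYPE_L3_IPV4 0x00000010u /* illustrative, see rte_mbuf.h */
#define PTYPE_L4_TCP  0x00000100u

struct rx_meta {
    uint32_t packet_type; /* what the frame is */
    uint64_t ol_flags;    /* what happened to it (e.g. bad checksum) */
};

static void fill_rx_meta(struct rx_meta *m, int is_ipv4, int is_tcp,
                         int l4_csum_err)
{
    m->packet_type = 0;
    if (is_ipv4)
        m->packet_type |= PTYPE_L3_IPV4;
    if (is_tcp)
        m->packet_type |= PTYPE_L4_TCP;
    m->ol_flags = l4_csum_err ? 1u : 0u; /* stand-in for PKT_RX_L4_CKSUM_BAD */
}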
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a3d2a0fb..46f20b2d 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -53,14 +53,6 @@
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
-#define ENIC_WQ_MAX 8
-/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both
- * RQs use the same CQ.
- */
-#define ENIC_RQ_MAX 16
-#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
-#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
-
#define VLAN_ETH_HLEN 18
#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
@@ -139,17 +131,17 @@ struct enic {
unsigned int flags;
unsigned int priv_flags;
- /* work queue */
- struct vnic_wq wq[ENIC_WQ_MAX];
- unsigned int wq_count;
+ /* work queue (len = conf_wq_count) */
+ struct vnic_wq *wq;
+ unsigned int wq_count; /* equals eth_dev nb_tx_queues */
- /* receive queue */
- struct vnic_rq rq[ENIC_RQ_MAX];
- unsigned int rq_count;
+ /* receive queue (len = conf_rq_count) */
+ struct vnic_rq *rq;
+ unsigned int rq_count; /* equals eth_dev nb_rx_queues */
- /* completion queue */
- struct vnic_cq cq[ENIC_CQ_MAX];
- unsigned int cq_count;
+ /* completion queue (len = conf_cq_count) */
+ struct vnic_cq *cq;
+ unsigned int cq_count; /* equals rq_count + wq_count */
/* interrupt resource */
struct vnic_intr intr;
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 2b154ec2..17479d4d 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -184,13 +184,7 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- if (queue_idx >= ENIC_WQ_MAX) {
- dev_err(enic,
- "Max number of TX queues exceeded. Max is %d\n",
- ENIC_WQ_MAX);
- return -EINVAL;
- }
-
+ RTE_ASSERT(queue_idx < enic->conf_wq_count);
eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
@@ -302,16 +296,8 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- /* With Rx scatter support, two RQs are now used on VIC per RQ used
- * by the application.
- */
- if (queue_idx * 2 >= ENIC_RQ_MAX) {
- dev_err(enic,
- "Max number of RX queues exceeded. Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
- ENIC_RQ_MAX);
- return -EINVAL;
- }
+ RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
eth_dev->data->rx_queues[queue_idx] =
(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index b25eff4f..63d0c50d 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1080,6 +1080,9 @@ static void enic_dev_deinit(struct enic *enic)
vnic_dev_notify_unset(enic->vdev);
rte_free(eth_dev->data->mac_addrs);
+ rte_free(enic->cq);
+ rte_free(enic->rq);
+ rte_free(enic->wq);
}
@@ -1087,27 +1090,28 @@ int enic_set_vnic_res(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
int rc = 0;
+ unsigned int required_rq, required_wq, required_cq;
- /* With Rx scatter support, two RQs are now used per RQ used by
- * the application.
- */
- if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
+ /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
+ required_rq = eth_dev->data->nb_rx_queues * 2;
+ required_wq = eth_dev->data->nb_tx_queues;
+ required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+
+ if (enic->conf_rq_count < required_rq) {
dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
eth_dev->data->nb_rx_queues,
- eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
+ required_rq, enic->conf_rq_count);
rc = -EINVAL;
}
- if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
+ if (enic->conf_wq_count < required_wq) {
dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
eth_dev->data->nb_tx_queues, enic->conf_wq_count);
rc = -EINVAL;
}
- if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
- eth_dev->data->nb_tx_queues)) {
+ if (enic->conf_cq_count < required_cq) {
dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
- (eth_dev->data->nb_rx_queues +
- eth_dev->data->nb_tx_queues), enic->conf_cq_count);
+ required_cq, enic->conf_cq_count);
rc = -EINVAL;
}
@@ -1309,6 +1313,25 @@ static int enic_dev_init(struct enic *enic)
dev_err(enic, "See the ENIC PMD guide for more information.\n");
return -EINVAL;
}
+ /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
+ enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
+ enic->conf_cq_count, 8);
+ enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
+ enic->conf_rq_count, 8);
+ enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
+ enic->conf_wq_count, 8);
+ if (enic->conf_cq_count > 0 && enic->cq == NULL) {
+ dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_rq_count > 0 && enic->rq == NULL) {
+ dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_wq_count > 0 && enic->wq == NULL) {
+ dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
+ return -1;
+ }
/* Get the supported filters */
enic_fdir_info(enic);
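The allocation block above leans on a subtle contract: rte_zmalloc() returns NULL for a zero-byte request, so a bare NULL check would abort probing on perfectly valid zero queue counts; each check is therefore gated on the corresponding count being nonzero. The guard in isolation (calloc standing in for rte_zmalloc):

#include <stddef.h>
#include <stdlib.h>

static void *zalloc_array(size_t n, size_t elem_size, int *oom)
{
    void *p = calloc(n, elem_size); /* stand-in for rte_zmalloc */

    *oom = (n > 0 && p == NULL);    /* NULL is an error only when n > 0 */
    return p;
}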
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index d04efdc6..0fac816f 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -53,7 +53,7 @@
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US 100000
/* A period of quiescence for switch */
-#define FM10K_SWITCH_QUIESCE_US 10000
+#define FM10K_SWITCH_QUIESCE_US 100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
@@ -1239,7 +1239,7 @@ fm10k_dev_close(struct rte_eth_dev *dev)
MAX_LPORT_NUM, false);
fm10k_mbx_unlock(hw);
- /* allow 10ms for device to quiesce */
+ /* allow 100ms for device to quiesce */
rte_delay_us(FM10K_SWITCH_QUIESCE_US);
/* Stop mailbox service first */
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 13085fb7..9c9a8671 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -99,6 +99,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
+else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
else
SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c
endif
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 0d3a83fa..e231582c 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -682,6 +682,12 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+ /* Newer versions of firmware require lock when reading the NVM */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 5)))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
@@ -1051,22 +1057,19 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
-#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
if (!i40e_is_vf(hw))
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ else
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+#ifdef PF_DRIVER
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
-#ifdef INTEGRATED_VF
- if (i40e_is_vf(hw))
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#else
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
+#endif /* INTEGRATED_VF */
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
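The adminq hunk above derives a capability flag from the firmware API version: admin-queue NVM reads need the NVM lock starting with API 1.5, and i40e_read_nvm_word() (in i40e_nvm.c below) takes the lock only when I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK is set. The version predicate in isolation:

#include <stdint.h>

static int nvm_read_requires_lock(uint16_t api_maj, uint16_t api_min)
{
    return api_maj > 1 || (api_maj == 1 && api_min >= 5);
}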
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 9a6b3ed6..de60e2a1 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -1046,7 +1046,8 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
#ifdef X722_SUPPORT
if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
#endif
status = i40e_init_nvm(hw);
@@ -1578,6 +1579,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1626,6 +1628,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1636,9 +1639,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
- if (mode == I40E_LINK_ACTIVITY)
- blink = false;
-
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index 4fa1220b..4976b1ff 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -221,7 +221,8 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
i40e_release_nvm(hw);
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index b5f72c32..d514abe3 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -685,6 +685,7 @@ struct i40e_hw {
#endif
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
/* debug mask */
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 0835c2d4..0b270b69 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -716,6 +716,15 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
+static inline void
+i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ i40e_write_rx_ctl(hw, reg_addr, reg_val);
+ PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
+ "with value 0x%08x",
+ reg_addr, reg_val);
+}
+
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
@@ -735,9 +744,10 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
* configuration API is added to avoid configuration conflicts
* between ports of the same device.
*/
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+ i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
/*
* Initialize registers for parsing packet type of QinQ
@@ -745,8 +755,26 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
* configuration API is added to avoid configuration conflicts
* between ports of the same device.
*/
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
- I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+ i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
+}
+
+static inline void i40e_config_automask(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t val;
+
+ /* INTENA flag is not auto-cleared for interrupt */
+ val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+
+ /* If support multi-driver, PF will use INT0. */
+ if (!pf->support_multi_driver)
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
+
+ I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}
#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
@@ -933,6 +961,71 @@ config_floating_veb(struct rte_eth_dev *dev)
#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
+#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
+RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
+ ETH_I40E_SUPPORT_MULTI_DRIVER "=0|1");
+
+static int
+i40e_parse_multi_drv_handler(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_pf *pf;
+ unsigned long support_multi_driver;
+ char *end;
+
+ pf = (struct i40e_pf *)opaque;
+
+ errno = 0;
+ support_multi_driver = strtoul(value, &end, 10);
+ if (errno != 0 || end == value || *end != 0) {
+ PMD_DRV_LOG(WARNING, "Wrong global configuration");
+ return -(EINVAL);
+ }
+
+ if (support_multi_driver == 1 || support_multi_driver == 0)
+ pf->support_multi_driver = (bool)support_multi_driver;
+ else
+ PMD_DRV_LOG(WARNING, "%s must be 1 or 0,",
+ "enable global configuration by default."
+ ETH_I40E_SUPPORT_MULTI_DRIVER);
+ return 0;
+}
+
+static int
+i40e_support_multi_driver(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = dev->pci_dev;
+ static const char *valid_keys[] = {
+ ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
+ struct rte_kvargs *kvlist;
+
+ /* Enable global configuration by default */
+ pf->support_multi_driver = false;
+
+ if (!pci_dev->device.devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(pci_dev->device.devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+
+ if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
+ PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
+ "the first invalid or last valid one is used !",
+ ETH_I40E_SUPPORT_MULTI_DRIVER);
+
+ if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
+ i40e_parse_multi_drv_handler, pf) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
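The block above adds the "support-multi-driver" devarg, which keeps this PMD from touching global (shared-across-ports) registers when another i40e driver instance coexists on the same NIC. A hedged usage sketch; the whitelist syntax and PCI address below are assumptions for illustration, not taken from the diff:

#include <rte_eal.h>

int main(void)
{
    char *eal_argv[] = {
        "app",
        "-w", "0000:02:00.0,support-multi-driver=1", /* placeholder BDF */
    };

    return rte_eal_init(3, eal_argv) < 0;
}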
@@ -982,6 +1075,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->bus.func = pci_dev->addr.function;
hw->adapter_stopped = 0;
+ /* Check if need to support multi-driver */
+ i40e_support_multi_driver(dev);
+
/* Make sure all is clean before doing PF reset */
i40e_clear_hw(hw);
@@ -1002,13 +1098,16 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
return ret;
}
+ i40e_config_automask(pf);
+
/*
* To work around the NVM issue, initialize registers
* for flexible payload and packet type of QinQ by
* software. It should be removed once issues are fixed
* in NVM.
*/
- i40e_GLQF_reg_init(hw);
+ if (!pf->support_multi_driver)
+ i40e_GLQF_reg_init(hw);
/* Initialize the input set for filters (hash and fd) to default value */
i40e_filter_input_set_init(pf);
@@ -1104,11 +1203,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
i40e_set_fc(hw, &aq_fail, TRUE);
/* Set the global registers with default ether type value */
- ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR, "Failed to set the default outer "
- "VLAN ether type");
- goto err_setup_pf_switch;
+ if (!pf->support_multi_driver) {
+ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ ETHER_TYPE_VLAN);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to set the default outer "
+ "VLAN ether type");
+ goto err_setup_pf_switch;
+ }
}
/* PF setup, which includes VSI setup */
@@ -1384,6 +1486,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
int i;
uint32_t val;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
/* Bind all RX queues to allocated MSIX interrupt */
for (i = 0; i < nb_queue; i++) {
@@ -1402,7 +1505,8 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
/* Write first RX queue to Link list register as the head element */
if (vsi->type != I40E_VSI_SRIOV) {
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL,
+ pf->support_multi_driver);
if (msix_vect == I40E_MISC_VEC_ID) {
I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
@@ -1460,7 +1564,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
uint16_t queue_idx = 0;
int record = 0;
- uint32_t val;
int i;
for (i = 0; i < vsi->nb_qps; i++) {
@@ -1468,13 +1571,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
}
- /* INTENA flag is not auto-cleared for interrupt */
- val = I40E_READ_REG(hw, I40E_GLINT_CTL);
- val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
- I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
- I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
- I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
-
/* VF bind interrupt */
if (vsi->type == I40E_VSI_SRIOV) {
__vsi_queues_bind_intr(vsi, msix_vect,
@@ -1527,27 +1623,22 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- uint16_t interval = i40e_calc_itr_interval(\
- RTE_LIBRTE_I40E_ITR_INTERVAL);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
- if (rte_intr_allow_others(intr_handle))
+ if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
for (i = 0; i < vsi->nb_msix; i++) {
msix_intr = vsi->msix_intr + i;
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
- I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
}
else
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
}
@@ -1558,16 +1649,18 @@ i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
- if (rte_intr_allow_others(intr_handle))
+ if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
for (i = 0; i < vsi->nb_msix; i++) {
msix_intr = vsi->msix_intr + i;
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
- 0);
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
}
else
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
}
@@ -2743,11 +2836,17 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
uint16_t tpid)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
uint64_t reg_r = 0, reg_w = 0;
uint16_t reg_id = 0;
int ret = 0;
int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
+ return -ENOTSUP;
+ }
+
switch (vlan_type) {
case ETH_VLAN_TYPE_OUTER:
if (qinq)
@@ -2797,8 +2896,11 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
"I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
return ret;
}
- PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
+ PMD_DRV_LOG(DEBUG,
+ "Global register 0x%08x is changed with value 0x%08x",
+ I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
+
+ i40e_global_cfg_warning(I40E_WARNING_TPID);
return ret;
}
@@ -3025,19 +3127,25 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
}
- /* config the water marker both based on the packets and bytes */
- I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
- (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
- I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
- (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
- I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
- pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT);
- I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
- pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT);
+ if (!pf->support_multi_driver) {
+ /* config water marker both based on the packets and bytes */
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
+ (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
+ (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Water marker configuration is not supported.");
+ }
I40E_WRITE_FLUSH(hw);
@@ -4524,16 +4632,28 @@ i40e_vsi_setup(struct i40e_pf *pf,
/* VF has MSIX interrupt in VF range, don't allocate here */
if (type == I40E_VSI_MAIN) {
- ret = i40e_res_pool_alloc(&pf->msix_pool,
- RTE_MIN(vsi->nb_qps,
- RTE_MAX_RXTX_INTR_VEC_ID));
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
- vsi->seid, ret);
- goto fail_queue_alloc;
+ if (pf->support_multi_driver) {
+ /* If support multi-driver, need to use INT0 instead of
+ * allocating from msix pool. The Msix pool is init from
+ * INT1, so it's OK just set msix_intr to 0 and nb_msix
+ * to 1 without calling i40e_res_pool_alloc.
+ */
+ vsi->msix_intr = 0;
+ vsi->nb_msix = 1;
+ } else {
+ ret = i40e_res_pool_alloc(&pf->msix_pool,
+ RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "VSI MAIN %d get heap failed %d",
+ vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID);
}
- vsi->msix_intr = ret;
- vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
} else if (type != I40E_VSI_SRIOV) {
ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
if (ret < 0) {
@@ -4888,11 +5008,11 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
int mask = 0;
/* Apply vlan offload setting */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+ mask = ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
i40e_vlan_offload_set(dev, mask);
- /* Apply double-vlan setting, not implemented yet */
-
/* Apply pvid setting */
ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
data->dev_conf.txmode.hw_vlan_insert_pvid);
@@ -5446,7 +5566,8 @@ void
i40e_pf_disable_irq0(struct i40e_hw *hw)
{
/* Disable all interrupt types */
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
}
@@ -6507,7 +6628,7 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
uint8_t add)
{
uint16_t ip_type;
- uint32_t ipv4_addr;
+ uint32_t ipv4_addr, ipv4_addr_le;
uint8_t i, tun_type = 0;
/* internal variable to convert ipv6 byte order */
uint32_t convert_ipv6[4];
@@ -6534,8 +6655,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
rte_memcpy(&pfilter->ipaddr.v4.data,
- &rte_cpu_to_le_32(ipv4_addr),
+ &ipv4_addr_le,
sizeof(pfilter->ipaddr.v4.data));
} else {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
@@ -6855,9 +6977,15 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf,
static int
i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
uint32_t val, reg;
int ret = -EINVAL;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
+ return -ENOTSUP;
+ }
+
val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
@@ -6875,6 +7003,10 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
reg, NULL);
if (ret != 0)
return ret;
+ PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
+ "with value 0x%08x",
+ I40E_GL_PRS_FVBM(2), reg);
+ i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
} else {
ret = 0;
}
@@ -7095,12 +7227,18 @@ static int
i40e_set_hash_filter_global_config(struct i40e_hw *hw,
struct rte_eth_hash_global_conf *g_cfg)
{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
int ret;
uint16_t i;
uint32_t reg;
uint32_t mask0 = g_cfg->valid_bit_mask[0];
enum i40e_filter_pctype pctype;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
+ return -ENOTSUP;
+ }
+
/* Check the input parameters */
ret = i40e_hash_global_config_check(g_cfg);
if (ret < 0)
@@ -7118,42 +7256,45 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
if (hw->mac.type == I40E_MAC_X722) {
if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
reg);
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
reg);
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
reg);
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
reg);
} else {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
- reg);
+ i40e_write_global_rx_ctl(hw,
+ I40E_GLQF_HSYM(pctype),
+ reg);
}
} else {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
+ reg);
}
+ i40e_global_cfg_warning(I40E_WARNING_HSYM);
}
reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
@@ -7177,7 +7318,8 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
/* Use the default, and keep it as it is */
goto out;
- i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
out:
I40E_WRITE_FLUSH(hw);
@@ -7791,6 +7933,18 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
}
static void
+i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, addr);
+
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
+ if (reg != val)
+ i40e_write_global_rx_ctl(hw, addr, val);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
+ (uint32_t)i40e_read_rx_ctl(hw, addr));
+}
+
+static void
i40e_filter_input_set_init(struct i40e_pf *pf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
@@ -7815,6 +7969,12 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
I40E_INSET_MASK_NUM_REG);
if (num < 0)
return;
+
+ if (pf->support_multi_driver && num > 0) {
+ PMD_DRV_LOG(ERR, "Input set setting is not supported.");
+ return;
+ }
+
inset_reg = i40e_translate_input_set_reg(hw->mac.type,
input_set);
@@ -7823,31 +7983,49 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
-
- for (i = 0; i < num; i++) {
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- mask_reg[i]);
- }
- /*clear unused mask registers of the pctype */
- for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- 0);
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- 0);
+ if (!pf->support_multi_driver) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ }
+ /* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ }
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Input set setting is not supported.");
}
I40E_WRITE_FLUSH(hw);
/* store the default input set */
- pf->hash_input_set[pctype] = input_set;
+ if (!pf->support_multi_driver)
+ pf->hash_input_set[pctype] = input_set;
pf->fdir.input_set[pctype] = input_set;
}
+
+ if (!pf->support_multi_driver) {
+ i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
+ i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
+ i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
+ }
}
int
@@ -7860,6 +8038,11 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
int ret, i, num;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
+ return -ENOTSUP;
+ }
+
if (!conf) {
PMD_DRV_LOG(ERR, "Invalid pointer");
return -EFAULT;
@@ -7908,19 +8091,21 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
for (i = 0; i < num; i++)
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- mask_reg[i]);
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
/*clear unused mask registers of the pctype */
for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- 0);
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
I40E_WRITE_FLUSH(hw);
pf->hash_input_set[pctype] = input_set;
@@ -7984,6 +8169,11 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
if (num < 0)
return -EINVAL;
+ if (pf->support_multi_driver && num > 0) {
+ PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+ return -ENOTSUP;
+ }
+
inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
@@ -7992,13 +8182,20 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- for (i = 0; i < num; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
- /*clear unused mask registers of the pctype */
- for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- 0);
+ if (!pf->support_multi_driver) {
+ for (i = 0; i < num; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ /* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
+ } else {
+ PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+ }
I40E_WRITE_FLUSH(hw);
pf->fdir.input_set[pctype] = input_set;
@@ -9694,27 +9891,21 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
if (msix_intr == I40E_MISC_VEC_ID)
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
else
I40E_WRITE_REG(hw,
I40E_PFINT_DYN_CTLN(msix_intr -
I40E_RX_VEC_START),
I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
rte_intr_enable(&dev->pci_dev->intr_handle);
@@ -9731,12 +9922,13 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
msix_intr = intr_handle->intr_vec[queue_id];
if (msix_intr == I40E_MISC_VEC_ID)
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
else
I40E_WRITE_REG(hw,
I40E_PFINT_DYN_CTLN(msix_intr -
I40E_RX_VEC_START),
- 0);
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
return 0;
@@ -9832,14 +10024,43 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_mac_filter_info mac_filter;
+ struct i40e_mac_filter *f;
+ int ret;
if (!is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
return;
}
- /* Flags: 0x3 updates port address */
- i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+ break;
+ }
+
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+ return;
+ }
+
+ mac_filter = f->mac_info;
+ ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+ return;
+ }
+ memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add mac filter");
+ return;
+ }
+ memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
+
+ i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ mac_addr->addr_bytes, NULL);
}
static int
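Editorial sketch: the reworked i40e_set_default_mac_addr() above keeps the VSI MAC filter list in sync (delete the old filter, add the new one) before committing the LAA to firmware. Applications reach this through the generic ethdev call; the snippet below is a minimal, hypothetical usage example — the port id and address bytes are illustrative, and the only check shown mirrors the PMD's own validity test.

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_ether.h>

/* Hedged usage sketch; dispatches to the driver callback reworked above. */
static int
set_port_default_mac(uint8_t port_id)
{
	struct ether_addr new_mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	/* Mirror the PMD's check before requesting the update. */
	if (!is_valid_assigned_ether_addr(&new_mac))
		return -EINVAL;

	return rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
}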
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index f2833197..77a44668 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -103,6 +103,14 @@
(((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \
((vf)->version_minor == 1))
+static inline void
+I40E_WRITE_GLB_REG(struct i40e_hw *hw, uint32_t reg, uint32_t value) {
+ I40E_WRITE_REG(hw, reg, value);
+ PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
+ "with value 0x%08x",
+ reg, value);
+}
+
/* index flex payload per layer */
enum i40e_flxpld_layer_idx {
I40E_FLXPLD_L2_IDX = 0,
@@ -477,6 +485,8 @@ struct i40e_pf {
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+
+ bool support_multi_driver; /* 1 - support multiple drivers */
};
enum pending_msg {
@@ -569,6 +579,22 @@ struct i40e_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+enum I40E_WARNING_IDX {
+ I40E_WARNING_DIS_FLX_PLD,
+ I40E_WARNING_ENA_FLX_PLD,
+ I40E_WARNING_QINQ_PARSER,
+ I40E_WARNING_QINQ_CLOUD_FILTER,
+ I40E_WARNING_TPID,
+ I40E_WARNING_FLOW_CTL,
+ I40E_WARNING_GRE_KEY_LEN,
+ I40E_WARNING_QF_CTL,
+ I40E_WARNING_HASH_INSET,
+ I40E_WARNING_HSYM,
+ I40E_WARNING_HASH_MSK,
+ I40E_WARNING_FD_MSK,
+ I40E_WARNING_RPL_CLD_FILTER,
+};
+
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
@@ -694,15 +720,46 @@ i40e_align_floor(int n)
}
static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
+i40e_calc_itr_interval(int16_t interval, bool is_multi_drv)
{
- if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
- interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+ if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) {
+ if (is_multi_drv)
+ interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+ else
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+ }
/* Convert to hardware count, as writing each 1 represents 2 us */
return interval / 2;
}
+static inline void
+i40e_global_cfg_warning(enum I40E_WARNING_IDX idx)
+{
+ const char *warning;
+ static const char *const warning_list[] = {
+ [I40E_WARNING_DIS_FLX_PLD] = "disable FDIR flexible payload",
+ [I40E_WARNING_ENA_FLX_PLD] = "enable FDIR flexible payload",
+ [I40E_WARNING_QINQ_PARSER] = "support QinQ parser",
+ [I40E_WARNING_QINQ_CLOUD_FILTER] = "support QinQ cloud filter",
+ [I40E_WARNING_TPID] = "support TPID configuration",
+ [I40E_WARNING_FLOW_CTL] = "configure water marker",
+ [I40E_WARNING_GRE_KEY_LEN] = "support GRE key length setting",
+ [I40E_WARNING_QF_CTL] = "support hash function setting",
+ [I40E_WARNING_HASH_INSET] = "configure hash input set",
+ [I40E_WARNING_HSYM] = "set symmetric hash",
+ [I40E_WARNING_HASH_MSK] = "configure hash mask",
+ [I40E_WARNING_FD_MSK] = "configure fdir mask",
+ [I40E_WARNING_RPL_CLD_FILTER] = "replace cloud filter",
+ };
+
+ warning = warning_list[idx];
+
+ RTE_LOG(WARNING, PMD,
+ "Global register is changed during %s\n",
+ warning);
+}
+
#define I40E_VALID_FLOW(flow_type) \
((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
(flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
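Editorial sketch: the ITR helper changed above hides one detail worth a worked example — the hardware register counts in 2 us units, so the requested microsecond interval is halved before being written, and in the multi-driver case an out-of-range request now falls back to the maximum rather than the default. The bounds below are assumptions mirroring I40E_QUEUE_ITR_INTERVAL_MAX/_DEFAULT, not values taken from this patch.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ITR_INTERVAL_MAX	8160	/* us; assumed bound */
#define ITR_INTERVAL_DEFAULT	32	/* us; assumed default */

static uint16_t
calc_itr(int16_t interval_us, bool multi_driver)
{
	if (interval_us < 0 || interval_us > ITR_INTERVAL_MAX)
		interval_us = multi_driver ? ITR_INTERVAL_MAX :
					     ITR_INTERVAL_DEFAULT;
	return interval_us / 2;	/* one hardware count covers 2 us */
}

int
main(void)
{
	assert(calc_itr(8, false) == 4);	/* 8 us -> 4 counts */
	assert(calc_itr(-1, false) == 16);	/* out of range -> default */
	assert(calc_itr(-1, true) == 4080);	/* out of range -> max */
	return 0;
}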
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 1686914a..b19224d9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1035,14 +1035,16 @@ i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
{
+ int ret;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_eth_stats *pstats = NULL;
/* read stat values to clear hardware registers */
- i40evf_update_stats(dev, &pstats);
+ ret = i40evf_update_stats(dev, &pstats);
/* set stats offset base on current values */
- vf->vsi.eth_stats_offset = *pstats;
+ if (ret == 0)
+ vf->vsi.eth_stats_offset = *pstats;
}
static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
@@ -1246,7 +1248,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct ether_addr *p_mac_addr;
uint16_t interval =
- i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
+ i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX, 0);
vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
vf->dev_data = dev->data;
@@ -1986,7 +1988,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
@@ -2113,7 +2115,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_tx_queues);
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
intr_vector = dev->data->nb_rx_queues;
if (rte_intr_efd_enable(intr_handle, intr_vector))
return -1;
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index e78610b8..e25b8e0c 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -165,7 +165,6 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
rte_wmb();
 /* Init the RX tail register. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
return err;
@@ -1011,13 +1010,18 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
PMD_DRV_LOG(ERR, "invalid programming status"
" reported, error = %u.", error);
} else
- PMD_DRV_LOG(ERR, "unknown programming status"
+ PMD_DRV_LOG(INFO, "unknown programming status"
" reported, len = %d, id = %u.", len, id);
rxdp->wb.qword1.status_error_len = 0;
rxq->rx_tail++;
if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
rxq->rx_tail = 0;
+ if (rxq->rx_tail == 0)
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ else
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
}
+
return ret;
}
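Editorial sketch: the tail handling added above keeps the FDIR RX tail register one descriptor behind the software index, so the hardware never owns the slot the driver is about to inspect. A hedged scalar sketch of the same arithmetic — the function name is illustrative, not the driver's:

#include <stdint.h>

/* Compute the tail value written after consuming a programming status. */
static uint16_t
fdir_tail_value(uint16_t rx_tail, uint16_t nb_rx_desc)
{
	/* After wrapping to 0, point the tail back at the last slot. */
	if (rx_tail == 0)
		return (uint16_t)(nb_rx_desc - 1);
	return (uint16_t)(rx_tail - 1);
}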
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 86546ca8..777ffc21 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2606,6 +2606,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
rxq->vsi = pf->fdir.fdir_vsi;
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ memset(rz->addr, 0, I40E_FDIR_NUM_RX_DESC * sizeof(union i40e_rx_desc));
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
new file mode 100644
index 00000000..40d1929f
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -0,0 +1,654 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 IBM Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <altivec.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+
+ vector unsigned long hdr_room = (vector unsigned long){
+ RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM};
+ vector unsigned long dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = (vector unsigned long){};
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vec_st(dma_addr0, 0,
+ (vector unsigned long *)&rxdp[i].read);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ vector unsigned long vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite whole 8 bytes with one load:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
+ vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = vec_mergel(vaddr0, vaddr0);
+ dma_addr1 = vec_mergel(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = vec_add(dma_addr0, hdr_room);
+ dma_addr1 = vec_add(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
+ vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+ vector unsigned int vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const vector unsigned int rss_vlan_msk = (vector unsigned int){
+ (int32_t)0x1c03804, (int32_t)0x1c03804,
+ (int32_t)0x1c03804, (int32_t)0x1c03804};
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const vector unsigned char vlan_flags = (vector unsigned char){
+ 0, 0, 0, 0,
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char rss_flags = (vector unsigned char){
+ 0, PKT_RX_FDIR, 0, 0,
+ 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char l3_l4e_flags = (vector unsigned char){
+ 0,
+ PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ | PKT_RX_IP_CKSUM_BAD,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
+ vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
+ vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
+
+ vlan1 = vec_and(vlan0, rss_vlan_msk);
+ vlan0 = (vector unsigned int)vec_perm(vlan_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&vlan1);
+
+ rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
+ rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
+ *(vector unsigned char *)&rss);
+
+ l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
+ l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&l3_l4e);
+
+ vlan0 = vec_or(vlan0, rss);
+ vlan0 = vec_or(vlan0, l3_l4e);
+
+ rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
+ rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
+ rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
+ rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
+}
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT 10
+
+static inline void
+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+ vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
+ vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
+
+ ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
+ ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
+
+ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(
+ (*(vector unsigned char *)&ptype0)[0]);
+ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(
+ (*(vector unsigned char *)&ptype0)[8]);
+ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(
+ (*(vector unsigned char *)&ptype1)[0]);
+ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(
+ (*(vector unsigned char *)&ptype1)[8]);
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ vector unsigned char shuf_msk;
+
+ vector unsigned short crc_adjust = (vector unsigned short){
+ 0, 0, /* ignore pkt_type field */
+ rxq->crc_len, /* sub crc on pkt_len */
+ 0, /* ignore high-16bits of pkt_len */
+ rxq->crc_len, /* sub crc on data_len */
+ 0, 0, 0 /* ignore non-length fields */
+ };
+ vector unsigned long dd_check, eop_check;
+
+ /* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = (vector unsigned long){0x0000000100000001ULL,
+ 0x0000000100000001ULL};
+
+ /* 4 packets EOP mask */
+ eop_check = (vector unsigned long){0x0000000200000002ULL,
+ 0x0000000200000002ULL};
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = (vector unsigned char){
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 14, 15, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 14, 15, /* octet 15~14, 16 bits data_len */
+ 2, 3, /* octet 2~3, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
+ vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
+ vector unsigned long mbp1, mbp2; /* two mbuf pointer
+ * in one XMM reg.
+ */
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = *(vector unsigned long *)&sw_ring[pos];
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = *(vector unsigned long *)(rxdp + 3);
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos] = mbp1;
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
+
+ descs[2] = *(vector unsigned long *)(rxdp + 2);
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf point */
+ descs[1] = *(vector unsigned long *)(rxdp + 1);
+ rte_compiler_barrier();
+ descs[0] = *(vector unsigned long *)(rxdp);
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos + 2] = mbp2;
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len3 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[3]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ const vector unsigned int len2 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[2]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = (vector unsigned long)len3;
+ descs[2] = (vector unsigned long)len2;
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vec_perm((vector unsigned char)descs[3],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb3 = vec_perm((vector unsigned char)descs[2],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
+ (vector unsigned short)descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
+ (vector unsigned short)descs[0]);
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb4, crc_adjust);
+ pkt_mb3 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len1 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[1]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+ const vector unsigned int len0 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[0]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = (vector unsigned long)len1;
+ descs[0] = (vector unsigned long)len0;
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vec_perm((vector unsigned char)descs[1],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb1 = vec_perm((vector unsigned char)descs[0],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = (vector unsigned short)vec_mergeh(
+ sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vec_st(pkt_mb4, 0,
+ (vector unsigned char *)&rx_pkts[pos + 3]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb3, 0,
+ (vector unsigned char *)&rx_pkts[pos + 2]
+ ->rx_descriptor_fields1
+ );
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb2, crc_adjust);
+ pkt_mb1 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ vector unsigned char eop_shuf_mask =
+ (vector unsigned char){
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ };
+
+ /* and with mask to extract bits, flipping 1-0 */
+ vector unsigned char eop_bits = vec_and(
+ (vector unsigned char)vec_nor(staterr, staterr),
+ (vector unsigned char)eop_check);
+ /* the staterr values are not in order, as the count
+ * of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = vec_perm(eop_bits, (vector unsigned char){},
+ eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *split_packet = (vec_ld(0,
+ (vector unsigned int *)&eop_bits))[0];
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = vec_and(staterr, (vector unsigned short)dd_check);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vec_st(pkt_mb2, 0,
+ (vector unsigned char *)&rx_pkts[pos + 1]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb1, 0,
+ (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
+ );
+ desc_to_ptype_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll((vec_ld(0,
+ (vector unsigned long *)&staterr)[0]));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned int i = 0;
+
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ vector unsigned long descriptor = (vector unsigned long){
+ pkt->buf_physaddr + pkt->data_off, high_qw};
+ *(vector unsigned long *)txdp = descriptor;
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* crossing tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ nb_commit = nb_pkts;
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
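Editorial sketch: the heart of the vectorized receive loop in the new Altivec file is steps C.3/C.4 — mask the packed status words down to their DD bits, popcount them, and leave the loop as soon as fewer than four descriptors are complete. Because the hardware sets DD bits in descriptor order, a scalar equivalent is simply "count until the first clear bit"; the sketch below shows that equivalent under the stated assumption, not the vector code itself.

#include <stdint.h>

#define DESCS_PER_LOOP 4	/* mirrors RTE_I40E_DESCS_PER_LOOP */

/* Scalar stand-in for steps C.3/C.4 above. */
static unsigned int
count_done_descs(const uint64_t qword1[DESCS_PER_LOOP], uint64_t dd_bit)
{
	unsigned int n = 0;

	while (n < DESCS_PER_LOOP && (qword1[n] & dd_bit))
		n++;
	return n;	/* n < DESCS_PER_LOOP means: stop the burst loop */
}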
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 832242ee..9543fe1b 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -87,6 +87,9 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
mac->ops.set_rate_select_speed =
ixgbe_set_hard_rate_select_speed;
+ if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_soft_rate_select_speed;
} else {
if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -564,6 +567,10 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_QSFP_SF_QP:
media_type = ixgbe_media_type_fiber_qsfp;
break;
+ case IXGBE_DEV_ID_82599_BYPASS:
+ media_type = ixgbe_media_type_fiber_fixed;
+ hw->phy.multispeed_fiber = true;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index 094ee526..368bf7d5 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -178,6 +178,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_LS:
+ case IXGBE_DEV_ID_82599_BYPASS:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
@@ -192,6 +193,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X540_BYPASS:
hw->mac.type = ixgbe_mac_X540;
hw->mvals = ixgbe_mvals_X540;
break;
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index cca19efc..e54fd306 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -166,6 +166,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
/* flow control autoneg black list */
@@ -196,6 +197,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X540_BYPASS:
case IXGBE_DEV_ID_X550T:
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
@@ -261,6 +263,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
goto out;
 /* only backplane uses autoc so fall through */
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -3068,6 +3071,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
@@ -4552,7 +4556,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
}
/* If there is any thing in data position pull it in */
@@ -4572,7 +4576,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
}
rel_out:
@@ -5065,6 +5069,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
ixgbe_set_rate_select_speed(hw,
IXGBE_LINK_SPEED_10GB_FULL);
@@ -5115,6 +5120,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
ixgbe_set_rate_select_speed(hw,
IXGBE_LINK_SPEED_1GB_FULL);
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.c b/drivers/net/ixgbe/base/ixgbe_mbx.c
index 042e5cc1..2785bbad 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.c
@@ -444,17 +444,6 @@ STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
for (i = 0; i < size; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
- /*
- * Complete the remaining mailbox data registers with zero to reset
- * the data sent in a previous exchange (in either side) with the PF,
- * including exchanges performed by another Guest OS to which that VF
- * was previously assigned.
- */
- while (i < hw->mbx.size) {
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, 0);
- i++;
- }
-
/* update stats */
hw->mbx.stats.msgs_tx++;
@@ -693,17 +682,6 @@ STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
for (i = 0; i < size; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
- /*
- * Complete the remaining mailbox data registers with zero to reset
- * the data sent in a previous exchange (in either side) with the VF,
- * including exchanges performed by another Guest OS to which that VF
- * was previously assigned.
- */
- while (i < hw->mbx.size) {
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, 0);
- i++;
- }
-
/* Interrupt VF to tell it a message has been sent and release buffer*/
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index 4982e035..b12573c3 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -123,9 +123,11 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_82599_VF_HV 0x152E
#define IXGBE_DEV_ID_82599_LS 0x154F
+#define IXGBE_DEV_ID_82599_BYPASS 0x155D
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X540_BYPASS 0x155C
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
#define IXGBE_DEV_ID_X550T1 0x15D1
@@ -270,7 +272,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_I2C_BB_EN_X550 0x00000100
#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550
#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550
-
#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
#define IXGBE_I2C_CLK_OE_N_EN 0
@@ -3626,6 +3627,7 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_fixed,
ixgbe_media_type_fiber_qsfp,
ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 73996bbe..2dc69ff9 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -93,6 +93,9 @@
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
+/* Default value of Max Rx Queue */
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */
@@ -249,6 +252,8 @@ static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
+static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
@@ -619,7 +624,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.dev_configure = ixgbevf_dev_configure,
.dev_start = ixgbevf_dev_start,
.dev_stop = ixgbevf_dev_stop,
- .link_update = ixgbe_dev_link_update,
+ .link_update = ixgbevf_dev_link_update,
.stats_get = ixgbevf_dev_stats_get,
.xstats_get = ixgbevf_dev_xstats_get,
.stats_reset = ixgbevf_dev_stats_reset,
@@ -1959,9 +1964,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
return -EINVAL;
}
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
- RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
-
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+ IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+ dev->pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
return 0;
}
@@ -2001,8 +2007,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
case ETH_MQ_RX_NONE:
/* if nothing mq mode configure, use default scheme */
dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
- if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
break;
default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
/* SRIOV only works in VMDq enable mode */
@@ -3217,15 +3221,123 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->tx_desc_lim = tx_desc_lim;
}
+static int
+ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ int *link_up, int wait_to_complete)
+{
+ /**
+ * for a quick link status check, wait_to_complete == 0,
+ * skip PF link status checking
+ */
+ bool no_pflink_check = wait_to_complete == 0;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ uint32_t links_reg, in_msg;
+ int ret_val = 0;
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ rte_delay_us(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Reserved in older MACs */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ if (no_pflink_check) {
+ if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
+ mac->get_link_status = true;
+ else
+ mac->get_link_status = false;
+
+ goto out;
+ }
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error
+ */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS; if it is a NACK we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
/* return 0 means link status changed, -1 means not changed */
static int
-ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_link link, old;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
int link_up;
int diag;
+ int wait = 1;
link.link_status = ETH_LINK_DOWN;
link.link_speed = 0;
@@ -3238,9 +3350,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
/* check if it needs to wait to complete, if lsc interrupt is enabled */
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
- diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
+ wait = 0;
+
+ if (vf)
+ diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
else
- diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+ diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
if (diag != 0) {
link.link_speed = ETH_SPEED_NUM_100M;
@@ -3287,6 +3402,18 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
return 0;
}
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
+}
+
+static int
+ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
+}
+
static void
ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
@@ -4206,7 +4333,11 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- hw->mac.ops.reset_hw(hw);
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+ return err;
+ }
hw->mac.get_link_status = true;
/* negotiate mailbox API version to use with the PF. */
@@ -4233,7 +4364,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
ixgbevf_dev_rxtx_start(dev);
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = dev->data->nb_rx_queues;
if (rte_intr_efd_enable(intr_handle, intr_vector))
return -1;
@@ -7569,12 +7701,17 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
u32 in_msg = 0;
- if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
- return;
+ /* peek the message first */
+ in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
/* PF reset VF event */
- if (in_msg == IXGBE_PF_CONTROL_MSG)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ if (in_msg == IXGBE_PF_CONTROL_MSG) {
+ /* dummy mbx read to ack pf */
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+ return;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
}
static int
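Editorial sketch: the split into ixgbe_dev_link_update_share() means a VF now honors wait_to_complete end to end — the nowait flavor of the ethdev link query takes ixgbevf_check_link()'s quick path and skips the PF mailbox round trip. A minimal usage example, with the port id illustrative:

#include <rte_ethdev.h>

static int
vf_link_is_up(uint8_t port_id)
{
	struct rte_eth_link link;

	/* wait_to_complete == 0: on a VF this takes the quick path
	 * that skips the PF mailbox check added above.
	 */
	rte_eth_link_get_nowait(port_id, &link);
	return link.link_status == ETH_LINK_UP;
}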
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 79b7a600..0ffb9401 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -161,6 +161,22 @@ priv_lock(struct priv *priv)
}
/**
+ * Try to lock private structure to protect it from concurrent access in the
+ * control path.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+priv_trylock(struct priv *priv)
+{
+ return rte_spinlock_trylock(&priv->lock);
+}
+
+/**
* Unlock private structure.
*
* @param priv
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 2ea995f8..e98959fb 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -658,6 +658,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
(*priv->rss_conf)[0]->rss_key_len :
0);
info->speed_capa = priv->link_speed_capa;
+ info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
priv_unlock(priv);
}
@@ -985,9 +986,7 @@ recover:
/* Provide new values to rxq_setup(). */
dev->data->dev_conf.rxmode.jumbo_frame = sp;
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
- if (rehash)
- ret = rxq_rehash(dev, rxq_ctrl);
- else
+ if (!rehash)
ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
rxq_ctrl->socket, NULL, rxq->mp);
if (!ret)
@@ -1239,8 +1238,12 @@ mlx5_dev_link_status_handler(void *arg)
struct priv *priv = dev->data->dev_private;
int ret;
- priv_lock(priv);
- assert(priv->pending_alarm == 1);
+ while (!priv_trylock(priv)) {
+ /* Alarm is being canceled. */
+ if (priv->pending_alarm == 0)
+ return;
+ rte_pause();
+ }
priv->pending_alarm = 0;
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
@@ -1287,9 +1290,10 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
rte_intr_callback_unregister(&priv->intr_handle,
mlx5_dev_interrupt_handler,
dev);
- if (priv->pending_alarm)
+ if (priv->pending_alarm) {
+ priv->pending_alarm = 0;
rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
- priv->pending_alarm = 0;
+ }
priv->intr_handle.fd = 0;
priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
}
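Editorial sketch: the mlx5 change above pairs priv_trylock() with the pending_alarm flag so the link-status handler can bail out instead of deadlocking against a thread that holds the lock while cancelling the alarm. The same pattern in isolation, as a minimal sketch — the structure and names are invented for illustration, and rte_pause() is the EAL busy-wait hint the hunk itself uses (its header varies across DPDK releases):

#include <rte_spinlock.h>

struct handler_ctx {
	rte_spinlock_t lock;
	volatile int pending;	/* set while the alarm is armed */
};

static void
status_handler(struct handler_ctx *ctx)
{
	while (!rte_spinlock_trylock(&ctx->lock)) {
		/* The canceller may hold the lock; give up once it
		 * clears the flag instead of spinning forever.
		 */
		if (ctx->pending == 0)
			return;
		rte_pause();
	}
	ctx->pending = 0;
	/* ... handle the event under the lock ... */
	rte_spinlock_unlock(&ctx->lock);
}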
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 74453204..1f42dac8 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -517,12 +517,10 @@ nfp_net_configure(struct rte_eth_dev *dev)
new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
if (rxmode->jumbo_frame)
- /* this is handled in rte_eth_dev_configure */
+ hw->mtu = rxmode->max_rx_pkt_len;
- if (rxmode->hw_strip_crc) {
- PMD_INIT_LOG(INFO, "strip CRC not supported\n");
- return -EINVAL;
- }
+ if (!rxmode->hw_strip_crc)
+ PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable\n");
if (rxmode->enable_scatter) {
PMD_INIT_LOG(INFO, "Scatter not supported\n");
@@ -1012,7 +1010,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = hw->mtu;
+ dev_info->max_rx_pktlen = hw->max_mtu;
/* Next should change when PF support is implemented */
dev_info->max_mac_addrs = 1;
@@ -1240,6 +1238,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
return -EINVAL;
+ /* mtu setting is forbidden if port is started */
+ if (dev->data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev->data->port_id);
+ return -EBUSY;
+ }
+
/* switch to jumbo mode if needed */
if ((uint32_t)mtu > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
@@ -2390,7 +2395,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
- hw->mtu = hw->max_mtu;
+ hw->mtu = ETHER_MTU;
if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
hw->rx_offset = NFP_NET_RX_OFFSET;
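Editorial sketch: with the new -EBUSY check, nfp refuses MTU changes on a running port, matching the general ethdev expectation that reconfiguration happens on a stopped device. A hedged usage example of the stop/set/start sequence an application would follow — port id and MTU value are illustrative:

#include <rte_ethdev.h>

static int
change_mtu_stopped(uint8_t port_id, uint16_t mtu)
{
	int ret;

	rte_eth_dev_stop(port_id);	/* avoid the driver's -EBUSY path */
	ret = rte_eth_dev_set_mtu(port_id, mtu);
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(port_id);
}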
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 836d982a..9704895f 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -93,7 +93,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_AUTONEG,
+ .link_autoneg = ETH_LINK_AUTONEG,
};
static uint16_t
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index f6b3c10c..76c131b3 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -124,7 +124,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_FIXED,
+ .link_autoneg = ETH_LINK_AUTONEG,
};
static int
@@ -801,7 +801,7 @@ pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
struct rte_eth_dev_data *data = NULL;
unsigned int numa_node = rte_socket_id();
- RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %u\n",
+ RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %d\n",
numa_node);
/* now do all data allocation - for eth_dev structure
@@ -1042,7 +1042,7 @@ pmd_pcap_remove(const char *name)
{
struct rte_eth_dev *eth_dev = NULL;
- RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n",
+ RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %d\n",
rte_socket_id());
if (name == NULL)
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 8aa3c0b7..8e521ecb 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -239,10 +239,9 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
status = true;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
- DP_ERR(p_hwfn,
- "No action required, App TLV id = 0x%x"
- " app_prio_bitmap = 0x%x\n",
- id, app_prio_bitmap);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "No action required, App TLV entry = 0x%x\n",
+ app_prio_bitmap);
}
return status;
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 5ff8f28a..de0c5877 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -1027,6 +1027,12 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
if (sge_tpa_params->tpa_gro_consistent_flg)
p_sge_tpa_tlv->sge_tpa_flags |=
VFPF_TPA_GRO_CONSIST_FLAG;
+ if (sge_tpa_params->tpa_ipv4_tunn_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_TUNN_IPV4_EN_FLAG;
+ if (sge_tpa_params->tpa_ipv6_tunn_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_TUNN_IPV6_EN_FLAG;
p_sge_tpa_tlv->tpa_max_aggs_num =
sge_tpa_params->tpa_max_aggs_num;
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 149d092b..cc835d76 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -379,6 +379,8 @@ struct vfpf_vport_update_sge_tpa_tlv {
#define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
#define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
+ #define VFPF_TPA_TUNN_IPV4_EN_FLAG (1 << 5)
+ #define VFPF_TPA_TUNN_IPV6_EN_FLAG (1 << 6)
u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
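The two ecore hunks above are a matched pair: the header defines two new TLV flag bits and ecore_vf.c translates the tpa_ipv4_tunn_en_flg/tpa_ipv6_tunn_en_flg booleans into those bits before the vport-update message goes to the PF. A minimal sketch of the packing step, with hypothetical my_* names:

    #include <stdint.h>

    #define MY_TPA_TUNN_IPV4_EN_FLAG (1 << 5)
    #define MY_TPA_TUNN_IPV6_EN_FLAG (1 << 6)

    struct my_sge_tpa_params {
        uint8_t tpa_ipv4_tunn_en_flg;
        uint8_t tpa_ipv6_tunn_en_flg;
    };

    /* Fold the boolean params into the TLV flag byte sent to the PF. */
    static uint8_t my_pack_tpa_flags(const struct my_sge_tpa_params *p)
    {
        uint8_t flags = 0;

        if (p->tpa_ipv4_tunn_en_flg)
            flags |= MY_TPA_TUNN_IPV4_EN_FLAG;
        if (p->tpa_ipv6_tunn_en_flg)
            flags |= MY_TPA_TUNN_IPV6_EN_FLAG;
        return flags;
    }

    int main(void)
    {
        struct my_sge_tpa_params p = { .tpa_ipv4_tunn_en_flg = 1 };

        return my_pack_tpa_flags(&p) == MY_TPA_TUNN_IPV4_EN_FLAG ? 0 : 1;
    }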
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 9d782ac7..5275ef9d 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -282,6 +282,67 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
return 0;
}
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
+{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+ unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+ struct qede_tx_queue *txq;
+
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rcv_pkts), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+ sizeof(uint64_t));
+
+ if (xstats)
+ for (j = 0;
+ j < RTE_DIM(qede_rxq_xstats_strings); j++)
+ OSAL_MEMSET((((char *)
+ (qdev->fp_array[qid].rxq)) +
+ qede_rxq_xstats_strings[j].offset),
+ 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+ }
+
+ i = 0;
+
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+ txq = qdev->fp_array[(qid)].txqs[0];
+
+ OSAL_MEMSET((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == txq_stat_cntrs)
+ break;
+ }
+ }
+}
+
static int
qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
bool add)
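qede_reset_queue_stats() clears counters field by field: it computes each counter's byte offset within the queue structure and memsets exactly sizeof(uint64_t), which lets the same loop also walk the xstats table, where entries carry offsets rather than pointers. A self-contained sketch of the technique (my_* names are illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct my_rxq {
        uint64_t rcv_pkts;
        uint64_t rx_hw_errors;
        uint64_t rx_alloc_errors;
    };

    /* Table-driven reset: each entry names a counter by byte offset,
     * exactly like the offset field in the xstats string tables. */
    static const size_t my_rxq_stat_offsets[] = {
        offsetof(struct my_rxq, rcv_pkts),
        offsetof(struct my_rxq, rx_hw_errors),
        offsetof(struct my_rxq, rx_alloc_errors),
    };

    static void my_rxq_reset_stats(struct my_rxq *rxq)
    {
        size_t i;

        for (i = 0; i < sizeof(my_rxq_stat_offsets) /
                sizeof(my_rxq_stat_offsets[0]); i++)
            memset((char *)rxq + my_rxq_stat_offsets[i], 0,
                   sizeof(uint64_t));
    }

    int main(void)
    {
        struct my_rxq q = { 5, 6, 7 };

        my_rxq_reset_stats(&q);
        return (q.rcv_pkts | q.rx_hw_errors | q.rx_alloc_errors) ? 1 : 0;
    }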
@@ -629,7 +690,7 @@ static int qede_init_vport(struct qede_dev *qdev)
start.remove_inner_vlan = 1;
start.gro_enable = 0;
- start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
+ start.mtu = qdev->mtu;
start.vport_id = 0;
start.drop_ttl0 = false;
start.clear_stats = 1;
@@ -674,6 +735,14 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
}
}
+ /* We need at least one RX queue. There is no minimum check in
+ * rte_eth_dev_configure(), so we are checking it here.
+ */
+ if (eth_dev->data->nb_rx_queues == 0) {
+ DP_ERR(edev, "Minimum one RX queue is required\n");
+ return -EINVAL;
+ }
+
/* Sanity checks and throw warnings */
if (rxmode->enable_scatter == 1)
eth_dev->data->scattered_rx = 1;
@@ -709,6 +778,14 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rc != 0)
return rc;
+ /* If jumbo frames are enabled, derive the MTU from max_rx_pkt_len */
+ if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+ eth_dev->data->mtu =
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+ qdev->mtu = eth_dev->data->mtu;
+
/* Issue VPORT-START with default config values to allow
* other port configurations early on.
*/
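With jumbo frames enabled, the configured max_rx_pkt_len is now authoritative and the MTU is derived from it as max_rx_pkt_len minus the Ethernet header and CRC. A small worked example, assuming the standard 14-byte header and 4-byte CRC:

    #include <stdint.h>
    #include <stdio.h>

    #define MY_ETHER_HDR_LEN 14u
    #define MY_ETHER_CRC_LEN 4u

    int main(void)
    {
        uint32_t max_rx_pkt_len = 9018;   /* example jumbo configuration */
        uint32_t mtu = max_rx_pkt_len - MY_ETHER_HDR_LEN - MY_ETHER_CRC_LEN;

        printf("mtu = %u\n", (unsigned)mtu);   /* prints: mtu = 9000 */
        return 0;
    }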
@@ -756,8 +833,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
- QEDE_ETH_OVERHEAD);
+ dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
dev_info->tx_desc_lim = qede_tx_desc_lim;
@@ -1115,6 +1191,7 @@ qede_reset_xstats(struct rte_eth_dev *dev)
struct ecore_dev *edev = &qdev->edev;
ecore_reset_vport_stats(edev);
+ qede_reset_queue_stats(qdev, true);
}
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1150,6 +1227,7 @@ static void qede_reset_stats(struct rte_eth_dev *eth_dev)
struct ecore_dev *edev = &qdev->edev;
ecore_reset_vport_stats(edev);
+ qede_reset_queue_stats(qdev, false);
}
static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
@@ -1395,32 +1473,76 @@ int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
- uint32_t frame_size;
- struct qede_dev *qdev = dev->data->dev_private;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_dev_info dev_info = {0};
+ struct qede_fastpath *fp;
+ uint32_t max_rx_pkt_len;
+ uint32_t frame_size;
+ uint16_t rx_buf_size;
+ uint16_t bufsz;
+ bool restart = false;
+ int i;
+ PMD_INIT_FUNC_TRACE(edev);
+ if (IS_VF(edev))
+ return -ENOTSUP;
qede_dev_info_get(dev, &dev_info);
-
- /* VLAN_TAG = 4 */
- frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+ DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+ mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+ ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
return -EINVAL;
-
+ }
if (!dev->data->scattered_rx &&
- frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+ dev->data->min_rx_buf_size);
return -EINVAL;
-
- if (frame_size > ETHER_MAX_LEN)
+ }
+ /* Temporarily replace I/O functions with dummy ones. They cannot
+ * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+ */
+ dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+ dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+ if (dev->data->dev_started) {
+ dev->data->dev_started = 0;
+ qede_dev_stop(dev);
+ restart = true;
+ }
+ rte_delay_ms(1000);
+ qdev->mtu = mtu;
+ /* Fix up RX buf size for all queues of the port */
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if ((fp->type & QEDE_FASTPATH_RX) && (fp->rxq != NULL)) {
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = frame_size;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+ }
+ }
+ if (max_rx_pkt_len > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
+ if (!dev->data->dev_started && restart) {
+ qede_dev_start(dev);
+ dev->data->dev_started = 1;
+ }
/* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
- qdev->mtu = mtu;
- qede_dev_stop(dev);
- qede_dev_start(dev);
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
+ /* Restore the real burst handlers */
+ dev->rx_pkt_burst = qede_recv_pkts;
+ dev->tx_pkt_burst = qede_xmit_pkts;
return 0;
}
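The reworked qede_set_mtu() never leaves the burst pointers dangling: it parks RX/TX on a dummy handler (rte_eth_rx_burst() does not tolerate NULL), stops the port if it was running, resizes every queue's buffer, restarts, and only then restores the real handlers. A minimal sketch of the swap-and-quiesce pattern (my_* names are illustrative; the real code also sleeps to let in-flight bursts drain):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint16_t (*my_burst_fn)(void *q, void **pkts, uint16_t n);

    /* The burst pointers are never set to NULL: callers invoke them
     * unconditionally, so a no-op handler stands in meanwhile. */
    static uint16_t my_burst_dummy(void *q, void **pkts, uint16_t n)
    {
        (void)q; (void)pkts; (void)n;
        return 0;
    }

    struct my_dev {
        my_burst_fn rx_burst;
        my_burst_fn tx_burst;
    };

    static void my_quiesce(struct my_dev *d)
    {
        d->rx_burst = my_burst_dummy;
        d->tx_burst = my_burst_dummy;
        /* ... stop the port, resize RX buffers, restart, then restore
         * the real handlers, as qede_set_mtu() does above ... */
    }

    int main(void)
    {
        struct my_dev d;

        my_quiesce(&d);
        return d.rx_burst(NULL, NULL, 0);   /* safely returns 0 */
    }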
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 9cce13df..e586dc7f 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -89,11 +89,11 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
{
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct rte_eth_dev_data *eth_data = dev->data;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
- uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint16_t max_rx_pkt_len;
+ uint16_t bufsz;
size_t size;
- uint16_t data_size;
int rc;
int i;
@@ -127,34 +127,27 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->nb_rx_desc = nb_desc;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
-
- /* Sanity check */
- data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
- RTE_PKTMBUF_HEADROOM;
-
- if (pkt_len > data_size && !dev->data->scattered_rx) {
- DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
- pkt_len, data_size);
- rte_free(rxq);
- return -EINVAL;
+ max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+
+ /* Fix up RX buffer size */
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ if ((rxmode->enable_scatter) ||
+ (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ if (!dev->data->scattered_rx) {
+ DP_INFO(edev, "Forcing scatter-gather mode\n");
+ dev->data->scattered_rx = 1;
+ }
}
-
if (dev->data->scattered_rx)
- rxq->rx_buf_size = data_size;
+ rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
else
- rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
+ rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+ /* Align to cache-line size if needed */
+ rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
- qdev->mtu = pkt_len;
-
- DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
- qdev->mtu, rxq->rx_buf_size);
-
- if (pkt_len > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- DP_NOTICE(edev, false, "jumbo frame enabled\n");
- } else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
- }
+ DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+ qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
/* Allocate the parallel driver ring for Rx buffers */
size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
@@ -222,7 +215,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
dev->data->rx_queues[queue_idx] = rxq;
DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
- queue_idx, nb_desc, qdev->mtu, socket_id);
+ queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
return 0;
err4:
@@ -1541,3 +1534,11 @@ void qede_dev_stop(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
}
+
+uint16_t
+qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index ed9a529b..d1f3e995 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -55,14 +55,21 @@
((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+#define QEDE_MIN_RX_BUFF_SIZE (1024)
+#define QEDE_VLAN_TAG_SIZE (4)
+#define QEDE_LLC_SNAP_HDR_LEN (8)
+
/* Max supported alignment is 256 (8 shift)
* minimal alignment shift 6 is optimal for 57xxx HW performance
*/
#define QEDE_L1_CACHE_SHIFT 6
#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
-
-#define QEDE_ETH_OVERHEAD (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+ ~(QEDE_FW_RX_ALIGN_END - 1))
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
+ + (QEDE_LLC_SNAP_HDR_LEN))
/* TBD: Excluding IPV6 */
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
@@ -180,6 +187,10 @@ uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint16_t nb_pkts);
+
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);
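QEDE_CEIL_TO_CACHE_LINE_SIZE() introduced above is the usual round-up-to-power-of-two idiom: add (alignment - 1), then mask off the low bits; it is only valid when the alignment is a power of two. A self-contained demo, assuming the 64-byte value QEDE_FW_RX_ALIGN_END takes with an alignment shift of 6:

    #include <stdio.h>

    #define MY_ALIGN 64u   /* assumption: QEDE_FW_RX_ALIGN_END with shift 6 */
    #define MY_CEIL_TO_ALIGN(n) (((n) + (MY_ALIGN - 1)) & ~(MY_ALIGN - 1))

    int main(void)
    {
        printf("1518 -> %u\n", (unsigned)MY_CEIL_TO_ALIGN(1518u)); /* 1536 */
        printf("1536 -> %u\n", (unsigned)MY_CEIL_TO_ALIGN(1536u)); /* 1536 */
        return 0;
    }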
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index c1767c48..729d38c5 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -80,7 +80,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_AUTONEG
+ .link_autoneg = ETH_LINK_AUTONEG
};
static uint16_t
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index f3cd52dc..9cf408ed 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1169,7 +1169,7 @@ eth_link_update(struct rte_eth_dev *dev,
link.link_status = (cgmii_ibuf_is_enabled(ibuf) &&
cgmii_ibuf_is_link_up(ibuf)) ? ETH_LINK_UP : ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ link.link_autoneg = ETH_LINK_FIXED;
rte_atomic64_cmpset((uint64_t *)dev_link, *(uint64_t *)dev_link,
*(uint64_t *)link_ptr);
@@ -1494,7 +1494,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
- if (pci_resource_ptr == NULL) {
+ if (pci_resource_ptr == MAP_FAILED) {
RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n",
rsc_filename, fd);
return -EINVAL;
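The szedata2 fix corrects a classic mmap(2) pitfall: on failure mmap returns MAP_FAILED, which is (void *)-1, not NULL, so the old NULL comparison could never fire. A minimal standalone demo of the correct check:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* Deliberately invalid: a zero length makes mmap fail (EINVAL). */
        void *p = mmap(NULL, 0, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {      /* correct check: MAP_FAILED, not NULL */
            perror("mmap");
            return 1;
        }
        munmap(p, 0);
        return 0;
    }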
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 2da5af04..d229bdff 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -98,7 +98,7 @@ nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
else if (nic->duplex == NICVF_FULL_DUPLEX)
link->link_duplex = ETH_LINK_FULL_DUPLEX;
link->link_speed = nic->speed;
- link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
+ link->link_autoneg = ETH_LINK_AUTONEG;
}
static void
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 87e9de1a..275adb31 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -252,7 +252,7 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Inform HW to xmit the packets */
nicvf_addr_write(sq->sq_door, used_desc);
- return nb_pkts;
+ return i;
}
static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
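The nicvf change makes the multi-segment TX path honor the burst API contract: return how many packets were actually queued (i), not how many were requested (nb_pkts), so the caller knows to retry the tail of the array when the ring fills. A minimal sketch of the loop shape (my_* names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct my_txq { uint16_t free_desc; };

    /* Return how many packets were actually queued; the caller retries
     * pkts[ret..n-1] later.  The bug was returning n unconditionally. */
    static uint16_t my_xmit_burst(struct my_txq *q, void **pkts, uint16_t n)
    {
        uint16_t i;

        (void)pkts;
        for (i = 0; i < n; i++) {
            if (q->free_desc == 0)
                break;              /* ring full: stop early */
            q->free_desc--;         /* pretend-enqueue pkts[i] */
        }
        return i;
    }

    int main(void)
    {
        struct my_txq q = { .free_desc = 2 };

        return my_xmit_burst(&q, NULL, 4) == 2 ? 0 : 1;
    }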
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 328dde08..8fde6030 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -558,7 +558,7 @@ new_device(int vid)
rte_atomic32_set(&vq->allow_queuing, 1);
}
- RTE_LOG(INFO, PMD, "New connection established\n");
+ RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
@@ -625,7 +625,7 @@ destroy_device(int vid)
state->max_vring = 0;
rte_spinlock_unlock(&state->lock);
- RTE_LOG(INFO, PMD, "Connection closed\n");
+ RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 8592485d..5a27ab77 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -288,17 +288,6 @@ virtio_dev_queue_release(void *queue __rte_unused)
/* do nothing */
}
-static int
-virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
-{
- if (vtpci_queue_idx == hw->max_queue_pairs * 2)
- return VTNET_CQ;
- else if (vtpci_queue_idx % 2 == 0)
- return VTNET_RQ;
- else
- return VTNET_TQ;
-}
-
static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
@@ -847,7 +836,7 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
/* Note: limit checked in rte_eth_xstats_names() */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct virtqueue *rxvq = dev->data->rx_queues[i];
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
if (rxvq == NULL)
continue;
for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
@@ -860,7 +849,7 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct virtqueue *txvq = dev->data->tx_queues[i];
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
if (txvq == NULL)
continue;
for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
@@ -1205,6 +1194,11 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
/* Reset the device although not necessary at startup */
vtpci_reset(hw);
+ if (hw->vqs) {
+ virtio_dev_free_mbufs(eth_dev);
+ virtio_free_queues(hw);
+ }
+
/* Tell the host we've noticed this device. */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
@@ -1565,7 +1559,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxvq = dev->data->rx_queues[i];
/* Flush the old packets */
- virtqueue_flush(rxvq->vq);
+ virtqueue_rxvq_flush(rxvq->vq);
virtqueue_notify(rxvq->vq);
}
@@ -1597,12 +1591,15 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+ if (rxvq == NULL || rxvq->vq == NULL)
+ continue;
+
PMD_INIT_LOG(DEBUG,
"Before freeing rxq[%d] used and unused buf", i);
VIRTQUEUE_DUMP(rxvq->vq);
PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
- while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
+ while ((buf = virtqueue_detach_unused(rxvq->vq)) != NULL) {
rte_pktmbuf_free(buf);
mbuf_num++;
}
@@ -1616,13 +1613,16 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct virtnet_tx *txvq = dev->data->tx_queues[i];
+ if (txvq == NULL || txvq->vq == NULL)
+ continue;
+
PMD_INIT_LOG(DEBUG,
"Before freeing txq[%d] used and unused bufs",
i);
VIRTQUEUE_DUMP(txvq->vq);
mbuf_num = 0;
- while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
+ while ((buf = virtqueue_detach_unused(txvq->vq)) != NULL) {
rte_pktmbuf_free(buf);
mbuf_num++;
}
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 43fac63b..c2fe9eb5 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -61,6 +61,7 @@
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
+#include "virtio_rxtx_simple.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -458,6 +459,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
vq->vq_ring.desc[desc_idx].flags =
VRING_DESC_F_WRITE;
}
+
+ virtio_rxq_vec_setup(rxvq);
}
memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
@@ -467,30 +470,31 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
&rxvq->fake_mbuf;
}
- while (!virtqueue_full(vq)) {
- m = rte_mbuf_raw_alloc(rxvq->mpool);
- if (m == NULL)
- break;
+ if (hw->use_simple_rxtx) {
+ while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxvq);
+ nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ }
+ } else {
+ while (!virtqueue_full(vq)) {
+ m = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (m == NULL)
+ break;
- /* Enqueue allocated buffers */
- if (hw->use_simple_rxtx)
- error = virtqueue_enqueue_recv_refill_simple(vq, m);
- else
+ /* Enqueue allocated buffers */
error = virtqueue_enqueue_recv_refill(vq, m);
-
- if (error) {
- rte_pktmbuf_free(m);
- break;
+ if (error) {
+ rte_pktmbuf_free(m);
+ break;
+ }
+ nbufs++;
}
- nbufs++;
- }
- vq_update_avail_idx(vq);
+ vq_update_avail_idx(vq);
+ }
PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
- virtio_rxq_vec_setup(rxvq);
-
VIRTQUEUE_DUMP(vq);
return 0;
@@ -506,7 +510,7 @@ virtio_update_rxtx_handler(struct rte_eth_dev *dev,
#if defined RTE_ARCH_X86
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
use_simple_rxtx = 1;
-#elif defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
+#elif defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
use_simple_rxtx = 1;
#endif
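The #elif fix matters because CONFIG_RTE_ARCH_ARM is a build-configuration key that never reaches the compiler; only RTE_ARCH_ARM is defined in rte_config.h, so the NEON fast path could never be selected on 32-bit ARM before this change. A compile-time sketch of the dispatch (MY_SIMD_HINT is illustrative; outside a DPDK build both arch macros are undefined and the fallback branch is taken):

    #include <stdio.h>

    /* RTE_ARCH_* come from rte_config.h in a DPDK build; the config-file
     * key CONFIG_RTE_ARCH_ARM is never visible to the compiler. */
    #if defined(RTE_ARCH_X86)
    #define MY_SIMD_HINT "check SSE3 at runtime"
    #elif defined(RTE_ARCH_ARM64) || defined(RTE_ARCH_ARM)
    #define MY_SIMD_HINT "check NEON at runtime"
    #else
    #define MY_SIMD_HINT "scalar path only"
    #endif

    int main(void)
    {
        puts(MY_SIMD_HINT);
        return 0;
    }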
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 28f82d6a..e2db823d 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -88,7 +88,4 @@ struct virtnet_ctl {
int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
-int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
- struct rte_mbuf *m);
-
#endif /* _VIRTIO_RXTX_H_ */
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index a6c0b34e..5285c2b1 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -57,34 +57,6 @@
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
-int __attribute__((cold))
-virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
- struct rte_mbuf *cookie)
-{
- struct vq_desc_extra *dxp;
- struct vring_desc *start_dp;
- uint16_t desc_idx;
-
- cookie->port = vq->rxq.port_id;
-
- desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
- dxp = &vq->vq_descx[desc_idx];
- dxp->cookie = (void *)cookie;
- vq->sw_ring[desc_idx] = cookie;
-
- start_dp = vq->vq_ring.desc;
- start_dp[desc_idx].addr =
- VIRTIO_MBUF_ADDR(cookie, vq) +
- RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
- start_dp[desc_idx].len = cookie->buf_len -
- RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
-
- vq->vq_free_cnt--;
- vq->vq_avail_idx++;
-
- return 0;
-}
-
uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -102,7 +74,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_compiler_barrier();
if (nb_used >= VIRTIO_TX_FREE_THRESH)
- virtio_xmit_cleanup(vq);
+ virtio_xmit_cleanup_simple(vq);
nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index b08f8594..bca677c0 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -89,7 +89,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
#define VIRTIO_TX_FREE_NR 32
/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
static inline void
-virtio_xmit_cleanup(struct virtqueue *vq)
+virtio_xmit_cleanup_simple(struct virtqueue *vq)
{
uint16_t i, desc_idx;
uint32_t nb_free = 0;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 91f6a59a..8bb155d8 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -142,6 +142,9 @@ virtio_user_start_device(struct virtio_user_dev *dev)
uint64_t features;
int ret;
+ /* Do not check the return value: it is already checked in init, or after a reset in stop */
+ vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL);
+
/* Step 0: tell vhost to create queues */
if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
goto error;
@@ -240,6 +243,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
PMD_INIT_LOG(ERR, "backend set up fails");
return -1;
}
+
if (vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL) < 0) {
PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
return -1;
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 4f8707ae..0e9e03c7 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -37,6 +37,7 @@
#include "virtqueue.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
+#include "virtio_rxtx_simple.h"
void
virtqueue_disable_intr(struct virtqueue *vq)
@@ -55,26 +56,50 @@ virtqueue_disable_intr(struct virtqueue *vq)
* 2) mbuf that hasn't been consumed by backend.
*/
struct rte_mbuf *
-virtqueue_detatch_unused(struct virtqueue *vq)
+virtqueue_detach_unused(struct virtqueue *vq)
{
struct rte_mbuf *cookie;
- int idx;
+ struct virtio_hw *hw;
+ uint16_t start, end;
+ int type, idx;
- if (vq != NULL)
- for (idx = 0; idx < vq->vq_nentries; idx++) {
+ if (vq == NULL)
+ return NULL;
+
+ hw = vq->hw;
+ type = virtio_get_queue_type(hw, vq->vq_queue_index);
+ start = vq->vq_avail_idx & (vq->vq_nentries - 1);
+ end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
+
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ if (hw->use_simple_rxtx && type == VTNET_RQ) {
+ if (start <= end && idx >= start && idx < end)
+ continue;
+ if (start > end && (idx >= start || idx < end))
+ continue;
+ cookie = vq->sw_ring[idx];
+ if (cookie != NULL) {
+ vq->sw_ring[idx] = NULL;
+ return cookie;
+ }
+ } else {
cookie = vq->vq_descx[idx].cookie;
if (cookie != NULL) {
vq->vq_descx[idx].cookie = NULL;
return cookie;
}
}
+ }
+
return NULL;
}
/* Flush the elements in the used ring. */
void
-virtqueue_flush(struct virtqueue *vq)
+virtqueue_rxvq_flush(struct virtqueue *vq)
{
+ struct virtnet_rx *rxq = &vq->rxq;
+ struct virtio_hw *hw = vq->hw;
struct vring_used_elem *uep;
struct vq_desc_extra *dxp;
uint16_t used_idx, desc_idx;
@@ -85,13 +110,27 @@ virtqueue_flush(struct virtqueue *vq)
for (i = 0; i < nb_used; i++) {
used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
uep = &vq->vq_ring.used->ring[used_idx];
- desc_idx = (uint16_t)uep->id;
- dxp = &vq->vq_descx[desc_idx];
- if (dxp->cookie != NULL) {
- rte_pktmbuf_free(dxp->cookie);
- dxp->cookie = NULL;
+ if (hw->use_simple_rxtx) {
+ desc_idx = used_idx;
+ rte_pktmbuf_free(vq->sw_ring[desc_idx]);
+ vq->vq_free_cnt++;
+ } else {
+ desc_idx = (uint16_t)uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq_ring_free_chain(vq, desc_idx);
}
vq->vq_used_cons_idx++;
- vq_ring_free_chain(vq, desc_idx);
+ }
+
+ if (hw->use_simple_rxtx) {
+ while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxq);
+ if (virtqueue_kick_prepare(vq))
+ virtqueue_notify(vq);
+ }
}
}
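The reworked virtqueue_detach_unused() must skip descriptors inside the still-armed window [start, end) of the software ring, and on a power-of-two ring that window can wrap past index zero; hence the two-case test on start and end. A self-contained demo of the containment check:

    #include <stdint.h>
    #include <stdio.h>

    /* Is idx inside the half-open window [start, end) of a ring that may
     * wrap?  Mirrors the start <= end / start > end cases in the patch. */
    static int my_in_window(uint16_t idx, uint16_t start, uint16_t end)
    {
        if (start <= end)
            return idx >= start && idx < end;
        return idx >= start || idx < end;
    }

    int main(void)
    {
        /* wrapped window on a 256-entry ring: [250, 4) */
        printf("%d\n", my_in_window(252, 250, 4));  /* 1: inside */
        printf("%d\n", my_in_window(2,   250, 4));  /* 1: inside, wrapped */
        printf("%d\n", my_in_window(100, 250, 4));  /* 0: outside */
        return 0;
    }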
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index ec967a5b..3748f606 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -288,10 +288,10 @@ void virtqueue_dump(struct virtqueue *vq);
/**
* Get all mbufs to be freed.
*/
-struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
/* Flush the elements in the used ring. */
-void virtqueue_flush(struct virtqueue *vq);
+void virtqueue_rxvq_flush(struct virtqueue *vq);
static inline int
virtqueue_full(const struct virtqueue *vq)
@@ -299,6 +299,17 @@ virtqueue_full(const struct virtqueue *vq)
return vq->vq_free_cnt == 0;
}
+static inline int
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
+{
+ if (vtpci_queue_idx == hw->max_queue_pairs * 2)
+ return VTNET_CQ;
+ else if (vtpci_queue_idx % 2 == 0)
+ return VTNET_RQ;
+ else
+ return VTNET_TQ;
+}
+
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
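The virtio_get_queue_type() helper relocated here encodes the virtio queue layout: RX and TX virtqueues interleave (even indexes are RX, odd are TX) and the control queue, when present, sits at index max_queue_pairs * 2. A standalone demo (my_* names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    enum my_vq_type { MY_RQ, MY_TQ, MY_CQ };

    static enum my_vq_type my_queue_type(uint16_t max_queue_pairs,
                                         uint16_t idx)
    {
        if (idx == max_queue_pairs * 2)
            return MY_CQ;                 /* control queue comes last */
        return (idx % 2 == 0) ? MY_RQ : MY_TQ;
    }

    int main(void)
    {
        /* 2 queue pairs: indexes 0 and 2 are RX, 1 and 3 are TX, 4 is CQ */
        uint16_t i;

        for (i = 0; i <= 4; i++)
            printf("vq %u -> %d\n", (unsigned)i,
                   (int)my_queue_type(2, i));
        return 0;
    }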
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 2bd2f272..9a889c6f 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -790,7 +790,7 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev,
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ link.link_autoneg = ETH_LINK_AUTONEG;
}
vmxnet3_dev_atomic_write_link_status(dev, &link);
diff --git a/drivers/net/xenvirt/virtqueue.h b/drivers/net/xenvirt/virtqueue.h
index 350eae3e..0a6dcd85 100644
--- a/drivers/net/xenvirt/virtqueue.h
+++ b/drivers/net/xenvirt/virtqueue.h
@@ -121,7 +121,7 @@ void virtqueue_dump(struct virtqueue *vq);
/**
* Get all mbufs to be freed.
*/
-struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf * virtqueue_detach_unused(struct virtqueue *vq);
static inline int __attribute__((always_inline))
virtqueue_full(const struct virtqueue *vq)