Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/atlantic/atl_ethdev.c | 106
-rw-r--r--  drivers/net/atlantic/atl_rxtx.c | 21
-rw-r--r--  drivers/net/atlantic/atl_types.h | 9
-rw-r--r--  drivers/net/atlantic/hw_atl/hw_atl_b0.c | 14
-rw-r--r--  drivers/net/atlantic/hw_atl/hw_atl_b0.h | 2
-rw-r--r--  drivers/net/atlantic/hw_atl/hw_atl_utils.c | 19
-rw-r--r--  drivers/net/atlantic/hw_atl/hw_atl_utils.h | 16
-rw-r--r--  drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c | 130
-rw-r--r--  drivers/net/avf/avf_ethdev.c | 3
-rw-r--r--  drivers/net/axgbe/axgbe_common.h | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x.c | 59
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 9
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.c | 32
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.c | 21
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.h | 3
-rw-r--r--  drivers/net/bnx2x/ecore_hsi.h | 2
-rw-r--r--  drivers/net/bnx2x/ecore_reg.h | 2
-rw-r--r--  drivers/net/bnx2x/ecore_sp.c | 12
-rw-r--r--  drivers/net/bnx2x/ecore_sp.h | 17
-rw-r--r--  drivers/net/bnx2x/elink.h | 2
-rw-r--r--  drivers/net/bnxt/bnxt.h | 2
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c | 22
-rw-r--r--  drivers/net/bnxt/bnxt_rxq.c | 3
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.c | 4
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad.c | 22
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad_private.h | 1
-rw-r--r--  drivers/net/bonding/rte_eth_bond_alb.c | 4
-rw-r--r--  drivers/net/bonding/rte_eth_bond_api.c | 8
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c | 167
-rw-r--r--  drivers/net/bonding/rte_eth_bond_private.h | 15
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c | 2
-rw-r--r--  drivers/net/cxgbe/base/t4_pci_id_tbl.h | 52
-rw-r--r--  drivers/net/cxgbe/cxgbe.h | 24
-rw-r--r--  drivers/net/cxgbe/cxgbe_ethdev.c | 15
-rw-r--r--  drivers/net/cxgbe/cxgbe_main.c | 36
-rw-r--r--  drivers/net/cxgbe/cxgbevf_main.c | 10
-rw-r--r--  drivers/net/cxgbe/sge.c | 78
-rw-r--r--  drivers/net/dpaa2/dpaa2_pmd_logs.h | 3
-rw-r--r--  drivers/net/e1000/base/e1000_82575.h | 4
-rw-r--r--  drivers/net/e1000/base/e1000_ich8lan.c | 2
-rw-r--r--  drivers/net/enetc/enetc_ethdev.c | 6
-rw-r--r--  drivers/net/enetc/enetc_rxtx.c | 12
-rw-r--r--  drivers/net/enic/enic.h | 12
-rw-r--r--  drivers/net/enic/enic_clsf.c | 38
-rw-r--r--  drivers/net/enic/enic_flow.c | 659
-rw-r--r--  drivers/net/enic/enic_main.c | 9
-rw-r--r--  drivers/net/enic/enic_res.c | 5
-rw-r--r--  drivers/net/enic/enic_rxtx_common.h | 3
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx.c | 4
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx_vec.c | 6
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c | 60
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c | 5
-rw-r--r--  drivers/net/i40e/i40e_flow.c | 8
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 11
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.h | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c | 6
-rw-r--r--  drivers/net/kni/rte_eth_kni.c | 5
-rw-r--r--  drivers/net/mlx4/mlx4.c | 14
-rw-r--r--  drivers/net/mlx4/mlx4.h | 26
-rw-r--r--  drivers/net/mlx4/mlx4_ethdev.c | 38
-rw-r--r--  drivers/net/mlx4/mlx4_flow.c | 67
-rw-r--r--  drivers/net/mlx4/mlx4_flow.h | 6
-rw-r--r--  drivers/net/mlx4/mlx4_intr.c | 40
-rw-r--r--  drivers/net/mlx4/mlx4_mr.c | 51
-rw-r--r--  drivers/net/mlx4/mlx4_rxq.c | 56
-rw-r--r--  drivers/net/mlx4/mlx4_rxtx.h | 18
-rw-r--r--  drivers/net/mlx4/mlx4_txq.c | 14
-rw-r--r--  drivers/net/mlx5/mlx5.c | 23
-rw-r--r--  drivers/net/mlx5/mlx5.h | 7
-rw-r--r--  drivers/net/mlx5/mlx5_defs.h | 3
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 28
-rw-r--r--  drivers/net/mlx5/mlx5_flow.c | 72
-rw-r--r--  drivers/net/mlx5/mlx5_flow.h | 1
-rw-r--r--  drivers/net/mlx5/mlx5_flow_dv.c | 20
-rw-r--r--  drivers/net/mlx5/mlx5_flow_tcf.c | 8
-rw-r--r--  drivers/net/mlx5/mlx5_flow_verbs.c | 12
-rw-r--r--  drivers/net/mlx5/mlx5_mac.c | 4
-rw-r--r--  drivers/net/mlx5/mlx5_mr.c | 57
-rw-r--r--  drivers/net/mlx5/mlx5_nl.c | 12
-rw-r--r--  drivers/net/mlx5/mlx5_rss.c | 10
-rw-r--r--  drivers/net/mlx5/mlx5_rxmode.c | 8
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c | 71
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c | 5
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h | 9
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.c | 4
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.h | 17
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 12
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 11
-rw-r--r--  drivers/net/mlx5/mlx5_socket.c | 8
-rw-r--r--  drivers/net/mlx5/mlx5_stats.c | 14
-rw-r--r--  drivers/net/mlx5/mlx5_trigger.c | 16
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c | 65
-rw-r--r--  drivers/net/mlx5/mlx5_vlan.c | 6
-rw-r--r--  drivers/net/mvpp2/mrvl_mtr.c | 3
-rw-r--r--  drivers/net/netvsc/hn_ethdev.c | 3
-rw-r--r--  drivers/net/netvsc/hn_rxtx.c | 55
-rw-r--r--  drivers/net/netvsc/hn_var.h | 32
-rw-r--r--  drivers/net/netvsc/hn_vf.c | 84
-rw-r--r--  drivers/net/nfp/nfp_net.c | 53
-rw-r--r--  drivers/net/nfp/nfp_net_ctrl.h | 4
-rw-r--r--  drivers/net/nfp/nfp_net_pmd.h | 4
-rw-r--r--  drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h | 3
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c | 18
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_cppcore.c | 9
-rw-r--r--  drivers/net/octeontx/octeontx_ethdev.h | 1
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c | 5
-rw-r--r--  drivers/net/qede/base/common_hsi.h | 4
-rw-r--r--  drivers/net/qede/base/ecore_hsi_common.h | 2
-rw-r--r--  drivers/net/qede/base/ecore_hsi_eth.h | 2
-rw-r--r--  drivers/net/qede/base/ecore_hw_defs.h | 2
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 6
-rw-r--r--  drivers/net/qede/qede_rxtx.c | 17
-rw-r--r--  drivers/net/ring/rte_eth_ring.c | 96
-rw-r--r--  drivers/net/sfc/sfc.c | 6
-rw-r--r--  drivers/net/sfc/sfc.h | 2
-rw-r--r--  drivers/net/sfc/sfc_debug.h | 3
-rw-r--r--  drivers/net/sfc/sfc_ef10_tx.c | 11
-rw-r--r--  drivers/net/sfc/sfc_ethdev.c | 72
-rw-r--r--  drivers/net/sfc/sfc_mcdi.c | 3
-rw-r--r--  drivers/net/sfc/sfc_rx.c | 22
-rw-r--r--  drivers/net/sfc/sfc_rx.h | 4
-rw-r--r--  drivers/net/sfc/sfc_tso.c | 11
-rw-r--r--  drivers/net/sfc/sfc_tx.c | 3
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_flow.c | 5
-rw-r--r--  drivers/net/tap/rte_eth_tap.c | 28
-rw-r--r--  drivers/net/tap/tap_bpf_program.c | 2
-rw-r--r--  drivers/net/vdev_netvsc/vdev_netvsc.c | 7
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c | 24
-rw-r--r--  drivers/net/virtio/virtio_ethdev.h | 2
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c | 36
-rw-r--r--  drivers/net/virtio/virtio_user/vhost.h | 4
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.c | 12
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_user.c | 5
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c | 3
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.h | 2
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c | 61
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c | 1
137 files changed, 2036 insertions, 1339 deletions
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 5bc04f55..2d05bb4c 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -165,7 +165,8 @@ static struct rte_pci_driver rte_atl_pmd = {
| DEV_RX_OFFLOAD_IPV4_CKSUM \
| DEV_RX_OFFLOAD_UDP_CKSUM \
| DEV_RX_OFFLOAD_TCP_CKSUM \
- | DEV_RX_OFFLOAD_JUMBO_FRAME)
+ | DEV_RX_OFFLOAD_JUMBO_FRAME \
+ | DEV_RX_OFFLOAD_VLAN_FILTER)
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
| DEV_TX_OFFLOAD_IPV4_CKSUM \
@@ -174,6 +175,8 @@ static struct rte_pci_driver rte_atl_pmd = {
| DEV_TX_OFFLOAD_TCP_TSO \
| DEV_TX_OFFLOAD_MULTI_SEGS)
+#define SFP_EEPROM_SIZE 0x100
+
static const struct rte_eth_desc_lim rx_desc_lim = {
.nb_max = ATL_MAX_RING_DESC,
.nb_min = ATL_MIN_RING_DESC,
@@ -465,8 +468,6 @@ atl_dev_start(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
- uint32_t *link_speeds;
- uint32_t speed = 0;
int status;
int err;
@@ -543,6 +544,8 @@ atl_dev_start(struct rte_eth_dev *dev)
goto error;
}
+ err = atl_dev_set_link_up(dev);
+
err = hw->aq_fw_ops->update_link_status(hw);
if (err)
@@ -550,26 +553,6 @@ atl_dev_start(struct rte_eth_dev *dev)
dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
- link_speeds = &dev->data->dev_conf.link_speeds;
-
- speed = 0x0;
-
- if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
- speed = hw->aq_nic_cfg->link_speed_msk;
- } else {
- if (*link_speeds & ETH_LINK_SPEED_10G)
- speed |= AQ_NIC_RATE_10G;
- if (*link_speeds & ETH_LINK_SPEED_5G)
- speed |= AQ_NIC_RATE_5G;
- if (*link_speeds & ETH_LINK_SPEED_1G)
- speed |= AQ_NIC_RATE_1G;
- if (*link_speeds & ETH_LINK_SPEED_2_5G)
- speed |= AQ_NIC_RATE_2G5;
- if (*link_speeds & ETH_LINK_SPEED_100M)
- speed |= AQ_NIC_RATE_100M;
- }
-
- err = hw->aq_fw_ops->set_link_speed(hw, speed);
if (err)
goto error;
@@ -657,9 +640,25 @@ static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t link_speeds = dev->data->dev_conf.link_speeds;
+ uint32_t speed_mask = 0;
+
+ if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ speed_mask = hw->aq_nic_cfg->link_speed_msk;
+ } else {
+ if (link_speeds & ETH_LINK_SPEED_10G)
+ speed_mask |= AQ_NIC_RATE_10G;
+ if (link_speeds & ETH_LINK_SPEED_5G)
+ speed_mask |= AQ_NIC_RATE_5G;
+ if (link_speeds & ETH_LINK_SPEED_1G)
+ speed_mask |= AQ_NIC_RATE_1G;
+ if (link_speeds & ETH_LINK_SPEED_2_5G)
+ speed_mask |= AQ_NIC_RATE_2G5;
+ if (link_speeds & ETH_LINK_SPEED_100M)
+ speed_mask |= AQ_NIC_RATE_100M;
+ }
- return hw->aq_fw_ops->set_link_speed(hw,
- hw->aq_nic_cfg->link_speed_msk);
+ return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}
/*
@@ -761,7 +760,7 @@ atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
atl_xstats_tbl[i].name);
- return size;
+ return i;
}
static int
@@ -781,7 +780,7 @@ atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
atl_xstats_tbl[i].offset);
}
- return n;
+ return i;
}
static int
@@ -879,6 +878,7 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
struct atl_interrupt *intr =
ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct rte_eth_link link, old;
+ u32 fc = AQ_NIC_FC_OFF;
int err = 0;
link.link_status = ETH_LINK_DOWN;
@@ -915,6 +915,15 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
if (link.link_status == old.link_status)
return -1;
+ /* Driver has to update flow control settings on RX block
+ * on any link event.
+ * We should query FW whether it negotiated FC.
+ */
+ if (hw->aq_fw_ops->get_flow_control) {
+ hw->aq_fw_ops->get_flow_control(hw, &fc);
+ hw_atl_b0_set_fc(hw, fc, 0U);
+ }
+
return 0;
}
@@ -1094,8 +1103,6 @@ atl_dev_interrupt_handler(void *param)
atl_dev_interrupt_action(dev, dev->intr_handle);
}
-#define SFP_EEPROM_SIZE 0xff
-
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
@@ -1106,28 +1113,46 @@ static int
atl_dev_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
{
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int dev_addr = SMBUS_DEVICE_ID;
if (hw->aq_fw_ops->get_eeprom == NULL)
return -ENOTSUP;
- if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
+ if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
+ eeprom->data == NULL)
+ return -EINVAL;
+
+ if (eeprom->magic > 0x7F)
return -EINVAL;
- return hw->aq_fw_ops->get_eeprom(hw, eeprom->data, eeprom->length);
+ if (eeprom->magic)
+ dev_addr = eeprom->magic;
+
+ return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
+ eeprom->length, eeprom->offset);
}
static int
atl_dev_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
{
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int dev_addr = SMBUS_DEVICE_ID;
if (hw->aq_fw_ops->set_eeprom == NULL)
return -ENOTSUP;
- if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
+ if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
+ eeprom->data == NULL)
+ return -EINVAL;
+
+ if (eeprom->magic > 0x7F)
return -EINVAL;
- return hw->aq_fw_ops->set_eeprom(hw, eeprom->data, eeprom->length);
+ if (eeprom->magic)
+ dev_addr = eeprom->magic;
+
+ return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
+ eeprom->length, eeprom->offset);
}
static int
@@ -1160,16 +1185,21 @@ static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 fc = AQ_NIC_FC_OFF;
+
+ if (hw->aq_fw_ops->get_flow_control == NULL)
+ return -ENOTSUP;
+
+ hw->aq_fw_ops->get_flow_control(hw, &fc);
- if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
+ if (fc == AQ_NIC_FC_OFF)
fc_conf->mode = RTE_FC_NONE;
- else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
+ else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
fc_conf->mode = RTE_FC_FULL;
- else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ else if (fc & AQ_NIC_FC_RX)
fc_conf->mode = RTE_FC_RX_PAUSE;
- else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ else if (fc & AQ_NIC_FC_TX)
fc_conf->mode = RTE_FC_TX_PAUSE;
-
return 0;
}
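The reworked atl_flow_ctrl_get() above reads the negotiated pause state back from firmware and also fixes the old branch chain that tested AQ_NIC_FC_RX twice, which made RTE_FC_TX_PAUSE unreachable. A minimal standalone sketch of that bit-to-mode mapping (the AQ_NIC_FC_* values and the fc_mode enum below are illustrative stand-ins, not the driver's definitions):

```c
#include <stdio.h>

/* Pause-capability bits as stand-ins for the driver's AQ_NIC_FC_*. */
#define AQ_NIC_FC_OFF 0x0u
#define AQ_NIC_FC_RX  0x1u
#define AQ_NIC_FC_TX  0x2u

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Map a firmware pause bitmask to an ethdev-style flow-control mode.
 * Testing the FULL case first avoids the original bug, where the RX
 * bit was checked twice and TX_PAUSE could never be reported. */
static enum fc_mode fc_bits_to_mode(unsigned int fc)
{
	if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		return FC_FULL;
	if (fc & AQ_NIC_FC_RX)
		return FC_RX_PAUSE;
	if (fc & AQ_NIC_FC_TX)
		return FC_TX_PAUSE;
	return FC_NONE;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       fc_bits_to_mode(AQ_NIC_FC_OFF),
	       fc_bits_to_mode(AQ_NIC_FC_RX),
	       fc_bits_to_mode(AQ_NIC_FC_TX),
	       fc_bits_to_mode(AQ_NIC_FC_RX | AQ_NIC_FC_TX));
	return 0;
}
```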
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 40c91379..fe007704 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -812,12 +812,12 @@ atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
ol_flags = m->ol_flags;
if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
return i;
}
if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ rte_errno = ENOTSUP;
return i;
}
@@ -946,7 +946,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
}
- PMD_RX_LOG(ERR, "port_id=%u queue_id=%u tail=%u "
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
"eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
(unsigned int)rxq->port_id,
(unsigned int)rxq->queue_id,
@@ -981,7 +981,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (true) {
new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (new_mbuf == NULL) {
- PMD_RX_LOG(ERR,
+ PMD_RX_LOG(DEBUG,
"RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned int)rxq->port_id,
(unsigned int)rxq->queue_id);
@@ -1084,7 +1084,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
adapter->sw_stats.q_ibytes[rxq->queue_id] +=
rx_mbuf_first->pkt_len;
- PMD_RX_LOG(ERR, "add mbuf segs=%d pkt_len=%d",
+ PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d",
rx_mbuf_first->nb_segs,
rx_mbuf_first->pkt_len);
}
@@ -1104,7 +1104,7 @@ err_stop:
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
if (nb_hold > rxq->rx_free_thresh) {
- PMD_RX_LOG(ERR, "port_id=%u queue_id=%u rx_tail=%u "
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
"nb_hold=%u nb_rx=%u",
(unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
(unsigned int)tail, (unsigned int)nb_hold,
@@ -1129,8 +1129,6 @@ atl_xmit_cleanup(struct atl_tx_queue *txq)
struct hw_atl_txd_s *txd;
int to_clean = 0;
- PMD_INIT_FUNC_TRACE();
-
if (txq != NULL) {
sw_ring = txq->sw_ring;
int head = txq->tx_head;
@@ -1181,11 +1179,7 @@ atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
uint32_t tx_cmd = 0;
uint64_t ol_flags = tx_pkt->ol_flags;
- PMD_INIT_FUNC_TRACE();
-
if (ol_flags & PKT_TX_TCP_SEG) {
- PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
-
tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
txc->cmd = 0x4;
@@ -1240,8 +1234,6 @@ atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
u32 tx_cmd = 0U;
int desc_count = 0;
- PMD_INIT_FUNC_TRACE();
-
tail = txq->tx_tail;
txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
@@ -1356,4 +1348,3 @@ atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}
-
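The rte_errno fixes in atl_prep_pkts() follow the ethdev convention that tx_prepare-style callbacks return the index of the first bad packet and store a positive errno value; writing -EINVAL would break `rte_errno == EINVAL` checks in applications. A hedged sketch of that convention with stand-in types:

```c
#include <errno.h>
#include <stdio.h>

/* Stand-ins for the ethdev pieces this sketch assumes. */
static int fake_rte_errno; /* models rte_errno */
struct pkt {
	int nb_segs;
	unsigned long long ol_flags;
};
#define MAX_SEGS    16
#define NOTSUP_MASK 0xf000ull

/* Validate a burst the way tx_prepare callbacks do: stop at the first
 * bad packet, store a POSITIVE errno value, return the packet index. */
static unsigned int prep_pkts(const struct pkt *pkts, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (pkts[i].nb_segs > MAX_SEGS) {
			fake_rte_errno = EINVAL;  /* not -EINVAL */
			return i;
		}
		if (pkts[i].ol_flags & NOTSUP_MASK) {
			fake_rte_errno = ENOTSUP; /* not -ENOTSUP */
			return i;
		}
	}
	return i; /* == n: the whole burst is valid */
}

int main(void)
{
	const struct pkt burst[2] = { { 1, 0 }, { 32, 0 } };
	unsigned int ok = prep_pkts(burst, 2);

	/* Callers can now compare against plain errno constants. */
	printf("prepared %u, errno==EINVAL: %d\n",
	       ok, fake_rte_errno == EINVAL);
	return 0;
}
```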
diff --git a/drivers/net/atlantic/atl_types.h b/drivers/net/atlantic/atl_types.h
index 3d90f6ca..c53d5896 100644
--- a/drivers/net/atlantic/atl_types.h
+++ b/drivers/net/atlantic/atl_types.h
@@ -94,6 +94,8 @@ struct aq_hw_s {
struct hw_atl_stats_s last_stats;
struct aq_stats_s curr_stats;
+ u32 caps_lo;
+
u64 speed;
unsigned int chip_features;
u32 fw_ver_actual;
@@ -133,13 +135,16 @@ struct aq_fw_ops {
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
u32 *supported_rates);
+ int (*get_flow_control)(struct aq_hw_s *self, u32 *fc);
int (*set_flow_control)(struct aq_hw_s *self);
int (*led_control)(struct aq_hw_s *self, u32 mode);
- int (*get_eeprom)(struct aq_hw_s *self, u32 *data, u32 len);
+ int (*get_eeprom)(struct aq_hw_s *self, int dev_addr,
+ u32 *data, u32 len, u32 offset);
- int (*set_eeprom)(struct aq_hw_s *self, u32 *data, u32 len);
+ int (*set_eeprom)(struct aq_hw_s *self, int dev_addr,
+ u32 *data, u32 len, u32 offset);
};
struct atl_sw_stats {
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
index 9400e0ed..a76268e9 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
@@ -26,12 +26,17 @@ int hw_atl_b0_hw_reset(struct aq_hw_s *self)
return err;
}
+int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+ return 0;
+}
+
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
u32 tc = 0U;
u32 buff_size = 0U;
unsigned int i_priority = 0U;
- bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -64,7 +69,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
- is_rx_flow_control = 0;
buff_size = HW_ATL_B0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -76,9 +80,7 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
- hw_atl_rpb_rx_xoff_en_per_tc_set(self,
- is_rx_flow_control ? 1U : 0U,
- tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
@@ -290,6 +292,8 @@ int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_en_set(self, 1U);
+
hw_atl_rdm_rx_dca_en_set(self, 0U);
hw_atl_rdm_rx_dca_mode_set(self, 0U);
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
index 06feb56c..d1ba2ace 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
@@ -11,6 +11,8 @@
int hw_atl_b0_hw_reset(struct aq_hw_s *self);
int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr);
+int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc);
+
int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr,
int index, int size, int cpu, int vec);
int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, uint64_t base_addr,
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
index f11093a5..26260194 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
@@ -306,6 +306,11 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
HW_ATL_MIF_CMD)),
1, 1000U);
+ if (err) {
+ err = -ETIMEDOUT;
+ goto err_exit;
+ }
+
*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
a += 4;
}
@@ -328,12 +333,13 @@ int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
goto err_exit;
}
if (IS_CHIP_FEATURE(REVISION_B1)) {
- u32 offset = 0;
+ u32 mbox_offset = (a - self->rpc_addr) / sizeof(u32);
+ u32 data_offset = 0;
- for (; offset < cnt; ++offset) {
- aq_hw_write_reg(self, 0x328, p[offset]);
+ for (; data_offset < cnt; ++mbox_offset, ++data_offset) {
+ aq_hw_write_reg(self, 0x328, p[data_offset]);
aq_hw_write_reg(self, 0x32C,
- (0x80000000 | (0xFFFF & (offset * 4))));
+ (0x80000000 | (0xFFFF & (mbox_offset * 4))));
hw_atl_mcp_up_force_intr_set(self, 1);
/* 1000 times by 10us = 10ms */
AQ_HW_WAIT_FOR((aq_hw_read_reg(self,
@@ -462,8 +468,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
} while (sw.tid != fw.tid || 0xFFFFU == fw.len);
- if (err < 0)
- goto err_exit;
if (rpc) {
if (fw.len) {
@@ -875,8 +879,7 @@ static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
}
err = hw_atl_utils_fw_rpc_call(self, rpc_size);
- if (err < 0)
- goto err_exit;
+
err_exit:
return err;
}
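The first hunk above turns an exhausted AQ_HW_WAIT_FOR poll into an explicit -ETIMEDOUT instead of proceeding to read a register that never became valid. A generic sketch of that poll-then-fail pattern (the helper below is a stand-in, not the driver's macro):

```c
#include <errno.h>
#include <stdio.h>

/* Poll ready() up to `tries` times; mirrors the AQ_HW_WAIT_FOR idiom
 * of converting exhaustion into -ETIMEDOUT rather than silently using
 * a value that was never produced. */
static int wait_for(int (*ready)(void), int tries)
{
	while (tries--) {
		if (ready())
			return 0;
		/* the real driver sleeps between polls (usleep etc.) */
	}
	return -ETIMEDOUT;
}

static int never_ready(void) { return 0; }

int main(void)
{
	int err = wait_for(never_ready, 3);

	if (err)
		printf("download aborted: %d\n", err); /* -ETIMEDOUT */
	return 0;
}
```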
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
index 5f3f7084..b1f03f42 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
@@ -336,15 +336,8 @@ struct offload_info {
u8 buf[0];
} __attribute__((__packed__));
-struct smbus_read_request {
- u32 offset; /* not used */
- u32 device_id;
- u32 address;
- u32 length;
-} __attribute__((__packed__));
-
-struct smbus_write_request {
- u32 offset; /* not used */
+struct smbus_request {
+ u32 msg_id; /* not used */
u32 device_id;
u32 address;
u32 length;
@@ -389,8 +382,6 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 10U
#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 13U // 0xd
-#define SMBUS_READ_REQUEST BIT(13)
-#define SMBUS_WRITE_REQUEST BIT(14)
#define SMBUS_DEVICE_ID 0x50
enum hw_atl_fw2x_rate {
@@ -414,6 +405,9 @@ enum hw_atl_fw2x_caps_lo {
CAPS_LO_2P5GBASET_FD,
CAPS_LO_5GBASET_FD,
CAPS_LO_10GBASET_FD,
+ CAPS_LO_AUTONEG,
+ CAPS_LO_SMBUS_READ,
+ CAPS_LO_SMBUS_WRITE,
};
enum hw_atl_fw2x_caps_hi {
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 6841d9bc..11f14d1a 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -34,7 +34,6 @@
#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
-#define HW_ATL_FW_FEATURE_EEPROM 0x03010025
#define HW_ATL_FW_FEATURE_LED 0x03010026
struct fw2x_msg_wol_pattern {
@@ -62,6 +61,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
static int aq_fw2x_init(struct aq_hw_s *self)
{
int err = 0;
+ struct hw_aq_atl_utils_mbox mbox;
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
@@ -70,6 +70,12 @@ static int aq_fw2x_init(struct aq_hw_s *self)
AQ_HW_WAIT_FOR(0U != (self->rpc_addr =
aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)),
1000U, 100U);
+
+ /* Read caps */
+ hw_atl_utils_mpi_read_stats(self, &mbox);
+
+ self->caps_lo = mbox.info.caps_lo;
+
return err;
}
@@ -462,7 +468,15 @@ static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
return err;
}
+static int aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fc)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ *fc = ((mpi_state & BIT(CAPS_HI_PAUSE)) ? AQ_NIC_FC_RX : 0) |
+ ((mpi_state & BIT(CAPS_HI_ASYMMETRIC_PAUSE)) ? AQ_NIC_FC_TX : 0);
+
+ return 0;
+}
static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
{
@@ -484,38 +498,42 @@ static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
return 0;
}
-static int aq_fw2x_get_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
+static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr,
+ u32 *data, u32 len, u32 offset)
{
- int err = 0;
- struct smbus_read_request request;
- u32 mpi_opts;
+ u32 bytes_remains = len % sizeof(u32);
+ u32 num_dwords = len / sizeof(u32);
+ struct smbus_request request;
u32 result = 0;
+ u32 mpi_opts;
+ int err = 0;
- if (self->fw_ver_actual < HW_ATL_FW_FEATURE_EEPROM)
+ if ((self->caps_lo & BIT(CAPS_LO_SMBUS_READ)) == 0)
return -EOPNOTSUPP;
- request.device_id = SMBUS_DEVICE_ID;
- request.address = 0;
+ request.msg_id = 0;
+ request.device_id = dev_addr;
+ request.address = offset;
request.length = len;
/* Write SMBUS request to cfg memory */
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
(u32 *)(void *)&request,
- RTE_ALIGN(sizeof(request), sizeof(u32)));
+ sizeof(request) / sizeof(u32));
if (err < 0)
return err;
- /* Toggle 0x368.SMBUS_READ_REQUEST bit */
+ /* Toggle 0x368.CAPS_LO_SMBUS_READ bit */
mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR);
- mpi_opts ^= SMBUS_READ_REQUEST;
+ mpi_opts ^= BIT(CAPS_LO_SMBUS_READ);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, mpi_opts);
/* Wait until REQUEST_BIT matched in 0x370 */
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR) &
- SMBUS_READ_REQUEST) == (mpi_opts & SMBUS_READ_REQUEST),
+ BIT(CAPS_LO_SMBUS_READ)) == (mpi_opts & BIT(CAPS_LO_SMBUS_READ)),
10U, 10000U);
if (err < 0)
@@ -523,64 +541,106 @@ static int aq_fw2x_get_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr + sizeof(u32),
&result,
- RTE_ALIGN(sizeof(result), sizeof(u32)));
+ sizeof(result) / sizeof(u32));
if (err < 0)
return err;
- if (result == 0) {
+ if (result)
+ return -EIO;
+
+ if (num_dwords) {
err = hw_atl_utils_fw_downld_dwords(self,
- self->rpc_addr + sizeof(u32) * 2,
- data,
- RTE_ALIGN(len, sizeof(u32)));
+ self->rpc_addr + sizeof(u32) * 2,
+ data,
+ num_dwords);
if (err < 0)
return err;
}
+ if (bytes_remains) {
+ u32 val = 0;
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ self->rpc_addr + (sizeof(u32) * 2) +
+ (num_dwords * sizeof(u32)),
+ &val,
+ 1);
+
+ if (err < 0)
+ return err;
+
+ rte_memcpy((u8 *)data + len - bytes_remains,
+ &val, bytes_remains);
+ }
+
return 0;
}
-static int aq_fw2x_set_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
+static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr,
+ u32 *data, u32 len, u32 offset)
{
- struct smbus_write_request request;
+ struct smbus_request request;
u32 mpi_opts, result = 0;
int err = 0;
- if (self->fw_ver_actual < HW_ATL_FW_FEATURE_EEPROM)
+ if ((self->caps_lo & BIT(CAPS_LO_SMBUS_WRITE)) == 0)
return -EOPNOTSUPP;
- request.device_id = SMBUS_DEVICE_ID;
- request.address = 0;
+ request.msg_id = 0;
+ request.device_id = dev_addr;
+ request.address = offset;
request.length = len;
/* Write SMBUS request to cfg memory */
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
(u32 *)(void *)&request,
- RTE_ALIGN(sizeof(request), sizeof(u32)));
+ sizeof(request) / sizeof(u32));
if (err < 0)
return err;
/* Write SMBUS data to cfg memory */
- err = hw_atl_utils_fw_upload_dwords(self,
- self->rpc_addr + sizeof(request),
- (u32 *)(void *)data,
- RTE_ALIGN(len, sizeof(u32)));
+ u32 num_dwords = len / sizeof(u32);
+ u32 bytes_remains = len % sizeof(u32);
- if (err < 0)
- return err;
+ if (num_dwords) {
+ err = hw_atl_utils_fw_upload_dwords(self,
+ self->rpc_addr + sizeof(request),
+ (u32 *)(void *)data,
+ num_dwords);
+
+ if (err < 0)
+ return err;
+ }
- /* Toggle 0x368.SMBUS_WRITE_REQUEST bit */
+ if (bytes_remains) {
+ u32 val = 0;
+
+ rte_memcpy(&val, (u8 *)data + (sizeof(u32) * num_dwords),
+ bytes_remains);
+
+ err = hw_atl_utils_fw_upload_dwords(self,
+ self->rpc_addr + sizeof(request) +
+ (num_dwords * sizeof(u32)),
+ &val,
+ 1);
+
+ if (err < 0)
+ return err;
+ }
+
+ /* Toggle 0x368.CAPS_LO_SMBUS_WRITE bit */
mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR);
- mpi_opts ^= SMBUS_WRITE_REQUEST;
+ mpi_opts ^= BIT(CAPS_LO_SMBUS_WRITE);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, mpi_opts);
/* Wait until REQUEST_BIT matched in 0x370 */
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR) &
- SMBUS_WRITE_REQUEST) == (mpi_opts & SMBUS_WRITE_REQUEST),
+ BIT(CAPS_LO_SMBUS_WRITE)) == (mpi_opts & BIT(CAPS_LO_SMBUS_WRITE)),
10U, 10000U);
if (err < 0)
@@ -589,11 +649,14 @@ static int aq_fw2x_set_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
/* Read status of write operation */
err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr + sizeof(u32),
&result,
- RTE_ALIGN(sizeof(result), sizeof(u32)));
+ sizeof(result) / sizeof(u32));
if (err < 0)
return err;
+ if (result)
+ return -EIO;
+
return 0;
}
@@ -611,6 +674,7 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.get_cable_len = aq_fw2x_get_cable_len,
.set_eee_rate = aq_fw2x_set_eee_rate,
.get_eee_rate = aq_fw2x_get_eee_rate,
+ .get_flow_control = aq_fw2x_get_flow_control,
.set_flow_control = aq_fw2x_set_flow_control,
.led_control = aq_fw2x_led_control,
.get_eeprom = aq_fw2x_get_eeprom,
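The reworked EEPROM path above moves an arbitrary byte length over the dword-oriented firmware mailbox: whole 32-bit words go straight into the caller's buffer, then a 1-3 byte tail is copied through a scratch word so the final dword read never overruns the destination. A self-contained sketch of that split, with a fake mailbox read standing in for hw_atl_utils_fw_downld_dwords():

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical dword-granular mailbox read standing in for
 * hw_atl_utils_fw_downld_dwords(); fills `cnt` words of fake data. */
static int mbox_read_dwords(uint32_t addr, uint32_t *dst, uint32_t cnt)
{
	uint32_t i;

	for (i = 0; i < cnt; i++)
		dst[i] = 0xA0A0A0A0u + addr + i;
	return 0;
}

/* Read `len` bytes starting at `addr`: whole dwords first, then the
 * 1-3 byte remainder via a scratch word plus memcpy. */
static int read_bytes(uint32_t addr, uint32_t *data, uint32_t len)
{
	uint32_t num_dwords = len / sizeof(uint32_t);
	uint32_t bytes_remains = len % sizeof(uint32_t);
	int err;

	if (num_dwords) {
		err = mbox_read_dwords(addr, data, num_dwords);
		if (err < 0)
			return err;
	}
	if (bytes_remains) {
		uint32_t val = 0;

		err = mbox_read_dwords(addr + num_dwords, &val, 1);
		if (err < 0)
			return err;
		memcpy((uint8_t *)data + len - bytes_remains, &val,
		       bytes_remains);
	}
	return 0;
}

int main(void)
{
	uint32_t buf[2] = { 0, 0 };

	read_bytes(0, buf, 7); /* one whole dword plus a 3-byte tail */
	printf("tail byte: 0x%02x\n", ((uint8_t *)buf)[6]);
	return 0;
}
```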
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
index 13eec1b4..4dc61d9f 100644
--- a/drivers/net/avf/avf_ethdev.c
+++ b/drivers/net/avf/avf_ethdev.c
@@ -498,7 +498,6 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- memset(dev_info, 0, sizeof(*dev_info));
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
@@ -1159,7 +1158,7 @@ avf_enable_irq0(struct avf_hw *hw)
AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);
AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
- AVFINT_DYN_CTL01_ITR_INDX_MASK);
+ AVFINT_DYN_CTL01_CLEARPBA_MASK | AVFINT_DYN_CTL01_ITR_INDX_MASK);
AVF_WRITE_FLUSH(hw);
}
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index d25d54ca..34f60f15 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -1351,9 +1351,9 @@ do { \
#define SET_BITS_LE(_var, _index, _width, _val) \
do { \
- (_var) &= rte_cpu_to_le_32(~(((0x1 << (_width)) - 1) << (_index)));\
+ (_var) &= rte_cpu_to_le_32(~(((0x1U << (_width)) - 1) << (_index)));\
(_var) |= rte_cpu_to_le_32((((_val) & \
- ((0x1 << (_width)) - 1)) << (_index))); \
+ ((0x1U << (_width)) - 1)) << (_index))); \
} while (0)
/* Bit setting and getting macros based on register fields
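This SET_BITS_LE change and the later 1<<31 fixes in this series (ecore_hsi.h, ecore_reg.h, elink.h, bnxt.h) all address the same latent bug: left-shifting a signed int into the sign bit is undefined behavior in C, and the sign-extended result pollutes wider masks. A small demonstration:

```c
#include <stdint.h>
#include <stdio.h>

#define BAD_BIT31  (0x1 << 31)  /* signed shift into the sign bit: UB */
#define GOOD_BIT31 (0x1U << 31) /* unsigned: well defined */

int main(void)
{
	/* On typical two's-complement ABIs the signed form sign-extends
	 * when widened, corrupting the upper half of a 64-bit mask. */
	uint64_t bad  = (uint64_t)BAD_BIT31;  /* 0xffffffff80000000 */
	uint64_t good = (uint64_t)GOOD_BIT31; /* 0x0000000080000000 */

	printf("bad=%016llx good=%016llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}
```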
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 26b3828e..3e705c7a 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -25,6 +25,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <zlib.h>
+#include <rte_string_fns.h>
#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
#define BNX2X_PMD_VERSION_MAJOR 1
@@ -123,7 +124,7 @@ static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp);
static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
uint8_t storm, uint16_t index, uint8_t op,
uint8_t update);
@@ -184,6 +185,7 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
}
dma->paddr = (uint64_t) z->iova;
dma->vaddr = z->addr;
+ dma->mzone = (const void *)z;
PMD_DRV_LOG(DEBUG, sc,
"%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
@@ -191,6 +193,19 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
return 0;
}
+void bnx2x_dma_free(struct bnx2x_dma *dma)
+{
+ if (dma->mzone == NULL)
+ return;
+
+ rte_memzone_free((const struct rte_memzone *)dma->mzone);
+ dma->sc = NULL;
+ dma->paddr = 0;
+ dma->vaddr = NULL;
+ dma->nseg = 0;
+ dma->mzone = NULL;
+}
+
static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
{
uint32_t lock_status;
@@ -1099,6 +1114,12 @@ bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
atomic_load_acq_long(&sc->cq_spq_left),
atomic_load_acq_long(&sc->eq_spq_left));
+ /* RAMROD completion is processed in bnx2x_intr_legacy()
+ * which can run from different contexts.
+ * Ask bnx2x_intr_legacy() to process RAMROD

+ * completion whenever it gets scheduled.
+ */
+ rte_atomic32_set(&sc->scan_fp, 1);
bnx2x_sp_prod_update(sc);
return 0;
@@ -2435,6 +2456,7 @@ static int bnx2x_alloc_mem(struct bnx2x_softc *sc)
static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc)
{
+ bnx2x_dma_free(&sc->fw_stats_dma);
sc->fw_stats_num = 0;
sc->fw_stats_req_size = 0;
@@ -4523,7 +4545,7 @@ static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
return rc;
}
-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp)
{
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
@@ -4538,14 +4560,14 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
- if (scan_fp) {
+ if (rte_atomic32_read(&sc->scan_fp) == 1) {
if (bnx2x_has_rx_work(fp)) {
more_rx = bnx2x_rxeof(sc, fp);
}
if (more_rx) {
/* still more work to do */
- bnx2x_handle_fp_tq(fp, scan_fp);
+ bnx2x_handle_fp_tq(fp);
return;
}
}
@@ -4561,7 +4583,7 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
* then calls a separate routine to handle the various
* interrupt causes: link, RX, and TX.
*/
-int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
+int bnx2x_intr_legacy(struct bnx2x_softc *sc)
{
struct bnx2x_fastpath *fp;
uint32_t status, mask;
@@ -4593,7 +4615,7 @@ int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
/* acknowledge and disable further fastpath interrupts */
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
0, IGU_INT_DISABLE, 0);
- bnx2x_handle_fp_tq(fp, scan_fp);
+ bnx2x_handle_fp_tq(fp);
status &= ~mask;
}
}
@@ -8081,6 +8103,27 @@ static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
}
+ val = sc->devinfo.bc_ver >> 8;
+ if (val < BNX2X_BC_VER) {
+ /* for now only warn; later we might need to enforce this */
+ PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC\n",
+ BNX2X_BC_VER, val);
+ }
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY :
+ 0;
+
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
/* get the initial value of the link params */
sc->link_params.multi_phy_config =
SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
@@ -11741,13 +11784,13 @@ static const char *get_bnx2x_flags(uint32_t flags)
for (i = 0; i < 5; i++)
if (flags & (1 << i)) {
- strcat(flag_str, flag[i]);
+ strlcat(flag_str, flag[i], sizeof(flag_str));
flags ^= (1 << i);
}
if (flags) {
static char unknown[BNX2X_INFO_STR_MAX];
snprintf(unknown, 32, "Unknown flag mask %x", flags);
- strcat(flag_str, unknown);
+ strlcat(flag_str, unknown, sizeof(flag_str));
}
return flag_str;
}
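bnx2x_dma_alloc() now records the backing memzone so the new bnx2x_dma_free() can release it (see the fw_stats teardown above) instead of leaking it. A generic sketch of that owner-tracking idiom, with malloc/free standing in for rte_memzone_reserve()/rte_memzone_free():

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* DMA handle that remembers its backing allocation, mirroring the
 * `mzone` member added to struct bnx2x_dma. */
struct dma {
	uint64_t paddr;
	void *vaddr;
	const void *zone; /* opaque owner token; NULL == not allocated */
};

static int dma_alloc(struct dma *d, size_t size)
{
	void *z = malloc(size); /* stand-in for rte_memzone_reserve() */

	if (z == NULL)
		return -1;
	d->vaddr = z;
	d->paddr = (uintptr_t)z; /* fake IOVA for the sketch */
	d->zone = z;             /* remember the owner for dma_free() */
	return 0;
}

static void dma_free(struct dma *d)
{
	if (d->zone == NULL) /* safe on never-allocated handles */
		return;
	free((void *)(uintptr_t)d->zone);
	memset(d, 0, sizeof(*d)); /* clear paddr/vaddr/zone together */
}

int main(void)
{
	struct dma d = { 0, NULL, NULL };

	dma_alloc(&d, 4096);
	dma_free(&d); /* releases the zone */
	dma_free(&d); /* second call is a harmless no-op */
	return 0;
}
```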
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 32a12294..ef1688ff 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -119,6 +119,8 @@ int bnx2x_ilog2(int x)
#define ilog2(x) bnx2x_ilog2(x)
#endif
+#define BNX2X_BC_VER 0x040200
+
#include "ecore_sp.h"
struct bnx2x_device_type {
@@ -319,6 +321,7 @@ struct bnx2x_dma {
rte_iova_t paddr;
void *vaddr;
int nseg;
+ const void *mzone;
char msg[RTE_MEMZONE_NAMESIZE - 6];
};
@@ -1089,7 +1092,7 @@ struct bnx2x_softc {
#define PERIODIC_STOP 0
#define PERIODIC_GO 1
volatile unsigned long periodic_flags;
-
+ rte_atomic32_t scan_fp;
struct bnx2x_fastpath fp[MAX_RSS_CHAINS];
struct bnx2x_sp_objs sp_objs[MAX_RSS_CHAINS];
@@ -1753,7 +1756,7 @@ int bnx2x_cmpxchg(volatile int *addr, int old, int new);
int bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size,
struct bnx2x_dma *dma, const char *msg, uint32_t align);
-
+void bnx2x_dma_free(struct bnx2x_dma *dma);
uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type);
uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode);
uint32_t bnx2x_dmae_opcode(struct bnx2x_softc *sc, uint8_t src_type,
@@ -1938,7 +1941,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0);
uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp);
void bnx2x_print_adapter_info(struct bnx2x_softc *sc);
void bnx2x_print_device_info(struct bnx2x_softc *sc);
-int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp);
+int bnx2x_intr_legacy(struct bnx2x_softc *sc);
void bnx2x_link_status_update(struct bnx2x_softc *sc);
int bnx2x_complete_sp(struct bnx2x_softc *sc);
int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index cc7816dd..c628cdc0 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -107,14 +107,15 @@ bnx2x_link_update(struct rte_eth_dev *dev)
}
static void
-bnx2x_interrupt_action(struct rte_eth_dev *dev)
+bnx2x_interrupt_action(struct rte_eth_dev *dev, int intr_cxt)
{
struct bnx2x_softc *sc = dev->data->dev_private;
uint32_t link_status;
- bnx2x_intr_legacy(sc, 0);
+ bnx2x_intr_legacy(sc);
- if (sc->periodic_flags & PERIODIC_GO)
+ if ((atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO) &&
+ !intr_cxt)
bnx2x_periodic_callout(sc);
link_status = REG_RD(sc, sc->link_params.shmem_base +
offsetof(struct shmem_region,
@@ -131,9 +132,7 @@ bnx2x_interrupt_handler(void *param)
PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled");
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
- bnx2x_interrupt_action(dev);
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
+ bnx2x_interrupt_action(dev, 1);
rte_intr_enable(&sc->pci_dev->intr_handle);
}
@@ -144,7 +143,7 @@ static void bnx2x_periodic_start(void *param)
int ret = 0;
atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
- bnx2x_interrupt_action(dev);
+ bnx2x_interrupt_action(dev, 0);
if (IS_PF(sc)) {
ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
bnx2x_periodic_start, (void *)dev);
@@ -164,6 +163,8 @@ void bnx2x_periodic_stop(void *param)
atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
+
+ PMD_DRV_LOG(DEBUG, sc, "Periodic poll stopped");
}
/*
@@ -180,8 +181,10 @@ bnx2x_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE(sc);
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ dev->data->mtu = sc->mtu;
+ }
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
@@ -210,6 +213,7 @@ bnx2x_dev_configure(struct rte_eth_dev *dev)
return -ENXIO;
}
+ bnx2x_dev_rxtx_init_dummy(dev);
return 0;
}
@@ -222,8 +226,10 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE(sc);
/* start the periodic callout */
- if (sc->periodic_flags & PERIODIC_STOP)
+ if (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP) {
bnx2x_periodic_start(dev);
+ PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started");
+ }
ret = bnx2x_init(sc);
if (ret) {
@@ -239,11 +245,7 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed");
}
- ret = bnx2x_dev_rx_init(dev);
- if (ret != 0) {
- PMD_DRV_LOG(DEBUG, sc, "bnx2x_dev_rx_init returned error code");
- return -3;
- }
+ bnx2x_dev_rxtx_init(dev);
bnx2x_print_device_info(sc);
@@ -258,6 +260,8 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE(sc);
+ bnx2x_dev_rxtx_init_dummy(dev);
+
if (IS_PF(sc)) {
rte_intr_disable(&sc->pci_dev->intr_handle);
rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index ca28aacc..e5a2b25b 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -311,7 +311,6 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_bd_tail = 0;
txq->tx_bd_head = 0;
txq->nb_tx_avail = txq->nb_tx_desc;
- dev->tx_pkt_burst = bnx2x_xmit_pkts;
dev->data->tx_queues[queue_idx] = txq;
if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues;
@@ -441,14 +440,26 @@ next_rx:
return nb_rx;
}
-int
-bnx2x_dev_rx_init(struct rte_eth_dev *dev)
+static uint16_t
+bnx2x_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
{
- dev->rx_pkt_burst = bnx2x_recv_pkts;
-
return 0;
}
+void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev)
+{
+ dev->rx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+ dev->tx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+}
+
+void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev)
+{
+ dev->rx_pkt_burst = bnx2x_recv_pkts;
+ dev->tx_pkt_burst = bnx2x_xmit_pkts;
+}
+
void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
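Swapping in no-op burst callbacks while the port is being configured or stopped means a datapath thread racing dev_stop() simply sees zero packets instead of touching half-torn-down rings. A sketch of that swap with stand-in types (the real callback slots live in struct rte_eth_dev):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint16_t (*burst_fn)(void *q, void **pkts, uint16_t n);

/* Stand-in for the rx/tx callback slots in struct rte_eth_dev. */
struct ethdev {
	burst_fn rx_pkt_burst;
	burst_fn tx_pkt_burst;
};

/* Shared no-op burst: reports "nothing done" without touching rings. */
static uint16_t rxtx_pkts_dummy(void *q, void **pkts, uint16_t n)
{
	(void)q; (void)pkts; (void)n;
	return 0;
}

static uint16_t real_burst(void *q, void **pkts, uint16_t n)
{
	(void)q; (void)pkts;
	return n; /* pretend the whole burst moved */
}

/* dev_configure()/dev_stop() park the datapath behind the dummies... */
static void dev_quiesce(struct ethdev *dev)
{
	dev->rx_pkt_burst = rxtx_pkts_dummy;
	dev->tx_pkt_burst = rxtx_pkts_dummy;
}

/* ...and dev_start() re-arms it only once the rings are ready. */
static void dev_arm(struct ethdev *dev)
{
	dev->rx_pkt_burst = real_burst;
	dev->tx_pkt_burst = real_burst;
}

int main(void)
{
	struct ethdev dev;

	dev_quiesce(&dev);
	printf("stopped: %u\n", dev.rx_pkt_burst(NULL, NULL, 32)); /* 0 */
	dev_arm(&dev);
	printf("running: %u\n", dev.rx_pkt_burst(NULL, NULL, 32)); /* 32 */
	return 0;
}
```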
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index 6ad4928c..3f4692b4 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -74,7 +74,8 @@ int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
void bnx2x_dev_rx_queue_release(void *rxq);
void bnx2x_dev_tx_queue_release(void *txq);
-int bnx2x_dev_rx_init(struct rte_eth_dev *dev);
+void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev);
+void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev);
void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);
#endif /* _BNX2X_RXTX_H_ */
diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h
index 1192e5dd..74189eed 100644
--- a/drivers/net/bnx2x/ecore_hsi.h
+++ b/drivers/net/bnx2x/ecore_hsi.h
@@ -3545,7 +3545,7 @@ struct igu_regular
#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
#define IGU_REGULAR_CLEANUP_SET (0x1<<30) /* BitField sb_id_and_flags */
#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
-#define IGU_REGULAR_BCLEANUP (0x1<<31) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_BCLEANUP (0x1U<<31) /* BitField sb_id_and_flags */
#define IGU_REGULAR_BCLEANUP_SHIFT 31
uint32_t reserved_2;
};
diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h
index d69e857b..7af9a2d8 100644
--- a/drivers/net/bnx2x/ecore_reg.h
+++ b/drivers/net/bnx2x/ecore_reg.h
@@ -1981,7 +1981,7 @@
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
-#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1U<<31)
#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index 6d2bb815..43194095 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -291,25 +291,33 @@ static int ecore_state_wait(struct bnx2x_softc *sc, int state,
cnt *= 20;
ECORE_MSG(sc, "waiting for state to become %d", state);
+ /* being over protective to remind bnx2x_intr_legacy() to
+ * process RAMROD
+ */
+ rte_atomic32_set(&sc->scan_fp, 1);
ECORE_MIGHT_SLEEP();
while (cnt--) {
- bnx2x_intr_legacy(sc, 1);
+ bnx2x_intr_legacy(sc);
if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
+ rte_atomic32_set(&sc->scan_fp, 0);
return ECORE_SUCCESS;
}
ECORE_WAIT(sc, delay_us);
- if (sc->panic)
+ if (sc->panic) {
+ rte_atomic32_set(&sc->scan_fp, 0);
return ECORE_IO;
+ }
}
/* timeout! */
PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
+ rte_atomic32_set(&sc->scan_fp, 0);
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index f295bf5a..7126097d 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -151,14 +151,15 @@ typedef rte_spinlock_t ECORE_MUTEX_SPIN;
} \
} while (0)
-#define ECORE_ILT_FREE(x, y, size) \
- do { \
- if (x) { \
- rte_free(x); \
- x = NULL; \
- y = 0; \
- } \
- } while (0)
+#define ECORE_ILT_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ bnx2x_dma_free((struct bnx2x_dma *)x); \
+ rte_free(x); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
#define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index c8b08bc3..dd70ac6c 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -440,7 +440,7 @@ struct elink_params {
#define ELINK_EEE_MODE_OUTPUT_TIME (1 << 28)
#define ELINK_EEE_MODE_OVERRIDE_NVRAM (1 << 29)
#define ELINK_EEE_MODE_ENABLE_LPI (1 << 30)
-#define ELINK_EEE_MODE_ADV_LPI (1 << 31)
+#define ELINK_EEE_MODE_ADV_LPI (1U << 31)
uint16_t hw_led_mode; /* part of the hw_config read from the shmem */
uint32_t multi_phy_config;
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index f75b0ad3..5535c376 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -263,7 +263,7 @@ struct bnxt {
#define BNXT_FLAG_TRUSTED_VF_EN (1 << 11)
#define BNXT_FLAG_DFLT_VNIC_SET (1 << 12)
#define BNXT_FLAG_NEW_RM (1 << 30)
-#define BNXT_FLAG_INIT_DONE (1 << 31)
+#define BNXT_FLAG_INIT_DONE (1U << 31)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 801c6ffa..e26b9e3c 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -2649,7 +2649,7 @@ static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
return -ERANGE;
}
win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
- rte_cpu_to_le_32(rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off));
+ rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
return 0;
}
@@ -2680,10 +2680,10 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
static void bnxt_unmap_ptp_regs(struct bnxt *bp)
{
- rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
- BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16));
- rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
- BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20));
+ rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
+ rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
}
static uint64_t bnxt_cc_read(struct bnxt *bp)
@@ -2733,8 +2733,8 @@ static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
return -EAGAIN;
port_id = pf->port_id;
- rte_cpu_to_le_32(rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
- ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]));
+ rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
@@ -3242,10 +3242,8 @@ skip_init:
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(WARNING,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(WARNING,
- "Using rte_mem_virt2iova()\n");
+ PMD_DRV_LOG(INFO,
+ "Memzone physical address same as virtual using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
PMD_DRV_LOG(ERR,
@@ -3548,7 +3546,7 @@ static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver bnxt_rte_pmd = {
.id_table = bnxt_pci_id_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
- RTE_PCI_DRV_INTR_LSC,
+ RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_IOVA_AS_VA,
.probe = bnxt_pci_probe,
.remove = bnxt_pci_remove,
};
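The bnxt cleanups above drop rte_cpu_to_le_32() wrapped around rte_write32() calls: rte_write32() returns void and already performs the little-endian store itself, so the outer conversion converted nothing. A sketch of the intended pattern, with a local stand-in for the MMIO helper:

```c
#include <stdint.h>

/* Local stand-in: like rte_write32(), it takes a CPU-order value,
 * performs the little-endian store itself, and returns void. */
static inline void fake_write32(uint32_t value, volatile void *addr)
{
	*(volatile uint32_t *)addr = value;
}

int main(void)
{
	static uint32_t bar[8]; /* pretend MMIO window */

	/* Right: pass the plain CPU-order value. */
	fake_write32(0x1u << 2, &bar[4]);

	/* Wrong (the removed pattern):
	 *     rte_cpu_to_le_32(rte_write32(val, addr));
	 * rte_write32() yields void, so there is nothing to convert,
	 * and pre-swapping `val` would double-swap on big-endian CPUs. */
	return (int)bar[4] - 4; /* 0 on success */
}
```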
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 5345d393..17e2909a 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -100,7 +100,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
}
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
- PMD_DRV_LOG(ERR, "pools = %u nb_q_per_grp = %u\n", pools, nb_q_per_grp);
+ PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
+ pools, nb_q_per_grp);
start_grp_id = 0;
end_grp_id = nb_q_per_grp;
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 1bfc63d9..dc695e17 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -154,7 +154,7 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
if (tpa_start1->flags2 &
rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
if (likely(tpa_start1->flags2 &
rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
@@ -437,7 +437,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
(RX_PKT_CMPL_METADATA_VID_MASK |
RX_PKT_CMPL_METADATA_DE |
RX_PKT_CMPL_METADATA_PRI_MASK);
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
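When hardware strips the VLAN tag into mbuf->vlan_tci, the PMD must report both facts: the tci field is valid (PKT_RX_VLAN) and the tag is no longer present in the packet data (PKT_RX_VLAN_STRIPPED); the hunks above add the second flag. A toy illustration with stand-in flag values (not the real PKT_RX_* constants):

```c
#include <stdint.h>
#include <stdio.h>

#define RX_VLAN          (1u << 0) /* vlan_tci field is valid */
#define RX_VLAN_STRIPPED (1u << 1) /* tag removed from packet data */

struct mbuf {
	uint32_t ol_flags;
	uint16_t vlan_tci;
};

/* Completion-record handling: HW stripped the tag, so report both. */
static void fill_vlan(struct mbuf *m, uint16_t tci_from_hw)
{
	m->vlan_tci = tci_from_hw;
	m->ol_flags |= RX_VLAN | RX_VLAN_STRIPPED;
}

int main(void)
{
	struct mbuf m = { 0, 0 };

	fill_vlan(&m, 100);
	printf("flags=0x%x tci=%u\n", m.ol_flags, m.vlan_tci);
	return 0;
}
```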
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index dd847c6f..1e6a3fc7 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -664,7 +664,7 @@ max_index(uint64_t *a, int n)
* @param port_pos Port to assign.
*/
static void
-selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
+selection_logic(struct bond_dev_private *internals, uint16_t slave_id)
{
struct port *agg, *port;
uint16_t slaves_count, new_agg_id, i, j = 0;
@@ -781,16 +781,23 @@ link_speed_key(uint16_t speed) {
}
static void
-rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
+rx_machine_update(struct bond_dev_private *internals, uint16_t slave_id,
struct rte_mbuf *lacp_pkt) {
struct lacpdu_header *lacp;
+ struct lacpdu_actor_partner_params *partner;
if (lacp_pkt != NULL) {
lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
- /* This is LACP frame so pass it to rx_machine */
- rx_machine(internals, slave_id, &lacp->lacpdu);
+ partner = &lacp->lacpdu.partner;
+ if (is_same_ether_addr(&partner->port_params.system,
+ &internals->mode4.mac_addr)) {
+ /* This LACP frame is addressed to the bonding port
+ * so pass it to rx_machine.
+ */
+ rx_machine(internals, slave_id, &lacp->lacpdu);
+ }
rte_pktmbuf_free(lacp_pkt);
} else
rx_machine(internals, slave_id, NULL);
@@ -805,8 +812,8 @@ bond_mode_8023ad_periodic_cb(void *arg)
struct rte_eth_link link_info;
struct ether_addr slave_addr;
struct rte_mbuf *lacp_pkt = NULL;
-
- uint8_t i, slave_id;
+ uint16_t slave_id;
+ uint16_t i;
/* Update link status on each port */
@@ -1149,7 +1156,7 @@ int
bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
{
struct bond_dev_private *internals = bond_dev->data->dev_private;
- uint8_t i;
+ uint16_t i;
for (i = 0; i < internals->active_slave_count; i++)
bond_mode_8023ad_activate_slave(bond_dev,
@@ -1165,6 +1172,7 @@ bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
struct mode8023ad_private *mode4 = &internals->mode4;
static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;
+ rte_eth_macaddr_get(internals->port_id, &mode4->mac_addr);
if (mode4->slowrx_cb)
return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
bond_dev);
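bond_mode_8023ad_start() now snapshots the bond's own MAC, and rx_machine_update() only feeds a LACPDU into the RX state machine when the frame's partner system address names that MAC, filtering out LACPDUs meant for other ports; the slave-id widening to uint16_t in the same file tracks RTE_MAX_ETHPORTS growing past 255. A minimal sketch of the address check (the struct and helper below are stand-ins for ether_addr/is_same_ether_addr):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ether_addr { uint8_t b[6]; };

/* Stand-in for is_same_ether_addr(). */
static int same_addr(const struct ether_addr *a, const struct ether_addr *b)
{
	return memcmp(a->b, b->b, 6) == 0;
}

/* The LACPDU's "partner" block echoes who the sender believes its
 * peer is; only frames naming this bond's MAC drive the RX machine. */
static int lacpdu_is_for_us(const struct ether_addr *partner_system,
			    const struct ether_addr *bond_mac)
{
	return same_addr(partner_system, bond_mac);
}

int main(void)
{
	struct ether_addr bond  = { { 2, 0, 0, 0, 0, 1 } };
	struct ether_addr stray = { { 2, 0, 0, 0, 0, 9 } };

	printf("ours=%d stray=%d\n",
	       lacpdu_is_for_us(&bond, &bond),
	       lacpdu_is_for_us(&stray, &bond));
	return 0;
}
```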
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index c51426b8..f91902eb 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -150,6 +150,7 @@ struct mode8023ad_private {
uint64_t update_timeout_us;
rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
uint8_t external_sm;
+ struct ether_addr mac_addr;
struct rte_eth_link slave_link;
/***< slave link properties */
diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c
index c3891c7e..d3e16d4b 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.c
+++ b/drivers/net/bonding/rte_eth_bond_alb.c
@@ -18,10 +18,10 @@ simple_hash(uint8_t *hash_start, int hash_size)
return hash;
}
-static uint8_t
+static uint16_t
calculate_slave(struct bond_dev_private *internals)
{
- uint8_t idx;
+ uint16_t idx;
idx = (internals->mode6.last_slave + 1) % internals->active_slave_count;
internals->mode6.last_slave = idx;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index ac084c4f..a23988dc 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -76,7 +76,7 @@ void
activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t active_count = internals->active_slave_count;
+ uint16_t active_count = internals->active_slave_count;
if (internals->mode == BONDING_MODE_8023AD)
bond_mode_8023ad_activate_slave(eth_dev, port_id);
@@ -490,10 +490,6 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
}
}
- /* Inherit eth dev link properties from first slave */
- link_properties_set(bonded_eth_dev,
- &(slave_eth_dev->data->dev_link));
-
/* Make primary slave */
internals->primary_port = slave_port_id;
internals->current_primary_port = slave_port_id;
@@ -800,7 +796,7 @@ rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
uint16_t len)
{
struct bond_dev_private *internals;
- uint8_t i;
+ uint16_t i;
if (valid_bonded_port_id(bonded_port_id) != 0)
return -1;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 7ed69b38..154257ff 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -353,7 +353,7 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
for (i = 0; i < nb_bufs; i++) {
/* Populate slave mbuf arrays with mbufs for that slave. */
- uint8_t slave_idx = bufs_slave_port_idxs[i];
+ uint16_t slave_idx = bufs_slave_port_idxs[i];
slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
}
@@ -404,8 +404,10 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint8_t collecting; /* current slave collecting status */
const uint8_t promisc = internals->promiscuous_en;
- uint8_t i, j, k;
uint8_t subtype;
+ uint16_t i;
+ uint16_t j;
+ uint16_t k;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
@@ -487,35 +489,31 @@ uint32_t burstnumberTX;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
static void
-arp_op_name(uint16_t arp_op, char *buf)
+arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
switch (arp_op) {
case ARP_OP_REQUEST:
- snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
+ snprintf(buf, buf_len, "%s", "ARP Request");
return;
case ARP_OP_REPLY:
- snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
+ snprintf(buf, buf_len, "%s", "ARP Reply");
return;
case ARP_OP_REVREQUEST:
- snprintf(buf, sizeof("Reverse ARP Request"), "%s",
- "Reverse ARP Request");
+ snprintf(buf, buf_len, "%s", "Reverse ARP Request");
return;
case ARP_OP_REVREPLY:
- snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
- "Reverse ARP Reply");
+ snprintf(buf, buf_len, "%s", "Reverse ARP Reply");
return;
case ARP_OP_INVREQUEST:
- snprintf(buf, sizeof("Peer Identify Request"), "%s",
- "Peer Identify Request");
+ snprintf(buf, buf_len, "%s", "Peer Identify Request");
return;
case ARP_OP_INVREPLY:
- snprintf(buf, sizeof("Peer Identify Reply"), "%s",
- "Peer Identify Reply");
+ snprintf(buf, buf_len, "%s", "Peer Identify Reply");
return;
default:
break;
}
- snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
+ snprintf(buf, buf_len, "%s", "Unknown");
return;
}
#endif
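The arp_op_name() change above replaces sizeof("literal"), which measures the message rather than the destination, with a caller-supplied buffer size, so snprintf() can truncate safely for any buffer. A compact sketch:

```c
#include <stdio.h>

/* The buffer size comes from the caller, so the helper is safe for
 * any destination; the names mirror the bonding debug helper. */
static void arp_op_name(unsigned int op, char *buf, size_t buf_len)
{
	const char *name;

	switch (op) {
	case 1:  name = "ARP Request"; break; /* ARP_OP_REQUEST */
	case 2:  name = "ARP Reply";   break; /* ARP_OP_REPLY */
	default: name = "Unknown";     break;
	}
	snprintf(buf, buf_len, "%s", name); /* always NUL-terminated */
}

int main(void)
{
	char small[8];

	arp_op_name(1, small, sizeof(small));
	printf("%s\n", small); /* truncated to "ARP Req", but safely */
	return 0;
}
```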
@@ -619,7 +617,8 @@ mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
- arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
+ arp_op_name(rte_be_to_cpu_16(arp_h->arp_op),
+ ArpOp, sizeof(ArpOp));
MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
}
#endif
@@ -774,7 +773,7 @@ ipv6_hash(struct ipv6_hdr *ipv6_hdr)
void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves)
+ uint16_t slave_count, uint16_t *slaves)
{
struct ether_hdr *eth_hdr;
uint32_t hash;
@@ -791,7 +790,7 @@ burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves)
+ uint16_t slave_count, uint16_t *slaves)
{
uint16_t i;
struct ether_hdr *eth_hdr;
@@ -829,7 +828,7 @@ burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves)
+ uint16_t slave_count, uint16_t *slaves)
{
struct ether_hdr *eth_hdr;
uint16_t proto;
@@ -899,7 +898,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
struct bwg_slave {
uint64_t bwg_left_int;
uint64_t bwg_left_remainder;
- uint8_t slave;
+ uint16_t slave;
};
void
@@ -952,11 +951,12 @@ bond_ethdev_update_tlb_slave_cb(void *arg)
struct bond_dev_private *internals = arg;
struct rte_eth_stats slave_stats;
struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
- uint8_t slave_count;
+ uint16_t slave_count;
uint64_t tx_bytes;
uint8_t update_stats = 0;
- uint8_t i, slave_id;
+ uint16_t slave_id;
+ uint16_t i;
internals->slave_update_idx++;
@@ -1243,7 +1243,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
for (i = 0; i < nb_bufs; i++) {
/* Populate slave mbuf arrays with mbufs for that slave. */
- uint8_t slave_idx = bufs_slave_port_idxs[i];
+ uint16_t slave_idx = bufs_slave_port_idxs[i];
slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
}
@@ -1298,9 +1298,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint16_t i;
- if (unlikely(nb_bufs == 0))
- return 0;
-
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
slave_count = internals->active_slave_count;
@@ -1310,6 +1307,30 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
memcpy(slave_port_ids, internals->active_slaves,
sizeof(slave_port_ids[0]) * slave_count);
+ /* Check for LACP control packets and send if available */
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
+ struct rte_mbuf *ctrl_pkt = NULL;
+
+ if (likely(rte_ring_empty(port->tx_ring)))
+ continue;
+
+ if (rte_ring_dequeue(port->tx_ring,
+ (void **)&ctrl_pkt) != -ENOENT) {
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, &ctrl_pkt, 1);
+ /*
+ * Re-enqueue LAG control plane packets to the buffering
+ * ring if transmission fails, so the packet is not lost.
+ */
+ if (slave_tx_count != 1)
+ rte_ring_enqueue(port->tx_ring, ctrl_pkt);
+ }
+ }
+
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
@@ -1319,7 +1340,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
slave_port_ids[i];
}
- if (likely(dist_slave_count > 1)) {
+ if (likely(dist_slave_count > 0)) {
/*
* Populate slaves mbuf with the packets which are to be sent
@@ -1333,7 +1354,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
* Populate slave mbuf arrays with mbufs for that
* slave
*/
- uint8_t slave_idx = bufs_slave_port_idxs[i];
+ uint16_t slave_idx = bufs_slave_port_idxs[i];
slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
bufs[i];
@@ -1365,27 +1386,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
}
}
- /* Check for LACP control packets and send if available */
- for (i = 0; i < slave_count; i++) {
- struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
- struct rte_mbuf *ctrl_pkt = NULL;
-
- if (likely(rte_ring_empty(port->tx_ring)))
- continue;
-
- if (rte_ring_dequeue(port->tx_ring,
- (void **)&ctrl_pkt) != -ENOENT) {
- slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
- bd_tx_q->queue_id, &ctrl_pkt, 1);
- /*
- * re-enqueue LAG control plane packets to buffering
- * ring if transmission fails so the packet isn't lost.
- */
- if (slave_tx_count != 1)
- rte_ring_enqueue(port->tx_ring, ctrl_pkt);
- }
- }
-
return total_tx_count;
}
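
Reviewer note: draining each slave's tx_ring of LACPDUs now happens before the data path and before the nb_bufs == 0 early return, so control traffic keeps flowing on an otherwise idle bond and the 802.3ad state machines cannot time out. The resulting control flow, condensed into a sketch (send_pending_lacpdus() and distribute_and_send() are hypothetical helpers standing in for the inlined loops):

    static uint16_t
    tx_8023ad_sketch(struct bond_tx_queue *bd_tx_q, struct rte_mbuf **bufs,
    		uint16_t nb_bufs)
    {
    	/* 1. Always drain queued LACP control packets first. */
    	send_pending_lacpdus(bd_tx_q);		/* hypothetical */

    	/* 2. Only then take the idle-queue fast path. */
    	if (unlikely(nb_bufs == 0))
    		return 0;

    	/* 3. Distribute data packets over the distributing slaves. */
    	return distribute_and_send(bd_tx_q, bufs, nb_bufs);	/* hypothetical */
    }

The dist_slave_count check also relaxes from > 1 to > 0: a bond with a single distributing slave must still transmit.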
@@ -1396,8 +1396,9 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
- uint8_t tx_failed_flag = 0, num_of_slaves;
uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint8_t tx_failed_flag = 0;
+ uint16_t num_of_slaves;
uint16_t max_nb_of_tx_pkts = 0;
@@ -1449,7 +1450,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
return max_nb_of_tx_pkts;
}
-void
+static void
link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
{
struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
@@ -1474,7 +1475,7 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
}
}
-int
+static int
link_properties_valid(struct rte_eth_dev *ethdev,
struct rte_eth_link *slave_link)
{
@@ -1948,7 +1949,7 @@ void
slave_remove(struct bond_dev_private *internals,
struct rte_eth_dev *slave_eth_dev)
{
- uint8_t i;
+ uint16_t i;
for (i = 0; i < internals->slave_count; i++)
if (internals->slaves[i].port_id ==
@@ -2124,7 +2125,7 @@ out_err:
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
- uint8_t i;
+ uint16_t i;
if (dev->data->rx_queues != NULL) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -2147,7 +2148,7 @@ void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t i;
+ uint16_t i;
if (internals->mode == BONDING_MODE_8023AD) {
struct port *port;
@@ -2196,7 +2197,7 @@ void
bond_ethdev_close(struct rte_eth_dev *dev)
{
struct bond_dev_private *internals = dev->data->dev_private;
- uint8_t bond_port_id = internals->port_id;
+ uint16_t bond_port_id = internals->port_id;
int skipped = 0;
struct rte_flow_error ferror;
@@ -2228,6 +2229,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
uint16_t max_nb_rx_queues = UINT16_MAX;
uint16_t max_nb_tx_queues = UINT16_MAX;
+ uint16_t max_rx_desc_lim = UINT16_MAX;
+ uint16_t max_tx_desc_lim = UINT16_MAX;
dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
@@ -2241,7 +2244,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
*/
if (internals->slave_count > 0) {
struct rte_eth_dev_info slave_info;
- uint8_t idx;
+ uint16_t idx;
for (idx = 0; idx < internals->slave_count; idx++) {
rte_eth_dev_info_get(internals->slaves[idx].port_id,
@@ -2252,6 +2255,12 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (slave_info.max_tx_queues < max_nb_tx_queues)
max_nb_tx_queues = slave_info.max_tx_queues;
+
+ if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
+ max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
+
+ if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
+ max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
}
}
@@ -2263,10 +2272,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
memcpy(&dev_info->default_txconf, &internals->default_txconf,
sizeof(dev_info->default_txconf));
- memcpy(&dev_info->rx_desc_lim, &internals->rx_desc_lim,
- sizeof(dev_info->rx_desc_lim));
- memcpy(&dev_info->tx_desc_lim, &internals->tx_desc_lim,
- sizeof(dev_info->tx_desc_lim));
+ dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
+ dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
/**
* If dedicated hw queues enabled for link bonding device in LACP mode
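
Reviewer note: instead of reporting its own static defaults, the bond now advertises the strictest rx/tx descriptor limits found among its slaves, so a ring size accepted at the bond level cannot later be rejected by a slave during queue setup. The aggregation is a simple min-fold over rte_eth_dev_info_get() results, as in the hunk above:

    uint16_t max_rx_desc_lim = UINT16_MAX;
    uint16_t idx;

    for (idx = 0; idx < internals->slave_count; idx++) {
    	struct rte_eth_dev_info slave_info;

    	rte_eth_dev_info_get(internals->slaves[idx].port_id, &slave_info);
    	/* keep the tightest limit seen so far */
    	if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
    		max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
    }
    dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;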
@@ -2593,6 +2600,9 @@ bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
case BONDING_MODE_TLB:
case BONDING_MODE_ALB:
default:
+ /* Do not touch promisc when there are no slaves, hence no primary port */
+ if (internals->slave_count == 0)
+ break;
rte_eth_promiscuous_enable(internals->current_primary_port);
}
}
@@ -2621,6 +2631,9 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
case BONDING_MODE_TLB:
case BONDING_MODE_ALB:
default:
+ /* Do not touch promisc when there are no slaves, hence no primary port */
+ if (internals->slave_count == 0)
+ break;
rte_eth_promiscuous_disable(internals->current_primary_port);
}
}
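
Reviewer note: both promiscuous paths fall through to the primary-port based modes, where the call dereferences internals->current_primary_port; with zero slaves that index is stale, so the new guard skips the call entirely. A sketch of the shared pattern:

    if (internals->slave_count == 0)
    	break;	/* no slaves, hence no primary port to configure */
    rte_eth_promiscuous_enable(internals->current_primary_port);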
@@ -2644,14 +2657,15 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
struct rte_eth_link link;
int rc = -1;
- int i, valid_slave = 0;
- uint8_t active_pos;
uint8_t lsc_flag = 0;
+ int valid_slave = 0;
+ uint16_t active_pos;
+ uint16_t i;
if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
return rc;
- bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
+ bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
if (check_for_bonded_ethdev(bonded_eth_dev))
return rc;
@@ -2687,16 +2701,6 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
if (active_pos < internals->active_slave_count)
goto link_update;
- /* if no active slave ports then set this port to be primary port */
- if (internals->active_slave_count < 1) {
- /* If first active slave, then change link status */
- bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
- internals->current_primary_port = port_id;
- lsc_flag = 1;
-
- mac_address_slaves_update(bonded_eth_dev);
- }
-
/* check link state properties if bonded link is up*/
if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
if (link_properties_valid(bonded_eth_dev, &link) != 0)
@@ -2708,9 +2712,24 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
link_properties_set(bonded_eth_dev, &link);
}
+ /* If no active slave ports then set this port to be
+ * the primary port.
+ */
+ if (internals->active_slave_count < 1) {
+ /* If first active slave, then change link status */
+ bonded_eth_dev->data->dev_link.link_status =
+ ETH_LINK_UP;
+ internals->current_primary_port = port_id;
+ lsc_flag = 1;
+
+ mac_address_slaves_update(bonded_eth_dev);
+ }
+
activate_slave(bonded_eth_dev, port_id);
- /* If user has defined the primary port then default to using it */
+ /* If the user has defined the primary port then default to
+ * using it.
+ */
if (internals->user_defined_primary_port &&
internals->primary_port == port_id)
bond_ethdev_primary_set(internals, port_id);
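
Reviewer note: electing the first active slave as primary now happens after the link-properties check rather than before it, so a slave whose speed or duplex conflicts with an already-up bond can no longer be promoted first and rejected afterwards. A simplified sketch of the reordered handling (error paths condensed):

    if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
    	/* reject a slave with incompatible link properties early */
    	if (link_properties_valid(bonded_eth_dev, &link) != 0)
    		return rc;
    } else {
    	/* the first slave up defines the bond's link properties */
    	link_properties_set(bonded_eth_dev, &link);
    }
    if (internals->active_slave_count < 1) {
    	/* only a validated slave becomes the first primary */
    	internals->current_primary_port = port_id;
    	mac_address_slaves_update(bonded_eth_dev);
    }
    activate_slave(bonded_eth_dev, port_id);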
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 3ea5d686..8afef39b 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -100,7 +100,7 @@ struct rte_flow {
};
typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves);
+ uint16_t slave_count, uint16_t *slaves);
/** Link Bonding PMD device private configuration Structure */
struct bond_dev_private {
@@ -222,13 +222,6 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
void
activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
-void
-link_properties_set(struct rte_eth_dev *bonded_eth_dev,
- struct rte_eth_link *slave_dev_link);
-int
-link_properties_valid(struct rte_eth_dev *bonded_eth_dev,
- struct rte_eth_link *slave_dev_link);
-
int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr);
@@ -263,15 +256,15 @@ slave_add(struct bond_dev_private *internals,
void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves);
+ uint16_t slave_count, uint16_t *slaves);
void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves);
+ uint16_t slave_count, uint16_t *slaves);
void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves);
+ uint16_t slave_count, uint16_t *slaves);
void
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 701e0b1f..774dd082 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -4246,7 +4246,7 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
V_FW_CMD_EXEC(0) |
V_FW_VI_MAC_CMD_VIID(viid));
raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
- c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
+ c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0U) |
raw |
V_FW_CMD_LEN16(1));
diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
index 5f5cbe04..f5f027a2 100644
--- a/drivers/net/cxgbe/base/t4_pci_id_tbl.h
+++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Chelsio Communications.
+ * Copyright(c) 2014-2019 Chelsio Communications.
* All rights reserved.
*/
@@ -103,6 +103,12 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x5019), /* T540-LP-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501a), /* T540-SO-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501b), /* T540-SO-CR */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
@@ -116,19 +122,63 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
+ CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
+ CH_PCI_ID_TABLE_FENTRY(0x509A), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509B), /* Custom T540-CR LOM */
+ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR SFP+ LOM */
+ CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR SFP+ */
+ CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T580-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a5), /* Custom T522-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50a6), /* Custom T522-BT-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50a7), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a8), /* Custom T580-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50af), /* Custom T580-KR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50b0), /* Custom T520-CR-LOM */
/* T6 adapter */
CH_PCI_ID_TABLE_FENTRY(0x6001), /* T6225-CR */
CH_PCI_ID_TABLE_FENTRY(0x6002), /* T6225-SO-CR */
CH_PCI_ID_TABLE_FENTRY(0x6003), /* T6425-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6004), /* T6425-SO-CR */
CH_PCI_ID_TABLE_FENTRY(0x6005), /* T6225-OCP */
+ CH_PCI_ID_TABLE_FENTRY(0x6006), /* T62100-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x6007), /* T62100-LP-CR */
CH_PCI_ID_TABLE_FENTRY(0x6008), /* T62100-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6009), /* T6210-BT */
CH_PCI_ID_TABLE_FENTRY(0x600d), /* T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6011), /* T6225-LL-CR */
CH_PCI_ID_TABLE_FENTRY(0x6014), /* T61100-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6015), /* T6201-BT */
CH_PCI_ID_TABLE_FENTRY(0x6080), /* Custom T6225-CR SFP28 */
CH_PCI_ID_TABLE_FENTRY(0x6081), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index eb58f880..cf262134 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -52,7 +52,7 @@
#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up"
-bool force_linkup(struct adapter *adap);
+bool cxgbe_force_linkup(struct adapter *adap);
int cxgbe_probe(struct adapter *adapter);
int cxgbevf_probe(struct adapter *adapter);
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
@@ -65,19 +65,17 @@ void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats);
void cxgbe_stats_reset(struct port_info *pi);
int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
unsigned int cnt, struct t4_completion *c);
-int link_start(struct port_info *pi);
-void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
- unsigned int cnt, unsigned int size, unsigned int iqe_size);
-int setup_sge_fwevtq(struct adapter *adapter);
-int setup_sge_ctrl_txq(struct adapter *adapter);
-void cfg_queues(struct rte_eth_dev *eth_dev);
-int cfg_queue_count(struct rte_eth_dev *eth_dev);
-int init_rss(struct adapter *adap);
-int setup_rss(struct port_info *pi);
+int cxgbe_link_start(struct port_info *pi);
+int cxgbe_setup_sge_fwevtq(struct adapter *adapter);
+int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter);
+void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev);
+int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev);
+int cxgbe_init_rss(struct adapter *adap);
+int cxgbe_setup_rss(struct port_info *pi);
void cxgbe_enable_rx_queues(struct port_info *pi);
-void print_port_info(struct adapter *adap);
-void print_adapter_info(struct adapter *adap);
+void cxgbe_print_port_info(struct adapter *adap);
+void cxgbe_print_adapter_info(struct adapter *adap);
int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key);
-void configure_max_ethqsets(struct adapter *adapter);
+void cxgbe_configure_max_ethqsets(struct adapter *adapter);
#endif /* _CXGBE_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index b2f83ea3..7babdfb4 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -200,7 +200,8 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
/* Exit if link status changed or always forced up */
- if (pi->link_cfg.link_ok != old_link || force_linkup(adapter))
+ if (pi->link_cfg.link_ok != old_link ||
+ cxgbe_force_linkup(adapter))
break;
if (!wait_to_complete)
@@ -209,7 +210,7 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
}
- new_link.link_status = force_linkup(adapter) ?
+ new_link.link_status = cxgbe_force_linkup(adapter) ?
ETH_LINK_UP : pi->link_cfg.link_ok;
new_link.link_autoneg = pi->link_cfg.autoneg;
new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -356,7 +357,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
cxgbe_enable_rx_queues(pi);
- err = setup_rss(pi);
+ err = cxgbe_setup_rss(pi);
if (err)
goto out;
@@ -372,7 +373,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
goto out;
}
- err = link_start(pi);
+ err = cxgbe_link_start(pi);
if (err)
goto out;
@@ -412,18 +413,18 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
CXGBE_FUNC_TRACE();
if (!(adapter->flags & FW_QUEUE_BOUND)) {
- err = setup_sge_fwevtq(adapter);
+ err = cxgbe_setup_sge_fwevtq(adapter);
if (err)
return err;
adapter->flags |= FW_QUEUE_BOUND;
if (is_pf4(adapter)) {
- err = setup_sge_ctrl_txq(adapter);
+ err = cxgbe_setup_sge_ctrl_txq(adapter);
if (err)
return err;
}
}
- err = cfg_queue_count(eth_dev);
+ err = cxgbe_cfg_queue_count(eth_dev);
if (err)
return err;
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 5fa6cdd0..6a3cbc1e 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -116,7 +116,7 @@ out:
/**
* Setup sge control queues to pass control information.
*/
-int setup_sge_ctrl_txq(struct adapter *adapter)
+int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int err = 0, i = 0;
@@ -190,7 +190,7 @@ int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int ms,
return -ETIMEDOUT;
}
-int setup_sge_fwevtq(struct adapter *adapter)
+int cxgbe_setup_sge_fwevtq(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int err = 0;
@@ -465,7 +465,7 @@ static inline bool is_x_10g_port(const struct link_config *lc)
return high_speeds != 0;
}
-inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
unsigned int us, unsigned int cnt,
unsigned int size, unsigned int iqe_size)
{
@@ -475,7 +475,7 @@ inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
q->size = size;
}
-int cfg_queue_count(struct rte_eth_dev *eth_dev)
+int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adap = pi->adapter;
@@ -502,7 +502,7 @@ int cfg_queue_count(struct rte_eth_dev *eth_dev)
return 0;
}
-void cfg_queues(struct rte_eth_dev *eth_dev)
+void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
{
struct rte_config *config = rte_eal_get_configuration();
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -596,7 +596,7 @@ static void setup_memwin(struct adapter *adap)
MEMWIN_NIC));
}
-int init_rss(struct adapter *adap)
+int cxgbe_init_rss(struct adapter *adap)
{
unsigned int i;
@@ -623,7 +623,7 @@ int init_rss(struct adapter *adap)
/**
* Dump basic information about the adapter.
*/
-void print_adapter_info(struct adapter *adap)
+void cxgbe_print_adapter_info(struct adapter *adap)
{
/**
* Hardware/Firmware/etc. Version/Revision IDs.
@@ -631,7 +631,7 @@ void print_adapter_info(struct adapter *adap)
t4_dump_version_info(adap);
}
-void print_port_info(struct adapter *adap)
+void cxgbe_print_port_info(struct adapter *adap)
{
int i;
char buf[80];
@@ -779,7 +779,7 @@ static void configure_pcie_ext_tag(struct adapter *adapter)
}
/* Figure out how many Queue Sets we can support */
-void configure_max_ethqsets(struct adapter *adapter)
+void cxgbe_configure_max_ethqsets(struct adapter *adapter)
{
unsigned int ethqsets;
@@ -1268,7 +1268,7 @@ static int adap_init0(struct adapter *adap)
t4_init_tp_params(adap);
configure_pcie_ext_tag(adap);
configure_vlan_types(adap);
- configure_max_ethqsets(adap);
+ cxgbe_configure_max_ethqsets(adap);
adap->params.drv_memwin = MEMWIN_NIC;
adap->flags |= FW_OK;
@@ -1322,7 +1322,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
pi->port_id, pi->mod_type);
}
-inline bool force_linkup(struct adapter *adap)
+bool cxgbe_force_linkup(struct adapter *adap)
{
struct rte_pci_device *pdev = adap->pdev;
@@ -1340,7 +1340,7 @@ inline bool force_linkup(struct adapter *adap)
*
* Performs the MAC and PHY actions needed to enable a port.
*/
-int link_start(struct port_info *pi)
+int cxgbe_link_start(struct port_info *pi)
{
struct adapter *adapter = pi->adapter;
u64 conf_offloads;
@@ -1382,7 +1382,7 @@ int link_start(struct port_info *pi)
true, true, false);
}
- if (ret == 0 && force_linkup(adapter))
+ if (ret == 0 && cxgbe_force_linkup(adapter))
pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
return ret;
}
@@ -1490,7 +1490,7 @@ int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
* We always configure the RSS mapping for all ports since the mapping
* table has plenty of entries.
*/
-int setup_rss(struct port_info *pi)
+int cxgbe_setup_rss(struct port_info *pi)
{
int j, err;
struct adapter *adapter = pi->adapter;
@@ -1864,10 +1864,10 @@ allocate_mac:
}
}
- cfg_queues(adapter->eth_dev);
+ cxgbe_cfg_queues(adapter->eth_dev);
- print_adapter_info(adapter);
- print_port_info(adapter);
+ cxgbe_print_adapter_info(adapter);
+ cxgbe_print_port_info(adapter);
adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
adapter->clipt_end);
@@ -1909,7 +1909,7 @@ allocate_mac:
"Maskless filter support disabled. Continuing\n");
}
- err = init_rss(adapter);
+ err = cxgbe_init_rss(adapter);
if (err)
goto out_free;
diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c
index 61bd8519..c46bc98a 100644
--- a/drivers/net/cxgbe/cxgbevf_main.c
+++ b/drivers/net/cxgbe/cxgbevf_main.c
@@ -50,7 +50,7 @@ static void size_nports_qsets(struct adapter *adapter)
adapter->params.nports = pmask_nports;
}
- configure_max_ethqsets(adapter);
+ cxgbe_configure_max_ethqsets(adapter);
if (adapter->sge.max_ethqsets < adapter->params.nports) {
dev_warn(adapter->pdev_dev, "only using %d of %d available"
" virtual interfaces (too few Queue Sets)\n",
@@ -268,16 +268,16 @@ allocate_mac:
}
}
- cfg_queues(adapter->eth_dev);
- print_adapter_info(adapter);
- print_port_info(adapter);
+ cxgbe_cfg_queues(adapter->eth_dev);
+ cxgbe_print_adapter_info(adapter);
+ cxgbe_print_port_info(adapter);
adapter->mpstcam = t4_init_mpstcam(adapter);
if (!adapter->mpstcam)
dev_warn(adapter,
"VF could not allocate mps tcam table. Continuing\n");
- err = init_rss(adapter);
+ err = cxgbe_init_rss(adapter);
if (err)
goto out_free;
return 0;
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index f9d2d48a..663c0a79 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1604,6 +1604,52 @@ static inline void rspq_next(struct sge_rspq *q)
}
}
+static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
+ uint64_t ol_flags)
+{
+ pkt->packet_type |= ptype;
+ pkt->ol_flags |= ol_flags;
+}
+
+static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
+ const struct cpl_rx_pkt *cpl,
+ struct rte_mbuf *pkt)
+{
+ bool csum_ok;
+ u16 err_vec;
+
+ if (adap->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
+ else
+ err_vec = ntohs(cpl->err_vec);
+
+ csum_ok = cpl->csum_calc && !err_vec;
+
+ if (cpl->vlan_ex)
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ else
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
+
+ if (cpl->l2info & htonl(F_RXF_IP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_IP6))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+
+ if (cpl->l2info & htonl(F_RXF_TCP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_UDP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+}
+
/**
* process_responses - process responses from an SGE response queue
* @q: the ingress queue to process
@@ -1655,8 +1701,6 @@ static int process_responses(struct sge_rspq *q, int budget,
(const void *)&q->cur_desc[1];
struct rte_mbuf *pkt, *npkt;
u32 len, bufsz;
- bool csum_ok;
- u16 err_vec;
rc = (const struct rsp_ctrl *)
((const char *)q->cur_desc +
@@ -1673,16 +1717,6 @@ static int process_responses(struct sge_rspq *q, int budget,
len = G_RSPD_LEN(len);
pkt->pkt_len = len;
- /* Compressed error vector is enabled for
- * T6 only
- */
- if (q->adapter->params.tp.rx_pkt_encap)
- err_vec = G_T6_COMPR_RXERR_VEC(
- ntohs(cpl->err_vec));
- else
- err_vec = ntohs(cpl->err_vec);
- csum_ok = cpl->csum_calc && !err_vec;
-
/* Chain mbufs into len if necessary */
while (len) {
struct rte_mbuf *new_pkt = rsd->buf;
@@ -1700,20 +1734,7 @@ static int process_responses(struct sge_rspq *q, int budget,
npkt->next = NULL;
pkt->nb_segs--;
- if (cpl->l2info & htonl(F_RXF_IP)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV4;
- if (unlikely(!csum_ok))
- pkt->ol_flags |=
- PKT_RX_IP_CKSUM_BAD;
-
- if ((cpl->l2info &
- htonl(F_RXF_UDP | F_RXF_TCP)) &&
- !csum_ok)
- pkt->ol_flags |=
- PKT_RX_L4_CKSUM_BAD;
- } else if (cpl->l2info & htonl(F_RXF_IP6)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV6;
- }
+ cxgbe_fill_mbuf_info(q->adapter, cpl, pkt);
if (!rss_hdr->filter_tid &&
rss_hdr->hash_type) {
@@ -1722,11 +1743,8 @@ static int process_responses(struct sge_rspq *q, int budget,
ntohl(rss_hdr->hash_val);
}
- if (cpl->vlan_ex) {
- pkt->ol_flags |= PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED;
+ if (cpl->vlan_ex)
pkt->vlan_tci = ntohs(cpl->vlan);
- }
rte_pktmbuf_adj(pkt, s->pktshift);
rxq->stats.pkts++;
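
Reviewer note: factoring the flag logic into cxgbe_fill_mbuf_info() also upgrades what gets reported: the old inline code only ever set the *_CKSUM_BAD flags and an L3 packet type, while the helper now reports good checksums and L2/L4 types as well. Because cxgbe_set_mbuf_info() OR-accumulates, the per-layer calls compose; a sketch of the outcome for a clean IPv4/TCP frame, reusing the patch's helper:

    static void
    fill_example(struct rte_mbuf *pkt, bool csum_ok)
    {
    	cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
    	cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
    			csum_ok ? PKT_RX_IP_CKSUM_GOOD : PKT_RX_IP_CKSUM_BAD);
    	cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
    			csum_ok ? PKT_RX_L4_CKSUM_GOOD : PKT_RX_L4_CKSUM_BAD);
    	/* pkt->packet_type == L2_ETHER | L3_IPV4 | L4_TCP afterwards */
    }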
diff --git a/drivers/net/dpaa2/dpaa2_pmd_logs.h b/drivers/net/dpaa2/dpaa2_pmd_logs.h
index c04babdb..c47ba8e1 100644
--- a/drivers/net/dpaa2/dpaa2_pmd_logs.h
+++ b/drivers/net/dpaa2/dpaa2_pmd_logs.h
@@ -1,5 +1,4 @@
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 NXP
*/
diff --git a/drivers/net/e1000/base/e1000_82575.h b/drivers/net/e1000/base/e1000_82575.h
index 4133cdd8..6f2b22c1 100644
--- a/drivers/net/e1000/base/e1000_82575.h
+++ b/drivers/net/e1000/base/e1000_82575.h
@@ -383,7 +383,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_ETQF_FILTER_ENABLE (1 << 26)
#define E1000_ETQF_IMM_INT (1 << 29)
#define E1000_ETQF_1588 (1 << 30)
-#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
+#define E1000_ETQF_QUEUE_ENABLE (1U << 31)
/*
* ETQF filter list: one static filter per filter consumer. This is
* to avoid filter collisions later. Add new filters
@@ -410,7 +410,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
#define E1000_DTXSWC_LLE_SHIFT 16
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1U << 31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
diff --git a/drivers/net/e1000/base/e1000_ich8lan.c b/drivers/net/e1000/base/e1000_ich8lan.c
index 92ab6fc6..2654a18a 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.c
+++ b/drivers/net/e1000/base/e1000_ich8lan.c
@@ -5166,7 +5166,7 @@ STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Device Status */
if (hw->mac.type == e1000_ich8lan) {
reg = E1000_READ_REG(hw, E1000_STATUS);
- reg &= ~(1 << 31);
+ reg &= ~(1U << 31);
E1000_WRITE_REG(hw, E1000_STATUS, reg);
}
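
Reviewer note: these e1000 hunks all fix the same C pitfall: 1 << 31 shifts a signed int into its sign bit, which is undefined behavior, and the resulting negative value can sign-extend when mixed into wider unsigned expressions. The unsigned literal keeps everything defined:

    #include <stdint.h>

    static uint32_t
    clear_bit31(uint32_t reg)
    {
    	/* (1 << 31) is UB on a 32-bit int; (1U << 31) is a well defined
    	 * 0x80000000 with no sign extension.
    	 */
    	return reg & ~(1U << 31);
    }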
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index 023fe751..a21205af 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -490,15 +490,15 @@ enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
ENETC_RTBLENR_LEN(rx_ring->bd_count));
rx_ring->mb_pool = mb_pool;
- /* enable ring */
- enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
- enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
rx_ring->rcir = (void *)((size_t)hw->reg +
ENETC_BDR(RX, idx, ENETC_RBCIR));
enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
RTE_PKTMBUF_HEADROOM);
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
+ /* enable ring */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}
static int
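
Reviewer note: the enable write (ENETC_RBMR_EN) moves from first to last: buffers are posted and the buffer-size register (ENETC_RBBSR) is programmed before the ring is switched on, so the hardware can never fetch a descriptor that has no valid buffer behind it. The safe ordering, condensed:

    /* program everything, then flip the enable bit last */
    enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); /* post buffers */
    enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);          /* buffer size */
    enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);      /* enable last */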
diff --git a/drivers/net/enetc/enetc_rxtx.c b/drivers/net/enetc/enetc_rxtx.c
index 631e2430..ce5a542a 100644
--- a/drivers/net/enetc/enetc_rxtx.c
+++ b/drivers/net/enetc/enetc_rxtx.c
@@ -49,11 +49,16 @@ enetc_xmit_pkts(void *tx_queue,
uint16_t nb_pkts)
{
struct enetc_swbd *tx_swbd;
- int i, start;
+ int i, start, bds_to_use;
struct enetc_tx_bd *txbd;
struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;
i = tx_ring->next_to_use;
+
+ bds_to_use = enetc_bd_unused(tx_ring);
+ if (bds_to_use < nb_pkts)
+ nb_pkts = bds_to_use;
+
start = 0;
while (nb_pkts--) {
enetc_clean_tx_ring(tx_ring);
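
Reviewer note: clamping nb_pkts to enetc_bd_unused(tx_ring) stops the transmit loop from wrapping onto descriptors the hardware has not completed yet. For a circular ring with producer/consumer indices, the free-slot count is the classic computation below (an assumed typical implementation; the driver's real helper may differ in detail):

    /* free BDs between producer (next_to_use) and consumer (next_to_clean),
     * keeping one slot empty to distinguish full from empty
     */
    static int
    ring_unused(int next_to_clean, int next_to_use, int bd_count)
    {
    	if (next_to_clean > next_to_use)
    		return next_to_clean - next_to_use - 1;
    	return bd_count + next_to_clean - next_to_use - 1;
    }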
@@ -88,8 +93,9 @@ enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
rx_swbd = &rx_ring->q_swbd[i];
rxbd = ENETC_RXBD(*rx_ring, i);
for (j = 0; j < buff_cnt; j++) {
- rx_swbd->buffer_addr =
- rte_cpu_to_le_64(rte_mbuf_raw_alloc(rx_ring->mb_pool));
+ rx_swbd->buffer_addr = (void *)(uintptr_t)
+ rte_cpu_to_le_64((uint64_t)(uintptr_t)
+ rte_pktmbuf_alloc(rx_ring->mb_pool));
rxbd->w.addr = (uint64_t)(uintptr_t)
rx_swbd->buffer_addr->buf_addr +
rx_swbd->buffer_addr->data_off;
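
Reviewer note: the refill fix switches to rte_pktmbuf_alloc(), which returns a fully initialized mbuf, and makes the pointer/integer conversions around rte_cpu_to_le_64() explicit instead of implicitly converting a pointer to an integer. buffer_addr is software bookkeeping (the mbuf pointer), while rxbd->w.addr is what the hardware reads; on a little-endian host the net effect reduces to the sketch below (error handling omitted):

    struct rte_mbuf *m = rte_pktmbuf_alloc(rx_ring->mb_pool);

    rx_swbd->buffer_addr = m;	/* software bookkeeping: the mbuf itself */
    rxbd->w.addr = (uint64_t)(uintptr_t)m->buf_addr +
    		m->data_off;	/* the address the hardware uses */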
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 7bca3cad..377f607f 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -78,8 +78,8 @@ struct enic_fdir {
u32 modes;
u32 types_mask;
void (*copy_fltr_fn)(struct filter_v2 *filt,
- struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks);
+ const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks);
};
struct enic_soft_stats {
@@ -201,8 +201,8 @@ struct enic {
/* Compute ethdev's max packet size from MTU */
static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
{
- /* ethdev max size includes eth and crc whereas NIC MTU does not */
- return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ /* ethdev max size includes the Ethernet header, whereas the NIC MTU does not */
+ return mtu + ETHER_HDR_LEN;
}
/* Get the CQ index from a Start of Packet(SOP) RQ index */
@@ -340,9 +340,5 @@ int enic_link_update(struct enic *enic);
bool enic_use_vector_rx_handler(struct enic *enic);
void enic_fdir_info(struct enic *enic);
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
-void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks);
-void copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks);
extern const struct rte_flow_ops enic_flow_ops;
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 9e9e548c..48c8e626 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -36,6 +36,13 @@
#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX
+static void copy_fltr_v1(struct filter_v2 *fltr,
+ const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks);
+static void copy_fltr_v2(struct filter_v2 *fltr,
+ const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks);
+
void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
*stats = enic->fdir.stats;
@@ -79,9 +86,9 @@ enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
* without advanced filter support.
*/
-void
-copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- __rte_unused struct rte_eth_fdir_masks *masks)
+static void
+copy_fltr_v1(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
+ __rte_unused const struct rte_eth_fdir_masks *masks)
{
fltr->type = FILTER_IPV4_5TUPLE;
fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
@@ -104,9 +111,9 @@ copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
/* Copy Flow Director filter to a VIC generic filter (requires advanced
* filter support.
*/
-void
-copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks)
+static void
+copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks)
{
struct filter_generic_1 *gp = &fltr->u.generic_1;
@@ -163,9 +170,11 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
sctp_val.tag = input->flow.sctp4_flow.verify_tag;
}
- /* v4 proto should be 132, override ip4_flow.proto */
- input->flow.ip4_flow.proto = 132;
-
+ /*
+ * Unlike UDP/TCP (FILTER_GENERIC_1_{UDP,TCP}), the firmware
+ * has no "packet is SCTP" flag. Use flag=0 (generic L4) and
+ * manually set proto_id=sctp below.
+ */
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
&sctp_val, sizeof(struct sctp_hdr));
}
@@ -189,6 +198,10 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
if (input->flow.ip4_flow.proto) {
ip4_mask.next_proto_id = masks->ipv4_mask.proto;
ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+ /* Explicitly match the SCTP protocol number */
+ ip4_mask.next_proto_id = 0xff;
+ ip4_val.next_proto_id = IPPROTO_SCTP;
}
if (input->flow.ip4_flow.src_ip) {
ip4_mask.src_addr = masks->ipv4_mask.src_ip;
@@ -251,9 +264,6 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
sctp_val.tag = input->flow.sctp6_flow.verify_tag;
}
- /* v4 proto should be 132, override ipv6_flow.proto */
- input->flow.ipv6_flow.proto = 132;
-
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
&sctp_val, sizeof(struct sctp_hdr));
}
@@ -269,6 +279,10 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
if (input->flow.ipv6_flow.proto) {
ipv6_mask.proto = masks->ipv6_mask.proto;
ipv6_val.proto = input->flow.ipv6_flow.proto;
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+ /* See comments for IPv4 SCTP above. */
+ ipv6_mask.proto = 0xff;
+ ipv6_val.proto = IPPROTO_SCTP;
}
memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
sizeof(ipv6_mask.src_addr));
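
Reviewer note: the deleted lines wrote 132 straight into input->flow.*.proto, mutating the caller's fdir request — something the new const-qualified signatures forbid. The replacement expresses the same constraint inside the filter itself: when the flow type is SCTP but the user left proto zero, the match on the IP protocol byte is added explicitly:

    if (input->flow.ip4_flow.proto) {
    	ip4_mask.next_proto_id = masks->ipv4_mask.proto;
    	ip4_val.next_proto_id = input->flow.ip4_flow.proto;
    } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
    	ip4_mask.next_proto_id = 0xff;		/* match the whole byte */
    	ip4_val.next_proto_id = IPPROTO_SCTP;	/* 132 */
    }

Without this, an SCTP flow spec with proto = 0 would match any L4 protocol that happened to carry the same port numbers.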
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index bb9ed037..dbc8de83 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -23,33 +23,54 @@
rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
fmt "\n", ##args)
+/*
+ * Common arguments passed to copy_item functions. Use this structure
+ * so we can easily add new arguments.
+ * item: Item specification.
+ * filter: Partially filled in NIC filter structure.
+ * inner_ofst: If zero, this is an outer header. If non-zero, this is
+ * the offset into L5 where the header begins.
+ * l2_proto_off: offset to the EtherType field in the eth or vlan header.
+ * l3_proto_off: offset to the next-protocol field in the IPv4 or IPv6 header.
+ */
+struct copy_item_args {
+ const struct rte_flow_item *item;
+ struct filter_v2 *filter;
+ uint8_t *inner_ofst;
+ uint8_t l2_proto_off;
+ uint8_t l3_proto_off;
+ struct enic *enic;
+};
+
+/* functions for copying items into enic filters */
+typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
+
/** Info about how to copy items into enic filters. */
struct enic_items {
/** Function for copying and validating an item. */
- int (*copy_item)(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst);
+ enic_copy_item_fn *copy_item;
/** List of valid previous items. */
const enum rte_flow_item_type * const prev_items;
/** True if it's OK for this item to be the first item. For some NIC
* versions, it's invalid to start the stack above layer 3.
*/
const u8 valid_start_item;
+ /* Inner packet version of copy_item. */
+ enic_copy_item_fn *inner_copy_item;
};
/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
/** list of valid items and their handlers and attributes. */
const struct enic_items *item_info;
+ /* Max type in the above list, used to detect unsupported types */
+ enum rte_flow_item_type max_item_type;
};
/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
struct filter_action_v2 *enic_action);
-/* functions for copying items into enic filters */
-typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst);
-
/** Action capabilities for various NICs. */
struct enic_action_cap {
/** list of valid actions */
@@ -70,8 +91,13 @@ static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
-static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
+static enic_copy_item_fn enic_copy_item_inner_eth_v2;
+static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
+static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
+static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
+static enic_copy_item_fn enic_copy_item_inner_udp_v2;
+static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;
@@ -86,6 +112,7 @@ static const struct enic_items enic_items_v1[] = {
.prev_items = (const enum rte_flow_item_type[]) {
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.copy_item = enic_copy_item_udp_v1,
@@ -94,6 +121,7 @@ static const struct enic_items enic_items_v1[] = {
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.copy_item = enic_copy_item_tcp_v1,
@@ -102,6 +130,7 @@ static const struct enic_items enic_items_v1[] = {
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
};
@@ -117,6 +146,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_VXLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_eth_v2,
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
.copy_item = enic_copy_item_vlan_v2,
@@ -125,6 +155,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_vlan_v2,
},
[RTE_FLOW_ITEM_TYPE_IPV4] = {
.copy_item = enic_copy_item_ipv4_v2,
@@ -134,6 +165,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_ipv4_v2,
},
[RTE_FLOW_ITEM_TYPE_IPV6] = {
.copy_item = enic_copy_item_ipv6_v2,
@@ -143,6 +175,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_ipv6_v2,
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.copy_item = enic_copy_item_udp_v2,
@@ -152,6 +185,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_udp_v2,
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.copy_item = enic_copy_item_tcp_v2,
@@ -161,6 +195,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_tcp_v2,
},
[RTE_FLOW_ITEM_TYPE_SCTP] = {
.copy_item = enic_copy_item_sctp_v2,
@@ -170,6 +205,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
[RTE_FLOW_ITEM_TYPE_VXLAN] = {
.copy_item = enic_copy_item_vxlan_v2,
@@ -178,6 +214,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
};
@@ -190,6 +227,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_VXLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_eth_v2,
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
.copy_item = enic_copy_item_vlan_v2,
@@ -198,6 +236,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_vlan_v2,
},
[RTE_FLOW_ITEM_TYPE_IPV4] = {
.copy_item = enic_copy_item_ipv4_v2,
@@ -207,6 +246,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_ipv4_v2,
},
[RTE_FLOW_ITEM_TYPE_IPV6] = {
.copy_item = enic_copy_item_ipv6_v2,
@@ -216,6 +256,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_ipv6_v2,
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.copy_item = enic_copy_item_udp_v2,
@@ -225,6 +266,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_udp_v2,
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.copy_item = enic_copy_item_tcp_v2,
@@ -234,15 +276,17 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_tcp_v2,
},
[RTE_FLOW_ITEM_TYPE_SCTP] = {
.copy_item = enic_copy_item_sctp_v2,
- .valid_start_item = 1,
+ .valid_start_item = 0,
.prev_items = (const enum rte_flow_item_type[]) {
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
[RTE_FLOW_ITEM_TYPE_VXLAN] = {
.copy_item = enic_copy_item_vxlan_v2,
@@ -251,6 +295,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
};
@@ -258,12 +303,15 @@ static const struct enic_items enic_items_v3[] = {
static const struct enic_filter_cap enic_filter_cap[] = {
[FILTER_IPV4_5TUPLE] = {
.item_info = enic_items_v1,
+ .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
},
[FILTER_USNIC_IP] = {
.item_info = enic_items_v2,
+ .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
},
[FILTER_DPDK_1] = {
.item_info = enic_items_v3,
+ .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
},
};
@@ -330,20 +378,11 @@ mask_exact_match(const u8 *supported, const u8 *supplied,
return 1;
}
-/**
- * Copy IPv4 item into version 1 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Should always be 0 for version 1.
- */
static int
-enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
@@ -354,9 +393,6 @@ enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
FLOW_TRACE();
- if (*inner_ofst)
- return ENOTSUP;
-
if (!mask)
mask = &rte_flow_item_ipv4_mask;
@@ -380,20 +416,11 @@ enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
return 0;
}
-/**
- * Copy UDP item into version 1 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Should always be 0 for version 1.
- */
static int
-enic_copy_item_udp_v1(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_udp_v1(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
@@ -404,9 +431,6 @@ enic_copy_item_udp_v1(const struct rte_flow_item *item,
FLOW_TRACE();
- if (*inner_ofst)
- return ENOTSUP;
-
if (!mask)
mask = &rte_flow_item_udp_mask;
@@ -431,20 +455,11 @@ enic_copy_item_udp_v1(const struct rte_flow_item *item,
return 0;
}
-/**
- * Copy TCP item into version 1 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Should always be 0 for version 1.
- */
static int
-enic_copy_item_tcp_v1(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
@@ -455,9 +470,6 @@ enic_copy_item_tcp_v1(const struct rte_flow_item *item,
FLOW_TRACE();
- if (*inner_ofst)
- return ENOTSUP;
-
if (!mask)
mask = &rte_flow_item_tcp_mask;
@@ -482,21 +494,150 @@ enic_copy_item_tcp_v1(const struct rte_flow_item *item,
return 0;
}
-/**
- * Copy ETH item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * If zero, this is an outer header. If non-zero, this is the offset into L5
- * where the header begins.
+/*
+ * The common 'copy' function for all inner packet patterns. Patterns are
+ * first appended to the L5 pattern buffer. Then, since the NIC filter
+ * API has no special support for inner packet matching at the moment,
+ * we set EtherType and IP proto as necessary.
*/
static int
-enic_copy_item_eth_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
+ const void *val, const void *mask, uint8_t val_size,
+ uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
+{
+ uint8_t *l5_mask, *l5_val;
+ uint8_t start_off;
+
+ /* Reject if there is no space left in the L5 pattern buffer. */
+ start_off = *inner_ofst;
+ if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
+ l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
+ /* Copy the pattern into the L5 buffer. */
+ if (val) {
+ memcpy(l5_mask + start_off, mask, val_size);
+ memcpy(l5_val + start_off, val, val_size);
+ }
+ /* Set the protocol field in the previous header. */
+ if (proto_off) {
+ void *m, *v;
+
+ m = l5_mask + proto_off;
+ v = l5_val + proto_off;
+ if (proto_size == 1) {
+ *(uint8_t *)m = 0xff;
+ *(uint8_t *)v = (uint8_t)proto_val;
+ } else if (proto_size == 2) {
+ *(uint16_t *)m = 0xffff;
+ *(uint16_t *)v = proto_val;
+ }
+ }
+ /* All inner headers land in L5 buffer even if their spec is null. */
+ *inner_ofst += val_size;
+ return 0;
+}
+
+static int
+enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ arg->l2_proto_off = *off + offsetof(struct ether_hdr, ether_type);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct ether_hdr),
+ 0 /* no previous protocol */, 0, 0);
+}
+
+static int
+enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+ uint8_t eth_type_off;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ /* Append vlan header to L5 and set ether type = TPID */
+ eth_type_off = arg->l2_proto_off;
+ arg->l2_proto_off = *off + offsetof(struct vlan_hdr, eth_proto);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct vlan_hdr),
+ eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
+}
+
+static int
+enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ /* Append ipv4 header to L5 and set ether type = ipv4 */
+ arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct ipv4_hdr),
+ arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
+}
+
+static int
+enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ /* Append ipv6 header to L5 and set ether type = ipv6 */
+ arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct ipv6_hdr),
+ arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
+}
+
+static int
+enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ /* Append udp header to L5 and set ip proto = udp */
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct udp_hdr),
+ arg->l3_proto_off, IPPROTO_UDP, 1);
+}
+
+static int
+enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ /* Append tcp header to L5 and set ip proto = tcp */
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct tcp_hdr),
+ arg->l3_proto_off, IPPROTO_TCP, 1);
+}
+
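
Reviewer note: copy_inner_common() appends each inner header at *inner_ofst, bumps the offset, and lets the caller record where the previous layer's protocol field landed (l2_proto_off / l3_proto_off) so that the next layer can patch it. How the offsets evolve for an inner eth -> ipv4 -> udp pattern, sketched as a layout comment (byte offsets from the standard header sizes):

    /* L5 key buffer after inner eth -> ipv4 -> udp:
     *
     *   0              14              34          42
     *   | ether_hdr(14)| ipv4_hdr(20)  | udp_hdr(8)|
     *
     * after eth : *inner_ofst = 14, l2_proto_off = 12 (ether_type)
     * after ipv4: *inner_ofst = 34, l3_proto_off = 14 + 9 (next_proto_id);
     *             bytes at l2_proto_off forced to ETHER_TYPE_IPv4
     * after udp : *inner_ofst = 42; byte at l3_proto_off forced to
     *             IPPROTO_UDP
     */

All of this must fit in FILTER_GENERIC_1_KEY_LEN, which is why the function returns ENOTSUP once the buffer is exhausted.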
+static int
+enic_copy_item_eth_v2(struct copy_item_args *arg)
+{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
struct ether_hdr enic_spec;
struct ether_hdr enic_mask;
const struct rte_flow_item_eth *spec = item->spec;
@@ -524,45 +665,24 @@ enic_copy_item_eth_v2(const struct rte_flow_item *item,
enic_spec.ether_type = spec->type;
enic_mask.ether_type = mask->type;
- if (*inner_ofst == 0) {
- /* outer header */
- memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
- sizeof(struct ether_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
- sizeof(struct ether_hdr));
- } else {
- /* inner header */
- if ((*inner_ofst + sizeof(struct ether_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- /* Offset into L5 where inner Ethernet header goes */
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- &enic_mask, sizeof(struct ether_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- &enic_spec, sizeof(struct ether_hdr));
- *inner_ofst += sizeof(struct ether_hdr);
- }
+ /* outer header */
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
+ sizeof(struct ether_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
+ sizeof(struct ether_hdr));
return 0;
}
-/**
- * Copy VLAN item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * If zero, this is an outer header. If non-zero, this is the offset into L5
- * where the header begins.
- */
static int
-enic_copy_item_vlan_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+ struct ether_hdr *eth_mask;
+ struct ether_hdr *eth_val;
FLOW_TRACE();
@@ -573,99 +693,72 @@ enic_copy_item_vlan_v2(const struct rte_flow_item *item,
if (!mask)
mask = &rte_flow_item_vlan_mask;
- if (*inner_ofst == 0) {
- struct ether_hdr *eth_mask =
- (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
- struct ether_hdr *eth_val =
- (void *)gp->layer[FILTER_GENERIC_1_L2].val;
+ eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
+ eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
+ /* Outer TPID cannot be matched */
+ if (eth_mask->ether_type)
+ return ENOTSUP;
+ /*
+ * For recent models:
+ * When matching packets, the VIC always compares vlan-stripped
+ * L2, regardless of vlan stripping settings. So, the inner type
+ * from vlan becomes the ether type of the eth header.
+ *
+ * Older models w/o hardware vxlan parser have a different
+ * behavior when vlan stripping is disabled. In this case,
+ * vlan tag remains in the L2 buffer.
+ */
+ if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
+ struct vlan_hdr *vlan;
- /* Outer TPID cannot be matched */
- if (eth_mask->ether_type)
- return ENOTSUP;
+ vlan = (struct vlan_hdr *)(eth_mask + 1);
+ vlan->eth_proto = mask->inner_type;
+ vlan = (struct vlan_hdr *)(eth_val + 1);
+ vlan->eth_proto = spec->inner_type;
+ } else {
eth_mask->ether_type = mask->inner_type;
eth_val->ether_type = spec->inner_type;
-
- /* Outer header. Use the vlan mask/val fields */
- gp->mask_vlan = mask->tci;
- gp->val_vlan = spec->tci;
- } else {
- /* Inner header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct vlan_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct vlan_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct vlan_hdr));
- *inner_ofst += sizeof(struct vlan_hdr);
}
+ /* For TCI, use the vlan mask/val fields (little endian). */
+ gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
+ gp->val_vlan = rte_be_to_cpu_16(spec->tci);
return 0;
}
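
Reviewer note: the TCI handling is also an endianness fix: rte_flow carries tci in network byte order, while the VIC's mask_vlan/val_vlan fields are host-endian, so the values must be swapped on little-endian machines. A snippet in the context of the function above (gp as in the patch):

    /* e.g. TCI 0x0123 (VLAN ID 0x123) arrives as big-endian bytes 01 23;
     * read as a host value on a little-endian CPU that would be 0x2301,
     * so convert before writing the host-endian filter fields
     */
    gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
    gp->val_vlan  = rte_be_to_cpu_16(spec->tci);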
-/**
- * Copy IPv4 item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner IPv4 filtering.
- */
static int
-enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
FLOW_TRACE();
- if (*inner_ofst == 0) {
- /* Match IPv4 */
- gp->mask_flags |= FILTER_GENERIC_1_IPV4;
- gp->val_flags |= FILTER_GENERIC_1_IPV4;
+ /* Match IPv4 */
+ gp->mask_flags |= FILTER_GENERIC_1_IPV4;
+ gp->val_flags |= FILTER_GENERIC_1_IPV4;
- /* Match all if no spec */
- if (!spec)
- return 0;
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
- if (!mask)
- mask = &rte_flow_item_ipv4_mask;
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
- memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
- sizeof(struct ipv4_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
- sizeof(struct ipv4_hdr));
- } else {
- /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct ipv4_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct ipv4_hdr));
- *inner_ofst += sizeof(struct ipv4_hdr);
- }
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv4_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv4_hdr));
return 0;
}
-/**
- * Copy IPv6 item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner IPv6 filtering.
- */
static int
-enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
@@ -683,39 +776,18 @@ enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
if (!mask)
mask = &rte_flow_item_ipv6_mask;
- if (*inner_ofst == 0) {
- memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
- sizeof(struct ipv6_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
- sizeof(struct ipv6_hdr));
- } else {
- /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct ipv6_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct ipv6_hdr));
- *inner_ofst += sizeof(struct ipv6_hdr);
- }
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv6_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv6_hdr));
return 0;
}
-/**
- * Copy UDP item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner UDP filtering.
- */
static int
-enic_copy_item_udp_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_udp_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
@@ -733,39 +805,18 @@ enic_copy_item_udp_v2(const struct rte_flow_item *item,
if (!mask)
mask = &rte_flow_item_udp_mask;
- if (*inner_ofst == 0) {
- memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
- sizeof(struct udp_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
- sizeof(struct udp_hdr));
- } else {
- /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct udp_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct udp_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct udp_hdr));
- *inner_ofst += sizeof(struct udp_hdr);
- }
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct udp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct udp_hdr));
return 0;
}
-/**
- * Copy TCP item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner TCP filtering.
- */
static int
-enic_copy_item_tcp_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
@@ -783,47 +834,48 @@ enic_copy_item_tcp_v2(const struct rte_flow_item *item,
if (!mask)
return ENOTSUP;
- if (*inner_ofst == 0) {
- memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
- sizeof(struct tcp_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
- sizeof(struct tcp_hdr));
- } else {
- /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct tcp_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct tcp_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct tcp_hdr));
- *inner_ofst += sizeof(struct tcp_hdr);
- }
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct tcp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct tcp_hdr));
return 0;
}
-/**
- * Copy SCTP item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner SCTP filtering.
- */
static int
-enic_copy_item_sctp_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_sctp *spec = item->spec;
const struct rte_flow_item_sctp *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+ uint8_t *ip_proto_mask = NULL;
+ uint8_t *ip_proto = NULL;
FLOW_TRACE();
- if (*inner_ofst)
- return ENOTSUP;
+ /*
+ * The NIC filter API has no flags for "match sctp", so explicitly set
+ * the protocol number in the IP pattern.
+ */
+ if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
+ struct ipv4_hdr *ip;
+ ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
+ ip_proto_mask = &ip->next_proto_id;
+ ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
+ ip_proto = &ip->next_proto_id;
+ } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
+ struct ipv6_hdr *ip;
+ ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
+ ip_proto_mask = &ip->proto;
+ ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
+ ip_proto = &ip->proto;
+ } else {
+ /* Need IPv4/IPv6 pattern first */
+ return EINVAL;
+ }
+ *ip_proto = IPPROTO_SCTP;
+ *ip_proto_mask = 0xff;
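+	/* Note: this overwrites any proto value copied from the IP item itself. */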
/* Match all if no spec */
if (!spec)
@@ -839,29 +891,29 @@ enic_copy_item_sctp_v2(const struct rte_flow_item *item,
return 0;
}
-/**
- * Copy UDP item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. VxLAN headers always start at the beginning of L5.
- */
static int
-enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
+ uint8_t *inner_ofst = arg->inner_ofst;
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+ struct udp_hdr *udp;
FLOW_TRACE();
- if (*inner_ofst)
- return EINVAL;
-
+ /*
+	 * The NIC filter API has no flags for "match vxlan". Set the UDP
+	 * destination port to avoid false positives.
+ */
+ gp->mask_flags |= FILTER_GENERIC_1_UDP;
+ gp->val_flags |= FILTER_GENERIC_1_UDP;
+ udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
+ udp->dst_port = 0xffff;
+ udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
+ udp->dst_port = RTE_BE16(4789);
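+	/* 4789 is the IANA-assigned vxlan port; RTE_BE16 keeps it big-endian. */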
/* Match all if no spec */
if (!spec)
return 0;
@@ -909,6 +961,36 @@ item_stacking_valid(enum rte_flow_item_type prev_item,
return 0;
}
+/*
+ * Fix up the L5 layer. HW vxlan parsing removes the vxlan header from L5;
+ * instead it is in L4, following the UDP header. Append the vxlan
+ * pattern to L4 (udp) and shift any inner packet pattern to the start of L5.
+ */
+static void
+fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
+ uint8_t inner_ofst)
+{
+ uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
+ uint8_t inner;
+ uint8_t vxlan;
+
+ if (!(inner_ofst > 0 && enic->vxlan))
+ return;
+ FLOW_TRACE();
+ vxlan = sizeof(struct vxlan_hdr);
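+	/* Move the vxlan header pattern from the start of L5 into L4, after UDP. */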
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
+ gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
+ gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
+ inner = inner_ofst - vxlan;
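+	/* Shift the remaining inner packet pattern to the start of L5. */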
+ memset(layer, 0, sizeof(layer));
+ memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
+ memset(layer, 0, sizeof(layer));
+ memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
+}
+
/**
 * Build the internal enic filter structure from the provided pattern. The
* pattern is validated as the items are copied.
@@ -922,7 +1004,8 @@ item_stacking_valid(enum rte_flow_item_type prev_item,
*/
static int
enic_copy_filter(const struct rte_flow_item pattern[],
- const struct enic_items *items_info,
+ const struct enic_filter_cap *cap,
+ struct enic *enic,
struct filter_v2 *enic_filter,
struct rte_flow_error *error)
{
@@ -931,13 +1014,17 @@ enic_copy_filter(const struct rte_flow_item pattern[],
u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
enum rte_flow_item_type prev_item;
const struct enic_items *item_info;
-
+ struct copy_item_args args;
+ enic_copy_item_fn *copy_fn;
u8 is_first_item = 1;
FLOW_TRACE();
prev_item = 0;
+ args.filter = enic_filter;
+ args.inner_ofst = &inner_ofst;
+ args.enic = enic;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* Get info about how to validate and copy the item. If NULL
* is returned the nic does not support the item.
@@ -945,18 +1032,31 @@ enic_copy_filter(const struct rte_flow_item pattern[],
if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
continue;
- item_info = &items_info[item->type];
+ item_info = &cap->item_info[item->type];
+ if (item->type > cap->max_item_type ||
+ item_info->copy_item == NULL ||
+ (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Unsupported item.");
+ return -rte_errno;
+ }
/* check to see if item stacking is valid */
if (!item_stacking_valid(prev_item, item_info, is_first_item))
goto stacking_error;
- ret = item_info->copy_item(item, enic_filter, &inner_ofst);
+ args.item = item;
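+		/* Once inside a tunnel (inner_ofst > 0), use the inner item handlers. */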
+ copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
+ item_info->copy_item;
+ ret = copy_fn(&args);
if (ret)
goto item_not_supported;
prev_item = item->type;
is_first_item = 0;
}
+ fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);
+
return 0;
item_not_supported:
@@ -1057,12 +1157,18 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
if (overlap & MARK)
return ENOTSUP;
overlap |= MARK;
- /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
- * in the range of allows mark ids.
+ /*
+ * Map mark ID (32-bit) to filter ID (16-bit):
+ * - Reject values > 16 bits
+			 * - Filter ID 0 is reserved for filters that steer
+			 *   but do not mark. So add 1 to the mark ID to avoid
+ * using 0.
+ * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
+ * reserved for the "flag" action below.
*/
- if (mark->id >= ENIC_MAGIC_FILTER_ID)
+ if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
return EINVAL;
- enic_action->filter_id = mark->id;
+ enic_action->filter_id = mark->id + 1;
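+			/* The Rx handler reports filter_id - 1 to undo this offset. */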
enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
break;
}
@@ -1070,6 +1176,7 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
if (overlap & MARK)
return ENOTSUP;
overlap |= MARK;
+ /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
break;
@@ -1392,7 +1499,7 @@ enic_flow_parse(struct rte_eth_dev *dev,
return -rte_errno;
}
enic_filter->type = enic->flow_filter_mode;
- ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
+ ret = enic_copy_filter(pattern, enic_filter_cap, enic,
enic_filter, error);
return ret;
}
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 406f92a8..098a18d6 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1715,8 +1715,15 @@ static int enic_dev_init(struct enic *enic)
PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_MASK;
enic->overlay_offload = true;
- enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
dev_info(enic, "Overlay offload is enabled\n");
+ }
+ /*
+	 * Reset the vxlan port if HW vxlan parsing is available. The
+	 * parser is always enabled, regardless of whether overlay
+	 * offload is enabled or disabled.
+ */
+ if (enic->vxlan) {
+ enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
/*
* Reset the vxlan port to the default, as the NIC firmware
* does not reset it automatically and keeps the old setting.
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 24b2844f..78bb6b8f 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -61,10 +61,9 @@ int enic_get_vnic_config(struct enic *enic)
* and will be 0 for legacy firmware and VICs
*/
if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)
- enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4);
+ enic->max_mtu = c->max_pkt_size - ETHER_HDR_LEN;
else
- enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE
- - (ETHER_HDR_LEN + 4);
+ enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE - ETHER_HDR_LEN;
if (c->mtu == 0)
c->mtu = 1500;
diff --git a/drivers/net/enic/enic_rxtx_common.h b/drivers/net/enic/enic_rxtx_common.h
index bfbb4909..66f631df 100644
--- a/drivers/net/enic/enic_rxtx_common.h
+++ b/drivers/net/enic/enic_rxtx_common.h
@@ -226,7 +226,8 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
if (filter_id) {
pkt_flags |= PKT_RX_FDIR;
if (filter_id != ENIC_MAGIC_FILTER_ID) {
- mbuf->hash.fdir.hi = clsf_cqd->filter_id;
+ /* filter_id = mark id + 1, so subtract 1 */
+ mbuf->hash.fdir.hi = filter_id - 1;
pkt_flags |= PKT_RX_FDIR_ID;
}
}
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 1d0f09d2..fb02e115 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -134,7 +134,7 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 * So the PKT_RX_VLAN flag is always set and vlan_tci
 * is valid for each RX packet's mbuf.
*/
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mbuf->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
@@ -295,7 +295,7 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 * So the PKT_RX_VLAN flag is always set and vlan_tci
 * is valid for each RX packet's mbuf.
*/
- first_seg->ol_flags |= PKT_RX_VLAN;
+ first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
first_seg->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 005fda63..96b46a2b 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -52,8 +52,10 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
- PKT_RX_VLAN, PKT_RX_VLAN,
- PKT_RX_VLAN, PKT_RX_VLAN);
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dca61f03..af5e844b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -2671,11 +2671,11 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
#define I40E_PRTMAC_MACC 0x001E24E0
#define I40E_REG_MACC_25GB 0x00020000
#define I40E_REG_SPEED_MASK 0x38000000
-#define I40E_REG_SPEED_100MB 0x00000000
-#define I40E_REG_SPEED_1GB 0x08000000
-#define I40E_REG_SPEED_10GB 0x10000000
-#define I40E_REG_SPEED_20GB 0x20000000
-#define I40E_REG_SPEED_25_40GB 0x18000000
+#define I40E_REG_SPEED_0 0x00000000
+#define I40E_REG_SPEED_1 0x08000000
+#define I40E_REG_SPEED_2 0x10000000
+#define I40E_REG_SPEED_3 0x18000000
+#define I40E_REG_SPEED_4 0x20000000
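+/* The same encoding maps to different link speeds on X722, hence neutral names. */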
uint32_t link_speed;
uint32_t reg_val;
@@ -2689,26 +2689,35 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
/* Parse the link status */
switch (link_speed) {
- case I40E_REG_SPEED_100MB:
+ case I40E_REG_SPEED_0:
link->link_speed = ETH_SPEED_NUM_100M;
break;
- case I40E_REG_SPEED_1GB:
+ case I40E_REG_SPEED_1:
link->link_speed = ETH_SPEED_NUM_1G;
break;
- case I40E_REG_SPEED_10GB:
- link->link_speed = ETH_SPEED_NUM_10G;
- break;
- case I40E_REG_SPEED_20GB:
- link->link_speed = ETH_SPEED_NUM_20G;
+ case I40E_REG_SPEED_2:
+ if (hw->mac.type == I40E_MAC_X722)
+ link->link_speed = ETH_SPEED_NUM_2_5G;
+ else
+ link->link_speed = ETH_SPEED_NUM_10G;
break;
- case I40E_REG_SPEED_25_40GB:
- reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
+ case I40E_REG_SPEED_3:
+ if (hw->mac.type == I40E_MAC_X722) {
+ link->link_speed = ETH_SPEED_NUM_5G;
+ } else {
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
- if (reg_val & I40E_REG_MACC_25GB)
- link->link_speed = ETH_SPEED_NUM_25G;
+ if (reg_val & I40E_REG_MACC_25GB)
+ link->link_speed = ETH_SPEED_NUM_25G;
+ else
+ link->link_speed = ETH_SPEED_NUM_40G;
+ }
+ break;
+ case I40E_REG_SPEED_4:
+ if (hw->mac.type == I40E_MAC_X722)
+ link->link_speed = ETH_SPEED_NUM_10G;
else
- link->link_speed = ETH_SPEED_NUM_40G;
-
+ link->link_speed = ETH_SPEED_NUM_20G;
break;
default:
PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -10830,6 +10839,7 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
switch (link.link_speed) {
case ETH_SPEED_NUM_40G:
+ case ETH_SPEED_NUM_25G:
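+		/* 25G uses the same PTP increment value as 40G. */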
tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
break;
@@ -11890,16 +11900,17 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
bool is_sfp = false;
i40e_status status;
- uint8_t *data = info->data;
+ uint8_t *data;
uint32_t value = 0;
uint32_t i;
- if (!info || !info->length || !data)
+ if (!info || !info->length || !info->data)
return -EINVAL;
if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
is_sfp = true;
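+	/* Dereference info only after it has been validated above. */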
+ data = info->data;
for (i = 0; i < info->length; i++) {
u32 offset = i + info->offset;
u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
@@ -12201,8 +12212,8 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
for (n = 0; n < proto_num; n++) {
if (proto[n].proto_id != proto_id)
continue;
- strcat(name, proto[n].name);
- strcat(name, "_");
+ strlcat(name, proto[n].name, sizeof(name));
+ strlcat(name, "_", sizeof(name));
break;
}
}
@@ -12699,9 +12710,6 @@ i40e_config_rss_filter(struct i40e_pf *pf,
return -EINVAL;
}
- if (rss_info->conf.queue_num)
- return -EINVAL;
-
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
* It's necessary to calculate the actual PF queues that are configured.
*/
@@ -12744,6 +12752,8 @@ i40e_config_rss_filter(struct i40e_pf *pf,
rss_conf.rss_key = (uint8_t *)rss_key_default;
rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
+ PMD_DRV_LOG(INFO,
+			"No valid RSS key config for i40e, using default");
}
i40e_hw_rss_hash_set(pf, &rss_conf);
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 100e71cc..551f6fa6 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1724,9 +1724,8 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
- (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- }
return 0;
}
@@ -2262,11 +2261,11 @@ i40evf_dev_close(struct rte_eth_dev *dev)
*/
i40evf_dev_promiscuous_disable(dev);
i40evf_dev_allmulticast_disable(dev);
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
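+	/* Cancel the alarm first so it cannot fire during the teardown below. */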
i40evf_reset_vf(dev);
i40e_shutdown_adminq(hw);
i40evf_disable_irq0(hw);
- rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
hw->adapter_closed = 1;
}
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 3694df25..a614ec1d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -4445,6 +4445,14 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
}
+ if (rss_info->conf.queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+				   "RSS only allows one valid rule");
+ return -rte_errno;
+ }
+
/* Parse RSS related parameters from configuration */
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
return rte_flow_error_set
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 8f727fae..1489552d 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1718,7 +1718,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
(uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
int use_scattered_rx =
- ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+ (rxq->max_pkt_len > buf_size);
if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
@@ -2423,13 +2423,13 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
struct rte_eth_dev *dev;
uint16_t i;
- dev = &rte_eth_devices[txq->port_id];
-
if (!txq || !txq->sw_ring) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
return;
}
+ dev = &rte_eth_devices[txq->port_id];
+
/**
* vPMD tx will not set sw_ring's mbuf to NULL after free,
 * so the remaining mbufs need to be freed more carefully.
@@ -2708,9 +2708,8 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
RTE_PKTMBUF_HEADROOM);
/* Check if scattered RX needs to be used. */
- if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ if (rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- }
	/* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index be4a6024..a1313146 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -270,7 +270,7 @@ struct rte_pmd_i40e_pkt_template_action {
struct rte_pmd_i40e_pkt_template_input {
/** the pctype used for raw packet template */
uint16_t pctype;
- /** the buffer conatining raw packet template */
+ /** the buffer containing raw packet template */
void *packet;
/** the length of buffer with raw packet template */
uint32_t length;
@@ -314,7 +314,7 @@ struct rte_pmd_i40e_inset {
* @param conf
* Specifies configuration parameters of raw packet template filter.
* @param add
- * Speicifes an action to be taken - add or remove raw packet template filter.
+ * Specifies an action to be taken - add or remove raw packet template filter.
* @return
* - (0) if successful.
* - (-ENODEV) if *port* invalid.
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9a79d18e..46c93f59 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2029,7 +2029,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
struct ixgbe_rx_entry *next_rxe = NULL;
struct rte_mbuf *first_seg;
struct rte_mbuf *rxm;
- struct rte_mbuf *nmb;
+ struct rte_mbuf *nmb = NULL;
union ixgbe_adv_rx_desc rxd;
uint16_t data_len;
uint16_t next_id;
@@ -2853,14 +2853,14 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER;
if (hw->mac.type == ixgbe_mac_82598EB)
offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
if (ixgbe_is_vf(dev) == 0)
- offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND);
+ offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
/*
* RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index a1e9970d..9879985e 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -454,6 +454,7 @@ eth_kni_remove(struct rte_vdev_device *vdev)
struct rte_eth_dev *eth_dev;
struct pmd_internals *internals;
const char *name;
+ int ret;
name = rte_vdev_device_name(vdev);
PMD_LOG(INFO, "Un-Initializing eth_kni for %s", name);
@@ -472,7 +473,9 @@ eth_kni_remove(struct rte_vdev_device *vdev)
eth_kni_dev_stop(eth_dev);
internals = eth_dev->data->dev_private;
- rte_kni_release(internals->kni);
+ ret = rte_kni_release(internals->kni);
+ if (ret)
+		PMD_LOG(WARNING, "Unable to release kni for %s", name);
rte_eth_dev_release_port(eth_dev);
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 7f07b8dc..4bc966d5 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -81,7 +81,7 @@ static void mlx4_dev_stop(struct rte_eth_dev *dev);
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
int ret;
@@ -117,7 +117,7 @@ exit:
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
int ret;
@@ -169,7 +169,7 @@ err:
static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
if (!priv->started)
return;
@@ -194,7 +194,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
unsigned int i;
DEBUG("%p: closing device \"%s\"",
@@ -599,7 +599,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_context *ctx = NULL;
struct ibv_port_attr port_attr;
struct ibv_pd *pd = NULL;
- struct priv *priv = NULL;
+ struct mlx4_priv *priv = NULL;
struct rte_eth_dev *eth_dev = NULL;
struct ether_addr mac;
@@ -752,11 +752,11 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
* handled by rte_intr_rx_ctl().
*/
eth_dev->intr_handle = &priv->intr_handle;
- priv->dev = eth_dev;
+ priv->dev_data = eth_dev->data;
eth_dev->dev_ops = &mlx4_dev_ops;
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
- mlx4_dev_set_link_up(priv->dev);
+ mlx4_dev_set_link_up(eth_dev);
/* Update link status once if waiting for LSC. */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
mlx4_link_update(eth_dev, 0);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index e6fb934f..fc568eb3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -72,13 +72,14 @@ struct rxq;
struct txq;
struct rte_flow;
-LIST_HEAD(mlx4_dev_list, priv);
+LIST_HEAD(mlx4_dev_list, mlx4_priv);
LIST_HEAD(mlx4_mr_list, mlx4_mr);
/** Private data structure. */
-struct priv {
- LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
- struct rte_eth_dev *dev; /**< Ethernet device. */
+struct mlx4_priv {
+ LIST_ENTRY(mlx4_priv) mem_event_cb;
+ /**< Called by memory event callback. */
+ struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct ibv_context *ctx; /**< Verbs context. */
struct ibv_device_attr device_attr; /**< Device properties. */
struct ibv_pd *pd; /**< Protection Domain. */
@@ -112,11 +113,14 @@ struct priv {
/**< Configured MAC addresses. Unused entries are zeroed. */
};
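+/* priv now keeps only dev_data; derive the port id and eth dev from it. */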
+#define PORT_ID(priv) ((priv)->dev_data->port_id)
+#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
+
/* mlx4_ethdev.c */
-int mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]);
-int mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
-int mlx4_mtu_get(struct priv *priv, uint16_t *mtu);
+int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]);
+int mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
+int mlx4_mtu_get(struct mlx4_priv *priv, uint16_t *mtu);
int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
int mlx4_dev_set_link_down(struct rte_eth_dev *dev);
int mlx4_dev_set_link_up(struct rte_eth_dev *dev);
@@ -143,10 +147,10 @@ int mlx4_is_removed(struct rte_eth_dev *dev);
/* mlx4_intr.c */
-int mlx4_intr_uninstall(struct priv *priv);
-int mlx4_intr_install(struct priv *priv);
-int mlx4_rxq_intr_enable(struct priv *priv);
-void mlx4_rxq_intr_disable(struct priv *priv);
+int mlx4_intr_uninstall(struct mlx4_priv *priv);
+int mlx4_intr_install(struct mlx4_priv *priv);
+int mlx4_rxq_intr_enable(struct mlx4_priv *priv);
+void mlx4_rxq_intr_disable(struct mlx4_priv *priv);
int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 195a1b6d..084b24e4 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -59,7 +59,7 @@
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
+mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE])
{
DIR *dir;
struct dirent *dent;
@@ -146,7 +146,7 @@ try_dev_id:
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
+mlx4_ifreq(const struct mlx4_priv *priv, int req, struct ifreq *ifr)
{
int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
int ret;
@@ -176,7 +176,7 @@ mlx4_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
+mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
{
struct ifreq request;
int ret = mlx4_ifreq(priv, SIOCGIFHWADDR, &request);
@@ -199,7 +199,7 @@ mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_mtu_get(struct priv *priv, uint16_t *mtu)
+mlx4_mtu_get(struct mlx4_priv *priv, uint16_t *mtu)
{
struct ifreq request;
int ret = mlx4_ifreq(priv, SIOCGIFMTU, &request);
@@ -224,7 +224,7 @@ mlx4_mtu_get(struct priv *priv, uint16_t *mtu)
int
mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct ifreq request = { .ifr_mtu = mtu, };
int ret = mlx4_ifreq(priv, SIOCSIFMTU, &request);
@@ -248,7 +248,7 @@ mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
+mlx4_set_flags(struct mlx4_priv *priv, unsigned int keep, unsigned int flags)
{
struct ifreq request;
int ret = mlx4_ifreq(priv, SIOCGIFFLAGS, &request);
@@ -272,7 +272,7 @@ mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_dev_set_link(struct priv *priv, int up)
+mlx4_dev_set_link(struct mlx4_priv *priv, int up)
{
int err;
@@ -300,7 +300,7 @@ mlx4_dev_set_link(struct priv *priv, int up)
int
mlx4_dev_set_link_down(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
return mlx4_dev_set_link(priv, 0);
}
@@ -317,7 +317,7 @@ mlx4_dev_set_link_down(struct rte_eth_dev *dev)
int
mlx4_dev_set_link_up(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
return mlx4_dev_set_link(priv, 1);
}
@@ -345,7 +345,7 @@ enum rxmode_toggle {
static void
mlx4_rxmode_toggle(struct rte_eth_dev *dev, enum rxmode_toggle toggle)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
const char *mode;
struct rte_flow_error error;
@@ -430,7 +430,7 @@ mlx4_allmulticast_disable(struct rte_eth_dev *dev)
void
mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
if (index >= RTE_DIM(priv->mac)) {
@@ -466,7 +466,7 @@ int
mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
int ret;
@@ -503,7 +503,7 @@ mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
int
mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
unsigned int vidx = vlan_id / 64;
unsigned int vbit = vlan_id % 64;
@@ -557,7 +557,7 @@ mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
void
mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
unsigned int max;
char ifname[IF_NAMESIZE];
@@ -688,7 +688,7 @@ mlx4_stats_reset(struct rte_eth_dev *dev)
int
mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- const struct priv *priv = dev->data->dev_private;
+ const struct mlx4_priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
.cmd = ETHTOOL_GSET,
};
@@ -741,7 +741,7 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
int
mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_GPAUSEPARAM,
@@ -785,7 +785,7 @@ out:
int
mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_SPAUSEPARAM,
@@ -853,7 +853,7 @@ mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_UNKNOWN
};
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
if (dev->rx_pkt_burst == mlx4_rx_burst) {
if (priv->hw_csum_l2tun)
@@ -877,7 +877,7 @@ int
mlx4_is_removed(struct rte_eth_dev *dev)
{
struct ibv_device_attr device_attr;
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
if (mlx4_glue->query_device(priv->ctx, &device_attr) == EIO)
return 1;
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index b40e7e5c..5136d136 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -71,7 +71,7 @@ struct mlx4_flow_proc_item {
struct mlx4_drop {
struct ibv_qp *qp; /**< QP target. */
struct ibv_cq *cq; /**< CQ associated with above QP. */
- struct priv *priv; /**< Back pointer to private data. */
+ struct mlx4_priv *priv; /**< Back pointer to private data. */
uint32_t refcnt; /**< Reference count. */
};
@@ -95,7 +95,7 @@ struct mlx4_drop {
* rte_errno is set.
*/
uint64_t
-mlx4_conv_rss_types(struct priv *priv, uint64_t types, int verbs_to_dpdk)
+mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
{
enum {
INNER,
@@ -657,7 +657,7 @@ static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_prepare(struct priv *priv,
+mlx4_flow_prepare(struct mlx4_priv *priv,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -767,7 +767,7 @@ fill:
if (flow->rss)
break;
queue = action->conf;
- if (queue->index >= priv->dev->data->nb_rx_queues) {
+ if (queue->index >= ETH_DEV(priv)->data->nb_rx_queues) {
msg = "queue target index beyond number of"
" configured Rx queues";
goto exit_action_not_supported;
@@ -796,7 +796,7 @@ fill:
/* Sanity checks. */
for (i = 0; i < rss->queue_num; ++i)
if (rss->queue[i] >=
- priv->dev->data->nb_rx_queues)
+ ETH_DEV(priv)->data->nb_rx_queues)
break;
if (i != rss->queue_num) {
msg = "queue index target beyond number of"
@@ -928,7 +928,7 @@ mlx4_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}
@@ -944,7 +944,7 @@ mlx4_flow_validate(struct rte_eth_dev *dev,
* is set.
*/
static struct mlx4_drop *
-mlx4_drop_get(struct priv *priv)
+mlx4_drop_get(struct mlx4_priv *priv)
{
struct mlx4_drop *drop = priv->drop;
@@ -1020,7 +1020,7 @@ mlx4_drop_put(struct mlx4_drop *drop)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_toggle(struct priv *priv,
+mlx4_flow_toggle(struct mlx4_priv *priv,
struct rte_flow *flow,
int enable,
struct rte_flow_error *error)
@@ -1066,8 +1066,8 @@ mlx4_flow_toggle(struct priv *priv,
/* Stop at the first nonexistent target queue. */
for (i = 0; i != rss->queues; ++i)
if (rss->queue_id[i] >=
- priv->dev->data->nb_rx_queues ||
- !priv->dev->data->rx_queues[rss->queue_id[i]]) {
+ ETH_DEV(priv)->data->nb_rx_queues ||
+ !ETH_DEV(priv)->data->rx_queues[rss->queue_id[i]]) {
missing = 1;
break;
}
@@ -1136,7 +1136,7 @@ mlx4_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
int err;
@@ -1177,7 +1177,7 @@ mlx4_flow_isolate(struct rte_eth_dev *dev,
int enable,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
if (!!enable == !!priv->isolated)
return 0;
@@ -1200,7 +1200,7 @@ mlx4_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
int err = mlx4_flow_toggle(priv, flow, 0, error);
if (err)
@@ -1224,7 +1224,7 @@ static int
mlx4_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow *flow = LIST_FIRST(&priv->flows);
while (flow) {
@@ -1249,10 +1249,10 @@ mlx4_flow_flush(struct rte_eth_dev *dev,
* Next configured VLAN ID or a high value (>= 4096) if there is none.
*/
static uint16_t
-mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
+mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
{
while (vlan < 4096) {
- if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
+ if (ETH_DEV(priv)->data->vlan_filter_conf.ids[vlan / 64] &
(UINT64_C(1) << (vlan % 64)))
return vlan;
++vlan;
@@ -1289,7 +1289,7 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
+mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
{
struct rte_flow_attr attr = {
.priority = MLX4_FLOW_PRIORITY_LAST,
@@ -1329,7 +1329,7 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
* get RSS by default.
*/
uint32_t queues =
- rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
+ rte_align32pow2(ETH_DEV(priv)->data->nb_rx_queues + 1) >> 1;
uint16_t queue[queues];
struct rte_flow_action_rss action_rss = {
.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
@@ -1351,9 +1351,9 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
};
struct ether_addr *rule_mac = &eth_spec.dst;
rte_be16_t *rule_vlan =
- (priv->dev->data->dev_conf.rxmode.offloads &
+ (ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER) &&
- !priv->dev->data->promiscuous ?
+ !ETH_DEV(priv)->data->promiscuous ?
&vlan_spec.tci :
NULL;
uint16_t vlan = 0;
@@ -1433,7 +1433,7 @@ next_vlan:
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
memcpy(rule_mac, mac, sizeof(*mac));
- flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
actions, error);
if (!flow) {
err = -rte_errno;
@@ -1449,15 +1449,16 @@ next_vlan:
goto next_vlan;
}
/* Take care of promiscuous and all multicast flow rules. */
- if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
+ if (ETH_DEV(priv)->data->promiscuous ||
+ ETH_DEV(priv)->data->all_multicast) {
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_NEXT(flow, next)) {
- if (priv->dev->data->promiscuous) {
+ if (ETH_DEV(priv)->data->promiscuous) {
if (flow->promisc)
break;
} else {
- assert(priv->dev->data->all_multicast);
+ assert(ETH_DEV(priv)->data->all_multicast);
if (flow->allmulti)
break;
}
@@ -1471,16 +1472,16 @@ next_vlan:
}
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
- if (priv->dev->data->promiscuous) {
+ if (ETH_DEV(priv)->data->promiscuous) {
pattern[1].spec = NULL;
pattern[1].mask = NULL;
} else {
- assert(priv->dev->data->all_multicast);
+ assert(ETH_DEV(priv)->data->all_multicast);
pattern[1].spec = &eth_allmulti;
pattern[1].mask = &eth_allmulti;
}
pattern[2] = pattern[3];
- flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
actions, error);
if (!flow) {
err = -rte_errno;
@@ -1497,7 +1498,8 @@ error:
struct rte_flow *next = LIST_NEXT(flow, next);
if (!flow->select)
- claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
+ error));
else
flow->select = 0;
flow = next;
@@ -1521,7 +1523,7 @@ error:
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
+mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error)
{
struct rte_flow *flow;
int ret;
@@ -1535,7 +1537,8 @@ mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_FIRST(&priv->flows))
- claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
+ error));
} else {
/* Refresh internal rules. */
ret = mlx4_flow_internal(priv, error);
@@ -1563,12 +1566,12 @@ mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
* Pointer to private structure.
*/
void
-mlx4_flow_clean(struct priv *priv)
+mlx4_flow_clean(struct mlx4_priv *priv)
{
struct rte_flow *flow;
while ((flow = LIST_FIRST(&priv->flows)))
- mlx4_flow_destroy(priv->dev, flow, NULL);
+ mlx4_flow_destroy(ETH_DEV(priv), flow, NULL);
assert(LIST_EMPTY(&priv->rss));
}
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
index 2917ebe9..03a4bd05 100644
--- a/drivers/net/mlx4/mlx4_flow.h
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -48,10 +48,10 @@ struct rte_flow {
/* mlx4_flow.c */
-uint64_t mlx4_conv_rss_types(struct priv *priv, uint64_t types,
+uint64_t mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types,
int verbs_to_dpdk);
-int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
-void mlx4_flow_clean(struct priv *priv);
+int mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error);
+void mlx4_flow_clean(struct mlx4_priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index eeb982a0..4f335267 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -33,7 +33,7 @@
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
-static int mlx4_link_status_check(struct priv *priv);
+static int mlx4_link_status_check(struct mlx4_priv *priv);
/**
* Clean up Rx interrupts handler.
@@ -42,7 +42,7 @@ static int mlx4_link_status_check(struct priv *priv);
* Pointer to private structure.
*/
static void
-mlx4_rx_intr_vec_disable(struct priv *priv)
+mlx4_rx_intr_vec_disable(struct mlx4_priv *priv)
{
struct rte_intr_handle *intr_handle = &priv->intr_handle;
@@ -62,10 +62,10 @@ mlx4_rx_intr_vec_disable(struct priv *priv)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_rx_intr_vec_enable(struct priv *priv)
+mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
{
unsigned int i;
- unsigned int rxqs_n = priv->dev->data->nb_rx_queues;
+ unsigned int rxqs_n = ETH_DEV(priv)->data->nb_rx_queues;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
unsigned int count = 0;
struct rte_intr_handle *intr_handle = &priv->intr_handle;
@@ -79,7 +79,7 @@ mlx4_rx_intr_vec_enable(struct priv *priv)
return -rte_errno;
}
for (i = 0; i != n; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
/* Skip queues that cannot request interrupts. */
if (!rxq || !rxq->channel) {
@@ -117,15 +117,15 @@ mlx4_rx_intr_vec_enable(struct priv *priv)
* Pointer to private structure.
*/
static void
-mlx4_link_status_alarm(struct priv *priv)
+mlx4_link_status_alarm(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
assert(priv->intr_alarm == 1);
priv->intr_alarm = 0;
if (intr_conf->lsc && !mlx4_link_status_check(priv))
- _rte_eth_dev_callback_process(priv->dev,
+ _rte_eth_dev_callback_process(ETH_DEV(priv),
RTE_ETH_EVENT_INTR_LSC,
NULL);
}
@@ -143,10 +143,10 @@ mlx4_link_status_alarm(struct priv *priv)
* otherwise and rte_errno is set.
*/
static int
-mlx4_link_status_check(struct priv *priv)
+mlx4_link_status_check(struct mlx4_priv *priv)
{
- struct rte_eth_link *link = &priv->dev->data->dev_link;
- int ret = mlx4_link_update(priv->dev, 0);
+ struct rte_eth_link *link = &ETH_DEV(priv)->data->dev_link;
+ int ret = mlx4_link_update(ETH_DEV(priv), 0);
if (ret)
return ret;
@@ -175,7 +175,7 @@ mlx4_link_status_check(struct priv *priv)
* Pointer to private structure.
*/
static void
-mlx4_interrupt_handler(struct priv *priv)
+mlx4_interrupt_handler(struct mlx4_priv *priv)
{
enum { LSC, RMV, };
static const enum rte_eth_event_type type[] = {
@@ -185,7 +185,7 @@ mlx4_interrupt_handler(struct priv *priv)
uint32_t caught[RTE_DIM(type)] = { 0 };
struct ibv_async_event event;
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
unsigned int i;
	/* Read all messages and acknowledge them. */
@@ -208,7 +208,7 @@ mlx4_interrupt_handler(struct priv *priv)
}
for (i = 0; i != RTE_DIM(caught); ++i)
if (caught[i])
- _rte_eth_dev_callback_process(priv->dev, type[i],
+ _rte_eth_dev_callback_process(ETH_DEV(priv), type[i],
NULL);
}
@@ -251,7 +251,7 @@ mlx4_arm_cq(struct rxq *rxq, int solicited)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_intr_uninstall(struct priv *priv)
+mlx4_intr_uninstall(struct mlx4_priv *priv)
{
int err = rte_errno; /* Make sure rte_errno remains unchanged. */
@@ -279,10 +279,10 @@ mlx4_intr_uninstall(struct priv *priv)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_intr_install(struct priv *priv)
+mlx4_intr_install(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
int rc;
mlx4_intr_uninstall(priv);
@@ -378,10 +378,10 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_rxq_intr_enable(struct priv *priv)
+mlx4_rxq_intr_enable(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
goto error;
@@ -397,7 +397,7 @@ error:
* Pointer to private structure.
*/
void
-mlx4_rxq_intr_disable(struct priv *priv)
+mlx4_rxq_intr_disable(struct mlx4_priv *priv)
{
int err = rte_errno; /* Make sure rte_errno remains unchanged. */
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index a0094483..98c236fb 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -348,7 +348,7 @@ mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
unsigned int n;
DEBUG("port %u inserting MR(%p) to global cache",
@@ -389,7 +389,7 @@ static struct mlx4_mr *
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
/* Iterate all the existing MRs. */
@@ -430,7 +430,7 @@ static uint32_t
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
uint16_t idx;
uint32_t lkey = UINT32_MAX;
struct mlx4_mr *mr;
@@ -477,7 +477,7 @@ mr_free(struct mlx4_mr *mr)
}
/**
- * Releass resources of detached MR having no online entry.
+ * Release resources of detached MR having no online entry.
*
* @param dev
* Pointer to Ethernet device.
@@ -485,7 +485,7 @@ mr_free(struct mlx4_mr *mr)
static void
mlx4_mr_garbage_collect(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr_next;
struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
@@ -525,7 +525,7 @@ mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
}
/**
- * Create a new global Memroy Region (MR) for a missing virtual address.
+ * Create a new global Memory Region (MR) for a missing virtual address.
* Register entire virtually contiguous memory chunk around the address.
*
* @param dev
@@ -543,7 +543,7 @@ static uint32_t
mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
const struct rte_memseg_list *msl;
const struct rte_memseg *ms;
@@ -623,7 +623,7 @@ alloc_resources:
bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
if (mr->ms_bmp == NULL) {
- WARN("port %u unable to initialize bitamp for a new MR of"
+ WARN("port %u unable to initialize bitmap for a new MR of"
" address (%p).",
dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
@@ -769,7 +769,7 @@ err_nolock:
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
DEBUG("port %u rebuild dev cache[]", dev->data->port_id);
@@ -801,7 +801,7 @@ mr_rebuild_dev_cache(struct rte_eth_dev *dev)
static void
mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
const struct rte_memseg_list *msl;
struct mlx4_mr *mr;
int ms_n;
@@ -889,14 +889,14 @@ void
mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg __rte_unused)
{
- struct priv *priv;
+ struct mlx4_priv *priv;
switch (event_type) {
case RTE_MEM_EVENT_FREE:
rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
/* Iterate all the existing mlx4 devices. */
LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
- mlx4_mr_mem_event_free_cb(priv->dev, addr, len);
+ mlx4_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
break;
case RTE_MEM_EVENT_ALLOC:
@@ -926,7 +926,7 @@ static uint32_t
mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
struct mlx4_mr_cache *entry, uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh;
uint16_t idx;
uint32_t lkey;
@@ -1024,11 +1024,9 @@ uint32_t
mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
{
struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
- struct priv *priv = rxq->priv;
+ struct mlx4_priv *priv = rxq->priv;
- DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
- return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+ return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
/**
@@ -1046,11 +1044,9 @@ static uint32_t
mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
{
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq->priv;
+ struct mlx4_priv *priv = txq->priv;
- DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
- return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+ return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
/**
@@ -1122,7 +1118,7 @@ mlx4_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
{
struct mr_update_mp_data *data = opaque;
struct rte_eth_dev *dev = data->dev;
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl;
struct mlx4_mr *mr = NULL;
uintptr_t addr = (uintptr_t)memhdr->addr;
@@ -1223,9 +1219,9 @@ uint32_t
mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, struct rte_mempool *mp)
{
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq->priv;
+ struct mlx4_priv *priv = txq->priv;
- mlx4_mr_update_ext_mp(priv->dev, mr_ctrl, mp);
+ mlx4_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
return mlx4_tx_addr2mr_bh(txq, addr);
}
@@ -1289,7 +1285,7 @@ mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
void
mlx4_mr_dump_dev(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
int mr_n = 0;
int chunk_n = 0;
@@ -1332,8 +1328,8 @@ mlx4_mr_dump_dev(struct rte_eth_dev *dev)
void
mlx4_mr_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+ struct mlx4_priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr_next;
/* Remove from memory callback device list. */
rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
@@ -1344,6 +1340,7 @@ mlx4_mr_release(struct rte_eth_dev *dev)
#endif
rte_rwlock_write_lock(&priv->mr.rwlock);
/* Detach from MR list and move to free list. */
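+	/* Read the list head under the lock to avoid racing with MR creation. */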
+ mr_next = LIST_FIRST(&priv->mr.mr_list);
while (mr_next != NULL) {
struct mlx4_mr *mr = mr_next;
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 6804c634..50f33eb0 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -87,7 +87,7 @@ mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
* Pointer to RSS context on success, NULL otherwise and rte_errno is set.
*/
struct mlx4_rss *
-mlx4_rss_get(struct priv *priv, uint64_t fields,
+mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields,
const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[])
{
@@ -175,7 +175,8 @@ mlx4_rss_attach(struct mlx4_rss *rss)
}
struct ibv_wq *ind_tbl[rss->queues];
- struct priv *priv = rss->priv;
+ struct mlx4_priv *priv = rss->priv;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
const char *msg;
unsigned int i = 0;
int ret;
@@ -189,8 +190,8 @@ mlx4_rss_attach(struct mlx4_rss *rss)
uint16_t id = rss->queue_id[i];
struct rxq *rxq = NULL;
- if (id < priv->dev->data->nb_rx_queues)
- rxq = priv->dev->data->rx_queues[id];
+ if (id < dev->data->nb_rx_queues)
+ rxq = dev->data->rx_queues[id];
if (!rxq) {
ret = EINVAL;
msg = "RSS target queue is not configured";
@@ -269,7 +270,7 @@ error:
rss->ind = NULL;
}
while (i--)
- mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
ERROR("mlx4: %s", msg);
--rss->usecnt;
rte_errno = ret;
@@ -290,7 +291,8 @@ error:
void
mlx4_rss_detach(struct mlx4_rss *rss)
{
- struct priv *priv = rss->priv;
+ struct mlx4_priv *priv = rss->priv;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
unsigned int i;
assert(rss->refcnt);
@@ -303,7 +305,7 @@ mlx4_rss_detach(struct mlx4_rss *rss)
claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
rss->ind = NULL;
for (i = 0; i != rss->queues; ++i)
- mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
}
/**
@@ -327,9 +329,9 @@ mlx4_rss_detach(struct mlx4_rss *rss)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_rss_init(struct priv *priv)
+mlx4_rss_init(struct mlx4_priv *priv)
{
- struct rte_eth_dev *dev = priv->dev;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
uint32_t wq_num_prev = 0;
const char *msg;
@@ -338,7 +340,7 @@ mlx4_rss_init(struct priv *priv)
if (priv->rss_init)
return 0;
- if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
+ if (ETH_DEV(priv)->data->nb_rx_queues > priv->hw_rss_max_qps) {
ERROR("RSS does not support more than %d queues",
priv->hw_rss_max_qps);
rte_errno = EINVAL;
@@ -356,8 +358,8 @@ mlx4_rss_init(struct priv *priv)
rte_errno = ret;
return -ret;
}
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
struct ibv_cq *cq;
struct ibv_wq *wq;
uint32_t wq_num;
@@ -432,7 +434,7 @@ error:
ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
i, msg, strerror(ret));
while (i--) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq)
mlx4_rxq_detach(rxq);
@@ -451,14 +453,14 @@ error:
* Pointer to private structure.
*/
void
-mlx4_rss_deinit(struct priv *priv)
+mlx4_rss_deinit(struct mlx4_priv *priv)
{
unsigned int i;
if (!priv->rss_init)
return;
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq) {
assert(rxq->usecnt == 1);
@@ -493,8 +495,8 @@ mlx4_rxq_attach(struct rxq *rxq)
return 0;
}
- struct priv *priv = rxq->priv;
- struct rte_eth_dev *dev = priv->dev;
+ struct mlx4_priv *priv = rxq->priv;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
const uint32_t elts_n = 1 << rxq->elts_n;
const uint32_t sges_n = 1 << rxq->sges_n;
struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
@@ -561,7 +563,7 @@ mlx4_rxq_attach(struct rxq *rxq)
}
/* Pre-register Rx mempool. */
DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
- priv->dev->data->port_id, rxq->stats.idx,
+ ETH_DEV(priv)->data->port_id, rxq->stats.idx,
rxq->mp->name, rxq->mp->nb_mem_chunks);
mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
wqes = (volatile struct mlx4_wqe_data_seg (*)[])
@@ -675,7 +677,7 @@ mlx4_rxq_detach(struct rxq *rxq)
* Supported Rx offloads.
*/
uint64_t
-mlx4_get_rx_queue_offloads(struct priv *priv)
+mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
{
uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC |
@@ -696,7 +698,7 @@ mlx4_get_rx_queue_offloads(struct priv *priv)
* Supported Rx offloads.
*/
uint64_t
-mlx4_get_rx_port_offloads(struct priv *priv)
+mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
{
uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
@@ -728,7 +730,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
struct rte_mbuf *(*elts)[rte_align32pow2(desc)];
struct rxq *rxq;
@@ -911,17 +913,17 @@ void
mlx4_rx_queue_release(void *dpdk_rxq)
{
struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct priv *priv;
+ struct mlx4_priv *priv;
unsigned int i;
if (rxq == NULL)
return;
priv = rxq->priv;
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
- if (priv->dev->data->rx_queues[i] == rxq) {
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i)
+ if (ETH_DEV(priv)->data->rx_queues[i] == rxq) {
DEBUG("%p: removing Rx queue %p from list",
- (void *)priv->dev, (void *)rxq);
- priv->dev->data->rx_queues[i] = NULL;
+ (void *)ETH_DEV(priv), (void *)rxq);
+ ETH_DEV(priv)->data->rx_queues[i] = NULL;
break;
}
assert(!rxq->cq);
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index d7ec4e0c..29389f1e 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -38,7 +38,7 @@ struct mlx4_rxq_stats {
/** Rx queue descriptor. */
struct rxq {
- struct priv *priv; /**< Back pointer to private data. */
+ struct mlx4_priv *priv; /**< Back pointer to private data. */
struct rte_mempool *mp; /**< Memory pool for allocations. */
struct ibv_cq *cq; /**< Completion queue. */
struct ibv_wq *wq; /**< Work queue. */
@@ -65,7 +65,7 @@ struct rxq {
/** Shared flow target for Rx queues. */
struct mlx4_rss {
LIST_ENTRY(mlx4_rss) next; /**< Next entry in list. */
- struct priv *priv; /**< Back pointer to private data. */
+ struct mlx4_priv *priv; /**< Back pointer to private data. */
uint32_t refcnt; /**< Reference count for this object. */
uint32_t usecnt; /**< Number of users relying on @p qp and @p ind. */
struct ibv_qp *qp; /**< Queue pair. */
@@ -111,7 +111,7 @@ struct txq {
uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */
uint8_t *bounce_buf;
/**< Memory used for storing the first DWORD of data TXBBs. */
- struct priv *priv; /**< Back pointer to private data. */
+ struct mlx4_priv *priv; /**< Back pointer to private data. */
unsigned int socket; /**< CPU socket ID for allocations. */
struct ibv_cq *cq; /**< Completion queue. */
struct ibv_qp *qp; /**< Queue pair. */
@@ -121,9 +121,9 @@ struct txq {
/* mlx4_rxq.c */
uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
-int mlx4_rss_init(struct priv *priv);
-void mlx4_rss_deinit(struct priv *priv);
-struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields,
+int mlx4_rss_init(struct mlx4_priv *priv);
+void mlx4_rss_deinit(struct mlx4_priv *priv);
+struct mlx4_rss *mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields,
const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[]);
void mlx4_rss_put(struct mlx4_rss *rss);
@@ -131,8 +131,8 @@ int mlx4_rss_attach(struct mlx4_rss *rss);
void mlx4_rss_detach(struct mlx4_rss *rss);
int mlx4_rxq_attach(struct rxq *rxq);
void mlx4_rxq_detach(struct rxq *rxq);
-uint64_t mlx4_get_rx_port_offloads(struct priv *priv);
-uint64_t mlx4_get_rx_queue_offloads(struct priv *priv);
+uint64_t mlx4_get_rx_port_offloads(struct mlx4_priv *priv);
+uint64_t mlx4_get_rx_queue_offloads(struct mlx4_priv *priv);
int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
@@ -152,7 +152,7 @@ uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
/* mlx4_txq.c */
-uint64_t mlx4_get_tx_port_offloads(struct priv *priv);
+uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv);
int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_txconf *conf);
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 9aa7440d..35270082 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -107,7 +107,7 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
* Supported Tx offloads.
*/
uint64_t
-mlx4_get_tx_port_offloads(struct priv *priv)
+mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
{
uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
@@ -148,7 +148,7 @@ int
mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4dv_obj mlxdv;
struct mlx4dv_qp dv_qp;
struct mlx4dv_cq dv_cq;
@@ -351,17 +351,17 @@ void
mlx4_tx_queue_release(void *dpdk_txq)
{
struct txq *txq = (struct txq *)dpdk_txq;
- struct priv *priv;
+ struct mlx4_priv *priv;
unsigned int i;
if (txq == NULL)
return;
priv = txq->priv;
- for (i = 0; i != priv->dev->data->nb_tx_queues; ++i)
- if (priv->dev->data->tx_queues[i] == txq) {
+ for (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i)
+ if (ETH_DEV(priv)->data->tx_queues[i] == txq) {
DEBUG("%p: removing Tx queue %p from list",
- (void *)priv->dev, (void *)txq);
- priv->dev->data->tx_queues[i] = NULL;
+ (void *)ETH_DEV(priv), (void *)txq);
+ ETH_DEV(priv)->data->tx_queues[i] = NULL;
break;
}
mlx4_txq_free_elts(txq);
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index e7668bd5..d91d55b5 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -157,9 +157,10 @@ mlx5_prepare_shared_data(void)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
+ rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+ mlx5_mr_mem_event_cb,
+ NULL);
}
- rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
- mlx5_mr_mem_event_cb, NULL);
}
rte_spinlock_unlock(&mlx5_shared_data_lock);
}
@@ -200,7 +201,7 @@ mlx5_getenv_int(const char *name)
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
- struct priv *priv = data;
+ struct mlx5_priv *priv = data;
void *ret;
size_t alignment = sysconf(_SC_PAGESIZE);
unsigned int socket = SOCKET_ID_ANY;
@@ -248,7 +249,7 @@ mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
int ret;
@@ -335,7 +336,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
while (i--) {
- struct priv *opriv =
+ struct mlx5_priv *opriv =
rte_eth_devices[port_id[i]].data->dev_private;
if (!opriv ||
@@ -630,7 +631,7 @@ find_lower_va_bound(const struct rte_memseg_list *msl,
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
void *addr = (void *)0;
if (uar_base) { /* UAR address space mapped. */
@@ -676,7 +677,7 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
void *addr;
assert(priv->uar_base);
@@ -739,7 +740,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
struct ibv_pd *pd = NULL;
struct mlx5dv_context dv_attr = { .comp_mask = 0 };
struct rte_eth_dev *eth_dev = NULL;
- struct priv *priv = NULL;
+ struct mlx5_priv *priv = NULL;
int err = 0;
unsigned int hw_padding = 0;
unsigned int mps;
@@ -1001,7 +1002,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
while (i--) {
- const struct priv *opriv =
+ const struct mlx5_priv *opriv =
rte_eth_devices[port_id[i]].data->dev_private;
if (!opriv ||
@@ -1233,8 +1234,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
priv->config = config;
/* Supported Verbs flow priority number detection. */
err = mlx5_flow_discover_priorities(eth_dev);
- if (err < 0)
+ if (err < 0) {
+ err = -err;
goto error;
+ }
priv->config.flow_prio = err;
/*
* Once the device is added to the list of memory event
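The first mlx5.c hunk above moves the memory-event callback registration inside the primary-process branch; together with the assert added in mlx5_mr.c further down, the invariant is that only the primary process drives MR management from rte_malloc free events. The post-patch flow, reconstructed from the hunk:

if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
	LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
	rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
	/* Primary only: secondaries must not react to memory events. */
	rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
					mlx5_mr_mem_event_cb, NULL);
}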
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bc500b2b..91efd21b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -62,7 +62,7 @@ struct mlx5_switch_info {
uint64_t switch_id; /**< Switch identifier. */
};
-LIST_HEAD(mlx5_dev_list, priv);
+LIST_HEAD(mlx5_dev_list, mlx5_priv);
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data {
@@ -172,8 +172,9 @@ struct mlx5_drop {
struct mlx5_flow_tcf_context;
-struct priv {
- LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
+struct mlx5_priv {
+ LIST_ENTRY(mlx5_priv) mem_event_cb;
+ /**< Called by memory event callback. */
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr_ex device_attr; /* Device properties. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index bfe66558..480b33c8 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -63,10 +63,11 @@
/* Default maximum number of Tx queues for vectorized Tx. */
#if defined(RTE_ARCH_ARM64)
#define MLX5_VPMD_MAX_TXQS 8
+#define MLX5_VPMD_MAX_TXQS_BLUEFIELD 16
#else
#define MLX5_VPMD_MAX_TXQS 4
+#define MLX5_VPMD_MAX_TXQS_BLUEFIELD MLX5_VPMD_MAX_TXQS
#endif
-#define MLX5_VPMD_MAX_TXQS_BLUEFIELD 16
/* Threshold of buffer replenishment for vectorized Rx. */
#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
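Reconstructed post-patch state of the mlx5_defs.h block above, showing why the move matters: the BlueField override of 16 vectorized Tx queues is now scoped to ARM64 builds, while other architectures fall back to the generic maximum:

#if defined(RTE_ARCH_ARM64)
#define MLX5_VPMD_MAX_TXQS 8
#define MLX5_VPMD_MAX_TXQS_BLUEFIELD 16
#else
#define MLX5_VPMD_MAX_TXQS 4
#define MLX5_VPMD_MAX_TXQS_BLUEFIELD MLX5_VPMD_MAX_TXQS
#endif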
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index d178ed6a..fb8e313a 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -131,7 +131,7 @@ static int
mlx5_get_master_ifname(const struct rte_eth_dev *dev,
char (*ifname)[IF_NAMESIZE])
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
DIR *dir;
struct dirent *dent;
unsigned int dev_type = 0;
@@ -219,7 +219,7 @@ try_dev_id:
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int ifindex =
priv->nl_socket_rdma >= 0 ?
mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0;
@@ -377,7 +377,7 @@ mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int rxqs_n = dev->data->nb_rx_queues;
unsigned int txqs_n = dev->data->nb_tx_queues;
unsigned int i;
@@ -460,7 +460,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
static void
mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
/* Minimum CPU utilization. */
info->default_rxportconf.ring_size = 256;
@@ -499,7 +499,7 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
unsigned int max;
char ifname[IF_NAMESIZE];
@@ -540,7 +540,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
while (i--) {
- struct priv *opriv =
+ struct mlx5_priv *opriv =
rte_eth_devices[port_id[i]].data->dev_private;
if (!opriv ||
@@ -609,7 +609,7 @@ static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
struct rte_eth_link *link)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
};
@@ -685,7 +685,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
struct rte_eth_link *link)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
struct ifreq ifr;
struct rte_eth_link dev_link;
@@ -840,7 +840,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t kern_mtu = 0;
int ret;
@@ -1015,7 +1015,7 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
static uint32_t
mlx5_dev_status_handler(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_async_event event;
uint32_t ret = 0;
@@ -1087,7 +1087,7 @@ mlx5_dev_handler_socket(void *cb_arg)
void
mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (dev->data->dev_conf.intr_conf.lsc ||
dev->data->dev_conf.intr_conf.rmv)
@@ -1111,7 +1111,7 @@ mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
void
mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
int flags;
@@ -1187,7 +1187,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
struct mlx5_dev_config *config = &priv->config;
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
@@ -1271,7 +1271,7 @@ int
mlx5_is_removed(struct rte_eth_dev *dev)
{
struct ibv_device_attr device_attr;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
return 1;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ee129b97..222cd81d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -350,6 +350,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
claim_zero(mlx5_glue->destroy_flow(flow));
priority = vprio[i];
}
+ mlx5_hrxq_drop_release(dev);
switch (priority) {
case 8:
priority = RTE_DIM(priority_map_3);
@@ -361,10 +362,9 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
rte_errno = ENOTSUP;
DRV_LOG(ERR,
"port %u verbs maximum priority: %d expected 8/16",
- dev->data->port_id, vprio[i]);
+ dev->data->port_id, priority);
return -rte_errno;
}
- mlx5_hrxq_drop_release(dev);
DRV_LOG(INFO, "port %u flow maximum priority: %d",
dev->data->port_id, priority);
return priority;
@@ -387,7 +387,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
uint32_t subpriority)
{
uint32_t res = 0;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
switch (priv->config.flow_prio) {
case RTE_DIM(priority_map_3):
@@ -536,7 +536,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = dev_flow->flow;
const int mark = !!(flow->actions &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
@@ -599,7 +599,7 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = dev_flow->flow;
const int mark = !!(flow->actions &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
@@ -661,7 +661,7 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
@@ -786,7 +786,7 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_validate_action_drop(uint64_t action_flags,
@@ -829,7 +829,7 @@ mlx5_flow_validate_action_drop(uint64_t action_flags,
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
@@ -838,7 +838,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_queue *queue = action->conf;
if (action_flags & MLX5_FLOW_FATE_ACTIONS)
@@ -875,21 +875,25 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
* Pointer to the Ethernet device structure.
* @param[in] attr
* Attributes of flow that includes this action.
+ * @param[in] item_flags
+ * Items that were detected.
* @param[out] error
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
uint64_t action_flags,
struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
+ uint64_t item_flags,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = action->conf;
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
if (action_flags & MLX5_FLOW_FATE_ACTIONS)
@@ -950,6 +954,11 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
"rss action not supported for "
"egress");
+ if (rss->level > 1 && !tunnel)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "inner RSS is not supported for "
+ "non-tunnel flows");
return 0;
}
@@ -964,7 +973,7 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
@@ -998,7 +1007,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint32_t priority_max = priv->config.flow_prio - 1;
if (attributes->group)
@@ -1462,7 +1471,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_item_vxlan_gpe *spec = item->spec;
const struct rte_flow_item_vxlan_gpe *mask = item->mask;
int ret;
@@ -1616,7 +1625,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
const struct rte_flow_item_mpls *mask = item->mask;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
if (!priv->config.mpls_en)
@@ -1747,7 +1756,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
static enum mlx5_flow_drv_type
flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
if (attr->transfer)
@@ -1776,7 +1785,7 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
* Pointer to the error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static inline int
flow_drv_validate(struct rte_eth_dev *dev,
@@ -1815,7 +1824,7 @@ flow_drv_validate(struct rte_eth_dev *dev,
* Pointer to the error structure.
*
* @return
- * Pointer to device flow on success, otherwise NULL and rte_ernno is set.
+ * Pointer to device flow on success, otherwise NULL and rte_errno is set.
*/
static inline struct mlx5_flow *
flow_drv_prepare(const struct rte_flow *flow,
@@ -1859,7 +1868,7 @@ flow_drv_prepare(const struct rte_flow *flow,
* Pointer to the error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static inline int
flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
@@ -2121,8 +2130,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- return flow_list_create(dev,
- &((struct priv *)dev->data->dev_private)->flows,
+ struct mlx5_priv *priv = (struct mlx5_priv *)dev->data->dev_private;
+
+ return flow_list_create(dev, &priv->flows,
attr, items, actions, error);
}
@@ -2232,7 +2242,7 @@ error:
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
int ret = 0;
@@ -2268,7 +2278,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
struct rte_flow_item_vlan *vlan_spec,
struct rte_flow_item_vlan *vlan_mask)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.ingress = 1,
.priority = MLX5_FLOW_PRIO_RSVD,
@@ -2359,7 +2369,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
flow_list_destroy(dev, &priv->flows, flow);
return 0;
@@ -2375,7 +2385,7 @@ int
mlx5_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->flows);
return 0;
@@ -2392,7 +2402,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
int enable,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (dev->data->dev_started) {
rte_flow_error_set(error, EBUSY,
@@ -2470,7 +2480,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
struct mlx5_fdir *attributes)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_eth_fdir_input *input = &fdir_filter->input;
const struct rte_eth_fdir_masks *mask =
&dev->data->dev_conf.fdir_conf.mask;
@@ -2687,7 +2697,7 @@ flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
static struct rte_flow *
flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = NULL;
assert(fdir_flow);
@@ -2716,7 +2726,7 @@ static int
flow_fdir_filter_add(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_fdir *fdir_flow;
struct rte_flow *flow;
int ret;
@@ -2763,7 +2773,7 @@ static int
flow_fdir_filter_delete(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
struct mlx5_fdir fdir_flow = {
.attr.group = 0,
@@ -2816,7 +2826,7 @@ flow_fdir_filter_update(struct rte_eth_dev *dev,
static void
flow_fdir_filter_flush(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->flows);
}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4a7c0529..e1424c78 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -363,6 +363,7 @@ int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
uint64_t action_flags,
struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
+ uint64_t item_flags,
struct rte_flow_error *error);
int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
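The prototype change above threads the accumulated item flags into the shared RSS validator so that both the DV and Verbs paths (see the mlx5_flow_dv.c and mlx5_flow_verbs.c hunks below) can reject inner RSS when no tunnel item was matched. A condensed sketch of the contract, using only names that appear in this diff:

/* Caller side: item_flags accumulates MLX5_FLOW_LAYER_* bits per item. */
ret = mlx5_flow_validate_action_rss(actions, action_flags, dev,
				    attr, item_flags, error);

/* Callee side: RSS level > 1 means inner RSS, valid only on tunnels. */
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
if (rss->level > 1 && !tunnel)
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
				  "inner RSS is not supported for "
				  "non-tunnel flows");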
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 1f318748..207edcbc 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -293,7 +293,7 @@ flow_dv_encap_decap_resource_register
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
/* Lookup a matching resource from cache. */
@@ -722,7 +722,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint32_t priority_max = priv->config.flow_prio - 1;
if (attributes->group)
@@ -764,7 +764,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
* Pointer to the error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
@@ -776,7 +776,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
uint64_t action_flags = 0;
uint64_t item_flags = 0;
uint64_t last_item = 0;
- int tunnel = 0;
uint8_t next_protocol = 0xff;
int actions_n = 0;
@@ -786,7 +785,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
if (ret < 0)
return ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -958,7 +957,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
case RTE_FLOW_ACTION_TYPE_RSS:
ret = mlx5_flow_validate_action_rss(actions,
action_flags, dev,
- attr, error);
+ attr, item_flags,
+ error);
if (ret < 0)
return ret;
action_flags |= MLX5_FLOW_ACTION_RSS;
@@ -1043,7 +1043,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
*
* @return
* Pointer to mlx5_flow object on success,
- * otherwise NULL and rte_ernno is set.
+ * otherwise NULL and rte_errno is set.
*/
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
@@ -1800,7 +1800,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_matcher *cache_matcher;
struct mlx5dv_flow_matcher_attr dv_attr = {
.type = IBV_FLOW_ATTR_NORMAL,
@@ -1873,7 +1873,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
* Pointer to the error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_translate(struct rte_eth_dev *dev,
@@ -1883,7 +1883,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = dev_flow->flow;
uint64_t item_flags = 0;
uint64_t last_item = 0;
diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c
index 96b9dd72..92f984f9 100644
--- a/drivers/net/mlx5/mlx5_flow_tcf.c
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c
@@ -2344,7 +2344,7 @@ flow_tcf_validate(struct rte_eth_dev *dev,
*/
if ((action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
(action_flags & MLX5_FLOW_ACTION_PORT_ID) &&
- ((struct priv *)port_id_dev->data->dev_private)->representor)
+ ((struct mlx5_priv *)port_id_dev->data->dev_private)->representor)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"vlan push can only be applied"
@@ -5321,7 +5321,7 @@ flow_tcf_check_inhw(struct mlx5_flow_tcf_context *tcf,
static void
flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
struct mlx5_flow *dev_flow;
struct nlmsghdr *nlh;
@@ -5410,7 +5410,7 @@ static int
flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
struct mlx5_flow *dev_flow;
struct nlmsghdr *nlh;
@@ -5894,7 +5894,7 @@ flow_tcf_query_count(struct rte_eth_dev *dev,
{
struct flow_tcf_stats_basic sb_data;
struct rte_flow_query_count *qc = data;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
struct mnl_socket *nl = ctx->nl;
struct mlx5_flow *dev_flow;
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 409e1cd0..1fdbca3d 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -55,7 +55,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_counter_set_init_attr init = {
.counter_set_id = counter->id};
@@ -66,7 +66,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
}
return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_counters_init_attr init = {0};
struct ibv_counter_attach_attr attach;
int ret;
@@ -117,7 +117,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
static struct mlx5_flow_counter *
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter *cnt;
int ret;
@@ -1191,7 +1191,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_RSS:
ret = mlx5_flow_validate_action_rss(actions,
action_flags, dev,
- attr,
+ attr, item_flags,
error);
if (ret < 0)
return ret;
@@ -1383,7 +1383,7 @@ flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
* Pointer to the error structure.
*
* @return
- * 0 on success, else a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_verbs_translate(struct rte_eth_dev *dev,
@@ -1398,7 +1398,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
uint64_t action_flags = 0;
uint64_t priority = attr->priority;
uint32_t subpriority = 0;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = priv->config.flow_prio - 1;
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 672a4761..bce026f9 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -67,7 +67,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
static void
mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const int vf = priv->config.vf;
assert(index < MLX5_MAX_MAC_ADDRESSES);
@@ -96,7 +96,7 @@ static int
mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const int vf = priv->config.vf;
unsigned int i;
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 442b2d23..01bf5910 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -336,7 +336,7 @@ mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int n;
DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
@@ -377,7 +377,7 @@ static struct mlx5_mr *
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
/* Iterate all the existing MRs. */
@@ -418,7 +418,7 @@ static uint32_t
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t idx;
uint32_t lkey = UINT32_MAX;
struct mlx5_mr *mr;
@@ -465,7 +465,7 @@ mr_free(struct mlx5_mr *mr)
}
/**
- * Releass resources of detached MR having no online entry.
+ * Release resources of detached MR having no online entry.
*
* @param dev
* Pointer to Ethernet device.
@@ -473,7 +473,7 @@ mr_free(struct mlx5_mr *mr)
static void
mlx5_mr_garbage_collect(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr *mr_next;
struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
@@ -515,7 +515,7 @@ mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
}
/**
- * Create a new global Memroy Region (MR) for a missing virtual address.
+ * Create a new global Memory Region (MR) for a missing virtual address.
* Register entire virtually contiguous memory chunk around the address.
*
* @param dev
@@ -533,7 +533,7 @@ static uint32_t
mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
const struct rte_memseg_list *msl;
const struct rte_memseg *ms;
@@ -623,7 +623,7 @@ alloc_resources:
bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
if (mr->ms_bmp == NULL) {
- DEBUG("port %u unable to initialize bitamp for a new MR of"
+ DEBUG("port %u unable to initialize bitmap for a new MR of"
" address (%p).",
dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
@@ -769,7 +769,7 @@ err_nolock:
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
@@ -803,7 +803,7 @@ mr_rebuild_dev_cache(struct rte_eth_dev *dev)
static void
mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_memseg_list *msl;
struct mlx5_mr *mr;
int ms_n;
@@ -888,9 +888,11 @@ void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg __rte_unused)
{
- struct priv *priv;
+ struct mlx5_priv *priv;
struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
+ /* Must be called from the primary process. */
+ assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
switch (event_type) {
case RTE_MEM_EVENT_FREE:
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
@@ -926,7 +928,7 @@ static uint32_t
mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
struct mlx5_mr_cache *entry, uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
uint16_t idx;
uint32_t lkey;
@@ -1026,11 +1028,8 @@ mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
- struct priv *priv = rxq_ctrl->priv;
+ struct mlx5_priv *priv = rxq_ctrl->priv;
- DRV_LOG(DEBUG,
- "Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
@@ -1051,11 +1050,8 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq_ctrl->priv;
+ struct mlx5_priv *priv = txq_ctrl->priv;
- DRV_LOG(DEBUG,
- "Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
@@ -1128,7 +1124,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
{
struct mr_update_mp_data *data = opaque;
struct rte_eth_dev *dev = data->dev;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
struct mlx5_mr *mr = NULL;
uintptr_t addr = (uintptr_t)memhdr->addr;
@@ -1136,6 +1132,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
struct mlx5_mr_cache entry;
uint32_t lkey;
+ assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* If already registered, it should return. */
rte_rwlock_read_lock(&priv->mr.rwlock);
lkey = mr_lookup_dev(dev, &entry, addr);
@@ -1235,8 +1232,17 @@ mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq_ctrl->priv;
+ struct mlx5_priv *priv = txq_ctrl->priv;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DRV_LOG(WARNING,
+ "port %u using address (%p) from unregistered mempool"
+ " having externally allocated memory"
+ " in secondary process, please create mempool"
+ " prior to rte_eth_dev_start()",
+ PORT_ID(priv), (void *)addr);
+ return UINT32_MAX;
+ }
mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
return mlx5_tx_addr2mr_bh(txq, addr);
}
@@ -1301,7 +1307,7 @@ void
mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
{
#ifndef NDEBUG
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
int mr_n = 0;
int chunk_n = 0;
@@ -1343,8 +1349,8 @@ mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
void
mlx5_mr_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr_next;
/* Remove from memory callback device list. */
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
@@ -1354,6 +1360,7 @@ mlx5_mr_release(struct rte_eth_dev *dev)
mlx5_mr_dump_dev(dev);
rte_rwlock_write_lock(&priv->mr.rwlock);
/* Detach from MR list and move to free list. */
+ mr_next = LIST_FIRST(&priv->mr.mr_list);
while (mr_next != NULL) {
struct mlx5_mr *mr = mr_next;
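All of the process-type checks added in the mlx5_mr.c hunks encode one rule: verbs MR registration and the device-level MR cache belong to the primary process. A secondary that misses its per-queue cache therefore fails the lookup instead of creating an MR; the essence of the guard in mlx5_tx_update_ext_mp(), restated as a sketch:

/* A cache miss cannot be repaired in a secondary process, so return
 * the invalid-lkey sentinel and let the caller drop the packet. */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
	return UINT32_MAX;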
diff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c
index d61826ae..fe5a2746 100644
--- a/drivers/net/mlx5/mlx5_nl.c
+++ b/drivers/net/mlx5/mlx5_nl.c
@@ -361,7 +361,7 @@ static int
mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[],
int *mac_n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int iface_idx = mlx5_ifindex(dev);
struct {
struct nlmsghdr hdr;
@@ -420,7 +420,7 @@ static int
mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac,
int add)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int iface_idx = mlx5_ifindex(dev);
struct {
struct nlmsghdr hdr;
@@ -492,7 +492,7 @@ int
mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
ret = mlx5_nl_mac_addr_modify(dev, mac, 1);
@@ -520,7 +520,7 @@ int
mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
BITFIELD_RESET(priv->mac_own, index);
return mlx5_nl_mac_addr_modify(dev, mac, 0);
@@ -572,7 +572,7 @@ mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev)
void
mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int i;
for (i = MLX5_MAX_MAC_ADDRESSES - 1; i >= 0; --i) {
@@ -599,7 +599,7 @@ mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev)
static int
mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int iface_idx = mlx5_ifindex(dev);
struct {
struct nlmsghdr hdr;
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index b95778a8..891d764b 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -41,7 +41,7 @@ int
mlx5_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int idx;
@@ -95,7 +95,7 @@ int
mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (!rss_conf) {
rte_errno = EINVAL;
@@ -125,7 +125,7 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
int
mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
void *mem;
unsigned int old_size = priv->reta_idx_n;
@@ -165,7 +165,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
@@ -201,7 +201,7 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
uint16_t reta_size)
{
int ret;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
unsigned int pos;
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index e74fdef8..d5077db0 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -32,7 +32,7 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
dev->data->promiscuous = 1;
@@ -60,7 +60,7 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
dev->data->promiscuous = 0;
@@ -81,7 +81,7 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
dev->data->all_multicast = 1;
@@ -109,7 +109,7 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
dev->data->all_multicast = 0;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 10b6ce0c..f1ce3170 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -68,7 +68,7 @@ static_assert(MLX5_RSS_HASH_KEY_LEN ==
inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (priv->config.mprq.enabled &&
priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
@@ -103,7 +103,7 @@ mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t i;
uint16_t n = 0;
@@ -382,7 +382,7 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
uint64_t
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
@@ -438,7 +438,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
@@ -489,7 +489,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
{
struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
- struct priv *priv;
+ struct mlx5_priv *priv;
if (rxq == NULL)
return;
@@ -514,7 +514,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
int
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
@@ -592,7 +592,7 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
void
mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_intr_handle *intr_handle = dev->intr_handle;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
@@ -664,7 +664,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
@@ -702,7 +702,7 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_rxq_ibv *rxq_ibv = NULL;
@@ -730,6 +730,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq_data->cq_arm_sn++;
mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
+ mlx5_rxq_ibv_release(rxq_ibv);
return 0;
exit:
ret = rte_errno; /* Save rte_errno before cleanup. */
@@ -755,7 +756,7 @@ exit:
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
@@ -1042,7 +1043,7 @@ error:
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl;
@@ -1098,7 +1099,7 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
int
mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_rxq_ibv *rxq_ibv;
@@ -1149,7 +1150,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
int
mlx5_mprq_free_mp(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_mempool *mp = priv->mprq_mp;
unsigned int i;
@@ -1200,7 +1201,7 @@ mlx5_mprq_free_mp(struct rte_eth_dev *dev)
int
mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_mempool *mp = priv->mprq_mp;
char name[RTE_MEMPOOL_NAMESIZE];
unsigned int desc = 0;
@@ -1272,7 +1273,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
return -rte_errno;
}
}
- snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
+ snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
0, NULL, NULL, mlx5_mprq_buf_init, NULL,
dev->device->numa_node, 0);
@@ -1319,7 +1320,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
unsigned int mprq_stride_size;
@@ -1494,7 +1495,7 @@ error:
* @param dev
* Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
*
* @return
* A pointer to the queue if it exists, NULL otherwise.
@@ -1502,7 +1503,7 @@ error:
struct mlx5_rxq_ctrl *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
if ((*priv->rxqs)[idx]) {
@@ -1521,7 +1522,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
* @param dev
* Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
*
* @return
* 1 while a reference on it exists, 0 when freed.
@@ -1529,7 +1530,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx])
@@ -1554,7 +1555,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
* @param dev
* Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
*
* @return
* 1 if the queue can be released, negative errno otherwise and rte_errno is
@@ -1563,7 +1564,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx]) {
@@ -1586,7 +1587,7 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_rxq_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;
@@ -1615,7 +1616,7 @@ struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
@@ -1679,7 +1680,7 @@ struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
@@ -1741,7 +1742,7 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
int
mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
int ret = 0;
@@ -1783,7 +1784,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint16_t *queues, uint32_t queues_n,
int tunnel __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
@@ -1899,7 +1900,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
queues_n = hash_fields ? queues_n : 1;
@@ -1962,7 +1963,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
int
mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
int ret = 0;
@@ -1987,7 +1988,7 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_cq *cq;
struct ibv_wq *wq = NULL;
struct mlx5_rxq_ibv *rxq;
@@ -2046,7 +2047,7 @@ error:
void
mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
if (rxq->wq)
@@ -2069,7 +2070,7 @@ mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
struct mlx5_rxq_ibv *rxq;
struct mlx5_ind_table_ibv tmpl;
@@ -2112,7 +2113,7 @@ error:
void
mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
@@ -2133,7 +2134,7 @@ mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
struct mlx5_hrxq *
mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
struct mlx5_hrxq *hrxq;
@@ -2196,7 +2197,7 @@ error:
void
mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 6eceea5f..38ce0e29 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -509,7 +509,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
uint32_t
mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq;
if (dev->rx_pkt_burst != mlx5_rx_burst) {
@@ -693,7 +693,8 @@ pkt_inline:
RTE_CACHE_LINE_SIZE);
copy_b = (addr_end > addr) ?
RTE_MIN((addr_end - addr), length) : 0;
- if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
+ if (copy_b && ((end - (uintptr_t)raw) >
+ (copy_b + sizeof(inl)))) {
/*
* One Dseg remains in the current WQE. To
* keep the computation positive, it is
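The tightened bound reserves room for the inline header in addition to the copied bytes; checking only copy_b could let the inline header overrun the end of the WQE ring. A condensed sketch of the corrected room check (names are hypothetical, not the driver's):

    #include <stdint.h>

    static int room_for_inline(uintptr_t raw, uintptr_t end,
                               unsigned int copy_b, unsigned int inl_hdr)
    {
            /* space must cover the copied bytes plus the inline header */
            return copy_b && (end - raw) > (copy_b + inl_hdr);
    }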
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 75194a3f..820675b8 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -58,7 +58,7 @@ struct mlx5_txq_stats {
uint64_t oerrors; /**< Total number of failed transmitted packets. */
};
-struct priv;
+struct mlx5_priv;
/* Compressed CQE context. */
struct rxq_zip {
@@ -143,7 +143,7 @@ struct mlx5_rxq_ctrl {
LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
- struct priv *priv; /* Back pointer to private data. */
+ struct mlx5_priv *priv; /* Back pointer to private data. */
struct mlx5_rxq_data rxq; /* Data path structure. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int irq:1; /* Whether IRQ is enabled. */
@@ -228,7 +228,7 @@ struct mlx5_txq_ctrl {
unsigned int max_inline_data; /* Max inline data. */
unsigned int max_tso_header; /* Max TSO header size. */
struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
- struct priv *priv; /* Back pointer to private data. */
+ struct mlx5_priv *priv; /* Back pointer to private data. */
struct mlx5_txq_data txq; /* Data path structure. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
volatile void *bf_reg_orig; /* Blueflame register from verbs. */
@@ -491,7 +491,7 @@ check_cqe(volatile struct mlx5_cqe *cqe,
op_code, op_code, syndrome);
rte_hexdump(stderr, "MLX5 Error CQE:",
(const void *)((uintptr_t)err_cqe),
- sizeof(*err_cqe));
+ sizeof(*cqe));
}
return 1;
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
@@ -568,6 +568,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
}
#endif /* NDEBUG */
++cq_ci;
+ rte_cio_rmb();
txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
ctrl = (volatile struct mlx5_wqe_ctrl *)
tx_mlx5_wqe(txq, txq->wqe_pi);
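The added rte_cio_rmb() appears to order the load of cqe->wqe_counter after the load that validated the CQE, so a weakly ordered CPU cannot speculatively read a stale counter before ownership of the CQE is confirmed. The consume pattern, condensed with hypothetical helper names:

    if (cqe_is_valid(cqe)) {      /* load #1: ownership/validity check */
            rte_cio_rmb();        /* loads below must not move above   */
            wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);  /* load #2   */
    }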
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 340292ad..9a3a5ae4 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -273,7 +273,7 @@ mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev)
int __attribute__((cold))
mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
if (!priv->config.tx_vec_en ||
@@ -318,7 +318,7 @@ mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
int __attribute__((cold))
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t i;
if (!priv->config.rx_vec_en)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index fda7004e..86735044 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -102,7 +102,22 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
return;
}
for (i = 0; i < n; ++i) {
- wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+ void *buf_addr;
+
+ /*
+	 * Load the virtual address for Rx WQE. Non-x86 processors
+ * (mostly RISC such as ARM and Power) are more vulnerable to
+ * load stall. For x86, reducing the number of instructions
+ * seems to matter most.
+ */
+#ifdef RTE_ARCH_X86_64
+ buf_addr = elts[i]->buf_addr;
+#else
+ buf_addr = (char *)elts[i] + sizeof(struct rte_mbuf) +
+ rte_pktmbuf_priv_size(rxq->mp);
+ assert(buf_addr == elts[i]->buf_addr);
+#endif
+ wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
RTE_PKTMBUF_HEADROOM);
/* If there's only one MR, no need to replace LKey in WQE. */
if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
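On the non-x86 path the buffer address is recomputed from the fixed mbuf layout instead of being loaded from each mbuf, trading a dependent load for cheap arithmetic; the assert documents that both forms agree. A standalone check of that invariant, under the usual assumption that the data buffer directly follows the mbuf header and private area:

    /* Hypothetical standalone check of the layout invariant. */
    #include <assert.h>
    #include <rte_mbuf.h>

    static void check_layout(struct rte_mbuf *m, struct rte_mempool *mp)
    {
            void *calc = (char *)m + sizeof(struct rte_mbuf) +
                         rte_pktmbuf_priv_size(mp);
            assert(calc == m->buf_addr);  /* holds for mempool-backed mbufs */
    }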
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 883fe1bf..38e915c5 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -104,6 +104,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
unsigned int n;
volatile struct mlx5_wqe *wqe = NULL;
+ bool metadata_ol =
+ txq->offloads & DEV_TX_OFFLOAD_MATCH_METADATA ? true : false;
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
@@ -127,6 +129,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
uint8x16_t *t_wqe;
uint8_t *dseg;
uint8x16_t ctrl;
+ rte_be32_t metadata =
+ metadata_ol && (buf->ol_flags & PKT_TX_METADATA) ?
+ buf->tx_metadata : 0;
assert(segs_n);
max_elts = elts_n - (elts_head - txq->elts_tail);
@@ -164,9 +169,10 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
vst1q_u8((void *)t_wqe, ctrl);
/* Fill ESEG in the header. */
- vst1q_u16((void *)(t_wqe + 1),
- ((uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
- 0, 0, 0, 0 }));
+ vst1q_u32((void *)(t_wqe + 1),
+ ((uint32x4_t){ 0,
rte_cpu_to_be_16(len) << 16 | cs_flags,
+ metadata, 0 }));
txq->wqe_ci = wqe_ci;
}
if (!n)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 14117c4b..fb384efd 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -104,6 +104,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
unsigned int n;
volatile struct mlx5_wqe *wqe = NULL;
+ bool metadata_ol =
+ txq->offloads & DEV_TX_OFFLOAD_MATCH_METADATA ? true : false;
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
@@ -125,6 +127,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
uint16_t max_wqe;
__m128i *t_wqe, *dseg;
__m128i ctrl;
+ rte_be32_t metadata =
+ metadata_ol && (buf->ol_flags & PKT_TX_METADATA) ?
+ buf->tx_metadata : 0;
assert(segs_n);
max_elts = elts_n - (elts_head - txq->elts_tail);
@@ -165,9 +170,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
_mm_store_si128(t_wqe, ctrl);
/* Fill ESEG in the header. */
_mm_store_si128(t_wqe + 1,
- _mm_set_epi16(0, 0, 0, 0,
- rte_cpu_to_be_16(len), cs_flags,
- 0, 0));
+ _mm_set_epi32(0, metadata,
+ (rte_cpu_to_be_16(len) << 16) |
+ cs_flags, 0));
txq->wqe_ci = wqe_ci;
}
if (!n)
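Both vector paths now fill the ESEG with 32-bit lanes: the lane carrying be16(len) << 16 | cs_flags lays down, on a little-endian host, the same bytes as the two 16-bit lanes { cs_flags, be16(len) } used before, so only the metadata dword is genuinely new. A hypothetical byte-level check of that equivalence:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            uint16_t cs_flags = 0xc0, be_len = 0x3412;  /* sample values */
            uint32_t merged = ((uint32_t)be_len << 16) | cs_flags;
            uint16_t lanes[2] = { cs_flags, be_len };

            /* same four bytes on a little-endian host */
            assert(memcmp(&merged, lanes, sizeof(merged)) == 0);
            return 0;
    }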
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 00106171..41cac3c6 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -26,7 +26,7 @@
int
mlx5_socket_init(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
@@ -98,7 +98,7 @@ error:
void
mlx5_socket_uninit(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket);
claim_zero(close(priv->primary_socket));
@@ -115,7 +115,7 @@ mlx5_socket_uninit(struct rte_eth_dev *dev)
void
mlx5_socket_handle(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int conn_sock;
int ret = 0;
struct cmsghdr *cmsg = NULL;
@@ -208,7 +208,7 @@ error:
int
mlx5_socket_connect(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index fccb9af0..132bf5b4 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -143,7 +143,7 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
static int
mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
struct ifreq ifr;
@@ -221,7 +221,7 @@ mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
void
mlx5_xstats_init(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
unsigned int j;
@@ -312,7 +312,7 @@ int
mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
unsigned int n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
uint64_t counters[n];
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
@@ -353,7 +353,7 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
int
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_eth_stats tmp;
unsigned int i;
unsigned int idx;
@@ -416,7 +416,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
void
mlx5_stats_reset(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int idx;
@@ -448,7 +448,7 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
void
mlx5_xstats_reset(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
unsigned int i;
@@ -492,7 +492,7 @@ mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
unsigned int i;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int mlx5_xstats_n = xstats_ctrl->mlx5_stats_n;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index e2a9bb70..f874657c 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -23,7 +23,7 @@
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->txqs_n; ++i)
@@ -42,7 +42,7 @@ mlx5_txq_stop(struct rte_eth_dev *dev)
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
int ret;
@@ -83,7 +83,7 @@ error:
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i)
@@ -102,7 +102,7 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
int ret = 0;
@@ -157,7 +157,7 @@ error:
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
@@ -221,7 +221,7 @@ error:
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use. */
@@ -252,7 +252,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_item_eth bcast = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
@@ -379,7 +379,7 @@ error:
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index b01bd675..c5a3d1b4 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -102,7 +102,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
uint64_t
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT);
struct mlx5_dev_config *config = &priv->config;
@@ -155,7 +155,7 @@ int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
@@ -213,7 +213,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
{
struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
struct mlx5_txq_ctrl *txq_ctrl;
- struct priv *priv;
+ struct mlx5_priv *priv;
unsigned int i;
if (txq == NULL)
@@ -246,7 +246,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
int
mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i, j;
uintptr_t pages[priv->txqs_n];
unsigned int pages_n = 0;
@@ -346,7 +346,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
* @param dev
* Pointer to Ethernet device.
* @param idx
- * Queue index in DPDK Rx queue array
+ * Queue index in DPDK Tx queue array.
*
* @return
* The Verbs object initialised, NULL otherwise and rte_errno is set.
@@ -354,7 +354,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
struct mlx5_txq_ibv *
mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -554,7 +554,7 @@ error:
* @param dev
* Pointer to Ethernet device.
* @param idx
- * Queue index in DPDK Rx queue array
+ * Queue index in DPDK Tx queue array.
*
* @return
* The Verbs object if it exists.
@@ -562,7 +562,7 @@ error:
struct mlx5_txq_ibv *
mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq_ctrl;
if (idx >= priv->txqs_n)
@@ -623,7 +623,7 @@ mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
int
mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_txq_ibv *txq_ibv;
@@ -636,6 +636,27 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
}
/**
+ * Calculate the total number of WQEBB for Tx queue.
+ *
+ * Simplified version of calc_sq_size() in rdma-core.
+ *
+ * @param txq_ctrl
+ * Pointer to Tx queue control structure.
+ *
+ * @return
+ * The number of WQEBB.
+ */
+static int
+txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
+{
+ unsigned int wqe_size;
+ const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
+
+ wqe_size = MLX5_WQE_SIZE + txq_ctrl->max_inline_data;
+ return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
+}
+
+/**
* Set Tx queue parameters from device configuration.
*
* @param txq_ctrl
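A worked example of the formula (MLX5_WQE_SIZE is 64 bytes in this PMD; the other numbers are purely illustrative): with desc = 512 and max_inline_data = 128, wqe_size = 64 + 128 = 192, 192 * 512 = 98304 is rounded up to the next power of two, 131072, and 131072 / 64 = 2048 WQEBBs, which must stay within max_qp_wr or queue creation now fails early.

    /* Worked example; sizes hypothetical except MLX5_WQE_SIZE = 64. */
    unsigned int wqe_size = 64 + 128;             /* WQE + max inline data */
    unsigned int desc = 512;
    unsigned int wqebb = rte_align32pow2(wqe_size * desc) / 64;
    /* 192 * 512 = 98304 -> align32pow2 = 131072 -> 2048 WQEBBs */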
@@ -644,7 +665,7 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
- struct priv *priv = txq_ctrl->priv;
+ struct mlx5_priv *priv = txq_ctrl->priv;
struct mlx5_dev_config *config = &priv->config;
const unsigned int max_tso_inline =
((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
@@ -754,7 +775,7 @@ struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *tmpl;
tmpl = rte_calloc_socket("TXQ", 1,
@@ -780,10 +801,16 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->txq.elts_n = log2above(desc);
tmpl->idx = idx;
txq_set_params(tmpl);
- DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
- dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
- DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
- dev->data->port_id, priv->device_attr.orig_attr.max_sge);
+ if (txq_calc_wqebb_cnt(tmpl) >
+ priv->device_attr.orig_attr.max_qp_wr) {
+ DRV_LOG(ERR,
+ "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
+ " try smaller queue size",
+ dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
+ priv->device_attr.orig_attr.max_qp_wr);
+ rte_errno = ENOMEM;
+ goto error;
+ }
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
@@ -809,7 +836,7 @@ error:
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *ctrl = NULL;
if ((*priv->txqs)[idx]) {
@@ -835,7 +862,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
size_t page_size = sysconf(_SC_PAGESIZE);
@@ -872,7 +899,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
if (!(*priv->txqs)[idx])
@@ -893,7 +920,7 @@ mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_txq_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
int ret = 0;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index c91d08be..6568a3a4 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -48,7 +48,7 @@
int
mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
@@ -102,7 +102,7 @@ out:
void
mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
@@ -160,7 +160,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
int
mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
if (mask & ETH_VLAN_STRIP_MASK) {
diff --git a/drivers/net/mvpp2/mrvl_mtr.c b/drivers/net/mvpp2/mrvl_mtr.c
index 9adcd975..39272ace 100644
--- a/drivers/net/mvpp2/mrvl_mtr.c
+++ b/drivers/net/mvpp2/mrvl_mtr.c
@@ -1,5 +1,4 @@
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Marvell International Ltd.
* Copyright(c) 2018 Semihalf.
* All rights reserved.
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index da76b0db..fad209f2 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -733,6 +733,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
hv->port_id = eth_dev->data->port_id;
hv->latency = HN_CHAN_LATENCY_NS;
hv->max_queues = 1;
+ hv->vf_port = HN_INVALID_PORT;
err = hn_parse_args(eth_dev);
if (err)
@@ -786,7 +787,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);
/* If VF was reported but not added, do it now */
- if (hv->vf_present && !hv->vf_dev) {
+ if (hv->vf_present && !hn_vf_attached(hv)) {
PMD_INIT_LOG(DEBUG, "Adding VF device");
err = hn_vf_add(eth_dev, hv);
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 487f7646..5ffc0ee1 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -123,7 +123,7 @@ hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
stats->size_bins[0]++;
else if (s < 1519)
stats->size_bins[6]++;
- else if (s >= 1519)
+ else
stats->size_bins[7]++;
}
@@ -1305,8 +1305,8 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return 0;
/* Transmit over VF if present and up */
- vf_dev = hv->vf_dev;
- rte_compiler_barrier();
+ vf_dev = hn_get_vf_dev(hv);
+
if (vf_dev && vf_dev->data->dev_started) {
void *sub_q = vf_dev->data->tx_queues[queue_id];
@@ -1385,6 +1385,24 @@ fail:
return nb_tx;
}
+static uint16_t
+hn_recv_vf(uint16_t vf_port, const struct hn_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ uint16_t i, n;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ n = rte_eth_rx_burst(vf_port, rxq->queue_id, rx_pkts, nb_pkts);
+
+ /* relabel the received mbufs */
+ for (i = 0; i < n; i++)
+ rx_pkts[i]->port = rxq->port_id;
+
+ return n;
+}
+
uint16_t
hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
@@ -1396,30 +1414,21 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (unlikely(hv->closed))
return 0;
- vf_dev = hv->vf_dev;
- rte_compiler_barrier();
+ /* Receive from VF if present and up */
+ vf_dev = hn_get_vf_dev(hv);
- if (vf_dev && vf_dev->data->dev_started) {
- /* Normally, with SR-IOV the ring buffer will be empty */
+ /* Check for new completions */
+ if (likely(rte_ring_count(rxq->rx_ring) < nb_pkts))
hn_process_events(hv, rxq->queue_id, 0);
- /* Get mbufs some bufs off of staging ring */
- nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
- (void **)rx_pkts,
- nb_pkts / 2, NULL);
- /* And rest off of VF */
- nb_rcv += rte_eth_rx_burst(vf_dev->data->port_id,
- rxq->queue_id,
- rx_pkts + nb_rcv, nb_pkts - nb_rcv);
- } else {
- /* If receive ring is not full then get more */
- if (rte_ring_count(rxq->rx_ring) < nb_pkts)
- hn_process_events(hv, rxq->queue_id, 0);
+ /* Always check the vmbus path for multicast and new flows */
+ nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
+ (void **)rx_pkts, nb_pkts, NULL);
- nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
- (void **)rx_pkts,
- nb_pkts, NULL);
- }
+ /* If VF is available, check that as well */
+ if (vf_dev && vf_dev->data->dev_started)
+ nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
+ rx_pkts + nb_rcv, nb_pkts - nb_rcv);
return nb_rcv;
}
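The rewritten receive path always drains the vmbus staging ring first (where multicast and not-yet-offloaded flows land) and only then tops up from the VF, instead of splitting the budget in half when a VF is present. Condensed shape of the new flow, with a hypothetical helper for the ring dequeue:

    nb = dequeue_staging_ring(rxq, pkts, budget);   /* vmbus path first */
    if (vf_dev && vf_dev->data->dev_started)        /* then the VF path */
            nb += hn_recv_vf(vf_dev->data->port_id, rxq,
                             pkts + nb, budget - nb);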
diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
index a6516c1e..b3156343 100644
--- a/drivers/net/netvsc/hn_var.h
+++ b/drivers/net/netvsc/hn_var.h
@@ -91,14 +91,18 @@ struct hn_rx_bufinfo {
struct rte_mbuf_ext_shared_info shinfo;
} __rte_cache_aligned;
+#define HN_INVALID_PORT UINT16_MAX
+
struct hn_data {
struct rte_vmbus_device *vmbus;
struct hn_rx_queue *primary;
- struct rte_eth_dev *vf_dev; /* Subordinate device */
rte_spinlock_t vf_lock;
uint16_t port_id;
- bool closed;
- bool vf_present;
+ uint16_t vf_port;
+
+ uint8_t vf_present;
+ uint8_t closed;
+
uint32_t link_status;
uint32_t link_speed;
@@ -169,6 +173,28 @@ int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_mempool *mp);
void hn_dev_rx_queue_release(void *arg);
+/* Check if VF is attached */
+static inline bool
+hn_vf_attached(const struct hn_data *hv)
+{
+ return hv->vf_port != HN_INVALID_PORT;
+}
+
+/* Get VF device for existing netvsc device */
+static inline struct rte_eth_dev *
+hn_get_vf_dev(const struct hn_data *hv)
+{
+ uint16_t vf_port = hv->vf_port;
+
+ /* make sure vf_port is loaded */
+ rte_smp_rmb();
+
+ if (vf_port == HN_INVALID_PORT)
+ return NULL;
+ else
+ return &rte_eth_devices[vf_port];
+}
+
void hn_vf_info_get(struct hn_data *hv,
struct rte_eth_dev_info *info);
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
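Replacing the cached rte_eth_dev pointer with a port number plus explicit barriers gives the data path a torn-read-free handle: a 16-bit store is atomic, the writer issues rte_smp_wmb() after updating it, and readers pair that with rte_smp_rmb() before touching rte_eth_devices[]. A minimal sketch of the pairing:

    /* writer (control path) */
    hv->vf_port = port_id;        /* or HN_INVALID_PORT on detach */
    rte_smp_wmb();

    /* reader (data path) */
    uint16_t p = hv->vf_port;
    rte_smp_rmb();
    if (p != HN_INVALID_PORT)
            use(&rte_eth_devices[p]);   /* use() is hypothetical */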
diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c
index 3f714ec9..4127e411 100644
--- a/drivers/net/netvsc/hn_vf.c
+++ b/drivers/net/netvsc/hn_vf.c
@@ -10,8 +10,8 @@
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
+#include <fcntl.h>
#include <sys/types.h>
-#include <sys/fcntl.h>
#include <sys/uio.h>
#include <rte_ether.h>
@@ -51,15 +51,20 @@ static int hn_vf_match(const struct rte_eth_dev *dev)
return -ENOENT;
}
+
/*
* Attach new PCI VF device and return the port_id
*/
-static int hn_vf_attach(struct hn_data *hv, uint16_t port_id,
- struct rte_eth_dev **vf_dev)
+static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
int ret;
+ if (hn_vf_attached(hv)) {
+ PMD_DRV_LOG(ERR, "VF already attached");
+ return -EEXIST;
+ }
+
ret = rte_eth_dev_owner_get(port_id, &owner);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
@@ -79,8 +84,9 @@ static int hn_vf_attach(struct hn_data *hv, uint16_t port_id,
}
PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
+ hv->vf_port = port_id;
rte_smp_wmb();
- *vf_dev = &rte_eth_devices[port_id];
+
return 0;
}
@@ -96,12 +102,7 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
}
rte_spinlock_lock(&hv->vf_lock);
- if (hv->vf_dev) {
- PMD_DRV_LOG(ERR, "VF already attached");
- err = -EBUSY;
- } else {
- err = hn_vf_attach(hv, port, &hv->vf_dev);
- }
+ err = hn_vf_attach(hv, port);
if (err == 0) {
dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
@@ -120,22 +121,22 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
/* Remove new VF device */
static void hn_vf_remove(struct hn_data *hv)
{
- struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
- if (!vf_dev) {
+
+ if (!hn_vf_attached(hv)) {
PMD_DRV_LOG(ERR, "VF path not active");
- rte_spinlock_unlock(&hv->vf_lock);
- return;
- }
+	} else {
+		uint16_t vf_port = hv->vf_port;
+
+		/* Stop incoming packets from arriving on VF */
+		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
-	/* Stop incoming packets from arriving on VF */
-	hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
-	hv->vf_dev = NULL;
+		/* Stop transmission over VF */
+		hv->vf_port = HN_INVALID_PORT;
+		rte_smp_wmb();
-	/* Give back ownership */
-	rte_eth_dev_owner_unset(vf_dev->data->port_id, hv->owner.id);
+		/* Give back ownership using the saved port id, since
+		 * hv->vf_port has just been invalidated.
+		 */
+		rte_eth_dev_owner_unset(vf_port, hv->owner.id);
+ }
rte_spinlock_unlock(&hv->vf_lock);
}
@@ -207,7 +208,7 @@ void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
hn_vf_info_merge(vf_dev, info);
rte_spinlock_unlock(&hv->vf_lock);
@@ -221,7 +222,7 @@ int hn_vf_link_update(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->link_update)
ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
rte_spinlock_unlock(&hv->vf_lock);
@@ -249,13 +250,14 @@ static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
}
static int _hn_vf_configure(struct rte_eth_dev *dev,
- struct rte_eth_dev *vf_dev,
+ uint16_t vf_port,
const struct rte_eth_conf *dev_conf)
{
struct rte_eth_conf vf_conf = *dev_conf;
- uint16_t vf_port = vf_dev->data->port_id;
+ struct rte_eth_dev *vf_dev;
int ret;
+ vf_dev = &rte_eth_devices[vf_port];
if (dev_conf->intr_conf.lsc &&
(vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
@@ -294,13 +296,11 @@ int hn_vf_configure(struct rte_eth_dev *dev,
const struct rte_eth_conf *dev_conf)
{
struct hn_data *hv = dev->data->dev_private;
- struct rte_eth_dev *vf_dev;
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
- if (vf_dev)
- ret = _hn_vf_configure(dev, vf_dev, dev_conf);
+ if (hv->vf_port != HN_INVALID_PORT)
+ ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
rte_spinlock_unlock(&hv->vf_lock);
return ret;
}
@@ -312,7 +312,7 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
const uint32_t *ptypes = NULL;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
rte_spinlock_unlock(&hv->vf_lock);
@@ -327,7 +327,7 @@ int hn_vf_start(struct rte_eth_dev *dev)
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_dev_start(vf_dev->data->port_id);
rte_spinlock_unlock(&hv->vf_lock);
@@ -340,7 +340,7 @@ void hn_vf_stop(struct rte_eth_dev *dev)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
rte_eth_dev_stop(vf_dev->data->port_id);
rte_spinlock_unlock(&hv->vf_lock);
@@ -352,7 +352,7 @@ void hn_vf_stop(struct rte_eth_dev *dev)
struct hn_data *hv = (dev)->data->dev_private; \
struct rte_eth_dev *vf_dev; \
rte_spinlock_lock(&hv->vf_lock); \
- vf_dev = hv->vf_dev; \
+ vf_dev = hn_get_vf_dev(hv); \
if (vf_dev) \
func(vf_dev->data->port_id); \
rte_spinlock_unlock(&hv->vf_lock); \
@@ -402,7 +402,7 @@ int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
mc_addr_set, nb_mc_addr);
@@ -420,7 +420,7 @@ int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
queue_idx, nb_desc,
@@ -434,7 +434,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
void *subq = vf_dev->data->tx_queues[queue_id];
@@ -455,7 +455,7 @@ int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
queue_idx, nb_desc,
@@ -469,7 +469,7 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
void *subq = vf_dev->data->rx_queues[queue_id];
@@ -486,7 +486,7 @@ int hn_vf_stats_get(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
rte_spinlock_unlock(&hv->vf_lock);
@@ -503,7 +503,7 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
char tmp[RTE_ETH_XSTATS_NAME_SIZE];
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->xstats_get_names)
count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
rte_spinlock_unlock(&hv->vf_lock);
@@ -528,7 +528,7 @@ int hn_vf_xstats_get(struct rte_eth_dev *dev,
int count = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->xstats_get)
count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
rte_spinlock_unlock(&hv->vf_lock);
@@ -542,7 +542,7 @@ void hn_vf_xstats_reset(struct rte_eth_dev *dev)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->xstats_reset)
vf_dev->dev_ops->xstats_reset(vf_dev);
rte_spinlock_unlock(&hv->vf_lock);
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 54c6da92..68c853c9 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -566,7 +566,10 @@ nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
/* Signal the NIC about the change */
update = NFP_NET_CFG_UPDATE_MACADDR;
- ctrl = hw->ctrl | NFP_NET_CFG_CTRL_LIVE_ADDR;
+ ctrl = hw->ctrl;
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
+ (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
+ ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
if (nfp_net_reconfig(hw, ctrl, update) < 0) {
PMD_INIT_LOG(INFO, "MAC address update failed");
return -EIO;
@@ -758,7 +761,7 @@ nfp_net_start(struct rte_eth_dev *dev)
return -EIO;
/*
- * Allocating rte mbuffs for configured rx queues.
+ * Allocating rte mbufs for configured rx queues.
* This requires queues being enabled before
*/
if (nfp_net_rx_freelist_setup(dev) < 0) {
@@ -1487,7 +1490,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
if (rxq == NULL)
return -ENOMEM;
- /* Hw queues mapping based on firmware confifguration */
+ /* Hw queues mapping based on firmware configuration */
rxq->qidx = queue_idx;
rxq->fl_qcidx = queue_idx * hw->stride_rx;
rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
@@ -1519,7 +1522,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
socket_id);
if (tz == NULL) {
- PMD_DRV_LOG(ERR, "Error allocatig rx dma");
+ PMD_DRV_LOG(ERR, "Error allocating rx dma");
nfp_net_rx_queue_release(rxq);
return -ENOMEM;
}
@@ -1906,7 +1909,7 @@ nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
/*
* RX path design:
*
- * There are some decissions to take:
+ * There are some decisions to take:
* 1) How to check DD RX descriptors bit
* 2) How and when to allocate new mbufs
*
@@ -1976,7 +1979,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_rmb();
/*
- * We got a packet. Let's alloc a new mbuff for refilling the
+ * We got a packet. Let's alloc a new mbuf for refilling the
* free descriptor ring as soon as possible
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
@@ -1991,8 +1994,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_hold++;
/*
- * Grab the mbuff and refill the descriptor with the
- * previously allocated mbuff
+ * Grab the mbuf and refill the descriptor with the
+ * previously allocated mbuf
*/
mb = rxb->mbuf;
rxb->mbuf = new_mb;
@@ -2024,7 +2027,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return -EINVAL;
}
- /* Filling the received mbuff with packet info */
+ /* Filling the received mbuf with packet info */
if (hw->rx_offset)
mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
else
@@ -2049,7 +2052,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
- /* Adding the mbuff to the mbuff array passed by the app */
+ /* Adding the mbuf to the mbuf array passed by the app */
rx_pkts[avail++] = mb;
/* Now resetting and updating the descriptor */
@@ -2443,7 +2446,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
for (j = 0; j < 4; j++) {
if (!(mask & (0x1 << j)))
continue;
- reta_conf->reta[shift + j] =
+ reta_conf[idx].reta[shift + j] =
(uint8_t)((reta >> (8 * j)) & 0xF);
}
}
@@ -2789,9 +2792,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
case PCI_DEVICE_ID_NFP6000_PF_NIC:
case PCI_DEVICE_ID_NFP6000_VF_NIC:
start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
- tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+ tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
- rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+ rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
break;
default:
PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
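Without the cast, the multiplication is performed in 32-bit arithmetic and only the truncated result is widened to uint64_t, so large queue indices silently wrap. A hypothetical demonstration with sample values:

    #include <stdint.h>

    int main(void)
    {
            uint32_t start_q = 0x400000, sz = 0x800;    /* sample values */
            uint64_t bad  = start_q * sz;               /* 32-bit mul wraps to 0 */
            uint64_t good = (uint64_t)start_q * sz;     /* 0x200000000 */

            return bad == good;   /* 0: they differ, showing the overflow */
    }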
@@ -2954,9 +2957,9 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
return -ENOMEM;
if (ports > 1)
- sprintf(port_name, "%s_port%d", dev->device.name, port);
+ snprintf(port_name, 100, "%s_port%d", dev->device.name, port);
else
- sprintf(port_name, "%s", dev->device.name);
+ strlcat(port_name, dev->device.name, 100);
eth_dev = rte_eth_dev_allocate(port_name);
if (!eth_dev)
@@ -3021,28 +3024,31 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
/* Looking for firmware file in order of priority */
/* First try to find a firmware image specific for this device */
- sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
+ snprintf(serial, sizeof(serial),
+ "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
cpp->interface & 0xff);
- sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
+ snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
+ serial);
PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
fw_f = open(fw_name, O_RDONLY);
- if (fw_f > 0)
+ if (fw_f >= 0)
goto read_fw;
/* Then try the PCI name */
- sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name);
+ snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
+ dev->device.name);
PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
fw_f = open(fw_name, O_RDONLY);
- if (fw_f > 0)
+ if (fw_f >= 0)
goto read_fw;
/* Finally try the card type and media */
- sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card);
+ snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
fw_f = open(fw_name, O_RDONLY);
if (fw_f < 0) {
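open() returns the lowest free descriptor, and 0 is a legal value if stdin was closed earlier, so the old "fw_f > 0" test could misclassify a successful open as failure. A hypothetical demonstration:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            close(0);                            /* free descriptor 0 */
            int fd = open("/dev/null", O_RDONLY);
            return fd;                           /* very likely 0, yet valid */
    }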
@@ -3118,8 +3124,9 @@ nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
- sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model,
- nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000);
+ snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
+ nfp_fw_model, nfp_eth_table->count,
+ nfp_eth_table->ports[0].speed / 1000);
nsp = nfp_nsp_open(cpp);
if (!nsp) {
diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h
index 21e17da1..fc3540a2 100644
--- a/drivers/net/nfp/nfp_net_ctrl.h
+++ b/drivers/net/nfp/nfp_net_ctrl.h
@@ -122,7 +122,7 @@
#define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */
#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */
#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */
-#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1 << 31) /* live MAC addr change */
+#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1U << 31) /* live MAC addr change */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -135,7 +135,7 @@
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
-#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */
+#define NFP_NET_CFG_UPDATE_ERR (0x1U << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
#define NFP_NET_CFG_MTU 0x0018
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index b01036df..d70cbc63 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -34,7 +34,7 @@
*
* @file dpdk/pmd/nfp_net_pmd.h
*
- * Netronome NFP_NET PDM driver
+ * Netronome NFP_NET PMD driver
*/
#ifndef _NFP_NET_PMD_H_
@@ -240,7 +240,7 @@ struct nfp_net_txq {
uint32_t tx_free_thresh;
/*
- * For each descriptor keep a reference to the mbuff and
+ * For each descriptor keep a reference to the mbuf and
* DMA address used until completion is signalled.
*/
struct {
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
index 6e380cca..538f882b 100644
--- a/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
@@ -368,6 +368,9 @@ _nfp6000_encode_mu(uint64_t *addr, int dest_island, int mode, int addr40,
isld[1] = isld1;
locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+ if (locality_lsb < 0)
+ return NFP_ERRNO(EINVAL);
+
if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
da = 1;
else
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
index c68d9400..37b7991f 100644
--- a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
+++ b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
@@ -16,7 +16,9 @@
#include <assert.h>
#include <stdio.h>
+#if defined(RTE_BACKTRACE)
#include <execinfo.h>
+#endif
#include <stdlib.h>
#include <unistd.h>
#include <stdint.h>
@@ -788,17 +790,17 @@ nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev)
if (cpp->driver_lock_needed) {
ret = nfp_acquire_process_lock(desc);
if (ret)
- return -1;
+ goto error;
}
if (nfp6000_set_model(dev, cpp) < 0)
- return -1;
+ goto error;
if (nfp6000_set_interface(dev, cpp) < 0)
- return -1;
+ goto error;
if (nfp6000_set_serial(dev, cpp) < 0)
- return -1;
+ goto error;
if (nfp6000_set_barsz(dev, desc) < 0)
- return -1;
+ goto error;
desc->cfg = (char *)dev->mem_resource[0].addr;
@@ -809,7 +811,11 @@ nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev)
model = __nfp_cpp_model_autodetect(cpp);
nfp_cpp_model_set(cpp, model);
- return ret;
+ return 0;
+
+error:
+ free(desc);
+ return -1;
}
static void
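The repeated early returns leaked the descriptor allocated earlier in nfp6000_init(); funneling every failure through one label frees it exactly once. The idiom, reduced to a sketch with hypothetical names:

    #include <stdlib.h>

    struct desc;                     /* opaque here */
    int step_a(struct desc *d);      /* assumed helpers */
    int step_b(struct desc *d);

    int init_sketch(size_t sz)
    {
            struct desc *d = malloc(sz);
            if (d == NULL)
                    return -1;
            if (step_a(d) < 0 || step_b(d) < 0)
                    goto error;      /* all failures share one exit */
            return 0;
    error:
            free(d);                 /* single point of release */
            return -1;
    }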
diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c
index 75d3c974..dec4a8b6 100644
--- a/drivers/net/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c
@@ -801,7 +801,8 @@ __nfp_cpp_model_autodetect(struct nfp_cpp *cpp)
uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0);
uint32_t model = 0;
- nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model);
+ if (nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model))
+ return 0;
if (NFP_CPP_MODEL_IS_6000(model)) {
uint32_t tmp;
@@ -810,8 +811,10 @@ __nfp_cpp_model_autodetect(struct nfp_cpp *cpp)
/* The PL's PluDeviceID revision code is authoratative */
model &= ~0xff;
- nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) +
- NFP_PL_DEVICE_ID, &tmp);
+ if (nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) +
+ NFP_PL_DEVICE_ID, &tmp))
+ return 0;
+
model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10;
}
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 920f6f89..2a4a08af 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -21,6 +21,7 @@
#include "base/octeontx_pkovf.h"
#include "base/octeontx_io.h"
+#define OCTEONTX_PMD net_octeontx
#define OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT 12
#define OCTEONTX_VDEV_NR_PORT_ARG ("nr_port")
#define OCTEONTX_MAX_NAME_LEN 32
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 9fd93277..65bbd7e2 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -1258,7 +1258,8 @@ create_eth:
if (pp == NULL) {
PMD_LOG(ERR,
"Failed to allocate memory for process private");
- return -1;
+ ret = -1;
+ goto free_kvlist;
}
eth_dev->dev_ops = &ops;
@@ -1281,7 +1282,7 @@ create_eth:
eth_dev->tx_pkt_burst = eth_pcap_tx;
rte_eth_dev_probing_finish(eth_dev);
- return 0;
+ goto free_kvlist;
}
ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index 2aaf298f..7047eb9f 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -400,9 +400,9 @@
#define QM_BYTE_CRD_REG_WIDTH 24
#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
#define QM_WFQ_CRD_REG_WIDTH 32
-#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_SIGN_BIT (1U << (QM_WFQ_CRD_REG_WIDTH - 1))
#define QM_RL_CRD_REG_WIDTH 32
-#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_SIGN_BIT (1U << (QM_RL_CRD_REG_WIDTH - 1))
/*****************/
/* CAU CONSTANTS */
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 6d4a4dd7..2ce0ea9e 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -2250,7 +2250,7 @@ struct igu_cleanup {
#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
/* must always be set (use enum command_type_bit) */
-#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1U
#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
__le32 reserved1;
};
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index 158ca673..7bc09479 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -2420,7 +2420,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
#define GFT_RAM_LINE_DST_PORT_MASK 0x1
#define GFT_RAM_LINE_DST_PORT_SHIFT 30
-#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1U
#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
__le32 hi;
#define GFT_RAM_LINE_DSCP_MASK 0x1
diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h
index b8c2686f..92361e79 100644
--- a/drivers/net/qede/base/ecore_hw_defs.h
+++ b/drivers/net/qede/base/ecore_hw_defs.h
@@ -51,7 +51,7 @@ struct igu_ctrl_reg {
#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
#define IGU_CTRL_REG_RESERVED_MASK 0x1
#define IGU_CTRL_REG_RESERVED_SHIFT 28
-#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_MASK 0x1U /* use enum igu_ctrl_cmd */
#define IGU_CTRL_REG_TYPE_SHIFT 31
};
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 518673dc..0b2f305e 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -2735,7 +2735,8 @@ static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_qedevf_pmd = {
.id_table = pci_id_qedevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = qedevf_eth_dev_pci_probe,
.remove = qedevf_eth_dev_pci_remove,
};
@@ -2754,7 +2755,8 @@ static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_qede_pmd = {
.id_table = pci_id_qede_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = qede_eth_dev_pci_probe,
.remove = qede_eth_dev_pci_remove,
};
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index eda19b2b..27bac099 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1420,13 +1420,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint32_t rss_hash;
int rx_alloc_count = 0;
- hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
- sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
-
- rte_rmb();
-
- if (hw_comp_cons == sw_comp_cons)
- return 0;
/* Allocate buffers that we used in previous loop */
if (rxq->rx_alloc_count) {
@@ -1447,6 +1440,14 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->rx_alloc_count = 0;
}
+ hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ rte_rmb();
+
+ if (hw_comp_cons == sw_comp_cons)
+ return 0;
+
while (sw_comp_cons != hw_comp_cons) {
ol_flags = 0;
packet_type = RTE_PTYPE_UNKNOWN;
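Moving the completion check after the refill matters because the old early "nothing to do" return skipped replenishing the buffers consumed by the previous call, and a ring that is never refilled eventually starves. Condensed shape of the fixed entry path, with hypothetical helpers:

    if (rxq->rx_alloc_count) {                  /* refill what the */
            refill_rx_buffers(rxq, rxq->rx_alloc_count); /* last call used */
            rxq->rx_alloc_count = 0;
    }
    if (no_new_completions(rxq))                /* only now bail out */
            return 0;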
@@ -1819,7 +1820,7 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
temp == PKT_TX_TUNNEL_GENEVE ||
temp == PKT_TX_TUNNEL_MPLSINUDP ||
temp == PKT_TX_TUNNEL_GRE)
- break;
+ continue;
}
rte_errno = -ENOTSUP;
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index aeb48f5e..c438da51 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -45,8 +45,8 @@ struct ring_queue {
};
struct pmd_internals {
- unsigned max_rx_queues;
- unsigned max_tx_queues;
+ unsigned int max_rx_queues;
+ unsigned int max_tx_queues;
struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
@@ -55,12 +55,11 @@ struct pmd_internals {
enum dev_action action;
};
-
static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_FIXED,
};
static int eth_ring_logtype;
@@ -138,6 +137,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
struct rte_mempool *mb_pool __rte_unused)
{
struct pmd_internals *internals = dev->data->dev_private;
+
dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
return 0;
}
@@ -149,6 +149,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct pmd_internals *internals = dev->data->dev_private;
+
dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
return 0;
}
@@ -156,9 +157,10 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
static void
eth_dev_info(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
+ struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
+
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
@@ -169,7 +171,7 @@ eth_dev_info(struct rte_eth_dev *dev,
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- unsigned i;
+ unsigned int i;
unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
@@ -197,8 +199,9 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
- unsigned i;
+ unsigned int i;
struct pmd_internals *internal = dev->data->dev_private;
+
for (i = 0; i < dev->data->nb_rx_queues; i++)
internal->rx_ring_queues[i].rx_pkts.cnt = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -250,8 +253,10 @@ static struct rte_vdev_driver pmd_ring_drv;
static int
do_eth_dev_ring_create(const char *name,
- struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
- struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
+ struct rte_ring * const rx_queues[],
+ const unsigned int nb_rx_queues,
+ struct rte_ring *const tx_queues[],
+ const unsigned int nb_tx_queues,
const unsigned int numa_node, enum dev_action action,
struct rte_eth_dev **eth_dev_p)
{
@@ -260,20 +265,20 @@ do_eth_dev_ring_create(const char *name,
struct rte_eth_dev *eth_dev = NULL;
void **rx_queues_local = NULL;
void **tx_queues_local = NULL;
- unsigned i;
+ unsigned int i;
PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
numa_node);
- rx_queues_local = rte_zmalloc_socket(name,
- sizeof(void *) * nb_rx_queues, 0, numa_node);
+ rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
+ sizeof(void *), 0, numa_node);
if (rx_queues_local == NULL) {
rte_errno = ENOMEM;
goto error;
}
- tx_queues_local = rte_zmalloc_socket(name,
- sizeof(void *) * nb_tx_queues, 0, numa_node);
+ tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
+ sizeof(void *), 0, numa_node);
if (tx_queues_local == NULL) {
rte_errno = ENOMEM;
goto error;
@@ -344,10 +349,10 @@ error:
int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
- const unsigned nb_rx_queues,
+ const unsigned int nb_rx_queues,
struct rte_ring *const tx_queues[],
- const unsigned nb_tx_queues,
- const unsigned numa_node)
+ const unsigned int nb_tx_queues,
+ const unsigned int numa_node)
{
struct ring_internal_args args = {
.rx_queues = rx_queues,
@@ -357,8 +362,8 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
.numa_node = numa_node,
.addr = &args,
};
- char args_str[32] = { 0 };
- char ring_name[32] = { 0 };
+ char args_str[32];
+ char ring_name[RTE_RING_NAMESIZE];
uint16_t port_id = RTE_MAX_ETHPORTS;
int ret;
@@ -376,8 +381,14 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
return -1;
}
- snprintf(args_str, 32, "%s=%p", ETH_RING_INTERNAL_ARG, &args);
- snprintf(ring_name, 32, "net_ring_%s", name);
+ snprintf(args_str, sizeof(args_str), "%s=%p",
+ ETH_RING_INTERNAL_ARG, &args);
+
+ ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
+ if (ret >= (int)sizeof(ring_name)) {
+ rte_errno = ENAMETOOLONG;
+ return -1;
+ }
ret = rte_vdev_init(ring_name, args_str);
if (ret) {
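snprintf() returns the length the full string would have had, so a return value greater than or equal to the buffer size is a reliable truncation signal; registering a silently truncated vdev name would make later lookups fail. A hypothetical demonstration:

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
            char ring_name[8];
            int n = snprintf(ring_name, sizeof(ring_name),
                             "net_ring_%s", "long_user_name");

            if (n >= (int)sizeof(ring_name))
                    return ENAMETOOLONG;   /* would have been truncated */
            return 0;
    }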
@@ -385,7 +396,11 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
return -1;
}
- rte_eth_dev_get_port_by_name(ring_name, &port_id);
+ ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
+ if (ret) {
+ rte_errno = ENODEV;
+ return -1;
+ }
return port_id;
}
@@ -398,20 +413,28 @@ rte_eth_from_ring(struct rte_ring *r)
}
static int
-eth_dev_ring_create(const char *name, const unsigned numa_node,
+eth_dev_ring_create(const char *name, const unsigned int numa_node,
enum dev_action action, struct rte_eth_dev **eth_dev)
{
/* Rx and Tx are so called from the point of view of the first port;
* they are inverted from the point of view of the second port.
*/
struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
- unsigned i;
+ unsigned int i;
char rng_name[RTE_RING_NAMESIZE];
- unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
+ unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
RTE_PMD_RING_MAX_TX_RINGS);
for (i = 0; i < num_rings; i++) {
- snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
+ int cc;
+
+ cc = snprintf(rng_name, sizeof(rng_name),
+ "ETH_RXTX%u_%s", i, name);
+ if (cc >= (int)sizeof(rng_name)) {
+ rte_errno = ENAMETOOLONG;
+ return -1;
+ }
+
rxtx[i] = (action == DEV_CREATE) ?
rte_ring_create(rng_name, 1024, numa_node,
RING_F_SP_ENQ|RING_F_SC_DEQ) :
@@ -429,17 +452,18 @@ eth_dev_ring_create(const char *name, const unsigned numa_node,
struct node_action_pair {
char name[PATH_MAX];
- unsigned node;
+ unsigned int node;
enum dev_action action;
};
struct node_action_list {
- unsigned total;
- unsigned count;
+ unsigned int total;
+ unsigned int count;
struct node_action_pair *list;
};
-static int parse_kvlist (const char *key __rte_unused, const char *value, void *data)
+static int parse_kvlist(const char *key __rte_unused,
+ const char *value, void *data)
{
struct node_action_list *info = data;
int ret;
@@ -552,8 +576,8 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
kvlist = rte_kvargs_parse(params, valid_arguments);
if (!kvlist) {
- PMD_LOG(INFO, "Ignoring unsupported parameters when creating"
- " rings-backed ethernet device");
+ PMD_LOG(INFO,
+ "Ignoring unsupported parameters when creatingrings-backed ethernet device");
ret = eth_dev_ring_create(name, rte_socket_id(),
DEV_CREATE, &eth_dev);
if (ret == -1) {
@@ -597,7 +621,7 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
goto out_free;
info->total = ret;
- info->list = (struct node_action_pair*)(info + 1);
+ info->list = (struct node_action_pair *)(info + 1);
ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
parse_kvlist, info);
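
The ring PMD hunks above replace unchecked snprintf() calls with ones that compare the return value against the buffer size and fail with ENAMETOOLONG instead of silently truncating a ring name. A minimal standalone sketch of that pattern (the name format comes from the hunk; the helper itself is illustrative and uses plain errno rather than rte_errno):

#include <errno.h>
#include <stdio.h>

/* Format "net_ring_<name>" into buf; fail instead of silently truncating. */
static int
format_ring_name(char *buf, size_t buflen, const char *name)
{
	int n = snprintf(buf, buflen, "net_ring_%s", name);

	if (n < 0)
		return -1;		/* encoding error */
	if ((size_t)n >= buflen) {
		errno = ENAMETOOLONG;	/* output did not fit */
		return -1;
	}
	return 0;
}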
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 6690053f..0d7311d6 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -1071,8 +1071,8 @@ sfc_unprobe(struct sfc_adapter *sa)
}
uint32_t
-sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
- uint32_t ll_default)
+sfc_register_logtype(const struct rte_pci_addr *pci_addr,
+ const char *lt_prefix_str, uint32_t ll_default)
{
size_t lt_prefix_str_size = strlen(lt_prefix_str);
size_t lt_str_size_max;
@@ -1092,7 +1092,7 @@ sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
lt_str[lt_prefix_str_size - 1] = '.';
- rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
+ rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
lt_str_size_max - lt_prefix_str_size);
lt_str[lt_str_size_max - 1] = '\0';
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index a94ca8e7..f1cb8300 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -311,7 +311,7 @@ int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
size_t len, int socket_id, efsys_mem_t *esmp);
void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp);
-uint32_t sfc_register_logtype(struct sfc_adapter *sa,
+uint32_t sfc_register_logtype(const struct rte_pci_addr *pci_addr,
const char *lt_prefix_str,
uint32_t ll_default);
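
sfc_register_logtype() now takes only the PCI address, so a logtype can be registered before the adapter state is usable (the sfc_ethdev.c hunks below rely on this for secondary processes). A rough sketch of such a registration helper using DPDK's dynamic logtype API; the name format and the fallback are assumptions, not the driver's exact code:

#include <stdio.h>
#include <rte_log.h>
#include <rte_pci.h>

/* Register a per-device logtype such as "pmd.net.sfc.0000:01:00.0". */
static uint32_t
register_dev_logtype(const struct rte_pci_addr *addr,
		     const char *prefix, uint32_t default_level)
{
	char name[64];
	int type;

	snprintf(name, sizeof(name), "%s." PCI_PRI_FMT, prefix,
		 addr->domain, addr->bus, addr->devid, addr->function);

	type = rte_log_register(name);
	if (type < 0)
		return RTE_LOGTYPE_PMD;	/* assumed fallback */

	(void)rte_log_set_level(type, default_level);
	return type;
}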
diff --git a/drivers/net/sfc/sfc_debug.h b/drivers/net/sfc/sfc_debug.h
index 6b600ff4..62f3937e 100644
--- a/drivers/net/sfc/sfc_debug.h
+++ b/drivers/net/sfc/sfc_debug.h
@@ -27,7 +27,8 @@
do { \
const struct sfc_adapter *_sa = (sa); \
\
- rte_panic("sfc " PCI_PRI_FMT " #%" PRIu8 ": " fmt "\n", \
+ rte_panic("sfc " PCI_PRI_FMT \
+ " #%" PRIu16 ": " fmt "\n", \
_sa->pci_addr.domain, _sa->pci_addr.bus, \
_sa->pci_addr.devid, _sa->pci_addr.function, \
_sa->port_id, ##args); \
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index ff6d5b48..cf229f8b 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -340,9 +340,7 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
bool eop;
- /* Both checks may be done, so use bit OR to have only one branching */
- if (unlikely((header_len > SFC_TSOH_STD_LEN) |
- (tcph_off > txq->tso_tcp_header_offset_limit)))
+ if (unlikely(tcph_off > txq->tso_tcp_header_offset_limit))
return EMSGSIZE;
/*
@@ -407,6 +405,13 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
SFC_TSOH_STD_LEN;
+ /*
+ * Discard a packet if header linearization is needed but
+ * the header is too big.
+ */
+ if (unlikely(header_len > SFC_TSOH_STD_LEN))
+ return EMSGSIZE;
+
hdr_addr = txq->tsoh + hdr_addr_off;
hdr_iova = txq->tsoh_iova + hdr_addr_off;
copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index a7322a1e..052d38cd 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -94,17 +94,17 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
/* Autonegotiation may be disabled */
dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_1G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_10G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_25G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_50G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_100G;
dev_info->max_rx_queues = sa->rxq_max;
@@ -860,6 +860,33 @@ fail_inval:
}
static int
+sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ boolean_t scatter_enabled;
+ const char *error;
+ unsigned int i;
+
+ for (i = 0; i < sa->rxq_count; i++) {
+ if ((sa->rxq_info[i].rxq->state & SFC_RXQ_INITIALIZED) == 0)
+ continue;
+
+ scatter_enabled = (sa->rxq_info[i].type_flags &
+ EFX_RXQ_FLAG_SCATTER);
+
+ if (!sfc_rx_check_scatter(pdu, sa->rxq_info[i].rxq->buf_size,
+ encp->enc_rx_prefix_size,
+ scatter_enabled, &error)) {
+ sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
+ error);
+ return EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct sfc_adapter *sa = dev->data->dev_private;
@@ -885,6 +912,10 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
sfc_adapter_lock(sa);
+ rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
+ if (rc != 0)
+ goto fail_check_scatter;
+
if (pdu != sa->port.pdu) {
if (sa->state == SFC_ADAPTER_STARTED) {
sfc_stop(sa);
@@ -921,6 +952,8 @@ fail_start:
sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
"PDU max size - port is stopped",
(unsigned int)pdu, (unsigned int)old_pdu);
+
+fail_check_scatter:
sfc_adapter_unlock(sa);
fail_inval:
@@ -1124,8 +1157,6 @@ sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct sfc_adapter *sa = dev->data->dev_private;
- sfc_log_init(sa, "RxQ=%u", rx_queue_id);
-
return sfc_rx_qdesc_npending(sa, rx_queue_id);
}
@@ -1877,7 +1908,7 @@ static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
};
static int
-sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
+sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev, uint32_t logtype_main)
{
/*
* Device private data holds many process-local pointers.
@@ -1891,12 +1922,14 @@ sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name);
if (dp_rx == NULL) {
- sfc_err(sa, "cannot find %s Rx datapath", sa->dp_rx_name);
+ SFC_LOG(sa, RTE_LOG_ERR, logtype_main,
+ "cannot find %s Rx datapath", sa->dp_rx_name);
rc = ENOENT;
goto fail_dp_rx;
}
if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
- sfc_err(sa, "%s Rx datapath does not support multi-process",
+ SFC_LOG(sa, RTE_LOG_ERR, logtype_main,
+ "%s Rx datapath does not support multi-process",
sa->dp_rx_name);
rc = EINVAL;
goto fail_dp_rx_multi_process;
@@ -1904,12 +1937,14 @@ sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name);
if (dp_tx == NULL) {
- sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name);
+ SFC_LOG(sa, RTE_LOG_ERR, logtype_main,
+ "cannot find %s Tx datapath", sa->dp_tx_name);
rc = ENOENT;
goto fail_dp_tx;
}
if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
- sfc_err(sa, "%s Tx datapath does not support multi-process",
+ SFC_LOG(sa, RTE_LOG_ERR, logtype_main,
+ "%s Tx datapath does not support multi-process",
sa->dp_tx_name);
rc = EINVAL;
goto fail_dp_tx_multi_process;
@@ -1957,27 +1992,30 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
{
struct sfc_adapter *sa = dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint32_t logtype_main;
int rc;
const efx_nic_cfg_t *encp;
const struct ether_addr *from;
sfc_register_dp();
+ logtype_main = sfc_register_logtype(&pci_dev->addr,
+ SFC_LOGTYPE_MAIN_STR,
+ RTE_LOG_NOTICE);
+
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -sfc_eth_dev_secondary_set_ops(dev);
+ return -sfc_eth_dev_secondary_set_ops(dev, logtype_main);
/* Required for logging */
sa->pci_addr = pci_dev->addr;
sa->port_id = dev->data->port_id;
+ sa->logtype_main = logtype_main;
sa->eth_dev = dev;
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
- sa->logtype_main = sfc_register_logtype(sa, SFC_LOGTYPE_MAIN_STR,
- RTE_LOG_NOTICE);
-
rc = sfc_kvargs_parse(sa);
if (rc != 0)
goto fail_kvargs_parse;
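
The speed_capa hunk in sfc_ethdev.c fixes a classic bit-mask bug: the EFX_PHY_CAP_* values are bit positions, so ANDing the advertised-capability mask with the enum value itself tests the wrong bits. A self-contained illustration with a hypothetical enum:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical capability enum: values are BIT POSITIONS, not masks. */
enum phy_cap {
	PHY_CAP_1000FDX  = 4,
	PHY_CAP_10000FDX = 5,
};

int
main(void)
{
	uint32_t adv_cap_mask = 1u << PHY_CAP_10000FDX;

	/* Buggy: 0x20 & 5 tests bits 0 and 2 of the mask, prints 0. */
	printf("buggy: %d\n", !!(adv_cap_mask & PHY_CAP_10000FDX));

	/* Fixed: convert the position to a mask before testing, prints 1. */
	printf("fixed: %d\n", !!(adv_cap_mask & (1u << PHY_CAP_10000FDX)));
	return 0;
}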
diff --git a/drivers/net/sfc/sfc_mcdi.c b/drivers/net/sfc/sfc_mcdi.c
index 007506b4..e485e07d 100644
--- a/drivers/net/sfc/sfc_mcdi.c
+++ b/drivers/net/sfc/sfc_mcdi.c
@@ -256,7 +256,8 @@ sfc_mcdi_init(struct sfc_adapter *sa)
if (rc != 0)
goto fail_dma_alloc;
- mcdi->logtype = sfc_register_logtype(sa, SFC_LOGTYPE_MCDI_STR,
+ mcdi->logtype = sfc_register_logtype(&sa->pci_addr,
+ SFC_LOGTYPE_MCDI_STR,
RTE_LOG_NOTICE);
emtp = &mcdi->transport;
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index c792e0b2..a78d35a2 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -360,6 +360,18 @@ sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
return RTE_ETH_RX_DESC_UNAVAIL;
}
+boolean_t
+sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
+ boolean_t rx_scatter_enabled, const char **error)
+{
+ if ((rx_buf_size < pdu + rx_prefix_size) && !rx_scatter_enabled) {
+ *error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
+ return B_FALSE;
+ }
+
+ return B_TRUE;
+}
+
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
@@ -964,6 +976,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
+ const char *error;
rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
&evq_entries, &rxq_max_fill_level);
@@ -987,10 +1000,11 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
goto fail_bad_conf;
}
- if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
- (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
- sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
- "object size is too small", sw_index);
+ if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
+ encp->enc_rx_prefix_size,
+ (offloads & DEV_RX_OFFLOAD_SCATTER),
+ &error)) {
+ sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
"PDU size %u plus Rx prefix %u bytes",
sw_index, buf_size, (unsigned int)sa->port.pdu,
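
sfc_rx.c extracts the buffer-size/scatter validation into sfc_rx_check_scatter() with an error-string out-parameter, so the same check runs both at RxQ setup and, via sfc_check_scatter_on_all_rx_queues(), on MTU change. A minimal restatement of that predicate, with generic C types standing in for the driver's:

#include <stdbool.h>
#include <stddef.h>

/*
 * Without Rx scatter, one mbuf must hold the whole PDU plus the Rx
 * prefix. On failure, point *error at a static string for the caller
 * to log with its own queue context.
 */
static bool
rx_check_scatter(size_t pdu, size_t buf_size, size_t prefix_size,
		 bool scatter_enabled, const char **error)
{
	if (buf_size < pdu + prefix_size && !scatter_enabled) {
		*error = "Rx scatter is disabled and mbuf size is too small";
		return false;
	}
	return true;
}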
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 3fba7d8a..65724b03 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -159,6 +159,10 @@ int sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
efx_rx_hash_type_t *efx);
uint64_t sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa,
efx_rx_hash_type_t efx);
+boolean_t sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size,
+ uint32_t rx_prefix_size,
+ boolean_t rx_scatter_enabled,
+ const char **error);
#ifdef __cplusplus
}
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index 076a25d4..a28af0e7 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -107,10 +107,6 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
idx += SFC_TSO_OPT_DESCS_NUM;
- /* Packets which have too big headers should be discarded */
- if (unlikely(header_len > SFC_TSOH_STD_LEN))
- return EMSGSIZE;
-
/*
* The TCP header must start at most 208 bytes into the frame.
* If it starts later than this then the NIC won't realise
@@ -129,6 +125,13 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
* limitations on address boundaries crossing by DMA descriptor data.
*/
if (m->data_len < header_len) {
+ /*
+ * Discard a packet if header linearization is needed but
+ * the header is too big.
+ */
+ if (unlikely(header_len > SFC_TSOH_STD_LEN))
+ return EMSGSIZE;
+
tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
sfc_tso_prepare_header(tsoh, header_len, in_seg, in_off);
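
The sfc_tso.c and sfc_ef10_tx.c hunks move the SFC_TSOH_STD_LEN check inside the linearization branch: a header that is contiguous in the first segment no longer has to fit the bounce buffer. A sketch of the reordered logic, with illustrative names and sizes:

#include <errno.h>
#include <stddef.h>

#define TSOH_LEN 256	/* bounce buffer size, illustrative */

static int
prepare_tso_header(size_t data_len, size_t header_len)
{
	if (data_len < header_len) {
		/* Header spans segments: it must fit the bounce buffer. */
		if (header_len > TSOH_LEN)
			return EMSGSIZE;
		/* ... copy header_len bytes into the bounce buffer ... */
	}
	/* Contiguous headers pass through without the size restriction. */
	return 0;
}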
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index aa73d264..242137e6 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -737,7 +737,8 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* We may have reached this place for
* one of the following reasons:
*
- * 1) Packet header length is greater
+ * 1) Packet header linearization is needed
+ * and the header length is greater
* than SFC_TSOH_STD_LEN
* 2) TCP header starts at more than
* 208 bytes into the frame
diff --git a/drivers/net/softnic/rte_eth_softnic_flow.c b/drivers/net/softnic/rte_eth_softnic_flow.c
index 21e75300..aefc384d 100644
--- a/drivers/net/softnic/rte_eth_softnic_flow.c
+++ b/drivers/net/softnic/rte_eth_softnic_flow.c
@@ -1283,7 +1283,8 @@ flow_rule_action_get(struct pmd_internals *softnic,
action,
"QUEUE: Invalid RX queue ID");
- sprintf(name, "RXQ%u", (uint32_t)conf->index);
+ snprintf(name, sizeof(name), "RXQ%u",
+ (uint32_t)conf->index);
status = softnic_pipeline_port_out_find(softnic,
pipeline->name,
@@ -1373,7 +1374,7 @@ flow_rule_action_get(struct pmd_internals *softnic,
action,
"RSS: Invalid RX queue ID");
- sprintf(name, "RXQ%u",
+ snprintf(name, sizeof(name), "RXQ%u",
(uint32_t)conf->queue[i]);
status = softnic_pipeline_port_out_find(softnic,
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index a9342997..86787368 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -67,6 +67,8 @@
/* IPC key for queue fds sync */
#define TAP_MP_KEY "tap_mp_sync_queues"
+#define TAP_IOV_DEFAULT_MAX 1024
+
static int tap_devices_count;
static struct rte_vdev_driver pmd_tap_drv;
static struct rte_vdev_driver pmd_tun_drv;
@@ -1326,6 +1328,13 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
struct rx_queue *rxq = &internals->rxq[rx_queue_id];
struct rte_mbuf **tmp = &rxq->pool;
long iov_max = sysconf(_SC_IOV_MAX);
+
+ if (iov_max <= 0) {
+ TAP_LOG(WARNING,
+ "_SC_IOV_MAX is not defined. Using %d as default",
+ TAP_IOV_DEFAULT_MAX);
+ iov_max = TAP_IOV_DEFAULT_MAX;
+ }
uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
struct iovec (*iovecs)[nb_desc + 1];
int data_off = RTE_PKTMBUF_HEADROOM;
@@ -2055,13 +2064,14 @@ tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
int queue, fd_iterator;
/* Prepare the request */
+ memset(&request, 0, sizeof(request));
strlcpy(request.name, TAP_MP_KEY, sizeof(request.name));
strlcpy(request_param->port_name, port_name,
sizeof(request_param->port_name));
request.len_param = sizeof(*request_param);
/* Send request and receive reply */
ret = rte_mp_request_sync(&request, &replies, &timeout);
- if (ret < 0) {
+ if (ret < 0 || replies.nb_received != 1) {
TAP_LOG(ERR, "Failed to request queues from primary: %d",
rte_errno);
return -1;
@@ -2071,6 +2081,11 @@ tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
TAP_LOG(DEBUG, "Received IPC reply for %s", reply_param->port_name);
/* Attach the queues from received file descriptors */
+ if (reply_param->rxq_count + reply_param->txq_count != reply->num_fds) {
+ TAP_LOG(ERR, "Unexpected number of fds received");
+ return -1;
+ }
+
dev->data->nb_rx_queues = reply_param->rxq_count;
dev->data->nb_tx_queues = reply_param->txq_count;
fd_iterator = 0;
@@ -2078,7 +2093,7 @@ tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
process_private->rxq_fds[queue] = reply->fds[fd_iterator++];
for (queue = 0; queue < reply_param->txq_count; queue++)
process_private->txq_fds[queue] = reply->fds[fd_iterator++];
-
+ free(reply);
return 0;
}
@@ -2111,19 +2126,24 @@ tap_mp_sync_queues(const struct rte_mp_msg *request, const void *peer)
/* Fill file descriptors for all queues */
reply.num_fds = 0;
reply_param->rxq_count = 0;
+ if (dev->data->nb_rx_queues + dev->data->nb_tx_queues >
+ RTE_MP_MAX_FD_NUM) {
+ TAP_LOG(ERR, "Number of rx/tx queues exceeds max number of fds");
+ return -1;
+ }
+
for (queue = 0; queue < dev->data->nb_rx_queues; queue++) {
reply.fds[reply.num_fds++] = process_private->rxq_fds[queue];
reply_param->rxq_count++;
}
RTE_ASSERT(reply_param->rxq_count == dev->data->nb_rx_queues);
- RTE_ASSERT(reply_param->txq_count == dev->data->nb_tx_queues);
- RTE_ASSERT(reply.num_fds <= RTE_MP_MAX_FD_NUM);
reply_param->txq_count = 0;
for (queue = 0; queue < dev->data->nb_tx_queues; queue++) {
reply.fds[reply.num_fds++] = process_private->txq_fds[queue];
reply_param->txq_count++;
}
+ RTE_ASSERT(reply_param->txq_count == dev->data->nb_tx_queues);
/* Send reply */
strlcpy(reply.name, request->name, sizeof(reply.name));
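
The tap hunks guard sysconf(_SC_IOV_MAX), which may return -1 when the limit is indeterminate, by falling back to TAP_IOV_DEFAULT_MAX. The same pattern in isolation:

#include <stdio.h>
#include <unistd.h>

#define IOV_DEFAULT_MAX 1024	/* fallback, mirrors TAP_IOV_DEFAULT_MAX */

int
main(void)
{
	/* sysconf() returns -1 for indeterminate or unsupported limits. */
	long iov_max = sysconf(_SC_IOV_MAX);

	if (iov_max <= 0) {
		fprintf(stderr, "_SC_IOV_MAX unknown, assuming %d\n",
			IOV_DEFAULT_MAX);
		iov_max = IOV_DEFAULT_MAX;
	}
	printf("iov_max = %ld\n", iov_max);
	return 0;
}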
diff --git a/drivers/net/tap/tap_bpf_program.c b/drivers/net/tap/tap_bpf_program.c
index 1cb73822..532e8838 100644
--- a/drivers/net/tap/tap_bpf_program.c
+++ b/drivers/net/tap/tap_bpf_program.c
@@ -106,7 +106,7 @@ rte_softrss_be(const __u32 *input_tuple, const uint8_t *rss_key,
for (j = 0; j < input_len; j++) {
#pragma unroll
for (i = 0; i < 32; i++) {
- if (input_tuple[j] & (1 << (31 - i))) {
+ if (input_tuple[j] & (1U << (31 - i))) {
hash ^= ((const __u32 *)def_rss_key)[j] << i |
(__u32)((uint64_t)
(((const __u32 *)def_rss_key)[j + 1])
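
The BPF hunk replaces 1 << (31 - i) with 1U << (31 - i): when the shift count reaches 31, shifting a signed int into the sign bit is undefined behaviour, while the unsigned form yields a well-defined mask. For instance:

#include <stdint.h>

/* For i == 0 the shift count is 31: 1 << 31 overflows a signed int
 * (undefined behaviour), while 1U << 31 is simply the top-bit mask. */
static inline int
bit_is_set(uint32_t word, unsigned int i)	/* i in [0, 31] */
{
	return (word & (1U << (31 - i))) != 0;
}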
diff --git a/drivers/net/vdev_netvsc/vdev_netvsc.c b/drivers/net/vdev_netvsc/vdev_netvsc.c
index 16303ef5..a5fd64e0 100644
--- a/drivers/net/vdev_netvsc/vdev_netvsc.c
+++ b/drivers/net/vdev_netvsc/vdev_netvsc.c
@@ -811,7 +811,7 @@ vdev_netvsc_cmp_rte_device(const struct rte_device *dev1,
static void
vdev_netvsc_scan_callback(__rte_unused void *arg)
{
- struct rte_vdev_device *dev;
+ struct rte_device *dev;
struct rte_devargs *devargs;
struct rte_bus *vbus = rte_bus_find_by_name("vdev");
@@ -819,8 +819,9 @@ vdev_netvsc_scan_callback(__rte_unused void *arg)
if (!strncmp(devargs->name, VDEV_NETVSC_DRIVER_NAME,
VDEV_NETVSC_DRIVER_NAME_LEN))
return;
- dev = (struct rte_vdev_device *)vbus->find_device(NULL,
- vdev_netvsc_cmp_rte_device, VDEV_NETVSC_DRIVER_NAME);
+
+ dev = vbus->find_device(NULL, vdev_netvsc_cmp_rte_device,
+ VDEV_NETVSC_DRIVER_NAME);
if (dev)
return;
if (rte_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME))
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 2ba66d29..f938b7ce 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -71,7 +71,6 @@ static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static int virtio_intr_enable(struct rte_eth_dev *dev);
static int virtio_intr_disable(struct rte_eth_dev *dev);
static int virtio_dev_queue_stats_mapping_set(
@@ -729,6 +728,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
struct virtqueue *vq = rxvq->vq;
virtqueue_enable_intr(vq);
+ virtio_mb();
return 0;
}
@@ -778,6 +778,21 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.mac_addr_set = virtio_mac_addr_set,
};
+/*
+ * dev_ops for virtio-user in secondary processes; only limited
+ * support is available currently.
+ */
+const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
+ .dev_infos_get = virtio_dev_info_get,
+ .stats_get = virtio_dev_stats_get,
+ .xstats_get = virtio_dev_xstats_get,
+ .xstats_get_names = virtio_dev_xstats_get_names,
+ .stats_reset = virtio_dev_stats_reset,
+ .xstats_reset = virtio_dev_stats_reset,
+ /* collect stats per queue */
+ .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
+};
+
static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
@@ -1693,6 +1708,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
out:
rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
return ret;
}
@@ -1820,6 +1836,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+ hw->vtnet_hdr_size;
uint64_t rx_offloads = rxmode->offloads;
uint64_t tx_offloads = txmode->offloads;
uint64_t req_features;
@@ -1834,6 +1852,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
+ req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
@@ -2185,6 +2206,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
host_features = VTPCI_OPS(hw)->get_features(hw);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
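
eth_virtio_dev_init() now clears mac_addrs after freeing it on the error path, so the generic ethdev release code cannot free it a second time. The free-and-NULL pattern in miniature (plain free() stands in for rte_free()):

#include <stdlib.h>

struct dev_data {
	void *mac_addrs;
};

static void
dev_init_error_path(struct dev_data *data)
{
	free(data->mac_addrs);
	data->mac_addrs = NULL;	/* later cleanup sees NULL and skips it */
}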
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index e0f80e5a..39a9f7b7 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -45,6 +45,8 @@
1u << VIRTIO_NET_F_HOST_TSO4 | \
1u << VIRTIO_NET_F_HOST_TSO6)
+extern const struct eth_dev_ops virtio_user_secondary_eth_dev_ops;
+
/*
* CQ function prototype
*/
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index eb891433..7f7562dd 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -192,7 +192,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
static void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
- uint16_t i, used_idx, desc_idx = 0, last_idx;
+ uint16_t i, idx = vq->vq_used_cons_idx;
int16_t free_cnt = 0;
struct vq_desc_extra *dxp = NULL;
@@ -200,27 +200,16 @@ virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
return;
for (i = 0; i < num; i++) {
- struct vring_used_elem *uep;
-
- used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
- uep = &vq->vq_ring.used->ring[used_idx];
- desc_idx = (uint16_t)uep->id;
-
- dxp = &vq->vq_descx[desc_idx];
- vq->vq_used_cons_idx++;
-
+ dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
+ free_cnt += dxp->ndescs;
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
dxp->cookie = NULL;
}
}
- last_idx = desc_idx + dxp->ndescs - 1;
- free_cnt = last_idx - vq->vq_desc_tail_idx;
- if (free_cnt <= 0)
- free_cnt += vq->vq_nentries;
-
- vq_ring_free_inorder(vq, last_idx, free_cnt);
+ vq->vq_free_cnt += free_cnt;
+ vq->vq_used_cons_idx = idx;
}
static inline int
@@ -421,7 +410,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
while (i < num) {
idx = idx & (vq->vq_nentries - 1);
- dxp = &vq->vq_descx[idx];
+ dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
@@ -472,7 +461,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
head_idx = vq->vq_desc_head_idx;
idx = head_idx;
- dxp = &vq->vq_descx[idx];
+ if (in_order)
+ dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
+ else
+ dxp = &vq->vq_descx[idx];
dxp->cookie = (void *)cookie;
dxp->ndescs = needed;
@@ -788,7 +780,7 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
stats->size_bins[0]++;
else if (s < 1519)
stats->size_bins[6]++;
- else if (s >= 1519)
+ else
stats->size_bins[7]++;
}
@@ -1107,6 +1099,7 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue,
prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ virtio_rmb();
num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
rcv_cnt);
uint16_t extra_idx = 0;
@@ -1271,6 +1264,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t rcv_cnt =
RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ virtio_rmb();
uint32_t rx_num =
virtqueue_dequeue_burst_rx(vq,
rcv_pkts, len, rcv_cnt);
@@ -1380,6 +1374,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_pktmbuf_free(txm);
continue;
}
+ /* vlan_insert may add a header mbuf */
+ tx_pkts[nb_tx] = txm;
}
/* optimize ring usage */
@@ -1484,6 +1480,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
rte_pktmbuf_free(txm);
continue;
}
+ /* vlan_insert may add a header mbuf */
+ tx_pkts[nb_tx] = txm;
}
/* optimize ring usage */
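
The virtio Tx hunks store the mbuf pointer back into tx_pkts[] after VLAN insertion, because rte_vlan_insert() takes the mbuf by reference and may replace the head. A sketch of a safe wrapper (the helper name is illustrative):

#include <rte_ether.h>
#include <rte_mbuf.h>

/*
 * Insert the VLAN header and write the possibly-new head back into
 * the burst array, so a later retry does not resend a stale pointer.
 */
static inline int
vlan_insert_and_fixup(struct rte_mbuf **tx_pkts, uint16_t idx)
{
	struct rte_mbuf *m = tx_pkts[idx];

	if (rte_vlan_insert(&m) != 0) {
		rte_pktmbuf_free(m);
		return -1;
	}
	tx_pkts[idx] = m;	/* head may have changed */
	return 0;
}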
diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h
index 83a85cc6..55f47036 100644
--- a/drivers/net/virtio/virtio_user/vhost.h
+++ b/drivers/net/virtio/virtio_user/vhost.h
@@ -2,8 +2,8 @@
* Copyright(c) 2010-2016 Intel Corporation
*/
-#ifndef _VHOST_NET_USER_H
-#define _VHOST_NET_USER_H
+#ifndef _VIRTIO_USER_VHOST_H
+#define _VIRTIO_USER_VHOST_H
#include <stdint.h>
#include <linux/types.h>
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index a3faf1d0..fbd9e979 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -62,6 +62,7 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
const char *mac, uint64_t features)
{
unsigned int tap_features;
+ char *tap_name = NULL;
int sndbuf = INT_MAX;
struct ifreq ifr;
int tapfd;
@@ -112,6 +113,12 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
goto error;
}
+ tap_name = strdup(ifr.ifr_name);
+ if (!tap_name) {
+ PMD_DRV_LOG(ERR, "strdup ifname failed: %s", strerror(errno));
+ goto error;
+ }
+
fcntl(tapfd, F_SETFL, O_NONBLOCK);
if (ioctl(tapfd, TUNSETVNETHDRSZ, &hdr_size) < 0) {
@@ -134,11 +141,12 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
goto error;
}
- if (!(*p_ifname))
- *p_ifname = strdup(ifr.ifr_name);
+ free(*p_ifname);
+ *p_ifname = tap_name;
return tapfd;
error:
+ free(tap_name);
close(tapfd);
return -1;
}
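
vhost_kernel_open_tap() now strdup()s the kernel-chosen interface name up front, frees the caller's old string only on success, and releases the copy on every error path, fixing both a leak and a clobbered name. The allocate-early/commit-late shape, reduced to essentials:

#include <stdlib.h>
#include <string.h>

static int
commit_ifname(char **p_ifname, const char *kernel_name)
{
	char *tap_name = strdup(kernel_name);

	if (tap_name == NULL)
		return -1;

	/* ... further setup that may fail (and free(tap_name)) here ... */

	free(*p_ifname);	/* old name, possibly NULL */
	*p_ifname = tap_name;
	return 0;
}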
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 2c6eba0a..0a88d595 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -393,7 +393,10 @@ virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
return -1;
flag = fcntl(fd, F_GETFL);
- fcntl(fd, F_SETFL, flag | O_NONBLOCK);
+ if (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0) {
+ PMD_DRV_LOG(ERR, "fcntl failed, %s", strerror(errno));
+ return -1;
+ }
return 0;
}
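
virtio_user_start_server() now checks the fcntl() result instead of assuming O_NONBLOCK was applied. A standalone version of that check:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

/* Both F_GETFL and F_SETFL can fail; report either before listening. */
static int
set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
		fprintf(stderr, "fcntl: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}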
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 9c8bcd2c..f0051f88 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -412,7 +412,7 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname,
- int mrg_rxbuf, int in_order)
+ int server, int mrg_rxbuf, int in_order)
{
pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
@@ -420,6 +420,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
+ dev->is_server = server;
dev->mac_specified = 0;
dev->frontend_features = 0;
dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index c42ce5d4..3e3a7b78 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -50,7 +50,7 @@ int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname,
- int mrg_rxbuf, int in_order);
+ int server, int mrg_rxbuf, int in_order);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index f8791391..5781c094 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -469,6 +469,26 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
char *mac_addr = NULL;
int ret = -1;
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ const char *name = rte_vdev_device_name(dev);
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ RTE_LOG(ERR, PMD, "Failed to probe %s\n", name);
+ return -1;
+ }
+
+ if (eth_virtio_dev_init(eth_dev) < 0) {
+ PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
+ rte_eth_dev_release_port(eth_dev);
+ return -1;
+ }
+
+ eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
if (!kvlist) {
PMD_INIT_LOG(ERR, "error when parsing param");
@@ -581,33 +601,19 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
}
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- struct virtio_user_dev *vu_dev;
-
- eth_dev = virtio_user_eth_dev_alloc(dev);
- if (!eth_dev) {
- PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
- goto end;
- }
-
- hw = eth_dev->data->dev_private;
- vu_dev = virtio_user_get_dev(hw);
- if (server_mode == 1)
- vu_dev->is_server = true;
- else
- vu_dev->is_server = false;
- if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
- queue_size, mac_addr, &ifname, mrg_rxbuf,
- in_order) < 0) {
- PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
- virtio_user_eth_dev_free(eth_dev);
- goto end;
- }
+ eth_dev = virtio_user_eth_dev_alloc(dev);
+ if (!eth_dev) {
+ PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
+ goto end;
+ }
- } else {
- eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev));
- if (!eth_dev)
- goto end;
+ hw = eth_dev->data->dev_private;
+ if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
+ queue_size, mac_addr, &ifname, server_mode,
+ mrg_rxbuf, in_order) < 0) {
+ PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
+ virtio_user_eth_dev_free(eth_dev);
+ goto end;
}
/* previously called by rte_pci_probe() for physical dev */
@@ -649,6 +655,9 @@ virtio_user_pmd_remove(struct rte_vdev_device *vdev)
if (!eth_dev)
return -ENODEV;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
+
/* make sure the device is stopped, queues freed */
rte_eth_dev_close(eth_dev->data->port_id);
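
virtio_user_pmd_probe() gains an early secondary-process branch: attach to the port created by the primary, install the reduced dev_ops table, and finish probing; remove correspondingly just releases the local port. A skeleton of that branch, with error handling trimmed for brevity:

#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>

static int
probe_secondary(struct rte_vdev_device *vdev,
		const struct eth_dev_ops *secondary_ops)
{
	struct rte_eth_dev *eth_dev;

	/* Look up the shared port data the primary already created. */
	eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(vdev));
	if (eth_dev == NULL)
		return -1;

	eth_dev->dev_ops = secondary_ops;	/* limited, process-local ops */
	eth_dev->device = &vdev->device;
	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}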
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 93e5de9a..812e1857 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -52,6 +52,7 @@
#define VMXNET3_RX_OFFLOAD_CAP \
(DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_VLAN_FILTER | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \