Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_phy.c       29
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c          9
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c             14
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c           57
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c   16
5 files changed, 75 insertions, 50 deletions
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 43c55d74..cf9d51dc 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -1542,16 +1542,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("SFP+ module not supported\n");
@@ -1804,16 +1798,10 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("QSFP module not supported\n");
@@ -1838,7 +1826,6 @@ err_read_i2c_eeprom:
return IXGBE_ERR_SFP_NOT_PRESENT;
}
-
/**
* ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index bac36e0d..d6686f6c 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2991,6 +2991,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
/* Extended stats */
for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbevf_stats_strings[i].offset);
}
@@ -4089,6 +4090,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
ixgbe_dev_info_get(dev, &dev_info);
@@ -4099,7 +4101,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -5722,6 +5724,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ixgbe_hw *hw;
uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5731,7 +5734,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -7529,7 +7532,7 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
}
static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 26395e41..09440ccb 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -387,15 +387,27 @@ ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
uint32_t reg_offset, vf_shift;
const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ uint8_t nb_q_per_pool;
+ int i;
vf_shift = vf & VFRE_MASK;
reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
- /* enable transmit and receive for vf */
+ /* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* enable all queue drop for IOV */
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
+ IXGBE_WRITE_FLUSH(hw);
+ reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
+ /* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index c61ce470..d1e300a5 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -3323,7 +3323,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
- uint32_t q;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -3343,18 +3342,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
- if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
- /* Disable drop for all queues in VMDQ mode*/
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
- } else {
- /* Enable drop for all queues in SRIOV mode */
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT) | IXGBE_QDE_ENABLE));
- }
-
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
reg &= ~IXGBE_RTTDCS_ARBDIS;
@@ -3488,16 +3475,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
/**
* ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*/
static void
-ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
uint8_t i;
+ uint32_t q;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
/*
@@ -3535,6 +3524,21 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /* Disable drop for all queues in VMDQ mode*/
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT)));
+ } else {
+ /* Enable drop for all queues in SRIOV mode */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
+ }
}
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
@@ -3647,7 +3651,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Get dcb TX configuration parameters from rte_eth_conf */
ixgbe_dcb_rx_config(dev, dcb_config);
/*Configure general DCB RX parameters*/
- ixgbe_dcb_rx_hw_config(hw, dcb_config);
+ ixgbe_dcb_rx_hw_config(dev, dcb_config);
break;
default:
PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
@@ -3706,6 +3710,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
}
+ } else {
+ /* Re-configure 8 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
}
switch (hw->mac.type) {
@@ -4083,9 +4096,8 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
break;
}
} else {
- /*
- * SRIOV active scheme
- * Support RSS together with VMDq & SRIOV
+ /* SRIOV active scheme
+ * Support RSS together with SRIOV.
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_RSS:
@@ -4093,10 +4105,13 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
ixgbe_config_vf_rss(dev);
break;
case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_DCB:
+ /* In SRIOV, the configuration is the same as VMDq case */
ixgbe_vmdq_dcb_configure(dev);
break;
- /* FIXME if support DCB/RSS together with VMDq & SRIOV */
+ /* DCB/RSS together with SRIOV is not supported */
case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case ETH_MQ_RX_DCB_RSS:
PMD_INIT_LOG(ERR,
"Could not support DCB/RSS with VMDq & SRIOV");
return -1;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index abbf2841..dd7d1778 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -335,9 +335,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
@@ -345,11 +349,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -358,8 +364,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);