author     Christian Ehrhardt <christian.ehrhardt@canonical.com>   2016-07-06 09:22:35 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>   2016-07-06 16:09:40 +0200
commit     8b25d1ad5d2264bdfc2818c7bda74ee2697df6db (patch)
tree       8c3c769777f7e66a2d1ba7dd7651b563cfde370b /drivers/net/ixgbe
parent     97f17497d162afdb82c8704bf097f0fee3724b2e (diff)
Imported Upstream version 16.07-rc1
Change-Id: I40a523e52f12e8496fdd69e902824b0226c303de
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/Makefile | 10
-rw-r--r--  drivers/net/ixgbe/base/README | 2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82598.c | 5
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82598.h | 3
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82599.c | 9
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_api.c | 41
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_api.h | 8
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.c | 361
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.h | 9
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_mbx.h | 4
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_osdep.h | 1
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_phy.c | 16
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_phy.h | 3
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_type.h | 118
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_vf.c | 10
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_vf.h | 7
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x540.c | 29
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x540.h | 1
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x550.c | 1158
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x550.h | 52
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599_bypass.c | 8
-rw-r--r--  drivers/net/ixgbe/ixgbe_bypass.c | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe_bypass.h | 8
-rw-r--r--  drivers/net/ixgbe/ixgbe_bypass_defines.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c | 527
-rw-r--r--  drivers/net/ixgbe/ixgbe_fdir.c | 68
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c | 38
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c | 499
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 326
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 560
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c (renamed from drivers/net/ixgbe/ixgbe_rxtx_vec.c) | 314
32 files changed, 3101 insertions, 1104 deletions
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 50bf51c9..a6c71f34 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -43,7 +43,7 @@ EXPORT_MAP := rte_pmd_ixgbe_version.map
LIBABIVER := 1
-ifeq ($(CC), icc)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
@@ -51,7 +51,7 @@ CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
CFLAGS_ixgbe_rxtx.o += -wd3656
-else ifeq ($(CC), clang)
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
#
# CFLAGS for clang
#
@@ -108,7 +108,11 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
-SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec.c
+ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
+else
+SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
+endif
ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index caa26640..76e78051 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -34,7 +34,7 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2016.01.07 released by ND. The sub-directory of base/
+cid-10g-shared-code.2016.04.12 released by ND. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c
index 9e65fffa..db808801 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -995,17 +995,20 @@ STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @vlan: VLAN id to write to VLAN filter
* @vind: VMDq output index that maps queue to VLAN id in VFTA
* @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ * @bypass_vlvf: boolean flag - unused
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on, bool bypass_vlvf)
{
u32 regindex;
u32 bitindex;
u32 bits;
u32 vftabyte;
+ UNREFERENCED_1PARAMETER(bypass_vlvf);
+
DEBUGFUNC("ixgbe_set_vfta_82598");
if (vlan > 4095)
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.h b/drivers/net/ixgbe/base/ixgbe_82598.h
index 89dd11a5..0326e70b 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.h
+++ b/drivers/net/ixgbe/base/ixgbe_82598.h
@@ -39,7 +39,8 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass);
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 154c1f10..5bc7c2b9 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -1176,11 +1176,14 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
- hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+ hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+ IXGBE_CLEAR_VMDQ_ALL);
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index cf1e5169..17868676 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -209,6 +209,8 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_A_10G_T:
@@ -1078,33 +1080,38 @@ s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
* ixgbe_set_vfta - Set VLAN filter table
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFTA
- * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN
+ * @vlvf_bypass: boolean flag indicating updating the default pool is okay
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
- vlan_on), IXGBE_NOT_IMPLEMENTED);
+ vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_set_vlvf - Set VLAN Pool Filter
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
- * @vfta_changed: pointer to boolean flag which indicates whether VFTA
- * should be changed
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_delta: pointer to the difference between the current value of VFTA
+ * and the desired value
+ * @vfta: the desired value of the VFTA
+ * @vlvf_bypass: boolean flag indicating updating the default pool is okay
*
* Turn on/off specified bit in VLVF table.
**/
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
- bool *vfta_changed)
+ u32 *vfta_delta, u32 vfta, bool vlvf_bypass)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
- vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
+ vlan_on, vfta_delta, vfta, vlvf_bypass),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -1637,6 +1644,20 @@ void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
hw->mac.ops.release_swfw_sync(hw, mask);
}
+/**
+ * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register.
+ * Regardless of whether it succeeds or not, it then releases the semaphore.
+ * This function is called to recover from catastrophic failures that
+ * may have left the semaphore locked.
+ **/
+void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+}
void ixgbe_disable_rx(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h
index ae26a6ac..3aad1da7 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.h
+++ b/drivers/net/ixgbe/base/ixgbe_api.h
@@ -124,9 +124,10 @@ s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
+ u32 vind, bool vlan_on, bool vlvf_bypass);
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool *vfta_changed);
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
@@ -191,6 +192,7 @@ s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw);
s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
@@ -215,5 +217,7 @@ s32 ixgbe_handle_lasi(struct ixgbe_hw *hw);
void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed);
void ixgbe_disable_rx(struct ixgbe_hw *hw);
void ixgbe_enable_rx(struct ixgbe_hw *hw);
+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#endif /* _IXGBE_API_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index ec61408d..811875a4 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -135,6 +135,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
/* Flow Control */
mac->ops.fc_enable = ixgbe_fc_enable_generic;
mac->ops.setup_fc = ixgbe_setup_fc_generic;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg;
/* Link */
mac->ops.get_link_capabilities = NULL;
@@ -1020,24 +1021,33 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
* ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
* @hw: pointer to the HW structure
*
- * Determines the LAN function id by reading memory-mapped registers
- * and swaps the port value if requested.
+ * Determines the LAN function id by reading memory-mapped registers and swaps
+ * the port value if requested, and set MAC instance for devices that share
+ * CS4227.
**/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
struct ixgbe_bus_info *bus = &hw->bus;
u32 reg;
+ u16 ee_ctrl_4;
DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
- bus->lan_id = bus->func;
+ bus->lan_id = (u8)bus->func;
/* check for a port swap */
reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
+
+ /* Get MAC instance from EEPROM for configuring CS4227 */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+ bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+ IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ }
}
/**
@@ -2397,10 +2407,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
hw->mac.addr[4], hw->mac.addr[5]);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
- /* clear VMDq pool/queue selection for RAR 0 */
- hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
}
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+
hw->addr_ctrl.overflow_promisc = 0;
hw->addr_ctrl.rar_used_count = 1;
@@ -2729,7 +2740,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
}
/* Negotiate the fc mode to use */
- ixgbe_fc_autoneg(hw);
+ hw->mac.ops.fc_autoneg(hw);
/* Disable any previous flow control settings */
mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
@@ -2839,7 +2850,7 @@ out:
* Find the intersection between advertised settings and link partner's
* advertised settings
**/
-STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg))) {
@@ -3799,68 +3810,65 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* return the VLVF index where this VLAN id should be placed
*
**/
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
- u32 bits = 0;
- u32 first_empty_slot = 0;
- s32 regindex;
+ s32 regindex, first_empty_slot;
+ u32 bits;
/* short cut the special case */
if (vlan == 0)
return 0;
- /*
- * Search for the vlan id in the VLVF entries. Save off the first empty
- * slot found along the way
- */
- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ /* if vlvf_bypass is set we don't want to use an empty slot, we
+ * will simply bypass the VLVF if there are no entries present in the
+ * VLVF that contain our VLAN
+ */
+ first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
+
+ /* add VLAN enable bit for comparison */
+ vlan |= IXGBE_VLVF_VIEN;
+
+ /* Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way.
+ *
+ * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
+ */
+ for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
- if (!bits && !(first_empty_slot))
+ if (bits == vlan)
+ return regindex;
+ if (!first_empty_slot && !bits)
first_empty_slot = regindex;
- else if ((bits & 0x0FFF) == vlan)
- break;
}
- /*
- * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
- * in the VLVF. Else use the first empty VLVF register for this
- * vlan id.
- */
- if (regindex >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- regindex = first_empty_slot;
- else {
- ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
- "No space in VLVF.\n");
- regindex = IXGBE_ERR_NO_SPACE;
- }
- }
+ /* If we are here then we didn't find the VLAN. Return first empty
+ * slot we found during our search, else error.
+ */
+ if (!first_empty_slot)
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
- return regindex;
+ return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
}
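The rewritten lookup above scans VLVF entries 63 down to 1, compares each against the VLAN id with the enable bit folded in, and only records an empty slot when vlvf_bypass is false. A minimal standalone sketch of that search logic, with a plain array standing in for the VLVF registers and hypothetical names (not the driver's register accessors):

#include <stdint.h>

#define VLVF_ENTRIES 64
#define VLVF_VIEN    (1u << 31)   /* "VLAN enable" bit, assumed layout */
#define ERR_NO_SPACE (-25)

/* vlvf[] stands in for the hardware VLVF register file in this sketch. */
static int find_vlvf_slot(const uint32_t vlvf[VLVF_ENTRIES], uint32_t vlan,
                          int bypass)
{
    int first_empty = bypass ? ERR_NO_SPACE : 0;

    if (vlan == 0)                      /* entry 0 is reserved for VLAN 0 */
        return 0;

    vlan |= VLVF_VIEN;                  /* match only enabled entries */

    for (int i = VLVF_ENTRIES; --i; ) { /* pre-decrement: 63 .. 1 */
        if (vlvf[i] == vlan)
            return i;                   /* VLAN already owns a slot */
        if (!first_empty && !vlvf[i])
            first_empty = i;            /* remember the first free slot */
    }
    return first_empty ? first_empty : ERR_NO_SPACE;
}

Because first_empty starts out non-zero in the bypass case, no free slot is ever recorded and a miss falls straight through to the error value, matching the "don't consume an empty slot" intent described in the comment.
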
/**
* ixgbe_set_vfta_generic - Set VLAN filter table
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on, bool vlvf_bypass)
{
- s32 regindex;
- u32 bitindex;
- u32 vfta;
- u32 targetbit;
- s32 ret_val = IXGBE_SUCCESS;
- bool vfta_changed = false;
+ u32 regidx, vfta_delta, vfta;
+ s32 ret_val;
DEBUGFUNC("ixgbe_set_vfta_generic");
- if (vlan > 4095)
+ if (vlan > 4095 || vind > 63)
return IXGBE_ERR_PARAM;
/*
@@ -3875,33 +3883,32 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* bits[11-5]: which register
* bits[4-0]: which bit in the register
*/
- regindex = (vlan >> 5) & 0x7F;
- bitindex = vlan & 0x1F;
- targetbit = (1 << bitindex);
- vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-
- if (vlan_on) {
- if (!(vfta & targetbit)) {
- vfta |= targetbit;
- vfta_changed = true;
- }
- } else {
- if ((vfta & targetbit)) {
- vfta &= ~targetbit;
- vfta_changed = true;
- }
- }
+ regidx = vlan / 32;
+ vfta_delta = 1 << (vlan % 32);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
+
+ /* vfta_delta represents the difference between the current value
+ * of vfta and the value we want in the register. Since the diff
+ * is an XOR mask we can just update the vfta using an XOR
+ */
+ vfta_delta &= vlan_on ? ~vfta : vfta;
+ vfta ^= vfta_delta;
/* Part 2
* Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
*/
- ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
- &vfta_changed);
- if (ret_val != IXGBE_SUCCESS)
+ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
+ vfta, vlvf_bypass);
+ if (ret_val != IXGBE_SUCCESS) {
+ if (vlvf_bypass)
+ goto vfta_update;
return ret_val;
+ }
- if (vfta_changed)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+vfta_update:
+ /* Update VFTA now that we are ready for traffic */
+ if (vfta_delta)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
return IXGBE_SUCCESS;
}
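The vfta_delta comment above is the key idea of this rewrite: the delta is an XOR mask, so the same value both updates the VFTA dword and tells ixgbe_set_vlvf_generic whether the bit is really changing. A self-contained sketch of just that arithmetic, using a hypothetical helper and no hardware access:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Compute the new VFTA dword and the XOR delta for one VLAN id.
 * regidx selects which 32-bit VFTA register the VLAN falls into. */
static uint32_t vfta_apply(uint32_t vfta, uint32_t vlan, bool vlan_on,
                           uint32_t *delta, uint32_t *regidx)
{
    *regidx = vlan / 32;
    *delta = 1u << (vlan % 32);

    /* keep the bit only if it actually has to flip */
    *delta &= vlan_on ? ~vfta : vfta;

    return vfta ^ *delta;   /* XOR applies the change, or leaves it alone */
}

int main(void)
{
    uint32_t delta, regidx;

    /* enabling VLAN 100 in an empty register sets bit 4 of VFTA[3] */
    assert(vfta_apply(0, 100, true, &delta, &regidx) == (1u << 4));
    assert(regidx == 3 && delta == (1u << 4));

    /* asking for a state the register already has yields a zero delta */
    assert(vfta_apply(1u << 4, 100, true, &delta, &regidx) == (1u << 4));
    assert(delta == 0);
    return 0;
}

A zero delta means the later "if (vfta_delta)" write is skipped entirely, so redundant requests never touch the register.
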
@@ -3910,21 +3917,25 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* ixgbe_set_vlvf_generic - Set VLAN Pool Filter
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
- * @vfta_changed: pointer to boolean flag which indicates whether VFTA
- * should be changed
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_delta: pointer to the difference between the current value of VFTA
+ * and the desired value
+ * @vfta: the desired value of the VFTA
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
*
* Turn on/off specified bit in VLVF table.
**/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool *vfta_changed)
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass)
{
- u32 vt;
+ u32 bits;
+ s32 vlvf_index;
DEBUGFUNC("ixgbe_set_vlvf_generic");
- if (vlan > 4095)
+ if (vlan > 4095 || vind > 63)
return IXGBE_ERR_PARAM;
/* If VT Mode is set
@@ -3934,82 +3945,57 @@ s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
- vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- if (vt & IXGBE_VT_CTL_VT_ENABLE) {
- s32 vlvf_index;
- u32 bits;
-
- vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
- if (vlvf_index < 0)
- return vlvf_index;
-
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2),
- bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits |= (1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1),
- bits);
- }
- } else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2),
- bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits &= ~(1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1),
- bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- }
- }
+ if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
+ return IXGBE_SUCCESS;
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
+ if (vlvf_index < 0)
+ return vlvf_index;
- /*
- * If there are still bits set in the VLVFB registers
- * for the VLAN ID indicated we need to see if the
- * caller is requesting that we clear the VFTA entry bit.
- * If the caller has requested that we clear the VFTA
- * entry bit but there are still pools/VFs using this VLAN
- * ID entry then ignore the request. We're not worried
- * about the case where we're turning the VFTA VLAN ID
- * entry bit on, only when requested to turn it off as
- * there may be multiple pools and/or VFs using the
- * VLAN ID entry. In that case we cannot clear the
- * VFTA bit until all pools/VFs using that VLAN ID have also
- * been cleared. This will be indicated by "bits" being
- * zero.
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
+
+ /* set the pool bit */
+ bits |= 1 << (vind % 32);
+ if (vlan_on)
+ goto vlvf_update;
+
+ /* clear the pool bit */
+ bits ^= 1 << (vind % 32);
+
+ if (!bits &&
+ !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
+ /* Clear VFTA first, then disable VLVF. Otherwise
+ * we run the risk of stray packets leaking into
+ * the PF via the default pool
*/
- if (bits) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
- (IXGBE_VLVF_VIEN | vlan));
- if ((!vlan_on) && (vfta_changed != NULL)) {
- /* someone wants to clear the vfta entry
- * but some pools/VFs are still using it.
- * Ignore it. */
- *vfta_changed = false;
- }
- } else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ if (vfta_delta)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
+
+ /* disable VLVF and clear remaining bit from pool */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
+
+ return IXGBE_SUCCESS;
}
+ /* If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ *vfta_delta = 0;
+
+vlvf_update:
+ /* record pool change and enable VLAN ID if not already enabled */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
return IXGBE_SUCCESS;
}
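Each VLVF slot owns two 32-pool VLVFB halves, which is why the code above addresses them as vlvf_index * 2 + vind / 32 and probes the sibling half with vlvf_index * 2 + 1 - vind / 32 before declaring the slot empty. A small sketch of that index arithmetic in plain integers (hypothetical helper names):

#include <assert.h>

/* VLVFB register index holding the enable bit for pool `vind`
 * of VLVF entry `slot` (two 32-pool halves per entry). */
static unsigned vlvfb_index(unsigned slot, unsigned vind)
{
    return slot * 2 + vind / 32;
}

/* index of the other half of the same entry */
static unsigned vlvfb_sibling(unsigned slot, unsigned vind)
{
    return slot * 2 + 1 - vind / 32;
}

int main(void)
{
    /* pool 5 of slot 7 lives in VLVFB[14]; its sibling half is VLVFB[15] */
    assert(vlvfb_index(7, 5) == 14 && vlvfb_sibling(7, 5) == 15);
    /* pool 40 of slot 7 lives in VLVFB[15]; its sibling half is VLVFB[14] */
    assert(vlvfb_index(7, 40) == 15 && vlvfb_sibling(7, 40) == 14);
    return 0;
}

The ordering note in the comment is also why vfta is passed down: when the last pool bit drops, the VFTA bit is cleared first so stray packets cannot leak into the PF via the default pool through a still-enabled VLVF entry.
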
@@ -4032,7 +4018,7 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
}
return IXGBE_SUCCESS;
@@ -4218,43 +4204,25 @@ out:
/**
* ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
* @hw: pointer to hardware structure
- * @enable: enable or disable switch for anti-spoofing
- * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
*
**/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
- int j;
- int pf_target_reg = pf >> 3;
- int pf_target_shift = pf % 8;
- u32 pfvfspoof = 0;
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ u32 pfvfspoof;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
- /*
- * PFVFSPOOF register array is size 8 with 8 bits assigned to
- * MAC anti-spoof enables in each register array element.
- */
- for (j = 0; j < pf_target_reg; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Do not set the bits assigned to the PF
- */
- pfvfspoof &= (1 << pf_target_shift) - 1;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * Remaining pools belong to the PF so they do not need to have
- * anti-spoofing enabled.
- */
- for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
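The reworked MAC anti-spoofing path is a per-VF read-modify-write: eight enable bits per PFVFSPOOF register, so the register index is vf >> 3 and the bit is vf % 8, and the old loop that programmed whole registers at once goes away. A standalone sketch of the bit bookkeeping, with an array standing in for the register file (hypothetical names):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SPOOF_REGS 8    /* 8 registers x 8 VFs = 64 pools */

/* pfvfspoof[] models IXGBE_PFVFSPOOF(0..7) in this sketch */
static void set_mac_anti_spoofing(uint32_t pfvfspoof[SPOOF_REGS],
                                  bool enable, int vf)
{
    int reg = vf >> 3;          /* which register */
    int shift = vf % 8;         /* which MACAS bit inside it */

    if (enable)
        pfvfspoof[reg] |= 1u << shift;
    else
        pfvfspoof[reg] &= ~(1u << shift);
}

int main(void)
{
    uint32_t regs[SPOOF_REGS] = {0};

    set_mac_anti_spoofing(regs, true, 19);   /* VF 19 -> register 2, bit 3 */
    assert(regs[2] == (1u << 3));
    set_mac_anti_spoofing(regs, false, 19);
    assert(regs[2] == 0);
    return 0;
}

Because only the targeted bit changes, every other pool's setting is preserved, which is what allows spoof checking to be toggled per VF rather than blanket-enabled.
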
/**
@@ -4363,8 +4331,9 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* So we will leave this up to the caller to read back the data
* in these cases.
*
- * Communicates with the manageability block. On success return IXGBE_SUCCESS
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ * Communicates with the manageability block. On success return IXGBE_SUCCESS
+ * else returns semaphore error when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
**/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data)
@@ -4373,6 +4342,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
u16 buf_len;
u16 dword_len;
+ s32 status;
DEBUGFUNC("ixgbe_host_interface_command");
@@ -4380,6 +4350,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ if (status)
+ return status;
+
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
@@ -4388,13 +4364,15 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if ((hicr & IXGBE_HICR_EN) == 0) {
DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
if ((length % (sizeof(u32))) != 0) {
DEBUGOUT("Buffer length failure, not aligned to dword");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto rel_out;
}
dword_len = length >> 2;
@@ -4421,11 +4399,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
ERROR_REPORT1(IXGBE_ERROR_CAUTION,
"Command has failed with no status valid.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
if (!return_data)
- return 0;
+ goto rel_out;
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
@@ -4439,11 +4418,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* If there is any thing in data position pull it in */
buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
if (buf_len == 0)
- return 0;
+ goto rel_out;
if (length < buf_len + hdr_size) {
DEBUGOUT("Buffer not large enough for reply message.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs, add 3 for odd lengths */
@@ -4455,7 +4435,10 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
IXGBE_LE32_TO_CPUS(&buffer[bi]);
}
- return 0;
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ return status;
}
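ixgbe_host_interface_command now takes the SW_MNG semaphore up front and funnels every later failure through rel_out, so the release runs exactly once on all paths; that is also why ixgbe_set_fw_drv_ver_generic can drop its own acquire/release further down. A minimal sketch of the acquire / goto-cleanup shape with stand-in lock and command functions (simplified checks, not the driver's real API):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the semaphore ops and the command exchange. */
static int take_mng_semaphore(void)     { return 0; }
static void release_mng_semaphore(void) { }
static int issue_command(const uint32_t *buf, uint32_t len)
{
    (void)buf;
    return len ? 0 : -1;
}

static int host_interface_command(const uint32_t *buf, uint32_t len)
{
    int status = take_mng_semaphore();

    if (status)
        return status;          /* nothing held yet, bail out directly */

    if (len == 0 || (len % 4) != 0) {
        status = -22;           /* invalid argument */
        goto rel_out;           /* every later failure path jumps here */
    }

    status = issue_command(buf, len);

rel_out:
    release_mng_semaphore();    /* released exactly once on all paths */
    return status;
}

int main(void)
{
    uint32_t cmd[4] = {0};
    printf("status=%d\n", host_interface_command(cmd, sizeof(cmd)));
    return 0;
}
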
/**
@@ -4480,12 +4463,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
- != IXGBE_SUCCESS) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
-
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
@@ -4517,8 +4494,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
break;
}
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
-out:
return ret_val;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h
index fd67a889..0545f85c 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.h
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -133,11 +133,12 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
+ u32 vind, bool vlan_on, bool vlvf_bypass);
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool *vfta_changed);
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -147,7 +148,7 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h
index 4a120a3d..d775142d 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -109,7 +109,9 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
/* mailbox API, version 1.2 VF requests */
-#define IXGBE_VF_UPDATE_XCAST_MODE 0x0C
+#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0C
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 40b0b512..31cc1bef 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -96,6 +96,7 @@ enum {
#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i)
#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i)
#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i)
+#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i)
#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i)
#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i)
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 6ed685e8..ed1b14f3 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -454,6 +454,9 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case X557_PHY_ID:
phy_type = ixgbe_phy_x550em_ext_t;
break;
+ case IXGBE_M88E1500_E_PHY_ID:
+ phy_type = ixgbe_phy_m88;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -615,13 +618,12 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
DEBUGFUNC("ixgbe_read_phy_reg_generic");
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
- status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
- phy_data);
- hw->mac.ops.release_swfw_sync(hw, gssr);
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
- }
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
return status;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h
index 1a5affe5..281f9faf 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.h
+++ b/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -89,8 +89,11 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_CS4227 0xBE /* CS4227 address */
#define IXGBE_CS4227_GLOBAL_ID_LSB 0
+#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
+#define IXGBE_CS4223_PHY_ID 0x7003/* Quad port */
+#define IXGBE_CS4227_PHY_ID 0x3003/* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index 4dce2ac1..83818a96 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -133,12 +133,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
-#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15C6
-#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8
#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA
#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC
#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
+#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
+#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
@@ -194,7 +196,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_FLA_X540 IXGBE_FLA
#define IXGBE_FLA_X550 IXGBE_FLA
#define IXGBE_FLA_X550EM_x IXGBE_FLA
-#define IXGBE_FLA_X550EM_a 0x15F6C
+#define IXGBE_FLA_X550EM_a 0x15F68
#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA)
#define IXGBE_EEMNGCTL 0x10110
@@ -1078,16 +1080,40 @@ struct ixgbe_dmac_config {
#define IXGBE_PCIEPIPEDAT 0x11008
#define IXGBE_GSCL_1 0x11010
#define IXGBE_GSCL_2 0x11014
+#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1
+#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2
#define IXGBE_GSCL_3 0x11018
#define IXGBE_GSCL_4 0x1101C
#define IXGBE_GSCN_0 0x11020
#define IXGBE_GSCN_1 0x11024
#define IXGBE_GSCN_2 0x11028
#define IXGBE_GSCN_3 0x1102C
+#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0
+#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1
+#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2
+#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3
#define IXGBE_FACTPS 0x10150
#define IXGBE_FACTPS_X540 IXGBE_FACTPS
+#define IXGBE_GSCL_1_X550 0x11800
+#define IXGBE_GSCL_2_X550 0x11804
+#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550
+#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550
+#define IXGBE_GSCN_0_X550 0x11820
+#define IXGBE_GSCN_1_X550 0x11824
+#define IXGBE_GSCN_2_X550 0x11828
+#define IXGBE_GSCN_3_X550 0x1182C
+#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550
+#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550
+#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550
+#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550
#define IXGBE_FACTPS_X550 IXGBE_FACTPS
#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS
+#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550
+#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550
+#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550
+#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550
+#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550
+#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550
#define IXGBE_FACTPS_X550EM_a 0x15FEC
#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS)
@@ -1124,6 +1150,10 @@ struct ixgbe_dmac_config {
#define IXGBE_GSCL_6_82599 0x11034
#define IXGBE_GSCL_7_82599 0x11038
#define IXGBE_GSCL_8_82599 0x1103C
+#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599
+#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599
+#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599
+#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599
#define IXGBE_PHYADR_82599 0x11040
#define IXGBE_PHYDAT_82599 0x11044
#define IXGBE_PHYCTL_82599 0x11048
@@ -1134,10 +1164,22 @@ struct ixgbe_dmac_config {
#define IXGBE_CIAD_82599 IXGBE_CIAD
#define IXGBE_CIAA_X540 IXGBE_CIAA
#define IXGBE_CIAD_X540 IXGBE_CIAD
+#define IXGBE_GSCL_5_X550 0x11810
+#define IXGBE_GSCL_6_X550 0x11814
+#define IXGBE_GSCL_7_X550 0x11818
+#define IXGBE_GSCL_8_X550 0x1181C
+#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550
+#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550
+#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550
+#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550
#define IXGBE_CIAA_X550 0x11508
#define IXGBE_CIAD_X550 0x11510
#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550
#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550
+#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550
+#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550
+#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550
+#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550
#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550
#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550
#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA)
@@ -1472,6 +1514,7 @@ struct ixgbe_dmac_config {
#define IXGBE_CORECTL_WRITE_CMD 0x00010000
/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
@@ -1606,7 +1649,8 @@ struct ixgbe_dmac_config {
#define ATH_PHY_ID 0x03429050
/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0
+#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
@@ -2247,6 +2291,9 @@ enum {
#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_EEPROM_CTRL_4 0x45
+#define IXGBE_EE_CTRL_4_INST_ID 0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
#define IXGBE_PHY_PTR 0x04
@@ -2358,6 +2405,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7)
#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
#define IXGBE_FW_LESM_STATE_1 0x1
#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
@@ -2820,7 +2868,7 @@ enum {
#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P)))
#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P)))
#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P)))
-#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * (P)))
+#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P)))
#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
@@ -3003,6 +3051,12 @@ enum ixgbe_fdir_pballoc_type {
/* Host Interface Command Structures */
+#ifdef C99
+#pragma pack(push, 1)
+#else
+#pragma pack(1)
+#endif /* C99 */
+
struct ixgbe_hic_hdr {
u8 cmd;
u8 buf_len;
@@ -3080,17 +3134,22 @@ struct ixgbe_hic_internal_phy_req {
struct ixgbe_hic_hdr hdr;
u8 port_number;
u8 command_type;
- u16 address;
+ __be16 address;
u16 rsv1;
- u32 write_data;
+ __le32 write_data;
u16 pad;
};
struct ixgbe_hic_internal_phy_resp {
struct ixgbe_hic_hdr hdr;
- u32 read_data;
+ __le32 read_data;
};
+#ifdef C99
+#pragma pack(pop)
+#else
+#pragma pack()
+#endif /* C99 */
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
@@ -3246,6 +3305,7 @@ typedef u32 ixgbe_autoneg_advertised;
/* Link speed */
typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_10_FULL 0x0004
#define IXGBE_LINK_SPEED_100_FULL 0x0008
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
@@ -3511,6 +3571,8 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_intel,
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
+ ixgbe_phy_sgmii,
+ ixgbe_phy_m88,
ixgbe_phy_generic
};
@@ -3567,6 +3629,14 @@ enum ixgbe_fc_mode {
ixgbe_fc_default
};
+/* Master/slave control */
+enum ixgbe_ms_type {
+ ixgbe_ms_hw_default = 0,
+ ixgbe_ms_force_master,
+ ixgbe_ms_force_slave,
+ ixgbe_ms_auto
+};
+
/* Smart Speed Settings */
#define IXGBE_SMARTSPEED_MAX_RETRIES 3
enum ixgbe_smart_speed {
@@ -3626,7 +3696,8 @@ struct ixgbe_bus_info {
enum ixgbe_bus_type type;
u16 func;
- u16 lan_id;
+ u8 lan_id;
+ u16 instance_id;
};
/* Flow control parameters */
@@ -3766,6 +3837,7 @@ struct ixgbe_mac_operations {
s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*init_swfw_sync)(struct ixgbe_hw *);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
@@ -3805,8 +3877,9 @@ struct ixgbe_mac_operations {
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
- s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
- s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
+ s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, u32 *, u32,
+ bool);
s32 (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
@@ -3814,6 +3887,7 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
s32 (*setup_fc)(struct ixgbe_hw *);
+ void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@@ -3941,6 +4015,8 @@ struct ixgbe_phy_info {
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
ixgbe_link_speed speeds_supported;
+ enum ixgbe_ms_type ms_type;
+ enum ixgbe_ms_type original_ms_type;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
@@ -4057,8 +4133,12 @@ struct ixgbe_hw {
#define IXGBE_FUSES0_REV_MASK (3 << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
@@ -4072,18 +4152,29 @@ struct ixgbe_hw {
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
+#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
+#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0)
+#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11)
+
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19)
+
#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
@@ -4116,6 +4207,11 @@ struct ixgbe_hw {
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1 << 1)
+#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M (1 << 23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
+ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c
index 40dc1c8c..a75074a5 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -362,8 +362,10 @@ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+ return IXGBE_ERR_MBX;
+ }
return ret_val;
}
@@ -422,13 +424,15 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
* @vlan_on: if true then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
**/
-s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 ret_val;
- UNREFERENCED_1PARAMETER(vind);
+ UNREFERENCED_2PARAMETER(vind, vlvf_bypass);
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h
index 411152a4..8851cb82 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.h
+++ b/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -31,8 +31,8 @@ POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
-#ifndef __IXGBE_VF_H__
-#define __IXGBE_VF_H__
+#ifndef _IXGBE_VF_H_
+#define _IXGBE_VF_H_
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
@@ -131,7 +131,8 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr,
bool clear);
-s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass);
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c
index 9ade1b5e..31dead0d 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -99,6 +99,7 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
+ mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540;
mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
@@ -268,11 +269,14 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
- hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+ hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+ IXGBE_CLEAR_VMDQ_ALL);
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
@@ -943,6 +947,25 @@ STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_swfw_sync_X540 - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function resets hardware semaphore bits for a semaphore that may
+ * have been left locked due to a catastrophic failure.
+ **/
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
+{
+ /* First try to grab the semaphore but we don't need to bother
+ * looking to see whether we got the lock or not since we do
+ * the same thing regardless of whether we got the lock or not.
+ * We got the lock - we release it.
+ * We timeout trying to get the lock - we force its release.
+ */
+ ixgbe_get_swfw_sync_semaphore(hw);
+ ixgbe_release_swfw_sync_semaphore(hw);
+}
+
+/**
* ixgbe_blink_led_start_X540 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
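The ixgbe_init_swfw_sync_X540 helper added above relies on one property: whether the acquire succeeds or times out, forcing a release afterwards is safe, which frees a semaphore left locked by a crashed driver. A toy illustration of that acquire-then-always-release recovery pattern (stub lock state, hypothetical names):

#include <stdbool.h>
#include <stdio.h>

static bool hw_semaphore_locked;        /* stands in for SWSM/SW_FW_SYNC state */

static bool try_get_semaphore(void)     /* fails if a dead owner left it set */
{
    if (hw_semaphore_locked)
        return false;
    hw_semaphore_locked = true;
    return true;
}

static void release_semaphore(void)
{
    hw_semaphore_locked = false;
}

/* Recovery: grab the lock if we can, then release it unconditionally. */
static void init_swfw_semaphore(void)
{
    (void)try_get_semaphore();
    release_semaphore();
}

int main(void)
{
    hw_semaphore_locked = true;         /* simulate a stale lock after a crash */
    init_swfw_semaphore();
    printf("locked=%d\n", hw_semaphore_locked);   /* 0: usable again */
    return 0;
}
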
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.h b/drivers/net/ixgbe/base/ixgbe_x540.h
index 42c08a82..e4baf6ff 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.h
+++ b/drivers/net/ixgbe/base/ixgbe_x540.h
@@ -59,6 +59,7 @@ s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index 0bbaa55b..aa6e859f 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -39,8 +39,8 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_phy.h"
STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
-static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
-static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
+STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
+STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
/**
* ixgbe_init_ops_X550 - Inits func ptrs and MAC type
@@ -329,6 +329,137 @@ STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @dev_type: always unused
+ * @phy_data: Pointer to read data from PHY register
+ */
+STATIC s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 dev_type, u16 *phy_data)
+{
+ u32 i, data, command;
+ UNREFERENCED_1PARAMETER(dev_type);
+
+ /* Setup and write the read command */
+ command = (reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+
+ if (command & IXGBE_MSCA_MDI_COMMAND) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY read command did not complete.\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /* Read operation is complete. Get the data from MSRWD */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)data;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @dev_type: always unused
+ * @phy_data: Data to write to the PHY register
+ */
+STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 dev_type, u16 phy_data)
+{
+ u32 i, command;
+ UNREFERENCED_1PARAMETER(dev_type);
+
+ /* Put the data in the MDI single read and write data register*/
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the write command */
+ command = (reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+
+ if (command & IXGBE_MSCA_MDI_COMMAND) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return IXGBE_SUCCESS;
+}
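Both clause-22 MDI helpers above share the same completion protocol: write the command word to MSCA, then poll every 10 usec until hardware clears the MDI Command bit or the retry budget runs out. A compact, self-contained sketch of that poll loop with stubbed register access (hypothetical names, not the osdep macros):

#include <stdint.h>
#include <stdio.h>

#define MDI_COMMAND_BIT (1u << 30)      /* busy while set, cleared when done */
#define MDIO_TIMEOUT    100             /* poll iterations, 10 us apart */

static uint32_t msca;                   /* stand-in for the MSCA register */
static void write_msca(uint32_t v) { msca = v; }
static uint32_t read_msca(void)
{
    msca &= ~MDI_COMMAND_BIT;           /* pretend the hardware finished */
    return msca;
}
static void delay_10us(void) { }

static int mdi_command(uint32_t command)
{
    write_msca(command | MDI_COMMAND_BIT);

    for (unsigned i = 0; i < MDIO_TIMEOUT; i++) {
        delay_10us();
        if (!(read_msca() & MDI_COMMAND_BIT))
            return 0;                   /* MDIO cycle completed */
    }
    return -1;                          /* still busy: report a PHY error */
}

int main(void)
{
    printf("mdi_command -> %d\n", mdi_command(0x1234));
    return 0;
}
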
+
+/**
+ * ixgbe_identify_phy_1g - Get 1g PHY type based on device id
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+STATIC s32 ixgbe_identify_phy_1g(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u16 phy_id_high;
+ u16 phy_id_low;
+ s32 rc;
+
+ rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_MDIO_PHY_ID_HIGH, 0,
+ &phy_id_high);
+ if (rc)
+ goto rel_out;
+
+ rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_MDIO_PHY_ID_LOW, 0,
+ &phy_id_low);
+ if (rc)
+ goto rel_out;
+
+ hw->phy.id = (u32)phy_id_high << 16;
+ hw->phy.id |= phy_id_low & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = (u32)phy_id_low & ~IXGBE_PHY_REVISION_MASK;
+
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ return rc;
+}
+
+/**
* ixgbe_identify_phy_x550em - Get PHY type based on device id
* @hw: pointer to hardware structure
*
@@ -338,7 +469,8 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
- hw->phy.phy_semaphore_mask = IXGBE_GSSR_TOKEN_SM;
+ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
+ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
else
@@ -364,10 +496,17 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_A_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
+ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ return ixgbe_identify_phy_1g(hw);
default:
break;
}
@@ -397,7 +536,7 @@ STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -412,7 +551,7 @@ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32
+STATIC s32
ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
@@ -428,7 +567,7 @@ ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -443,7 +582,7 @@ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
*
* Returns an error code on error.
**/
-static s32
+STATIC s32
ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
@@ -512,8 +651,8 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
link->addr = IXGBE_CS4227;
}
if (hw->mac.type == ixgbe_mac_X550EM_a) {
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
}
@@ -527,12 +666,21 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
mac->ops.setup_fc = ixgbe_setup_fc_generic;
+ else if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ mac->ops.setup_fc = ixgbe_setup_fc_x550a;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_x550a;
+ }
else
mac->ops.setup_fc = ixgbe_setup_fc_X550em;
-
- if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR)
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ break;
+ default:
mac->ops.setup_eee = NULL;
+ }
/* PHY */
phy->ops.init = ixgbe_init_phy_ops_X550em;
@@ -717,6 +865,116 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_enable_eee_x550 - Enable EEE support
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_enable_eee_x550(struct ixgbe_hw *hw)
+{
+ u16 autoneg_eee_reg;
+ u32 link_reg;
+ s32 status;
+
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Advertise EEE capability */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_eee_reg);
+
+ autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_eee_reg);
+ return IXGBE_SUCCESS;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
+
+ /* Don't advertise FEC capability when EEE enabled. */
+ link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ break;
+ default:
+ break;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_disable_eee_x550 - Disable EEE support
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_disable_eee_x550(struct ixgbe_hw *hw)
+{
+ u16 autoneg_eee_reg;
+ u32 link_reg;
+ s32 status;
+
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Disable advertised EEE capability */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_eee_reg);
+
+ autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_eee_reg);
+ return IXGBE_SUCCESS;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
+
+ /* Advertise FEC capability when EEE is disabled. */
+ link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ break;
+ default:
+ break;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_setup_eee_X550 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @enable_eee: boolean flag to enable EEE
@@ -728,10 +986,8 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
**/
s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
{
- u32 eeer;
- u16 autoneg_eee_reg;
- u32 link_reg;
s32 status;
+ u32 eeer;
DEBUGFUNC("ixgbe_setup_eee_X550");
@@ -740,75 +996,20 @@ s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
if (enable_eee) {
eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Advertise EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
-
- autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
- } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
- /* Not supported on first revision of X550EM_x. */
- if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
- !(IXGBE_FUSES0_REV_MASK &
- IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
- return IXGBE_SUCCESS;
-
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
-
- /* Don't advertise FEC capability when EEE enabled. */
- link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- }
+ /* Not supported on first revision of X550EM_x. */
+ if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
+ !(IXGBE_FUSES0_REV_MASK &
+ IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
+ return IXGBE_SUCCESS;
+ status = ixgbe_enable_eee_x550(hw);
+ if (status)
+ return status;
} else {
eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Disable advertised EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
-
- autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
- } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
-
- /* Advertise FEC capability when EEE is disabled. */
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- }
+ status = ixgbe_disable_eee_x550(hw);
+ if (status)
+ return status;
}
IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
@@ -1007,6 +1208,7 @@ s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
token_cmd.port_number = hw->bus.lan_id;
token_cmd.command_type = FW_PHY_TOKEN_REQ;
token_cmd.pad = 0;
@@ -1037,6 +1239,7 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
token_cmd.port_number = hw->bus.lan_id;
token_cmd.command_type = FW_PHY_TOKEN_REL;
token_cmd.pad = 0;
@@ -1048,6 +1251,8 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
return status;
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return IXGBE_SUCCESS;
+
+ DEBUGOUT("Put PHY Token host interface command failed");
return IXGBE_ERR_FW_RESP_INVALID;
}
@@ -1060,23 +1265,24 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
* @data: Data to write to the register
**/
s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data)
+ u32 device_type, u32 data)
{
struct ixgbe_hic_internal_phy_req write_cmd;
s32 status;
UNREFERENCED_1PARAMETER(device_type);
+ memset(&write_cmd, 0, sizeof(write_cmd));
write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
write_cmd.port_number = hw->bus.lan_id;
write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
- write_cmd.address = (u16)reg_addr;
- write_cmd.rsv1 = 0;
- write_cmd.write_data = data;
- write_cmd.pad = 0;
+ write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
+ write_cmd.write_data = IXGBE_CPU_TO_LE32(data);
status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
- sizeof(write_cmd), IXGBE_HI_COMMAND_TIMEOUT, false);
+ sizeof(write_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -1090,26 +1296,29 @@ s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* @data: Pointer to read data from the register
**/
s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 *data)
+ u32 device_type, u32 *data)
{
- struct ixgbe_hic_internal_phy_req read_cmd;
+ union {
+ struct ixgbe_hic_internal_phy_req cmd;
+ struct ixgbe_hic_internal_phy_resp rsp;
+ } hic;
s32 status;
UNREFERENCED_1PARAMETER(device_type);
- read_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
- read_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
- read_cmd.port_number = hw->bus.lan_id;
- read_cmd.command_type = FW_INT_PHY_REQ_READ;
- read_cmd.address = (u16)reg_addr;
- read_cmd.rsv1 = 0;
- read_cmd.write_data = 0;
- read_cmd.pad = 0;
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.command_type = FW_INT_PHY_REQ_READ;
+ hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
- status = ixgbe_host_interface_command(hw, (u32 *)&read_cmd,
- sizeof(read_cmd), IXGBE_HI_COMMAND_TIMEOUT, true);
+ status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
+ sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
/* Extract the register value from the response. */
- *data = ((struct ixgbe_hic_internal_phy_resp *)&read_cmd)->read_data;
+ *data = IXGBE_LE32_TO_CPU(hic.rsp.read_data);
return status;
}
@@ -1286,10 +1495,18 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ media_type = ixgbe_media_type_copper;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ media_type = ixgbe_media_type_backplane;
+ hw->phy.type = ixgbe_phy_sgmii;
+ break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
- case IXGBE_DEV_ID_X550EM_A_10G_T:
media_type = ixgbe_media_type_copper;
+ hw->phy.type = ixgbe_phy_m88;
break;
default:
media_type = ixgbe_media_type_unknown;
@@ -1382,6 +1599,57 @@ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_sgmii - Set up link for sgmii
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval;
+ s32 rc;
+ UNREFERENCED_2PARAMETER(speed, autoneg_wait_to_complete);
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+
+ return rc;
+}
+
+/**
* ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
*/
@@ -1391,8 +1659,8 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
- switch (hw->mac.ops.get_media_type(hw)) {
- case ixgbe_media_type_fiber:
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
/* CS4227 does not support autoneg, so disable the laser control
* functions for SFP+ fiber
*/
@@ -1400,17 +1668,28 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
- mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
mac->ops.set_rate_select_speed =
ixgbe_set_soft_rate_select_speed;
+ if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
+ (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550a;
+ else
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550em;
break;
case ixgbe_media_type_copper:
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
mac->ops.check_link = ixgbe_check_link_t_X550em;
break;
+ case ixgbe_media_type_backplane:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+ mac->ops.setup_link = ixgbe_setup_sgmii;
+ break;
default:
break;
- }
+ }
}
/**
@@ -1447,8 +1726,19 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
- *speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
+ switch (hw->phy.type) {
+ case ixgbe_phy_m88:
+ *speed = IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case ixgbe_phy_sgmii:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
*autoneg = true;
}
@@ -1644,9 +1934,9 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
return status;
@@ -1664,14 +1954,210 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
/* Restart auto-negotiation. */
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
return status;
}
/**
+ * ixgbe_set_master_slave_mode - Set up PHY for master/slave mode
+ * @hw: pointer to hardware structure
+ *
+ * Must be called while holding the PHY semaphore and token
+ */
+STATIC s32 ixgbe_set_master_slave_mode(struct ixgbe_hw *hw)
+{
+ u16 phy_data;
+ s32 rc;
+
+ /* Resolve master/slave mode */
+ rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_M88E1500_1000T_CTRL, 0,
+ &phy_data);
+ if (rc)
+ return rc;
+
+ /* load defaults for future use */
+ if (phy_data & IXGBE_M88E1500_1000T_CTRL_MS_ENABLE) {
+ if (phy_data & IXGBE_M88E1500_1000T_CTRL_MS_VALUE)
+ hw->phy.original_ms_type = ixgbe_ms_force_master;
+ else
+ hw->phy.original_ms_type = ixgbe_ms_force_slave;
+ } else {
+ hw->phy.original_ms_type = ixgbe_ms_auto;
+ }
+
+ switch (hw->phy.ms_type) {
+ case ixgbe_ms_force_master:
+ phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_ENABLE;
+ phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_VALUE;
+ break;
+ case ixgbe_ms_force_slave:
+ phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_ENABLE;
+ phy_data &= ~IXGBE_M88E1500_1000T_CTRL_MS_VALUE;
+ break;
+ case ixgbe_ms_auto:
+ phy_data &= ~IXGBE_M88E1500_1000T_CTRL_MS_ENABLE;
+ break;
+ default:
+ break;
+ }
+
+ return ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_1000T_CTRL, 0,
+ phy_data);
+}
+
+/**
+ * ixgbe_reset_phy_m88_nolock - Reset m88 PHY without locking
+ * @hw: pointer to hardware structure
+ *
+ * Must be called while holding the PHY semaphore and token
+ */
+STATIC s32 ixgbe_reset_phy_m88_nolock(struct ixgbe_hw *hw)
+{
+ s32 rc;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_FIBER_CTRL, 0,
+ IXGBE_M88E1500_FIBER_CTRL_RESET |
+ IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
+ IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB);
+ if (rc)
+ goto res_out;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 18);
+ if (rc)
+ goto res_out;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_GEN_CTRL, 0,
+ IXGBE_M88E1500_GEN_CTRL_RESET |
+ IXGBE_M88E1500_GEN_CTRL_SGMII_COPPER);
+ if (rc)
+ goto res_out;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ if (rc)
+ goto res_out;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
+ IXGBE_M88E1500_COPPER_CTRL_RESET |
+ IXGBE_M88E1500_COPPER_CTRL_AN_EN |
+ IXGBE_M88E1500_COPPER_CTRL_RESTART_AN |
+ IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX |
+ IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB);
+
+res_out:
+ ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ return rc;
+}
+
+/**
+ * ixgbe_reset_phy_m88 - Reset m88 PHY
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_reset_phy_m88(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ s32 rc;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_reset_phy_m88_nolock(hw);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return rc;
+}
+
+/**
+ * ixgbe_setup_m88 - setup m88 PHY
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_setup_m88(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ u16 phy_data;
+ s32 rc;
+
+ if (phy->reset_disable || ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_M88E1500_PHY_SPEC_CTRL, 0,
+ &phy_data);
+ if (rc)
+ goto rel_out;
+
+ /* Enable downshift and set it to 6X */
+ phy_data &= ~IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE;
+ phy_data |= IXGBE_M88E1500_PSCR_DOWNSHIFT_6X;
+ phy_data |= IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE;
+ rc = ixgbe_write_phy_reg_mdi_22(hw,
+ IXGBE_M88E1500_PHY_SPEC_CTRL, 0,
+ phy_data);
+ if (rc)
+ goto rel_out;
+
+ ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+
+ /* Commit the changes */
+ rc = ixgbe_reset_phy_m88_nolock(hw);
+ if (rc) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ goto rel_out;
+ }
+
+ rc = ixgbe_set_master_slave_mode(hw);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return rc;
+
+rel_out:
+ ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return rc;
+}
+
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read the NW_MNG_IF_SEL register, save the field values, and check for
+ * valid field values.
+ **/
+STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If X552 (X550EM_a) and the MDIO is connected to an external PHY, then
+ * set the PHY address. This register field has only been used for X552.
+ */
+ if (hw->mac.type == ixgbe_mac_X550EM_a &&
+ hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
+ hw->phy.addr = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1688,14 +2174,11 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
-
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode.
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
}
@@ -1722,11 +2205,6 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
case ixgbe_phy_x550em_ext_t:
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
-
/* If internal link mode is XFI, then setup iXFI internal link,
* else setup KR now.
*/
@@ -1742,6 +2220,15 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
+ case ixgbe_phy_sgmii:
+ phy->ops.setup_link = NULL;
+ break;
+ case ixgbe_phy_m88:
+ phy->ops.setup_link = ixgbe_setup_m88;
+ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
+ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
+ phy->ops.reset = ixgbe_reset_phy_m88;
+ break;
default:
break;
}
@@ -1752,12 +2239,14 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
* ixgbe_set_mdio_speed - Set MDIO clock speed
* @hw: pointer to hardware structure
*/
-static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
+STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
{
u32 hlreg0;
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_A_10G_T:
@@ -1933,10 +2422,13 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
* ixgbe_setup_kr_x550em - Configure the KR PHY.
* @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY.
+ * Configures the integrated KR PHY for X550EM_x.
**/
s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
+ if (hw->mac.type != ixgbe_mac_X550EM_x)
+ return IXGBE_SUCCESS;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -2019,6 +2511,99 @@ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
+ * @hw: pointer to hardware structure
+ *
+ * Configure the integrated PHY for SFP support.
+ **/
+s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 ret_val;
+ u16 reg_phy_ext;
+ bool setup_linear = false;
+ u32 reg_slice, reg_phy_int, slice_offset;
+
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* Check if SFP module is supported and linear */
+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success; SFP not present is not
+ * treated as an error in the setup MAC link flow.
+ */
+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ return IXGBE_SUCCESS;
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
+ /* Configure internal PHY for native SFI */
+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ if (setup_linear) {
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
+ } else {
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
+ }
+
+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* Setup XFI/SFI internal link. */
+ ret_val = ixgbe_setup_ixfi_x550em(hw, &speed);
+ } else {
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
+ /* Find Address */
+ DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ }
+
+ /* Get external PHY device id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* When configuring quad port CS4223, the MAC instance is part
+ * of the slice offset.
+ */
+ if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ slice_offset = (hw->bus.lan_id +
+ (hw->bus.instance_id << 1)) << 12;
+ else
+ slice_offset = hw->bus.lan_id << 12;
+
+ /* Configure CS4227/CS4223 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+ if (setup_linear)
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ ret_val = hw->phy.ops.write_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+ }
+ return ret_val;
+}
+
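A brief worked example of the slice addressing above (illustrative values only): on a quad-port CS4223 with hw->bus.lan_id = 1 and hw->bus.instance_id = 1, the code computes

	slice_offset = (1 + (1 << 1)) << 12;                       /* 0x3000 */
	reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;

whereas on a CS4227 the offset is simply lan_id << 12, so each MAC instance lands on its own register slice.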
+/**
* ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
* @hw: pointer to hardware structure
*
@@ -2261,57 +2846,57 @@ s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
u32 reg_val;
/* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Set near-end loopback clocks. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Set loopback enable. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Training bypass. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
return status;
}
@@ -3147,24 +3732,30 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
goto out;
}
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
- ret_val = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (ret_val != IXGBE_SUCCESS)
goto out;
reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
- IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
if (pause)
reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
if (asm_dir)
reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
- ret_val = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* This device does not fully support AN. */
hw->fc.disable_fc_autoneg = true;
+ break;
+ default:
+ break;
}
out:
@@ -3172,6 +3763,183 @@ out:
}
/**
+ * ixgbe_fc_autoneg_x550a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw)
+{
+ u32 link_s1, lp_an_page_low, an_cntl_1;
+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+ goto out;
+ }
+
+ /* Check that auto-negotiation has completed */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_S1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
+
+ if (status != IXGBE_SUCCESS ||
+ (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ /* Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
+ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_setup_fc_x550a - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 an_cntl, link_ctrl = 0;
+
+ DEBUGFUNC("ixgbe_setup_fc_x550a");
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do FC autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ return status;
+ }
+
+ /* The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
+
+ /* Restart auto-negotiation. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ return status;
+ }
+
+ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
+
+ return status;
+}
+
+/**
* ixgbe_set_mux - Set mux for port 1 access with CS4227
* @hw: pointer to hardware structure
* @state: set mux if 1, clear if 0
@@ -3238,7 +4006,7 @@ void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
*
* Acquires the SWFW semaphore and get the shared phy token as needed
*/
-static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
+STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
{
u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
int retries = FW_PHY_TOKEN_RETRIES;
@@ -3247,17 +4015,22 @@ static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
while (--retries) {
+ status = IXGBE_SUCCESS;
if (hmask)
status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
if (status)
- break;
+ return status;
if (!(mask & IXGBE_GSSR_TOKEN_SM))
- break;
+ return IXGBE_SUCCESS;
+
status = ixgbe_get_phy_token(hw);
- if (status != IXGBE_ERR_TOKEN_RETRY)
- break;
+ if (status == IXGBE_SUCCESS)
+ return IXGBE_SUCCESS;
+
if (hmask)
ixgbe_release_swfw_sync_X540(hw, hmask);
+ if (status != IXGBE_ERR_TOKEN_RETRY)
+ return status;
msec_delay(FW_PHY_TOKEN_DELAY);
}
@@ -3271,7 +4044,7 @@ static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
*
* Releases the SWFW semaphore and puts the shared phy token as needed
*/
-static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
+STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
{
u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
@@ -3285,6 +4058,63 @@ static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
}
/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ **/
+s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ s32 status;
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_read_phy_reg_x550a");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to a specified PHY register using the SWFW lock and PHY Token.
+ * The PHY Token is needed since the MDIO is shared between two MAC instances.
+ **/
+s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ s32 status;
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_write_phy_reg_x550a");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
* ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
* @hw: pointer to hardware structure
*
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h
index a8c0a678..27d5d02f 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.h
+++ b/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -36,6 +36,49 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_type.h"
+/* More phy definitions */
+#define IXGBE_M88E1500_COPPER_CTRL 0x0 /* Page 0 reg */
+#define IXGBE_M88E1500_COPPER_CTRL_RESET 0x8000
+#define IXGBE_M88E1500_COPPER_CTRL_AN_EN 0x1000
+#define IXGBE_M88E1500_COPPER_CTRL_RESTART_AN 0x0200
+#define IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX 0x0100
+#define IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB 0x0040
+#define IXGBE_M88E1500_1000T_CTRL 0x09 /* 1000Base-T Ctrl Reg */
+/* 1=Configure PHY as Master 0=Configure PHY as Slave */
+#define IXGBE_M88E1500_1000T_CTRL_MS_VALUE 0x0800
+/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
+#define IXGBE_M88E1500_1000T_CTRL_MS_ENABLE 0x1000
+#define IXGBE_M88E1500_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define IXGBE_M88E1500_AUTO_COPPER_SGMII 0x2
+#define IXGBE_M88E1500_AUTO_COPPER_BASEX 0x3
+#define IXGBE_M88E1500_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define IXGBE_M88E1500_MAC_CTRL_1 0x10
+#define IXGBE_M88E1500_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define IXGBE_M88E1500_CFG_REG_1 0x0010
+#define IXGBE_M88E1500_CFG_REG_2 0x0011
+#define IXGBE_M88E1500_CFG_REG_3 0x0007
+#define IXGBE_M88E1500_MODE 0x0014
+#define IXGBE_M88E1500_PAGE_ADDR 0x16 /* Page Offset reg */
+#define IXGBE_M88E1500_FIBER_CTRL 0x0 /* Page 1 reg */
+#define IXGBE_M88E1500_FIBER_CTRL_RESET 0x8000
+#define IXGBE_M88E1500_FIBER_CTRL_SPEED_LSB 0x2000
+#define IXGBE_M88E1500_FIBER_CTRL_POWER_DOWN 0x0800
+#define IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL 0x0100
+#define IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB 0x0040
+#define IXGBE_M88E1500_EEE_CTRL_1 0x0 /* Page 18 reg */
+#define IXGBE_M88E1500_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define IXGBE_M88E1500_GEN_CTRL 0x14 /* Page 18 reg */
+#define IXGBE_M88E1500_GEN_CTRL_RESET 0x8000
+#define IXGBE_M88E1500_GEN_CTRL_SGMII_COPPER 0x0001 /* Mode bits 0-2 */
+
+/* M88E1500 Specific Registers */
+#define IXGBE_M88E1500_PHY_SPEC_CTRL 0x10 /* PHY Specific Ctrl Reg */
+#define IXGBE_M88E1500_PHY_SPEC_STATUS 0x11 /* PHY Specific Stat Reg */
+
+#define IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define IXGBE_M88E1500_PSCR_DOWNSHIFT_MASK 0x7000
+#define IXGBE_M88E1500_PSCR_DOWNSHIFT_6X 0x5000
+
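The M88E1500 registers above are clause-22 paged registers: software first writes the page number to IXGBE_M88E1500_PAGE_ADDR and then accesses the offset within that page, as ixgbe_reset_phy_m88_nolock() does in ixgbe_x550.c. A minimal read sketch under that assumption (m88e1500_read_paged is a hypothetical helper, and the PHY semaphore/token are assumed to be held by the caller):

	static s32 m88e1500_read_paged(struct ixgbe_hw *hw, u16 page, u32 reg,
				       u16 *val)
	{
		s32 rc;

		/* Select the page, read the register, then restore page 0. */
		rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, page);
		if (rc)
			return rc;
		rc = ixgbe_read_phy_reg_mdi_22(hw, reg, 0, val);
		ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
		return rc;
	}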
s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
@@ -100,6 +143,15 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
+s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw);
s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
diff --git a/drivers/net/ixgbe/ixgbe_82599_bypass.c b/drivers/net/ixgbe/ixgbe_82599_bypass.c
index 21c42eac..de9fa5a7 100644
--- a/drivers/net/ixgbe/ixgbe_82599_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_82599_bypass.c
@@ -297,8 +297,8 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
{
int rc;
- if ((rc = ixgbe_init_hw(hw)) == 0 &&
- hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ rc = ixgbe_init_hw(hw);
+ if (rc == 0 && hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
hw->mac.ops.setup_link =
&ixgbe_setup_mac_link_multispeed_fixed_fiber;
@@ -306,8 +306,8 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
hw->mac.ops.disable_tx_laser = NULL;
- hw->mac.ops.enable_tx_laser = NULL;
- hw->mac.ops.flap_tx_laser = NULL;
+ hw->mac.ops.enable_tx_laser = NULL;
+ hw->mac.ops.flap_tx_laser = NULL;
}
return rc;
diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c
index 73f608b9..70069284 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_bypass.c
@@ -82,7 +82,7 @@ ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
BYPASS_CTL1_VALID_M |
BYPASS_CTL1_OFFTRST_M;
value = (sec & BYPASS_CTL1_TIME_M) |
- BYPASS_CTL1_VALID |
+ BYPASS_CTL1_VALID |
BYPASS_CTL1_OFFTRST;
FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
@@ -275,8 +275,8 @@ s32
ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
{
struct ixgbe_hw *hw;
- u32 status;
- u32 mask;
+ u32 status;
+ u32 mask;
s32 ret_val;
struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h
index fcd97743..5f5c63e3 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.h
+++ b/drivers/net/ixgbe/ixgbe_bypass.h
@@ -37,10 +37,10 @@
#ifdef RTE_NIC_BYPASS
struct ixgbe_bypass_mac_ops {
- s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status);
- bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg);
- s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
- s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value);
+ s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status);
+ bool (*bypass_valid_rd)(u32 in_reg, u32 out_reg);
+ s32 (*bypass_set)(struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
+ s32 (*bypass_rd_eep)(struct ixgbe_hw *hw, u32 addr, u8 *value);
};
struct ixgbe_bypass_info {
diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h
index 22570acf..cafcb278 100644
--- a/drivers/net/ixgbe/ixgbe_bypass_defines.h
+++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h
@@ -136,7 +136,7 @@ enum ixgbe_state_t {
#define BYPASS_LOG_EVENT_SHIFT 28
#define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */
#define IXGBE_DEV_TO_ADPATER(dev) \
- ((struct ixgbe_adapter*)(dev->data->dev_private))
+ ((struct ixgbe_adapter *)(dev->data->dev_private))
/* extractions from ixgbe_phy.h */
#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 3f1ebc15..0629b426 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -150,6 +150,7 @@
#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG 0x00000004
+#define IXGBE_VTEICR_MASK 0x07
enum ixgbevf_xcast_modes {
IXGBEVF_XCAST_MODE_NONE = 0,
@@ -157,6 +158,9 @@ enum ixgbevf_xcast_modes {
IXGBEVF_XCAST_MODE_ALLMULTI,
};
+#define IXGBE_EXVET_VET_EXT_SHIFT 16
+#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
+
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
@@ -174,11 +178,15 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
@@ -232,7 +240,7 @@ static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
@@ -264,14 +272,14 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
- ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+ ether_addr * mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask,uint8_t vlan_on);
+ uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
@@ -361,6 +369,8 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
const struct timespec *timestamp);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
static int ixgbe_dev_l2_tunnel_eth_type_conf
(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
@@ -397,21 +407,21 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
last = latest; \
}
-#define IXGBE_SET_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] |= 1 << bit;\
} while (0)
-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)
-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
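As a quick sanity check of the bitmap indexing in these macros (assuming the hwstrip bitmap uses 32-bit words, so sizeof((h)->bitmap[0]) * NBBY == 32): for queue q = 37,

	idx = 37 / 32;            /* 1 */
	bit = 37 % 32;            /* 5 */
	(h)->bitmap[1] |= 1 << 5; /* what IXGBE_SET_HWSTRIP(h, 37) expands to */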
@@ -466,6 +476,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.xstats_get = ixgbe_dev_xstats_get,
.stats_reset = ixgbe_dev_stats_reset,
.xstats_reset = ixgbe_dev_xstats_reset,
+ .xstats_get_names = ixgbe_dev_xstats_get_names,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
.dev_infos_get = ixgbe_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
@@ -555,6 +566,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.xstats_get = ixgbevf_dev_xstats_get,
.stats_reset = ixgbevf_dev_stats_reset,
.xstats_reset = ixgbevf_dev_stats_reset,
+ .xstats_get_names = ixgbevf_dev_xstats_get_names,
.dev_close = ixgbevf_dev_close,
.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
@@ -685,6 +697,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
sizeof(rte_ixgbe_rxq_strings[0]))
+#define IXGBE_NB_RXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
@@ -695,6 +708,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
sizeof(rte_ixgbe_txq_strings[0]))
+#define IXGBE_NB_TXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
@@ -901,8 +915,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
- }
- else {
+ } else {
PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
@@ -911,7 +924,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
}
static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
struct ixgbe_stat_mapping_registers *stat_mappings =
IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
@@ -929,7 +942,7 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
}
static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
uint8_t i;
struct ixgbe_dcb_tc_config *tc;
@@ -952,7 +965,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
tc = &dcb_config->tc_config[0];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
- for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
}
@@ -1016,7 +1029,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1039,17 +1052,18 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
* has already done this work. Only check we don't need a different
* RX and TX function.
*/
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
- * Tx queue may not initialized by primary process */
+ * Tx queue may not be initialized by primary process
+ */
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
ixgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
- "Using default TX function.");
+ "Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
@@ -1086,7 +1100,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
- ixgbe_dcb_init(hw,dcb_config);
+ ixgbe_dcb_init(hw, dcb_config);
/* Get Hardware Flow Control setting */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full;
@@ -1127,11 +1141,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
- "LOM. Please be aware there may be issues associated "
- "with your hardware.");
+ "LOM. Please be aware there may be issues associated "
+ "with your hardware.");
PMD_INIT_LOG(ERR, "If you are experiencing problems "
- "please contact your Intel or hardware representative "
- "who provided you with this hardware.");
+ "please contact your Intel or hardware representative "
+ "who provided you with this hardware.");
} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
if (diag) {
@@ -1150,12 +1164,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Copy the permanent MAC address */
@@ -1164,11 +1178,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for storing hash filter MAC addresses */
eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- IXGBE_VMDQ_NUM_UC_MAC, 0);
+ IXGBE_VMDQ_NUM_UC_MAC, 0);
if (eth_dev->data->hash_mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
return -ENOMEM;
}
@@ -1198,8 +1212,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
(int) hw->mac.type, (int) hw->phy.type);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
- eth_dev->data->port_id, pci_dev->id.vendor_id,
- pci_dev->id.device_id);
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
rte_intr_callback_register(&pci_dev->intr_handle,
ixgbe_dev_interrupt_handler,
@@ -1214,7 +1228,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
return 0;
}
@@ -1313,7 +1327,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1327,8 +1341,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
- * RX function */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
* Tx queue may not initialized by primary process
@@ -1339,7 +1354,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE,
- "No TX queues configured yet. Using default TX function.");
+ "No TX queues configured yet. Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
@@ -1398,12 +1413,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
@@ -1433,14 +1448,20 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
/* reset the hardware with the new settings */
diag = hw->mac.ops.start_hw(hw);
switch (diag) {
- case 0:
- break;
+ case 0:
+ break;
- default:
- PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return -EIO;
+ default:
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ return -EIO;
}
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ ixgbevf_dev_interrupt_handler,
+ (void *)eth_dev);
+ rte_intr_enable(&pci_dev->intr_handle);
+ ixgbevf_intr_enable(hw);
+
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id, "ixgbe_mac_82599_vf");
@@ -1454,6 +1475,7 @@ static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct ixgbe_hw *hw;
+ struct rte_pci_device *pci_dev = eth_dev->pci_dev;
PMD_INIT_FUNC_TRACE();
@@ -1475,6 +1497,11 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
+ rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ ixgbevf_dev_interrupt_handler,
+ (void *)eth_dev);
+
return 0;
}
@@ -1537,7 +1564,7 @@ ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vfta;
uint32_t vid_idx;
@@ -1575,15 +1602,47 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int ret = 0;
+ uint32_t reg;
+ uint32_t qinq;
+
+ qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ qinq &= IXGBE_DMATXCTL_GDV;
switch (vlan_type) {
case ETH_VLAN_TYPE_INNER:
- /* Only the high 16-bits is valid */
- IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+ if (qinq) {
+ reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+ | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+ } else {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Inner type is not supported"
+ " by single VLAN");
+ }
+ break;
+ case ETH_VLAN_TYPE_OUTER:
+ if (qinq) {
+ /* Only the high 16-bits is valid */
+ IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
+ IXGBE_EXVET_VET_EXT_SHIFT);
+ } else {
+ reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+ | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+ }
+
break;
default:
ret = -EINVAL;
- PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
break;
}
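
The hunk above programs the VLAN TPID either into VLNCTRL.VET (single VLAN) or, when QinQ is enabled via DMATXCTL.GDV, into EXVET/DMATXCTL. The read-modify-write pattern it repeats is sketched below with illustrative mask/shift parameters; set_tpid_field is not part of the driver.

/*
 * Hedged sketch of the clear-then-insert update used for the
 * VLNCTRL.VET and DMATXCTL.VT fields above. The mask and shift are
 * parameters here; in the driver they are fixed register constants.
 */
#include <stdint.h>

uint32_t
set_tpid_field(uint32_t reg, uint16_t tpid, uint32_t field_mask,
	       uint32_t field_shift)
{
	reg &= ~field_mask;                                  /* clear old TPID bits */
	reg |= ((uint32_t)tpid << field_shift) & field_mask; /* insert new TPID     */
	return reg;
}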
@@ -1611,7 +1670,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vlnctrl;
uint16_t i;
@@ -1635,6 +1694,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+ struct ixgbe_rx_queue *rxq;
if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
return;
@@ -1643,6 +1703,16 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
IXGBE_SET_HWSTRIP(hwstrip, queue);
else
IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+ if (queue >= dev->data->nb_rx_queues)
+ return;
+
+ rxq = dev->data->rx_queues[queue];
+
+ if (on)
+ rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ else
+ rxq->vlan_flags = PKT_RX_VLAN_PKT;
}
static void
@@ -1659,12 +1729,12 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}
@@ -1683,12 +1753,12 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
@@ -1707,8 +1777,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl &= ~IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@@ -1735,8 +1804,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@@ -1836,6 +1904,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
@@ -2116,7 +2185,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
ixgbe_stop_adapter(hw);
/* reinitialize adapter
- * this calls reset and start */
+ * this calls reset and start
+ */
status = ixgbe_pf_reset_hw(hw);
if (status != 0)
return -1;
@@ -2251,7 +2321,7 @@ skip_link_setup:
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbe_vlan_offload_set(dev, mask);
@@ -2471,8 +2541,8 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
for (i = 0; i < 8; i++) {
- uint32_t mp;
- mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
/* global total per queue */
hw_stats->mpc[i] += mp;
/* Running comprehensive total for stats display */
@@ -2664,15 +2734,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Rx Errors */
stats->imissed = total_missed_rx;
stats->ierrors = hw_stats->crcerrs +
- hw_stats->mspdc +
- hw_stats->rlec +
- hw_stats->ruc +
- hw_stats->roc +
- hw_stats->illerrc +
- hw_stats->errbc +
- hw_stats->rfc +
- hw_stats->fccrc +
- hw_stats->fclast;
+ hw_stats->mspdc +
+ hw_stats->rlec +
+ hw_stats->ruc +
+ hw_stats->roc +
+ hw_stats->illerrc +
+ hw_stats->errbc +
+ hw_stats->rfc +
+ hw_stats->fccrc +
+ hw_stats->fclast;
/* Tx Errors */
stats->oerrors = 0;
@@ -2694,12 +2764,76 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
/* This function calculates the number of xstats based on the current config */
static unsigned
ixgbe_xstats_calc_num(void) {
- return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
- (IXGBE_NB_TXQ_PRIO_STATS * 8);
+ return IXGBE_NB_HW_STATS +
+ (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
+ (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
+}
+
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+ const unsigned cnt_stats = ixgbe_xstats_calc_num();
+ unsigned stat, i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_names()
+ */
+
+ /* Extended stats from ixgbe_hw_stats */
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_stats_strings[i].name);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", i,
+ rte_ixgbe_rxq_strings[stat].name);
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", i,
+ rte_ixgbe_txq_strings[stat].name);
+ count++;
+ }
+ }
+ }
+ return cnt_stats;
+}
+
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned limit)
+{
+ unsigned i;
+
+ if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
+ return -ENOMEM;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_ixgbevf_stats_strings[i].name);
+ return IXGBEVF_NB_XSTATS;
}
static int
-ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct ixgbe_hw *hw =
@@ -2731,8 +2865,6 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* Extended stats from ixgbe_hw_stats */
count = 0;
for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
- rte_ixgbe_stats_strings[i].name);
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_stats_strings[i].offset);
count++;
@@ -2740,10 +2872,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* RX Priority Stats */
for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
- for (i = 0; i < 8; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "rx_priority%u_%s", i,
- rte_ixgbe_rxq_strings[stat].name);
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_rxq_strings[stat].offset +
(sizeof(uint64_t) * i));
@@ -2753,17 +2882,13 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* TX Priority Stats */
for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
- for (i = 0; i < 8; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "tx_priority%u_%s", i,
- rte_ixgbe_txq_strings[stat].name);
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_txq_strings[stat].offset +
(sizeof(uint64_t) * i));
count++;
}
}
-
return count;
}
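
The xstats rework above splits name reporting (ixgbe_dev_xstats_get_names) from value reporting (ixgbe_dev_xstats_get); both must emit entries in the same order so that names[i] pairs with values[i]. A minimal sketch of that shared layout, with placeholder counts, follows.

/*
 * Hedged sketch: the entry ordering assumed by both callbacks above --
 * hardware stats first, then one slot per priority for each RX stat,
 * then the same for TX. The counts below are placeholders, not the
 * driver's real IXGBE_NB_* values.
 */
#include <stdio.h>

#define NB_HW_STATS       3
#define NB_RXQ_PRIO_STATS 2
#define NB_TXQ_PRIO_STATS 2
#define NB_PRIO_VALUES    8   /* 8 user priorities per queue stat */

int main(void)
{
	unsigned int total = NB_HW_STATS +
			     NB_RXQ_PRIO_STATS * NB_PRIO_VALUES +
			     NB_TXQ_PRIO_STATS * NB_PRIO_VALUES;

	/* names[] and values[] are filled in this exact order, so a
	 * caller can pair names[i] with values[i]. */
	printf("xstats entries: %u\n", total);
	return 0;
}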
@@ -2786,7 +2911,7 @@ static void
ixgbevf_update_stats(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Good Rx packet, include VF loopback */
@@ -2811,7 +2936,7 @@ ixgbevf_update_stats(struct rte_eth_dev *dev)
}
static int
-ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -2828,8 +2953,6 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* Extended stats */
for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
- snprintf(xstats[i].name, sizeof(xstats[i].name),
- "%s", rte_ixgbevf_stats_strings[i].name);
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbevf_stats_strings[i].offset);
}
@@ -2852,14 +2975,12 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->ibytes = hw_stats->vfgorc;
stats->opackets = hw_stats->vfgptc;
stats->obytes = hw_stats->vfgotc;
- stats->imcasts = hw_stats->vfmprc;
- /* stats->imcasts should be removed as imcasts is deprecated */
}
static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Sync HW register to the last stats */
@@ -2870,8 +2991,6 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
hw_stats->vfgorc = 0;
hw_stats->vfgptc = 0;
hw_stats->vfgotc = 0;
- hw_stats->vfmprc = 0;
-
}
static void
@@ -3356,7 +3475,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
if (intr_enable_delay) {
if (rte_eal_alarm_set(timeout * 1000,
- ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
} else {
PMD_DRV_LOG(DEBUG, "enable intr immediately");
@@ -3575,7 +3694,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* Enable flow control according to the current settings.
*/
static int
-ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
{
int ret_val = 0;
uint32_t mflcn_reg, fccfg_reg;
@@ -3622,13 +3741,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_rx_pause:
/*
@@ -3645,20 +3764,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_tx_pause:
/*
* Tx Flow control is enabled, and Rx Flow control is
* disabled by software override.
*/
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
@@ -3669,7 +3788,6 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
ret_val = IXGBE_ERR_CONFIG;
goto out;
- break;
}
/* Set 802.3x based flow control settings. */
@@ -3708,13 +3826,13 @@ out:
}
static int
-ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
if (hw->mac.type != ixgbe_mac_82598EB) {
- ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+ ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
}
return ret_val;
}
@@ -3728,9 +3846,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
uint8_t tc_num;
uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_dcb_config *dcb_config =
- IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
ixgbe_fc_none,
@@ -3763,7 +3881,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
- err = ixgbe_dcb_pfc_enable(dev,tc_num);
+ err = ixgbe_dcb_pfc_enable(dev, tc_num);
/* Not negotiated is not an error case */
if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
@@ -3911,7 +4029,8 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
+ * feature has not been enabled before.
+ */
if (!dev->data->scattered_rx &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
@@ -3971,7 +4090,7 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw)
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
- struct rte_eth_conf* conf = &dev->data->dev_conf;
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
@@ -4033,10 +4152,10 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
}
/* Set vfta */
- ixgbevf_set_vfta_all(dev,1);
+ ixgbevf_set_vfta_all(dev, 1);
/* Set HW strip */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbevf_vlan_offload_set(dev, mask);
@@ -4077,6 +4196,8 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ ixgbevf_intr_disable(hw);
+
hw->adapter_stopped = 1;
ixgbe_stop_adapter(hw);
@@ -4084,7 +4205,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
* Clear what we set, but we still keep shadow_vfta to
* restore after device starts
*/
- ixgbevf_set_vfta_all(dev,0);
+ ixgbevf_set_vfta_all(dev, 0);
/* Clear stored conf */
dev->data->scattered_rx = 0;
@@ -4123,18 +4244,19 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
int i = 0, j = 0, vfta = 0, mask = 1;
- for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
vfta = shadow_vfta->vfta[i];
if (vfta) {
mask = 1;
- for (j = 0; j < 32; j++){
+ for (j = 0; j < 32; j++) {
if (vfta & mask)
- ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
- mask<<=1;
+ ixgbe_set_vfta(hw, (i<<5)+j, 0,
+ on, false);
+ mask <<= 1;
}
}
}
@@ -4146,7 +4268,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vid_idx = 0;
uint32_t vid_bit = 0;
@@ -4155,7 +4277,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
PMD_INIT_FUNC_TRACE();
/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
- ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
+ ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to set VF vlan");
return ret;
@@ -4191,7 +4313,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
ctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
static void
@@ -4207,7 +4329,7 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
for (i = 0; i < hw->mac.max_rx_queues; i++)
- ixgbevf_vlan_strip_queue_set(dev,i,on);
+ ixgbevf_vlan_strip_queue_set(dev, i, on);
}
}
@@ -4227,9 +4349,10 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
}
static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
{
uint32_t vector = 0;
+
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((uc_addr->addr_bytes[4] >> 4) |
@@ -4257,8 +4380,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
}
static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
- uint8_t on)
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint8_t on)
{
uint32_t vector;
uint32_t uta_idx;
@@ -4279,7 +4402,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
if (hw->mac.type < ixgbe_mac_82599EB)
return -ENOTSUP;
- vector = ixgbe_uta_vector(hw,mac_addr);
+ vector = ixgbe_uta_vector(hw, mac_addr);
uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
uta_shift = vector & ixgbe_uta_bit_mask;
@@ -4304,7 +4427,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
else
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
return 0;
}
@@ -4389,7 +4512,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
static int
ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
@@ -4399,16 +4522,26 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
if (ixgbe_vmdq_mode_check(hw) < 0)
return -ENOTSUP;
- addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+ if (pool >= ETH_64_POOLS)
+ return -EINVAL;
+
+ /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+ if (pool >= 32) {
+ addr = IXGBE_VFRE(1);
+ val = bit1 << (pool - 32);
+ } else {
+ addr = IXGBE_VFRE(0);
+ val = bit1 << pool;
+ }
+
reg = IXGBE_READ_REG(hw, addr);
- val = bit1 << pool;
if (on)
reg |= val;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
@@ -4416,7 +4549,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
static int
ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
@@ -4426,16 +4559,26 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
if (ixgbe_vmdq_mode_check(hw) < 0)
return -ENOTSUP;
- addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+ if (pool >= ETH_64_POOLS)
+ return -EINVAL;
+
+ /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+ if (pool >= 32) {
+ addr = IXGBE_VFTE(1);
+ val = bit1 << (pool - 32);
+ } else {
+ addr = IXGBE_VFTE(0);
+ val = bit1 << pool;
+ }
+
reg = IXGBE_READ_REG(hw, addr);
- val = bit1 << pool;
if (on)
reg |= val;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
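
The two hunks above replace the old VFRE/VFTE indexing (pool >= ETH_64_POOLS/2) with an explicit range check and a split at pool 32. The register/bit selection they implement reduces to the arithmetic sketched below; pool_to_reg_bit is illustrative, not a driver function.

/*
 * Hedged sketch: pools 0-31 map to PFVFRE[0]/PFVFTE[0], pools 32-63 to
 * index 1, with the bit position taken modulo 32.
 */
#include <stdint.h>

struct pool_bit {
	uint8_t  reg_idx;  /* 0 or 1 */
	uint32_t bit;      /* single bit inside that 32-bit register */
};

struct pool_bit
pool_to_reg_bit(uint16_t pool)
{
	struct pool_bit pb;

	pb.reg_idx = (pool >= 32) ? 1 : 0;
	pb.bit = (uint32_t)1 << (pool & 31);  /* == pool - 32 in the upper half */
	return pb;
}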
@@ -4453,7 +4596,8 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
return -ENOTSUP;
for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
- ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
+ ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
+ vlan_on, false);
if (ret < 0)
return ret;
}
@@ -4475,7 +4619,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
- uint32_t mr_ctl,vlvf;
+ uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
uint32_t mv_msb = 0;
uint32_t mv_lsb = 0;
@@ -4488,7 +4632,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
const uint8_t vlan_mask_offset = 32;
const uint8_t dst_pool_offset = 8;
const uint8_t rule_mr_offset = 4;
- const uint8_t mirror_rule_mask= 0x0F;
+ const uint8_t mirror_rule_mask = 0x0F;
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
@@ -4511,11 +4655,12 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
/* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
- for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+ for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
/* search vlan id related pool vlan filter index */
reg_index = ixgbe_find_vlvf_slot(hw,
- mirror_conf->vlan.vlan_id[i]);
+ mirror_conf->vlan.vlan_id[i],
+ false);
if (reg_index < 0)
return -EINVAL;
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
@@ -4800,6 +4945,9 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
uint32_t q_idx;
uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+ /* Configure VF other cause ivar */
+ ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+
/* won't configure msix register if no mapping is done
* between intr vector and event fd.
*/
@@ -4814,9 +4962,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
intr_handle->intr_vec[q_idx] = vector_idx;
}
-
- /* Configure VF other cause ivar */
- ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
}
/**
@@ -5306,7 +5451,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
+ * feature has not been enabled before.
+ */
if (!dev->data->scattered_rx &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
@@ -7135,6 +7281,67 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
}
+static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 in_msg = 0;
+
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+ return;
+
+ /* PF reset VF event */
+ if (in_msg == IXGBE_PF_CONTROL_MSG)
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+}
+
+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ ixgbevf_intr_disable(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ intr->flags = 0;
+
+ /* only one misc vector supported - mailbox */
+ eicr &= IXGBE_VTEICR_MASK;
+ if (eicr == IXGBE_MISC_VEC_ID)
+ intr->flags |= IXGBE_FLAG_MAILBOX;
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (intr->flags & IXGBE_FLAG_MAILBOX) {
+ ixgbevf_mbx_process(dev);
+ intr->flags &= ~IXGBE_FLAG_MAILBOX;
+ }
+
+ ixgbevf_intr_enable(hw);
+
+ return 0;
+}
+
+static void
+ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ ixgbevf_dev_interrupt_get_status(dev);
+ ixgbevf_dev_interrupt_action(dev);
+}
+
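
The block above wires VF mailbox interrupts end to end: the misc-cause IVAR is mapped, the handler reads VTEICR, and a PF control message is surfaced to the application as RTE_ETH_EVENT_INTR_RESET. A hedged sketch of how an application might consume that event follows; the callback signature shown matches the 16.07-era API and may differ in other releases.

/*
 * Illustrative consumer of the PF-reset event raised by
 * ixgbevf_mbx_process() above. vf_reset_event_cb is not part of the
 * driver; it stands in for application code.
 */
#include <stdio.h>
#include <rte_ethdev.h>

void
vf_reset_event_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
	(void)cb_arg;
	if (event == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: PF requested a VF reset, restart the port\n",
		       port_id);
}

/* Registration, typically right after configuring the port:
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *                                 vf_reset_event_cb, NULL);
 */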
static struct rte_driver rte_ixgbe_driver = {
.type = PMD_PDEV,
.init = rte_ixgbe_pmd_init,
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 2e4c353a..861c7cbe 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -189,14 +189,13 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
msec_delay(1);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
- PMD_INIT_LOG(ERR, "Flow Director poll time exceeded "
- "during enabling!");
+ PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
return -ETIMEDOUT;
}
return 0;
@@ -282,6 +281,7 @@ static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
uint32_t mask = hi_dword << 16;
+
mask |= lo_dword;
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
@@ -810,8 +810,10 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
/* Process bits 0 and 16 */
- if (key & 0x0001) hash_result ^= lo_hash_dword;
- if (key & 0x00010000) hash_result ^= hi_hash_dword;
+ if (key & 0x0001)
+ hash_result ^= lo_hash_dword;
+ if (key & 0x00010000)
+ hash_result ^= hi_hash_dword;
/*
* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
@@ -822,9 +824,11 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
/* process the remaining 30 bits in the key 2 bits at a time */
- for (i = 15; i; i-- ) {
- if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
- if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+ for (i = 15; i; i--) {
+ if (key & (0x0001 << i))
+ hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i))
+ hash_result ^= hi_hash_dword >> i;
}
return hash_result;
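
The hash loop above is the 82599 ATR/signature fold: each set bit i in the low half of the key XORs in lo_hash_dword >> i, and each set bit in the high half XORs in hi_hash_dword >> i. A simplified, self-contained version is sketched below; the driver additionally folds the VM pool/VLAN bits into lo_hash_dword between bit 0 and the remaining bits, which this sketch omits.

/*
 * Hedged sketch of the key-driven hash fold used by
 * ixgbe_atr_compute_hash_82599() above (VLAN/VM-pool adjustment omitted).
 */
#include <stdint.h>

uint32_t
atr_hash_fold(uint32_t key, uint32_t lo_hash_dword, uint32_t hi_hash_dword)
{
	uint32_t hash_result = 0;
	int i;

	for (i = 0; i < 16; i++) {
		if (key & (0x0001u << i))
			hash_result ^= lo_hash_dword >> i;
		if (key & (0x00010000u << i))
			hash_result ^= hi_hash_dword >> i;
	}
	return hash_result;
}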
@@ -1016,7 +1020,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
@@ -1080,9 +1084,9 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
*/
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter,
- bool del,
- bool update)
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags;
@@ -1092,7 +1096,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
bool is_perfect = FALSE;
int err;
struct ixgbe_hw_fdir_info *info =
- IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
if (fdir_mode == RTE_FDIR_MODE_NONE)
@@ -1109,12 +1113,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
(fdir_filter->input.flow_type ==
- RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
+ RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
PMD_DRV_LOG(ERR, "By this device,"
- " IPv4-other is not supported without"
- " L4 protocol and ports masked!");
+ " IPv4-other is not supported without"
+ " L4 protocol and ports masked!");
return -ENOTSUP;
}
@@ -1132,16 +1136,16 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
if (is_perfect) {
if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
- " perfect mode!");
+ " perfect mode!");
return -ENOTSUP;
}
fdirhash = atr_compute_perfect_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc);
+ dev->data->dev_conf.fdir_conf.pballoc);
fdirhash |= fdir_filter->soft_id <<
- IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
fdirhash = atr_compute_sig_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc);
+ dev->data->dev_conf.fdir_conf.pballoc);
if (del) {
err = fdir_erase_filter_82599(hw, fdirhash);
@@ -1159,22 +1163,22 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
} else {
PMD_DRV_LOG(ERR, "Drop option is not supported in"
- " signature mode.");
+ " signature mode.");
return -EINVAL;
}
} else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
- fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
+ fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
queue = (uint8_t)fdir_filter->action.rx_queue;
else
return -EINVAL;
if (is_perfect) {
err = fdir_write_perfect_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash,
- fdir_mode);
+ fdircmd_flags, fdirhash,
+ fdir_mode);
} else {
err = fdir_add_signature_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash);
+ fdircmd_flags, fdirhash);
}
if (err < 0)
PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
@@ -1269,22 +1273,22 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
- IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
uint32_t reg, max_num;
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
/* Get the information from registers */
reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
- IXGBE_FDIRFREE_COLL_SHIFT);
+ IXGBE_FDIRFREE_COLL_SHIFT);
info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
- IXGBE_FDIRFREE_FREE_SHIFT);
+ IXGBE_FDIRFREE_FREE_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
- IXGBE_FDIRLEN_MAXHASH_SHIFT);
+ IXGBE_FDIRLEN_MAXHASH_SHIFT);
info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
- IXGBE_FDIRLEN_MAXLEN_SHIFT);
+ IXGBE_FDIRLEN_MAXLEN_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
@@ -1310,10 +1314,10 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
- (reg & FDIRCTRL_PBALLOC_MASK)));
+ (reg & FDIRCTRL_PBALLOC_MASK)));
if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
- fdir_stats->guarant_cnt = max_num - fdir_stats->free;
+ fdir_stats->guarant_cnt = max_num - fdir_stats->free;
else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
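
The statistics hunk above derives the guaranteed-filter count from FDIRCTRL: capacity is 1 << (FDIRENTRIES_NUM_SHIFT + PBALLOC), used directly in perfect mode and multiplied by four in signature mode, as the code shows. A small sketch of that arithmetic follows; the shift value 10 is assumed for illustration only.

/*
 * Hedged sketch of the Flow Director capacity arithmetic above.
 * fdirentries_num_shift = 10 is an assumption, not read from hardware.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int fdirentries_num_shift = 10;  /* assumed */
	unsigned int pballoc;

	for (pballoc = 0; pballoc <= 2; pballoc++) {
		unsigned int max_num = 1u << (fdirentries_num_shift + pballoc);

		printf("pballoc=%u perfect=%u signature=%u\n",
		       pballoc, max_num, max_num * 4);
	}
	return 0;
}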
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index a2787d90..56393ff2 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -97,9 +97,9 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
struct ixgbe_vf_info **vfinfo =
IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
struct ixgbe_mirror_info *mirror_info =
- IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
struct ixgbe_uta_info *uta_info =
- IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
uint16_t vf_num;
@@ -108,15 +108,16 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
- if (0 == (vf_num = dev_num_vf(eth_dev)))
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
return;
*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
- memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
- memset(uta_info,0,sizeof(struct ixgbe_uta_info));
+ memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
+ memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
if (vf_num >= ETH_32_POOLS) {
@@ -141,8 +142,6 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
/* set mb interrupt mask */
ixgbe_mb_intr_setup(eth_dev);
-
- return;
}
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
@@ -220,7 +219,8 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
uint32_t vlanctrl;
int i;
- if (0 == (vf_num = dev_num_vf(eth_dev)))
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
return -1;
/* enable VMDq and set the default pool for PF */
@@ -280,19 +280,18 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
}
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- /*
+ /*
* enable vlan filtering and allow all vlan tags through
*/
- vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
- /* VFTA - enable all vlan filters */
- for (i = 0; i < IXGBE_MAX_VFTA; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
- }
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IXGBE_MAX_VFTA; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
@@ -481,7 +480,7 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
- if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
}
@@ -545,7 +544,7 @@ ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
vfinfo[vf].vlan_count++;
else if (vfinfo[vf].vlan_count)
vfinfo[vf].vlan_count--;
- return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
+ return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}
static int
@@ -678,6 +677,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
/* perform VF reset */
if (msgbuf[0] == IXGBE_VF_RESET) {
int ret = ixgbe_vf_reset(dev, vf, msgbuf);
+
vfinfo[vf].clear_to_send = true;
return ret;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9fb38a6c..8a306b06 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -88,17 +88,6 @@
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
-
#if 1
#define RTE_PMD_USE_PREFETCH
#endif
@@ -352,6 +341,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_tx = 0;
while (nb_pkts) {
uint16_t ret, n;
+
n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
nb_tx = (uint16_t)(nb_tx + ret);
@@ -478,30 +468,28 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
*/
static inline uint32_t
what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
- union ixgbe_tx_offload tx_offload)
+ union ixgbe_tx_offload tx_offload)
{
/* If match with the current used context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
- & tx_offload.data[0])) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
- & tx_offload.data[1])))) {
- return txq->ctx_curr;
- }
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
/* What if match with the next context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
- & tx_offload.data[0])) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
- & tx_offload.data[1])))) {
- return txq->ctx_curr;
- }
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
/* Mismatch, use the previous context */
return IXGBE_CTX_NUM;
@@ -511,6 +499,7 @@ static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
uint32_t tmp = 0;
+
if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
tmp |= IXGBE_ADVTXD_POPTS_TXSM;
if (ol_flags & PKT_TX_IP_CKSUM)
@@ -524,6 +513,7 @@ static inline uint32_t
tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
uint32_t cmdtype = 0;
+
if (ol_flags & PKT_TX_VLAN_PKT)
cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
if (ol_flags & PKT_TX_TCP_SEG)
@@ -561,8 +551,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
status = txr[desc_to_clean_to].wb.status;
- if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
- {
+ if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
"(port=%d queue=%d)",
@@ -920,24 +909,40 @@ end_of_tx:
* RX functions
*
**********************************************************************/
-#define IXGBE_PACKET_TYPE_IPV4 0X01
-#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
-#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
-#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
-#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
-#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
-#define IXGBE_PACKET_TYPE_IPV6 0X04
-#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
-#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
-#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
-#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
-#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
-#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+
+#define IXGBE_PACKET_TYPE_ETHER 0X00
+#define IXGBE_PACKET_TYPE_IPV4 0X01
+#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
+#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
+#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
+#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
+#define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
+#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
+#define IXGBE_PACKET_TYPE_IPV6 0X04
+#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
+#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
+#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
+#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
#define IXGBE_PACKET_TYPE_NVGRE 0X00
#define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
@@ -945,13 +950,17 @@ end_of_tx:
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
#define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
@@ -965,13 +974,17 @@ end_of_tx:
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
#define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
@@ -993,48 +1006,88 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
*/
static const uint32_t
ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
[IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4,
+ [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
[IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
[IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
[IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_INNER_L3_IPV6,
[IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
[IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
[IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
+ RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
};
static const uint32_t
@@ -1087,6 +1140,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_SCTP,
[IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
@@ -1094,6 +1151,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
[IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
@@ -1106,6 +1167,14 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
[IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
@@ -1162,6 +1231,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
[IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
@@ -1170,6 +1243,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
[IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
@@ -1182,6 +1259,14 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
};
if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
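
These lookup tables are indexed directly by the masked packet-type bits of the Rx descriptor: ixgbe_rxd_pkt_info_to_pkt_type() masks pkt_info with ptype_mask and reads one combined RTE_PTYPE_* value, so each new tunnel/SCTP combination added above costs only one extra table entry and no extra branches. A minimal standalone sketch of the same idea follows; the constants and entries are illustrative stand-ins, not the real IXGBE_PACKET_TYPE_* / RTE_PTYPE_* values.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in flag bits; the real driver ORs RTE_PTYPE_* values. */
    #define EX_PTYPE_L2_ETHER  0x001u
    #define EX_PTYPE_L3_IPV4   0x010u
    #define EX_PTYPE_L4_UDP    0x100u

    /* index = masked packet-type field taken from the Rx descriptor */
    static const uint32_t ex_ptype_table[16] = {
            [0x3] = EX_PTYPE_L2_ETHER | EX_PTYPE_L3_IPV4 | EX_PTYPE_L4_UDP,
    };

    static uint32_t ex_pkt_info_to_ptype(uint32_t pkt_info, uint32_t mask)
    {
            return ex_ptype_table[pkt_info & mask];
    }

    int main(void)
    {
            printf("ptype=0x%x\n", ex_pkt_info_to_ptype(0x33, 0x0F));
            return 0;
    }
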
@@ -1232,7 +1317,7 @@ ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
}
static inline uint64_t
-rx_desc_status_to_pkt_flags(uint32_t rx_status)
+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
{
uint64_t pkt_flags;
@@ -1241,7 +1326,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
* Do not check whether L3/L4 rx checksum done by NIC or not,
* That can be found from rte_eth_rxmode.hw_ip_checksum flag
*/
- pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+ pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
#ifdef RTE_LIBRTE_IEEE1588
if (rx_status & IXGBE_RXD_STAT_TMST)
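
With this change the VLAN-related mbuf flags are no longer hard-coded: when the descriptor's VP bit is set, the function reports whatever flag set the queue was configured with (rxq->vlan_flags), which is what enables the stripped/non-stripped distinction used elsewhere in this patch. A reduced sketch of the new shape, with a stand-in value for IXGBE_RXD_STAT_VP:

    #include <stdint.h>

    #define EX_STAT_VP 0x08u  /* stand-in for IXGBE_RXD_STAT_VP */

    /* caller passes the per-queue VLAN flag set, e.g. "VLAN present"
     * alone or "VLAN present and stripped" */
    static uint64_t ex_status_to_pkt_flags(uint32_t rx_status,
                                           uint64_t vlan_flags)
    {
            return (rx_status & EX_STAT_VP) ? vlan_flags : 0;
    }

    int main(void)
    {
            return (int)ex_status_to_pkt_flags(EX_STAT_VP, 0x1);
    }
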
@@ -1298,6 +1383,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
uint32_t pkt_info[LOOK_AHEAD];
int i, j, nb_rx = 0;
uint32_t status;
+ uint64_t vlan_flags = rxq->vlan_flags;
/* get references to current descriptor and S/W ring entry */
rxdp = &rxq->rx_ring[rxq->rx_tail];
@@ -1313,8 +1399,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
* reference packets that are ready to be received.
*/
for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
- i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
- {
+ i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
/* Read desc statuses backwards to avoid race condition */
for (j = LOOK_AHEAD-1; j >= 0; --j)
s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
@@ -1340,7 +1425,8 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
/* convert descriptor fields to rte mbuf flags */
- pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
+ pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+ vlan_flags);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
((uint16_t)pkt_info[j]);
@@ -1472,6 +1558,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
int i, j;
+
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
(unsigned) rxq->queue_id);
@@ -1523,6 +1610,7 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_rx = 0;
while (nb_pkts) {
uint16_t ret, n;
+
n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
nb_rx = (uint16_t)(nb_rx + ret);
@@ -1554,6 +1642,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_rx;
uint16_t nb_hold;
uint64_t pkt_flags;
+ uint64_t vlan_flags;
nb_rx = 0;
nb_hold = 0;
@@ -1561,6 +1650,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
+ vlan_flags = rxq->vlan_flags;
while (nb_rx < nb_pkts) {
/*
* The order of operations here is important as the DD status
@@ -1608,7 +1698,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
(unsigned) rx_id, (unsigned) staterr,
(unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
@@ -1670,7 +1760,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
- pkt_flags = rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
pkt_flags = pkt_flags |
ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
@@ -1763,7 +1853,7 @@ ixgbe_fill_cluster_head_buf(
*/
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
- pkt_flags = rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
head->ol_flags = pkt_flags;
@@ -1879,7 +1969,7 @@ next_desc:
rte_le_to_cpu_16(rxd.wb.upper.length));
if (!bulk_alloc) {
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
"port_id=%u queue_id=%u",
@@ -1889,8 +1979,7 @@ next_desc:
rx_mbuf_alloc_failed++;
break;
}
- }
- else if (nb_hold > rxq->rx_free_thresh) {
+ } else if (nb_hold > rxq->rx_free_thresh) {
uint16_t next_rdt = rxq->rx_free_trigger;
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
@@ -2151,6 +2240,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+
txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
txe[i].last_id = i;
@@ -2170,7 +2260,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
txq->ctx_curr = 0;
- memset((void*)&txq->ctx_cache, 0,
+ memset((void *)&txq->ctx_cache, 0,
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
@@ -2443,6 +2533,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
if (rxq->rx_nb_avail) {
for (i = 0; i < rxq->rx_nb_avail; ++i) {
struct rte_mbuf *mb;
+
mb = rxq->rx_stage[rxq->rx_next_avail + i];
rte_pktmbuf_free_seg(mb);
}
@@ -2665,7 +2756,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Zero init all the descriptors in the ring.
*/
- memset (rz->addr, 0, RX_RING_SZ);
+ memset(rz->addr, 0, RX_RING_SZ);
/*
* Modified to setup VFRDT for Virtual Function
@@ -2679,8 +2770,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
rxq->rdh_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
- }
- else {
+ } else {
rxq->rdt_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
rxq->rdh_reg_addr =
@@ -2816,6 +2906,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
+
if (txq != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
@@ -2824,6 +2915,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(adapter, rxq);
@@ -3139,6 +3231,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
}
for (i = 0; i < nb_tcs; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
/* clear 10 bits. */
rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
@@ -3147,14 +3240,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
/* zero alloc all unused TCs */
for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
- rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
+
+ rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
/* clear 10 bits. */
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
/* MRQC: enable vmdq and dcb */
- mrqc = ((num_pools == ETH_16_POOLS) ? \
- IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
+ mrqc = (num_pools == ETH_16_POOLS) ?
+ IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
/* PFVTCTL: turn on virtualisation and set the default pool */
@@ -3192,7 +3286,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
}
/* VFRE: pool enabling for receive - 16 or 32 */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*
@@ -3205,7 +3299,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
(cfg->pool_map[i].vlan_id & 0xFFF)));
/*
* Put the allowed pools in VFB reg. As we only have 16 or 32
@@ -3223,7 +3317,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
*/
static void
ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t q;
@@ -3238,18 +3332,17 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
/* Enable DCB for Tx with 8 TCs */
if (dcb_config->num_tcs.pg_tcs == 8) {
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- }
- else {
+ } else {
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
}
if (dcb_config->vt_mode)
- reg |= IXGBE_MTQC_VT_ENA;
+ reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
/* Disable drop for all queues */
for (q = 0; q < 128; q++)
IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
@@ -3261,7 +3354,6 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
reg |= IXGBE_SECTX_DCB;
IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
}
- return;
}
/**
@@ -3285,25 +3377,23 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*Configure general DCB TX parameters*/
- ixgbe_dcb_tx_hw_config(hw,dcb_config);
- return;
+ ixgbe_dcb_tx_hw_config(hw, dcb_config);
}
static void
ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
- if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
- }
- else {
+ } else {
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
@@ -3318,19 +3408,18 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
static void
ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
- if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
- }
- else {
+ } else {
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
@@ -3342,7 +3431,6 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
}
- return;
}
static void
@@ -3352,7 +3440,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
struct rte_eth_dcb_rx_conf *rx_conf =
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
@@ -3373,7 +3461,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
struct rte_eth_dcb_tx_conf *tx_conf =
&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
@@ -3394,7 +3482,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
*/
static void
ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
@@ -3454,13 +3542,11 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
*/
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
- return;
}
static void
ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
- uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+ uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -3485,16 +3571,16 @@ ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *m
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
- ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
- ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
- ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
break;
default:
break;
@@ -3515,7 +3601,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
int ret = 0;
- uint8_t i,pfc_en,nb_tcs;
+ uint8_t i, pfc_en, nb_tcs;
uint16_t pbsize, rx_buffer_size;
uint8_t config_dcb_rx = 0;
uint8_t config_dcb_tx = 0;
@@ -3529,7 +3615,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- switch(dev->data->dev_conf.rxmode.mq_mode){
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
dcb_config->vt_mode = true;
if (hw->mac.type != ixgbe_mac_82598EB) {
@@ -3560,10 +3646,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
case ETH_MQ_TX_VMDQ_DCB:
dcb_config->vt_mode = true;
config_dcb_tx = DCB_TX_CONFIG;
- /* get DCB and VT TX configuration parameters from rte_eth_conf */
- ixgbe_dcb_vt_tx_config(dev,dcb_config);
+ /* get DCB and VT TX configuration parameters
+ * from rte_eth_conf
+ */
+ ixgbe_dcb_vt_tx_config(dev, dcb_config);
/*Configure general VMDQ and DCB TX parameters*/
- ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
+ ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
break;
case ETH_MQ_TX_DCB:
@@ -3586,8 +3674,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Avoid un-configured priority mapping to TC0 */
uint8_t j = 4;
uint8_t mask = 0xFF;
+
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
- mask = (uint8_t)(mask & (~ (1 << map[i])));
+ mask = (uint8_t)(mask & (~(1 << map[i])));
for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
map[j++] = i;
@@ -3623,6 +3712,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Set RX buffer size */
pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
+
for (i = 0; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
@@ -3632,9 +3722,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
}
}
if (config_dcb_tx) {
- /* Only support an equally distributed Tx packet buffer strategy. */
+ /* Only support an equally distributed
+ * Tx packet buffer strategy.
+ */
uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
+
for (i = 0; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
@@ -3647,9 +3740,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
}
/*Calculates traffic class credits*/
- ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_RX_CONFIG);
if (config_dcb_rx) {
@@ -3659,7 +3752,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
/* Configure PG(ETS) RX */
- ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
+ ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
}
if (config_dcb_tx) {
@@ -3669,7 +3762,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
/* Configure PG(ETS) TX */
- ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
+ ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
}
/*Configure queue statistics registers*/
@@ -3683,7 +3776,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
* If the TC count is 8,and the default high_water is 48,
* the low_water is 16 as default.
*/
- hw->fc.high_water[i] = (pbsize * 3 ) / 4;
+ hw->fc.high_water[i] = (pbsize * 3) / 4;
hw->fc.low_water[i] = pbsize / 4;
/* Enable pfc for this TC */
tc = &dcb_config->tc_config[i];
@@ -3721,8 +3814,6 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
/** Configure DCB hardware **/
ixgbe_dcb_hw_configure(dev, dcb_cfg);
-
- return;
}
/*
@@ -3787,7 +3878,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
(cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
/*
* Put the allowed pools in VFB reg. As we only have 16 or 64
@@ -3795,12 +3886,11 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
* i.e. bits 0-31
*/
if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
(cfg->pool_map[i].pools & UINT32_MAX));
else
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
- ((cfg->pool_map[i].pools >> 32) \
- & UINT32_MAX));
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
+ ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
}
@@ -3840,7 +3930,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
/* Disable drop for all queues */
for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
@@ -3848,8 +3938,6 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
IXGBE_WRITE_FLUSH(hw);
-
- return;
}
static int __attribute__((cold))
@@ -3857,12 +3945,13 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
- unsigned i;
+ unsigned int i;
/* Initialize software ring entries */
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile union ixgbe_adv_rx_desc *rxd;
- struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned) rxq->queue_id);
@@ -4253,6 +4342,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
rxq->rx_using_sse = rx_using_sse;
}
}
@@ -4305,6 +4395,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RFCTL configuration */
if (rsc_capable) {
uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+
if (rx_conf->enable_lro)
/*
* Since NFS packets coalescing is not supported - clear
@@ -4498,6 +4589,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB) {
/* Must setup the PSRTYPE register */
uint32_t psrtype;
+
psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
@@ -4597,7 +4689,8 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Enable TX CRC (checksum offload requirement) and hw padding
- * (TSO requirement) */
+ * (TSO requirement)
+ */
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4622,26 +4715,26 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
* bookkeeping if things aren't delivered in order.
*/
switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- txctrl = IXGBE_READ_REG(hw,
- IXGBE_DCA_TXCTRL(txq->reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
- txctrl);
- break;
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw,
+ IXGBE_DCA_TXCTRL(txq->reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
+ txctrl);
+ break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_X550EM_a:
- default:
- txctrl = IXGBE_READ_REG(hw,
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ default:
+ txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
- txctrl);
- break;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
+ txctrl);
+ break;
}
}
@@ -4813,12 +4906,12 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
- /* Wait until RX Enable ready */
+ /* Wait until RX Enable bit clear */
poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
do {
rte_delay_ms(1);
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
+ } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
rx_queue_id);
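
The loop-condition fix here is the substantive change: the old test ORed the register value with IXGBE_RXDCTL_ENABLE, which can never be zero, so the poll always burned the full RTE_IXGBE_REGISTER_POLL_WAIT_10_MS budget and then reported a failure even when the queue had already stopped. ANDing with the enable bit tests what was intended. The same correction is applied to the TXDCTL poll further down. A two-assert demonstration of the difference:

    #include <assert.h>
    #include <stdint.h>

    #define EX_ENABLE_BIT 0x02000000u  /* stand-in for IXGBE_RXDCTL_ENABLE */

    int main(void)
    {
            uint32_t reg = 0;  /* queue already disabled by hardware */

            /* old condition is always true, so the loop never exits early */
            assert((reg | EX_ENABLE_BIT) != 0);
            /* new condition correctly sees the enable bit cleared */
            assert((reg & EX_ENABLE_BIT) == 0);
            return 0;
    }
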
@@ -4892,48 +4985,48 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -1;
- /* Wait until TX queue is empty */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_us(RTE_IXGBE_WAIT_100_US);
- txtdh = IXGBE_READ_REG(hw,
- IXGBE_TDH(txq->reg_idx));
- txtdt = IXGBE_READ_REG(hw,
- IXGBE_TDT(txq->reg_idx));
- } while (--poll_ms && (txtdh != txtdt));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
- "when stopping.", tx_queue_id);
- }
+ txq = dev->data->tx_queues[tx_queue_id];
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
- txdctl &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ /* Wait until TX queue is empty */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ txtdh = IXGBE_READ_REG(hw,
+ IXGBE_TDH(txq->reg_idx));
+ txtdt = IXGBE_READ_REG(hw,
+ IXGBE_TDT(txq->reg_idx));
+ } while (--poll_ms && (txtdh != txtdt));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
+ "when stopping.", tx_queue_id);
+ }
- /* Wait until TX Enable ready */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- txdctl = IXGBE_READ_REG(hw,
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+ /* Wait until TX Enable bit clear */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
IXGBE_TXDCTL(txq->reg_idx));
- } while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d", tx_queue_id);
- }
+ } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable "
+ "Tx Queue %d", tx_queue_id);
+ }
- if (txq->ops != NULL) {
- txq->ops->release_mbufs(txq);
- txq->ops->reset(txq);
- }
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- } else
- return -1;
+ if (txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 3691a19d..2608b364 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -146,6 +146,8 @@ struct ixgbe_rx_queue {
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint8_t rx_deferred_start; /**< not in global dev start. */
+ /** flags to set in mbuf when a vlan is detected. */
+ uint64_t vlan_flags;
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** hold packets to return to application */
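
The new per-queue vlan_flags field is what rx_desc_status_to_pkt_flags() and the vector receive paths consume. It is presumably filled in when the Rx configuration is applied (that code is outside these hunks), choosing between a plain "VLAN present" report and "present and stripped" depending on whether hardware VLAN stripping is enabled; the RTE_BUILD_BUG_ON in the SSE code later in this patch relies on both flags fitting in one byte. A hedged sketch of that selection, using the 16.07 mbuf flags but an illustrative helper name:

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* illustrative helper, not the actual driver function */
    static uint64_t ex_rxq_vlan_flags(int hw_vlan_strip_on)
    {
            return hw_vlan_strip_on ?
                    (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED) :
                    PKT_RX_VLAN_PKT;
    }
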
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
new file mode 100644
index 00000000..62b82013
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -0,0 +1,326 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_RXTX_VEC_COMMON_H_
+#define _IXGBE_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned int pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len)
+ end->data_len -= rxq->crc_len;
+ else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
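
reassemble_packets() is shared by the SSE and NEON receive paths. Its contract: split_flags[i] is non-zero when buffer i's descriptor did not carry EOP, i.e. the packet continues into the next buffer; completed packets are compacted back into rx_bufs and any trailing partial packet is parked in rxq->pkt_first_seg / pkt_last_seg for the next call. For instance, a 3-segment packet followed by a single-segment one would arrive with flags like the illustrative array below, and the function would return 2 reassembled mbuf chains:

    /* illustrative only: one flag byte per received buffer */
    static const uint8_t ex_split_flags[4] = { 1, 1, 0, 0 };
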
+
+static inline int __attribute__((always_inline))
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+{
+ struct ixgbe_tx_entry_v *txep;
+ uint32_t status;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (!(status & IXGBE_ADVTXD_STAT_DD))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+{
+ unsigned int i;
+ struct ixgbe_tx_entry_v *txe;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+
+ if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
+ return;
+
+ /* release the used mbufs in sw_ring */
+ for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+ i != txq->tx_tail;
+ i = (i + 1) & max_desc) {
+ txe = &txq->sw_ring_v[i];
+ rte_pktmbuf_free_seg(txe->mbuf);
+ }
+ txq->nb_tx_free = max_desc;
+
+ /* reset tx_entry */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txe = &txq->sw_ring_v[i];
+ txe->mbuf = NULL;
+ }
+}
+
+static inline void
+_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ const unsigned int mask = rxq->nb_rx_desc - 1;
+ unsigned int i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline void
+_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
+{
+ if (txq == NULL)
+ return;
+
+ if (txq->sw_ring != NULL) {
+ rte_free(txq->sw_ring_v - 1);
+ txq->sw_ring_v = NULL;
+ }
+}
+
+static inline void
+_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
+{
+ static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
+ struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
+ uint16_t i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++)
+ txq->tx_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->wb.status = IXGBE_TXD_STAT_DD;
+ txe[i].mbuf = NULL;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void *)&txq->ctx_cache, 0,
+ IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+static inline int
+ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
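
ixgbe_rxq_vec_setup_default() prepares the rearm template used by ixgbe_rxq_rearm(): a throwaway mbuf is initialized once and the 64 bits starting at rearm_data are saved, so every rearmed mbuf can later be reset with a single 8-byte store instead of several field writes. A generic, self-contained illustration of that snapshot-and-replay idea; the field names and layout below are illustrative, not the rte_mbuf layout, and the real code uses a compiler barrier plus a direct 64-bit load rather than memcpy:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ex_hdr {            /* illustrative 8-byte field group */
            uint16_t data_off;
            uint16_t refcnt;
            uint8_t  nb_segs;
            uint8_t  port;
            uint16_t pad;
    };

    int main(void)
    {
            struct ex_hdr tmpl = { .data_off = 128, .refcnt = 1, .nb_segs = 1 };
            uint64_t init;
            struct ex_hdr obj;

            memcpy(&init, &tmpl, sizeof(init));  /* snapshot once at setup */
            memcpy(&obj, &init, sizeof(init));   /* replay per object at rearm */
            printf("%u %u\n", obj.data_off, obj.nb_segs);
            return 0;
    }
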
+
+static inline int
+ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
+ const struct ixgbe_txq_ops *txq_ops)
+{
+ if (txq->sw_ring_v == NULL)
+ return -1;
+
+ /* leave the first one for overflow */
+ txq->sw_ring_v = txq->sw_ring_v + 1;
+ txq->ops = txq_ops;
+
+ return 0;
+}
+
+static inline int
+ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
+	/* without rx ol_flags, no VP flag report */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /*
+ * - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+#endif
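
ixgbe_rx_vec_dev_conf_condition_check_default() is the gate the driver consults elsewhere (not shown in this diff) before selecting the vector Rx burst functions: IEEE1588, flow director, checksum error reporting, header split and, when Rx ol_flags are compiled out, VLAN stripping all force the scalar path. The selection itself is just a capability-checked function-pointer choice; a generic standalone sketch of that pattern, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t (*ex_rx_burst_t)(void);

    static uint16_t ex_rx_burst_vec(void)    { return 1; }  /* vector path */
    static uint16_t ex_rx_burst_scalar(void) { return 2; }  /* fallback */

    static ex_rx_burst_t ex_pick_rx_burst(int vec_conditions_ok)
    {
            return vec_conditions_ok ? ex_rx_burst_vec : ex_rx_burst_scalar;
    }

    int main(void)
    {
            printf("%u\n", ex_pick_rx_burst(1)());
            return 0;
    }
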
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
new file mode 100644
index 00000000..64a329ea
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -0,0 +1,560 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "ixgbe_rxtx_vec_common.h"
+
+#include <arm_neon.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ uint64x2_t dma_addr0, dma_addr1;
+ uint64x2_t zero = vdupq_n_u64(0);
+ uint64_t paddr;
+ uint8x8_t p;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (unlikely(rte_mempool_get_bulk(rxq->mb_pool,
+ (void *)rxep,
+ RTE_IXGBE_RXQ_REARM_THRESH) < 0)) {
+ if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vst1q_u64((uint64_t *)&rxdp[i].read,
+ zero);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_IXGBE_RXQ_REARM_THRESH;
+ return;
+ }
+
+ p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /*
+ * Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite whole 8 bytes with one load:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ vst1_u8((uint8_t *)&mb0->rearm_data, p);
+ paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
+ /* flush desc with pa dma_addr */
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
+
+ vst1_u8((uint8_t *)&mb1->rearm_data, p);
+ paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
+
+#define VTAG_SHIFT (3)
+
+static inline void
+desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
+ uint8x16_t staterr, struct rte_mbuf **rx_pkts)
+{
+ uint8x16_t ptype;
+ uint8x16_t vtag;
+
+ union {
+ uint8_t e[4];
+ uint32_t word;
+ } vol;
+
+ const uint8x16_t pkttype_msk = {
+ PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
+ PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+
+ const uint8x16_t rsstype_msk = {
+ 0x0F, 0x0F, 0x0F, 0x0F,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+
+ const uint8x16_t rss_flags = {
+ 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, PKT_RX_FDIR};
+
+ ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0];
+ ptype = vandq_u8(ptype, rsstype_msk);
+ ptype = vqtbl1q_u8(rss_flags, ptype);
+
+ vtag = vshrq_n_u8(staterr, VTAG_SHIFT);
+ vtag = vandq_u8(vtag, pkttype_msk);
+ vtag = vorrq_u8(ptype, vtag);
+
+ vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+#else
+#define desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, rx_pkts)
+#endif
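
desc_to_olflags_v() avoids per-packet branching by turning the flag translation into byte-wide table lookups: the RSS-type nibble of each status word, masked by rsstype_msk, indexes the 16-entry rss_flags table through vqtbl1q_u8, and the VLAN-present bit is shifted and masked into place, so four packets are handled per iteration. A scalar equivalent of the table-lookup step, where 0x02 and 0x80 are illustrative stand-ins for PKT_RX_RSS_HASH and PKT_RX_FDIR:

    #include <stdint.h>
    #include <stdio.h>

    static const uint8_t ex_rss_flag_table[16] = {
            0, 0x02, 0x02, 0x02, 0, 0x02, 0, 0x02,
            0x02, 0, 0, 0, 0, 0, 0, 0x80,
    };

    int main(void)
    {
            uint8_t rss_type = 0x35 & 0x0F;  /* mask to the low nibble */
            printf("flags=0x%02x\n", ex_rss_flag_table[rss_type]);
            return 0;
    }
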
+
+/*
+ * vPMD raw receive routine, only accepts (nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
+ *
+ * Notice:
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ * - don't support ol_flags for rss and csum err
+ */
+
+#define IXGBE_VPMD_DESC_DD_MASK 0x01010101
+#define IXGBE_VPMD_DESC_EOP_MASK 0x02020202
+
+static inline uint16_t
+_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ uint8x16_t shuf_msk = {
+ 0xFF, 0xFF,
+ 0xFF, 0xFF, /* skip 32 bits pkt_type */
+ 12, 13, /* octet 12~13, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 12, 13, /* octet 12~13, 16 bits data_len */
+ 14, 15, /* octet 14~15, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+ uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
+ rxq->crc_len, 0, 0, 0};
+
+	/* nb_pkts shall be less than or equal to RTE_IXGBE_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch_non_temporal(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
+ ixgbe_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ return 0;
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_IXGBE_DESCS_PER_LOOP,
+ rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
+ uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP];
+ uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ uint8x16x2_t sterr_tmp1, sterr_tmp2;
+ uint64x2_t mbp1, mbp2;
+ uint8x16_t staterr;
+ uint16x8_t tmp;
+ uint32_t stat;
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
+
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
+ rte_rmb();
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+
+ descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
+		/* A.1 load the remaining 2 pkt descs */
+ descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ descs[0] = vld1q_u64((uint64_t *)(rxdp));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
+ pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+ pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]),
+ vreinterpretq_u8_u64(descs[3]));
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]),
+ vreinterpretq_u8_u64(descs[2]));
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0];
+ stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);
+
+ /* set ol_flags with vlan packet type */
+ desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr,
+ &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
+ pkt_mb4 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+ pkt_mb3 = vreinterpretq_u8_u16(tmp);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+ pkt_mb2 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+ pkt_mb1 = vreinterpretq_u8_u16(tmp);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ /* and with mask to extract bits, flipping 1-0 */
+ *(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;
+
+ split_packet += RTE_IXGBE_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+
+		/* C.4 calc available number of desc */
+ var = __builtin_popcount(stat & IXGBE_VPMD_DESC_DD_MASK);
+ nb_pkts_recd += var;
+ if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
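
The burst bookkeeping at the end of each iteration relies on IXGBE_VPMD_DESC_DD_MASK: stat holds the low status byte of each of the four descriptors, the mask keeps only their DD bits, and the popcount of the result is how many descriptors the hardware has actually completed; anything less than RTE_IXGBE_DESCS_PER_LOOP ends the burst. A standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define EX_DESC_DD_MASK 0x01010101u  /* one DD bit per status byte */

    int main(void)
    {
            uint32_t stat = 0x00000101;  /* only the first two descriptors done */
            int done = __builtin_popcount(stat & EX_DESC_DD_MASK);

            printf("%d descriptors ready\n", done);  /* prints 2 */
            return 0;
    }
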
+
+/*
+ * vPMD receive routine, only accepts (nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
+ *
+ * Notice:
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ * - don't support ol_flags for rss and csum err
+ */
+uint16_t
+ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - don't support ol_flags for rss and csum err
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+ unsigned int i = 0;
+ if (rxq->pkt_first_seg == NULL) {
+		/* find the first split flag, and only reassemble from then on */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64x2_t descriptor = {
+ pkt->buf_physaddr + pkt->data_off,
+ (uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};
+
+ vst1q_u64((uint64_t *)&txdp->read, descriptor);
+}
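
vtx1() writes the whole 16-byte advanced TX data descriptor with one vector store: the first 64-bit lane is the buffer DMA address, and the second packs data_len plus the command/type flags in the low word and the payload length in the upper word; the "<< 46" places pkt_len at the PAYLEN position (shift 14 inside the upper 32-bit half, i.e. bit 46 of the combined qword). A scalar sketch of that packing, with an illustrative flags value standing in for DCMD_DTYP_FLAGS:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pkt_len = 60, data_len = 60;
            uint64_t flags = 0x2b300000u;  /* illustrative, not DCMD_DTYP_FLAGS */
            uint64_t qword = (pkt_len << 46) | flags | data_len;

            printf("0x%016llx\n", (unsigned long long)qword);
            return 0;
    }
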
+
+static inline void
+vtx(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ volatile union ixgbe_adv_tx_desc *txdp;
+ struct ixgbe_tx_entry_v *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = DCMD_DTYP_FLAGS;
+ uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
+ int i;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ixgbe_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+ txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+static void __attribute__((cold))
+ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_tx_queue_release_mbufs_vec(txq);
+}
+
+void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ _ixgbe_rx_queue_release_mbufs_vec(rxq);
+}
+
+static void __attribute__((cold))
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_tx_free_swring_vec(txq);
+}
+
+static void __attribute__((cold))
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_reset_tx_queue_vec(txq);
+}
+
+static const struct ixgbe_txq_ops vec_txq_ops = {
+ .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
+ .free_swring = ixgbe_tx_free_swring,
+ .reset = ixgbe_reset_tx_queue,
+};
+
+int __attribute__((cold))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
+{
+ return ixgbe_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+{
+ return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
+}
+
+int __attribute__((cold))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index 50407043..4f95debd 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -37,6 +37,7 @@
#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
+#include "ixgbe_rxtx_vec_common.h"
#include <tmmintrin.h>
@@ -140,10 +141,9 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
*/
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-#define VTAG_SHIFT (3)
-
static inline void
-desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
+ struct rte_mbuf **rx_pkts)
{
__m128i ptype0, ptype1, vtag0, vtag1;
union {
@@ -151,11 +151,6 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
uint64_t dword;
} vol;
- /* pkt type + vlan olflags mask */
- const __m128i pkttype_msk = _mm_set_epi16(
- 0x0000, 0x0000, 0x0000, 0x0000,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
-
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
@@ -167,6 +162,19 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+ /* mask everything except vlan present bit */
+ const __m128i vlan_msk = _mm_set_epi16(
+ 0x0000, 0x0000,
+ 0x0000, 0x0000,
+ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
+ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);
+ /* map vlan present (0x8) to ol_flags */
+ const __m128i vlan_map = _mm_set_epi8(
+ 0, 0, 0, 0,
+ 0, 0, 0, vlan_flags,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0);
+
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
@@ -177,8 +185,8 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
- vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);
- vtag1 = _mm_and_si128(vtag1, pkttype_msk);
+ vtag1 = _mm_and_si128(vtag1, vlan_msk);
+ vtag1 = _mm_shuffle_epi8(vlan_map, vtag1);
vtag1 = _mm_or_si128(ptype0, vtag1);
vol.dword = _mm_cvtsi128_si64(vtag1);
@@ -220,6 +228,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0, 0 /* ignore pkt_type field */
);
__m128i dd_check, eop_check;
+ uint8_t vlan_flags;
/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
@@ -228,18 +237,21 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
/* Just the act of getting into the function from the application is
- * going to cost about 7 cycles */
+ * going to cost about 7 cycles
+ */
rxdp = rxq->rx_ring + rxq->rx_tail;
_mm_prefetch((const void *)rxdp, _MM_HINT_T0);
/* See if we need to rearm the RX queue - gives the prefetch a bit
- * of time to act */
+ * of time to act
+ */
if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
ixgbe_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
- * there is actually a packet available */
+ * there is actually a packet available
+ */
if (!(rxdp->wb.upper.status_error &
rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
return 0;
@@ -262,9 +274,14 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
);
/* Cache is empty -> need to scan the buffer rings, but first move
- * the next 'n' mbufs into the cache */
+ * the next 'n' mbufs into the cache
+ */
sw_ring = &rxq->sw_ring[rxq->rx_tail];
+ /* ensure these 2 flags are in the lower 8 bits */
+ RTE_BUILD_BUG_ON((PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
+ vlan_flags = rxq->vlan_flags & UINT8_MAX;
+
/* A. load 4 packet in one loop
* [A*. mask out 4 unused dirty field in desc]
* B. copy 4 mbuf point from swring to rx_pkts
@@ -302,10 +319,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
if (split_packet) {
- rte_prefetch0(&rx_pkts[pos]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
}
/* avoid compiler reorder optimization */
@@ -325,7 +342,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
/* set ol_flags with vlan packet type */
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -359,7 +376,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* the staterr values are not in order; for counting the
* dd bits the order doesn't matter. However, for end of
* packet tracking, we do care, so shuffle. This also
- * compresses the 32-bit values to 8-bit */
+ * compresses the 32-bit values to 8-bit
+ */
eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
@@ -414,69 +432,6 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
-static inline uint16_t
-reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
- uint16_t nb_bufs, uint8_t *split_flags)
-{
- struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
- struct rte_mbuf *start = rxq->pkt_first_seg;
- struct rte_mbuf *end = rxq->pkt_last_seg;
- unsigned pkt_idx, buf_idx;
-
- for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
- if (end != NULL) {
- /* processing a split packet */
- end->next = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
-
- start->nb_segs++;
- start->pkt_len += rx_bufs[buf_idx]->data_len;
- end = end->next;
-
- if (!split_flags[buf_idx]) {
- /* it's the last packet of the set */
- start->hash = end->hash;
- start->ol_flags = end->ol_flags;
- /* we need to strip crc for the whole packet */
- start->pkt_len -= rxq->crc_len;
- if (end->data_len > rxq->crc_len)
- end->data_len -= rxq->crc_len;
- else {
- /* free up last mbuf */
- struct rte_mbuf *secondlast = start;
-
- start->nb_segs--;
- while (secondlast->next != end)
- secondlast = secondlast->next;
- secondlast->data_len -= (rxq->crc_len -
- end->data_len);
- secondlast->next = NULL;
- rte_pktmbuf_free_seg(end);
- end = secondlast;
- }
- pkts[pkt_idx++] = start;
- start = end = NULL;
- }
- } else {
- /* not processing a split packet */
- if (!split_flags[buf_idx]) {
- /* not a split packet, save and skip */
- pkts[pkt_idx++] = rx_bufs[buf_idx];
- continue;
- }
- end = start = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
- rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
- }
- }
-
- /* save the partial packet for next time */
- rxq->pkt_first_seg = start;
- rxq->pkt_last_seg = end;
- memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
- return pkt_idx;
-}
-
/*
* vPMD receive routine that reassembles scattered packets
*
@@ -535,76 +490,11 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
int i;
+
for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
vtx1(txdp, *pkt, flags);
}
-static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
-{
- struct ixgbe_tx_entry_v *txep;
- uint32_t status;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
-
- /* check DD bit on threshold descriptor */
- status = txq->tx_ring[txq->tx_next_dd].wb.status;
- if (!(status & IXGBE_ADVTXD_STAT_DD))
- return 0;
-
- n = txq->tx_rs_thresh;
-
- /*
- * first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool))
- free[nb_free++] = m;
- else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free, nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
-static inline void __attribute__((always_inline))
-tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- int i;
- for (i = 0; i < (int)nb_pkts; ++i)
- txep[i].mbuf = tx_pkts[i];
-}
-
uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -675,91 +565,25 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
- unsigned i;
- struct ixgbe_tx_entry_v *txe;
- const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-
- if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
- return;
-
- /* release the used mbufs in sw_ring */
- for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
- i != txq->tx_tail;
- i = (i + 1) & max_desc) {
- txe = &txq->sw_ring_v[i];
- rte_pktmbuf_free_seg(txe->mbuf);
- }
- txq->nb_tx_free = max_desc;
-
- /* reset tx_entry */
- for (i = 0; i < txq->nb_tx_desc; i++) {
- txe = &txq->sw_ring_v[i];
- txe->mbuf = NULL;
- }
+ _ixgbe_tx_queue_release_mbufs_vec(txq);
}
void __attribute__((cold))
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
- const unsigned mask = rxq->nb_rx_desc - 1;
- unsigned i;
-
- if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
- return;
-
- /* free all mbufs that are valid in the ring */
- for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
- rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
- rxq->rxrearm_nb = rxq->nb_rx_desc;
-
- /* set all entries to NULL */
- memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+ _ixgbe_rx_queue_release_mbufs_vec(rxq);
}
static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
- if (txq == NULL)
- return;
-
- if (txq->sw_ring != NULL) {
- rte_free(txq->sw_ring_v - 1);
- txq->sw_ring_v = NULL;
- }
+ _ixgbe_tx_free_swring_vec(txq);
}
static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
- static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
- struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
- uint16_t i;
-
- /* Zero out HW ring memory */
- for (i = 0; i < txq->nb_tx_desc; i++)
- txq->tx_ring[i] = zeroed_desc;
-
- /* Initialize SW ring entries */
- for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
- txd->wb.status = IXGBE_TXD_STAT_DD;
- txe[i].mbuf = NULL;
- }
-
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
- txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-
- txq->tx_tail = 0;
- txq->nb_tx_used = 0;
- /*
- * Always allow 1 descriptor to be un-allocated to avoid
- * a H/W race condition
- */
- txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
- txq->ctx_curr = 0;
- memset((void *)&txq->ctx_cache, 0,
- IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+ _ixgbe_reset_tx_queue_vec(txq);
}
static const struct ixgbe_txq_ops vec_txq_ops = {
@@ -771,63 +595,17 @@ static const struct ixgbe_txq_ops vec_txq_ops = {
int __attribute__((cold))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
- uintptr_t p;
- struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
- mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
- mb_def.port = rxq->port_id;
- rte_mbuf_refcnt_set(&mb_def, 1);
-
- /* prevent compiler reordering: rearm_data covers previous fields */
- rte_compiler_barrier();
- p = (uintptr_t)&mb_def.rearm_data;
- rxq->mbuf_initializer = *(uint64_t *)p;
- return 0;
+ return ixgbe_rxq_vec_setup_default(rxq);
}
int __attribute__((cold))
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
- if (txq->sw_ring_v == NULL)
- return -1;
-
- /* leave the first one for overflow */
- txq->sw_ring_v = txq->sw_ring_v + 1;
- txq->ops = &vec_txq_ops;
-
- return 0;
+ return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}
int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
-#ifndef RTE_LIBRTE_IEEE1588
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-
-#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
- /* without rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0)
- return -1;
-#endif
-
- /* no fdir support */
- if (fconf->mode != RTE_FDIR_MODE_NONE)
- return -1;
-
- /*
- * - no csum error report support
- * - no header split support
- */
- if (rxmode->hw_ip_checksum == 1 ||
- rxmode->header_split == 1)
- return -1;
-
- return 0;
-#else
- RTE_SET_USED(dev);
- return -1;
-#endif
+ return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}
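
[Editor's note] For reference, the core of the ol_flags change in desc_to_olflags_v(): instead of shifting the VLAN-present bit down into PKT_RX_VLAN_PKT, the status word is masked to IXGBE_RXD_STAT_VP (0x8) and that value is used as a byte index into a 16-entry table via _mm_shuffle_epi8, yielding rxq->vlan_flags for VLAN packets and 0 otherwise. The RTE_BUILD_BUG_ON added above guarantees those flag values fit in one byte, since the shuffle can only produce byte lanes. Below is a scalar sketch of the same mapping; the PKT_RX_* flag values are illustrative assumptions, only IXGBE_RXD_STAT_VP = 0x8 is taken from the patch.

/* Scalar sketch (not part of the patch) of the branch-free VLAN flag
 * mapping done per descriptor by the new SSE path.
 */
#include <stdint.h>
#include <stdio.h>

#define IXGBE_RXD_STAT_VP     0x08u     /* VLAN-present bit in Rx status */
#define PKT_RX_VLAN_PKT       (1u << 0) /* assumed values for the sketch */
#define PKT_RX_VLAN_STRIPPED  (1u << 1)

static uint8_t
vlan_flags_scalar(uint32_t staterr, uint8_t vlan_flags)
{
	/* same idea as _mm_shuffle_epi8(vlan_map, vtag1):
	 * index 0x8 -> vlan_flags, any other index -> 0 */
	uint8_t vlan_map[16] = { 0 };

	vlan_map[IXGBE_RXD_STAT_VP] = vlan_flags;
	return vlan_map[staterr & IXGBE_RXD_STAT_VP];
}

int
main(void)
{
	uint8_t flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;

	/* VP bit set -> flags reported, VP bit clear -> 0 */
	printf("VP set:   0x%x\n", (unsigned)vlan_flags_scalar(0x0d, flags));
	printf("VP clear: 0x%x\n", (unsigned)vlan_flags_scalar(0x05, flags));
	return 0;
}

The table lookup lets whatever per-queue value rxq->vlan_flags holds be applied in the receive loop without a branch or an extra shift.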