Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/Makefile                    |   11
-rw-r--r--  drivers/net/ixgbe/base/README                 |    5
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82598.c          |    4
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82598.h          |    2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82599.c          |    4
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82599.h          |    2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_api.c            |   10
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_api.h            |    4
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.c         |   68
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.h         |    7
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_hv_vf.c          |  240
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_hv_vf.h          |   41
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_mbx.h            |    8
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_osdep.h          |   15
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_phy.c            |  207
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_phy.h            |   74
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_type.h           |  143
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_vf.c             |   20
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_vf.h             |    3
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x540.c           |   33
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x540.h           |    2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x550.c           | 1148
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x550.h           |    6
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c              | 1973
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.h              |  287
-rw-r--r--  drivers/net/ixgbe/ixgbe_fdir.c                |  415
-rw-r--r--  drivers/net/ixgbe/ixgbe_flow.c                | 2778
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c                  |   44
-rw-r--r--  drivers/net/ixgbe/ixgbe_regs.h                |    2
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c                |  293
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.h                |    8
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     |   13
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c       |   24
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c        |  102
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.c             |  910
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.h             |  245
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe_version.map   |   23
37 files changed, 7346 insertions, 1828 deletions
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 94ddc7b8..5529d81c 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -76,6 +76,9 @@ endif
ifeq ($(shell test $(GCC_VERSION) -ge 50 && echo 1), 1)
CFLAGS_ixgbe_common.o += -Wno-logical-not-parentheses
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
endif
endif
@@ -100,6 +103,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_hv_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
@@ -108,6 +112,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_flow.c
ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
else
@@ -118,13 +123,9 @@ ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
endif
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index 6b54c31e..a61617be 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -1,7 +1,7 @@
..
BSD LICENSE
- Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,8 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2016.08.15 released by ND. The sub-directory of base/
+cid-10g-shared-code.2017.03.29 released by the team which develops
+basic drivers for any ixgbe NIC. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c
index 724dcbbc..d64abb2e 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -1222,9 +1222,9 @@ STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.h b/drivers/net/ixgbe/base/ixgbe_82598.h
index 0326e70b..20aab9fc 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.h
+++ b/drivers/net/ixgbe/base/ixgbe_82598.h
@@ -45,7 +45,7 @@ s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 832242ee..d9d11a8e 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -2169,9 +2169,9 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.h b/drivers/net/ixgbe/base/ixgbe_82599.h
index c034d3d9..d555dbce 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.h
+++ b/drivers/net/ixgbe/base/ixgbe_82599.h
@@ -57,7 +57,7 @@ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked);
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index 094ee526..4117fb01 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -205,6 +205,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
hw->mac.type = ixgbe_mac_X550EM_x;
hw->mvals = ixgbe_mvals_X550EM_x;
break;
@@ -1147,12 +1148,15 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* @min: driver minor number to be sent to firmware
* @build: driver build number to be sent to firmware
* @ver: driver version number to be sent to firmware
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
**/
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
- u8 ver)
+ u8 ver, u16 len, char *driver_ver)
{
return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
- build, ver), IXGBE_NOT_IMPLEMENTED);
+ build, ver, len, driver_ver),
+ IXGBE_NOT_IMPLEMENTED);
}
@@ -1575,7 +1579,7 @@ s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
(hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
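The widened ixgbe_set_fw_drv_ver() prototype forwards a driver string and its length alongside the numeric version fields. A minimal caller sketch, assuming ixgbe_api.h is included and a hypothetical version string; the generic MAC implementation shown later in this patch ignores len/driver_ver, while implementations that use the new ixgbe_hic_drv_info2 command (added in ixgbe_type.h) can hand the string to firmware.

/* Sketch only: the version numbers and string below are hypothetical. */
static s32 example_report_driver_version(struct ixgbe_hw *hw)
{
	char drv_version[] = "17.05.0";	/* hypothetical driver string */

	/* len excluding the NUL terminator (assumption) */
	return ixgbe_set_fw_drv_ver(hw, 17, 5, 0, 0,
				    (u16)(sizeof(drv_version) - 1),
				    drv_version);
}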
diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h
index 24c4ae8d..2f532aa8 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.h
+++ b/drivers/net/ixgbe/base/ixgbe_api.h
@@ -133,7 +133,7 @@ s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
- u8 ver);
+ u8 ver, u16 len, char *driver_ver);
s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
@@ -143,7 +143,7 @@ s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index cca19efc..4dabb434 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -113,6 +113,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.led_off = ixgbe_led_off_generic;
mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
+ mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
/* RAR, Multicast, VLAN */
mac->ops.set_rar = ixgbe_set_rar_generic;
@@ -188,7 +189,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
break;
case ixgbe_media_type_backplane:
- supported = true;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
+ supported = false;
+ else
+ supported = true;
break;
case ixgbe_media_type_copper:
/* only some copper devices support flow control autoneg */
@@ -409,8 +413,10 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Setup flow control */
ret_val = ixgbe_setup_fc(hw);
- if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED)
+ if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
+ DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
return ret_val;
+ }
/* Cache bit indicating need for crosstalk fix */
switch (hw->mac.type) {
@@ -492,11 +498,17 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
/* Reset the hardware */
status = hw->mac.ops.reset_hw(hw);
- if (status == IXGBE_SUCCESS) {
+ if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
/* Start the HW */
status = hw->mac.ops.start_hw(hw);
}
+ /* Initialize the LED link active for LED blink support */
+ hw->mac.ops.init_led_link_act(hw);
+
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
+
return status;
}
@@ -1136,6 +1148,47 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
+ * @hw: pointer to hardware structure
+ *
+ * Store the index for the link active LED. This will be used to support
+ * blinking the LED.
+ **/
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 led_reg, led_mode;
+ u8 i;
+
+ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /* Get LED link active from the LEDCTL register */
+ for (i = 0; i < 4; i++) {
+ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
+
+ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
+ IXGBE_LED_LINK_ACTIVE) {
+ mac->led_link_act = i;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ /*
+ * If LEDCTL register does not have the LED link active set, then use
+ * known MAC defaults.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_X550EM_x:
+ mac->led_link_act = 1;
+ break;
+ default:
+ mac->led_link_act = 2;
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_led_on_generic - Turns on the software controllable LEDs.
* @hw: pointer to hardware structure
* @index: led number to turn on
@@ -3764,7 +3817,8 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ if (mpsar_lo == 0 && mpsar_hi == 0 &&
+ rar != 0 && rar != hw->mac.san_mac_rar_index)
hw->mac.ops.clear_rar(hw, rar);
done:
return IXGBE_SUCCESS;
@@ -4184,7 +4238,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type >= ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -4595,13 +4649,15 @@ rel_out:
* semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
**/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub)
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
int i;
s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
+ UNREFERENCED_2PARAMETER(len, driver_ver);
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
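The LED index stored by ixgbe_init_led_link_act_generic() is meant to replace hard-coded LED numbers when blinking the link/activity LED. A minimal sketch of a caller, assuming the blink ops are populated as in ixgbe_init_ops_generic() above; the actual ethdev-level hook is outside this hunk.

/* Sketch: blink whichever LED was identified as link/activity. */
static s32 example_blink_link_led(struct ixgbe_hw *hw, bool on)
{
	u32 idx = hw->mac.led_link_act;

	return on ? hw->mac.ops.blink_led_start(hw, idx)
		  : hw->mac.ops.blink_led_stop(hw, idx);
}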
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h
index 66dd5659..903f34d5 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.h
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -72,6 +72,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
@@ -155,12 +156,14 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
int strategy);
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 ver);
+ u8 build, u8 ver, u16 len, const char *str);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data);
s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout);
-
+s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *);
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
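Note the pointer-to-array parameter in the new ixgbe_fw_phy_activity() declaration: callers must pass the address of a u32 array of exactly FW_PHY_ACT_DATA_COUNT elements, which the compiler enforces. A minimal caller sketch, assuming FW_PHY_ACT_GET_LINK_INFO (defined in ixgbe_type.h later in this patch) as the activity; the function body itself is not part of this hunk.

/* Sketch: issue a firmware PHY activity and read back the data dwords. */
static s32 example_fw_get_link_info(struct ixgbe_hw *hw)
{
	u32 data[FW_PHY_ACT_DATA_COUNT] = { 0 };
	s32 rc;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &data);
	if (rc != IXGBE_SUCCESS)
		return rc;

	/* data[] now holds the dwords returned by firmware. */
	return IXGBE_SUCCESS;
}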
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.c b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
new file mode 100644
index 00000000..47143a26
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
@@ -0,0 +1,240 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_vf.h"
+#include "ixgbe_hv_vf.h"
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ UNREFERENCED_2PARAMETER(hw, xcast_mode);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ UNREFERENCED_3PARAMETER(hw, index, addr);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vlan, u32 vind)
+{
+ UNREFERENCED_5PARAMETER(hw, index, addr, vlan, vind);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 links_reg;
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ DELAY(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Reserved for pre-x550 devices */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 reg;
+
+ /* If we are on Hyper-V, we implement this functionality
+ * differently.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+ /* CRC == 4 */
+ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+ UNREFERENCED_1PARAMETER(hw);
+
+ /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+ if (api != ixgbe_mbox_api_10)
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers, adapter-specific functions can
+ * override the assignment of generic function pointers by assigning
+ * their own adapter-specific function pointers.
+ * Does not touch the hardware.
+ **/
+s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw)
+{
+ /* Set defaults for VF then override applicable Hyper-V
+ * specific functions
+ */
+ ixgbe_init_ops_vf(hw);
+
+ hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf;
+ hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf;
+ hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf;
+ hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf;
+ hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf;
+ hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode;
+ hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf;
+ hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf;
+ hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf;
+
+ return IXGBE_SUCCESS;
+}
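ixgbevf_hv_init_ops_vf() installs the regular VF ops first and then overrides every callback that would need PF mailbox traffic, because the Hyper-V pass-through path has no mailbox. A minimal sketch of how an init path might choose the table, assuming a hypothetical on_hyperv flag; the real selection lives in the ethdev probe code, which is not part of this file.

/* Sketch only: on_hyperv is a hypothetical flag set during probe. */
static s32 example_init_vf_ops(struct ixgbe_hw *hw, bool on_hyperv)
{
	if (on_hyperv)
		return ixgbevf_hv_init_ops_vf(hw); /* mailbox-less stubs */

	return ixgbe_init_ops_vf(hw);              /* regular VF mailbox path */
}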
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.h b/drivers/net/ixgbe/base/ixgbe_hv_vf.h
new file mode 100644
index 00000000..9119f29f
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2016, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_HV_VF_H_
+#define _IXGBE_HV_VF_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_HV_VF_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h
index 7556a818..bde50a51 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -114,6 +114,14 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+ IXGBEVF_XCAST_MODE_PROMISC,
+};
+
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 77f0af51..4aab278d 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -44,6 +44,7 @@
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_byteorder.h>
+#include <rte_io.h>
#include "../ixgbe_logs.h"
#include "../ixgbe_bypass_defines.h"
@@ -81,6 +82,7 @@
#define UNREFERENCED_2PARAMETER(_p, _q)
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)
/* Shared code error reporting */
enum {
@@ -95,8 +97,9 @@ enum {
#define STATIC static
#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i)
#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i)
+#define IXGBE_CPU_TO_LE16(_i) rte_cpu_to_le_16(_i)
#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i)
-#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i)
+#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i)
#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i)
#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i)
@@ -121,16 +124,18 @@ typedef int bool;
#define prefetch(x) rte_prefetch0(x)
-#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define IXGBE_PCI_REG(reg) rte_read32(reg)
static inline uint32_t ixgbe_read_addr(volatile void* addr)
{
return rte_le_to_cpu_32(IXGBE_PCI_REG(addr));
}
-#define IXGBE_PCI_REG_WRITE(reg, value) do { \
- IXGBE_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \
-} while(0)
+#define IXGBE_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+
+#define IXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
#define IXGBE_PCI_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
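With the switch to rte_io, register reads go through rte_read32() and writes through rte_write32(), plus a relaxed-ordering write variant for callers that can tolerate it. A minimal sketch of how the macros compose, assuming hw->hw_addr is the mapped BAR and reusing the IXGBE_CTRL/IXGBE_CTRL_RST register and bit seen elsewhere in this patch.

/* Sketch: read-modify-write a device register via the osdep primitives. */
static void example_request_reset(struct ixgbe_hw *hw)
{
	volatile uint32_t *reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_CTRL);
	uint32_t ctrl;

	ctrl = ixgbe_read_addr(reg);                     /* rte_read32 + LE-to-CPU  */
	IXGBE_PCI_REG_WRITE(reg, ctrl | IXGBE_CTRL_RST); /* CPU-to-LE + rte_write32 */
}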
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 43c55d74..62c3080a 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -113,7 +113,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 10;
+ int max_retry = 3;
int retry = 0;
u8 csum_byte;
u8 high_bits;
@@ -121,8 +121,6 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
u8 reg_high;
u8 csum;
- if (hw->mac.type >= ixgbe_mac_X550)
- max_retry = 3;
reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
csum = ~csum;
@@ -293,8 +291,11 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
{
u16 ext_ability = 0;
- if (!ixgbe_validate_phy_addr(hw, phy_addr))
+ if (!ixgbe_validate_phy_addr(hw, phy_addr)) {
+ DEBUGOUT1("Unable to validate PHY address 0x%04X\n",
+ phy_addr);
return false;
+ }
if (ixgbe_get_phy_id(hw))
return false;
@@ -413,6 +414,8 @@ bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
if (phy_id != 0xFFFF && phy_id != 0x0)
valid = true;
+ DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id);
+
return valid;
}
@@ -441,6 +444,9 @@ s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
+ DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n",
+ phy_id_high, phy_id_low);
+
return status;
}
@@ -459,7 +465,6 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case X550_PHY_ID1:
case X550_PHY_ID2:
case X550_PHY_ID3:
case X540_PHY_ID:
@@ -477,7 +482,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
break;
case IXGBE_M88E1500_E_PHY_ID:
case IXGBE_M88E1543_E_PHY_ID:
- phy_type = ixgbe_phy_m88;
+ phy_type = ixgbe_phy_ext_1g_t;
break;
default:
phy_type = ixgbe_phy_unknown;
@@ -528,11 +533,30 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*/
for (i = 0; i < 30; i++) {
msec_delay(100);
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
- if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
- usec_delay(2);
- break;
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ usec_delay(2);
+ break;
+ }
+ } else {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+ usec_delay(2);
+ break;
+ }
}
}
@@ -554,7 +578,7 @@ out:
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
+ u16 *phy_data)
{
u32 i, data, command;
@@ -576,12 +600,13 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
+ break;
}
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
+ DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n");
return IXGBE_ERR_PHY;
}
@@ -611,6 +636,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
+ DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n");
return IXGBE_ERR_PHY;
}
@@ -768,91 +794,63 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- /* Set or unset auto-negotiation 10G advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_10GB_FULL))
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
- hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
- if (speed & IXGBE_LINK_SPEED_5GB_FULL) {
- /* Set or unset auto-negotiation 5G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_5GB_FULL)
- autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) {
- /* Set or unset auto-negotiation 2.5G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_2_5GB_FULL)
- autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Set or unset auto-negotiation 5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
+
+ /* Set or unset auto-negotiation 2.5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_2_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
}
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- /* Set or unset auto-negotiation 1G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 1G advertisement */
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_1GB_FULL))
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- if (speed & IXGBE_LINK_SPEED_100_FULL) {
- /* Set or unset auto-negotiation 100M advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+ IXGBE_MII_100BASE_T_ADVERTISE_HALF);
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
+ (speed & IXGBE_LINK_SPEED_100_FULL))
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
- autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
- IXGBE_MII_100BASE_T_ADVERTISE_HALF);
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
/* Blocked by MNG FW so don't reset PHY */
if (ixgbe_check_reset_blocked(hw))
@@ -1542,16 +1540,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("SFP+ module not supported\n");
@@ -1583,9 +1575,9 @@ err_read_i2c_eeprom:
*
* Determines physical layer capabilities of the current SFP.
*/
-s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u8 comp_codes_10g = 0;
u8 comp_codes_1g = 0;
@@ -1804,16 +1796,10 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("QSFP module not supported\n");
@@ -1838,7 +1824,6 @@ err_read_i2c_eeprom:
return IXGBE_ERR_SFP_NOT_PRESENT;
}
-
/**
* ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h
index da14abcd..cf8cadd9 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.h
+++ b/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -92,8 +92,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
-#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
-#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
+#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F
+#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */
+#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
@@ -154,73 +155,6 @@ POSSIBILITY OF SUCH DAMAGE.
/* SFP+ SFF-8472 Compliance */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-/* More phy definitions */
-#define IXGBE_M88E1500_COPPER_CTRL 0 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_CTRL_RESET (1u << 15)
-#define IXGBE_M88E1500_COPPER_CTRL_AN_EN (1u << 12)
-#define IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN (1u << 11)
-#define IXGBE_M88E1500_COPPER_CTRL_RESTART_AN (1u << 9)
-#define IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX (1u << 8)
-#define IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB (1u << 6)
-#define IXGBE_M88E1500_COPPER_STATUS 1 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_STATUS_AN_DONE (1u << 5)
-#define IXGBE_M88E1500_COPPER_AN 4 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_AN_AS_PAUSE (1u << 11)
-#define IXGBE_M88E1500_COPPER_AN_PAUSE (1u << 10)
-#define IXGBE_M88E1500_COPPER_AN_T4 (1u << 9)
-#define IXGBE_M88E1500_COPPER_AN_100TX_FD (1u << 8)
-#define IXGBE_M88E1500_COPPER_AN_100TX_HD (1u << 7)
-#define IXGBE_M88E1500_COPPER_AN_10TX_FD (1u << 6)
-#define IXGBE_M88E1500_COPPER_AN_10TX_HD (1u << 5)
-#define IXGBE_M88E1500_COPPER_AN_LP_ABILITY 5 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_AN_LP_AS_PAUSE (1u << 11)
-#define IXGBE_M88E1500_COPPER_AN_LP_PAUSE (1u << 10)
-#define IXGBE_M88E1500_1000T_CTRL 9 /* Page 0 reg */
-/* 1=Configure PHY as Master 0=Configure PHY as Slave */
-#define IXGBE_M88E1500_1000T_CTRL_MS_VALUE (1u << 11)
-#define IXGBE_M88E1500_1000T_CTRL_1G_FD (1u << 9)
-/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
-#define IXGBE_M88E1500_1000T_CTRL_MS_ENABLE (1u << 12)
-#define IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX (1u << 9)
-#define IXGBE_M88E1500_1000T_CTRL_HALF_DUPLEX (1u << 8)
-#define IXGBE_M88E1500_1000T_STATUS 10 /* Page 0 reg */
-#define IXGBE_M88E1500_AUTO_COPPER_SGMII 0x2
-#define IXGBE_M88E1500_AUTO_COPPER_BASEX 0x3
-#define IXGBE_M88E1500_STATUS_LINK (1u << 2) /* Interface Link Bit */
-#define IXGBE_M88E1500_MAC_CTRL_1 16 /* Page 0 reg */
-#define IXGBE_M88E1500_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
-#define IXGBE_M88E1500_MAC_CTRL_1_DWN_SHIFT 12
-#define IXGBE_M88E1500_MAC_CTRL_1_DWN_4X 3u
-#define IXGBE_M88E1500_MAC_CTRL_1_ED_SHIFT 8
-#define IXGBE_M88E1500_MAC_CTRL_1_ED_TM 3u
-#define IXGBE_M88E1500_MAC_CTRL_1_MDIX_SHIFT 5
-#define IXGBE_M88E1500_MAC_CTRL_1_MDIX_AUTO 3u
-#define IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN (1u << 2)
-#define IXGBE_M88E1500_PHY_SPEC_STATUS 17 /* Page 0 reg */
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_SHIFT 14
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_MASK 3u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_10 0u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_100 1u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_1000 2u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_DUPLEX (1u << 13)
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_RESOLVED (1u << 11)
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_LINK (1u << 10)
-#define IXGBE_M88E1500_PAGE_ADDR 22 /* All pages reg */
-#define IXGBE_M88E1500_FIBER_CTRL 0 /* Page 1 reg */
-#define IXGBE_M88E1500_FIBER_CTRL_RESET (1u << 15)
-#define IXGBE_M88E1500_FIBER_CTRL_SPEED_LSB (1u << 13)
-#define IXGBE_M88E1500_FIBER_CTRL_AN_EN (1u << 12)
-#define IXGBE_M88E1500_FIBER_CTRL_POWER_DOWN (1u << 11)
-#define IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL (1u << 8)
-#define IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB (1u << 6)
-#define IXGBE_M88E1500_MAC_SPEC_CTRL 16 /* Page 2 reg */
-#define IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN (1u << 3)
-#define IXGBE_M88E1500_EEE_CTRL_1 0 /* Page 18 reg */
-#define IXGBE_M88E1500_EEE_CTRL_1_MS (1u << 0) /* EEE Master/Slave */
-#define IXGBE_M88E1500_GEN_CTRL 20 /* Page 18 reg */
-#define IXGBE_M88E1500_GEN_CTRL_RESET (1u << 15)
-#define IXGBE_M88E1500_GEN_CTRL_MODE_SGMII_COPPER 1u /* Mode bits 0-2 */
-
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
@@ -258,7 +192,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index 4982e035..bda85589 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -146,6 +146,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
@@ -1045,7 +1046,7 @@ struct ixgbe_dmac_config {
#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_LSWFW 0x15014
+#define IXGBE_LSWFW 0x15F14
#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
#define IXGBE_BMCIPVAL 0x05060
#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001
@@ -1647,7 +1648,6 @@ struct ixgbe_dmac_config {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
-#define X550_PHY_ID1 0x01540220
#define X550_PHY_ID2 0x01540223
#define X550_PHY_ID3 0x01540221
#define X557_PHY_ID 0x01540240
@@ -2622,6 +2622,7 @@ enum {
#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */
#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -3037,6 +3038,7 @@ enum ixgbe_fdir_pballoc_type {
#define FW_CEM_UNUSED_VER 0x0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
#define FW_WRITE_SHADOW_RAM_CMD 0x33
@@ -3062,6 +3064,59 @@ enum ixgbe_fdir_pballoc_type {
#define FW_INT_PHY_REQ_LEN 10
#define FW_INT_PHY_REQ_READ 0
#define FW_INT_PHY_REQ_WRITE 1
+#define FW_PHY_ACT_REQ_CMD 5
+#define FW_PHY_ACT_DATA_COUNT 4
+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT)
+#define FW_PHY_ACT_INIT_PHY 1
+#define FW_PHY_ACT_SETUP_LINK 2
+#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0)
+#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1)
+#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2)
+#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3)
+#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4)
+#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5)
+#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6)
+#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7)
+#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8)
+#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9)
+#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u
+#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18)
+#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19)
+#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20)
+#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22)
+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0)
+#define FW_PHY_ACT_GET_LINK_INFO 3
+#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21)
+#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22)
+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24)
+#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29)
+#define FW_PHY_ACT_FORCE_LINK_DOWN 4
+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0)
+#define FW_PHY_ACT_PHY_SW_RESET 5
+#define FW_PHY_ACT_PHY_HW_RESET 6
+#define FW_PHY_ACT_GET_PHY_INFO 7
+#define FW_PHY_ACT_UD_2 0x1002
+#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6)
+#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5)
+#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4)
+#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3)
+#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2)
+#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1)
+#define FW_PHY_ACT_RETRIES 50
+#define FW_PHY_INFO_SPEED_MASK 0xFFFu
+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
/* Host Interface Command Structures */
@@ -3111,6 +3166,16 @@ struct ixgbe_hic_drv_info {
u16 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+struct ixgbe_hic_drv_info2 {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ char driver_string[FW_CEM_DRIVER_VERSION_SIZE];
+};
+
/* These need to be dword aligned */
struct ixgbe_hic_read_shadow_ram {
union ixgbe_hic_hdr2 hdr;
@@ -3159,6 +3224,19 @@ struct ixgbe_hic_internal_phy_resp {
__be32 read_data;
};
+struct ixgbe_hic_phy_activity_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad;
+ __le16 activity_id;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
+struct ixgbe_hic_phy_activity_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
#ifdef C99
#pragma pack(pop)
#else
@@ -3332,23 +3410,25 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
-typedef u32 ixgbe_physical_layer;
+typedef u64 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
-#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
@@ -3567,7 +3647,9 @@ enum ixgbe_phy_type {
ixgbe_phy_aq,
ixgbe_phy_x550em_kr,
ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_xfi,
ixgbe_phy_x550em_ext_t,
+ ixgbe_phy_ext_1g_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -3586,7 +3668,7 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
ixgbe_phy_sgmii,
- ixgbe_phy_m88,
+ ixgbe_phy_fw,
ixgbe_phy_generic
};
@@ -3643,14 +3725,6 @@ enum ixgbe_fc_mode {
ixgbe_fc_default
};
-/* Master/slave control */
-enum ixgbe_ms_type {
- ixgbe_ms_hw_default = 0,
- ixgbe_ms_force_master,
- ixgbe_ms_force_slave,
- ixgbe_ms_auto
-};
-
/* Smart Speed Settings */
#define IXGBE_SMARTSPEED_MAX_RETRIES 3
enum ixgbe_smart_speed {
@@ -3833,7 +3907,7 @@ struct ixgbe_mac_operations {
s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
void (*enable_relaxed_ordering)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ u64 (*get_supported_physical_layer)(struct ixgbe_hw *);
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
@@ -3875,6 +3949,7 @@ struct ixgbe_mac_operations {
s32 (*led_off)(struct ixgbe_hw *, u32);
s32 (*blink_led_start)(struct ixgbe_hw *, u32);
s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+ s32 (*init_led_link_act)(struct ixgbe_hw *);
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
@@ -3907,7 +3982,8 @@ struct ixgbe_mac_operations {
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ const char *);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
@@ -4017,6 +4093,7 @@ struct ixgbe_mac_info {
struct ixgbe_dmac_config dmac_config;
bool set_lben;
u32 max_link_up_time;
+ u8 led_link_act;
};
struct ixgbe_phy_info {
@@ -4032,8 +4109,8 @@ struct ixgbe_phy_info {
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
ixgbe_link_speed speeds_supported;
- enum ixgbe_ms_type ms_type;
- enum ixgbe_ms_type original_ms_type;
+ ixgbe_link_speed eee_speeds_supported;
+ ixgbe_link_speed eee_speeds_advertised;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
@@ -4257,8 +4334,8 @@ struct ixgbe_hw {
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21)
-#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M (1 << 23)
-#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24)
+#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25)
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
(0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
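Because the capability bitmap outgrew 32 bits with the new 10BASE-T and 2500BASE-KX entries, ixgbe_physical_layer and every get_supported_physical_layer() implementation become u64; callers must keep the result in a 64-bit variable before masking. A minimal sketch:

/* Sketch: test link capabilities against the widened 64-bit mask. */
static bool example_supports_10gbase_t(struct ixgbe_hw *hw)
{
	u64 layers = ixgbe_get_supported_physical_layer(hw);

	return (layers & IXGBE_PHYSICAL_LAYER_10GBASE_T) != 0;
}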
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c
index e9c13f23..b513190a 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -432,6 +432,10 @@ s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
switch (hw->api_version) {
case ixgbe_mbox_api_12:
+ /* New modes were introduced in 1.3 version */
+ if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI)
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ /* Fall through */
case ixgbe_mbox_api_13:
break;
default:
@@ -613,13 +617,29 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
break;
case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Since Reserved in older MAC's */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
}
/* if the read failed it could just be a mailbox collision, best wait
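The new guard means a VF that negotiated mailbox API 1.2 can still request ALLMULTI, but the promiscuous mode introduced with API 1.3 is rejected before any mailbox message goes out. A minimal sketch of the caller-visible behaviour, assuming the API version was negotiated earlier:

/* Sketch: promiscuous xcast mode is only honoured on mailbox API 1.3. */
static s32 example_request_promisc(struct ixgbe_hw *hw)
{
	/* Returns IXGBE_ERR_FEATURE_NOT_SUPPORTED on ixgbe_mbox_api_12. */
	return ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC);
}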
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h
index d288f31a..3efffe82 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.h
+++ b/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -34,6 +34,8 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef _IXGBE_VF_H_
#define _IXGBE_VF_H_
+#include "ixgbe_type.h"
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -114,6 +116,7 @@ struct ixgbevf_hw_stats {
u64 saved_reset_vfmprc;
};
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c
index 6e778bc9..0e51813b 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -208,6 +208,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_reset_hw_X540");
@@ -220,10 +221,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
ixgbe_clear_tx_pending(hw);
mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
ctrl = IXGBE_CTRL_RST;
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
@@ -321,9 +329,9 @@ out:
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u16 ext_ability = 0;
DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
@@ -491,7 +499,6 @@ s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
- u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
/* Do not use hw->eeprom.ops.read because we do not want to take
@@ -501,14 +508,15 @@ s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
- /* Include 0x0-0x3F in the checksum */
- for (i = 0; i <= checksum_last_word; i++) {
+ /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the
+ * checksum itself
+ */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word)) {
DEBUGOUT("EEPROM read failed\n");
return IXGBE_ERR_EEPROM;
}
- if (i != IXGBE_EEPROM_CHECKSUM)
- checksum += word;
+ checksum += word;
}
/* Include all data from pointers 0x3, 0x6-0xE. This excludes the
@@ -775,8 +783,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
/* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
- if (ixgbe_get_swfw_sync_semaphore(hw))
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
+ }
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
@@ -798,6 +808,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
if (swmask == IXGBE_GSSR_SW_MNG_SM) {
ERROR_REPORT1(IXGBE_ERROR_POLLING,
"Failed to get SW only semaphore");
+ DEBUGOUT("Failed to get SW only semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
@@ -806,8 +817,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* of the requested resource(s) while ignoring the corresponding FW/HW
* bits in the SW_FW_SYNC register.
*/
- if (ixgbe_get_swfw_sync_semaphore(hw))
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
+ }
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (swfw_sync & (fwmask | hwmask)) {
swfw_sync |= swmask;
@@ -829,9 +842,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
rmask |= IXGBE_GSSR_I2C_MASK;
ixgbe_release_swfw_sync_X540(hw, rmask);
ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.h b/drivers/net/ixgbe/base/ixgbe_x540.h
index e4baf6ff..8a19ae2e 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.h
+++ b/drivers/net/ixgbe/base/ixgbe_x540.h
@@ -43,7 +43,7 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool link_up_wait_to_complete);
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index acb8140c..674dc144 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -62,7 +62,7 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
mac->ops.dmac_config = ixgbe_dmac_config_X550;
mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
- mac->ops.setup_eee = ixgbe_setup_eee_X550;
+ mac->ops.setup_eee = NULL;
mac->ops.set_source_address_pruning =
ixgbe_set_source_address_pruning_X550;
mac->ops.set_ethertype_anti_spoofing =
@@ -83,6 +83,8 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
mac->ops.mdd_event = ixgbe_mdd_event_X550;
mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
mac->ops.disable_rx = ixgbe_disable_rx_x550;
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
@@ -448,23 +450,161 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->phy.type = ixgbe_phy_x550em_xfi;
+ break;
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_A_10G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
default:
break;
}
return IXGBE_SUCCESS;
}
+/**
+ * ixgbe_fw_phy_activity - Perform an activity on a PHY
+ * @hw: pointer to hardware structure
+ * @activity: activity to perform
+ * @data: Pointer to 4 32-bit words of data
+ */
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT])
+{
+ union {
+ struct ixgbe_hic_phy_activity_req cmd;
+ struct ixgbe_hic_phy_activity_resp rsp;
+ } hic;
+ u16 retries = FW_PHY_ACT_RETRIES;
+ s32 rc;
+ u16 i;
+
+ do {
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
+
+ rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
+ sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (rc != IXGBE_SUCCESS)
+ return rc;
+ if (hic.rsp.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS) {
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
+ return IXGBE_SUCCESS;
+ }
+ usec_delay(20);
+ --retries;
+ } while (retries > 0);
+
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+}
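For illustration only (not part of this patch): the helper above packs four 32-bit words in big-endian order, issues the host interface command, and retries up to FW_PHY_ACT_RETRIES times before giving up. A minimal hedged sketch of a caller, using only identifiers introduced in this patch and assuming a valid struct ixgbe_hw *hw:

        /* Hypothetical caller: query link info from the FW-managed PHY. */
        u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
        s32 rc;

        rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
        if (rc == IXGBE_SUCCESS &&
            (info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE))
                DEBUGOUT("FW reports auto-negotiation complete\n");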
+
+static const struct {
+ u16 fw_speed;
+ ixgbe_link_speed phy_speed;
+} ixgbe_fw_map[] = {
+ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
+ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
+ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
+};
+
+/**
+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+{
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ u16 phy_speeds;
+ u16 phy_id_lo;
+ s32 rc;
+ u16 i;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
+ if (rc)
+ return rc;
+
+ hw->phy.speeds_supported = 0;
+ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (phy_speeds & ixgbe_fw_map[i].fw_speed)
+ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
+ }
+ if (!hw->phy.autoneg_advertised)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
+ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+{
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ return ixgbe_get_phy_id_fw(hw);
+}
+
+/**
+ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+
+ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
+ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
+}
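As context only (a hypothetical call site, not taken from the patch), a teardown or fault path for a FW-controlled PHY could force the link down through this helper, assuming a valid struct ixgbe_hw *hw:

        /* Hypothetical stop path: force the FW-controlled PHY link down. */
        if (hw->phy.type == ixgbe_phy_fw &&
            ixgbe_shutdown_fw_phy(hw) != IXGBE_SUCCESS)
                DEBUGOUT("FW PHY shutdown failed\n");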
+
STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
@@ -601,18 +741,20 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
else
mac->ops.setup_fc = ixgbe_setup_fc_X550em;
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_X550em;
switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_A_KR:
- case IXGBE_DEV_ID_X550EM_A_KR_L:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ mac->ops.setup_fc = NULL;
+ phy->ops.identify = ixgbe_identify_phy_fw;
+ phy->ops.set_phy_power = NULL;
+ phy->ops.get_firmware_version = NULL;
break;
default:
- mac->ops.setup_eee = NULL;
+ phy->ops.identify = ixgbe_identify_phy_x550em;
}
- /* PHY */
- phy->ops.init = ixgbe_init_phy_ops_X550em;
- phy->ops.identify = ixgbe_identify_phy_x550em;
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
phy->ops.set_phy_power = NULL;
@@ -631,6 +773,92 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+ u16 i;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_rx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_tx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
+ setup[0] |= ixgbe_fw_map[i].fw_speed;
+ }
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
+
+ if (hw->phy.eee_speeds_advertised)
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+ if (rc)
+ return rc;
+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+ return IXGBE_ERR_OVERTEMP;
+ return IXGBE_SUCCESS;
+}
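For illustration (not part of the patch): the advertised-speed mask drives the FW setup word built above, so restricting hw->phy.autoneg_advertised before invoking the setup_link PHY op (which this patch points at ixgbe_setup_fw_link for ixgbe_phy_fw) limits what the firmware negotiates. A hedged sketch, assuming an initialized struct ixgbe_hw *hw:

        /* Hypothetical: advertise only 1G full duplex on a FW-managed PHY. */
        s32 rc = IXGBE_SUCCESS;

        hw->phy.autoneg_advertised = IXGBE_LINK_SPEED_1GB_FULL;
        if (hw->phy.ops.setup_link)
                rc = hw->phy.ops.setup_link(hw);
        if (rc == IXGBE_ERR_OVERTEMP)
                DEBUGOUT("FW kept the link down (overtemp response)\n");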
+
+/**
+ * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ */
+static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+{
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_setup_eee_fw - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on enable_eee flag.
+ * This function controls EEE for firmware-based PHY implementations.
+ */
+static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
+{
+ if (!!hw->phy.eee_speeds_advertised == enable_eee)
+ return IXGBE_SUCCESS;
+ if (enable_eee)
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ else
+ hw->phy.eee_speeds_advertised = 0;
+ return hw->phy.ops.setup_link(hw);
+}
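As a usage note (hypothetical, not part of the patch): because ixgbe_setup_eee_fw is installed as mac->ops.setup_eee for the X550EM_a 1G-T parts later in this file, a caller toggles EEE by flipping the advertised set and letting the op re-run link setup:

        /* Hypothetical: disable EEE, then re-enable it with all supported speeds. */
        if (hw->mac.ops.setup_eee &&
            hw->mac.ops.setup_eee(hw, false) == IXGBE_SUCCESS)
                (void)hw->mac.ops.setup_eee(hw, true);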
+
+/**
* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
*
@@ -671,10 +899,18 @@ s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
break;
}
- if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
- (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
- mac->ops.setup_fc = ixgbe_setup_fc_sgmii_x550em_a;
+ mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
+ mac->ops.setup_eee = ixgbe_setup_eee_fw;
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ break;
+ default:
+ break;
}
return ret_val;
@@ -709,6 +945,7 @@ s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
ixgbe_write_i2c_combined_generic_unlocked;
link->addr = IXGBE_CS4227;
+
return ret_val;
}
@@ -876,159 +1113,6 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
}
/**
- * ixgbe_enable_eee_x550 - Enable EEE support
- * @hw: pointer to hardware structure
- */
-STATIC s32 ixgbe_enable_eee_x550(struct ixgbe_hw *hw)
-{
- u16 autoneg_eee_reg;
- u32 link_reg;
- s32 status;
-
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Advertise EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_eee_reg);
-
- autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_eee_reg);
- return IXGBE_SUCCESS;
- }
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_A_KR:
- case IXGBE_DEV_ID_X550EM_A_KR_L:
- status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
-
- /* Don't advertise FEC capability when EEE enabled. */
- link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- break;
- default:
- break;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_disable_eee_x550 - Disable EEE support
- * @hw: pointer to hardware structure
- */
-STATIC s32 ixgbe_disable_eee_x550(struct ixgbe_hw *hw)
-{
- u16 autoneg_eee_reg;
- u32 link_reg;
- s32 status;
-
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Disable advertised EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_eee_reg);
-
- autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_eee_reg);
- return IXGBE_SUCCESS;
- }
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_A_KR:
- case IXGBE_DEV_ID_X550EM_A_KR_L:
- status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
-
- /* Advertise FEC capability when EEE is disabled. */
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- break;
- default:
- break;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_setup_eee_X550 - Enable/disable EEE support
- * @hw: pointer to the HW structure
- * @enable_eee: boolean flag to enable EEE
- *
- * Enable/disable EEE based on enable_eee flag.
- * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
- * are modified.
- *
- **/
-s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
-{
- s32 status;
- u32 eeer;
-
- DEBUGFUNC("ixgbe_setup_eee_X550");
-
- eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
- /* Enable or disable EEE per flag */
- if (enable_eee) {
- eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
- /* Not supported on first revision of X550EM_x. */
- if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
- !(IXGBE_FUSES0_REV_MASK &
- IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
- return IXGBE_SUCCESS;
-
- status = ixgbe_enable_eee_x550(hw);
- if (status)
- return status;
- } else {
- eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
- status = ixgbe_disable_eee_x550(hw);
- if (status)
- return status;
- }
- IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
-
- return IXGBE_SUCCESS;
-}
-
-/**
 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
@@ -1227,13 +1311,20 @@ s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
sizeof(token_cmd),
IXGBE_HI_COMMAND_TIMEOUT,
true);
- if (status)
+ if (status) {
+ DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
+ status);
return status;
+ }
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return IXGBE_SUCCESS;
- if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
+ DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
+ token_cmd.hdr.cmd_or_resp.ret_status);
return IXGBE_ERR_FW_RESP_INVALID;
+ }
+ DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
return IXGBE_ERR_TOKEN_RETRY;
}
@@ -1492,6 +1583,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
@@ -1722,11 +1814,11 @@ STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
}
/**
- * ixgbe_setup_sgmii_m88 - Set up link for sgmii with Marvell PHYs
+ * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-STATIC s32 ixgbe_setup_sgmii_m88(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg_wait)
+STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
@@ -1776,7 +1868,7 @@ STATIC s32 ixgbe_setup_sgmii_m88(struct ixgbe_hw *hw, ixgbe_link_speed speed,
return rc;
flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
- flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
@@ -1826,7 +1918,9 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
if (hw->mac.type == ixgbe_mac_X550EM_a) {
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
- mac->ops.setup_link = ixgbe_setup_sgmii_m88;
+ mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+ mac->ops.check_link =
+ ixgbe_check_mac_link_generic;
} else {
mac->ops.setup_link =
ixgbe_setup_mac_link_t_X550em;
@@ -1859,6 +1953,12 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+ if (hw->phy.type == ixgbe_phy_fw) {
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+ return 0;
+ }
+
/* SFP */
if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -1882,11 +1982,7 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
switch (hw->phy.type) {
- case ixgbe_phy_m88:
- *speed = IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_10_FULL;
- break;
+ case ixgbe_phy_ext_1g_t:
case ixgbe_phy_sgmii:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
@@ -2024,19 +2120,32 @@ STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
/* Enable link status change alarm */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
- if (status != IXGBE_SUCCESS)
- return status;
+ /* Enable the LASI interrupts on X552 devices to receive notifications
+ * of the link configuration of the external PHY and, in turn, support
+ * configuration of the internal iXFI link, since iXFI does not support
+ * auto-negotiation. This is not required for X553 devices, whose KR
+ * internal link to the external PHY performs auto-negotiation, so the
+ * check below avoids enabling LASI interrupts on X553 devices.
+ */
+ if (hw->mac.type != ixgbe_mac_X550EM_a) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
- reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+ if (status != IXGBE_SUCCESS)
+ return status;
- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
- if (status != IXGBE_SUCCESS)
- return status;
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
/* Enable high temperature failure and global fault alarms */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
@@ -2150,262 +2259,47 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
}
/**
- * ixgbe_setup_m88 - setup m88 PHY
+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-STATIC s32 ixgbe_setup_m88(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
{
- u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- u16 reg;
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
s32 rc;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
return IXGBE_SUCCESS;
- rc = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
if (rc)
return rc;
+ memset(store, 0, sizeof(store));
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- goto out;
- if (reg & IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN) {
- reg &= ~IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- reg);
- }
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0, &reg);
- if (rc)
- goto out;
- if (reg & IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN) {
- reg &= ~IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0,
- reg);
- }
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 2);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_MAC_SPEC_CTRL, 0,
- &reg);
- if (rc)
- goto out;
- if (reg & IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN) {
- reg &= ~IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_SPEC_CTRL, 0,
- reg);
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0,
- 0);
- if (rc)
- goto out;
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- &reg);
- if (rc)
- goto out;
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESET;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- reg);
- usec_delay(50);
- } else {
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0,
- 0);
- if (rc)
- goto out;
- }
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- goto out;
-
- if (!(reg & IXGBE_M88E1500_COPPER_CTRL_AN_EN)) {
- reg |= IXGBE_M88E1500_COPPER_CTRL_AN_EN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- reg);
- }
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_1000T_CTRL, 0, &reg);
- if (rc)
- goto out;
- reg &= ~IXGBE_M88E1500_1000T_CTRL_HALF_DUPLEX;
- reg &= ~IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg |= IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_1000T_CTRL, 0, reg);
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_AN, 0, &reg);
- if (rc)
- goto out;
- reg &= ~IXGBE_M88E1500_COPPER_AN_T4;
- reg &= ~IXGBE_M88E1500_COPPER_AN_100TX_FD;
- reg &= ~IXGBE_M88E1500_COPPER_AN_100TX_HD;
- reg &= ~IXGBE_M88E1500_COPPER_AN_10TX_FD;
- reg &= ~IXGBE_M88E1500_COPPER_AN_10TX_HD;
-
- /* Flow control auto negotiation configuration was moved from here to
- * the function ixgbe_setup_fc_sgmii_x550em_a()
- */
-
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- reg |= IXGBE_M88E1500_COPPER_AN_100TX_FD;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
- reg |= IXGBE_M88E1500_COPPER_AN_10TX_FD;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_AN, 0, reg);
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- goto out;
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESTART_AN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
-
-
- hw->mac.ops.release_swfw_sync(hw, mask);
- return rc;
-
-out:
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- hw->mac.ops.release_swfw_sync(hw, mask);
- return rc;
-}
-
-/**
- * ixgbe_reset_phy_m88e1500 - Reset m88e1500 PHY
- * @hw: pointer to hardware structure
- *
- * The PHY token must be held when calling this function.
- */
-static s32 ixgbe_reset_phy_m88e1500(struct ixgbe_hw *hw)
-{
- u16 reg;
- s32 rc;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
if (rc)
return rc;
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- return rc;
-
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESET;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
-
- usec_delay(10);
-
- return rc;
+ return ixgbe_setup_fw_link(hw);
}
/**
- * ixgbe_reset_phy_m88e1543 - Reset m88e1543 PHY
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
* @hw: pointer to hardware structure
- *
- * The PHY token must be held when calling this function.
*/
-static s32 ixgbe_reset_phy_m88e1543(struct ixgbe_hw *hw)
+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
- return hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
-}
-
-/**
- * ixgbe_reset_phy_m88 - Reset m88 PHY
- * @hw: pointer to hardware structure
- */
-STATIC s32 ixgbe_reset_phy_m88(struct ixgbe_hw *hw)
-{
- u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- u16 reg;
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
s32 rc;
- if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
- return IXGBE_SUCCESS;
-
- rc = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
return rc;
- switch (hw->phy.id) {
- case IXGBE_M88E1500_E_PHY_ID:
- rc = ixgbe_reset_phy_m88e1500(hw);
- break;
- case IXGBE_M88E1543_E_PHY_ID:
- rc = ixgbe_reset_phy_m88e1543(hw);
- break;
- default:
- rc = IXGBE_ERR_PHY;
- break;
+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+ ixgbe_shutdown_fw_phy(hw);
+ return IXGBE_ERR_OVERTEMP;
}
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_FIBER_CTRL_RESET |
- IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
- IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_FIBER_CTRL, 0, reg);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 18);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_GEN_CTRL_RESET |
- IXGBE_M88E1500_GEN_CTRL_MODE_SGMII_COPPER;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_GEN_CTRL, 0, reg);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_FIBER_CTRL_RESET |
- IXGBE_M88E1500_FIBER_CTRL_AN_EN |
- IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
- IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_FIBER_CTRL, 0, reg);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- if (rc)
- goto out;
-
- reg = (IXGBE_M88E1500_MAC_CTRL_1_DWN_4X <<
- IXGBE_M88E1500_MAC_CTRL_1_DWN_SHIFT) |
- (IXGBE_M88E1500_MAC_CTRL_1_ED_TM <<
- IXGBE_M88E1500_MAC_CTRL_1_ED_SHIFT) |
- (IXGBE_M88E1500_MAC_CTRL_1_MDIX_AUTO <<
- IXGBE_M88E1500_MAC_CTRL_1_MDIX_SHIFT);
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0, reg);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_COPPER_CTRL_RESET |
- IXGBE_M88E1500_COPPER_CTRL_AN_EN |
- IXGBE_M88E1500_COPPER_CTRL_RESTART_AN |
- IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX |
- IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
- if (rc)
- goto out;
-
- hw->mac.ops.release_swfw_sync(hw, mask);
-
- /* In case of first reset set advertised speeds to default value */
- if (!hw->phy.autoneg_advertised)
- hw->phy.autoneg_advertised = IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_10_FULL;
-
- return ixgbe_setup_m88(hw);
-
-out:
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- hw->mac.ops.release_swfw_sync(hw, mask);
- return rc;
+ return IXGBE_SUCCESS;
}
/**
@@ -2450,6 +2344,9 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_phy_ops_X550em");
+ hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
@@ -2463,6 +2360,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
else
@@ -2482,13 +2380,18 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
+ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
+ break;
default:
break;
}
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
return ret_val;
/* Setup function pointers based on detected hardware */
@@ -2508,6 +2411,16 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
+ case ixgbe_phy_ext_1g_t:
+ /* link is managed by FW */
+ phy->ops.setup_link = NULL;
+ break;
+ case ixgbe_phy_x550em_xfi:
+ /* link is managed by HW */
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
case ixgbe_phy_x550em_ext_t:
/* If internal link mode is XFI, then setup iXFI internal link,
* else setup KR now.
@@ -2527,9 +2440,9 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
case ixgbe_phy_sgmii:
phy->ops.setup_link = NULL;
break;
- case ixgbe_phy_m88:
- phy->ops.setup_link = ixgbe_setup_m88;
- phy->ops.reset = ixgbe_reset_phy_m88;
+ case ixgbe_phy_fw:
+ phy->ops.setup_link = ixgbe_setup_fw_link;
+ phy->ops.reset = ixgbe_reset_phy_fw;
break;
default:
break;
@@ -2549,8 +2462,6 @@ STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_SGMII:
case IXGBE_DEV_ID_X550EM_A_SGMII_L:
- case IXGBE_DEV_ID_X550EM_A_1G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_QSFP:
@@ -2559,6 +2470,13 @@ STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ /* Select fast MDIO clock speed for these devices */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
default:
break;
}
@@ -2579,14 +2497,16 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
u32 ctrl = 0;
u32 i;
bool link_up = false;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_reset_hw_X550em");
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
- if (status != IXGBE_SUCCESS)
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
return status;
-
+ }
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
@@ -2595,14 +2515,23 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ DEBUGOUT("Returning from reset HW due to PHY init failure\n");
return status;
+ }
/* start the external PHY */
if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
status = ixgbe_init_ext_t_x550em(hw);
- if (status)
+ if (status) {
+ DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
+ status);
return status;
+ }
}
/* Setup SFP module if there is one present. */
@@ -2615,8 +2544,10 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
return status;
/* Reset PHY */
- if (!hw->phy.reset_disable && hw->phy.ops.reset)
- hw->phy.ops.reset(hw);
+ if (!hw->phy.reset_disable && hw->phy.ops.reset) {
+ if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
+ return IXGBE_ERR_OVERTEMP;
+ }
mac_reset_top:
/* Issue global reset to the MAC. Needs to be SW reset if link is up.
@@ -2631,9 +2562,17 @@ mac_reset_top:
ctrl = IXGBE_CTRL_RST;
}
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
/* Poll for reset bit to self-clear meaning reset is complete */
for (i = 0; i < 10; i++) {
@@ -2674,6 +2613,9 @@ mac_reset_top:
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
ixgbe_setup_mux_ctl(hw);
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
+
return status;
}
@@ -2723,14 +2665,16 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY.
* @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY for X550EM_x.
**/
s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
- if (hw->mac.type != ixgbe_mac_X550EM_x)
+ /* leave link alone for 2.5G */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
return IXGBE_SUCCESS;
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -2762,53 +2706,18 @@ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
if (ret_val != IXGBE_SUCCESS)
return ret_val;
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
- /* Configure CS4227 LINE side to 10G SR. */
- reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB +
- (hw->bus.lan_id << 12);
- reg_val = IXGBE_CS4227_SPEED_10G;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
- (hw->bus.lan_id << 12);
+ /* Configure CS4227 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
+ (hw->bus.lan_id << 12);
+ if (setup_linear)
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
-
- /* Configure CS4227 for HOST connection rate then type. */
- reg_slice = IXGBE_CS4227_HOST_SPARE22_MSB +
- (hw->bus.lan_id << 12);
- reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ?
- IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
-
- reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB +
- (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
-
- /* Setup XFI internal link. */
- ret_val = ixgbe_setup_ixfi_x550em(hw, &speed);
- } else {
- /* Configure internal PHY for KR/KX. */
- ixgbe_setup_kr_speed_x550em(hw, speed);
-
- /* Configure CS4227 LINE side to proper mode. */
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
- (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
- }
+ ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
+ reg_val);
return ret_val;
}
@@ -2922,8 +2831,8 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
return IXGBE_ERR_PHY_ADDR_INVALID;
}
- /* Get external PHY device id */
- ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ /* Get external PHY SKU id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
if (ret_val != IXGBE_SUCCESS)
@@ -2932,7 +2841,7 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
/* When configuring quad port CS4223, the MAC instance is part
* of the slice offset.
*/
- if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
slice_offset = (hw->bus.lan_id +
(hw->bus.instance_id << 1)) << 12;
else
@@ -2940,12 +2849,26 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
/* Configure CS4227/CS4223 LINE side to proper mode. */
reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
+ (IXGBE_CS4227_EDC_MODE_SR << 1));
+
if (setup_linear)
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
else
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
ret_val = hw->phy.ops.write_reg(hw, reg_slice,
IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+
+ /* Flush previous write with a read */
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
}
return ret_val;
}
@@ -3033,6 +2956,10 @@ STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
s32 status;
u32 reg_val;
+ /* iXFI is only supported with X552 */
+ if (mac->type != ixgbe_mac_X550EM_x)
+ return IXGBE_ERR_LINK_SETUP;
+
/* Disable AN and force speed to 10G Serial. */
status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -3129,7 +3056,8 @@ s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
return IXGBE_ERR_CONFIG;
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
/* If link is down, there is no setup necessary so return */
status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status != IXGBE_SUCCESS)
@@ -3745,9 +3673,9 @@ s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u16 ext_ability = 0;
DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
@@ -3756,6 +3684,21 @@ u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ }
+ }
+ /* fall through */
+ case ixgbe_phy_x550em_xfi:
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
IXGBE_PHYSICAL_LAYER_1000BASE_KX;
break;
@@ -3772,6 +3715,20 @@ u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
+ case ixgbe_phy_fw:
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+ break;
+ case ixgbe_phy_sgmii:
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ case ixgbe_phy_ext_1g_t:
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ break;
default:
break;
}
@@ -4071,6 +4028,9 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
/* This device does not fully support AN. */
hw->fc.disable_fc_autoneg = true;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->fc.disable_fc_autoneg = true;
+ break;
default:
break;
}
@@ -4177,7 +4137,7 @@ void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
- u16 reg, pcs_an_lp, pcs_an;
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
bool link_up;
@@ -4199,34 +4159,20 @@ void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
}
/* Check if auto-negotiation has completed */
- status = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_STATUS,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
if (status != IXGBE_SUCCESS ||
- (reg & IXGBE_M88E1500_COPPER_STATUS_AN_DONE) == 0) {
+ !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
DEBUGOUT("Auto-Negotiation did not complete\n");
status = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
- /* Get the advertized flow control */
- status = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_AN,
- IXGBE_MDIO_ZERO_DEV_TYPE, &pcs_an);
- if (status != IXGBE_SUCCESS)
- goto out;
-
- /* Get link partner's flow control */
- status = hw->phy.ops.read_reg(hw,
- IXGBE_M88E1500_COPPER_AN_LP_ABILITY,
- IXGBE_MDIO_ZERO_DEV_TYPE, &pcs_an_lp);
- if (status != IXGBE_SUCCESS)
- goto out;
-
/* Negotiate the flow control */
- status = ixgbe_negotiate_fc(hw, (u32)pcs_an, (u32)pcs_an_lp,
- IXGBE_M88E1500_COPPER_AN_PAUSE,
- IXGBE_M88E1500_COPPER_AN_AS_PAUSE,
- IXGBE_M88E1500_COPPER_AN_LP_PAUSE,
- IXGBE_M88E1500_COPPER_AN_LP_AS_PAUSE);
+ status = ixgbe_negotiate_fc(hw, info[0], info[0],
+ FW_PHY_ACT_GET_LINK_INFO_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_FC_TX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
out:
if (status == IXGBE_SUCCESS) {
@@ -4238,83 +4184,6 @@ out:
}
/**
- * ixgbe_setup_fc_sgmii_x550em_a - Set up flow control
- * @hw: pointer to hardware structure
- *
- * Called at init time to set up flow control.
- **/
-s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw)
-{
- u16 reg;
- s32 rc;
-
- /* Validate the requested mode */
- if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
- ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
- "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
- }
-
- if (hw->fc.requested_mode == ixgbe_fc_default)
- hw->fc.requested_mode = ixgbe_fc_full;
-
- /* Read contents of the Auto-Negotiation register, page 0 reg 4 */
- rc = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_AN,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
- if (rc)
- goto out;
-
- /* Disable all the settings related to Flow control Auto-negotiation */
- reg &= ~IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- reg &= ~IXGBE_M88E1500_COPPER_AN_PAUSE;
-
- /* Configure the Asymmetric and symmetric pause according to the user
- * requested mode.
- */
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_full:
- reg |= IXGBE_M88E1500_COPPER_AN_PAUSE;
- reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- break;
- case ixgbe_fc_rx_pause:
- reg |= IXGBE_M88E1500_COPPER_AN_PAUSE;
- reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- break;
- case ixgbe_fc_tx_pause:
- reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- break;
- default:
- break;
- }
-
- /* Write back to the Auto-Negotiation register with newly configured
- * fields
- */
- hw->phy.ops.write_reg(hw, IXGBE_M88E1500_COPPER_AN,
- IXGBE_MDIO_ZERO_DEV_TYPE, reg);
-
- /* In this section of the code we restart Auto-negotiation */
-
- /* Read the CONTROL register, Page 0 reg 0 */
- rc = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_CTRL,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
- if (rc)
- goto out;
-
- /* Set the bit to restart Auto-Neg. The bit to enable Auto-neg is ON
- * by default
- */
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESTART_AN;
-
- /* write the new values to the register to restart Auto-Negotiation */
- hw->phy.ops.write_reg(hw, IXGBE_M88E1500_COPPER_CTRL,
- IXGBE_MDIO_ZERO_DEV_TYPE, reg);
-
-out:
- return rc;
-}
-
-/**
* ixgbe_setup_fc_backplane_x550em_a - Set up flow control
* @hw: pointer to hardware structure
*
@@ -4481,21 +4350,34 @@ STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
status = IXGBE_SUCCESS;
if (hmask)
status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
- if (status)
+ if (status) {
+ DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
+ status);
return status;
+ }
if (!(mask & IXGBE_GSSR_TOKEN_SM))
return IXGBE_SUCCESS;
status = ixgbe_get_phy_token(hw);
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
+ status);
+
if (status == IXGBE_SUCCESS)
return IXGBE_SUCCESS;
if (hmask)
ixgbe_release_swfw_sync_X540(hw, hmask);
- if (status != IXGBE_ERR_TOKEN_RETRY)
+
+ if (status != IXGBE_ERR_TOKEN_RETRY) {
+ DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
+ status);
return status;
+ }
}
+ DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
return status;
}
@@ -4631,8 +4513,10 @@ s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
else
force_speed = IXGBE_LINK_SPEED_1GB_FULL;
- /* If internal link mode is XFI, then setup XFI internal link. */
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ /* If X552 and internal link mode is XFI, then setup XFI internal link.
+ */
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
if (status != IXGBE_SUCCESS)
@@ -4655,7 +4539,7 @@ s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
u32 status;
- u16 autoneg_status;
+ u16 i, autoneg_status = 0;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
return IXGBE_ERR_CONFIG;
@@ -4668,21 +4552,18 @@ s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
return status;
/* MAC link is up, so check external PHY link.
- * Read this twice back to back to indicate current status.
+ * X557 PHY. The link status bit is latched low and by itself only
+ * indicates a link drop, so perform back-to-back reads to obtain the
+ * current link state.
*/
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
-
- if (status != IXGBE_SUCCESS)
- return status;
-
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
+ for (i = 0; i < 2; i++) {
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
- if (status != IXGBE_SUCCESS)
- return status;
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
/* If external PHY link is not up, then indicate link not up */
if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
@@ -4729,7 +4610,8 @@ s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
- return IXGBE_SUCCESS;
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_on_generic(hw, led_idx);
}
/**
@@ -4753,5 +4635,67 @@ s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
- return IXGBE_SUCCESS;
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_off_generic(hw, led_idx);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Sends the driver version number to firmware through the manageability
+ * block. Returns IXGBE_SUCCESS on success, IXGBE_ERR_SWFW_SYNC if an error
+ * is encountered while acquiring the semaphore, or
+ * IXGBE_ERR_HOST_INTERFACE_COMMAND if the command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len, const char *driver_ver)
+{
+ struct ixgbe_hic_drv_info2 fw_cmd;
+ s32 ret_val = IXGBE_SUCCESS;
+ int i;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
+
+ if ((len == 0) || (driver_ver == NULL) ||
+ (len > sizeof(fw_cmd.driver_string)))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ memcpy(fw_cmd.driver_string, driver_ver, len);
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (ret_val != IXGBE_SUCCESS)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ return ret_val;
}
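For illustration only (a hypothetical call site, not part of the patch): a driver init path could report its version string as below; the length passed here includes the NUL terminator and is assumed to fit within fw_cmd.driver_string:

        /* Hypothetical: report driver version 17.05.0 to the firmware. */
        static const char drv_ver[] = "17.05.0";
        s32 rc;

        rc = ixgbe_set_fw_drv_ver_x550(hw, 17, 5, 0, 0,
                                       (u16)sizeof(drv_ver), drv_ver);
        if (rc != IXGBE_SUCCESS)
                DEBUGOUT1("set_fw_drv_ver failed, status = %d\n", rc);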
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h
index cd4db29c..6d188741 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.h
+++ b/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -57,8 +57,6 @@ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
u16 data);
-s32 ixgbe_set_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
-s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
unsigned int pool);
void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
@@ -67,6 +65,8 @@ s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data);
s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data);
+s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len, const char *driver_ver);
s32 ixgbe_get_phy_token(struct ixgbe_hw *);
s32 ixgbe_put_phy_token(struct ixgbe_hw *);
s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
@@ -88,7 +88,7 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed);
s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index bac36e0d..2083cded 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,10 +56,12 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
+#include <rte_hash_crc.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -72,8 +74,6 @@
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"
-#include "rte_pmd_ixgbe.h"
-
/*
* High threshold controlling when to start sending XOFF frames. Must be at
* least 8 bytes less than receive packet buffer size. This value is in units
@@ -154,17 +154,16 @@
#define IXGBE_QDE_STRIP_TAG 0x00000004
#define IXGBE_VTEICR_MASK 0x07
-enum ixgbevf_xcast_modes {
- IXGBEVF_XCAST_MODE_NONE = 0,
- IXGBEVF_XCAST_MODE_MULTI,
- IXGBEVF_XCAST_MODE_ALLMULTI,
-};
-
#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
@@ -183,16 +182,27 @@ static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int size);
static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbe_dev_xstats_get_names_by_id(
+ __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx);
+static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
@@ -231,18 +241,21 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
-static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
-static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
+static bool is_device_supported(struct rte_eth_dev *dev,
+ struct rte_pci_driver *drv);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
@@ -276,12 +289,6 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
-static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
- uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
-static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
@@ -297,18 +304,13 @@ static void ixgbe_configure_msix(struct rte_eth_dev *dev);
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate);
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk);
-static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
- bool add);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
@@ -318,17 +320,11 @@ static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
-static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter,
- bool add);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter);
-static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -371,8 +367,7 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
const struct timespec *timestamp);
-static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static void ixgbevf_dev_interrupt_handler(void *param);
static int ixgbe_dev_l2_tunnel_eth_type_conf
(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
@@ -389,6 +384,8 @@ static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_filter_restore(struct rte_eth_dev *dev);
+static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
/*
* Define VF Stats MACRO for Non "cleared on read" register
@@ -517,6 +514,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
.nb_max = IXGBE_MAX_RING_DESC,
.nb_min = IXGBE_MIN_RING_DESC,
.nb_align = IXGBE_TXD_ALIGN,
+ .nb_seg_max = IXGBE_TX_MAX_SEG,
+ .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
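The new nb_seg_max/nb_mtu_seg_max limits pair with the tx_pkt_prepare hook installed later in this patch: an application can run rte_eth_tx_prepare() on a burst so the PMD checks segment counts and offload metadata before rte_eth_tx_burst(). A minimal, hedged application-side sketch (headers and error handling trimmed; assumes port, queue and the mbuf array are set up elsewhere):

/* Prepare then transmit a burst; rte_errno explains a rejected packet. */
static uint16_t
send_burst_with_prepare(uint8_t port, uint16_t queue,
			struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port, queue, pkts, nb_pkts);

	if (nb_prep < nb_pkts)
		printf("tx_prepare stopped at %u: %s\n",
		       nb_prep, strerror(rte_errno));

	return rte_eth_tx_burst(port, queue, pkts, nb_prep);
}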
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -533,10 +532,13 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.link_update = ixgbe_dev_link_update,
.stats_get = ixgbe_dev_stats_get,
.xstats_get = ixgbe_dev_xstats_get,
+ .xstats_get_by_id = ixgbe_dev_xstats_get_by_id,
.stats_reset = ixgbe_dev_stats_reset,
.xstats_reset = ixgbe_dev_xstats_reset,
.xstats_get_names = ixgbe_dev_xstats_get_names,
+ .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
+ .fw_version_get = ixgbe_fw_version_get,
.dev_infos_get = ixgbe_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
.mtu_set = ixgbe_dev_mtu_set,
@@ -554,6 +556,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_queue_count = ixgbe_dev_rx_queue_count,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
@@ -568,12 +572,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
.mirror_rule_set = ixgbe_mirror_rule_set,
.mirror_rule_reset = ixgbe_mirror_rule_reset,
- .set_vf_rx_mode = ixgbe_set_pool_rx_mode,
- .set_vf_rx = ixgbe_set_pool_rx,
- .set_vf_tx = ixgbe_set_pool_tx,
- .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter,
.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
- .set_vf_rate_limit = ixgbe_set_vf_rate_limit,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
#ifdef RTE_NIC_BYPASS
@@ -637,6 +636,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
@@ -744,6 +745,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
sizeof(rte_ixgbe_stats_strings[0]))
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+ {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_untagged)},
+ {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_encrypted)},
+ {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_protected)},
+ {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+ out_octets_encrypted)},
+ {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+ out_octets_protected)},
+ {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_untagged)},
+ {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_badtag)},
+ {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_nosci)},
+ {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unknownsci)},
+ {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+ in_octets_decrypted)},
+ {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+ in_octets_validated)},
+ {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unchecked)},
+ {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_delayed)},
+ {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_late)},
+ {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_ok)},
+ {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_invalid)},
+ {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_notvalid)},
+ {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unusedsa)},
+ {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+ sizeof(rte_ixgbe_macsec_strings[0]))
+
/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
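The MACsec counters reuse the existing name/offset convention: each entry pairs an xstat name with an offsetof() into struct ixgbe_macsec_stats, so the xstats callbacks can walk one table instead of hard-coding fields. A minimal sketch of that walk (it mirrors the loops added further down; 'values' must hold IXGBE_NB_MACSEC_STATS entries):

/* Illustrative only: copy each MACsec counter out of the stats struct
 * using the name/offset table above.
 */
static void
macsec_stats_to_values(const struct ixgbe_macsec_stats *st, uint64_t *values)
{
	unsigned int i;

	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++)
		values[i] = *(const uint64_t *)((const char *)st +
				rte_ixgbe_macsec_strings[i].offset);
}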
@@ -859,6 +905,8 @@ ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
IXGBE_WRITE_FLUSH(hw);
+ if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+ status = IXGBE_SUCCESS;
return status;
}
@@ -1083,7 +1131,8 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta *shadow_vfta =
@@ -1094,6 +1143,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct ixgbe_bw_conf *bw_conf =
+ IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
uint32_t ctrl_ext;
uint16_t csum;
int diag, i;
@@ -1103,6 +1154,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &ixgbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
/*
* For secondary processes, we don't initialise any further as primary
@@ -1127,9 +1179,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = eth_dev->pci_dev;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -1196,6 +1248,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
diag = ixgbe_init_hw(hw);
}
+ if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
+ diag = IXGBE_SUCCESS;
+
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
"LOM. Please be aware there may be issues associated "
@@ -1272,20 +1327,37 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
- rte_intr_callback_register(&pci_dev->intr_handle,
- ixgbe_dev_interrupt_handler,
- (void *)eth_dev);
+ rte_intr_callback_register(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
/* enable uio/vfio intr/eventfd mapping */
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
/* enable support intr */
ixgbe_enable_intr(eth_dev);
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct ixgbe_filter_info));
+
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
- memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ /* initialize flow director filter list & hash */
+ ixgbe_fdir_filter_init(eth_dev);
+
+ /* initialize l2 tunnel filter list & hash */
+ ixgbe_l2_tn_filter_init(eth_dev);
+
+ TAILQ_INIT(&filter_ntuple_list);
+ TAILQ_INIT(&filter_ethertype_list);
+ TAILQ_INIT(&filter_syn_list);
+ TAILQ_INIT(&filter_fdir_list);
+ TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&ixgbe_flow_list);
+
+ /* initialize bandwidth configuration info */
+ memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
return 0;
}
@@ -1293,7 +1365,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
PMD_INIT_FUNC_TRACE();
@@ -1302,7 +1375,6 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
return -EPERM;
hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- pci_dev = eth_dev->pci_dev;
if (hw->adapter_stopped == 0)
ixgbe_dev_close(eth_dev);
@@ -1315,9 +1387,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
ixgbe_swfw_lock_reset(hw);
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- ixgbe_dev_interrupt_handler, (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
/* uninitialize PF if max_vfs not zero */
ixgbe_pf_host_uninit(eth_dev);
@@ -1328,9 +1400,154 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->hash_mac_addrs);
eth_dev->data->hash_mac_addrs = NULL;
+ /* remove all the fdir filters & hash */
+ ixgbe_fdir_filter_uninit(eth_dev);
+
+ /* remove all the L2 tunnel filters & hash */
+ ixgbe_l2_tn_filter_uninit(eth_dev);
+
+ /* Remove all ntuple filters of the device */
+ ixgbe_ntuple_filter_uninit(eth_dev);
+
+ /* clear all the filters list */
+ ixgbe_filterlist_flush();
+
return 0;
}
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple,
+ entries);
+ rte_free(p_5tuple);
+ }
+ memset(filter_info->fivetuple_mask, 0,
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_handle)
+ rte_hash_free(fdir_info->hash_handle);
+
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ if (l2_tn_info->hash_map)
+ rte_free(l2_tn_info->hash_map);
+ if (l2_tn_info->hash_handle)
+ rte_hash_free(l2_tn_info->hash_handle);
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+ l2_tn_filter,
+ entries);
+ rte_free(l2_tn_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = IXGBE_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(union ixgbe_atr_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", eth_dev->data->name);
+ fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_fdir_filter *) *
+ IXGBE_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ return -ENOMEM;
+ }
+ fdir_info->mask_added = FALSE;
+
+ return 0;
+}
+
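With the hash table and list in place, adding a flow director filter keys the rte_hash on the union ixgbe_atr_input signature and stores the filter pointer in hash_map at the slot rte_hash_add_key() returns. A hedged sketch of that pattern (not the helper this patch actually adds; the key member name 'ixgbe_fdir' is assumed):

/* Hedged sketch: insert a filter into the fdir hash and list. */
static int
fdir_filter_insert_sketch(struct ixgbe_hw_fdir_info *fdir_info,
			  struct ixgbe_fdir_filter *filter)
{
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_handle, &filter->ixgbe_fdir);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to insert fdir filter (ret=%d)", ret);
		return ret;
	}
	fdir_info->hash_map[ret] = filter;
	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, entries);

	return 0;
}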
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters l2_tn_hash_params = {
+ .name = l2_tn_hash_name,
+ .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
+ .key_len = sizeof(struct ixgbe_l2_tn_key),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&l2_tn_info->l2_tn_list);
+ snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+ "l2_tn_%s", eth_dev->data->name);
+ l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+ if (!l2_tn_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+ return -EINVAL;
+ }
+ l2_tn_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_l2_tn_filter *) *
+ IXGBE_MAX_L2_TN_FILTER_NUM,
+ 0);
+ if (!l2_tn_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for L2 TN hash map!");
+ return -ENOMEM;
+ }
+ l2_tn_info->e_tag_en = FALSE;
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+
+ return 0;
+}
/*
* Negotiate mailbox API version with the PF.
* After reset API version is always set to the basic one (ixgbe_mbox_api_10).
@@ -1381,7 +1598,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
int diag;
uint32_t tc, tcs;
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta *shadow_vfta =
@@ -1419,9 +1637,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -1513,10 +1730,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
return -EIO;
}
- rte_intr_callback_register(&pci_dev->intr_handle,
- ixgbevf_dev_interrupt_handler,
- (void *)eth_dev);
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_callback_register(intr_handle,
+ ixgbevf_dev_interrupt_handler, eth_dev);
+ rte_intr_enable(intr_handle);
ixgbevf_intr_enable(hw);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
@@ -1531,8 +1747,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
PMD_INIT_FUNC_TRACE();
@@ -1554,40 +1771,52 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
- rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- ixgbevf_dev_interrupt_handler,
- (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ixgbevf_dev_interrupt_handler, eth_dev);
return 0;
}
-static struct eth_driver rte_ixgbe_pmd = {
- .pci_drv = {
- .id_table = pci_id_ixgbe_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_ixgbe_dev_init,
- .eth_dev_uninit = eth_ixgbe_dev_uninit,
- .dev_private_size = sizeof(struct ixgbe_adapter),
+static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
+}
+
+static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_ixgbe_pmd = {
+ .id_table = pci_id_ixgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_ixgbe_pci_probe,
+ .remove = eth_ixgbe_pci_remove,
};
+static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
+}
+
+static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
+}
+
/*
* virtual function driver struct
*/
-static struct eth_driver rte_ixgbevf_pmd = {
- .pci_drv = {
- .id_table = pci_id_ixgbevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_ixgbevf_dev_init,
- .eth_dev_uninit = eth_ixgbevf_dev_uninit,
- .dev_private_size = sizeof(struct ixgbe_adapter),
+static struct rte_pci_driver rte_ixgbevf_pmd = {
+ .id_table = pci_id_ixgbevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_ixgbevf_pci_probe,
+ .remove = eth_ixgbevf_pci_remove,
};
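With the eth_driver wrapper gone, the plain rte_pci_driver structs above are handed to the EAL directly. The registration itself is outside this hunk; for reference only, in this DPDK generation it typically takes the form:

RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);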
static int
@@ -1947,6 +2176,8 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
static int
ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+
switch (nb_rx_q) {
case 1:
case 2:
@@ -1960,7 +2191,7 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
}
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
- RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
return 0;
}
@@ -2180,6 +2411,80 @@ ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
}
}
+int
+ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ struct rte_eth_link link;
+ uint8_t nb_q_per_pool;
+ uint32_t queue_stride;
+ uint32_t queue_idx, idx = 0, vf_idx;
+ uint32_t queue_end;
+ uint16_t total_rate = 0;
+ struct rte_pci_device *pci_dev;
+
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+ rte_eth_link_get_nowait(dev->data->port_id, &link);
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (tx_rate > link.link_speed)
+ return -EINVAL;
+
+ if (q_msk == 0)
+ return 0;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ queue_idx = vf * queue_stride;
+ queue_end = queue_idx + nb_q_per_pool - 1;
+ if (queue_end >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (vfinfo) {
+ for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
+ if (vf_idx == vf)
+ continue;
+ for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+ idx++)
+ total_rate += vfinfo[vf_idx].tx_rate[idx];
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ /* Store tx_rate for this vf. */
+ for (idx = 0; idx < nb_q_per_pool; idx++) {
+ if (((uint64_t)0x1 << idx) & q_msk) {
+ if (vfinfo[vf].tx_rate[idx] != tx_rate)
+ vfinfo[vf].tx_rate[idx] = tx_rate;
+ total_rate += tx_rate;
+ }
+ }
+
+ if (total_rate > dev->data->dev_link.link_speed) {
+		/* Reset the stored TX rate of the VF if it would cause the
+		 * total to exceed the link speed.
+		 */
+ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+ return -EINVAL;
+ }
+
+ /* Set RTTBCNRC of each queue/pool for vf X */
+ for (; queue_idx <= queue_end; queue_idx++) {
+ if (0x1 & q_msk)
+ ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+ q_msk = q_msk >> 1;
+ }
+
+ return 0;
+}
+
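ixgbe_set_vf_rate_limit() is no longer static, so VF-management code outside this file can call it; bit i of q_msk selects queue i of the VF's pool for the RTTBCNRC rate. A hedged caller-side sketch capping queues 0 and 1 of VF 1 at 1000 Mbps ('dev' is the PF's rte_eth_dev):

	uint64_t q_msk = 0x3;	/* queues 0 and 1 of the VF's pool */
	int ret = ixgbe_set_vf_rate_limit(dev, 1, 1000, q_msk);

	if (ret != 0)
		PMD_DRV_LOG(ERR, "VF rate limit failed: %d", ret);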
/*
* Configure device link speed and setup link.
* It returns 0 on success.
@@ -2191,7 +2496,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
@@ -2253,7 +2559,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -2291,10 +2597,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
/* Restore vf rate limit */
if (vfinfo != NULL) {
- for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+ for (vf = 0; vf < pci_dev->max_vfs; vf++)
for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
if (vfinfo[vf].tx_rate[idx] != 0)
- ixgbe_set_vf_rate_limit(dev, vf,
+ ixgbe_set_vf_rate_limit(
+ dev, vf,
vfinfo[vf].tx_rate[idx],
1 << idx);
}
@@ -2366,13 +2673,13 @@ skip_link_setup:
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
ixgbe_dev_lsc_interrupt_setup(dev);
+ ixgbe_dev_macsec_interrupt_setup(dev);
} else {
rte_intr_callback_unregister(intr_handle,
- ixgbe_dev_interrupt_handler,
- (void *)dev);
+ ixgbe_dev_interrupt_handler, dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplex");
}
/* check if rxq interrupt is enabled */
@@ -2385,6 +2692,8 @@ skip_link_setup:
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
+ ixgbe_l2_tunnel_conf(dev);
+ ixgbe_filter_restore(dev);
return 0;
@@ -2405,10 +2714,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct ixgbe_filter_info *filter_info =
- IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
PMD_INIT_FUNC_TRACE();
@@ -2423,8 +2730,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
/* stop adapter */
ixgbe_stop_adapter(hw);
- for (vf = 0; vfinfo != NULL &&
- vf < dev->pci_dev->max_vfs; vf++)
+ for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
vfinfo[vf].clear_to_send = false;
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
@@ -2445,17 +2751,6 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
memset(&link, 0, sizeof(link));
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- /* Remove all ntuple filters of the device */
- for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
- p_5tuple != NULL; p_5tuple = p_5tuple_next) {
- p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
- TAILQ_REMOVE(&filter_info->fivetuple_list,
- p_5tuple, entries);
- rte_free(p_5tuple);
- }
- memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
-
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
rte_intr_callback_register(intr_handle,
@@ -2557,6 +2852,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
static void
ixgbe_read_stats_registers(struct ixgbe_hw *hw,
struct ixgbe_hw_stats *hw_stats,
+ struct ixgbe_macsec_stats *macsec_stats,
uint64_t *total_missed_rx, uint64_t *total_qbrc,
uint64_t *total_qprc, uint64_t *total_qprdc)
{
@@ -2564,9 +2860,9 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
uint32_t delta_gprc = 0;
unsigned i;
/* Workaround for RX byte count not including CRC bytes when CRC
-+ * strip is enabled. CRC bytes are removed from counters when crc_strip
+ * strip is enabled. CRC bytes are removed from counters when crc_strip
* is disabled.
-+ */
+ */
int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
IXGBE_HLREG0_RXCRCSTRP);
@@ -2726,6 +3022,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
/* Flow Director Stats registers */
hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+ /* MACsec Stats registers */
+ macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+ macsec_stats->out_pkts_encrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+ macsec_stats->out_pkts_protected +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+ macsec_stats->out_octets_encrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+ macsec_stats->out_octets_protected +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+ macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+ macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+ macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+ macsec_stats->in_pkts_unknownsci +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+ macsec_stats->in_octets_decrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+ macsec_stats->in_octets_validated +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+ macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+ macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+ macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+ for (i = 0; i < 2; i++) {
+ macsec_stats->in_pkts_ok +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+ macsec_stats->in_pkts_invalid +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+ macsec_stats->in_pkts_notvalid +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+ }
+ macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+ macsec_stats->in_pkts_notusingsa +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
}
/*
@@ -2738,6 +3068,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_stats *hw_stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
unsigned i;
@@ -2746,8 +3079,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
total_qprc = 0;
total_qprdc = 0;
- ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
- &total_qprc, &total_qprdc);
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+ &total_qbrc, &total_qprc, &total_qprdc);
if (stats == NULL)
return;
@@ -2799,13 +3132,13 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
/* This function calculates the number of xstats based on the current config */
static unsigned
ixgbe_xstats_calc_num(void) {
- return IXGBE_NB_HW_STATS +
+ return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
}
static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
{
const unsigned cnt_stats = ixgbe_xstats_calc_num();
unsigned stat, i, count;
@@ -2826,6 +3159,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
count++;
}
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_macsec_strings[i].name);
+ count++;
+ }
+
/* RX Priority Stats */
for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2851,6 +3193,84 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
return cnt_stats;
}
+static int ixgbe_dev_xstats_get_names_by_id(
+ __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ if (!ids) {
+ const unsigned int cnt_stats = ixgbe_xstats_calc_num();
+ unsigned int stat, i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ /* Note: limit >= cnt_stats checked upstream
+			 * in rte_eth_xstats_get_names()
+ */
+
+ /* Extended stats from ixgbe_hw_stats */
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_stats_strings[i].name);
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_macsec_strings[i].name);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", i,
+ rte_ixgbe_rxq_strings[stat].name);
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", i,
+ rte_ixgbe_txq_strings[stat].name);
+ count++;
+ }
+ }
+ }
+ return cnt_stats;
+ }
+
+ uint16_t i;
+ uint16_t size = ixgbe_xstats_calc_num();
+ struct rte_eth_xstat_name xstats_names_copy[size];
+
+ ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
+ size);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= size) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+}
+
static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, unsigned limit)
{
@@ -2875,6 +3295,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_stats *hw_stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
unsigned i, stat, count = 0;
@@ -2888,8 +3311,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
total_qprc = 0;
total_qprdc = 0;
- ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
- &total_qprc, &total_qprdc);
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+ &total_qbrc, &total_qprc, &total_qprdc);
/* If this is a reset xstats is NULL, and we have cleared the
* registers by reading them.
@@ -2906,6 +3329,14 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count++;
}
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+ rte_ixgbe_macsec_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
/* RX Priority Stats */
for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2930,11 +3361,105 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return count;
}
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ if (!ids) {
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(
+ dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
+ uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+ unsigned int i, stat, count = 0;
+
+ count = ixgbe_xstats_calc_num();
+
+ if (!ids && n < count)
+ return count;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+ total_qprdc = 0;
+
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
+ &total_missed_rx, &total_qbrc, &total_qprc,
+ &total_qprdc);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!ids && !values)
+ return 0;
+
+ /* Extended stats from ixgbe_hw_stats */
+ count = 0;
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ values[count] = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_stats_strings[i].offset);
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ values[count] = *(uint64_t *)(((char *)macsec_stats) +
+ rte_ixgbe_macsec_strings[i].offset);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ values[count] =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_rxq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ values[count] =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_txq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+ return count;
+ }
+
+ uint16_t i;
+ uint16_t size = ixgbe_xstats_calc_num();
+ uint64_t values_copy[size];
+
+ ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= size) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+}
+
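With MACsec counters folded into the xstats table, an application sees them through the usual two-pass xstats flow: query the count, allocate, then fetch names and values (the new *_by_id callbacks are reached through the corresponding rte_eth_xstats_*_by_id wrappers, whose exact signatures depend on the DPDK release). A minimal sketch using the generic calls, includes and error handling trimmed:

/* Illustrative only: dump every xstat of 'port', MACsec ones included. */
static void
dump_xstats(uint8_t port)
{
	int i, n = rte_eth_xstats_get_names(port, NULL, 0);
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *vals;

	if (n <= 0)
		return;
	names = calloc(n, sizeof(*names));
	vals = calloc(n, sizeof(*vals));
	if (names != NULL && vals != NULL &&
	    rte_eth_xstats_get_names(port, names, n) == n &&
	    rte_eth_xstats_get(port, vals, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[vals[i].id].name, vals[i].value);
	}
	free(names);
	free(vals);
}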
static void
ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
struct ixgbe_hw_stats *stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
unsigned count = ixgbe_xstats_calc_num();
@@ -2943,6 +3468,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
/* Reset software totals */
memset(stats, 0, sizeof(*stats));
+ memset(macsec_stats, 0, sizeof(*macsec_stats));
}
static void
@@ -2991,6 +3517,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
/* Extended stats */
for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbevf_stats_strings[i].offset);
}
@@ -3031,12 +3558,35 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
hw_stats->vfgotc = 0;
}
+static int
+ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u16 eeprom_verh, eeprom_verl;
+ u32 etrack_id;
+ int ret;
+
+ ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
+ ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
+
+ etrack_id = (eeprom_verh << 16) | eeprom_verl;
+ ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
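The new callback reports the EEPROM eTrack ID as the firmware version and follows the ethdev convention: 0 on success, or the required buffer size (including the terminating NUL) if the caller's buffer is too small. A hedged caller-side sketch, assuming the rte_eth_dev_fw_version_get() wrapper of this DPDK generation:

	char fw[32];

	if (rte_eth_dev_fw_version_get(port, fw, sizeof(fw)) == 0)
		printf("port %u firmware: %s\n", port, fw);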
static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
@@ -3052,7 +3602,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
@@ -3073,6 +3623,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
!RTE_ETH_DEV_SRIOV(dev).active)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
if (hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3086,6 +3640,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO;
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
if (hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3166,15 +3724,17 @@ static void
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
@@ -3223,8 +3783,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_link link, old;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int link_up;
int diag;
+ u32 speed = 0;
+ bool autoneg = false;
link.link_status = ETH_LINK_DOWN;
link.link_speed = 0;
@@ -3234,6 +3798,14 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
hw->mac.get_link_status = true;
+ if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
+ ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+ speed = hw->phy.autoneg_advertised;
+ if (!speed)
+ ixgbe_get_link_capabilities(hw, &speed, &autoneg);
+ ixgbe_setup_link(hw, speed, true);
+ }
+
/* check if it needs to wait to complete, if lsc interrupt is enabled */
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
@@ -3251,10 +3823,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
if (link_up == 0) {
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
if (link.link_status == old.link_status)
return -1;
return 0;
}
+ intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -3381,6 +3955,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
return 0;
}
+/**
+ * It enables the MACsec interrupt.
+ * It will be called only once, during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= IXGBE_EICR_LINKSEC;
+
+ return 0;
+}
+
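Once IXGBE_EICR_LINKSEC is in the mask, the delayed interrupt handler further down raises RTE_ETH_EVENT_MACSEC toward the application. A hedged sketch of registering for that event; the three-argument callback prototype matches this DPDK generation and changed in later releases:

/* Hedged sketch: application-side registration for the MACsec event. */
static void
macsec_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	RTE_SET_USED(param);
	if (type == RTE_ETH_EVENT_MACSEC)
		printf("port %u: MACsec interrupt\n", port_id);
}

	/* during application init: */
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_MACSEC,
				      macsec_event_cb, NULL);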
/*
* It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
*
@@ -3415,6 +4011,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
if (eicr & IXGBE_EICR_MAILBOX)
intr->flags |= IXGBE_FLAG_MAILBOX;
+ if (eicr & IXGBE_EICR_LINKSEC)
+ intr->flags |= IXGBE_FLAG_MACSEC;
+
if (hw->mac.type == ixgbe_mac_X550EM_x &&
hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3436,6 +4035,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
@@ -3451,10 +4051,10 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
(int)(dev->data->port_id));
}
PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
- dev->pci_dev->addr.domain,
- dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid,
- dev->pci_dev->addr.function);
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
}
/*
@@ -3468,7 +4068,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
* - On failure, a negative value.
*/
static int
-ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -3506,19 +4107,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
ixgbe_dev_link_status_print(dev);
- intr->mask_original = intr->mask;
- /* only disable lsc interrupt */
- intr->mask &= ~IXGBE_EIMS_LSC;
if (rte_eal_alarm_set(timeout * 1000,
ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
- else
- intr->mask = intr->mask_original;
+ else {
+ /* remember original mask */
+ intr->mask_original = intr->mask;
+ /* only disable lsc interrupt */
+ intr->mask &= ~IXGBE_EIMS_LSC;
+ }
}
PMD_DRV_LOG(DEBUG, "enable intr immediately");
ixgbe_enable_intr(dev);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -3541,12 +4143,16 @@ static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t eicr;
+ ixgbe_disable_intr(hw);
+
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_pf_mbx_process(dev);
@@ -3563,9 +4169,19 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
+ if (intr->flags & IXGBE_FLAG_MACSEC) {
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+ NULL);
+ intr->flags &= ~IXGBE_FLAG_MACSEC;
+ }
+
+ /* restore original mask */
+ intr->mask = intr->mask_original;
+ intr->mask_original = 0;
+
PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
ixgbe_enable_intr(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
}
/**
@@ -3581,13 +4197,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
* void
*/
static void
-ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+ixgbe_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
ixgbe_dev_interrupt_get_status(dev);
- ixgbe_dev_interrupt_action(dev);
+ ixgbe_dev_interrupt_action(dev, dev->intr_handle);
}
static int
@@ -3951,7 +4566,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, sp_reta_size);
+ "(%d)", reta_size, sp_reta_size);
return -EINVAL;
}
@@ -3998,7 +4613,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, sp_reta_size);
+ "(%d)", reta_size, sp_reta_size);
return -EINVAL;
}
@@ -4023,14 +4638,15 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
return 0;
}
-static void
+static int
ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t enable_addr = 1;
- ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
+ return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
+ pool, enable_addr);
}
static void
@@ -4044,41 +4660,26 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
static void
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+
ixgbe_remove_rar(dev, 0);
- ixgbe_add_rar(dev, addr, 0, 0);
+ ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
}
-int
-rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
- struct ether_addr *mac_addr)
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
- struct ixgbe_hw *hw;
- struct ixgbe_vf_info *vfinfo;
- int rar_entry;
- uint8_t *new_mac = (uint8_t *)(mac_addr);
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
+ if (strcmp(dev->data->drv_name, drv->driver.name))
+ return false;
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
- rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ return true;
+}
- if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
- rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
- ETHER_ADDR_LEN);
- return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
- IXGBE_RAH_AV);
- }
- return -EINVAL;
+bool
+is_ixgbe_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_ixgbe_pmd);
}
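is_ixgbe_supported() centralizes the port validation that the removed rte_pmd_ixgbe_* bodies below used to do one by one: an ixgbe-specific API can check that the port is really bound to this PMD before touching driver-private data. A hedged sketch of the pattern; the function name and arguments are hypothetical:

int
example_ixgbe_vf_api(uint8_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct ixgbe_hw *hw;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* ... program the relevant registers for 'vf' / 'on' here ... */
	RTE_SET_USED(hw);
	RTE_SET_USED(vf);
	RTE_SET_USED(on);

	return 0;
}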
static int
@@ -4089,6 +4690,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
ixgbe_dev_info_get(dev, &dev_info);
@@ -4099,7 +4701,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -4197,7 +4799,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t intr_vector = 0;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int err, mask = 0;
@@ -4242,7 +4845,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -4260,7 +4863,8 @@ static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -4401,15 +5005,15 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
}
-static int
-ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+int
+ixgbe_vt_check(struct ixgbe_hw *hw)
{
uint32_t reg_val;
- /* we only need to do this if VMDq is enabled */
+ /* if Virtualization Technology is enabled */
reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
- PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
+ PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
return -1;
}
@@ -4547,343 +5151,6 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
return new_val;
}
-static int
-ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
- uint16_t rx_mask, uint8_t on)
-{
- int val = 0;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
- " on 82599 hardware and newer");
- return -ENOTSUP;
- }
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
-
- if (on)
- vmolr |= val;
- else
- vmolr &= ~val;
-
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- if (pool >= ETH_64_POOLS)
- return -EINVAL;
-
- /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
- if (pool >= 32) {
- addr = IXGBE_VFRE(1);
- val = bit1 << (pool - 32);
- } else {
- addr = IXGBE_VFRE(0);
- val = bit1 << pool;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- if (pool >= ETH_64_POOLS)
- return -EINVAL;
-
- /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
- if (pool >= 32) {
- addr = IXGBE_VFTE(1);
- val = bit1 << (pool - 32);
- } else {
- addr = IXGBE_VFTE(0);
- val = bit1 << pool;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask, uint8_t vlan_on)
-{
- int ret = 0;
- uint16_t pool_idx;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
- for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
- if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
- ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
- vlan_on, false);
- if (ret < 0)
- return ret;
- }
- }
-
- return ret;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- struct ixgbe_mac_info *mac;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mac = &hw->mac;
-
- mac->ops.set_vlan_anti_spoofing(hw, on, vf);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- struct ixgbe_mac_info *mac;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mac = &hw->mac;
- mac->ops.set_mac_anti_spoofing(hw, on, vf);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
-{
- struct ixgbe_hw *hw;
- uint32_t ctrl;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (vlan_id > 4095)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
- if (vlan_id) {
- ctrl = vlan_id;
- ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
- } else {
- ctrl = 0;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t ctrl;
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
- /* enable or disable VMDQ loopback */
- if (on)
- ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
- else
- ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
-
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t reg_value;
- int i;
- int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- for (i = 0; i <= num_queues; i++) {
- reg_value = IXGBE_QDE_WRITE |
- (i << IXGBE_QDE_IDX_SHIFT) |
- (on & IXGBE_QDE_ENABLE);
- IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
- }
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t reg_value;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- /* only support VF's 0 to 63 */
- if ((vf >= dev_info.max_vfs) || (vf > 63))
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
- if (on)
- reg_value |= IXGBE_SRRCTL_DROP_EN;
- else
- reg_value &= ~IXGBE_SRRCTL_DROP_EN;
-
- IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
- uint16_t queues_per_pool;
- uint32_t q;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
-
- /* The PF has 128 queue pairs and in SRIOV configuration
- * those queues will be assigned to VF's, so RXDCTL
- * registers will be dealing with queues which will be
- * assigned to VF's.
- * Let's say we have SRIOV configured with 31 VF's then the
- * first 124 queues 0-123 will be allocated to VF's and only
- * the last 4 queues 123-127 will be assigned to the PF.
- */
-
- queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
-
- for (q = 0; q < queues_per_pool; q++)
- (*dev->dev_ops->vlan_strip_queue_set)(dev,
- q + vf * queues_per_pool, on);
- return 0;
-}
-
#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
@@ -4894,8 +5161,8 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
static int
ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
- struct rte_eth_mirror_conf *mirror_conf,
- uint8_t rule_id, uint8_t on)
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
{
uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
@@ -4918,7 +5185,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint8_t mirror_type = 0;
- if (ixgbe_vmdq_mode_check(hw) < 0)
+ if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
if (rule_id >= IXGBE_MAX_MIRROR_RULES)
@@ -4926,22 +5193,28 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
- mirror_conf->rule_type);
+ mirror_conf->rule_type);
return -EINVAL;
}
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
+ /* Check if vlan id is valid and find corresponding VLAN ID
+ /* Check if vlan id is valid and find conresponding VLAN ID
+ * index in VLVF
+ */
for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
- /* search vlan id related pool vlan filter index */
- reg_index = ixgbe_find_vlvf_slot(hw,
- mirror_conf->vlan.vlan_id[i],
- false);
+ /* search the pool VLAN filter index
+ * related to this VLAN ID
+ */
+ reg_index = ixgbe_find_vlvf_slot(
+ hw,
+ mirror_conf->vlan.vlan_id[i],
+ false);
if (reg_index < 0)
return -EINVAL;
- vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
+ vlvf = IXGBE_READ_REG(hw,
+ IXGBE_VLVF(reg_index));
if ((vlvf & IXGBE_VLVF_VIEN) &&
((vlvf & IXGBE_VLVF_VLANID_MASK) ==
mirror_conf->vlan.vlan_id[i]))
@@ -4971,7 +5244,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
}
}
- /*
+ /**
* if pool mirroring is enabled, write the related pool mask register;
* if disabled, clear the PFMRVM register
*/
@@ -5001,8 +5274,9 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
mr_ctl |= mirror_type;
mr_ctl &= mirror_rule_mask;
mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
- } else
+ } else {
mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
+ }
mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
@@ -5039,11 +5313,11 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
- if (ixgbe_vmdq_mode_check(hw) < 0)
+ if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
memset(&mr_info->mr_conf[rule_id], 0,
- sizeof(struct rte_eth_mirror_conf));
+ sizeof(struct rte_eth_mirror_conf));
/* clear PFVMCTL register */
IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
@@ -5062,6 +5336,8 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
static int
ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5071,7 +5347,7 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
RTE_SET_USED(queue_id);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -5094,6 +5370,8 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5113,7 +5391,7 @@ ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
mask &= (1 << (queue_id - 32));
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
}
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -5217,7 +5495,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t q_idx;
@@ -5250,7 +5529,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
static void
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
@@ -5365,62 +5645,7 @@ static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
return 0;
}
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vf_info *vfinfo =
- *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
- uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
- uint32_t queue_stride =
- IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
- uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
- uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
- uint16_t total_rate = 0;
-
- if (queue_end >= hw->mac.max_tx_queues)
- return -EINVAL;
-
- if (vfinfo != NULL) {
- for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
- if (vf_idx == vf)
- continue;
- for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
- idx++)
- total_rate += vfinfo[vf_idx].tx_rate[idx];
- }
- } else
- return -EINVAL;
-
- /* Store tx_rate for this vf. */
- for (idx = 0; idx < nb_q_per_pool; idx++) {
- if (((uint64_t)0x1 << idx) & q_msk) {
- if (vfinfo[vf].tx_rate[idx] != tx_rate)
- vfinfo[vf].tx_rate[idx] = tx_rate;
- total_rate += tx_rate;
- }
- }
-
- if (total_rate > dev->data->dev_link.link_speed) {
- /*
- * Reset stored TX rate of the VF if it causes exceed
- * link speed.
- */
- memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
- return -EINVAL;
- }
-
- /* Set RTTBCNRC of each queue/pool for vf X */
- for (; queue_idx <= queue_end; queue_idx++) {
- if (0x1 & q_msk)
- ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
- q_msk = q_msk >> 1;
- }
-
- return 0;
-}
-
-static void
+static int
ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
__attribute__((unused)) uint32_t index,
__attribute__((unused)) uint32_t pool)
@@ -5434,11 +5659,19 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
* set of PF resources used to store VF MAC addresses.
*/
if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
- return;
+ return -1;
diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
- if (diag == 0)
- return;
- PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+ if (diag != 0)
+ PMD_DRV_LOG(ERR, "Unable to add MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ diag);
+ return diag;
}
static void
@@ -5497,28 +5730,24 @@ ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
}
-#define MAC_TYPE_FILTER_SUP(type) do {\
- if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
- (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
- (type) != ixgbe_mac_X550EM_a)\
- return -ENOTSUP;\
-} while (0)
-
-static int
+int
ixgbe_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t syn_info;
uint32_t synqf;
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
- synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ syn_info = filter_info->syn_info;
if (add) {
- if (synqf & IXGBE_SYN_FILTER_ENABLE)
+ if (syn_info & IXGBE_SYN_FILTER_ENABLE)
return -EINVAL;
synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
@@ -5528,10 +5757,13 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
else
synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
} else {
- if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
return -ENOENT;
synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
}
+
+ filter_info->syn_info = synqf;
IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
IXGBE_WRITE_FLUSH(hw);
return 0;
@@ -5587,7 +5819,7 @@ ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
(struct rte_eth_syn_filter *)arg);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -5609,6 +5841,52 @@ convert_protocol_type(uint8_t protocol_value)
return IXGBE_FILTER_PROTOCOL_NONE;
}
+/* inject a 5-tuple filter to HW */
+static inline void
+ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint8_t mask = 0xff;
+
+ i = filter->index;
+
+ sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+ IXGBE_SDPQF_DSTPORT_SHIFT);
+ sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
+
+ ftqf = (uint32_t)(filter->filter_info.proto &
+ IXGBE_FTQF_PROTOCOL_MASK);
+ ftqf |= (uint32_t)((filter->filter_info.priority &
+ IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
+ if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+ mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= IXGBE_FTQF_DEST_PORT_MASK;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+ ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+ ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+ l34timir |= IXGBE_L34T_IMIR_RESERVE;
+ l34timir |= (uint32_t)(filter->queue <<
+ IXGBE_L34T_IMIR_QUEUE_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
+}
+
/*
* add a 5tuple filter
*
@@ -5626,13 +5904,9 @@ static int
ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
int i, idx, shift;
- uint32_t ftqf, sdpqf;
- uint32_t l34timir = 0;
- uint8_t mask = 0xff;
/*
* look for an unused 5tuple filter index,
@@ -5655,37 +5929,8 @@ ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- sdpqf = (uint32_t)(filter->filter_info.dst_port <<
- IXGBE_SDPQF_DSTPORT_SHIFT);
- sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
-
- ftqf = (uint32_t)(filter->filter_info.proto &
- IXGBE_FTQF_PROTOCOL_MASK);
- ftqf |= (uint32_t)((filter->filter_info.priority &
- IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
- if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
- mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
- if (filter->filter_info.dst_ip_mask == 0)
- mask &= IXGBE_FTQF_DEST_ADDR_MASK;
- if (filter->filter_info.src_port_mask == 0)
- mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
- if (filter->filter_info.dst_port_mask == 0)
- mask &= IXGBE_FTQF_DEST_PORT_MASK;
- if (filter->filter_info.proto_mask == 0)
- mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
- ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
- ftqf |= IXGBE_FTQF_POOL_MASK_EN;
- ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
-
- IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+ ixgbe_inject_5tuple_filter(dev, filter);
- l34timir |= IXGBE_L34T_IMIR_RESERVE;
- l34timir |= (uint32_t)(filter->queue <<
- IXGBE_L34T_IMIR_QUEUE_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
return 0;
}
@@ -5722,6 +5967,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ixgbe_hw *hw;
uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5731,7 +5977,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -5752,11 +5998,6 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
-#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
- if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
- return -ENOTSUP;\
-} while (0)
-
static inline struct ixgbe_5tuple_filter *
ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
struct ixgbe_5tuple_filter_info *key)
@@ -5864,7 +6105,7 @@ ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
* - On success, zero.
* - On failure, a negative value.
*/
-static int
+int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter,
bool add)
@@ -6009,48 +6250,7 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
return ret;
}
-static inline int
-ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
- uint16_t ethertype)
-{
- int i;
-
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (filter_info->ethertype_filters[i] == ethertype &&
- (filter_info->ethertype_mask & (1 << i)))
- return i;
- }
- return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
- uint16_t ethertype)
-{
- int i;
-
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (!(filter_info->ethertype_mask & (1 << i))) {
- filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] = ethertype;
- return i;
- }
- }
- return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
- uint8_t idx)
-{
- if (idx >= IXGBE_MAX_ETQF_FILTERS)
- return -1;
- filter_info->ethertype_mask &= ~(1 << idx);
- filter_info->ethertype_filters[idx] = 0;
- return idx;
-}
-
-static int
+int
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add)
@@ -6061,6 +6261,7 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
uint32_t etqf = 0;
uint32_t etqs = 0;
int ret;
+ struct ixgbe_ethertype_filter ethertype_filter;
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
@@ -6094,18 +6295,23 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
}
if (add) {
- ret = ixgbe_ethertype_filter_insert(filter_info,
- filter->ether_type);
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "ethertype filters are full.");
- return -ENOSYS;
- }
etqf = IXGBE_ETQF_FILTER_EN;
etqf |= (uint32_t)filter->ether_type;
etqs |= (uint32_t)((filter->queue <<
IXGBE_ETQS_RX_QUEUE_SHIFT) &
IXGBE_ETQS_RX_QUEUE);
etqs |= IXGBE_ETQS_QUEUE_EN;
+
+ ethertype_filter.ethertype = filter->ether_type;
+ ethertype_filter.etqf = etqf;
+ ethertype_filter.etqs = etqs;
+ ethertype_filter.conf = FALSE;
+ ret = ixgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSPC;
+ }
} else {
ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
if (ret < 0)
@@ -6201,7 +6407,7 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@@ -6219,9 +6425,15 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_L2_TUNNEL:
ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ixgbe_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
+ ret = -EINVAL;
break;
}
@@ -6869,12 +7081,15 @@ ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
if (l2_tunnel == NULL)
return -EINVAL;
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
break;
default:
@@ -6913,9 +7128,12 @@ ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = TRUE;
ret = ixgbe_e_tag_enable(hw);
break;
default:
@@ -6954,9 +7172,12 @@ ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = FALSE;
ret = ixgbe_e_tag_disable(hw);
break;
default:
@@ -7045,12 +7266,108 @@ ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
return -EINVAL;
}
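+/* Look up an L2 tunnel filter by its (type, tunnel ID) key; NULL if not found. */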
+static inline struct ixgbe_l2_tn_filter *
+ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return l2_tn_info->hash_map[ret];
+}
+
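+/* Add an L2 tunnel filter to the hash table and the filter list. */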
+static inline int
+ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_filter *l2_tn_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(l2_tn_info->hash_handle,
+ &l2_tn_filter->key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert L2 tunnel filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+ TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+ return 0;
+}
+
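+/* Remove an L2 tunnel filter from the hash table and the list, then free it. */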
+static inline int
+ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "No such L2 tunnel filter to delete %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_filter = l2_tn_info->hash_map[ret];
+ l2_tn_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+ rte_free(l2_tn_filter);
+
+ return 0;
+}
+
/* Add l2 tunnel filter */
-static int
+int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore)
{
- int ret = 0;
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+ struct ixgbe_l2_tn_filter *node;
+
+ if (!restore) {
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+
+ node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+ if (node) {
+ PMD_DRV_LOG(ERR,
+ "The L2 tunnel filter already exists!");
+ return -EINVAL;
+ }
+
+ node = rte_zmalloc("ixgbe_l2_tn",
+ sizeof(struct ixgbe_l2_tn_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ (void)rte_memcpy(&node->key,
+ &key,
+ sizeof(struct ixgbe_l2_tn_key));
+ node->pool = l2_tunnel->pool;
+ ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
+ if (ret < 0) {
+ rte_free(node);
+ return ret;
+ }
+ }
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
@@ -7062,15 +7379,27 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
break;
}
+ if ((!restore) && (ret < 0))
+ (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
return ret;
}
/* Delete l2 tunnel filter */
-static int
+int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
- int ret = 0;
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+ ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+ if (ret < 0)
+ return ret;
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
@@ -7096,7 +7425,7 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = 0;
+ int ret;
if (filter_op == RTE_ETH_FILTER_NOP)
return 0;
@@ -7111,7 +7440,8 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_ADD:
ret = ixgbe_dev_l2_tunnel_filter_add
(dev,
- (struct rte_eth_l2_tunnel_conf *)arg);
+ (struct rte_eth_l2_tunnel_conf *)arg,
+ FALSE);
break;
case RTE_ETH_FILTER_DELETE:
ret = ixgbe_dev_l2_tunnel_filter_del
@@ -7154,10 +7484,13 @@ ixgbe_dev_l2_tunnel_forwarding_enable
(struct rte_eth_dev *dev,
enum rte_eth_tunnel_type l2_tunnel_type)
{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
int ret = 0;
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = TRUE;
ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
break;
default:
@@ -7175,10 +7508,13 @@ ixgbe_dev_l2_tunnel_forwarding_disable
(struct rte_eth_dev *dev,
enum rte_eth_tunnel_type l2_tunnel_type)
{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
int ret = 0;
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = FALSE;
ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
break;
default:
@@ -7195,15 +7531,16 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
bool en)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
int ret = 0;
uint32_t vmtir, vmvir;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) {
+ if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
PMD_DRV_LOG(ERR,
"VF id %u should be less than %u",
l2_tunnel->vf_id,
- dev->pci_dev->max_vfs);
+ pci_dev->max_vfs);
return -EINVAL;
}
@@ -7529,7 +7866,7 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
}
static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
@@ -7584,8 +7921,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
}
static void
-ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+ixgbevf_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -7593,7 +7929,226 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
ixgbevf_dev_interrupt_action(dev);
}
-RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+/**
+ * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the transmit data path and waits for the HW to internally empty
+ * the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+ int i;
+ int sectxreg;
+
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+ for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+ if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+ break;
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECTX_POLL)
+ PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+ "path fully disabled. Continuing with init.");
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+ uint32_t sectxreg;
+
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/* restore n-tuple filter */
+static inline void
+ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *node;
+
+ TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+ ixgbe_inject_5tuple_filter(dev, node);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+ filter_info->ethertype_filters[i].etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
+ filter_info->ethertype_filters[i].etqs);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* restore SYN filter */
+static inline void
+ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* restore L2 tunnel filter */
+static inline void
+ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *node;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+
+ TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+ l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = node->key.tn_id;
+ l2_tn_conf.pool = node->pool;
+ (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+ }
+}
+
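+/* Re-program all filters stored in software into the hardware. */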
+static int
+ixgbe_filter_restore(struct rte_eth_dev *dev)
+{
+ ixgbe_ntuple_filter_restore(dev);
+ ixgbe_ethertype_filter_restore(dev);
+ ixgbe_syn_filter_restore(dev);
+ ixgbe_fdir_filter_restore(dev);
+ ixgbe_l2_tn_filter_restore(dev);
+
+ return 0;
+}
+
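+/* Re-apply the stored E-tag enable/forwarding/ether-type configuration. */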
+static void
+ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (l2_tn_info->e_tag_en)
+ (void)ixgbe_e_tag_enable(hw);
+
+ if (l2_tn_info->e_tag_fwd_en)
+ (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
+
+ (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ ixgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i) &&
+ !filter_info->ethertype_filters[i].conf) {
+ (void)ixgbe_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+void
+ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
+ filter_info->syn_info = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* remove all the L2 tunnel filters */
+int
+ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+ int ret = 0;
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+ l2_tn_conf.pool = l2_tn_filter->pool;
+ ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
-RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index a4e2996a..b576a6f4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,11 +38,14 @@
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#include <rte_time.h>
+#include <rte_hash.h>
/* need update link, bit flag */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1)
#define IXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC (uint32_t)(1 << 3)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
/*
* Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,10 +133,28 @@
#define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+#define IXGBE_SECTX_MINSECIFG_MASK 0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH 0xFFFFFE00
+
+#define IXGBE_MAX_FDIR_FILTER_NUM (1024 * 32)
+#define IXGBE_MAX_L2_TN_FILTER_NUM 128
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
+ return -ENOTSUP;\
+} while (0)
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+ (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
+ (type) != ixgbe_mac_X550EM_a)\
+ return -ENOTSUP;\
+} while (0)
+
/*
* Information about the fdir mode.
*/
-
struct ixgbe_hw_fdir_mask {
uint16_t vlan_tci_mask;
uint32_t src_ipv4_mask;
@@ -148,6 +169,28 @@ struct ixgbe_hw_fdir_mask {
uint8_t tunnel_type_mask;
};
+struct ixgbe_fdir_filter {
+ TAILQ_ENTRY(ixgbe_fdir_filter) entries;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t fdirhash; /* hash value for fdir */
+ uint8_t queue; /* assigned rx queue */
+};
+
+/* list of fdir filters */
+TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
+
+struct ixgbe_fdir_rule {
+ struct ixgbe_hw_fdir_mask mask;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
+ bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
+ bool b_mask; /* If TRUE, mask has meaning. */
+ enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t soft_id; /* a unique value for this rule */
+ uint8_t queue; /* assigned rx queue */
+};
+
struct ixgbe_hw_fdir_info {
struct ixgbe_hw_fdir_mask mask;
uint8_t flex_bytes_offset;
@@ -159,6 +202,11 @@ struct ixgbe_hw_fdir_info {
uint64_t remove;
uint64_t f_add;
uint64_t f_remove;
+ struct ixgbe_fdir_filter_list fdir_list; /* filter list */
+ /* store the pointers of the filters; the index is the hash value. */
+ struct ixgbe_fdir_filter **hash_map;
+ struct rte_hash *hash_handle; /* cuckoo hash handle */
+ bool mask_added; /* if the mask has already been set via a consistent filter */
};
/* structure for interrupt relative data */
@@ -254,16 +302,136 @@ struct ixgbe_5tuple_filter {
(RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
(sizeof(uint32_t) * NBBY))
+struct ixgbe_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+ uint32_t etqs;
+ /**
+ * If this filter is added by configuration,
+ * it should not be removed.
+ */
+ bool conf;
+};
+
/*
* Structure to store filters' info.
*/
struct ixgbe_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
/* store used ethertype filters*/
- uint16_t ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
+ struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
/* Bit mask for every used 5tuple filter */
uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
struct ixgbe_5tuple_filter_list fivetuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
+};
+
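+/* Key of an L2 tunnel filter: tunnel type plus tunnel ID. */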
+struct ixgbe_l2_tn_key {
+ enum rte_eth_tunnel_type l2_tn_type;
+ uint32_t tn_id;
+};
+
+struct ixgbe_l2_tn_filter {
+ TAILQ_ENTRY(ixgbe_l2_tn_filter) entries;
+ struct ixgbe_l2_tn_key key;
+ uint32_t pool;
+};
+
+TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter);
+
+struct ixgbe_l2_tn_info {
+ struct ixgbe_l2_tn_filter_list l2_tn_list;
+ struct ixgbe_l2_tn_filter **hash_map;
+ struct rte_hash *hash_handle;
+ bool e_tag_en; /* e-tag enabled */
+ bool e_tag_fwd_en; /* e-tag based forwarding enabled */
+ uint16_t e_tag_ether_type; /* ether type for e-tag */
+};
+
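+/* A flow created by the generic flow API: filter type plus type-specific rule. */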
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+ TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+ TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+ TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+ TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+ struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+ TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+ struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+ TAILQ_ENTRY(ixgbe_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
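+/* Lists of the filters created through the generic flow API. */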
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+struct ixgbe_ntuple_filter_list filter_ntuple_list;
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+struct ixgbe_ethertype_filter_list filter_ethertype_list;
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+struct ixgbe_syn_filter_list filter_syn_list;
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+struct ixgbe_flow_mem_list ixgbe_flow_list;
+
+/*
+ * Statistics counters collected by the MACsec engine
+ */
+struct ixgbe_macsec_stats {
+ /* TX port statistics */
+ uint64_t out_pkts_untagged;
+ uint64_t out_pkts_encrypted;
+ uint64_t out_pkts_protected;
+ uint64_t out_octets_encrypted;
+ uint64_t out_octets_protected;
+
+ /* RX port statistics */
+ uint64_t in_pkts_untagged;
+ uint64_t in_pkts_badtag;
+ uint64_t in_pkts_nosci;
+ uint64_t in_pkts_unknownsci;
+ uint64_t in_octets_decrypted;
+ uint64_t in_octets_validated;
+
+ /* RX SC statistics */
+ uint64_t in_pkts_unchecked;
+ uint64_t in_pkts_delayed;
+ uint64_t in_pkts_late;
+
+ /* RX SA statistics */
+ uint64_t in_pkts_ok;
+ uint64_t in_pkts_invalid;
+ uint64_t in_pkts_notvalid;
+ uint64_t in_pkts_unusedsa;
+ uint64_t in_pkts_notusingsa;
+};
+
+/* The configuration of bandwidth */
+struct ixgbe_bw_conf {
+ uint8_t tc_num; /* Number of TCs. */
};
/*
@@ -272,6 +440,7 @@ struct ixgbe_filter_info {
struct ixgbe_adapter {
struct ixgbe_hw hw;
struct ixgbe_hw_stats stats;
+ struct ixgbe_macsec_stats macsec_stats;
struct ixgbe_hw_fdir_info fdir;
struct ixgbe_interrupt intr;
struct ixgbe_stat_mapping_registers stat_mappings;
@@ -285,6 +454,8 @@ struct ixgbe_adapter {
struct ixgbe_bypass_info bps;
#endif /* RTE_NIC_BYPASS */
struct ixgbe_filter_info filter;
+ struct ixgbe_l2_tn_info l2_tn;
+ struct ixgbe_bw_conf bw_conf;
bool rx_bulk_alloc_allowed;
bool rx_vec_allowed;
@@ -293,12 +464,18 @@ struct ixgbe_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+#define IXGBE_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
(&((struct ixgbe_adapter *)adapter)->stats)
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
(&((struct ixgbe_adapter *)adapter)->intr)
@@ -329,6 +506,12 @@ struct ixgbe_adapter {
#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct ixgbe_adapter *)adapter)->filter)
+#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->l2_tn)
+
+#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->bw_conf)
+
/*
* RX/TX function prototypes
*/
@@ -353,7 +536,9 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
@@ -398,6 +583,9 @@ uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
@@ -414,10 +602,31 @@ uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);
bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
+int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter,
+ bool add);
+int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore);
+int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel);
+void ixgbe_filterlist_flush(void);
/*
* Flow director function prototypes
*/
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
+ bool del, bool update);
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
@@ -444,4 +653,74 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
enum rte_filter_op filter_op, void *arg);
+void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
+
+extern const struct rte_flow_ops ixgbe_flow_ops;
+
+void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_vt_check(struct ixgbe_hw *hw);
+int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+bool is_ixgbe_supported(struct rte_eth_dev *dev);
+
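+/* Return the index of the ethertype filter matching 'ethertype', or -1. */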
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+ uint16_t ethertype)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
+ (filter_info->ethertype_mask & (1 << i)))
+ return i;
+ }
+ return -1;
+}
+
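+/* Store an ethertype filter in the first free slot; return its index, or -1 if full. */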
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+ struct ixgbe_ethertype_filter *ethertype_filter)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (!(filter_info->ethertype_mask & (1 << i))) {
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i].ethertype =
+ ethertype_filter->ethertype;
+ filter_info->ethertype_filters[i].etqf =
+ ethertype_filter->etqf;
+ filter_info->ethertype_filters[i].etqs =
+ ethertype_filter->etqs;
+ filter_info->ethertype_filters[i].conf =
+ ethertype_filter->conf;
+ return i;
+ }
+ }
+ return -1;
+}
+
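+/* Clear the ethertype filter at 'idx'; return idx, or -1 if out of range. */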
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= IXGBE_MAX_ETQF_FILTERS)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
+ filter_info->ethertype_filters[idx].etqs = 0;
+ filter_info->ethertype_filters[idx].conf = FALSE;
+ return idx;
+}
+
#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 4b81ee37..7f6c7b58 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -43,6 +43,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_malloc.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -111,10 +112,8 @@
static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
@@ -294,8 +293,7 @@ reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
-fdir_set_input_mask_82599(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -307,8 +305,6 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
uint32_t fdirtcpm; /* TCP source and destination port masks. */
uint32_t fdiripv6m; /* IPv6 source and destination masks. */
- uint16_t dst_ipv6m = 0;
- uint16_t src_ipv6m = 0;
volatile uint32_t *reg;
PMD_INIT_FUNC_TRACE();
@@ -319,31 +315,30 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* a VLAN of 0 is unspecified, so mask that out as well. L4type
* cannot be masked out in this implementation.
*/
- if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
+ if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
fdirm |= IXGBE_FDIRM_L4P;
- if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- else if (input_mask->vlan_tci_mask == 0)
+ else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
- info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = reverse_fdir_bitmasks(
- rte_be_to_cpu_16(input_mask->dst_port_mask),
- rte_be_to_cpu_16(input_mask->src_port_mask));
+ rte_be_to_cpu_16(info->mask.dst_port_mask),
+ rte_be_to_cpu_16(info->mask.src_port_mask));
/* write all the same so that UDP, TCP and SCTP use the same mask
* (little-endian)
@@ -351,30 +346,23 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
- info->mask.src_port_mask = input_mask->src_port_mask;
- info->mask.dst_port_mask = input_mask->dst_port_mask;
/* Store source and destination IPv4 masks (big-endian),
* can not use IXGBE_WRITE_REG.
*/
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
- *reg = ~(input_mask->ipv4_mask.src_ip);
+ *reg = ~(info->mask.src_ipv4_mask);
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
- *reg = ~(input_mask->ipv4_mask.dst_ip);
- info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
- info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ *reg = ~(info->mask.dst_ipv4_mask);
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
/*
* Store source and destination IPv6 masks (bit reversed)
*/
- IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
- IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
- fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
+ fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
+ info->mask.src_ipv6_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
- info->mask.src_ipv6_mask = src_ipv6m;
- info->mask.dst_ipv6_mask = dst_ipv6m;
}
return IXGBE_SUCCESS;
@@ -385,8 +373,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
-fdir_set_input_mask_x550(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_x550(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -409,20 +396,19 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
/* some bits must be set for mac vlan or tunnel mode */
fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
- if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- else if (input_mask->vlan_tci_mask == 0)
+ else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
- info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
@@ -433,12 +419,11 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- mac_mask = input_mask->mac_addr_byte_mask;
+ mac_mask = info->mask.mac_addr_byte_mask;
fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
& IXGBE_FDIRIP6M_INNER_MAC;
- info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
- switch (input_mask->tunnel_type_mask) {
+ switch (info->mask.tunnel_type_mask) {
case 0:
/* Mask tunnel type */
fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
@@ -449,10 +434,8 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
return -EINVAL;
}
- info->mask.tunnel_type_mask =
- input_mask->tunnel_type_mask;
- switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
+ switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
case 0x0:
/* Mask vxlan id */
fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
@@ -466,8 +449,6 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
return -EINVAL;
}
- info->mask.tunnel_id_mask =
- input_mask->tunnel_id_mask;
}
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
@@ -481,22 +462,90 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
}
static int
-fdir_set_input_mask(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint16_t dst_ipv6m = 0;
+ uint16_t src_ipv6m = 0;
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.src_port_mask = input_mask->src_port_mask;
+ info->mask.dst_port_mask = input_mask->dst_port_mask;
+ info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+ info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+ info->mask.src_ipv6_mask = src_ipv6m;
+ info->mask.dst_ipv6_mask = dst_ipv6m;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+ info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
+ info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+ mode <= RTE_FDIR_MODE_PERFECT)
+ return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
+ else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
+
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+}
+
+int
+ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
if (mode >= RTE_FDIR_MODE_SIGNATURE &&
mode <= RTE_FDIR_MODE_PERFECT)
- return fdir_set_input_mask_82599(dev, input_mask);
+ return fdir_set_input_mask_82599(dev);
else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
- return fdir_set_input_mask_x550(dev, input_mask);
+ return fdir_set_input_mask_x550(dev);
PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
return -ENOTSUP;
}
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ int ret;
+
+ ret = ixgbe_fdir_store_input_mask(dev, input_mask);
+ if (ret)
+ return ret;
+
+ return ixgbe_fdir_set_input_mask(dev);
+}
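With the mask handling split in two, storing the user mask into info->mask and writing it to the FDIR registers are now independent steps; ixgbe_fdir_set_input_mask() is made non-static, presumably so a caller that fills info->mask itself (for example from a parsed ixgbe_fdir_rule) can reuse only the register-programming half. A minimal sketch of such a caller, assuming the rule's mask has already been validated (the function name below is illustrative, not part of this patch):

static int
example_program_rule_mask(struct rte_eth_dev *dev,
			  const struct ixgbe_fdir_rule *rule)
{
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

	/* copy the mask parsed from the flow pattern ... */
	info->mask = rule->mask;
	/* ... then let the exported helper write FDIRM/FDIRIP6M etc. */
	return ixgbe_fdir_set_input_mask(dev);
}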
+
/*
* ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
* arguments are valid
@@ -681,6 +730,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp4_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp4_flow.dst_port;
+ /* fall-through */
/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
@@ -696,6 +746,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp6_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp6_flow.dst_port;
+ /* fall-through */
/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
@@ -1075,36 +1126,115 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
}
-/*
- * ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
- * @dev: pointer to the structure rte_eth_dev
- * @fdir_filter: fdir filter entry
- * @del: 1 - delete, 0 - add
- * @update: 1 - update
- */
+static inline struct ixgbe_fdir_filter *
+ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ struct ixgbe_fdir_filter *fdir_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_handle,
+ &fdir_filter->ixgbe_fdir);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ fdir_info->hash_map[ret] = fdir_filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ ret = rte_hash_del_key(fdir_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
+ return ret;
+ }
+
+ fdir_filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+ rte_free(fdir_filter);
+
+ return 0;
+}
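Together these inline helpers keep the software copy of the filter table consistent: the rte_hash maps the ixgbe_atr_input key to a slot index, hash_map[] turns that index back into the node, and the TAILQ keeps nodes in insertion order so they can be replayed later. A condensed sketch of the add-if-missing pattern they support (error handling trimmed; it mirrors the logic in ixgbe_fdir_filter_program() below):

	struct ixgbe_fdir_filter *node;

	node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
	if (!node) {
		node = rte_zmalloc("ixgbe_fdir", sizeof(*node), 0);
		if (node) {
			node->ixgbe_fdir = rule->ixgbe_fdir;
			(void)ixgbe_insert_fdir_filter(info, node);
		}
	}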
+
static int
-ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter,
+ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct ixgbe_fdir_rule *rule)
+{
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ int err;
+
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+
+ err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
+ &rule->ixgbe_fdir,
+ fdir_mode);
+ if (err)
+ return err;
+
+ rule->mode = fdir_mode;
+ if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
+ rule->fdirflags = IXGBE_FDIRCMD_DROP;
+ rule->queue = fdir_filter->action.rx_queue;
+ rule->soft_id = fdir_filter->soft_id;
+
+ return 0;
+}
+
+int
+ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
bool del,
bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags;
uint32_t fdirhash;
- union ixgbe_atr_input input;
uint8_t queue;
bool is_perfect = FALSE;
int err;
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ struct ixgbe_fdir_filter *node;
+ bool add_node = FALSE;
- if (fdir_mode == RTE_FDIR_MODE_NONE)
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
return -ENOTSUP;
/*
* Sanity check for x550.
- * When adding a new filter with flow type set to IPv4-other,
+ * When adding a new filter with flow type set to IPv4,
 * the flow director mask should be configured beforehand,
* and the L4 protocol and ports are masked.
*/
@@ -1112,12 +1242,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
(hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
- (fdir_filter->input.flow_type ==
- RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
+ (rule->ixgbe_fdir.formatted.flow_type ==
+ IXGBE_ATR_FLOW_TYPE_IPV4) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
PMD_DRV_LOG(ERR, "By this device,"
- " IPv4-other is not supported without"
+ " IPv4 is not supported without"
" L4 protocol and ports masked!");
return -ENOTSUP;
}
@@ -1126,28 +1256,26 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
is_perfect = TRUE;
- memset(&input, 0, sizeof(input));
-
- err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
- fdir_mode);
- if (err)
- return err;
-
if (is_perfect) {
- if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+ if (rule->ixgbe_fdir.formatted.flow_type &
+ IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
" perfect mode!");
return -ENOTSUP;
}
- fdirhash = atr_compute_perfect_hash_82599(&input,
+ fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
- fdirhash |= fdir_filter->soft_id <<
+ fdirhash |= rule->soft_id <<
IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
- fdirhash = atr_compute_sig_hash_82599(&input,
+ fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
if (del) {
+ err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ if (err < 0)
+ return err;
+
err = fdir_erase_filter_82599(hw, fdirhash);
if (err < 0)
PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
@@ -1157,7 +1285,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
}
/* add or update an fdir filter*/
fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
- if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
+ if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
if (is_perfect) {
queue = dev->data->dev_conf.fdir_conf.drop_queue;
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
@@ -1166,28 +1294,86 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
" signature mode.");
return -EINVAL;
}
- } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
- fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
- queue = (uint8_t)fdir_filter->action.rx_queue;
+ } else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
+ queue = (uint8_t)rule->queue;
else
return -EINVAL;
+ node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
+ if (node) {
+ if (update) {
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+ } else {
+ PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
+ return -EINVAL;
+ }
+ } else {
+ add_node = TRUE;
+ node = rte_zmalloc("ixgbe_fdir",
+ sizeof(struct ixgbe_fdir_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+ (void)rte_memcpy(&node->ixgbe_fdir,
+ &rule->ixgbe_fdir,
+ sizeof(union ixgbe_atr_input));
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+
+ err = ixgbe_insert_fdir_filter(info, node);
+ if (err < 0) {
+ rte_free(node);
+ return err;
+ }
+ }
+
if (is_perfect) {
- err = fdir_write_perfect_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash,
- fdir_mode);
+ err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash, fdir_mode);
} else {
- err = fdir_add_signature_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash);
+ err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash);
}
- if (err < 0)
+ if (err < 0) {
PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
- else
+
+ if (add_node)
+ (void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ } else {
PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+ }
return err;
}
+/* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
+{
+ struct ixgbe_fdir_rule rule;
+ int err;
+
+ err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
+
+ if (err)
+ return err;
+
+ return ixgbe_fdir_filter_program(dev, &rule, del, update);
+}
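The legacy filter_ctrl path is now a thin wrapper: ixgbe_interpret_fdir_filter() converts the rte_eth_fdir_filter into an ixgbe_fdir_rule, and ixgbe_fdir_filter_program() does the actual hardware programming. The latter is non-static, presumably so the rte_flow code added by this patch can call it with a rule it parsed itself; a hedged sketch of that direct call (the surrounding flow-create code is assumed, not shown in this hunk):

	struct ixgbe_fdir_rule rule;
	int err;

	memset(&rule, 0, sizeof(rule));
	/* ... fill rule.ixgbe_fdir, rule.mask, rule.mode,
	 *     rule.queue and rule.soft_id from the parsed flow ... */
	err = ixgbe_fdir_filter_program(dev, &rule, FALSE, FALSE);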
+
static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
@@ -1378,3 +1564,66 @@ ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* restore flow director filter */
+void
+ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *node;
+ bool is_perfect = FALSE;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_write_perfect_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash,
+ fdir_mode);
+ }
+ } else {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_add_signature_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash);
+ }
+ }
+}
+
+/* remove all the flow director filters */
+int
+ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+ struct ixgbe_fdir_filter *filter_flag;
+ int ret = 0;
+
+ /* flush flow director */
+ rte_hash_reset(fdir_info->hash_handle);
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
+ filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ if (filter_flag != NULL)
+ ret = ixgbe_fdir_flush(dev);
+
+ return ret;
+}
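Both helpers operate on the software list built above: ixgbe_fdir_filter_restore() replays every stored node into hardware, while ixgbe_clear_all_fdir_filter() empties the hash, the map and the list before flushing the hardware table. Where they are wired in (device start, rte_flow flush) lives outside this hunk; the call sites below are assumptions for orientation only:

	ixgbe_fdir_filter_restore(dev);		/* e.g. after a port restart */
	ret = ixgbe_clear_all_fdir_filter(dev);	/* e.g. from a flow flush */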
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
new file mode 100644
index 00000000..da7b1cc8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -0,0 +1,2778 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_hash_crc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"
+#include "ixgbe_rxtx.h"
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_phy.h"
+#include "rte_pmd_ixgbe.h"
+
+
+#define IXGBE_MIN_N_TUPLE_PRIO 1
+#define IXGBE_MAX_N_TUPLE_PRIO 7
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
+ do { \
+ item = pattern + index;\
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+ index++; \
+ item = pattern + index; \
+ } \
+ } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)\
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
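Both macros simply advance the index past any VOID entries so the parsers always land on the next meaningful item or action. A short usage sketch (the parsers below follow exactly this shape):

	uint32_t index = 0;
	const struct rte_flow_item *item;

	NEXT_ITEM_OF_PATTERN(item, pattern, index);	/* first non-VOID item */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);	/* next non-VOID item */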
+/**
+ * Please be aware there is an assumption for all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order.
+ * Because the pattern describes packet contents, it normally
+ * follows network (big endian) order.
+ */
+
+/**
+ * Parse the rule to see if it is an n-tuple rule.
+ * If so, also extract the n-tuple filter info.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item can be MAC or IPv4 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* check if the next not void item is TCP or UDP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* get the TCP/UDP info */
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ } else {
+ sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+ if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+ attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
+ filter->priority = 1;
+
+ return 0;
+}
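For reference, a pattern/action set this parser accepts, built with the public rte_flow structures; the addresses, port and queue number are illustrative only:

	/* IPv4 + TCP 5-tuple steered to RX queue 3 (illustrative values) */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
		.hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};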
+
+/* a dedicated function for ixgbe because the flags field is device-specific */
+static int
+ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* Ixgbe doesn't support tcp flags. */
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Ixgbe doesn't support many priorities. */
+ if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+ filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ return -rte_errno;
+
+ /* fixed value for ixgbe */
+ filter->flags = RTE_5TUPLE_FLAGS;
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is an ethertype rule.
+ * If so, also extract the ethertype filter info.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* Parse pattern */
+ index = 0;
+
+ /* The first non-void item should be MAC. */
+ item = pattern + index;
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ index++;
+ item = pattern + index;
+ }
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ index++;
+ item = pattern + index;
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ index++;
+ item = pattern + index;
+ }
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ index = 0;
+ /* Check if the first non-void action is QUEUE or DROP. */
+ act = actions + index;
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ index++;
+ act = actions + index;
+ }
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ act = actions + index;
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ index++;
+ act = actions + index;
+ }
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+	/* Ixgbe doesn't support matching on MAC address. */
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue index much too big");
+ return -rte_errno;
+ }
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
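For reference, an ethertype rule this parser accepts: the ethertype must be fully masked, the MAC masks must stay all-zero (MAC matching and DROP are rejected just above), and the action must be QUEUE. Values are illustrative; 0x88F7 is the PTP ethertype:

	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(0x88F7),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = 0xFFFF,		/* full ethertype mask, MACs left all-zero */
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};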
+
+/**
+ * Parse the rule to see if it is a TCP SYN rule.
+ * If so, also extract the TCP SYN filter info.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0xFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_syn_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
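For reference, a SYN rule this parser accepts: only the SYN bit is specified and masked, every other TCP field is left zero, and the attribute priority may be 0 (low) or UINT32_MAX (high). The queue number is illustrative:

	struct rte_flow_item_tcp tcp_spec = {
		.hdr.tcp_flags = TCP_SYN_FLAG,
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.tcp_flags = TCP_SYN_FLAG,	/* compare the SYN bit only */
	};
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};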
+
+/**
+ * Parse the rule to see if it is an L2 tunnel rule.
+ * If so, also extract the L2 tunnel filter info.
+ * Only E-tag is supported now.
+ * pattern:
+ * The first not void item can be E_TAG.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * E_TAG	grp		0x1	0x3
+ *		e_cid_base	0x309	0xFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_e_tag *e_tag_spec;
+ const struct rte_flow_item_e_tag *e_tag_mask;
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+ /* parse pattern */
+ index = 0;
+
+ /* The first not void item should be e-tag. */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
+ e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+
+ /* Only care about GRP and E cid base. */
+ if (e_tag_mask->epcp_edei_in_ecid_b ||
+ e_tag_mask->in_ecid_e ||
+ e_tag_mask->ecid_e ||
+ e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ /**
+ * grp and e_cid_base are bit fields and only use 14 bits.
+ * e-tag id is taken as little endian by HW.
+ */
+ filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->pool = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ret = cons_parse_l2_tn_filter(attr, pattern,
+ actions, l2_tn_filter, error);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ return ret;
+}
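For reference, an E-tag rule this parser accepts (x550 family only), assuming the rte_flow_item_e_tag layout where GRP occupies bits 13:12 and the E-CID base bits 11:0 of rsvd_grp_ecid_b, so the mask must be 0x3FFF and the QUEUE index selects the pool. Values are illustrative:

	struct rte_flow_item_e_tag e_tag_spec = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
	};
	struct rte_flow_item_e_tag e_tag_mask = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
	};
	struct rte_flow_action_queue pool = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &pool },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};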
+
+/* Parse to get the attr and action info of flow director rule. */
+static int
+ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark;
+ uint32_t index;
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ rule->queue = act_q->index;
+ } else { /* drop */
+ rule->fdirflags = IXGBE_FDIRCMD_DROP;
+ }
+
+ /* check if the next not void item is MARK */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
+ (act->type != RTE_FLOW_ACTION_TYPE_END)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rule->soft_id = 0;
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark = (const struct rte_flow_action_mark *)act->conf;
+ rule->soft_id = mark->id;
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ }
+
+ /* check if the next not void item is END */
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
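For reference, the action lists this helper accepts are QUEUE or DROP, optionally followed by MARK whose id becomes the rule's soft_id. An illustrative example:

	/* request drop; the MARK id 0x1234 becomes the rule's soft_id */
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};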
+
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
+ * If so, also extract the flow director filter info.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP or SCTP.
+ * The next not void item must be END.
+ * MAC VLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be MAC VLAN.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP/SCTP pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * UDP/TCP/SCTP src_port 80 0xFFFF
+ * dst_port 80 0xFFFF
+ * END
+ * MAC VLAN pattern example:
+ * ITEM Spec Mask
+ * ETH dst_addr
+ *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
+ *		 0x2C, 0x6D, 0x36}	 0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+
+ uint32_t index, j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /**
+ * Some fields may not be provided. Set spec to 0 and mask to default
+ * value. So, we need not do anything for the not provided fields later.
+ */
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+
+ /* parse pattern */
+ index = 0;
+
+ /**
+ * The first not void item should be
+ * MAC or IPv4 or TCP or UDP or SCTP.
+ */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mode = RTE_FDIR_MODE_PERFECT;
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+ if (item->spec && !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+ /* Get the dst MAC. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ rule->ixgbe_fdir.formatted.inner_mac[j] =
+ eth_spec->dst.addr_bytes[j];
+ }
+ }
+
+
+ if (item->mask) {
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
+ rule->b_mask = TRUE;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /**
+ * src MAC address must be masked,
+ * and don't support dst MAC address mask.
+ */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j] ||
+ eth_mask->dst.addr_bytes[j] != 0xFF) {
+ memset(rule, 0,
+ sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When no VLAN, considered as full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+ }
+		/**
+		 * If both spec and mask are NULL,
+		 * it means we don't care about ETH.
+		 * Do nothing.
+		 */
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ } else {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!(item->spec && item->mask)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+ rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
+		/* More than one tag is not supported. */
+
+ /**
+ * Check if the next not void item is not vlan.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the IP info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst addresses,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+ rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ rule->ixgbe_fdir.formatted.dst_ip[0] =
+ ipv4_spec->hdr.dst_addr;
+ rule->ixgbe_fdir.formatted.src_ip[0] =
+ ipv4_spec->hdr.src_addr;
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_TCPV4;
+		/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ tcp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ }
+
+ /* Get the UDP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_UDPV4;
+		/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = udp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ udp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ }
+
+ /* Get the SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_SCTPV4;
+		/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ sctp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ sctp_spec->hdr.dst_port;
+ }
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
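
For reference, a minimal application-side sketch of the kind of rule the parser above accepts in perfect mode: an IPv4 src/dst plus TCP port match with a QUEUE action. Illustrative only, not part of the patch; the helper name, addresses and queue number are made up, and the port is assumed to be configured with fdir_conf.mode = RTE_FDIR_MODE_PERFECT.

#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
example_fdir_tcp_rule(uint8_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xc0a80001), /* 192.168.0.1 */
			.dst_addr = rte_cpu_to_be_32(0xc0a80002), /* 192.168.0.2 */
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		/* only src/dst addresses may be non-zero in the mask */
		.hdr = { .src_addr = 0xffffffff, .dst_addr = 0xffffffff },
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(1024),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_tcp tcp_mask = {
		/* only src/dst ports may be non-zero in the mask */
		.hdr = { .src_port = 0xffff, .dst_port = 0xffff },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* no spec/mask: don't care */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
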
+
+#define NVGRE_PROTOCOL 0x6558
+
+/**
+ * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
+ * And fill the flow director filter info along the way.
+ * VxLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be UDP.
+ * The fourth not void item must be VXLAN.
+ * The fifth not void item must be ETH (inner MAC),
+ * optionally followed by VLAN.
+ * The next not void item must be END.
+ * NVGRE PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be NVGRE.
+ * The fourth not void item must be ETH (inner MAC),
+ * optionally followed by VLAN.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * VxLAN pattern example:
+ * ITEM		Spec			Mask
+ * ETH		NULL			NULL
+ * IPV4/IPV6	NULL			NULL
+ * UDP		NULL			NULL
+ * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
+ * MAC VLAN	tci	0x2016		0xEFFF
+ * END
+ * NVGRE pattern example:
+ * ITEM		Spec			Mask
+ * ETH		NULL			NULL
+ * IPV4/IPV6	NULL			NULL
+ * NVGRE	protocol	0x6558	0xFFFF
+ *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
+ * MAC VLAN	tci	0x2016		0xEFFF
+ * END
+ * All other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint32_t index, j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+	/**
+	 * Some fields may not be provided. Set spec to 0 and mask to default
+	 * value, so nothing more needs to be done later for fields that are
+	 * not provided.
+	 */
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+
+ /* parse pattern */
+ index = 0;
+
+	/**
+	 * The first not void item should be
+	 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
+	 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+
+ /* Skip MAC. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+		/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is IPv4 or IPv6. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+		/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is UDP or NVGRE. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip UDP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+		/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is VxLAN. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the VxLAN info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ rule->ixgbe_fdir.formatted.tunnel_type =
+ RTE_FDIR_TUNNEL_TYPE_VXLAN;
+
+ /* Only care about VNI, others should be masked. */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+		/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+
+ /* Tunnel type is always meaningful. */
+ rule->mask.tunnel_type_mask = 1;
+
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ if (vxlan_mask->flags) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+		/* VNI must be fully masked or not masked at all. */
+ if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
+ vxlan_mask->vni[2]) &&
+ ((vxlan_mask->vni[0] != 0xFF) ||
+ (vxlan_mask->vni[1] != 0xFF) ||
+ (vxlan_mask->vni[2] != 0xFF))) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
+ RTE_DIM(vxlan_mask->vni));
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ vxlan_spec = (const struct rte_flow_item_vxlan *)
+ item->spec;
+ rte_memcpy(((uint8_t *)
+ &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
+ rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
+ rule->ixgbe_fdir.formatted.tni_vni);
+ }
+ }
+
+ /* Get the NVGRE info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
+ rule->ixgbe_fdir.formatted.tunnel_type =
+ RTE_FDIR_TUNNEL_TYPE_NVGRE;
+
+		/**
+		 * Only care about the c/k/s flags, protocol and TNI;
+		 * other fields should be masked out.
+		 */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+		/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+
+ /* Tunnel type is always meaningful. */
+ rule->mask.tunnel_type_mask = 1;
+
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ if (nvgre_mask->flow_id) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ if (nvgre_mask->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x3000) ||
+ nvgre_mask->protocol != 0xFFFF) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+		/* TNI must be fully masked or not masked at all. */
+ if (nvgre_mask->tni[0] &&
+ ((nvgre_mask->tni[0] != 0xFF) ||
+ (nvgre_mask->tni[1] != 0xFF) ||
+ (nvgre_mask->tni[2] != 0xFF))) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+		/* TNI is a 24-bit field. */
+ rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
+ RTE_DIM(nvgre_mask->tni));
+ rule->mask.tunnel_id_mask <<= 8;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ if (nvgre_spec->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x2000) ||
+ nvgre_spec->protocol !=
+ rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+			/* TNI is a 24-bit field. */
+ rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
+ nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
+ rule->ixgbe_fdir.formatted.tni_vni <<= 8;
+ }
+ }
+
+ /* check if the next not void item is MAC */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+	/**
+	 * Only VLAN and dst MAC address are supported;
+	 * other fields should be masked out.
+	 */
+
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+	/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+	/* The src MAC address mask must be all zero (src MAC is ignored). */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j]) {
+ memset(rule, 0,
+ sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ rule->mask.mac_addr_byte_mask = 0;
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+		/* It's a per-byte mask. */
+ if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+ rule->mask.mac_addr_byte_mask |= 0x1 << j;
+ } else if (eth_mask->dst.addr_bytes[j]) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+	/* If no VLAN item follows, treat the VLAN TCI as fully masked. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+ /* Get the dst MAC. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ rule->ixgbe_fdir.formatted.inner_mac[j] =
+ eth_spec->dst.addr_bytes[j];
+ }
+ }
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
+ (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+	/* Range ("last") is not supported. */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!(item->spec && item->mask)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+ rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
+		/* More than one VLAN tag is not supported. */
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+	/**
+	 * If there is no VLAN item, the VLAN TCI is not cared about.
+	 * Do nothing.
+	 */
+
+ return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
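
For illustration (not part of the patch), a VxLAN rule of the shape documented above: outer ETH/IPv4/UDP are wildcards, while the VNI, the inner destination MAC and the inner VLAN TCI are matched. The helper name and values are made up, and the port is assumed to be configured with fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL.

#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
example_fdir_vxlan_rule(uint8_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
	struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth inner_eth_mask = {
		/* src MAC and ether type stay zero: they must be masked out */
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	struct rte_flow_item_vlan vlan_spec = { .tci = rte_cpu_to_be_16(0x2016) };
	struct rte_flow_item_vlan vlan_mask = { .tci = rte_cpu_to_be_16(0xEFFF) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* protocol stack only */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
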
+
+static int
+ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a)
+ return -ENOTSUP;
+
+ ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
+ actions, rule, error);
+
+ if (!ret)
+ goto step_next;
+
+ ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
+ actions, rule, error);
+
+step_next:
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
+ return ret;
+}
+
+void
+ixgbe_filterlist_flush(void)
+{
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr,
+ entries);
+ rte_free(ntuple_filter_ptr);
+ }
+
+ while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr,
+ entries);
+ rte_free(ethertype_filter_ptr);
+ }
+
+ while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ rte_free(syn_filter_ptr);
+ }
+
+ while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr,
+ entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+
+ while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr,
+ entries);
+ rte_free(fdir_rule_ptr);
+ }
+
+ while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr,
+ entries);
+ rte_free(ixgbe_flow_mem_ptr->flow);
+ rte_free(ixgbe_flow_mem_ptr);
+ }
+}
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter type.
+ * We will let it use the first filter type it hits.
+ * So, the order of the parsers matters.
+ */
+static struct rte_flow *
+ixgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct rte_flow *flow = NULL;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+		return NULL;
+ }
+ ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
+ sizeof(struct ixgbe_flow_mem), 0);
+ if (!ixgbe_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ ixgbe_flow_mem_ptr->flow = flow;
+ TAILQ_INSERT_TAIL(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret) {
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
+ sizeof(struct ixgbe_ntuple_filter_ele), 0);
+ (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret) {
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "ixgbe_ethertype_filter",
+ sizeof(struct ixgbe_ethertype_filter_ele), 0);
+ (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
+ sizeof(struct ixgbe_eth_syn_filter_ele), 0);
+ (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret) {
+		/* The FDIR input mask is global and cannot be deleted per rule. */
+ if (fdir_rule.b_mask) {
+ if (!fdir_info->mask_added) {
+ /* It's the first time the mask is set. */
+ rte_memcpy(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct ixgbe_hw_fdir_mask));
+ ret = ixgbe_fdir_set_input_mask(dev);
+ if (ret)
+ goto out;
+
+ fdir_info->mask_added = TRUE;
+ } else {
+				/**
+				 * Only one global mask is supported;
+				 * all rules must use the same mask.
+				 */
+ ret = memcmp(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct ixgbe_hw_fdir_mask));
+ if (ret)
+ goto out;
+ }
+ }
+
+ if (fdir_rule.b_spec) {
+ ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
+ FALSE, FALSE);
+ if (!ret) {
+ fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
+ sizeof(struct ixgbe_fdir_rule_ele), 0);
+ (void)rte_memcpy(&fdir_rule_ptr->filter_info,
+ &fdir_rule,
+ sizeof(struct ixgbe_fdir_rule));
+ TAILQ_INSERT_TAIL(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ flow->rule = fdir_rule_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+ return flow;
+ }
+
+ if (ret)
+ goto out;
+ }
+
+ goto out;
+ }
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+ if (!ret) {
+ ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+ if (!ret) {
+ l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
+ sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
+ (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
+ &l2_tn_filter,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ flow->rule = l2_tn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(ixgbe_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
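
A short usage sketch, not part of the patch: the parsers above are tried in a fixed order (ntuple, ethertype, SYN, flow director, L2 tunnel), so a pattern is programmed as whichever filter type accepts it first. On failure the PMD fills the rte_flow_error structure, which an application can report; the helper name below is made up.

#include <stdio.h>
#include <string.h>
#include <rte_flow.h>

static struct rte_flow *
example_install_rule(uint8_t port_id,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *flow;

	memset(&err, 0, sizeof(err));
	flow = rte_flow_create(port_id, attr, pattern, actions, &err);
	if (flow == NULL)
		printf("flow create failed: %s\n",
		       err.message ? err.message : "(no message)");
	return flow;
}
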
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checks the format. It does not guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ */
+static int
+ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret)
+ return 0;
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+
+ return ret;
+}
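
A hedged sketch of the intended call order through the generic API (helper name made up): validate the rule first, then create it. A zero return from validation only means the format is recognized; creation can still fail when hardware resources run out.

#include <rte_flow.h>

static struct rte_flow *
example_validate_then_create(uint8_t port_id,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow_error *err)
{
	/* Format check only: no hardware resources are reserved here. */
	if (rte_flow_validate(port_id, attr, pattern, actions, err) != 0)
		return NULL;

	return rte_flow_create(port_id, attr, pattern, actions, err);
}
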
+
+/* Destroy a flow rule on ixgbe. */
+static int
+ixgbe_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&ntuple_filter,
+ &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&ethertype_filter,
+ &ethertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&syn_filter,
+ &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
+ (void)rte_memcpy(&fdir_rule,
+ &fdir_rule_ptr->filter_info,
+ sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ rte_free(fdir_rule_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
+ if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+ rte_free(ixgbe_flow_mem_ptr);
+ }
+ }
+ rte_free(flow);
+
+ return ret;
+}
+
+/* Destroy all flow rules associated with a port on ixgbe. */
+static int
+ixgbe_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ixgbe_clear_all_ntuple_filter(dev);
+ ixgbe_clear_all_ethertype_filter(dev);
+ ixgbe_clear_syn_filter(dev);
+
+ ret = ixgbe_clear_all_fdir_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ret = ixgbe_clear_all_l2_tn_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ixgbe_filterlist_flush();
+
+ return 0;
+}
+
+const struct rte_flow_ops ixgbe_flow_ops = {
+ ixgbe_flow_validate,
+ ixgbe_flow_create,
+ ixgbe_flow_destroy,
+ ixgbe_flow_flush,
+ NULL,
+};
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 26395e41..d88832e5 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -61,7 +61,9 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- return eth_dev->pci_dev->max_vfs;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+
+ return pci_dev->max_vfs;
}
static inline
@@ -176,6 +178,7 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
uint16_t vf_num;
int i;
+ struct ixgbe_ethertype_filter ethertype_filter;
if (!hw->mac.ops.set_ethertype_anti_spoofing) {
RTE_LOG(INFO, PMD, "ether type anti-spoofing is not"
@@ -183,16 +186,23 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
return;
}
- /* occupy an entity of ether type filter */
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (!(filter_info->ethertype_mask & (1 << i))) {
- filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] =
- IXGBE_ETHERTYPE_FLOW_CTRL;
- break;
- }
+ i = ixgbe_ethertype_filter_lookup(filter_info,
+ IXGBE_ETHERTYPE_FLOW_CTRL);
+ if (i >= 0) {
+		RTE_LOG(ERR, PMD, "An ether type filter"
+ " entity for flow control already exists!\n");
+ return;
}
- if (i == IXGBE_MAX_ETQF_FILTERS) {
+
+ ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqs = 0;
+ ethertype_filter.conf = TRUE;
+ i = ixgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (i < 0) {
RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter"
" entity for flow control.\n");
return;
@@ -387,15 +397,27 @@ ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
uint32_t reg_offset, vf_shift;
const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ uint8_t nb_q_per_pool;
+ int i;
vf_shift = vf & VFRE_MASK;
reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
- /* enable transmit and receive for vf */
+ /* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* enable all queue drop for IOV */
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
+ IXGBE_WRITE_FLUSH(hw);
+ reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
+ /* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
diff --git a/drivers/net/ixgbe/ixgbe_regs.h b/drivers/net/ixgbe/ixgbe_regs.h
index 773e1693..2aa48201 100644
--- a/drivers/net/ixgbe/ixgbe_regs.h
+++ b/drivers/net/ixgbe/ixgbe_regs.h
@@ -41,7 +41,7 @@ struct reg_info {
uint32_t count;
uint32_t stride;
const char *name;
-} reg_info;
+};
static const struct reg_info ixgbe_regs_general[] = {
{IXGBE_CTRL, 1, 1, "IXGBE_CTRL"},
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index c61ce470..1e078959 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -70,6 +70,7 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
+#include <rte_net.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -79,13 +80,23 @@
#include "base/ixgbe_common.h"
#include "ixgbe_rxtx.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IXGBE_TX_IEEE1588_TMST 0
+#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
- PKT_TX_OUTER_IP_CKSUM)
+ PKT_TX_MACSEC | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ IXGBE_TX_IEEE1588_TMST)
+
+#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
#if 1
#define RTE_PMD_USE_PREFETCH
@@ -100,6 +111,11 @@
#define rte_ixgbe_prefetch(p) do {} while (0)
#endif
+#ifdef RTE_IXGBE_INC_VECTOR
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+#endif
+
/*********************************************************************
*
* TX functions
@@ -131,7 +147,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
/* free buffers one at a time */
- m = __rte_pktmbuf_prefree_seg(txep->mbuf);
+ m = rte_pktmbuf_prefree_seg(txep->mbuf);
txep->mbuf = NULL;
if (unlikely(m == NULL))
@@ -321,7 +337,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
return nb_pkts;
}
@@ -352,6 +368,30 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+#ifdef RTE_IXGBE_INC_VECTOR
+static uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+#endif
+
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
@@ -519,6 +559,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+ if (ol_flags & PKT_TX_MACSEC)
+ cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
return cmdtype;
}
@@ -897,7 +939,7 @@ end_of_tx:
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -905,6 +947,57 @@ end_of_tx:
/*********************************************************************
*
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * Check if packet meets requirements for number of segments
+ *
+ * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
+ * non-TSO
+ */
+
+ if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
+			rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+			rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+			rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
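
The prepare function above is reached through the generic rte_eth_tx_prepare() call of the same release. A minimal usage sketch (helper name made up): run prepare on the burst before rte_eth_tx_burst(); packets from the first rejected one onward are not transmitted, and rte_errno describes the reason.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Prepare a burst, report the first rejected packet, send the rest. */
static uint16_t
example_send_burst(uint8_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	if (nb_prep != nb_pkts)
		printf("tx prepare stopped at packet %u (rte_errno=%d)\n",
		       nb_prep, rte_errno);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}
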
+
+/*********************************************************************
+ *
* RX functions
*
**********************************************************************/
@@ -1492,8 +1585,6 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
/* populate the static rte mbuf fields */
mb = rxep[i].mbuf;
if (reset_mbuf) {
- mb->next = NULL;
- mb->nb_segs = 1;
mb->port = rxq->port_id;
}
@@ -1583,7 +1674,8 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ cur_free_trigger);
}
if (rxq->rx_tail >= rxq->nb_rx_desc)
@@ -1987,8 +2079,8 @@ next_desc:
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
- next_rdt);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ next_rdt);
nb_hold -= rxq->rx_free_thresh;
} else {
PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
@@ -2100,12 +2192,6 @@ next_desc:
goto next_desc;
}
- /*
- * This is the last buffer of the received packet - return
- * the current cluster to the user.
- */
- rxm->next = NULL;
-
/* Initialize the first mbuf of the returned packet */
ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
@@ -2159,7 +2245,7 @@ next_desc:
rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
nb_hold = 0;
}
@@ -2284,6 +2370,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_prepare = NULL;
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
@@ -2304,6 +2391,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
(unsigned long)txq->tx_rs_thresh,
(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ dev->tx_pkt_prepare = ixgbe_prep_pkts;
}
}
@@ -2587,7 +2675,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
* rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
* rxq->rx_free_thresh < rxq->nb_rx_desc
* (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
- * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
* Scattered packets are not supported. This should be checked
* outside of this function.
*/
@@ -2609,15 +2696,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
"rxq->rx_free_thresh=%d",
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc <
- (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
- "rxq->nb_rx_desc=%d, "
- "IXGBE_MAX_RING_DESC=%d, "
- "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
- rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
- RTE_PMD_IXGBE_RX_MAX_BURST);
- ret = -EINVAL;
}
return ret;
@@ -2634,12 +2712,7 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
/*
* By default, the Rx queue setup function allocates enough memory for
* IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
- * extra memory at the end of the descriptor ring to be zero'd out. A
- * pre-condition for using the Rx burst bulk alloc function is that the
- * number of descriptors is less than or equal to
- * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
- * constraints here to see if we need to zero out memory after the end
- * of the H/W descriptor ring.
+ * extra memory at the end of the descriptor ring to be zero'd out.
*/
if (adapter->rx_bulk_alloc_allowed)
/* zero out extra memory */
@@ -2859,11 +2932,6 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
@@ -2898,6 +2966,63 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}
+int
+ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t nb_hold, desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (rxq->rx_using_sse)
+ nb_hold = rxq->rxrearm_nb;
+ else
+#endif
+ nb_hold = rxq->nb_rx_hold;
+ if (offset >= rxq->nb_rx_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct ixgbe_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
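
These callbacks back the generic rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status() calls introduced in the same release. A small sketch (helper name made up) that estimates how many RX descriptors are already done without scanning packets:

#include <rte_ethdev.h>

/* Count contiguous DONE descriptors starting at the current RX tail. */
static uint16_t
example_rx_ring_fill(uint8_t port_id, uint16_t queue_id, uint16_t ring_size)
{
	uint16_t offset;

	for (offset = 0; offset < ring_size; offset++) {
		if (rte_eth_rx_descriptor_status(port_id, queue_id, offset) !=
		    RTE_ETH_RX_DESC_DONE)
			break;
	}
	return offset;
}
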
+
void __attribute__((cold))
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
@@ -3323,7 +3448,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
- uint32_t q;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -3343,18 +3467,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
- if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
- /* Disable drop for all queues in VMDQ mode*/
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
- } else {
- /* Enable drop for all queues in SRIOV mode */
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT) | IXGBE_QDE_ENABLE));
- }
-
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
reg &= ~IXGBE_RTTDCS_ARBDIS;
@@ -3488,16 +3600,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
/**
* ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*/
static void
-ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
uint8_t i;
+ uint32_t q;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
/*
@@ -3535,6 +3649,21 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /* Disable drop for all queues in VMDQ mode*/
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT)));
+ } else {
+ /* Enable drop for all queues in SRIOV mode */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
+ }
}
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
@@ -3625,6 +3754,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_bw_conf *bw_conf =
+ IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
@@ -3647,7 +3778,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Get dcb TX configuration parameters from rte_eth_conf */
ixgbe_dcb_rx_config(dev, dcb_config);
/*Configure general DCB RX parameters*/
- ixgbe_dcb_rx_hw_config(hw, dcb_config);
+ ixgbe_dcb_rx_hw_config(dev, dcb_config);
break;
default:
PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
@@ -3696,8 +3827,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Re-configure 4 TCs BW */
for (i = 0; i < nb_tcs; i++) {
tc = &dcb_config->tc_config[i];
- tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
- (uint8_t)(100 / nb_tcs);
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
(uint8_t)(100 / nb_tcs);
}
@@ -3706,6 +3838,16 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
}
+ } else {
+ /* Re-configure 8 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
}
switch (hw->mac.type) {
@@ -4083,9 +4225,8 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
break;
}
} else {
- /*
- * SRIOV active scheme
- * Support RSS together with VMDq & SRIOV
+ /* SRIOV active scheme
+ * Support RSS together with SRIOV.
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_RSS:
@@ -4093,10 +4234,13 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
ixgbe_config_vf_rss(dev);
break;
case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_DCB:
+ /* In SRIOV, the configuration is the same as VMDq case */
ixgbe_vmdq_dcb_configure(dev);
break;
- /* FIXME if support DCB/RSS together with VMDq & SRIOV */
+ /* DCB/RSS together with SRIOV is not supported */
case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case ETH_MQ_RX_DCB_RSS:
PMD_INIT_LOG(ERR,
"Could not support DCB/RSS with VMDq & SRIOV");
return -1;
@@ -4378,6 +4522,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
bool rsc_capable = false;
uint16_t i;
uint32_t rdrxctl;
+ uint32_t rfctl;
/* Sanity check */
dev->dev_ops->dev_infos_get(dev, &dev_info);
@@ -4405,22 +4550,18 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
}
/* RFCTL configuration */
- if (rsc_capable) {
- uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-
- if (rx_conf->enable_lro)
- /*
- * Since NFS packets coalescing is not supported - clear
- * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
- * enabled.
- */
- rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
- IXGBE_RFCTL_NFSR_DIS);
- else
- rfctl |= IXGBE_RFCTL_RSC_DIS;
-
- IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
- }
+ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ if ((rsc_capable) && (rx_conf->enable_lro))
+ /*
+ * Since NFS packets coalescing is not supported - clear
+ * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
+ * enabled.
+ */
+ rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
+ IXGBE_RFCTL_NFSR_DIS);
+ else
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
if (!rx_conf->enable_lro)
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 2608b364..1ffab4cc 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -67,7 +67,7 @@
#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
#endif
-#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_IXGBE_DESCS_PER_LOOP - 1) * \
+#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_PMD_IXGBE_RX_MAX_BURST) * \
sizeof(union ixgbe_adv_rx_desc))
#ifdef RTE_PMD_PACKET_PREFETCH
@@ -80,6 +80,8 @@
#define RTE_IXGBE_WAIT_100_US 100
#define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
+#define IXGBE_TX_MAX_SEG 40
+
#define IXGBE_PACKET_TYPE_MASK_82599 0X7F
#define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
#define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
@@ -309,8 +311,8 @@ void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
#ifdef RTE_IXGBE_INC_VECTOR
-uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
#endif /* RTE_IXGBE_INC_VECTOR */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index a3473b98..1c34bb5f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -123,12 +123,12 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
* tx_next_dd - (tx_rs_thresh-1)
*/
txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -143,7 +143,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
@@ -310,13 +310,6 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0)
- return -1;
-#endif
-
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e2715cb9..44de1caa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -85,9 +85,6 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
/*
* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
*/
vst1_u8((uint8_t *)&mb0->rearm_data, p);
paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
@@ -114,14 +111,6 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-
#define VTAG_SHIFT (3)
static inline void
@@ -170,9 +159,6 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
rx_pkts[2]->ol_flags = vol.e[2];
rx_pkts[3]->ol_flags = vol.e[3];
}
-#else
-#define desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, rx_pkts)
-#endif
/*
* vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
@@ -330,12 +316,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
*(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;
split_packet += RTE_IXGBE_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);
@@ -451,8 +431,8 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
}
uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index abbf2841..a7bc199f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -82,23 +82,10 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
__m128i vaddr0, vaddr1;
- uintptr_t p0, p1;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /*
- * Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
- */
- p0 = (uintptr_t)&mb0->rearm_data;
- *(uint64_t *)p0 = rxq->mbuf_initializer;
- p1 = (uintptr_t)&mb1->rearm_data;
- *(uint64_t *)p1 = rxq->mbuf_initializer;
-
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
@@ -133,23 +120,12 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-
static inline void
-desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
+desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
struct rte_mbuf **rx_pkts)
{
__m128i ptype0, ptype1, vtag0, vtag1, csum;
- union {
- uint16_t e[4];
- uint64_t dword;
- } vol;
+ __m128i rearm0, rearm1, rearm2, rearm3;
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
@@ -228,18 +204,41 @@ desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
vtag1 = _mm_or_si128(vtag0, vtag1);
vtag1 = _mm_or_si128(ptype0, vtag1);
- vol.dword = _mm_cvtsi128_si64(vtag1);
- rx_pkts[0]->ol_flags = vol.e[0];
- rx_pkts[1]->ol_flags = vol.e[1];
- rx_pkts[2]->ol_flags = vol.e[2];
- rx_pkts[3]->ol_flags = vol.e[3];
-}
+ /*
+ * At this point, we have the 4 sets of flags in the low 64-bits
+ * of vtag1 (4x16).
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
+
#else
-#define desc_to_olflags_v(desc, vlan_flags, rx_pkts) do { \
- RTE_SET_USED(vlan_flags); \
- } while (0)
-#endif
+ rearm0 = _mm_slli_si128(vtag1, 14);
+ rearm1 = _mm_slli_si128(vtag1, 12);
+ rearm2 = _mm_slli_si128(vtag1, 10);
+ rearm3 = _mm_slli_si128(vtag1, 8);
+
+ rearm0 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm0, 48));
+ rearm1 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm1, 48));
+ rearm2 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm2, 48));
+ rearm3 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm3, 48));
+
+#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
/*
* vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
@@ -268,6 +267,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0, 0 /* ignore pkt_type field */
);
__m128i dd_check, eop_check;
+ __m128i mbuf_init;
uint8_t vlan_flags;
/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
@@ -313,6 +313,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0xFF, 0xFF
);
+ mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
*/
@@ -335,9 +337,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
@@ -345,11 +351,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf pointers */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -358,8 +366,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
@@ -385,7 +395,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
/* set ol_flags with vlan packet type */
- desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]);
+ desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -425,12 +435,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
split_packet += RTE_IXGBE_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
/* C.3 calc available number of desc */
@@ -537,8 +541,8 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
}
uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
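The comment added to desc_to_olflags_v() above describes shifting each packet's 16-bit flags into place and blending them with the mbuf init template so one 16-byte store fills rearm_data and ol_flags together. A standalone sketch of that blend, assuming SSE4.1 (compile with -msse4.1); the template and flag values below are demo data only:

#include <stdint.h>
#include <stdio.h>
#include <smmintrin.h> /* SSE4.1: _mm_blend_epi16 */

/*
 * Standalone illustration of the blend used above: take a 16-byte mbuf
 * template in an XMM register and overwrite only the 16-bit lane that
 * corresponds to ol_flags with a per-packet value. Lane 4 (mask 0x10)
 * matches the layout assumed by the driver; the data is arbitrary.
 */
int main(void)
{
	uint16_t tmpl[8] = { 0x1111, 0x2222, 0x3333, 0x4444,
			     0x0000, 0x5555, 0x6666, 0x7777 };
	uint16_t out[8];

	__m128i mbuf_init = _mm_loadu_si128((const __m128i *)tmpl);
	/* pretend packet 0's 16-bit flags sit in the low lane of vtag1 */
	__m128i vtag1 = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, 0x00ab);
	/* shift the flags up to lane 4, then blend into the template */
	__m128i rearm0 = _mm_blend_epi16(mbuf_init,
					 _mm_slli_si128(vtag1, 8), 0x10);

	_mm_storeu_si128((__m128i *)out, rearm0);
	printf("lane 4 now holds 0x%04x\n", (unsigned)out[4]); /* 0x00ab */
	return 0;
}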
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
new file mode 100644
index 00000000..e8fc9a64
--- /dev/null
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -0,0 +1,910 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "rte_pmd_ixgbe.h"
+
+int
+rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ int rar_entry;
+ uint8_t *new_mac = (uint8_t *)(mac_addr);
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+ ETHER_ADDR_LEN);
+ return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
+ IXGBE_RAH_AV);
+ }
+ return -EINVAL;
+}
+
+int
+rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+ ctrl = IXGBE_PF_CONTROL_MSG;
+ if (vfinfo[vf].clear_to_send)
+ ctrl |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, &ctrl, 1, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+
+ mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+ mac->ops.set_mac_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (vlan_id > ETHER_MAX_VLAN_ID)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+ if (vlan_id) {
+ ctrl = vlan_id;
+ ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
+ } else {
+ ctrl = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ /* enable or disable VMDQ loopback */
+ if (on)
+ ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
+ else
+ ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ int i;
+ int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ for (i = 0; i <= num_queues; i++) {
+ reg_value = IXGBE_QDE_WRITE |
+ (i << IXGBE_QDE_IDX_SHIFT) |
+ (on & IXGBE_QDE_ENABLE);
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ /* only support VFs 0 to 63 */
+ if ((vf >= pci_dev->max_vfs) || (vf > 63))
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
+ if (on)
+ reg_value |= IXGBE_SRRCTL_DROP_EN;
+ else
+ reg_value &= ~IXGBE_SRRCTL_DROP_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+ uint16_t queues_per_pool;
+ uint32_t q;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+ /* The PF has 128 queue pairs; in an SRIOV configuration
+ * those queues are assigned to the VFs, so the RXDCTL
+ * registers will be dealing with queues that belong to
+ * the VFs.
+ * For example, with SRIOV configured for 31 VFs, the first
+ * 124 queues (0-123) are allocated to the VFs and only the
+ * last 4 queues (124-127) remain assigned to the PF.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+ ETH_16_POOLS;
+ else
+ queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+ ETH_64_POOLS;
+
+ for (q = 0; q < queues_per_pool; q++)
+ (*dev->dev_ops->vlan_strip_queue_set)(dev,
+ q + vf * queues_per_pool, on);
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on)
+{
+ int val = 0;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+ uint32_t vmolr;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
+ " on 82599 hardware and newer");
+ return -ENOTSUP;
+ }
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+ if (on)
+ vmolr |= val;
+ else
+ vmolr &= ~val;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFRE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFRE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFTE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFTE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct rte_eth_dev *dev;
+ int ret = 0;
+ uint16_t vf_idx;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ for (vf_idx = 0; vf_idx < 64; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
+ vlan_on, false);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Stop the data paths */
+ if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+ return -ENOTSUP;
+ /**
+ * Workaround:
+ * As no ixgbe_disable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK,
+ * just call the hand-written one directly for now.
+ * The hardware support has been checked by
+ * ixgbe_disable_sec_rx_path().
+ */
+ ixgbe_disable_sec_tx_path_generic(hw);
+
+ /* Enable Ethernet CRC (required by MACsec offload) */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+ /* Enable the TX and RX crypto engines */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+ ctrl |= 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+ /* Enable SA lookup */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+ ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+ ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+ IXGBE_LSECTXCTRL_AUTH;
+ ctrl |= IXGBE_LSECTXCTRL_AISCI;
+ ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+ ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+ ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+ ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+ if (rp)
+ ctrl |= IXGBE_LSECRXCTRL_RP;
+ else
+ ctrl &= ~IXGBE_LSECRXCTRL_RP;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+ /* Start the data paths */
+ ixgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * As no ixgbe_enable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK,
+ * just call the hand-written one directly for now.
+ */
+ ixgbe_enable_sec_tx_path_generic(hw);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Stop the data paths */
+ if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+ return -ENOTSUP;
+ /**
+ * Workaround:
+ * As no ixgbe_disable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK,
+ * just call the hand-written one directly for now.
+ * The hardware support has been checked by
+ * ixgbe_disable_sec_rx_path().
+ */
+ ixgbe_disable_sec_tx_path_generic(hw);
+
+ /* Disable the TX and RX crypto engines */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+ /* Disable SA lookup */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+ ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+ ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+ /* Start the data paths */
+ ixgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * As no ixgbe_enable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK,
+ * just call the hand-written one directly for now.
+ */
+ ixgbe_enable_sec_tx_path_generic(hw);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+ ctrl = mac[4] | (mac[5] << 8);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+ pi = rte_cpu_to_be_16(pi);
+ ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Set the PN and key */
+ pn = rte_cpu_to_be_32(pn);
+ if (idx == 0) {
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+ }
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+ }
+ }
+
+ /* Set AN and select the SA */
+ ctrl = (an << idx * 2) | (idx << 4);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ /* Set the PN */
+ pn = rte_cpu_to_be_32(pn);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+ /* Set the key */
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+ }
+
+ /* Set the AN and validate the SA */
+ ctrl = an | (1 << 2);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight)
+{
+ struct rte_eth_dev *dev;
+ struct ixgbe_dcb_config *dcb_config;
+ struct ixgbe_dcb_tc_config *tc;
+ struct rte_eth_conf *eth_conf;
+ struct ixgbe_bw_conf *bw_conf;
+ uint8_t i;
+ uint8_t nb_tcs;
+ uint16_t sum;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+ IXGBE_DCB_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
+ eth_conf = &dev->data->dev_conf;
+
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ if (nb_tcs != tc_num) {
+ PMD_DRV_LOG(ERR,
+ "Weight should be set for all %d enabled TCs.",
+ nb_tcs);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < nb_tcs; i++)
+ sum += bw_weight[i];
+ if (sum != 100) {
+ PMD_DRV_LOG(ERR,
+ "The summary of the TC weight should be 100.");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
+ }
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ }
+
+ bw_conf->tc_num = nb_tcs;
+
+ return 0;
+}
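The MACsec helpers added above are intended to be called in sequence by an application. A hedged usage sketch for one port; the MAC addresses, key, packet numbers and the setup_macsec_example() wrapper are placeholders, and a real deployment would use negotiated parameters:

#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

/*
 * Illustrative MACsec bring-up on one port. All addresses, keys and
 * packet numbers are dummy values; error handling is early-return only.
 */
static int
setup_macsec_example(uint8_t port)
{
	uint8_t local_mac[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	uint8_t remote_mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
	uint8_t key[16] = { 0 }; /* dummy 128-bit key */
	int ret;

	/* encrypt + integrity, with replay protection enabled */
	ret = rte_pmd_ixgbe_macsec_enable(port, 1, 1);
	if (ret != 0)
		return ret;

	/* local (Tx) and remote (Rx) secure channels */
	ret = rte_pmd_ixgbe_macsec_config_txsc(port, local_mac);
	if (ret != 0)
		return ret;
	ret = rte_pmd_ixgbe_macsec_config_rxsc(port, remote_mac, 1 /* PI */);
	if (ret != 0)
		return ret;

	/* one Tx and one Rx secure association: index 0, AN 0, PN 1 */
	ret = rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, 1, key);
	if (ret != 0)
		return ret;
	return rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, 1, key);
}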
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb8261..1f2b1bd7 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -42,6 +42,20 @@
#include <rte_ethdev.h>
/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf);
+
+/**
* Set the VF MAC address.
*
* @param port
@@ -183,6 +197,237 @@ int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
/**
+ * Enable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param en
+ * 1 - Enable encryption (encrypt and add integrity signature).
+ * 0 - Disable encryption (only add integrity signature).
+ * @param rp
+ * 1 - Enable replay protection.
+ * 0 - Disable replay protection.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure Tx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure Rx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the remote side.
+ * @param pi
+ * The PI (port identifier) on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable Tx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1).
+ * @param an
+ * The association number on the local side.
+ * @param pn
+ * The packet number on the local side.
+ * @param key
+ * The key on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+ * Enable Rx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1)
+ * @param an
+ * The association number on the remote side.
+ * @param pn
+ * The packet number on the remote side.
+ * @param key
+ * The key on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+* Set RX L2 Filtering mode of a VF of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param rx_mask
+* The RX mode mask, made up of one or more of: accept untagged packets,
+* accept packets matching the PFUTA table, broadcast promiscuous and
+* multicast promiscuous. The corresponding flags are ETH_VMDQ_ACCEPT_UNTAG,
+* ETH_VMDQ_ACCEPT_HASH_UC, ETH_VMDQ_ACCEPT_BROADCAST and
+* ETH_VMDQ_ACCEPT_MULTICAST, combined in rx_mask.
+* @param on
+* 1 - Enable a VF RX mode.
+* 0 - Disable a VF RX mode.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on);
+
+/**
+* Enable or disable traffic receive for a VF of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic receive.
+* 0 - Disable a VF traffic receive.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable or disable traffic transmit for a VF of the Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic transmit.
+* 0 - Disable a VF traffic transmit.
+* @return
+* - (0) if successful.
+* - (-ENODEV) if *port_id* invalid.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable/Disable hardware VF VLAN filtering by an Ethernet device of
+* received VLAN packets tagged with a given VLAN Tag Identifier.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vlan
+* The VLAN Tag Identifier whose filtering must be enabled or disabled.
+* @param vf_mask
+* Bitmap listing which VFs participate in the VLAN filtering.
+* @param vlan_on
+* 1 - Enable VFs VLAN filtering.
+* 0 - Disable VFs VLAN filtering.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, uint64_t vf_mask, uint8_t vlan_on);
+
+/**
+ * Set the rate limit for a VF on an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param tx_rate
+ * The tx rate allocated from the total link speed for this VF id.
+ * @param q_msk
+ * The mask of queues on which the rate needs to be set.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk);
+
+/**
+ * Set all the TCs' bandwidth weight.
+ *
+ * The bw_weight is the percentage of bandwidth occupied by the TC.
+ * It can be taken as a relative minimum bandwidth setting.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param tc_num
+ * Number of TCs.
+ * @param bw_weight
+ * An array of relative bandwidth weights for all the TCs.
+ * The sum of the weights should be 100.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight);
+
+/**
* Response sent back to ixgbe driver from user app after callback
*/
enum rte_pmd_ixgbe_mb_event_rsp {
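A hedged sketch of how an application might combine the VF-facing calls declared above; the port, VF index, VLAN ID, rate and TC weights are placeholders, and rte_pmd_ixgbe_set_tc_bw_alloc() assumes the port was already configured for 4 DCB traffic classes:

#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

/* Illustrative VF management for one port/VF. Values are placeholders. */
static int
configure_vf_example(uint8_t port, uint16_t vf)
{
	uint8_t bw_weight[4] = { 10, 20, 30, 40 }; /* must sum to 100 */
	int ret;

	/* accept untagged and broadcast traffic on this VF */
	ret = rte_pmd_ixgbe_set_vf_rxmode(port, vf,
			ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST, 1);
	if (ret != 0)
		return ret;

	/* filter VLAN 100 for this VF only */
	ret = rte_pmd_ixgbe_set_vf_vlan_filter(port, 100, 1ULL << vf, 1);
	if (ret != 0)
		return ret;

	/* cap the VF at 1000 Mbps on all of its queues */
	ret = rte_pmd_ixgbe_set_vf_rate_limit(port, vf, 1000, ~0ULL);
	if (ret != 0)
		return ret;

	/* split Tx bandwidth across 4 traffic classes (weights sum to 100) */
	return rte_pmd_ixgbe_set_tc_bw_alloc(port, 4, bw_weight);
}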
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3f..45a57e33 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,26 @@ DPDK_16.11 {
rte_pmd_ixgbe_set_vf_vlan_insert;
rte_pmd_ixgbe_set_vf_vlan_stripq;
} DPDK_2.0;
+
+DPDK_17.02 {
+ global:
+
+ rte_pmd_ixgbe_macsec_config_rxsc;
+ rte_pmd_ixgbe_macsec_config_txsc;
+ rte_pmd_ixgbe_macsec_disable;
+ rte_pmd_ixgbe_macsec_enable;
+ rte_pmd_ixgbe_macsec_select_rxsa;
+ rte_pmd_ixgbe_macsec_select_txsa;
+ rte_pmd_ixgbe_set_vf_rate_limit;
+ rte_pmd_ixgbe_set_vf_rx;
+ rte_pmd_ixgbe_set_vf_rxmode;
+ rte_pmd_ixgbe_set_vf_tx;
+ rte_pmd_ixgbe_set_vf_vlan_filter;
+} DPDK_16.11;
+
+DPDK_17.05 {
+ global:
+
+ rte_pmd_ixgbe_ping_vf;
+ rte_pmd_ixgbe_set_tc_bw_alloc;
+} DPDK_17.02;