Diffstat (limited to 'drivers/net/i40e')
-rw-r--r--  drivers/net/i40e/Makefile                  |    6
-rw-r--r--  drivers/net/i40e/base/README               |    2
-rw-r--r--  drivers/net/i40e/base/i40e_adminq.c        |   12
-rw-r--r--  drivers/net/i40e/base/i40e_adminq_cmd.h    |   71
-rw-r--r--  drivers/net/i40e/base/i40e_common.c        |  549
-rw-r--r--  drivers/net/i40e/base/i40e_devids.h        |    1
-rw-r--r--  drivers/net/i40e/base/i40e_nvm.c           |   20
-rw-r--r--  drivers/net/i40e/base/i40e_prototype.h     |   33
-rw-r--r--  drivers/net/i40e/base/i40e_register.h      |    2
-rw-r--r--  drivers/net/i40e/base/i40e_type.h          |   49
-rw-r--r--  drivers/net/i40e/base/i40e_virtchnl.h      |  445
-rw-r--r--  drivers/net/i40e/base/virtchnl.h           |  772
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c             |  322
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h             |  130
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c          |  324
-rw-r--r--  drivers/net/i40e/i40e_fdir.c               |   56
-rw-r--r--  drivers/net/i40e/i40e_flow.c               | 2780
-rw-r--r--  drivers/net/i40e/i40e_pf.c                 |  277
-rw-r--r--  drivers/net/i40e/i40e_pf.h                 |   37
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c               |   10
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_common.h    |    4
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_sse.c       |   38
-rw-r--r--  drivers/net/i40e/i40e_tm.c                 |  976
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.c            |  258
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.h            |   54
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e_version.map  |    7
26 files changed, 5723 insertions(+), 1512 deletions(-)
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 56f210d6..55c79a60 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -109,11 +109,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
-
-# vector PMD driver needs SSE4.1 support
-ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
-CFLAGS_i40e_rxtx_vec_sse.o += -msse4.1
-endif
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
index 0da9f674..59e76c21 100644
--- a/drivers/net/i40e/base/README
+++ b/drivers/net/i40e/base/README
@@ -34,7 +34,7 @@ Intel® I40E driver
==================
This directory contains source code of FreeBSD i40e driver of version
-cid-i40e.2017.03.21.tar.gz released by the team which develops
+cid-i40e.2017.06.23.tar.gz released by the team which develops
basic drivers for any i40e NIC. The directory of base/ contains the
original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index a60292a3..8cc8c5ec 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -682,6 +682,18 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 7)))
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ }
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
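Both additions gate a capability flag on the negotiated AQ API version rather than on hardware probing. A minimal standalone sketch of that version-gating pattern, with simplified stand-in types (not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    struct fw_api_ver { uint16_t maj; uint16_t min; };

    /* True when the negotiated firmware API is at least maj.min. */
    static bool api_at_least(struct fw_api_ver v, uint16_t maj, uint16_t min)
    {
        return v.maj > maj || (v.maj == maj && v.min >= min);
    }

    /* Usage mirroring the hunk above: 802.1ad RX support arrived in 1.7,
     * so: if (api_at_least(ver, 1, 7)) flags |= HW_FLAG_802_1AD_CAPABLE; */
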
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 09f5bf5c..c36da2a3 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -41,7 +41,15 @@ POSSIBILITY OF SUCH DAMAGE.
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
struct i40e_aq_desc {
__le16 flags;
@@ -245,6 +253,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
@@ -777,7 +787,22 @@ struct i40e_aqc_set_switch_config {
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
__le16 valid_flags;
- u8 reserved[12];
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ u8 reserved[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
@@ -1829,6 +1854,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
I40E_PHY_TYPE_10GBASE_AOC = 0xC,
I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1847,7 +1874,11 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_MAX
+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_EMPTY = 0xFE,
+ I40E_PHY_TYPE_DEFAULT = 0xFF,
};
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
@@ -1904,6 +1935,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -2033,19 +2066,31 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7 loopback field keeps power class info as well */
+#define I40E_AQ_LOOPBACK_MASK 0x07
+#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
+#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 power_desc;
+ union {
+ struct {
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -2128,6 +2173,22 @@ struct i40e_aqc_run_phy_activity {
I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct i40e_aqc_phy_register_access {
+ u8 phy_interface;
+#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+ u8 dev_addres;
+ u8 reserved1[2];
+ u32 reg_address;
+ u32 reg_value;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
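As the comment in the get-link-status hunk notes, firmware API 1.7 packs the module power class into bits 7:6 of the former loopback byte. A short sketch of unpacking both fields with the masks defined above (renamed locally for brevity):

    #include <stdint.h>

    /* Layout of resp->loopback for firmware API >= 1.7. */
    #define LB_MODE_MASK    0x07                       /* loopback mode */
    #define PWR_CLASS_SHIFT 6
    #define PWR_CLASS_MASK  (0x03 << PWR_CLASS_SHIFT)  /* power class  */

    static inline uint8_t lb_mode(uint8_t loopback)
    {
        return loopback & LB_MODE_MASK;
    }

    static inline uint8_t lb_power_class(uint8_t loopback)
    {
        /* Yields 0..3, matching I40E_AQ_LINK_POWER_CLASS_1..4. */
        return (loopback & PWR_CLASS_MASK) >> PWR_CLASS_SHIFT;
    }
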
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 03e94bc8..900d379c 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -34,7 +34,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/**
@@ -93,6 +93,7 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
+ case I40E_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF;
break;
#endif
@@ -329,13 +330,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = LE16_TO_CPU(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
+ u16 len;
u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
+ len = LE16_TO_CPU(aq_desc->datalen);
+
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(aq_desc->opcode),
@@ -1295,6 +1298,8 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1377,6 +1382,8 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
+
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
@@ -1384,6 +1391,12 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Core reset upcoming.\n");
+ DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
i40e_msec_delay(1);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
@@ -1689,8 +1702,15 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
status = I40E_ERR_UNKNOWN_PHY;
if (report_init) {
- hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
- hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ } else {
+ hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |=
+ ((u64)abilities->phy_type_ext << 32);
+ }
}
return status;
@@ -1952,7 +1972,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
- hw_link_info->loopback = resp->loopback;
+ hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
@@ -1983,6 +2003,12 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= 7) {
+ hw->phy.phy_types = LE32_TO_CPU(*(__le32 *)resp->link_type);
+ hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
+ }
+
/* save link status information */
if (link)
i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
@@ -2679,7 +2705,11 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
i40e_aqc_opc_set_switch_config);
scfg->flags = CPU_TO_LE16(flags);
scfg->valid_flags = CPU_TO_LE16(valid_flags);
-
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
+ scfg->first_tag = CPU_TO_LE16(hw->first_tag);
+ scfg->second_tag = CPU_TO_LE16(hw->second_tag);
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -2825,6 +2855,10 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+
i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
@@ -5523,7 +5557,7 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
if (mac_addr)
- i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+ i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
cmd->etype = CPU_TO_LE16(ethtype);
@@ -6654,24 +6688,38 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 temp_addr;
u8 port_num;
u32 i;
-
- temp_addr = I40E_PHY_LED_PROV_REG_1;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- temp_addr++) {
- status = i40e_read_phy_register_clause45(hw,
+ u32 reg_val_aq;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &reg_val_aq, NULL);
+ if (status)
+ return status;
+ *val = (u16)reg_val_aq;
+ } else {
+ temp_addr = I40E_PHY_LED_PROV_REG_1;
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ temp_addr++) {
+ status =
+ i40e_read_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
temp_addr, phy_addr,
&reg_val);
- if (status)
- return status;
- *val = reg_val;
- if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
- *led_addr = temp_addr;
- break;
+ if (status)
+ return status;
+ *val = reg_val;
+ if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+ *led_addr = temp_addr;
+ break;
+ }
}
}
return status;
@@ -6689,51 +6737,115 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode)
{
enum i40e_status_code status = I40E_SUCCESS;
- u16 led_ctl = 0;
- u16 led_reg = 0;
+ u32 led_ctl = 0;
+ u32 led_reg = 0;
u8 phy_addr = 0;
u8 port_num;
u32 i;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &led_reg, NULL);
+ } else {
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)&led_reg);
+ }
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_reg, NULL);
+ } else {
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_reg);
+ }
if (status)
return status;
}
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &led_reg, NULL);
+ } else {
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)&led_reg);
+ }
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_reg, NULL);
+ } else {
+ status =
+ i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_reg);
+ }
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_ctl, NULL);
+ } else {
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_ctl);
+ }
}
return status;
restore_config:
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_ctl, NULL);
+ } else {
+ status =
+ i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_ctl);
+ }
return status;
}
#endif /* PF_DRIVER */
@@ -6863,6 +6975,76 @@ do_retry:
if (status || use_register)
wr32(hw, reg_addr, reg_val);
}
+
+/**
+ * i40e_aq_set_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = reg_addr;
+ cmd->reg_value = reg_val;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = reg_addr;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = cmd->reg_value;
+
+ return status;
+}
+
#ifdef VF_DRIVER
/**
@@ -6879,7 +7061,7 @@ do_retry:
* completion before returning.
**/
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
@@ -6918,9 +7100,9 @@ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg)
+ struct virtchnl_vf_resource *msg)
{
- struct i40e_virtchnl_vsi_resource *vsi_res;
+ struct virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
@@ -6930,19 +7112,17 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_L2;
- hw->dev_caps.fcoe = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ VIRTCHNL_VF_OFFLOAD_L2;
hw->dev_caps.iwarp = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
- if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
i40e_memcpy(hw->mac.perm_addr,
vsi_res->default_mac_addr,
- I40E_ETH_LENGTH_OF_ADDRESS,
+ ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
- I40E_ETH_LENGTH_OF_ADDRESS,
+ ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
}
vsi_res++;
@@ -6959,7 +7139,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
**/
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
{
- return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
I40E_SUCCESS, NULL, 0, NULL);
}
#endif /* VF_DRIVER */
@@ -7261,6 +7441,165 @@ i40e_find_segment_in_package(u32 segment_type,
return NULL;
}
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct i40e_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset) \
+ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
+
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_I40E)
+ return NULL;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+STATIC enum
+i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+
+ i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= CPU_TO_LE16(aq->flags);
+ i40e_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw),
+ I40E_NONDMA_TO_NONDMA);
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status != I40E_SUCCESS) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ i40e_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw),
+ I40E_NONDMA_TO_NONDMA);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+STATIC enum i40e_status_code
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && (i == dev_cnt)) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
/**
* i40e_write_profile
* @hw: pointer to the hardware structure
@@ -7276,52 +7615,99 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
enum i40e_status_code status = I40E_SUCCESS;
struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
- u32 dev_cnt;
- u32 vendor_dev_id;
- u32 *nvm;
+ struct i40e_profile_aq_section *ddp_aq;
u32 section_size = 0;
u32 offset = 0, info = 0;
+ u32 sec_off;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
+ status = i40e_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
- dev_cnt = profile->device_table_count;
+ I40E_SECTION_TABLE(profile, sec_tbl);
- for (i = 0; i < dev_cnt; i++) {
- vendor_dev_id = profile->device_table[i].vendor_dev_id;
- if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID)
- if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+ status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
}
- if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
+ return status;
+}
- nvm = (u32 *)&profile->device_table[dev_cnt];
- sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ u32 sec_off;
+ int i;
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec = (struct i40e_profile_section_header *)((u8 *)profile +
- sec_tbl->section_offset[i]);
+ status = i40e_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
- /* Skip 'AQ', 'note' and 'name' sections */
- if (sec->section.type != SECTION_TYPE_MMIO)
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
continue;
section_size = sec->section.size +
sizeof(struct i40e_profile_section_header);
- /* Write profile */
+ /* Write roll-back MMIO section */
status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
- "Failed to write profile: offset %d, info %d",
- offset, info);
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
break;
}
}
@@ -7359,9 +7745,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
pinfo->track_id = track_id;
pinfo->version = profile->version;
pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+ i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE,
+ I40E_NONDMA_TO_NONDMA);
status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
+ track_id, &offset, &info, NULL);
return status;
}
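The LED rework above repeats one dispatch throughout: when I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE is set, PHY registers are accessed through the new firmware AQ commands; otherwise the driver falls back to direct clause-45 MDIO. A hypothetical wrapper showing that pattern for reads, using only signatures from this patch (the helper itself is not part of the patch):

    static enum i40e_status_code
    phy_reg_read(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u32 *val)
    {
        enum i40e_status_code status;
        u16 tmp;

        if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)
            /* Firmware proxies the access (opcode 0x0629); no
             * direct MDIO ownership is needed. */
            return i40e_aq_get_phy_register(hw,
                            I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
                            page, reg, val, NULL);

        /* Legacy path: direct clause-45 MDIO read. */
        status = i40e_read_phy_register_clause45(hw, page, reg,
                                                 phy_addr, &tmp);
        if (!status)
            *val = tmp;
        return status;
    }
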
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index 4546689a..f4a87842 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -54,6 +54,7 @@ POSSIBILITY OF SUCH DAMAGE.
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#endif /* VF_DRIVER */
#ifdef X722_A0_SUPPORT
#define I40E_DEV_ID_X722_A0 0x374C
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index e8965024..a1e78300 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -749,12 +749,18 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
DEBUGFUNC("i40e_validate_nvm_checksum");
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ /* acquire_nvm provides exclusive NVM lock to synchronize access across
+ * PFs. X710 uses i40e_read_nvm_word_srctl which polls for done bit
+ * twice (first time to be able to write address to I40E_GLNVM_SRCTL
+ * register, second to read data from I40E_GLNVM_SRDATA. One PF can see
+ * done bit and try to write address, while another one will interpret
+ * it as a good time to read data. It will cause invalid data to be
+ * read.
+ */
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- i40e_release_nvm(hw);
+ i40e_release_nvm(hw);
if (ret_code != I40E_SUCCESS)
goto i40e_validate_nvm_checksum_exit;
} else {
@@ -899,6 +905,11 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated
+ */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -934,6 +945,7 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
return status;
}
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 4bd589e7..acb2023f 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -36,7 +36,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -504,10 +504,10 @@ void i40e_destroy_spinlock(struct i40e_spinlock *sp);
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -533,6 +533,15 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
@@ -566,19 +575,27 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
enum i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *
+ cmd_details);
enum i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile);
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index b150fbdf..a482ab90 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -2805,7 +2805,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 84d57576..dca725af 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -92,7 +92,9 @@ POSSIBILITY OF SUCH DAMAGE.
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
/* Data type manipulation macros. */
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
@@ -197,10 +199,6 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0005
-
-
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
@@ -269,6 +267,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 req_fec_info;
u8 fec_info;
u8 ext_info;
u8 loopback;
@@ -439,10 +438,10 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -701,8 +700,15 @@ struct i40e_hw {
u16 wol_proxy_vsi_seid;
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
u64 flags;
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
@@ -1912,8 +1918,10 @@ struct i40e_generic_seg_header {
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_RDONLY 0
+#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
u32 track_id;
- char name[I40E_DDP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1940,15 +1948,36 @@ struct i40e_profile_section_header {
struct {
#define SECTION_TYPE_INFO 0x00000010
#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
#define SECTION_TYPE_NOTE 0x80000000
#define SECTION_TYPE_NAME 0x80000001
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
u32 type;
u32 offset;
u32 size;
} section;
};
+struct i40e_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct i40e_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
struct i40e_profile_info {
u32 track_id;
struct i40e_ddp_version version;
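The three new tag fields in struct i40e_hw feed i40e_aq_set_switch_config() (see the i40e_common.c hunk earlier). A sketch of how a PF driver might initialize them before the first switch-config call; this is an illustrative assumption, since the patch leaves the policy to callers, and zero simply selects the hardware defaults described in the AQ command definition:

    /* Sketch only: choose ethertypes before the first set_switch_config.
     * Zero means "use the hardware default" per the command definition
     * (0x88a8 for switch_tag, 0x8100 for first_tag/second_tag). */
    if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
        hw->switch_tag = 0;   /* internal S-tag ethertype */
        hw->first_tag  = 0;   /* outer VLAN match ethertype */
        hw->second_tag = 0;   /* inner VLAN match ethertype */
    }
    status = i40e_aq_set_switch_config(hw, flags, valid_flags, NULL);
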
diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h
deleted file mode 100644
index 7a24c0f1..00000000
--- a/drivers/net/i40e/base/i40e_virtchnl.h
+++ /dev/null
@@ -1,445 +0,0 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the vsi indexes are relative to the VF. Each VF can have maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in the i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
-#ifdef I40E_SOL_VF_SUPPORT
- I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
-#endif
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum i40e_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these message to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-#ifdef I40E_SOL_VF_SUPPORT
-/* I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
- * VF sends this message to get the default MTU and list of additional ethernet
- * addresses it is allowed to use.
- * PF responds with an indirect message containing
- * i40e_virtchnl_addnl_solaris_config with zero or more
- * i40e_virtchnl_ether_addr structures.
- *
- * It is expected that this operation will only ever be needed for Solaris VFs
- * running under a Solaris PF.
- */
-struct i40e_virtchnl_addnl_solaris_config {
- u16 default_mtu;
- struct i40e_virtchnl_ether_addr_list al;
-};
-
-#endif
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the rss fields in
- * the vf resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
-#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/i40e/base/virtchnl.h b/drivers/net/i40e/base/virtchnl.h
new file mode 100644
index 00000000..f00dd360
--- /dev/null
+++ b/drivers/net/i40e/base/virtchnl.h
@@ -0,0 +1,772 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum
+ * of three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
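To make the initialization order described above concrete, here is a minimal bring-up sketch; vf_send() is a hypothetical helper standing in for the driver's admin-queue send/receive path, not a real virtchnl or DPDK API.

    /* Hypothetical helper: send one opcode, wait for the PF's v_retval. */
    int vf_send(void *aq, enum virtchnl_ops op);

    int vf_bring_up(void *aq)
    {
            if (vf_send(aq, VIRTCHNL_OP_VERSION))           /* validate API version */
                    return -1;
            vf_send(aq, VIRTCHNL_OP_RESET_VF);              /* no reply; poll VFGEN_RSTAT */
            if (vf_send(aq, VIRTCHNL_OP_GET_VF_RESOURCES))
                    return -1;
            if (vf_send(aq, VIRTCHNL_OP_CONFIG_VSI_QUEUES))
                    return -1;
            if (vf_send(aq, VIRTCHNL_OP_CONFIG_IRQ_MAP))
                    return -1;
            /* queues may now be started; MAC/VLAN filters are optional */
            return vf_send(aq, VIRTCHNL_OP_ENABLE_QUEUES);
    }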
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+};
+
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+ VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
+#endif
+#ifdef VIRTCHNL_IWARP
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+#endif
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
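For instance, the first check below, VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg), expands to:

    enum virtchnl_static_assert_enum_virtchnl_msg {
            virtchnl_static_assert_virtchnl_msg =
                    (20) / ((sizeof(struct virtchnl_msg) == (20)) ? 1 : 0)
    };

If the size matches, the divisor is 1 and the enum is harmless; if not, the constant expression divides by zero and compilation fails.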
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
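A sketch of the negotiation rule described above, assuming the PF's reply has already been copied into pf_ver:

    static int vf_check_api(const struct virtchnl_version_info *pf_ver)
    {
            if (pf_ver->major != VIRTCHNL_VERSION_MAJOR)
                    return -1;      /* major mismatch: VF cannot operate */
            /* minor mismatch: the VF may operate but should log a warning */
            return 0;
    }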
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF offload flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0x00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0x00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0x00400000
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
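A VF deciding how to program RSS might test the negotiated flags roughly like this sketch, where res is the virtchnl_vf_resource returned by GET_VF_RESOURCES:

    static int vf_rss_over_virtchnl(const struct virtchnl_vf_resource *res)
    {
            /* Only if both sides negotiated RSS_PF are rss_key_size,
             * rss_lut_size and the CONFIG_RSS_KEY/LUT opcodes usable;
             * otherwise fall back to RSS_REG or RSS_AQ if offered.
             */
            return (res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) != 0;
    }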
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u32 pad1;
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init, but the
+ * ability to request additional queues must be negotiated. This is a
+ * best-effort request, as it is possible the PF does not have enough
+ * queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support; otherwise it will respond with the
+ * number requested.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
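A sketch of filling one map entry per the comment above -- vector 1 takes Rx/Tx queue 0, while the "other" causes stay on vector 0:

    static void map_q0_to_vec1(struct virtchnl_vector_map *vm, u16 vsi_id)
    {
            vm->vsi_id = vsi_id;
            vm->vector_id = 1;
            vm->rxq_map = 1 << 0;   /* queue bitmaps, relative to the VSI */
            vm->txq_map = 1 << 0;
            vm->rxitr_idx = 0;      /* ITR index for Rx interrupts */
            vm->txitr_idx = 0;      /* ITR index for Tx interrupts */
    }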
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
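For example, selecting the first num_qps queue pairs of a VSI could look like this sketch (num_qps is at most 16 today, so the shift is safe):

    static void fill_queue_select(struct virtchnl_queue_select *qs,
                                  u16 vsi_id, unsigned int num_qps)
    {
            qs->vsi_id = vsi_id;
            qs->pad = 0;
            qs->rx_queues = (u32)((1UL << num_qps) - 1); /* bitmap, not a count */
            qs->tx_queues = qs->rx_queues;
    }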
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+/* VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
+ * VF sends this message to get the default MTU and list of additional ethernet
+ * addresses it is allowed to use.
+ * PF responds with an indirect message containing
+ * virtchnl_addnl_solaris_config with zero or more
+ * virtchnl_ether_addr structures.
+ *
+ * It is expected that this operation will only ever be needed for Solaris VFs
+ * running under a Solaris PF.
+ */
+struct virtchnl_addnl_solaris_config {
+ u16 default_mtu;
+ struct virtchnl_ether_addr_list al;
+};
+
+#endif
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct eth_stats in an external buffer.
+ */
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
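Both structs end in a one-element flexible array, so a full message is the struct size plus the payload length minus the one byte already counted. A sketch of the sizing, matching the key_len - 1 arithmetic in virtchnl_vc_validate_vf_msg() further down:

    static u16 rss_key_msg_len(u16 key_len)
    {
            return sizeof(struct virtchnl_rss_key) + key_len - 1;
    }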
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ struct {
+ enum virtchnl_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+#ifdef VIRTCHNL_IWARP
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it, although
+ * there is a single AEQ per VF IWARP instance, in which case
+ * most vectors will have an INVALID_IDX for aeq and a valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define QUEUE_TYPE_PE_AEQ 0x80
+#define QUEUE_INVALID_IDX 0xFFFF
+
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+#endif
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+#ifdef VIRTCHNL_IWARP
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+#endif
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_ERR_PARAM;
+ }
+ /* few more checks */
+ if ((valid_len != msglen) || (err_msg_format))
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
+#endif /* _VIRTCHNL_H_ */
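On the PF side this validator would typically gate dispatch of every incoming VF message; a sketch, with the surrounding names hypothetical:

    int pf_handle_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
                         u8 *msg, u16 msglen)
    {
            int err = virtchnl_vc_validate_vf_msg(ver, v_opcode, msg, msglen);

            if (err)
                    return err;     /* ERR_PARAM or OPCODE_MISMATCH back to VF */
            /* ...dispatch on v_opcode... */
            return 0;
    }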
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fd7d3475..5f26e24a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -74,7 +74,7 @@
/* Maximun number of capability elements */
#define I40E_MAX_CAP_ELE_NUM 128
-/* Wait count and inteval */
+/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT 1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000
@@ -515,6 +515,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.get_eeprom = i40e_get_eeprom,
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set = i40e_dev_mtu_set,
+ .tm_ops_get = i40e_tm_ops_get,
};
/* store statistics names and its offset in stats structure */
@@ -879,7 +880,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
static void
config_floating_veb(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -919,7 +920,7 @@ i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
/* Initialize ethertype filter rule list and hash */
TAILQ_INIT(&ethertype_rule->ethertype_list);
snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
- "ethertype_%s", dev->data->name);
+ "ethertype_%s", dev->device->name);
ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
if (!ethertype_rule->hash_table) {
PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
@@ -964,7 +965,7 @@ i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
/* Initialize tunnel filter rule list and hash */
TAILQ_INIT(&tunnel_rule->tunnel_list);
snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
- "tunnel_%s", dev->data->name);
+ "tunnel_%s", dev->device->name);
tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
if (!tunnel_rule->hash_table) {
PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
@@ -1009,7 +1010,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
/* Initialize flow director filter rule list and hash */
TAILQ_INIT(&fdir_info->fdir_list);
snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
- "fdir_%s", dev->data->name);
+ "fdir_%s", dev->device->name);
fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
if (!fdir_info->hash_table) {
PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
@@ -1061,7 +1062,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
return 0;
}
i40e_set_default_ptype_table(dev);
- pci_dev = I40E_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
@@ -1142,11 +1143,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
config_floating_veb(dev);
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
- ret = i40e_dev_sync_phy_type(hw);
- if (ret) {
- PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret);
- goto err_sync_phy_type;
- }
+ i40e_dev_sync_phy_type(hw);
+
/*
* On X710, performance number is far from the expectation on recent
* firmware versions. The fix for this issue may not be integrated in
@@ -1298,6 +1296,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
+ /* initialize Traffic Manager configuration */
+ i40e_tm_conf_init(dev);
+
ret = i40e_init_ethtype_filter_list(dev);
if (ret < 0)
goto err_init_ethtype_filter_list;
@@ -1331,7 +1332,6 @@ err_msix_pool_init:
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
-err_sync_phy_type:
(void)i40e_shutdown_adminq(hw);
return ret;
@@ -1414,7 +1414,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = I40E_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
if (hw->adapter_stopped == 0)
@@ -1461,6 +1461,9 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_flow);
}
+ /* Remove all Traffic Manager configuration */
+ i40e_tm_conf_uninit(dev);
+
return 0;
}
@@ -1470,9 +1473,14 @@ i40e_dev_configure(struct rte_eth_dev *dev)
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
int i, ret;
+ ret = i40e_dev_sync_phy_type(hw);
+ if (ret)
+ return ret;
+
/* Initialize to TRUE. If any of Rx queues doesn't meet the
* bulk allocation or vector Rx preconditions we will reset it.
*/
@@ -1547,7 +1555,7 @@ void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
@@ -1661,7 +1669,7 @@ void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
@@ -1733,7 +1741,7 @@ static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
@@ -1765,7 +1773,7 @@ static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_intr, i;
@@ -1806,11 +1814,15 @@ i40e_parse_link_speeds(uint16_t link_speeds)
static int
i40e_phy_conf_link(struct i40e_hw *hw,
uint8_t abilities,
- uint8_t force_speed)
+ uint8_t force_speed,
+ bool is_up)
{
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
+ enum i40e_aq_phy_type cnt;
+ uint32_t phy_type_mask = 0;
+
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
@@ -1828,6 +1840,10 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (status)
return ret;
+ /* If link is already up, no need to set it up again */
+ if (is_up && phy_ab.phy_type != 0)
+ return I40E_SUCCESS;
+
memset(&phy_conf, 0, sizeof(phy_conf));
/* bits 0-2 use the values from get_phy_abilities_resp */
@@ -1838,13 +1854,21 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (abilities & I40E_AQ_PHY_AN_ENABLED)
phy_conf.link_speed = advt;
else
- phy_conf.link_speed = force_speed;
+ phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
phy_conf.abilities = abilities;
+
+ /* To enable link, phy_type mask needs to include each type */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ phy_type_mask |= 1 << cnt;
+
/* use get_phy_abilities_resp value for the rest */
- phy_conf.phy_type = phy_ab.phy_type;
- phy_conf.phy_type_ext = phy_ab.phy_type_ext;
+ phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
+ phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
+ I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
+ I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
@@ -1876,13 +1900,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
abilities |= I40E_AQ_PHY_AN_ENABLED;
abilities |= I40E_AQ_PHY_LINK_ENABLED;
- /* Skip changing speed on 40G interfaces, FW does not support */
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
- speed = I40E_LINK_SPEED_UNKNOWN;
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- }
-
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, true);
}
static int
@@ -1892,7 +1910,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
struct i40e_vsi *vsi;
@@ -2008,7 +2026,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO,
"lsc won't enable because of no intr multiplex");
- } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ } else {
ret = i40e_aq_set_phy_int_mask(hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL |
@@ -2016,7 +2034,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
@@ -2025,6 +2043,11 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_filter_restore(pf);
+ if (pf->tm_conf.root && !pf->tm_conf.committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
return I40E_SUCCESS;
err_up:
@@ -2038,12 +2061,15 @@ static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
+ if (hw->adapter_stopped == 1)
+ return;
/* Disable all queues */
i40e_dev_switch_queues(pf, FALSE);
@@ -2085,6 +2111,11 @@ i40e_dev_stop(struct rte_eth_dev *dev)
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+
+ /* reset hierarchy commit */
+ pf->tm_conf.committed = false;
+
+ hw->adapter_stopped = 1;
}
static void
@@ -2092,7 +2123,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t reg;
int i;
@@ -2100,7 +2131,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
i40e_dev_stop(dev);
- hw->adapter_stopped = 1;
i40e_dev_free_queues(dev);
/* Disable interrupt */
@@ -2225,7 +2255,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, false);
}
int
@@ -2352,9 +2382,6 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
vsi->offset_loaded, &oes->tx_broadcast,
&nes->tx_broadcast);
- /* exclude CRC bytes */
- nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast +
- nes->tx_broadcast) * ETHER_CRC_LEN;
/* GLV_TDPC not supported */
i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
@@ -2390,14 +2417,35 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
I40E_GLV_GORCL(hw->port),
pf->offset_loaded,
- &pf->internal_rx_bytes_offset,
- &pf->internal_rx_bytes);
+ &pf->internal_stats_offset.rx_bytes,
+ &pf->internal_stats.rx_bytes);
i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
I40E_GLV_GOTCL(hw->port),
pf->offset_loaded,
- &pf->internal_tx_bytes_offset,
- &pf->internal_tx_bytes);
+ &pf->internal_stats_offset.tx_bytes,
+ &pf->internal_stats.tx_bytes);
+ /* Get total internal rx packet count */
+ i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
+ I40E_GLV_UPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_unicast,
+ &pf->internal_stats.rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
+ I40E_GLV_MPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_multicast,
+ &pf->internal_stats.rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
+ I40E_GLV_BPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_broadcast,
+ &pf->internal_stats.rx_broadcast);
+
+ /* exclude CRC size */
+ pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
+ pf->internal_stats.rx_multicast +
+ pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
@@ -2420,7 +2468,17 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
* so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN + pf->internal_rx_bytes;
+ ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+
+ /* Workaround: it is possible that I40E_GLV_GORC[H/L] is updated before
+ * I40E_GLPRT_GORC[H/L], so there is a small window that can cause a
+ * negative value.
+ */
+ if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
+ ns->eth.rx_bytes = 0;
+ /* exclude internal rx bytes */
+ else
+ ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
@@ -2448,7 +2506,14 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN + pf->internal_tx_bytes;
+ ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+
+ /* exclude internal tx bytes */
+ if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
+ ns->eth.tx_bytes = 0;
+ else
+ ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
+
/* GLPRT_TEPC not supported */
/* additional port specific stats */
@@ -2869,7 +2934,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
@@ -2973,71 +3038,93 @@ i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
}
static int
-i40e_vlan_tpid_set(struct rte_eth_dev *dev,
- enum rte_vlan_type vlan_type,
- uint16_t tpid)
+i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid, int qinq)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint64_t reg_r = 0, reg_w = 0;
- uint16_t reg_id = 0;
- int ret = 0;
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint64_t reg_w = 0;
+ uint16_t reg_id = 3;
+ int ret;
- switch (vlan_type) {
- case ETH_VLAN_TYPE_OUTER:
- if (qinq)
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
reg_id = 2;
- else
- reg_id = 3;
- break;
- case ETH_VLAN_TYPE_INNER:
- if (qinq)
- reg_id = 3;
- else {
- ret = -EINVAL;
- PMD_DRV_LOG(ERR,
- "Unsupported vlan type in single vlan.");
- return ret;
- }
- break;
- default:
- ret = -EINVAL;
- PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
- return ret;
}
+
ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
&reg_r, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
"Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
reg_id);
- ret = -EIO;
- return ret;
+ return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
- reg_id, reg_r);
+ "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
+ reg_id, reg_r);
reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
if (reg_r == reg_w) {
- ret = 0;
PMD_DRV_LOG(DEBUG, "No need to write");
- return ret;
+ return 0;
}
ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
reg_w, NULL);
if (ret != I40E_SUCCESS) {
- ret = -EIO;
PMD_DRV_LOG(ERR,
- "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_id);
- return ret;
+ "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
+ return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_w, reg_id);
+ "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_w, reg_id);
+
+ return 0;
+}
+
+static int
+i40e_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int ret = 0;
+
+ if ((vlan_type != ETH_VLAN_TYPE_INNER &&
+ vlan_type != ETH_VLAN_TYPE_OUTER) ||
+ (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported vlan type.");
+ return -EINVAL;
+ }
+ /* The 802.1ad frames ability was added in NVM API 1.7 */
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->first_tag = rte_cpu_to_le_16(tpid);
+ else if (vlan_type == ETH_VLAN_TYPE_INNER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ } else {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ }
+ ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Set switch config failed aq_err: %d",
+ hw->aq.asq_last_status);
+ ret = -EIO;
+ }
+ } else
+ /* If NVM API < 1.7, keep the register setting */
+ ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
+ tpid, qinq);
return ret;
}
@@ -3066,7 +3153,7 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_EXTEND_MASK) {
if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
i40e_vsi_config_double_vlan(vsi, TRUE);
- /* Set global registers with default ether type value */
+ /* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
ETHER_TYPE_VLAN);
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
@@ -3788,7 +3875,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint16_t qp_count = 0, vsi_count = 0;
if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
@@ -4284,6 +4371,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
if (enabled_tcmap & (1 << i))
total_tc++;
+ if (total_tc == 0)
+ total_tc = 1;
vsi->enabled_tc = enabled_tcmap;
/* Number of queues per enabled TC */
@@ -5216,10 +5305,8 @@ i40e_pf_setup(struct i40e_pf *pf)
pf->offset_loaded = FALSE;
memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
- pf->internal_rx_bytes = 0;
- pf->internal_tx_bytes = 0;
- pf->internal_rx_bytes_offset = 0;
- pf->internal_tx_bytes_offset = 0;
+ memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
+ memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
ret = i40e_pf_get_switch_config(pf);
if (ret != I40E_SUCCESS) {
@@ -5734,16 +5821,16 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
index = abs_vf_id / I40E_UINT32_BIT_SIZE;
offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
- /* VFR event occured */
+ /* VFR event occurred */
if (val & (0x1 << offset)) {
int ret;
/* Clear the event first */
I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
(0x1 << offset));
- PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
+ PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
/**
- * Only notify a VF reset event occured,
+ * Only notify a VF reset event occurred,
* don't trigger another SW reset
*/
ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
@@ -5804,7 +5891,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
ret = i40e_dev_link_update(dev, 0);
if (!ret)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
break;
default:
PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -7460,7 +7547,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
/*
* If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calulate the actual PF queues that are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
*/
if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
num = i40e_pf_calc_configured_queues_num(pf);
@@ -8136,7 +8223,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
/**
* Validate if the input set is allowed for a specific PCTYPE
*/
-static int
+int
i40e_validate_input_set(enum i40e_filter_pctype pctype,
enum rte_filter_type filter, uint64_t inset)
{
@@ -8311,7 +8398,7 @@ i40e_parse_input_set(uint64_t *inset,
* Translate the input set from bit masks to register aware bit masks
* and vice versa
*/
-static uint64_t
+uint64_t
i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
{
uint64_t val = 0;
@@ -8396,7 +8483,7 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
return val;
}
-static int
+int
i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
{
uint8_t i, idx = 0;
@@ -8444,7 +8531,7 @@ i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
return idx;
}
-static void
+void
i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
@@ -9012,7 +9099,7 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
static void
i40e_enable_extended_tag(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint32_t buf = 0;
int ret;
@@ -9150,8 +9237,9 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
*/
/* For both X710 and XL710 */
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
@@ -9172,13 +9260,22 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw)
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp phy_ab;
int ret = -ENOTSUP;
+ int retries = 0;
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
NULL);
- if (status)
- return ret;
-
+ while (status) {
+ PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
+ status);
+ retries++;
+ rte_delay_us(100000);
+ if (retries < 5)
+ status = i40e_aq_get_phy_capabilities(hw, false,
+ true, &phy_ab, NULL);
+ else
+ return ret;
+ }
return 0;
}
@@ -9203,8 +9300,12 @@ i40e_configure_registers(struct i40e_hw *hw)
reg_table[i].val =
I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
else /* For X710/XL710/XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+ if (hw->aq.fw_maj_ver < 6)
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
+ else
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
}
if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
@@ -10460,7 +10561,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
@@ -10494,7 +10595,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -10738,8 +10839,7 @@ i40e_filter_restore(struct i40e_pf *pf)
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
- if (strcmp(dev->data->drv_name,
- drv->driver.name))
+ if (strcmp(dev->device->driver->name, drv->driver.name))
return false;
return true;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b0d963c1..48abc05a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -39,6 +39,7 @@
#include <rte_kvargs.h>
#include <rte_hash.h>
#include <rte_flow_driver.h>
+#include <rte_tm_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -102,7 +103,7 @@
/* Linux PF host with virtchnl version 1.1 */
#define PF_IS_V11(vf) \
- (((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \
+ (((vf)->version_major == VIRTCHNL_VERSION_MAJOR) && \
((vf)->version_minor == 1))
/* index flex payload per layer */
@@ -251,6 +252,15 @@ enum i40e_flxpld_layer_idx {
I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+/* The max bandwidth of i40e is 40Gbps, i.e. 40000Mbps. */
+#define I40E_QOS_BW_MAX 40000
+/* The bandwidth should be a multiple of 50Mbps. */
+#define I40E_QOS_BW_GRANULARITY 50
+/* The min bandwidth weight is 1. */
+#define I40E_QOS_BW_WEIGHT_MIN 1
+/* The max bandwidth weight is 127. */
+#define I40E_QOS_BW_WEIGHT_MAX 127
+
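A sketch of the validation these limits suggest for a TM shaper rate, assuming rates are expressed in Mbps; whether i40e_tm.c performs exactly these checks is not shown in this hunk:

    static int i40e_qos_bw_valid(uint32_t rate_mbps)
    {
            return rate_mbps <= I40E_QOS_BW_MAX &&
                   rate_mbps % I40E_QOS_BW_GRANULARITY == 0;
    }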
/**
* The overhead from MTU to max frame size.
* Considering QinQ packet, the VLAN tag needs to be counted twice.
@@ -405,7 +415,7 @@ enum I40E_VF_STATE {
struct i40e_pf_vf {
struct i40e_pf *pf;
struct i40e_vsi *vsi;
- enum I40E_VF_STATE state; /* The number of queue pairs availiable */
+ enum I40E_VF_STATE state; /* The number of queue pairs available */
uint16_t vf_idx; /* VF index in pf->vfs */
uint16_t lan_nb_qps; /* Actual queues allocated */
uint16_t reset_cnt; /* Total vf reset times */
@@ -431,6 +441,25 @@ struct i40e_vmdq_info {
struct i40e_vsi *vsi;
};
+#define I40E_FDIR_MAX_FLEXLEN 16 /**< Max length of flexbytes. */
+#define I40E_MAX_FLX_SOURCE_OFF 480
+#define NONUSE_FLX_PIT_DEST_OFF 63
+#define NONUSE_FLX_PIT_FSIZE 1
+#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
+#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
+ (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
+ (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
+ ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \
+ NONUSE_FLX_PIT_DEST_OFF : \
+ ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
+#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
+#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
+#define I40E_FDIR_IPv6_TC_OFFSET 20
+
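As an illustration of MK_FLX_PIT (values here are hypothetical): capturing two 16-bit words of flexible payload starting at source word offset 4 and delivering them at field-vector offset 0 -- the macro adds the 50-word I40E_FLX_OFFSET_IN_FIELD_VECTOR base internally -- would be:

    uint32_t flx_pit = MK_FLX_PIT(4, 2, 0);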
/*
* Structure to store flex pit for flow diretor.
*/
@@ -442,6 +471,7 @@ struct i40e_fdir_flex_pit {
struct i40e_fdir_flex_mask {
uint8_t word_mask; /**< Bit i enables word i of flexible payload */
+ uint8_t nb_bitmask;
struct {
uint8_t offset;
uint16_t mask;
@@ -479,6 +509,12 @@ struct i40e_fdir_info {
struct i40e_fdir_filter_list fdir_list;
struct i40e_fdir_filter **hash_map;
struct rte_hash *hash_table;
+
+ /* Mark if flex pit and mask is set */
+ bool flex_pit_flag[I40E_MAX_FLXPLD_LAYER];
+ bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX];
+
+ bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set */
};
/* Ethertype filter number HW supports */
@@ -625,6 +661,67 @@ struct rte_flow {
TAILQ_HEAD(i40e_flow_list, rte_flow);
+/* Struct to store Traffic Manager shaper profile. */
+struct i40e_tm_shaper_profile {
+ TAILQ_ENTRY(i40e_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(i40e_shaper_profile_list, i40e_tm_shaper_profile);
+
+/* node type of Traffic Manager */
+enum i40e_tm_node_type {
+ I40E_TM_NODE_TYPE_PORT,
+ I40E_TM_NODE_TYPE_TC,
+ I40E_TM_NODE_TYPE_QUEUE,
+ I40E_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct i40e_tm_node {
+ TAILQ_ENTRY(i40e_tm_node) node;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ struct i40e_tm_node *parent;
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(i40e_tm_node_list, i40e_tm_node);
+
+/* Struct to store all the Traffic Manager configuration. */
+struct i40e_tm_conf {
+ struct i40e_shaper_profile_list shaper_profile_list;
+ struct i40e_tm_node *root; /* root node - port */
+ struct i40e_tm_node_list tc_list; /* node list for all the TCs */
+ struct i40e_tm_node_list queue_list; /* node list for all the queues */
+ /**
+ * The number of added TC nodes.
+ * It should be no more than the TC number of this port.
+ */
+ uint32_t nb_tc_node;
+ /**
+ * The number of added queue nodes.
+ * It should be no more than the queue number of this port.
+ */
+ uint32_t nb_queue_node;
+ /**
+ * This flag is used to check if APP can change the TM node
+ * configuration.
+ * When it's true, means the configuration is applied to HW,
+ * APP should not change the configuration.
+ * As we don't support on-the-fly configuration, when starting
+ * the port, APP should call the hierarchy_commit API to set this
+ * flag to true. When stopping the port, this flag should be set
+ * to false.
+ */
+ bool committed;
+};
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -639,11 +736,9 @@ struct i40e_pf {
struct i40e_hw_port_stats stats_offset;
struct i40e_hw_port_stats stats;
- /* internal packet byte count, it should be excluded from the total */
- uint64_t internal_rx_bytes;
- uint64_t internal_tx_bytes;
- uint64_t internal_rx_bytes_offset;
- uint64_t internal_tx_bytes_offset;
+ /* internal packet statistics, it should be excluded from the total */
+ struct i40e_eth_stats internal_stats_offset;
+ struct i40e_eth_stats internal_stats;
bool offset_loaded;
struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
@@ -690,6 +785,7 @@ struct i40e_pf {
struct i40e_flow_list flow_list;
bool mpls_replace_flag; /* 1 - MPLS filter replace is done */
bool qinq_replace_flag; /* QINQ filter replace is done */
+ struct i40e_tm_conf tm_conf;
};
enum pending_msg {
@@ -742,7 +838,7 @@ struct i40e_vf {
/* Event from pf */
bool dev_closed;
bool link_up;
- enum i40e_aq_link_speed link_speed;
+ enum virtchnl_link_speed link_speed;
bool vf_reset;
volatile uint32_t pend_cmd; /* pending command not finished yet */
int32_t cmd_retval; /* return value of the cmd response from PF */
@@ -750,8 +846,8 @@ struct i40e_vf {
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
/* VSI info */
- struct i40e_virtchnl_vf_resource *vf_res; /* All VSIs */
- struct i40e_virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+ struct virtchnl_vf_resource *vf_res; /* All VSIs */
+ struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
struct i40e_vsi vsi;
uint64_t flags;
};
@@ -822,8 +918,7 @@ int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr);
void i40e_update_vsi_stats(struct i40e_vsi *vsi);
void i40e_pf_disable_irq0(struct i40e_hw *hw);
void i40e_pf_enable_irq0(struct i40e_hw *hw);
-int i40e_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+int i40e_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi);
void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi);
int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
@@ -898,6 +993,17 @@ int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
int total);
bool is_i40e_supported(struct rte_eth_dev *dev);
+int i40e_validate_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter, uint64_t inset);
+int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask,
+ uint8_t nb_elem);
+uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input);
+void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
+
+int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void i40e_tm_conf_init(struct rte_eth_dev *dev);
+void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
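The i40e_tm_ops_get() hook declared above is how the generic rte_tm layer discovers the driver's traffic-management callbacks. A minimal sketch of its expected shape (assuming an i40e_tm_ops callback table defined in i40e_tm.c; the table name is illustrative):

	/* Sketch: hand the rte_tm layer a pointer to the driver's ops table. */
	int
	i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
	{
		if (arg == NULL)
			return -EINVAL;

		*(const void **)arg = &i40e_tm_ops;

		return 0;
	}

The PMD would then expose it through its eth_dev_ops, e.g. `.tm_ops_get = i40e_tm_ops_get,`.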
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 6e5839dd..f6d82934 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -56,7 +56,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -77,7 +76,7 @@
#define MAX_RESET_WAIT_CNT 20
struct i40evf_arq_msg_info {
- enum i40e_virtchnl_ops ops;
+ enum virtchnl_ops ops;
enum i40e_status_code result;
uint16_t buf_len;
uint16_t msg_len;
@@ -85,7 +84,7 @@ struct i40evf_arq_msg_info {
};
struct vf_cmd_info {
- enum i40e_virtchnl_ops ops;
+ enum virtchnl_ops ops;
uint8_t *in_args;
uint32_t in_args_size;
uint8_t *out_buffer;
@@ -108,7 +107,7 @@ static void i40evf_dev_stop(struct rte_eth_dev *dev);
static void i40evf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
@@ -159,7 +158,7 @@ static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
-static void i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
+static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
uint8_t *msg,
uint16_t msglen);
@@ -244,7 +243,7 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_arq_event_info event;
- enum i40e_virtchnl_ops opcode;
+ enum virtchnl_ops opcode;
enum i40e_status_code retval;
int ret;
enum i40evf_aq_result result = I40EVF_MSG_NON;
@@ -259,16 +258,16 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
return result;
}
- opcode = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+ opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
/* pf sys event */
- if (opcode == I40E_VIRTCHNL_OP_EVENT) {
- struct i40e_virtchnl_pf_event *vpe =
- (struct i40e_virtchnl_pf_event *)event.msg_buf;
+ if (opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event.msg_buf;
result = I40EVF_MSG_SYS;
switch (vpe->event) {
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
vf->link_up =
vpe->event_data.link_event.link_status;
vf->link_speed =
@@ -277,12 +276,12 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
PMD_DRV_LOG(INFO, "Link status update:%s",
vf->link_up ? "up" : "down");
break;
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
vf->vf_reset = true;
vf->pend_msg |= PFMSG_RESET_IMPENDING;
PMD_DRV_LOG(INFO, "vf is reseting");
break;
- case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
vf->dev_closed = true;
vf->pend_msg |= PFMSG_DRIVER_CLOSE;
PMD_DRV_LOG(INFO, "PF driver closed");
@@ -312,17 +311,17 @@ static inline void
_clear_cmd(struct i40e_vf *vf)
{
rte_wmb();
- vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
+ vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
}
/*
* Check there is pending cmd in execution. If none, set new command.
*/
static inline int
-_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
+_atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops)
{
int ret = rte_atomic32_cmpset(&vf->pend_cmd,
- I40E_VIRTCHNL_OP_UNKNOWN, ops);
+ VIRTCHNL_OP_UNKNOWN, ops);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
@@ -347,7 +346,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
info.msg = args->out_buffer;
info.buf_len = args->out_size;
- info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
+ info.ops = VIRTCHNL_OP_UNKNOWN;
info.result = I40E_SUCCESS;
err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
@@ -359,12 +358,12 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
}
switch (args->ops) {
- case I40E_VIRTCHNL_OP_RESET_VF:
+ case VIRTCHNL_OP_RESET_VF:
/*no need to process in this function */
err = 0;
break;
- case I40E_VIRTCHNL_OP_VERSION:
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
/* for init adminq commands, need to poll the response */
err = -1;
do {
@@ -385,13 +384,18 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
/* for other adminq in running time, waiting the cmd done flag */
err = -1;
do {
- if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) {
err = 0;
break;
}
rte_delay_ms(ASQ_DELAY_MS);
/* If don't read msg or read sys event, continue */
} while (i++ < MAX_TRY_TIMES);
+ /* If no response is received, clear the command */
+ if (i >= MAX_TRY_TIMES) {
+ PMD_DRV_LOG(WARNING, "No response for %d", args->ops);
+ _clear_cmd(vf);
+ }
break;
}
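The pend_cmd handshake above has a simple lifecycle: _atomic_set_cmd() claims the slot before a request is sent, the reply path clears it when the PF answers, and (new in this patch) the poll loop clears it itself on timeout so the next command is not blocked forever. A condensed sketch of the pattern (illustrative only; the real logic is spread across the functions above):

	/* Sketch: claim, wait with timeout, reclaim on failure. */
	if (_atomic_set_cmd(vf, args->ops))	/* fails if a cmd is pending */
		return -1;
	for (i = 0; i < MAX_TRY_TIMES; i++) {
		if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
			break;			/* reply handler cleared it */
		rte_delay_ms(ASQ_DELAY_MS);
	}
	if (i >= MAX_TRY_TIMES)
		_clear_cmd(vf);			/* give up; unblock next cmd */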
@@ -404,15 +408,15 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
static int
i40evf_check_api_version(struct rte_eth_dev *dev)
{
- struct i40e_virtchnl_version_info version, *pver;
+ struct virtchnl_version_info version, *pver;
int err;
struct vf_cmd_info args;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- version.major = I40E_VIRTCHNL_VERSION_MAJOR;
- version.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ version.major = VIRTCHNL_VERSION_MAJOR;
+ version.minor = VIRTCHNL_VERSION_MINOR;
- args.ops = I40E_VIRTCHNL_OP_VERSION;
+ args.ops = VIRTCHNL_OP_VERSION;
args.in_args = (uint8_t *)&version;
args.in_args_size = sizeof(version);
args.out_buffer = vf->aq_resp;
@@ -424,19 +428,19 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
return err;
}
- pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
+ pver = (struct virtchnl_version_info *)args.out_buffer;
vf->version_major = pver->major;
vf->version_minor = pver->minor;
if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
- else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
+ else if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= VIRTCHNL_VERSION_MINOR))
PMD_DRV_LOG(INFO, "Peer is Linux PF host");
else {
PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
vf->version_major, vf->version_minor,
- I40E_VIRTCHNL_VERSION_MAJOR,
- I40E_VIRTCHNL_VERSION_MINOR);
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
return -1;
}
@@ -452,15 +456,15 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
struct vf_cmd_info args;
uint32_t caps, len;
- args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
if (PF_IS_V11(vf)) {
- caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
- I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN |
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING;
args.in_args = (uint8_t *)&caps;
args.in_args_size = sizeof(caps);
} else {
@@ -474,8 +478,8 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
return err;
}
- len = sizeof(struct i40e_virtchnl_vf_resource) +
- I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ len = sizeof(struct virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
(void)rte_memcpy(vf->vf_res, args.out_buffer,
RTE_MIN(args.out_size, len));
@@ -492,18 +496,18 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err;
struct vf_cmd_info args;
- struct i40e_virtchnl_promisc_info promisc;
+ struct virtchnl_promisc_info promisc;
promisc.flags = 0;
promisc.vsi_id = vf->vsi_res->vsi_id;
if (enable_unicast)
- promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
+ promisc.flags |= FLAG_VF_UNICAST_PROMISC;
if (enable_multicast)
- promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
+ promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
- args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
args.in_args = (uint8_t *)&promisc;
args.in_args_size = sizeof(promisc);
args.out_buffer = vf->aq_resp;
@@ -517,30 +521,46 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
return err;
}
-/* Configure vlan and double vlan offload. Use flag to specify which part to configure */
static int
-i40evf_config_vlan_offload(struct rte_eth_dev *dev,
- bool enable_vlan_strip)
+i40evf_enable_vlan_strip(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int err;
struct vf_cmd_info args;
- struct i40e_virtchnl_vlan_offload_info offload;
-
- offload.vsi_id = vf->vsi_res->vsi_id;
- offload.enable_vlan_strip = enable_vlan_strip;
+ int ret;
- args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
- args.in_args = (uint8_t *)&offload;
- args.in_args_size = sizeof(offload);
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING");
- err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
- PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
+ return ret;
+}
- return err;
+static int
+i40evf_disable_vlan_strip(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct vf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING");
+
+ return ret;
}
static int
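Since the two helpers above differ only in the opcode they send, a single parameterized helper would also work; a hypothetical consolidation (not part of this patch):

	/* Sketch: one helper for both VLAN-strip virtchnl commands. */
	static int
	i40evf_config_vlan_strip(struct rte_eth_dev *dev, bool on)
	{
		struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
		struct vf_cmd_info args;

		memset(&args, 0, sizeof(args));
		args.ops = on ? VIRTCHNL_OP_ENABLE_VLAN_STRIPPING :
				VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
		args.out_buffer = vf->aq_resp;
		args.out_size = I40E_AQ_BUF_SZ;

		return i40evf_execute_vf_cmd(dev, &args);
	}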
@@ -550,7 +570,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err;
struct vf_cmd_info args;
- struct i40e_virtchnl_pvid_info tpid_info;
+ struct virtchnl_pvid_info tpid_info;
if (info == NULL) {
PMD_DRV_LOG(ERR, "invalid parameters");
@@ -561,7 +581,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
tpid_info.vsi_id = vf->vsi_res->vsi_id;
(void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
- args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
+ args.ops = (enum virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
args.in_args = (uint8_t *)&tpid_info;
args.in_args_size = sizeof(tpid_info);
args.out_buffer = vf->aq_resp;
@@ -575,7 +595,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
}
static void
-i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
+i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
uint16_t vsi_id,
uint16_t queue_id,
uint16_t nb_txq,
@@ -590,7 +610,7 @@ i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
}
static void
-i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
+i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
uint16_t vsi_id,
uint16_t queue_id,
uint16_t nb_rxq,
@@ -618,8 +638,8 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
(struct i40e_rx_queue **)dev->data->rx_queues;
struct i40e_tx_queue **txq =
(struct i40e_tx_queue **)dev->data->tx_queues;
- struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
- struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ struct virtchnl_vsi_queue_config_info *vc_vqci;
+ struct virtchnl_queue_pair_info *vc_qpi;
struct vf_cmd_info args;
uint16_t i, nb_qp = vf->num_queue_pairs;
const uint32_t size =
@@ -628,7 +648,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
int ret;
memset(buff, 0, sizeof(buff));
- vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
+ vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff;
vc_vqci->vsi_id = vf->vsi_res->vsi_id;
vc_vqci->num_queue_pairs = nb_qp;
@@ -640,7 +660,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
vf->max_pkt_len, rxq[i]);
}
memset(&args, 0, sizeof(args));
- args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
args.in_args = (uint8_t *)vc_vqci;
args.in_args_size = size;
args.out_buffer = vf->aq_resp;
@@ -648,7 +668,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+ "VIRTCHNL_OP_CONFIG_VSI_QUEUES");
return ret;
}
@@ -662,8 +682,8 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
(struct i40e_rx_queue **)dev->data->rx_queues;
struct i40e_tx_queue **txq =
(struct i40e_tx_queue **)dev->data->tx_queues;
- struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
- struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ struct virtchnl_vsi_queue_config_ext_info *vc_vqcei;
+ struct virtchnl_queue_pair_ext_info *vc_qpei;
struct vf_cmd_info args;
uint16_t i, nb_qp = vf->num_queue_pairs;
const uint32_t size =
@@ -672,7 +692,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
int ret;
memset(buff, 0, sizeof(buff));
- vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
+ vc_vqcei = (struct virtchnl_vsi_queue_config_ext_info *)buff;
vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
vc_vqcei->num_queue_pairs = nb_qp;
vc_qpei = vc_vqcei->qpair;
@@ -693,7 +713,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
}
memset(&args, 0, sizeof(args));
args.ops =
- (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
+ (enum virtchnl_ops)VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
args.in_args = (uint8_t *)vc_vqcei;
args.in_args_size = size;
args.out_buffer = vf->aq_resp;
@@ -701,7 +721,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
+ "VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
return ret;
}
@@ -724,10 +744,10 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct vf_cmd_info args;
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
- sizeof(struct i40e_virtchnl_vector_map)];
- struct i40e_virtchnl_irq_map_info *map_info;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
+ sizeof(struct virtchnl_vector_map)];
+ struct virtchnl_irq_map_info *map_info;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vector_id;
int i, err;
@@ -741,7 +761,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
vector_id = I40E_MISC_VEC_ID;
}
- map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
+ map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
map_info->num_vectors = 1;
map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
@@ -756,7 +776,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
intr_handle->intr_vec[i] = vector_id;
}
- args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
args.in_args = (u8 *)cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -773,7 +793,7 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
bool on)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_queue_select queue_select;
+ struct virtchnl_queue_select queue_select;
int err;
struct vf_cmd_info args;
memset(&queue_select, 0, sizeof(queue_select));
@@ -785,9 +805,9 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
queue_select.tx_queues |= 1 << qid;
if (on)
- args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
else
- args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
args.in_args = (u8 *)&queue_select;
args.in_args_size = sizeof(queue_select);
args.out_buffer = vf->aq_resp;
@@ -861,10 +881,10 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
__rte_unused uint32_t index,
__rte_unused uint32_t pool)
{
- struct i40e_virtchnl_ether_addr_list *list;
+ struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
- sizeof(struct i40e_virtchnl_ether_addr)];
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
+ sizeof(struct virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
@@ -876,13 +896,13 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
return I40E_ERR_INVALID_MAC_ADDR;
}
- list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = 1;
(void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
- args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+ args.ops = VIRTCHNL_OP_ADD_ETH_ADDR;
args.in_args = cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -891,6 +911,8 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_ADD_ETHER_ADDRESS");
+ else
+ vf->vsi.mac_num++;
return err;
}
@@ -899,10 +921,10 @@ static void
i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
- struct i40e_virtchnl_ether_addr_list *list;
+ struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
- sizeof(struct i40e_virtchnl_ether_addr)];
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
+ sizeof(struct virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
@@ -914,13 +936,13 @@ i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
return;
}
- list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = 1;
(void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
- args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ args.ops = VIRTCHNL_OP_DEL_ETH_ADDR;
args.in_args = cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -929,6 +951,8 @@ i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_DEL_ETHER_ADDRESS");
+ else
+ vf->vsi.mac_num--;
return;
}
@@ -947,13 +971,13 @@ static int
i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_queue_select q_stats;
+ struct virtchnl_queue_select q_stats;
int err;
struct vf_cmd_info args;
memset(&q_stats, 0, sizeof(q_stats));
q_stats.vsi_id = vf->vsi_res->vsi_id;
- args.ops = I40E_VIRTCHNL_OP_GET_STATS;
+ args.ops = VIRTCHNL_OP_GET_STATS;
args.in_args = (u8 *)&q_stats;
args.in_args_size = sizeof(q_stats);
args.out_buffer = vf->aq_resp;
@@ -1050,18 +1074,18 @@ static int
i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_vlan_filter_list *vlan_list;
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ struct virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
sizeof(uint16_t)];
int err;
struct vf_cmd_info args;
- vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
vlan_list->vsi_id = vf->vsi_res->vsi_id;
vlan_list->num_elements = 1;
vlan_list->vlan_id[0] = vlanid;
- args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
+ args.ops = VIRTCHNL_OP_ADD_VLAN;
args.in_args = (u8 *)&cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -1077,18 +1101,18 @@ static int
i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_vlan_filter_list *vlan_list;
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ struct virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
sizeof(uint16_t)];
int err;
struct vf_cmd_info args;
- vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
vlan_list->vsi_id = vf->vsi_res->vsi_id;
vlan_list->num_elements = 1;
vlan_list->vlan_id[0] = vlanid;
- args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
+ args.ops = VIRTCHNL_OP_DEL_VLAN;
args.in_args = (u8 *)&cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -1178,7 +1202,7 @@ i40evf_reset_vf(struct i40e_hw *hw)
reset = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
- if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
+ if (VIRTCHNL_VFR_COMPLETED == reset || VIRTCHNL_VFR_VFACTIVE == reset)
break;
else
rte_delay_ms(50);
@@ -1242,8 +1266,8 @@ i40evf_init_vf(struct rte_eth_dev *dev)
PMD_INIT_LOG(ERR, "check_api version failed");
goto err_aq;
}
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
- (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
if (!vf->vf_res) {
PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
@@ -1257,7 +1281,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
/* got VF config message back from PF, now we can parse it */
for (i = 0; i < vf->vf_res->num_vsis; i++) {
- if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
vf->vsi_res = &vf->vf_res->vsi_res[i];
}
@@ -1269,7 +1293,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
if (hw->mac.type == I40E_MAC_X722_VF)
vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
vf->vsi.vsi_id = vf->vsi_res->vsi_id;
- vf->vsi.type = vf->vsi_res->vsi_type;
+ vf->vsi.type = (enum i40e_vsi_type)vf->vsi_res->vsi_type;
vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -1318,25 +1342,25 @@ i40evf_uninit_vf(struct rte_eth_dev *dev)
}
static void
-i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
- uint8_t *msg,
- __rte_unused uint16_t msglen)
+i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
+ __rte_unused uint16_t msglen)
{
- struct i40e_virtchnl_pf_event *pf_msg =
- (struct i40e_virtchnl_pf_event *)msg;
+ struct virtchnl_pf_event *pf_msg =
+ (struct virtchnl_pf_event *)msg;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
switch (pf_msg->event) {
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL, NULL);
break;
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
vf->link_up = pf_msg->event_data.link_event.link_status;
vf->link_speed = pf_msg->event_data.link_event.link_speed;
break;
- case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
default:
@@ -1352,7 +1376,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_arq_event_info info;
uint16_t pending, aq_opc;
- enum i40e_virtchnl_ops msg_opc;
+ enum virtchnl_ops msg_opc;
enum i40e_status_code msg_ret;
int ret;
@@ -1377,13 +1401,13 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* cookie_high of struct i40e_aq_desc, while return error code
* are stored in cookie_low, Which is done by
* i40e_aq_send_msg_to_vf in PF driver.*/
- msg_opc = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(
+ msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
info.desc.cookie_high);
msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
info.desc.cookie_low);
switch (aq_opc) {
case i40e_aqc_opc_send_msg_to_vf:
- if (msg_opc == I40E_VIRTCHNL_OP_EVENT)
+ if (msg_opc == VIRTCHNL_OP_EVENT)
/* process event*/
i40evf_handle_pf_event(dev, info.msg_buf,
info.msg_len);
@@ -1460,7 +1484,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
struct i40e_hw *hw
= I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1592,8 +1616,8 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
*/
if (!conf->rxmode.hw_strip_crc) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) {
+ if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
/* Peer is running non-DPDK PF driver. */
PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
return -EINVAL;
@@ -1621,22 +1645,15 @@ i40evf_init_vlan(struct rte_eth_dev *dev)
static void
i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
- bool enable_vlan_strip = 0;
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- /* Linux pf host doesn't support vlan offload yet */
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
- /* Vlan stripping setting */
- if (mask & ETH_VLAN_STRIP_MASK) {
- /* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
- enable_vlan_strip = 1;
- else
- enable_vlan_strip = 0;
-
- i40evf_config_vlan_offload(dev, enable_vlan_strip);
- }
+ /* Vlan stripping setting */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev_conf->rxmode.hw_vlan_strip)
+ i40evf_enable_vlan_strip(dev);
+ else
+ i40evf_disable_vlan_strip(dev);
}
}
@@ -1884,7 +1901,7 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
@@ -1917,7 +1934,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
@@ -1944,7 +1961,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
@@ -1979,7 +1996,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -2001,7 +2018,7 @@ i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static void
i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
{
- struct i40e_virtchnl_ether_addr_list *list;
+ struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err, i, j;
int next_begin = 0;
@@ -2012,11 +2029,11 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
do {
j = 0;
- len = sizeof(struct i40e_virtchnl_ether_addr_list);
+ len = sizeof(struct virtchnl_ether_addr_list);
for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
continue;
- len += sizeof(struct i40e_virtchnl_ether_addr);
+ len += sizeof(struct virtchnl_ether_addr);
if (len >= I40E_AQ_BUF_SZ) {
next_begin = i + 1;
break;
@@ -2043,17 +2060,23 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
}
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = j;
- args.ops = add ? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS :
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+ VIRTCHNL_OP_DEL_ETH_ADDR;
args.in_args = (uint8_t *)list;
args.in_args_size = len;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
+ if (err) {
PMD_DRV_LOG(ERR, "fail to execute command %s",
add ? "OP_ADD_ETHER_ADDRESS" :
"OP_DEL_ETHER_ADDRESS");
+ } else {
+ if (add)
+ vf->vsi.mac_num++;
+ else
+ vf->vsi.mac_num--;
+ }
rte_free(list);
begin = next_begin;
} while (begin < I40E_NUM_MACADDR_MAX);
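The batching above is bounded by the admin-queue buffer size: each message carries a virtchnl_ether_addr_list header plus one virtchnl_ether_addr per MAC, and a new message is started once the next entry would push the length to I40E_AQ_BUF_SZ or beyond. Roughly (a back-of-the-envelope sketch; the exact count depends on the struct layouts and the >= check above):

	/* Sketch: how many MAC entries fit in one admin-queue message. */
	size_t max_macs_per_msg =
		(I40E_AQ_BUF_SZ - sizeof(struct virtchnl_ether_addr_list)) /
		sizeof(struct virtchnl_ether_addr);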
@@ -2064,7 +2087,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
@@ -2130,11 +2153,14 @@ err_queue:
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev);
PMD_INIT_FUNC_TRACE();
+ if (hw->adapter_stopped == 1)
+ return;
i40evf_stop_queues(dev);
i40evf_disable_queues_intr(dev);
i40e_dev_clear_queues(dev);
@@ -2147,6 +2173,7 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
}
/* remove all mac addrs */
i40evf_add_del_all_mac_addr(dev, FALSE);
+ hw->adapter_stopped = 1;
}
@@ -2261,7 +2288,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2330,11 +2357,10 @@ static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
i40evf_dev_stop(dev);
- hw->adapter_stopped = 1;
i40e_dev_free_queues(dev);
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 28cc554f..8013add4 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -67,15 +67,13 @@
#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
-#define I40E_FDIR_IPv6_TC_OFFSET 20
#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
#define I40E_FDIR_UDP_DEFAULT_LEN 400
-/* Wait count and interval for fdir filter programming */
-#define I40E_FDIR_WAIT_COUNT 10
-#define I40E_FDIR_WAIT_INTERVAL_US 1000
+/* Wait time for fdir filter programming */
+#define I40E_FDIR_MAX_WAIT_US 10000
/* Wait count and interval for fdir filter flush */
#define I40E_FDIR_FLUSH_RETRY 50
@@ -84,21 +82,6 @@
#define I40E_COUNTER_PF 2
/* Statistic counter index for one pf */
#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
-#define I40E_MAX_FLX_SOURCE_OFF 480
-#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
-
-#define NONUSE_FLX_PIT_DEST_OFF 63
-#define NONUSE_FLX_PIT_FSIZE 1
-#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
- (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
- I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
- (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
- I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
- ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \
- NONUSE_FLX_PIT_DEST_OFF : \
- ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \
- I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
- I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
#define I40E_FDIR_FLOWS ( \
(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
@@ -113,8 +96,6 @@
(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
(1 << RTE_ETH_FLOW_L2_PAYLOAD))
-#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
-
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
@@ -257,7 +238,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
/* reserve memory for the fdir programming packet */
snprintf(z_name, sizeof(z_name), "%s_%s_%d",
- eth_dev->data->drv_name,
+ eth_dev->device->driver->name,
I40E_FDIR_MZ_NAME,
eth_dev->data->port_id);
mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
@@ -300,8 +281,12 @@ i40e_fdir_teardown(struct i40e_pf *pf)
vsi = pf->fdir.fdir_vsi;
if (!vsi)
return;
- i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
- i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
+ err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
i40e_dev_rx_queue_release(pf->fdir.rxq);
pf->fdir.rxq = NULL;
i40e_dev_tx_queue_release(pf->fdir.txq);
@@ -378,8 +363,6 @@ i40e_init_flx_pld(struct i40e_pf *pf)
}
}
-#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
-
#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
if ((flex_pit2).src_offset < \
(flex_pit1).src_offset + (flex_pit1).size) { \
@@ -1295,28 +1278,27 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
/* Update the tx tail register */
rte_wmb();
I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
-
- for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
- rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
+ for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
if ((txdp->cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
break;
+ rte_delay_us(1);
}
- if (i >= I40E_FDIR_WAIT_COUNT) {
+ if (i >= I40E_FDIR_MAX_WAIT_US) {
PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
" time out to get DD on tx queue.");
return -ETIMEDOUT;
}
/* totally delay 10 ms to check programming status*/
- rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US);
- if (i40e_check_fdir_programming_status(rxq) < 0) {
- PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
- " programming status reported.");
- return -ENOSYS;
+ for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+ if (i40e_check_fdir_programming_status(rxq) >= 0)
+ return 0;
+ rte_delay_us(1);
}
-
- return 0;
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: timed out waiting for programming status.");
+ return -ETIMEDOUT;
}
/*
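The reworked wait above replaces the fixed ten 1 ms polls with a single 10 ms budget (I40E_FDIR_MAX_WAIT_US = 10000 iterations of 1 us) shared across the two phases: whatever the DD-bit poll does not consume is left for the programming-status poll, because the second loop continues from the same counter i. A condensed sketch of the shared-budget pattern (illustrative; cond1/cond2 stand for the two completion checks):

	/* Sketch: two completion checks sharing one polling budget. */
	for (i = 0; i < BUDGET_US; i++) {	/* phase 1: wait for DD bit */
		if (cond1())
			break;
		rte_delay_us(1);
	}
	if (i >= BUDGET_US)
		return -ETIMEDOUT;
	for (; i < BUDGET_US; i++) {		/* phase 2: programming status */
		if (cond2())
			return 0;
		rte_delay_us(1);
	}
	return -ETIMEDOUT;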
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 24e1c658..b92719a3 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -52,8 +52,7 @@
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
-#define I40E_IPV4_TC_SHIFT 4
-#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_TC_MASK (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
#define I40E_IPV6_FRAG_HEADER 44
#define I40E_TENANT_ARRAY_NUM 3
#define I40E_TCI_MASK 0xFFFF
@@ -114,6 +113,12 @@ static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -135,7 +140,7 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
struct rte_flow_error *error,
union i40e_filter_t *filter);
static int
-i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_tunnel_filter_conf *filter);
@@ -158,102 +163,1295 @@ static enum rte_flow_item_type pattern_ethertype[] = {
/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
RTE_FLOW_ITEM_TYPE_END,
};
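The template names above encode their shape: a _raw_<n> suffix gives the number of consecutive RAW (flexible payload) items in the template, and a _vf suffix marks templates that end with a VF item so the resulting rule can be targeted at a virtual function. pattern_fdir_vlan_ipv6_sctp_raw_3_vf, for instance, stands for ETH / VLAN / IPV6 / SCTP / RAW x 3 / VF / END.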
@@ -296,7 +1494,40 @@ static enum rte_flow_item_type pattern_vxlan_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_nvgre_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_mpls_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
@@ -329,7 +1560,6 @@ static enum rte_flow_item_type pattern_mpls_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched QINQ */
static enum rte_flow_item_type pattern_qinq_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
@@ -340,28 +1570,163 @@ static enum rte_flow_item_type pattern_qinq_1[] = {
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
- /* FDIR */
+ /* FDIR - support default flow type without flexible payload */
+ { pattern_ethertype, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
+ /* FDIR - support default flow type with flexible payload */
+ { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ /* FDIR - support single VLAN input set */
+ { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ /* FDIR - support VF item */
+ { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
/* VXLAN */
{ pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* NVGRE */
+ { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
/* MPLSoUDP & MPLSoGRE */
{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
@@ -452,10 +1817,10 @@ i40e_match_pattern(enum rte_flow_item_type *item_array,
/* Find if there's parse filter function matched */
static parse_filter_t
-i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
{
parse_filter_t parse_filter = NULL;
- uint8_t i = 0;
+ uint8_t i = *idx;
for (; i < RTE_DIM(i40e_supported_patterns); i++) {
if (i40e_match_pattern(i40e_supported_patterns[i].items,
@@ -465,6 +1830,8 @@ i40e_find_parse_filter_func(struct rte_flow_item *pattern)
}
}
+ *idx = ++i;
+
return parse_filter;
}
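With the new idx out-parameter the template lookup becomes resumable: when one matching template's parser rejects the spec/mask combination, the caller can retry from the next entry. A minimal sketch of the intended caller loop, assuming the parse_filter_t signature used elsewhere in this file (names such as items, actions and filter are illustrative):

    uint32_t idx = 0;
    parse_filter_t parse;
    int ret = -ENOTSUP;

    /* Try every supported template whose item layout matches,
     * until one of the per-filter parsers accepts the rule.
     */
    while ((parse = i40e_find_parse_filter_func(items, &idx)) != NULL) {
            ret = parse(dev, attr, items, actions, error, &filter);
            if (!ret)
                    break; /* parsed and stored successfully */
    }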
@@ -704,12 +2071,244 @@ i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
}
+static int
+i40e_flow_check_raw_item(const struct rte_flow_item *item,
+ const struct rte_flow_item_raw *raw_spec,
+ struct rte_flow_error *error)
+{
+ if (!raw_spec->relative) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Relative should be 1.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->offset % sizeof(uint16_t)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Offset should be even.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->search || raw_spec->limit) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "search or limit is not supported.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->offset < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Offset should be non-negative.");
+ return -rte_errno;
+ }
+ return 0;
+}
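These checks only admit relative, word-aligned flexible payload matches with no search window. A sketch of a raw item that passes them, assuming the rte_flow_item_raw layout in which pattern is a pointer (earlier releases declared it as a flexible array member instead); all values are illustrative:

    static const uint8_t flex_bytes[4] = { 0xab, 0xcd, 0xef, 0x01 };

    struct rte_flow_item_raw raw_spec = {
            .relative = 1,  /* must be 1: offset counts from the previous item */
            .search = 0,    /* search/limit are rejected above */
            .limit = 0,
            .offset = 2,    /* must be even and non-negative */
            .length = sizeof(flex_bytes),
            .pattern = flex_bytes,
    };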
+
+static int
+i40e_flow_store_flex_pit(struct i40e_pf *pf,
+ struct i40e_fdir_flex_pit *flex_pit,
+ enum i40e_flxpld_layer_idx layer_idx,
+ uint8_t raw_id)
+{
+ uint8_t field_idx;
+
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
+ /* Check if the configuration is conflicted */
+ if (pf->fdir.flex_pit_flag[layer_idx] &&
+ (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
+ pf->fdir.flex_set[field_idx].size != flex_pit->size ||
+ pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
+ return -1;
+
+ /* Check if the configuration exists. */
+ if (pf->fdir.flex_pit_flag[layer_idx] &&
+ (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
+ pf->fdir.flex_set[field_idx].size == flex_pit->size &&
+ pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
+ return 1;
+
+ pf->fdir.flex_set[field_idx].src_offset =
+ flex_pit->src_offset;
+ pf->fdir.flex_set[field_idx].size =
+ flex_pit->size;
+ pf->fdir.flex_set[field_idx].dst_offset =
+ flex_pit->dst_offset;
+
+ return 0;
+}
+
+static int
+i40e_flow_store_flex_mask(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ uint8_t *mask)
+{
+ struct i40e_fdir_flex_mask flex_mask;
+ uint16_t mask_tmp;
+ uint8_t i, nb_bitmask = 0;
+
+ memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
+ if (mask_tmp) {
+ flex_mask.word_mask |=
+ I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+ if (mask_tmp != UINT16_MAX) {
+ flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
+ flex_mask.bitmask[nb_bitmask].offset =
+ i / sizeof(uint16_t);
+ nb_bitmask++;
+ if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
+ return -1;
+ }
+ }
+ }
+ flex_mask.nb_bitmask = nb_bitmask;
+
+ if (pf->fdir.flex_mask_flag[pctype] &&
+ (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+ sizeof(struct i40e_fdir_flex_mask))))
+ return -2;
+ else if (pf->fdir.flex_mask_flag[pctype] &&
+ !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+ sizeof(struct i40e_fdir_flex_mask))))
+ return 1;
+
+ memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
+ sizeof(struct i40e_fdir_flex_mask));
+ return 0;
+}
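A short worked example of the packing above, assuming I40E_WORD() places its first argument in the high byte; the mask bytes are illustrative:

    /* mask = { 0xFF, 0xFF, 0xF0, 0x0F, 0x00, ... }
     *
     * word 0: I40E_WORD(0xFF, 0xFF) = 0xFFFF -> fully matched:
     *         word_mask |= I40E_FLEX_WORD_MASK(0)
     * word 1: I40E_WORD(0xF0, 0x0F) = 0xF00F -> partially matched:
     *         bitmask[0].mask = ~0xF00F = 0x0FF0, bitmask[0].offset = 1
     * all-zero words contribute nothing to either register image.
     */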
+
+static void
+i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
+ enum i40e_flxpld_layer_idx layer_idx,
+ uint8_t raw_id)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t flx_pit;
+ uint8_t field_idx;
+ uint16_t min_next_off = 0; /* in words */
+ uint8_t i;
+
+ /* Set flex pit */
+ for (i = 0; i < raw_id; i++) {
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+ pf->fdir.flex_set[field_idx].size,
+ pf->fdir.flex_set[field_idx].dst_offset);
+
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+ pf->fdir.flex_set[field_idx].size;
+ }
+
+ for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+ /* set the non-used register obeying register's constraint */
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+ NONUSE_FLX_PIT_DEST_OFF);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ min_next_off++;
+ }
+
+ pf->fdir.flex_pit_flag[layer_idx] = 1;
+}
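Note that the trailing loop parks each unused FLX_PIT entry at min_next_off and then increments it, so the programmed source offsets keep increasing across the per-layer entries; this appears to be the register constraint the comment above refers to:

    /* used entries:   src_offset/size/dst_offset as configured
     * unused entries: src_offset = min_next_off, min_next_off + 1, ...
     *                 with NONUSE_FLX_PIT_FSIZE / NONUSE_FLX_PIT_DEST_OFF
     */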
+
+static void
+i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_mask *flex_mask;
+ uint32_t flxinset, fd_mask;
+ uint8_t i;
+
+ /* Set flex mask */
+ flex_mask = &pf->fdir.flex_mask[pctype];
+ flxinset = (flex_mask->word_mask <<
+ I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+ I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+ for (i = 0; i < flex_mask->nb_bitmask; i++) {
+ fd_mask = (flex_mask->bitmask[i].mask <<
+ I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+ I40E_PRTQF_FD_MSK_MASK_MASK;
+ fd_mask |= ((flex_mask->bitmask[i].offset +
+ I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+ I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+ I40E_PRTQF_FD_MSK_OFFSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+ }
+
+ pf->fdir.flex_mask_flag[pctype] = 1;
+}
+
+static int
+i40e_flow_set_fdir_inset(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ uint64_t input_set)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t inset_reg = 0;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int i, num;
+
+ /* Check if the input set is valid */
+ if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
+ input_set) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid input set");
+ return -EINVAL;
+ }
+
+ /* Check if the configuration is conflicted */
+ if (pf->fdir.inset_flag[pctype] &&
+ memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
+ return -1;
+
+ if (pf->fdir.inset_flag[pctype] &&
+ !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
+ return 0;
+
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return -EINVAL;
+
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
+
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+
+ /* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
+ I40E_WRITE_FLUSH(hw);
+
+ pf->fdir.input_set[pctype] = input_set;
+ pf->fdir.inset_flag[pctype] = 1;
+ return 0;
+}
+
/* 1. Last in item should be NULL as range is not supported.
- * 2. Supported flow type and input set: refer to array
- * default_inset_table in i40e_ethdev.c.
- * 3. Mask of fields which need to be matched should be
+ * 2. Supported patterns: refer to array i40e_supported_patterns.
+ * 3. Supported flow type and input set: refer to array
+ * valid_fdir_inset_table in i40e_ethdev.c.
+ * 4. Mask of fields which need to be matched should be
* filled with 1.
- * 4. Mask of fields which needn't to be matched should be
+ * 5. Mask of fields which needn't be matched should be
* filled with 0.
*/
static int
@@ -721,20 +2320,44 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
const struct rte_flow_item *item = pattern;
const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
const struct rte_flow_item_udp *udp_spec, *udp_mask;
const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_vf *vf_spec;
+
uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
enum i40e_filter_pctype pctype;
uint64_t input_set = I40E_INSET_NONE;
- uint16_t flag_offset;
+ uint16_t frag_off;
enum rte_flow_item_type item_type;
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
- uint32_t j;
+ uint32_t i, j;
+ uint8_t ipv6_addr_mask[16] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
+ uint8_t raw_id = 0;
+ int32_t off_arr[I40E_MAX_FLXPLD_FIED];
+ uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
+ struct i40e_fdir_flex_pit flex_pit;
+ uint8_t next_dst_off = 0;
+ uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
+ uint16_t flex_size;
+ bool cfg_flex_pit = true;
+ bool cfg_flex_msk = true;
+ uint16_t outer_tpid;
+ uint16_t ether_type;
+ uint32_t vtc_flow_cpu;
+ int ret;
+ memset(off_arr, 0, sizeof(off_arr));
+ memset(len_arr, 0, sizeof(len_arr));
+ memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
+ outer_tpid = i40e_get_outer_vlan(dev);
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -748,13 +2371,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = (const struct rte_flow_item_eth *)item->spec;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
- if (eth_spec || eth_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid ETH spec/mask");
- return -rte_errno;
+
+ if (eth_spec && eth_mask) {
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ !is_zero_ether_addr(&eth_mask->dst)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask.");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) ==
+ UINT16_MAX) {
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ eth_spec->type;
+ }
+
+ ether_type = rte_be_to_cpu_16(eth_spec->type);
+ if (ether_type == ETHER_TYPE_IPv4 ||
+ ether_type == ETHER_TYPE_IPv6 ||
+ ether_type == ETHER_TYPE_ARP ||
+ ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type.");
+ return -rte_errno;
+ }
+ }
+
+ flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+ layer_idx = I40E_FLXPLD_L2_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ input_set |= I40E_INSET_VLAN_INNER;
+ filter->input.flow_ext.vlan_tci =
+ vlan_spec->tci;
+ }
}
+
+ flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+ layer_idx = I40E_FLXPLD_L2_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
l3 = RTE_FLOW_ITEM_TYPE_IPV4;
@@ -762,58 +2430,55 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
(const struct rte_flow_item_ipv4 *)item->spec;
ipv4_mask =
(const struct rte_flow_item_ipv4 *)item->mask;
- if (!ipv4_spec || !ipv4_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL IPv4 spec/mask");
- return -rte_errno;
- }
- /* Check IPv4 mask and update input set */
- if (ipv4_mask->hdr.version_ihl ||
- ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.hdr_checksum) {
- rte_flow_error_set(error, EINVAL,
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid IPv4 mask.");
- return -rte_errno;
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ frag_off = ipv4_spec->hdr.fragment_offset;
+ frag_off = rte_be_to_cpu_16(frag_off);
+ if (frag_off & IPV4_HDR_OFFSET_MASK ||
+ frag_off & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
}
- if (ipv4_mask->hdr.src_addr == UINT32_MAX)
- input_set |= I40E_INSET_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
- input_set |= I40E_INSET_IPV4_DST;
- if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
- input_set |= I40E_INSET_IPV4_TOS;
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
- input_set |= I40E_INSET_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
- input_set |= I40E_INSET_IPV4_PROTO;
-
- /* Get filter info */
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
- /* Check if it is fragment. */
- flag_offset =
- rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
- if (flag_offset & IPV4_HDR_OFFSET_MASK ||
- flag_offset & IPV4_HDR_MF_FLAG)
- flow_type = RTE_ETH_FLOW_FRAG_IPV4;
-
- /* Get the filter info */
- filter->input.flow.ip4_flow.proto =
- ipv4_spec->hdr.next_proto_id;
- filter->input.flow.ip4_flow.tos =
- ipv4_spec->hdr.type_of_service;
- filter->input.flow.ip4_flow.ttl =
- ipv4_spec->hdr.time_to_live;
- filter->input.flow.ip4_flow.src_ip =
- ipv4_spec->hdr.src_addr;
- filter->input.flow.ip4_flow.dst_ip =
- ipv4_spec->hdr.dst_addr;
+ layer_idx = I40E_FLXPLD_L3_IDX;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -822,232 +2487,276 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
(const struct rte_flow_item_ipv6 *)item->spec;
ipv6_mask =
(const struct rte_flow_item_ipv6 *)item->mask;
- if (!ipv6_spec || !ipv6_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL IPv6 spec/mask");
- return -rte_errno;
- }
-
- /* Check IPv6 mask and update input set */
- if (ipv6_mask->hdr.payload_len) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid IPv6 mask");
- return -rte_errno;
- }
- /* SCR and DST address of IPv6 shouldn't be masked */
- for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
- if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
- ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ if (ipv6_spec && ipv6_mask) {
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid IPv6 mask");
return -rte_errno;
}
+
+ if (!memcmp(ipv6_mask->hdr.src_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr)))
+ input_set |= I40E_INSET_IPV6_SRC;
+ if (!memcmp(ipv6_mask->hdr.dst_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ vtc_flow_cpu =
+ rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(vtc_flow_cpu >>
+ I40E_FDIR_IPv6_TC_OFFSET);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto ==
+ I40E_IPV6_FRAG_HEADER)
+ flow_type =
+ RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
}
- input_set |= I40E_INSET_IPV6_SRC;
- input_set |= I40E_INSET_IPV6_DST;
-
- if ((ipv6_mask->hdr.vtc_flow &
- rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
- == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
- input_set |= I40E_INSET_IPV6_TC;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
- input_set |= I40E_INSET_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
- input_set |= I40E_INSET_IPV6_HOP_LIMIT;
-
- /* Get filter info */
- filter->input.flow.ipv6_flow.tc =
- (uint8_t)(ipv6_spec->hdr.vtc_flow <<
- I40E_IPV4_TC_SHIFT);
- filter->input.flow.ipv6_flow.proto =
- ipv6_spec->hdr.proto;
- filter->input.flow.ipv6_flow.hop_limits =
- ipv6_spec->hdr.hop_limits;
-
- rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
- ipv6_spec->hdr.src_addr, 16);
- rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
- ipv6_spec->hdr.dst_addr, 16);
-
- /* Check if it is fragment. */
- if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
- flow_type = RTE_ETH_FLOW_FRAG_IPV6;
- else
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ layer_idx = I40E_FLXPLD_L3_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_TCP:
tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
- if (!tcp_spec || !tcp_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL TCP spec/mask");
- return -rte_errno;
- }
- /* Check TCP mask and update input set */
- if (tcp_mask->hdr.sent_seq ||
- tcp_mask->hdr.recv_ack ||
- tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags ||
- tcp_mask->hdr.rx_win ||
- tcp_mask->hdr.cksum ||
- tcp_mask->hdr.tcp_urp) {
- rte_flow_error_set(error, EINVAL,
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid TCP mask");
- return -rte_errno;
- }
+ return -rte_errno;
+ }
- if (tcp_mask->hdr.src_port != UINT16_MAX ||
- tcp_mask->hdr.dst_port != UINT16_MAX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid TCP mask");
- return -rte_errno;
+ if (tcp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (tcp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
}
- input_set |= I40E_INSET_SRC_PORT;
- input_set |= I40E_INSET_DST_PORT;
-
- /* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
- filter->input.flow.tcp4_flow.src_port =
- tcp_spec->hdr.src_port;
- filter->input.flow.tcp4_flow.dst_port =
- tcp_spec->hdr.dst_port;
- } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
- filter->input.flow.tcp6_flow.src_port =
- tcp_spec->hdr.src_port;
- filter->input.flow.tcp6_flow.dst_port =
- tcp_spec->hdr.dst_port;
- }
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_UDP:
udp_spec = (const struct rte_flow_item_udp *)item->spec;
udp_mask = (const struct rte_flow_item_udp *)item->mask;
- if (!udp_spec || !udp_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL UDP spec/mask");
- return -rte_errno;
- }
- /* Check UDP mask and update input set*/
- if (udp_mask->hdr.dgram_len ||
- udp_mask->hdr.dgram_cksum) {
- rte_flow_error_set(error, EINVAL,
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid UDP mask");
- return -rte_errno;
- }
+ return -rte_errno;
+ }
- if (udp_mask->hdr.src_port != UINT16_MAX ||
- udp_mask->hdr.dst_port != UINT16_MAX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid UDP mask");
- return -rte_errno;
+ if (udp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (udp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
}
- input_set |= I40E_INSET_SRC_PORT;
- input_set |= I40E_INSET_DST_PORT;
-
- /* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
- filter->input.flow.udp4_flow.src_port =
- udp_spec->hdr.src_port;
- filter->input.flow.udp4_flow.dst_port =
- udp_spec->hdr.dst_port;
- } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
- filter->input.flow.udp6_flow.src_port =
- udp_spec->hdr.src_port;
- filter->input.flow.udp6_flow.dst_port =
- udp_spec->hdr.dst_port;
- }
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_SCTP:
sctp_spec =
(const struct rte_flow_item_sctp *)item->spec;
sctp_mask =
(const struct rte_flow_item_sctp *)item->mask;
- if (!sctp_spec || !sctp_mask) {
- rte_flow_error_set(error, EINVAL,
+
+ if (sctp_spec && sctp_mask) {
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "NULL SCTP spec/mask");
- return -rte_errno;
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (sctp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+ if (sctp_mask->hdr.tag == UINT32_MAX)
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag
+ = sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag
+ = sctp_spec->hdr.tag;
+ }
}
- /* Check SCTP mask and update input set */
- if (sctp_mask->hdr.cksum) {
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_RAW:
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+ if (!raw_spec || !raw_mask) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "Invalid UDP mask");
+ "NULL RAW spec/mask");
return -rte_errno;
}
- if (sctp_mask->hdr.src_port != UINT16_MAX ||
- sctp_mask->hdr.dst_port != UINT16_MAX ||
- sctp_mask->hdr.tag != UINT32_MAX) {
+ ret = i40e_flow_check_raw_item(item, raw_spec, error);
+ if (ret < 0)
+ return ret;
+
+ off_arr[raw_id] = raw_spec->offset;
+ len_arr[raw_id] = raw_spec->length;
+
+ flex_size = 0;
+ memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
+ flex_pit.size =
+ raw_spec->length / sizeof(uint16_t);
+ flex_pit.dst_offset =
+ next_dst_off / sizeof(uint16_t);
+
+ for (i = 0; i <= raw_id; i++) {
+ if (i == raw_id)
+ flex_pit.src_offset +=
+ raw_spec->offset /
+ sizeof(uint16_t);
+ else
+ flex_pit.src_offset +=
+ (off_arr[i] + len_arr[i]) /
+ sizeof(uint16_t);
+ flex_size += len_arr[i];
+ }
+ if (((flex_pit.src_offset + flex_pit.size) >=
+ I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
+ flex_size > I40E_FDIR_MAX_FLEXLEN) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid UDP mask");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceeds maxmial payload limit.");
return -rte_errno;
}
- input_set |= I40E_INSET_SRC_PORT;
- input_set |= I40E_INSET_DST_PORT;
- input_set |= I40E_INSET_SCTP_VT;
-
- /* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
- filter->input.flow.sctp4_flow.src_port =
- sctp_spec->hdr.src_port;
- filter->input.flow.sctp4_flow.dst_port =
- sctp_spec->hdr.dst_port;
- filter->input.flow.sctp4_flow.verify_tag =
- sctp_spec->hdr.tag;
- } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
- filter->input.flow.sctp6_flow.src_port =
- sctp_spec->hdr.src_port;
- filter->input.flow.sctp6_flow.dst_port =
- sctp_spec->hdr.dst_port;
- filter->input.flow.sctp6_flow.verify_tag =
- sctp_spec->hdr.tag;
+
+ /* Store flex pit to SW */
+ ret = i40e_flow_store_flex_pit(pf, &flex_pit,
+ layer_idx, raw_id);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule.");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_pit = false;
+
+ for (i = 0; i < raw_spec->length; i++) {
+ j = i + next_dst_off;
+ filter->input.flow_ext.flexbytes[j] =
+ raw_spec->pattern[i];
+ flex_mask[j] = raw_mask->pattern[i];
}
+
+ next_dst_off += raw_spec->length;
+ raw_id++;
break;
case RTE_FLOW_ITEM_TYPE_VF:
vf_spec = (const struct rte_flow_item_vf *)item->spec;
@@ -1075,14 +2784,44 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (input_set != i40e_get_default_input_set(pctype)) {
+ ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Conflict with the first rule's input set.");
+ return -rte_errno;
+ } else if (ret == -EINVAL) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Invalid input set.");
+ "Invalid pattern mask.");
return -rte_errno;
}
+
filter->input.flow_type = flow_type;
+ /* Store flex mask to SW */
+ ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceed maximal number of bitmasks");
+ return -rte_errno;
+ } else if (ret == -2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_msk = false;
+
+ if (cfg_flex_pit)
+ i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+
+ if (cfg_flex_msk)
+ i40e_flow_set_fdir_flex_msk(pf, pctype);
+
return 0;
}
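Taken together with the action parsing below, the new flexible payload path can be exercised through the public rte_flow API. A minimal sketch matching the pattern_fdir_ipv4_udp_raw_1 template; it assumes the standard rte_flow calls, a pointer-based rte_flow_item_raw, and a 16-bit port id (older releases differ on both), and every address, port, queue and mark id here is made up:

    #include <stdint.h>
    #include <rte_byteorder.h>
    #include <rte_flow.h>

    static struct rte_flow *
    fdir_flex_example(uint16_t port_id, struct rte_flow_error *err)
    {
            /* two flexible payload bytes right after the UDP header */
            static const uint8_t spec_bytes[2] = { 0xab, 0xcd };
            static const uint8_t mask_bytes[2] = { 0xff, 0xff };

            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_udp udp_spec = {
                    .hdr.dst_port = rte_cpu_to_be_16(4789),
            };
            struct rte_flow_item_udp udp_mask = {
                    .hdr.dst_port = UINT16_MAX, /* match dst port only */
            };
            struct rte_flow_item_raw raw_spec = {
                    .relative = 1, .offset = 0,
                    .length = 2, .pattern = spec_bytes,
            };
            struct rte_flow_item_raw raw_mask = raw_spec;
            raw_mask.pattern = mask_bytes; /* same shape, full byte mask */

            struct rte_flow_item items[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP,
                      .spec = &udp_spec, .mask = &udp_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_RAW,
                      .spec = &raw_spec, .mask = &raw_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 3 };
            struct rte_flow_action_mark mark = { .id = 0x1234 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            return rte_flow_create(port_id, &attr, items, actions, err);
    }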
@@ -1101,55 +2840,64 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
const struct rte_flow_action_mark *mark_spec;
uint32_t index = 0;
- /* Check if the first non-void action is QUEUE or DROP. */
+ /* Check if the first non-void action is QUEUE, DROP or PASSTHRU. */
NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
- act->type != RTE_FLOW_ACTION_TYPE_DROP) {
- rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Invalid action.");
- return -rte_errno;
- }
-
- act_q = (const struct rte_flow_action_queue *)act->conf;
- filter->action.flex_off = 0;
- if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.rx_queue = act_q->index;
+ if ((!filter->input.flow_ext.is_vf &&
+ filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
+ (filter->input.flow_ext.is_vf &&
+ filter->action.rx_queue >= pf->vf_nb_qps)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
- else
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
filter->action.behavior = RTE_ETH_FDIR_REJECT;
-
- filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
- filter->action.rx_queue = act_q->index;
-
- if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ break;
+ case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+ filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+ break;
+ default:
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid queue ID for FDIR.");
+ "Invalid action.");
return -rte_errno;
}
- /* Check if the next non-void item is MARK or END. */
+ /* Check if the next non-void item is MARK or FLAG or END. */
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
- act->type != RTE_FLOW_ACTION_TYPE_END) {
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->soft_id = mark_spec->id;
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_END:
+ return 0;
+ default:
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
act, "Invalid action.");
return -rte_errno;
}
- if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
- mark_spec = (const struct rte_flow_action_mark *)act->conf;
- filter->soft_id = mark_spec->id;
-
- /* Check if the next non-void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Invalid action.");
- return -rte_errno;
- }
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
}
return 0;
@@ -1262,27 +3010,27 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
return 0;
}
+static uint16_t i40e_supported_tunnel_filter_types[] = {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IMAC,
+ ETH_TUNNEL_FILTER_IMAC,
+};
+
static int
-i40e_check_tenant_id_mask(const uint8_t *mask)
+i40e_check_tunnel_filter_type(uint8_t filter_type)
{
- uint32_t j;
- int is_masked = 0;
-
- for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
- if (*(mask + j) == UINT8_MAX) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 0;
- } else if (*(mask + j) == 0) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 1;
- } else {
- return -EINVAL;
- }
+ uint8_t i;
+
+ for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
+ if (filter_type == i40e_supported_tunnel_filter_types[i])
+ return 0;
}
- return is_masked;
+ return -1;
}
/* 1. Last in item should be NULL as range is not supported.
@@ -1302,18 +3050,17 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *item = pattern;
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
- const struct rte_flow_item_eth *o_eth_spec = NULL;
- const struct rte_flow_item_eth *o_eth_mask = NULL;
- const struct rte_flow_item_vxlan *vxlan_spec = NULL;
- const struct rte_flow_item_vxlan *vxlan_mask = NULL;
- const struct rte_flow_item_eth *i_eth_spec = NULL;
- const struct rte_flow_item_eth *i_eth_mask = NULL;
- const struct rte_flow_item_vlan *vlan_spec = NULL;
- const struct rte_flow_item_vlan *vlan_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint8_t filter_type = 0;
bool is_vni_masked = 0;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
enum rte_flow_item_type item_type;
bool vxlan_flag = 0;
uint32_t tenant_id_be = 0;
+ int ret;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
@@ -1328,6 +3075,11 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = (const struct rte_flow_item_eth *)item->spec;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Check if the ETH item is used as a placeholder.
+ * If yes, both spec and mask should be NULL.
+ * If no, neither spec nor mask should be NULL.
+ */
if ((!eth_spec && eth_mask) ||
(eth_spec && !eth_mask)) {
rte_flow_error_set(error, EINVAL,
@@ -1351,50 +3103,40 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
return -rte_errno;
}
- if (!vxlan_flag)
+ if (!vxlan_flag) {
rte_memcpy(&filter->outer_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
- else
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
rte_memcpy(&filter->inner_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
}
-
- if (!vxlan_flag) {
- o_eth_spec = eth_spec;
- o_eth_mask = eth_mask;
- } else {
- i_eth_spec = eth_spec;
- i_eth_mask = eth_mask;
- }
-
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec =
(const struct rte_flow_item_vlan *)item->spec;
vlan_mask =
(const struct rte_flow_item_vlan *)item->mask;
- if (vxlan_flag) {
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
- if (!(vlan_spec && vlan_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid vlan item");
- return -rte_errno;
- }
- } else {
- if (vlan_spec || vlan_mask)
- rte_flow_error_set(error, EINVAL,
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid vlan item");
return -rte_errno;
}
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
@@ -1441,7 +3183,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
(const struct rte_flow_item_vxlan *)item->mask;
/* Check if VXLAN item is used to describe protocol.
* If yes, both spec and mask should be NULL.
- * If no, either spec or mask shouldn't be NULL.
+ * If no, neither spec nor mask should be NULL.
*/
if ((!vxlan_spec && vxlan_mask) ||
(vxlan_spec && !vxlan_mask)) {
@@ -1453,17 +3195,25 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
/* Check if VNI is masked. */
- if (vxlan_mask) {
+ if (vxlan_spec && vxlan_mask) {
is_vni_masked =
- i40e_check_tenant_id_mask(vxlan_mask->vni);
- if (is_vni_masked < 0) {
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (is_vni_masked) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid VNI mask");
return -rte_errno;
}
+
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
}
+
vxlan_flag = 1;
break;
default:
@@ -1471,95 +3221,243 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
}
- /* Check specification and mask to get the filter type */
- if (vlan_spec && vlan_mask &&
- (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
- /* If there's inner vlan */
- filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
- & I40E_TCI_MASK;
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
- else {
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ filter->filter_type = filter_type;
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. The "last" field of the items should be NULL as ranges are not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ enum rte_flow_item_type item_type;
+ uint8_t filter_type = 0;
+ bool is_tni_masked = 0;
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ bool nvgre_flag = 0;
+ uint32_t tenant_id_be = 0;
+ int ret;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Range not supported");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Check if the ETH item is used as a placeholder.
+ * If yes, both spec and mask should be NULL.
+ * If no, neither spec nor mask should be NULL.
+ */
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
+ item,
+ "Invalid ether spec/mask");
return -rte_errno;
}
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN;
- else {
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!nvgre_flag) {
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
+ item,
+ "Invalid vlan item");
return -rte_errno;
}
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else if ((!vlan_spec && !vlan_mask) ||
- (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
- /* If there's no inner vlan */
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_TENID;
- else if (o_eth_spec && o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask) {
- filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
- } else {
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
return -rte_errno;
}
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
- return -rte_errno;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ /* Check if NVGRE item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, neither spec nor mask should be NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ is_tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (is_tni_masked) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
+ }
+
+ nvgre_flag = 1;
+ break;
+ default:
+ break;
}
- } else {
+ }
+
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Not supported by tunnel filter.");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
return -rte_errno;
}
+ filter->filter_type = filter_type;
- filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+ filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
return 0;
}
static int
-i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -1570,7 +3468,7 @@ i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
&filter->consistent_tunnel_filter;
int ret;
- ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
error, tunnel_filter);
if (ret)
return ret;
@@ -1821,8 +3719,10 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
}
/* Get filter specification */
- if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
- (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) &&
+ (i_vlan_mask != NULL) &&
+ (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
& I40E_TCI_MASK;
filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
@@ -1880,7 +3780,8 @@ i40e_flow_validate(struct rte_eth_dev *dev,
parse_filter_t parse_filter;
uint32_t item_num = 0; /* number of non-void items in the pattern */
uint32_t i = 0;
- int ret;
+ bool flag = false;
+ int ret = I40E_NOT_SUPPORTED;
if (!pattern) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1922,16 +3823,21 @@ i40e_flow_validate(struct rte_eth_dev *dev,
i40e_pattern_skip_void_item(items, pattern);
- /* Find if there's matched parse filter function */
- parse_filter = i40e_find_parse_filter_func(items);
- if (!parse_filter) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- pattern, "Unsupported pattern");
- return -rte_errno;
- }
-
- ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+ i = 0;
+ do {
+ parse_filter = i40e_find_parse_filter_func(items, &i);
+ if (!parse_filter && !flag) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ rte_free(items);
+ return -rte_errno;
+ }
+ if (parse_filter)
+ ret = parse_filter(dev, attr, items, actions,
+ error, &cons_filter);
+ flag = true;
+ } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
rte_free(items);
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 0758503e..100f8dc2 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -61,7 +61,7 @@
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
- struct i40e_virtchnl_queue_select *qsel,
+ struct virtchnl_queue_select *qsel,
bool on);
/**
@@ -128,7 +128,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
struct i40e_pf *pf;
uint16_t vf_id, abs_vf_id, vf_msix_num;
int ret;
- struct i40e_virtchnl_queue_select qsel;
+ struct virtchnl_queue_select qsel;
if (vf == NULL)
return -EINVAL;
@@ -139,7 +139,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
abs_vf_id = vf_id + hw->func_caps.vf_base_id;
/* Notify VF that we are in VFR progress */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS);
/*
* If require a SW VF reset, a VFLR interrupt will be generated,
@@ -167,7 +167,6 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
PMD_DRV_LOG(ERR, "VF reset timeout");
return -ETIMEDOUT;
}
-
/* This is not first time to do reset, do cleanup job first */
if (vf->vsi) {
/* Disable queues */
@@ -220,7 +219,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
}
/* Reset done, Set COMPLETE flag and clear reset bit */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED);
val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
@@ -248,7 +247,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
return -EFAULT;
}
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE);
return ret;
}
@@ -277,7 +276,7 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
static void
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
- struct i40e_virtchnl_version_info info;
+ struct virtchnl_version_info info;
/* Respond like a Linux PF host in order to support both DPDK VF and
* Linux VF driver. The expense is original DPDK host specific feature
@@ -286,16 +285,16 @@ i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
* The DPDK VF also can't identify the host driver by the returned version number.
* It always assumes it is talking with a Linux PF.
*/
- info.major = I40E_VIRTCHNL_VERSION_MAJOR;
- info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ info.major = VIRTCHNL_VERSION_MAJOR;
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
if (b_op)
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
I40E_SUCCESS,
(uint8_t *)&info,
sizeof(info));
else
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
I40E_NOT_SUPPORTED,
(uint8_t *)&info,
sizeof(info));
@@ -313,22 +312,22 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
- struct i40e_virtchnl_vf_resource *vf_res = NULL;
+ struct virtchnl_vf_resource *vf_res = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint32_t len = 0;
int ret = I40E_SUCCESS;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(vf,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
/* only have 1 VSI by default */
- len = sizeof(struct i40e_virtchnl_vf_resource) +
+ len = sizeof(struct virtchnl_vf_resource) +
I40E_DEFAULT_VF_VSI_NUM *
- sizeof(struct i40e_virtchnl_vsi_resource);
+ sizeof(struct virtchnl_vsi_resource);
vf_res = rte_zmalloc("i40e_vf_res", len, 0);
if (vf_res == NULL) {
@@ -339,21 +338,21 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
goto send_msg;
}
- vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
vf_res->num_queue_pairs = vf->vsi->nb_qps;
vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
/* Change below setting if PF host can support more VSIs for VF */
- vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
ether_addr_copy(&vf->mac_addr,
(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
ret, (uint8_t *)vf_res, len);
rte_free(vf_res);
@@ -363,7 +362,7 @@ send_msg:
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
struct i40e_pf_vf *vf,
- struct i40e_virtchnl_rxq_info *rxq,
+ struct virtchnl_rxq_info *rxq,
uint8_t crcstrip)
{
int err = I40E_SUCCESS;
@@ -431,7 +430,7 @@ i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
struct i40e_pf_vf *vf,
- struct i40e_virtchnl_txq_info *txq)
+ struct virtchnl_txq_info *txq)
{
int err = I40E_SUCCESS;
struct i40e_hmc_obj_txq tx_ctx;
@@ -480,14 +479,14 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
- struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
- struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ struct virtchnl_vsi_queue_config_info *vc_vqci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *vc_qpi;
int i, ret = I40E_SUCCESS;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -511,9 +510,9 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
/*
* Apply VF RX queue setting to HMC.
- * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
* then the extra information of
- * 'struct i40e_virtchnl_queue_pair_extra_info' is needed,
+ * 'struct virtchnl_queue_pair_extra_info' is needed,
* otherwise set the last parameter to NULL.
*/
if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
@@ -533,7 +532,7 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
ret, NULL, 0);
return ret;
@@ -547,15 +546,15 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
- struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
- (struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
- struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ struct virtchnl_vsi_queue_config_ext_info *vc_vqcei =
+ (struct virtchnl_vsi_queue_config_ext_info *)msg;
+ struct virtchnl_queue_pair_ext_info *vc_qpei;
int i, ret = I40E_SUCCESS;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -578,9 +577,9 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
}
/*
* Apply VF RX queue setting to HMC.
- * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
* then the extra information of
- * 'struct i40e_virtchnl_queue_pair_ext_info' is needed,
+ * 'struct virtchnl_queue_pair_ext_info' is needed,
* otherwise set the last parameter to NULL.
*/
if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
@@ -600,7 +599,7 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
ret, NULL, 0);
return ret;
@@ -608,7 +607,7 @@ send_msg:
static void
i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
- struct i40e_virtchnl_vector_map *vvm)
+ struct virtchnl_vector_map *vvm)
{
#define BITS_PER_CHAR 8
uint64_t linklistmap = 0, tempmap;
@@ -711,9 +710,9 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
int ret = I40E_SUCCESS;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
- struct i40e_virtchnl_irq_map_info *irqmap =
- (struct i40e_virtchnl_irq_map_info *)msg;
- struct i40e_virtchnl_vector_map *map;
+ struct virtchnl_irq_map_info *irqmap =
+ (struct virtchnl_irq_map_info *)msg;
+ struct virtchnl_vector_map *map;
int i;
uint16_t vector_id;
unsigned long qbit_max;
@@ -721,12 +720,12 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
- if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
+ if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) {
PMD_DRV_LOG(ERR, "buffer too short");
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -773,7 +772,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
ret, NULL, 0);
return ret;
@@ -781,7 +780,7 @@ send_msg:
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
- struct i40e_virtchnl_queue_select *qsel,
+ struct virtchnl_queue_select *qsel,
bool on)
{
int ret = I40E_SUCCESS;
@@ -831,8 +830,8 @@ i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
uint16_t msglen)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_queue_select *q_sel =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *q_sel =
+ (struct virtchnl_queue_select *)msg;
if (msg == NULL || msglen != sizeof(*q_sel)) {
ret = I40E_ERR_PARAM;
@@ -841,7 +840,7 @@ i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
ret = i40e_pf_host_switch_queues(vf, q_sel, true);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
ret, NULL, 0);
return ret;
@@ -854,13 +853,13 @@ i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_queue_select *q_sel =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *q_sel =
+ (struct virtchnl_queue_select *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ VIRTCHNL_OP_DISABLE_QUEUES,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -872,7 +871,7 @@ i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
ret = i40e_pf_host_switch_queues(vf, q_sel, false);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
ret, NULL, 0);
return ret;
@@ -886,8 +885,8 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_ether_addr_list *addr_list =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *addr_list =
+ (struct virtchnl_ether_addr_list *)msg;
struct i40e_mac_filter_info filter;
int i;
struct ether_addr *mac;
@@ -895,7 +894,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -920,7 +919,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
ret, NULL, 0);
return ret;
@@ -933,15 +932,15 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_ether_addr_list *addr_list =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *addr_list =
+ (struct virtchnl_ether_addr_list *)msg;
int i;
struct ether_addr *mac;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ VIRTCHNL_OP_DEL_ETH_ADDR,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -962,7 +961,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
ret, NULL, 0);
return ret;
@@ -974,15 +973,15 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct virtchnl_vlan_filter_list *)msg;
int i;
uint16_t *vid;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_ADD_VLAN,
+ VIRTCHNL_OP_ADD_VLAN,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -1002,7 +1001,7 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
ret, NULL, 0);
return ret;
@@ -1015,15 +1014,15 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct virtchnl_vlan_filter_list *)msg;
int i;
uint16_t *vid;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_DEL_VLAN,
+ VIRTCHNL_OP_DEL_VLAN,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -1042,7 +1041,7 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
ret, NULL, 0);
return ret;
@@ -1056,15 +1055,15 @@ i40e_pf_host_process_cmd_config_promisc_mode(
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_promisc_info *promisc =
- (struct i40e_virtchnl_promisc_info *)msg;
+ struct virtchnl_promisc_info *promisc =
+ (struct virtchnl_promisc_info *)msg;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
bool unicast = FALSE, multicast = FALSE;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -1074,21 +1073,21 @@ i40e_pf_host_process_cmd_config_promisc_mode(
goto send_msg;
}
- if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ if (promisc->flags & FLAG_VF_UNICAST_PROMISC)
unicast = TRUE;
ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
vf->vsi->seid, unicast, NULL, true);
if (ret != I40E_SUCCESS)
goto send_msg;
- if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ if (promisc->flags & FLAG_VF_MULTICAST_PROMISC)
multicast = TRUE;
ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
multicast, NULL);
send_msg:
i40e_pf_host_send_msg_to_vf(vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
return ret;
}
@@ -1099,12 +1098,12 @@ i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
i40e_update_vsi_stats(vf->vsi);
if (b_op)
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
I40E_SUCCESS,
(uint8_t *)&vf->vsi->eth_stats,
sizeof(vf->vsi->eth_stats));
else
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
I40E_NOT_SUPPORTED,
(uint8_t *)&vf->vsi->eth_stats,
sizeof(vf->vsi->eth_stats));
@@ -1113,37 +1112,47 @@ i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
}
static int
-i40e_pf_host_process_cmd_cfg_vlan_offload(
- struct i40e_pf_vf *vf,
- uint8_t *msg,
- uint16_t msglen,
- bool b_op)
+i40e_pf_host_process_cmd_enable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vlan_offload_info *offload =
- (struct i40e_virtchnl_vlan_offload_info *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
- if (msg == NULL || msglen != sizeof(*offload)) {
- ret = I40E_ERR_PARAM;
- goto send_msg;
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi, TRUE);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to enable vlan stripping");
+
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
+{
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
}
- ret = i40e_vsi_config_vlan_stripping(vf->vsi,
- !!offload->enable_vlan_strip);
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi, FALSE);
if (ret != 0)
- PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");
+ PMD_DRV_LOG(ERR, "Failed to disable vlan stripping");
-send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
- ret, NULL, 0);
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ ret, NULL, 0);
return ret;
}
@@ -1155,8 +1164,8 @@ i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_pvid_info *tpid_info =
- (struct i40e_virtchnl_pvid_info *)msg;
+ struct virtchnl_pvid_info *tpid_info =
+ (struct virtchnl_pvid_info *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
@@ -1183,39 +1192,39 @@ send_msg:
void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
- struct i40e_virtchnl_pf_event event;
+ struct virtchnl_pf_event event;
- event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ event.event = VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
+ /* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
switch (dev->data->dev_link.link_speed) {
case ETH_SPEED_NUM_100M:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
break;
case ETH_SPEED_NUM_1G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
break;
case ETH_SPEED_NUM_10G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
break;
case ETH_SPEED_NUM_20G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
break;
case ETH_SPEED_NUM_25G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
break;
case ETH_SPEED_NUM_40G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
break;
default:
event.event_data.link_event.link_speed =
- I40E_LINK_SPEED_UNKNOWN;
+ VIRTCHNL_LINK_SPEED_UNKNOWN;
break;
}
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
@@ -1231,7 +1240,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
struct i40e_pf_vf *vf;
/* AdminQ passes the absolute VF id; convert it to the internal VF id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
- struct rte_pmd_i40e_mb_event_param cb_param;
+ struct rte_pmd_i40e_mb_event_param ret_param;
bool b_op = TRUE;
if (vf_id > pf->vf_num - 1 || !pf->vfs) {
@@ -1251,100 +1260,104 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
* initialise the structure sent to the user application; the
* application's response will come back in the retval field
*/
- cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
- cb_param.vfid = vf_id;
- cb_param.msg_type = opcode;
- cb_param.msg = (void *)msg;
- cb_param.msglen = msglen;
+ ret_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
+ ret_param.vfid = vf_id;
+ ret_param.msg_type = opcode;
+ ret_param.msg = (void *)msg;
+ ret_param.msglen = msglen;
/**
* Ask user application if we're allowed to perform those functions.
- * If we get cb_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
+ * If we get ret_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
* then business as usual.
* If RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
* do nothing and send not_supported to the VF, as the PF must send a
* response to the VF and a bare ACK/NACK is not defined.
*/
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
- if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+ NULL, &ret_param);
+ if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
opcode);
b_op = FALSE;
}
switch (opcode) {
- case I40E_VIRTCHNL_OP_VERSION :
+ case VIRTCHNL_OP_VERSION:
PMD_DRV_LOG(INFO, "OP_VERSION received");
i40e_pf_host_process_cmd_version(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_RESET_VF :
+ case VIRTCHNL_OP_RESET_VF:
PMD_DRV_LOG(INFO, "OP_RESET_VF received");
i40e_pf_host_process_cmd_reset_vf(vf);
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
if (b_op) {
i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
i40e_notify_vf_link_status(dev, vf);
} else {
i40e_pf_host_send_msg_to_vf(
- vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_NOT_SUPPORTED, NULL, 0);
}
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
i40e_pf_host_process_cmd_add_ether_address(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
i40e_pf_host_process_cmd_del_ether_address(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_GET_STATS:
PMD_DRV_LOG(INFO, "OP_GET_STATS received");
i40e_pf_host_process_cmd_get_stats(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
- PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
- i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
- msglen, b_op);
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ PMD_DRV_LOG(INFO, "OP_ENABLE_VLAN_STRIPPING received");
+ i40e_pf_host_process_cmd_enable_vlan_strip(vf, b_op);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
+ i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index b4c22876..7afb7eae 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -35,7 +35,7 @@
#define _I40E_PF_H_
/* VERSION info to exchange between VF and PF host. In case VF works with
- * ND kernel driver, it reads I40E_VIRTCHNL_VERSION_MAJOR/MINOR. In
+ * ND kernel driver, it reads VIRTCHNL_VERSION_MAJOR/MINOR. In
* case it works with a DPDK host, it reads the version below. Then the VF
* realizes who it is talking to and uses the proper language to communicate.
*/
@@ -49,45 +49,44 @@
#define I40E_DPDK_OFFSET 0x100
/* DPDK pf driver specific command to VF */
-enum i40e_virtchnl_ops_dpdk {
+enum virtchnl_ops_dpdk {
/*
* Keep some gap between Linux PF commands and
* DPDK PF extended commands.
*/
- I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD = I40E_VIRTCHNL_OP_VERSION +
- I40E_DPDK_OFFSET,
- I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ I40E_VIRTCHNL_OP_CFG_VLAN_PVID = VIRTCHNL_OP_VERSION +
+ I40E_DPDK_OFFSET,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
};
/* A structure to support extended info of a receive queue. */
-struct i40e_virtchnl_rxq_ext_info {
+struct virtchnl_rxq_ext_info {
uint8_t crcstrip;
};
/*
* A structure to support extended info of queue pairs, an additional field
- * is added, comparing to original 'struct i40e_virtchnl_queue_pair_info'.
+ * is added compared to the original 'struct virtchnl_queue_pair_info'.
*/
-struct i40e_virtchnl_queue_pair_ext_info {
+struct virtchnl_queue_pair_ext_info {
/* vsi_id and queue_id should be identical for both rx and tx queues.*/
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
- struct i40e_virtchnl_rxq_ext_info rxq_ext;
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+ struct virtchnl_rxq_ext_info rxq_ext;
};
/*
* A structure to support extended info of VSI queue pairs,
- * 'struct i40e_virtchnl_queue_pair_ext_info' is used, see its original
- * of 'struct i40e_virtchnl_queue_pair_info'.
+ * 'struct virtchnl_queue_pair_ext_info' is used; see the original
+ * 'struct virtchnl_queue_pair_info'.
*/
-struct i40e_virtchnl_vsi_queue_config_ext_info {
+struct virtchnl_vsi_queue_config_ext_info {
uint16_t vsi_id;
uint16_t num_queue_pairs;
- struct i40e_virtchnl_queue_pair_ext_info qpair[0];
+ struct virtchnl_queue_pair_ext_info qpair[0];
};
-struct i40e_virtchnl_vlan_offload_info {
+struct virtchnl_vlan_offload_info {
uint16_t vsi_id;
uint8_t enable_vlan_strip;
uint8_t reserved;
@@ -106,7 +105,7 @@ struct i40e_virtchnl_vlan_offload_info {
* enable op, needs to specify the pvid. PF returns status
* code in retval.
*/
-struct i40e_virtchnl_pvid_info {
+struct virtchnl_pvid_info {
uint16_t vsi_id;
struct i40e_vsi_vlan_pvid_info info;
};
@@ -114,7 +113,7 @@ struct i40e_virtchnl_pvid_info {
int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset);
void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint16_t abs_vf_id, uint32_t opcode,
- __rte_unused uint32_t retval,
+ uint32_t retval,
uint8_t *msg, uint16_t msglen);
int i40e_pf_host_init(struct rte_eth_dev *dev);
int i40e_pf_host_uninit(struct rte_eth_dev *dev);
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 351cb94d..d42c23c0 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1257,7 +1257,7 @@ end_of_tx:
return nb_tx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txep;
@@ -1608,7 +1608,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxq = dev->data->rx_queues[rx_queue_id];
/*
- * rx_queue_id is queue id aplication refers to, while
+ * rx_queue_id is the queue id the application refers to, while
* rxq->reg_idx is the real queue index.
*/
err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
@@ -1639,7 +1639,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
txq = dev->data->tx_queues[tx_queue_id];
/*
- * tx_queue_id is queue id aplication refers to, while
+ * tx_queue_id is the queue id the application refers to, while
* txq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
@@ -1664,7 +1664,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
txq = dev->data->tx_queues[tx_queue_id];
/*
- * tx_queue_id is queue id aplication refers to, while
+ * tx_queue_id is the queue id the application refers to, while
* txq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
@@ -2474,7 +2474,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
case I40E_FLAG_HEADER_SPLIT_DISABLED:
default:
rxq->rx_hdr_len = 0;
- rxq->rx_buf_len = RTE_ALIGN(buf_size,
+ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
rxq->hs_mode = i40e_header_split_none;
break;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 69209668..39a6da06 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -102,7 +102,7 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
return pkt_idx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txep;
@@ -159,7 +159,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
return txq->tx_rs_thresh;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
tx_backlog_entry(struct i40e_tx_entry *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 3b4a352e..779f14e5 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -87,6 +87,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
mb1 = rxep[1].mbuf;
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -117,7 +119,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
}
static inline void
-desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
+desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4],
struct rte_mbuf **rx_pkts)
{
const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
@@ -203,6 +205,12 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
@@ -252,6 +260,15 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-rxq->crc_len, /* sub crc on pkt_len */
0, 0 /* ignore pkt_type field */
);
+ /*
+ * compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
__m128i dd_check, eop_check;
/* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
@@ -296,6 +313,19 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0xFF, 0xFF, /* pkt_type set as unknown */
0xFF, 0xFF /* pkt_type set as unknown */
);
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
@@ -621,11 +651,5 @@ i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
-#ifndef RTE_LIBRTE_IEEE1588
- /* need SSE4.1 support */
- if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- return -1;
-#endif
-
return i40e_rx_vec_dev_conf_condition_check_default(dev);
}
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
new file mode 100644
index 00000000..d90313af
--- /dev/null
+++ b/drivers/net/i40e/i40e_tm.c
@@ -0,0 +1,976 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
+static int i40e_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+const struct rte_tm_ops i40e_tm_ops = {
+ .capabilities_get = i40e_tm_capabilities_get,
+ .shaper_profile_add = i40e_shaper_profile_add,
+ .shaper_profile_delete = i40e_shaper_profile_del,
+ .node_add = i40e_node_add,
+ .node_delete = i40e_node_delete,
+ .node_type_get = i40e_node_type_get,
+ .level_capabilities_get = i40e_level_capabilities_get,
+ .node_capabilities_get = i40e_node_capabilities_get,
+ .hierarchy_commit = i40e_hierarchy_commit,
+};
+
+int
+i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &i40e_tm_ops;
+
+ return 0;
+}
+
+void
+i40e_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* initialize shaper profile list */
+ TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+
+ /* initialize node configuration */
+ pf->tm_conf.root = NULL;
+ TAILQ_INIT(&pf->tm_conf.tc_list);
+ TAILQ_INIT(&pf->tm_conf.queue_list);
+ pf->tm_conf.nb_tc_node = 0;
+ pf->tm_conf.nb_queue_node = 0;
+ pf->tm_conf.committed = false;
+}
+
+void
+i40e_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct i40e_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_tc_node = 0;
+ if (pf->tm_conf.root) {
+ rte_free(pf->tm_conf.root);
+ pf->tm_conf.root = NULL;
+ }
+
+ /* Remove all shaper profiles */
+ while ((shaper_profile =
+ TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
+ shaper_profile, node);
+ rte_free(shaper_profile);
+ }
+}
+
+static inline uint16_t
+i40e_tc_nb_get(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ uint16_t sum = 0;
+ int i;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (main_vsi->enabled_tc & BIT_ULL(i))
+ sum++;
+ }
+
+ return sum;
+}
+
+static int
+i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t tc_nb = i40e_tc_nb_get(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (tc_nb > hw->func_caps.num_tx_qp)
+ return -EINVAL;
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+ /**
+ * Supported hierarchy: port + TCs + queues.
+ * These are the max capabilities, not the current configuration.
+ */
+ cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp;
+ cap->n_levels_max = 3; /* port, TC, queue */
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->n_nodes_max;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps (rates are in bytes/second) */
+ cap->shaper_private_rate_max = 5000000000ull;
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+ cap->sched_n_children_max = hw->func_caps.num_tx_qp;
+ /**
+ * HW supports SP, but there is no plan to support it for now.
+ * So, all the nodes should have the same priority.
+ */
+ cap->sched_sp_n_priorities_max = 1;
+ cap->sched_wfq_n_children_per_group_max = 0;
+ cap->sched_wfq_n_groups_max = 0;
+ /**
+ * SW only supports fair round robin for now.
+ * So, all the nodes should have the same weight.
+ */
+ cap->sched_wfq_weight_max = 1;
+ cap->cman_head_drop_supported = 0;
+ cap->dynamic_update_mask = 0;
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
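From the application side these limits surface through rte_tm_capabilities_get(). A minimal usage sketch, assuming the rte_tm API as introduced in DPDK 17.08 and an already-initialized i40e port; error details are ignored for brevity:

#include <inttypes.h>
#include <stdio.h>
#include <rte_tm.h>

static void
show_tm_caps(uint8_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error tm_err;

	if (rte_tm_capabilities_get(port_id, &cap, &tm_err) != 0)
		return;

	/* For an i40e port this reports 3 levels and a 5 GBps max rate. */
	printf("levels: %u, nodes: %u, max private rate: %" PRIu64 " bytes/s\n",
	       cap.n_levels_max, cap.n_nodes_max,
	       cap.shaper_private_rate_max);
}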
+static inline struct i40e_tm_shaper_profile *
+i40e_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_shaper_profile_list *shaper_profile_list =
+ &pf->tm_conf.shaper_profile_list;
+ struct i40e_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min rate not supported */
+ if (profile->committed.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "committed rate not supported";
+ return -EINVAL;
+ }
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = i40e_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID exist";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("i40e_tm_shaper_profile",
+ sizeof(struct i40e_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ (void)rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+i40e_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
+static inline struct i40e_tm_node *
+i40e_tm_node_search(struct rte_eth_dev *dev,
+ uint32_t node_id, enum i40e_tm_node_type *node_type)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct i40e_tm_node *tm_node;
+
+ if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_PORT;
+ return pf->tm_conf.root;
+ }
+
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* shared shapers are not supported */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for root node */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFP should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for TC or queue node */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Currently the TC and queue configuration is controlled by DCB.
+ * We need to check whether the node configuration matches the DCB
+ * configuration. In the future, we may use TM to cover DCB.
+ */
+static int
+i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct i40e_tm_node *tm_node;
+ struct i40e_tm_node *parent_node;
+ uint16_t tc_nb = 0;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = i40e_node_param_check(node_id, parent_node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already used */
+ if (i40e_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ shaper_profile = i40e_shaper_profile_search(dev,
+ params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
+
+ /* a node without a parent is the root node */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id > I40E_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* there can be only one root */
+ if (pf->tm_conf.root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("i40e_tm_node",
+ sizeof(struct i40e_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = NULL;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ pf->tm_conf.root = tm_node;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = i40e_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != I40E_TM_NODE_TYPE_PORT &&
+ parent_node_type != I40E_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not port or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
+ /* check the TC number */
+ tc_nb = i40e_tc_nb_get(dev);
+ if (pf->tm_conf.nb_tc_node >= tc_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check the queue number */
+ if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+
+ /**
+ * Check the node ID.
+ * For a queue node, the node ID means the queue ID.
+ */
+ if (node_id >= hw->func_caps.num_tx_qp) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too large queue id";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("i40e_tm_node",
+ sizeof(struct i40e_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = parent_node;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
+ tm_node, node);
+ pf->tm_conf.nb_tc_node++;
+ } else {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
+ tm_node, node);
+ pf->tm_conf.nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+}
+
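Putting the constraints above together (weight 1, priority 0, a pre-registered shaper profile, queue node IDs equal to queue IDs), here is a hedged sketch of building a port -> TC -> queue hierarchy through the generic rte_tm API as shipped in DPDK 17.08. The node and profile IDs and the 10 Gbps rate are arbitrary choices for this example, and return codes are left unchecked for brevity:

#include <string.h>
#include <rte_tm.h>

#define PORT_NODE 1000000 /* arbitrary non-queue IDs */
#define TC_NODE   1000

static int
setup_tm_hierarchy(uint8_t port_id)
{
	struct rte_tm_error err;
	struct rte_tm_shaper_params sp;
	struct rte_tm_node_params np;

	/* Profile 1: 10 Gbps cap; only peak.rate (bytes/second) is used. */
	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = 1250000000ULL;
	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);

	/* Profile 2: rate 0, i.e. no limit, for the TC and queue nodes. */
	memset(&sp, 0, sizeof(sp));
	rte_tm_shaper_profile_add(port_id, 2, &sp, &err);

	/* Root (port) node: no parent, priority 0, weight 1. */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = 1;
	np.nonleaf.n_sp_priorities = 1;
	rte_tm_node_add(port_id, PORT_NODE, RTE_TM_NODE_ID_NULL, 0, 1,
			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);

	/* TC and queue nodes; the driver applies the leaf checks to both,
	 * so WRED must be explicitly disabled. */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = 2;
	np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
	rte_tm_node_add(port_id, TC_NODE, PORT_NODE, 0, 1,
			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	rte_tm_node_add(port_id, 0 /* queue 0 */, TC_NODE, 0, 1,
			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);

	/* Apply the hierarchy; clear the configuration on failure. */
	return rte_tm_hierarchy_commit(port_id, 1, &err);
}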
+static int
+i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* the node must have no children */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == I40E_TM_NODE_TYPE_PORT) {
+ tm_node->shaper_profile->reference_count--;
+ rte_free(tm_node);
+ pf->tm_conf.root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ tm_node->shaper_profile->reference_count--;
+ tm_node->parent->reference_count--;
+ if (node_type == I40E_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ pf->tm_conf.nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ pf->tm_conf.nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int
+i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == I40E_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+i40e_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (level_id >= I40E_TM_NODE_TYPE_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "too deep level";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (level_id == I40E_TM_NODE_TYPE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = false;
+ cap->nonleaf.shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps (rates are in bytes/second) */
+ cap->nonleaf.shaper_private_rate_max = 5000000000ull;
+ cap->nonleaf.shaper_shared_n_max = 0;
+ cap->nonleaf.sched_n_children_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->nonleaf.stats_mask = 0;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ if (level_id == I40E_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->func_caps.num_tx_qp;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp;
+ cap->non_leaf_nodes_identical = true;
+ }
+ cap->leaf_nodes_identical = true;
+ cap->leaf.shaper_private_supported = true;
+ cap->leaf.shaper_private_dual_rate_supported = false;
+ cap->leaf.shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps (rates are in bytes/second) */
+ cap->leaf.shaper_private_rate_max = 5000000000ull;
+ cap->leaf.shaper_shared_n_max = 0;
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ cap->leaf.stats_mask = 0;
+
+ return 0;
+}
+
+static int
+i40e_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum i40e_tm_node_type node_type;
+ struct i40e_tm_node *tm_node;
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported = false;
+ cap->shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps (rates are in bytes/second) */
+ cap->shaper_private_rate_max = 5000000000ull;
+ cap->shaper_shared_n_max = 0;
+
+ if (node_type == I40E_TM_NODE_TYPE_QUEUE) {
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ } else {
+ if (node_type == I40E_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ I40E_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->func_caps.num_tx_qp;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ }
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static int
+i40e_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct i40e_tm_node *tm_node;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+ uint64_t bw;
+ uint8_t tc_map;
+ int ret;
+ int i;
+
+ if (!error)
+ return -EINVAL;
+
+ /* check the setting */
+ if (!pf->tm_conf.root)
+ goto done;
+
+ vsi = pf->main_vsi;
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /**
+ * Bandwidth control for the port and the TCs in parallel is not
+ * supported. If the port has a max bandwidth, the TCs should have none.
+ */
+ /* port */
+ bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
+ if (bw) {
+ /* check if any TC has a max bandwidth */
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (tm_node->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no port and TC max bandwidth"
+ " in parallel";
+ goto fail_clear;
+ }
+ }
+
+ /* convert bytes/second to the 50 Mbps units used by HW */
+ bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;
+
+ /* set the max bandwidth */
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid,
+ (uint16_t)bw, 0, NULL);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "fail to set port max bandwidth";
+ goto fail_clear;
+ }
+
+ goto done;
+ }
+
+ /* TC */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ tc_map = vsi->enabled_tc;
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (!tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "TC without queue assigned";
+ goto fail_clear;
+ }
+
+ i = 0;
+ while (i < I40E_MAX_TRAFFIC_CLASS && !(tc_map & BIT_ULL(i)))
+ i++;
+ if (i >= I40E_MAX_TRAFFIC_CLASS) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "cannot find the TC";
+ goto fail_clear;
+ }
+ tc_map &= ~BIT_ULL(i);
+
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ if (!bw)
+ continue;
+
+ /* convert bytes/second to the 50 Mbps units used by HW */
+ bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;
+
+ tc_bw.tc_bw_credits[i] = rte_cpu_to_le_16((uint16_t)bw);
+ }
+
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ if (bw) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "not support queue QoS";
+ goto fail_clear;
+ }
+ }
+
+ ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "fail to set TC max bandwidth";
+ goto fail_clear;
+ }
+
+done:
+ pf->tm_conf.committed = true;
+ return 0;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ i40e_tm_conf_uninit(dev);
+ i40e_tm_conf_init(dev);
+ }
+ return -EINVAL;
+}
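To make the unit conversion in the commit path concrete, a small worked sketch (the 10 Gbps figure is arbitrary):

/* rte_tm shaper rates are in bytes/second; the admin queue takes VSI
 * bandwidth limits in I40E_QOS_BW_GRANULARITY (50 Mbps) units. */
static uint16_t
bps_to_credits(uint64_t bytes_per_sec)
{
	return (uint16_t)(bytes_per_sec * 8 / 1000 / 1000 /
			  I40E_QOS_BW_GRANULARITY);
}

/* Example: 1250000000 bytes/s (10 Gbps) -> 10000 Mbps -> 200 units,
 * which is the value i40e_aq_config_vsi_bw_limit() receives above. */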
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index f7ce62bb..f12b7f4a 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -40,15 +40,6 @@
#include "i40e_rxtx.h"
#include "rte_pmd_i40e.h"
-/* The max bandwidth of i40e is 40Gbps. */
-#define I40E_QOS_BW_MAX 40000
-/* The bandwidth should be the multiple of 50Mbps. */
-#define I40E_QOS_BW_GRANULARITY 50
-/* The min bandwidth weight is 1. */
-#define I40E_QOS_BW_WEIGHT_MIN 1
-/* The max bandwidth weight is 127. */
-#define I40E_QOS_BW_WEIGHT_MAX 127
-
int
rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
{
@@ -1468,7 +1459,7 @@ rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
return ret;
}
-#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
#define I40E_MAX_PROFILE_NUM 16
static void
@@ -1520,9 +1511,6 @@ i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
return status;
}
-#define I40E_PROFILE_INFO_SIZE 48
-#define I40E_MAX_PROFILE_NUM 16
-
/* Check if the profile info exists */
static int
i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
@@ -1557,11 +1545,7 @@ i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
sizeof(struct i40e_profile_section_header));
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
- if ((pinfo->track_id == p->track_id) &&
- !memcmp(&pinfo->version, &p->version,
- sizeof(struct i40e_ddp_version)) &&
- !memcmp(&pinfo->name, &p->name,
- I40E_DDP_NAME_SIZE)) {
+ if (pinfo->track_id == p->track_id) {
PMD_DRV_LOG(INFO, "Profile exists.");
rte_free(buff);
return 1;
@@ -1587,6 +1571,13 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
int is_exist;
enum i40e_status_code status = I40E_SUCCESS;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Operation not supported.");
+ return -ENOTSUP;
+ }
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
@@ -1623,6 +1614,10 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
return -EINVAL;
}
track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ PMD_DRV_LOG(ERR, "Invalid track_id");
+ return -EINVAL;
+ }
/* Find profile segment */
profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
@@ -1642,46 +1637,231 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
return -EINVAL;
}
+ /* Check if the profile is already loaded */
+ i40e_generate_profile_info_sec(
+ ((struct i40e_profile_segment *)profile_seg_hdr)->name,
+ &((struct i40e_profile_segment *)profile_seg_hdr)->version,
+ track_id, profile_info_sec,
+ op == RTE_PMD_I40E_PKG_OP_WR_ADD);
+ is_exist = i40e_check_profile_info(port, profile_info_sec);
+ if (is_exist < 0) {
+ PMD_DRV_LOG(ERR, "Failed to check profile.");
+ rte_free(profile_info_sec);
+ return -EINVAL;
+ }
+
if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
- /* Check if the profile exists */
- i40e_generate_profile_info_sec(
- ((struct i40e_profile_segment *)profile_seg_hdr)->name,
- &((struct i40e_profile_segment *)profile_seg_hdr)->version,
- track_id, profile_info_sec, 1);
- is_exist = i40e_check_profile_info(port, profile_info_sec);
- if (is_exist > 0) {
+ if (is_exist) {
PMD_DRV_LOG(ERR, "Profile already exists.");
rte_free(profile_info_sec);
- return 1;
- } else if (is_exist < 0) {
- PMD_DRV_LOG(ERR, "Failed to check profile.");
+ return -EEXIST;
+ }
+ } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ if (!is_exist) {
+ PMD_DRV_LOG(ERR, "Profile does not exist.");
rte_free(profile_info_sec);
- return -EINVAL;
+ return -EACCES;
}
+ }
- /* Write profile to HW */
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ status = i40e_rollback_profile(
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
+ rte_free(profile_info_sec);
+ return status;
+ }
+ } else {
status = i40e_write_profile(
- hw,
- (struct i40e_profile_segment *)profile_seg_hdr,
- track_id);
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
if (status) {
- PMD_DRV_LOG(ERR, "Failed to write profile.");
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to write profile for add.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to write profile.");
rte_free(profile_info_sec);
return status;
}
+ }
- /* Add profile info to info list */
+ if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
+ /* Modify loaded profiles info list */
status = i40e_add_rm_profile_info(hw, profile_info_sec);
- if (status)
- PMD_DRV_LOG(ERR, "Failed to add profile info.");
- } else {
- PMD_DRV_LOG(ERR, "Operation not supported.");
+ if (status) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
+ }
}
rte_free(profile_info_sec);
return status;
}
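For reference, a hedged sketch of driving this from an application: read a package file into memory and apply it to port 0 (assumed to be a bound i40e port; the file name and error handling are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <rte_pmd_i40e.h>

static int
load_ddp(const char *path) /* e.g. a hypothetical "profile.pkg" */
{
	FILE *f = fopen(path, "rb");
	long sz;
	uint8_t *buf;
	int ret;

	if (!f)
		return -1;
	fseek(f, 0, SEEK_END);
	sz = ftell(f);
	rewind(f);
	buf = malloc(sz);
	if (!buf || fread(buf, 1, sz, f) != (size_t)sz) {
		fclose(f);
		free(buf);
		return -1;
	}
	fclose(f);

	/* RTE_PMD_I40E_PKG_OP_WR_DEL with the same buffer rolls it back. */
	ret = rte_pmd_i40e_process_ddp_package(0, buf, (uint32_t)sz,
					       RTE_PMD_I40E_PKG_OP_WR_ADD);
	free(buf);
	return ret;
}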
+int
+rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
+ uint8_t *info_buff, uint32_t info_size,
+ enum rte_pmd_i40e_package_info type)
+{
+ uint32_t ret_size;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_generic_seg_header *i40e_seg_hdr;
+ struct i40e_generic_seg_header *note_seg_hdr;
+ struct i40e_generic_seg_header *metadata_seg_hdr;
+
+ if (!info_buff) {
+ PMD_DRV_LOG(ERR, "Output info buff is invalid.");
+ return -EINVAL;
+ }
+
+ if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) +
+ sizeof(uint32_t) * 2)) {
+ PMD_DRV_LOG(ERR, "Package buff is invalid.");
+ return -EINVAL;
+ }
+
+ pkg_hdr = (struct i40e_package_header *)pkg_buff;
+ if (pkg_hdr->segment_count < 2) {
+ PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
+ return -EINVAL;
+ }
+
+ /* Find metadata segment */
+ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+ pkg_hdr);
+
+ /* Find global notes segment */
+ note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
+ pkg_hdr);
+
+ /* Find i40e profile segment */
+ i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+
+ /* get global header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get global note size */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ if (note_seg_hdr == NULL)
+ ret_size = 0;
+ else
+ ret_size = note_seg_hdr->size;
+ *(uint32_t *)info_buff = ret_size;
+ return I40E_SUCCESS;
+ }
+
+ /* get global note */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
+ if (note_seg_hdr == NULL)
+ return -ENOTSUP;
+ if (info_size < note_seg_hdr->size) {
+ PMD_DRV_LOG(ERR, "Information buffer size is too small");
+ return -EINVAL;
+ }
+ memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
+ return I40E_SUCCESS;
+ }
+
+ /* get i40e segment header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ if (!i40e_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get number of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
+ if (!i40e_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
+ return -EINVAL;
+ }
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ *(uint32_t *)info_buff =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ return I40E_SUCCESS;
+ }
+
+ /* get list of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
+ uint32_t dev_num;
+
+ if (!i40e_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
+ return -EINVAL;
+ }
+ dev_num =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ memcpy(info_buff,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
+ sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
+ return I40E_SUCCESS;
+ }
+
+ PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
+ return -EINVAL;
+}
+
int
rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
{
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index 1efb2c4b..356fa89d 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -59,7 +59,7 @@ enum rte_pmd_i40e_mb_event_rsp {
*/
struct rte_pmd_i40e_mb_event_param {
uint16_t vfid; /**< Virtual Function number */
- uint16_t msg_type; /**< VF to PF message type, see i40e_virtchnl_ops */
+ uint16_t msg_type; /**< VF to PF message type, see virtchnl_ops */
uint16_t retval; /**< return value */
void *msg; /**< pointer to message */
uint16_t msglen; /**< length of the message */
@@ -71,9 +71,26 @@ struct rte_pmd_i40e_mb_event_param {
enum rte_pmd_i40e_package_op {
RTE_PMD_I40E_PKG_OP_UNDEFINED = 0,
RTE_PMD_I40E_PKG_OP_WR_ADD, /**< load package and add to info list */
+ RTE_PMD_I40E_PKG_OP_WR_DEL, /**< load package and delete from info list */
+ RTE_PMD_I40E_PKG_OP_WR_ONLY, /**< load package without modifying info list */
RTE_PMD_I40E_PKG_OP_MAX = 32
};
+/**
+ * Types of package information.
+ */
+enum rte_pmd_i40e_package_info {
+ RTE_PMD_I40E_PKG_INFO_UNDEFINED = 0,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_MAX = 1024,
+ RTE_PMD_I40E_PKG_INFO_HEADER,
+ RTE_PMD_I40E_PKG_INFO_DEVID_NUM,
+ RTE_PMD_I40E_PKG_INFO_DEVID_LIST,
+ RTE_PMD_I40E_PKG_INFO_MAX = 0xFFFFFFFF
+};
+
#define RTE_PMD_I40E_DDP_NAME_SIZE 32
/**
@@ -88,6 +105,14 @@ struct rte_pmd_i40e_ddp_version {
};
/**
+ * Device ID for dynamic device personalization.
+ */
+struct rte_pmd_i40e_ddp_device_id {
+ uint32_t vendor_dev_id;
+ uint32_t sub_vendor_dev_id;
+};
+
+/**
* Profile information in profile info list.
*/
struct rte_pmd_i40e_profile_info {
@@ -98,6 +123,8 @@ struct rte_pmd_i40e_profile_info {
uint8_t name[RTE_PMD_I40E_DDP_NAME_SIZE];
};
+#define RTE_PMD_I40E_DDP_OWNER_UNKNOWN 0xFF
+
/**
* Profile information list returned from HW.
*/
@@ -492,13 +519,36 @@ int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map);
* - (0) if successful.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
- * - (1) if profile exists.
+ * - (-EEXIST) if profile exists.
+ * - (-EACCES) if profile does not exist.
+ * - (-ENOTSUP) if operation not supported.
*/
int rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
uint32_t size,
enum rte_pmd_i40e_package_op op);
/**
+ * rte_pmd_i40e_get_ddp_info - Get profile information from a DDP package
+ * @param pkg
+ * buffer holding the DDP package.
+ * @param pkg_size
+ * size of the package buffer.
+ * @param info
+ * buffer for the requested information.
+ * @param size
+ * size of the information buffer.
+ * @param type
+ * type of information requested.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if information type not supported by the profile.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_ddp_info(uint8_t *pkg, uint32_t pkg_size,
+ uint8_t *info, uint32_t size,
+ enum rte_pmd_i40e_package_info type);
+
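As an illustration, a minimal sketch of querying the global header of a package already read into a buffer (the buffer names are placeholders; return codes other than 0 are treated as failure):

#include <stdio.h>
#include <rte_pmd_i40e.h>

/* pkg_buf/pkg_len: a DDP package already read from disk. */
static void
show_pkg_header(uint8_t *pkg_buf, uint32_t pkg_len)
{
	struct rte_pmd_i40e_profile_info pinfo;

	if (rte_pmd_i40e_get_ddp_info(pkg_buf, pkg_len,
				      (uint8_t *)&pinfo, sizeof(pinfo),
				      RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER))
		return;

	printf("track_id: 0x%08x  name: %s\n",
	       pinfo.track_id, (const char *)pinfo.name);
}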
+/**
* rte_pmd_i40e_get_ddp_list - Get loaded profile list
* @param port
* port id
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index 3b0e805d..20cc9801 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -38,3 +38,10 @@ DPDK_17.05 {
rte_pmd_i40e_get_ddp_list;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_pmd_i40e_get_ddp_info;
+
+} DPDK_17.05;