author     Ido Barnea <ibarnea@cisco.com>    2017-02-05 15:21:19 +0200
committer  Ido Barnea <ibarnea@cisco.com>    2017-02-13 12:32:01 +0200
commit     9ca4a157305e4e23a892ba9bafc9eee0f66954ce (patch)
tree       1a8afcf815fd33e7623e3c16246abe86c01bc8fd /src/dpdk/drivers/net/i40e
parent     2dab8f65015e9fa90df395be6ee1a07e9ac71044 (diff)
dpdk1702-rc2 upstream files unchanged + mlx5 driver rc3
Signed-off-by: Ido Barnea <ibarnea@cisco.com>
Diffstat (limited to 'src/dpdk/drivers/net/i40e')
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_adminq.c | 4
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h | 159
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_common.c | 501
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_devids.h | 4
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c | 5
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_nvm.c | 52
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_osdep.h | 10
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_prototype.h | 33
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_register.h | 2
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_type.h | 377
-rw-r--r--  src/dpdk/drivers/net/i40e/base/i40e_virtchnl.h | 5
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_ethdev.c | 2723
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_ethdev.h | 239
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_ethdev_vf.c | 346
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_fdir.c | 215
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_flow.c | 1849
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_pf.c | 449
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_pf.h | 12
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx.c | 833
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx.h | 571
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h | 251
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c | 614
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c (renamed from src/dpdk/drivers/net/i40e/i40e_rxtx_vec.c) | 268
-rw-r--r--  src/dpdk/drivers/net/i40e/rte_pmd_i40e.h | 335
24 files changed, 7882 insertions, 1975 deletions
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_adminq.c b/src/dpdk/drivers/net/i40e/base/i40e_adminq.c
index 0d3a83fa..5bdf3f77 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_adminq.c
+++ b/src/dpdk/drivers/net/i40e/base/i40e_adminq.c
@@ -1077,11 +1077,11 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
flags = LE16_TO_CPU(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h b/src/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h
index 2b7a7608..67cef7cf 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/src/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -139,12 +139,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
-#ifdef X722_SUPPORT
/* Proxy commands */
i40e_aqc_opc_set_proxy_config = 0x0104,
i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
-#endif
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -152,12 +150,11 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
-#ifdef X722_SUPPORT
/* WoL commands */
i40e_aqc_opc_set_wol_filter = 0x0120,
i40e_aqc_opc_get_wake_reason = 0x0121,
+ i40e_aqc_opc_clear_all_wol_filters = 0x025E,
-#endif
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -196,6 +193,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -223,6 +221,9 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
@@ -278,12 +279,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
-#ifdef X722_SUPPORT
i40e_aqc_opc_set_rss_key = 0x0B02,
i40e_aqc_opc_set_rss_lut = 0x0B03,
i40e_aqc_opc_get_rss_key = 0x0B04,
i40e_aqc_opc_get_rss_lut = 0x0B05,
-#endif
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -471,13 +470,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
- __le32 pfpm_proxyfc;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@@ -492,17 +493,19 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@@ -532,7 +535,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_WOL_PRESERVE_STATUS 0x200
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -552,6 +556,8 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
__le16 command_flags;
+#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@@ -575,15 +581,24 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
-#ifdef X722_SUPPORT
/* Set WoL Filter (0x0120) */
struct i40e_aqc_set_wol_filter {
__le16 filter_index;
#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
@@ -594,24 +609,29 @@ struct i40e_aqc_set_wol_filter {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
/* Get Wake Reason (0x0121) */
struct i40e_aqc_get_wake_reason_completion {
u8 reserved_1[2];
__le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
u8 reserved_2[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
-struct i40e_aqc_set_wol_filter_data {
- u8 filter[128];
- u8 mask[16];
-};
-
-I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
-
-#endif /* X722_SUPPORT */
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -694,6 +714,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -745,6 +767,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -913,16 +936,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
-#ifdef X722_SUPPORT
#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#endif
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#ifdef X722_SUPPORT
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
-#endif
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1644,6 +1663,24 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1691,6 +1728,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@@ -1698,7 +1736,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
@@ -1721,6 +1760,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
@@ -1731,7 +1772,22 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@@ -1753,7 +1809,16 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@@ -1833,16 +1898,26 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0X00
+#define I40E_AQ_25G_NOT_PRESENT 0X01
+#define I40E_AQ_25G_NVM_CRC_ERR 0X02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
@@ -2340,7 +2415,6 @@ struct i40e_aqc_del_udp_tunnel_completion {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
-#ifdef X722_SUPPORT
struct i40e_aqc_get_set_rss_key {
#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
@@ -2381,7 +2455,6 @@ struct i40e_aqc_get_set_rss_lut {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-#endif
/* tunnel key structure 0x0B10 */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_common.c b/src/dpdk/drivers/net/i40e/base/i40e_common.c
index 98ed4b68..b8d81651 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_common.c
+++ b/src/dpdk/drivers/net/i40e/base/i40e_common.c
@@ -71,7 +71,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
-#ifdef X722_SUPPORT
#ifdef X722_A0_SUPPORT
case I40E_DEV_ID_X722_A0:
#endif
@@ -81,21 +80,16 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
- case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
-#endif
-#ifdef X722_SUPPORT
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_VF_HV:
#ifdef X722_A0_SUPPORT
case I40E_DEV_ID_X722_A0_VF:
#endif
hw->mac.type = I40E_MAC_X722_VF;
break;
#endif /* INTEGRATED_VF || VF_DRIVER */
-#endif /* X722_SUPPORT */
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
@@ -115,7 +109,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
return status;
}
-#ifndef I40E_NDIS_SUPPORT
/**
* i40e_aq_str - convert AQ err code to a string
* @hw: pointer to the HW structure
@@ -322,7 +315,6 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
return hw->err_str;
}
-#endif /* I40E_NDIS_SUPPORT */
/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
@@ -383,8 +375,7 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
d_buf[j] = buf[i];
i40e_debug(hw, mask,
"\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i_sav, d_buf[0], d_buf[1],
- d_buf[2], d_buf[3],
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
d_buf[4], d_buf[5], d_buf[6], d_buf[7],
d_buf[8], d_buf[9], d_buf[10], d_buf[11],
d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
@@ -449,7 +440,6 @@ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
-#ifdef X722_SUPPORT
/**
* i40e_aq_get_set_rss_lut
@@ -608,7 +598,6 @@ enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-#endif /* X722_SUPPORT */
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
@@ -773,7 +762,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -1024,9 +1013,7 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
-#ifdef X722_SUPPORT
case I40E_MAC_X722:
-#endif
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -1046,11 +1033,9 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
-#ifdef X722_SUPPORT
if (hw->mac.type == I40E_MAC_X722)
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
-#endif
status = i40e_init_nvm(hw);
return status;
}
@@ -1128,7 +1113,8 @@ enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+ i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac),
+ I40E_NONDMA_TO_NONDMA);
return status;
}
@@ -1151,7 +1137,8 @@ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
- memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac),
+ I40E_NONDMA_TO_NONDMA);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1191,6 +1178,33 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
}
/**
+ * i40e_get_san_mac_addr - get SAN MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to SAN MAC address
+ *
+ * Reads the adapter's SAN MAC address from NVM
+ **/
+enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw,
+ u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ if (status)
+ return status;
+
+ if (flags & I40E_AQC_SAN_ADDR_VALID)
+ i40e_memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac),
+ I40E_NONDMA_TO_NONDMA);
+ else
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
* i40e_read_pba_string - Reads part number string from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number string from the EEPROM
@@ -1264,6 +1278,8 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
media = I40E_MEDIA_TYPE_FIBER;
break;
case I40E_PHY_TYPE_100BASE_TX:
@@ -1278,6 +1294,7 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1285,6 +1302,7 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -1670,8 +1688,10 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
- if (report_init)
+ if (report_init) {
hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ }
return status;
}
@@ -1763,10 +1783,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Copy over all the old settings */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status)
@@ -1926,6 +1949,8 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
@@ -1948,12 +1973,13 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->crc_enable = false;
- if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
hw_link_info->lse_enable = true;
else
hw_link_info->lse_enable = false;
- if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
@@ -2215,6 +2241,34 @@ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
}
/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(0);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_unicast_promiscuous
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2290,6 +2344,43 @@ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+* i40e_aq_set_vsi_full_promiscuous
+* @hw: pointer to the hw struct
+* @seid: VSI number
+* @set: set promiscuous enable/disable
+* @cmd_details: pointer to command details structure or NULL
+**/
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags = I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_mc_promisc_on_vlan
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2358,6 +2449,40 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_bc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set broadcast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2691,14 +2816,17 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ /* extra checking needed to ensure link info to user is timely */
+ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
+ !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
status = i40e_aq_get_phy_capabilities(hw, false, false,
&abilities, NULL);
if (status)
return status;
- memcpy(hw->phy.link_info.module_type, &abilities.module_type,
- sizeof(hw->phy.link_info.module_type));
+ i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
return status;
}
@@ -3549,6 +3677,14 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
+ if (major_rev > 1) {
+ p->mng_protocols_over_mctp = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Protocols over MCTP = %d\n",
+ p->mng_protocols_over_mctp);
+ } else {
+ p->mng_protocols_over_mctp = 0;
+ }
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Management Mode = %d\n",
p->management_mode);
@@ -3768,7 +3904,6 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
p->update_disabled = true;
break;
-#ifdef X722_SUPPORT
case I40E_AQ_CAP_ID_WOL_AND_PROXY:
hw->num_wol_proxy_filters = (u16)number;
hw->wol_proxy_vsi_seid = (u16)logical_id;
@@ -3778,12 +3913,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
else
p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
- p->proxy_support = p->proxy_support;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: WOL proxy filters = %d\n",
hw->num_wol_proxy_filters);
break;
-#endif
default:
break;
}
@@ -3792,16 +3925,8 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (p->fcoe)
i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
-#ifdef I40E_FCOE_ENA
- /* Software override ensuring FCoE is disabled if npar or mfp
- * mode because it is not supported in these modes.
- */
- if (p->npar_enable || p->flex10_enable)
- p->fcoe = false;
-#else
/* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
p->fcoe = false;
-#endif
/* count the enabled ports (aka the "not disabled" ports) */
hw->num_ports = 0;
@@ -3828,8 +3953,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
- hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
- hw->num_partitions = num_functions / hw->num_ports;
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
/* additional HW specific goodies that might
* someday be HW version specific
@@ -4314,11 +4441,15 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
- * @udp_port: the UDP port to add
+ * @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: Firmware expects the udp_port value to be in Little Endian format,
+ * and this function will call CPU_TO_LE16 to convert from Host byte order to
+ * Little Endian order.
**/
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
@@ -5452,12 +5583,12 @@ STATIC void i40e_fix_up_geneve_vni(
u16 tnl_type;
u32 ti;
- tnl_type = (le16_to_cpu(f[i].flags) &
+ tnl_type = (LE16_TO_CPU(f[i].flags) &
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
- ti = le32_to_cpu(f[i].tenant_id);
- f[i].tenant_id = cpu_to_le32(ti << 8);
+ ti = LE32_TO_CPU(f[i].tenant_id);
+ f[i].tenant_id = CPU_TO_LE32(ti << 8);
}
}
}
@@ -5961,9 +6092,6 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
- if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
desc.datalen = CPU_TO_LE16(bwd_size);
status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
@@ -5972,7 +6100,92 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
}
/**
- * i40e_read_phy_register
+ * i40e_read_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ } else {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes specified PHY register value
+ **/
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -5981,9 +6194,8 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 *value)
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -5993,8 +6205,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -6016,8 +6228,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_READ) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -6047,7 +6259,7 @@ phy_read_end:
}
/**
- * i40e_write_phy_register
+ * i40e_write_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -6056,9 +6268,8 @@ phy_read_end:
*
* Writes value to specified PHY register
**/
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 value)
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -6068,8 +6279,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -6093,8 +6304,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_WRITE) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -6115,6 +6326,78 @@ phy_write_end:
}
/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw,
+ reg, phy_addr, value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw,
+ page, reg, phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -6156,14 +6439,16 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
led_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
if (status)
goto phy_blinking_end;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
@@ -6175,20 +6460,18 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
if (time > 0 && interval > 0) {
for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (led_reg & I40E_PHY_LED_MANUAL_ON)
led_reg = 0;
else
led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
i40e_msec_delay(interval);
@@ -6196,8 +6479,9 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
}
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
phy_blinking_end:
return status;
@@ -6228,8 +6512,10 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- temp_addr, phy_addr, &reg_val);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr,
+ &reg_val);
if (status)
return status;
*val = reg_val;
@@ -6262,41 +6548,42 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
-
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
}
return status;
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
return status;
}
#endif /* PF_DRIVER */
@@ -6522,7 +6809,6 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
I40E_SUCCESS, NULL, 0, NULL);
}
#endif /* VF_DRIVER */
-#ifdef X722_SUPPORT
/**
* i40e_aq_set_arp_proxy_config
@@ -6545,10 +6831,13 @@ enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
status = i40e_asq_send_command(hw, &desc, proxy_config,
sizeof(struct i40e_aqc_arp_proxy_data),
@@ -6579,10 +6868,13 @@ enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_ns_proxy_table_entry);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
sizeof(struct i40e_aqc_ns_proxy_data),
@@ -6629,9 +6921,11 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
if (set_filter) {
if (!filter)
return I40E_ERR_PARAM;
+
cmd_flags |= I40E_AQC_SET_WOL_FILTER;
- buff_len = sizeof(*filter);
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
}
+
if (no_wol_tco)
cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
@@ -6642,6 +6936,12 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
cmd->valid_flags = CPU_TO_LE16(valid_flags);
+ buff_len = sizeof(*filter);
+ desc.datalen = CPU_TO_LE16(buff_len);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
@@ -6678,4 +6978,23 @@ enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
return status;
}
-#endif /* X722_SUPPORT */
+/**
+* i40e_aq_clear_all_wol_filters
+* @hw: pointer to the hw struct
+* @cmd_details: pointer to command details structure or NULL
+*
+* Get information for the reason of a Wake Up event
+**/
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_all_wol_filters);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
\ No newline at end of file
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_devids.h b/src/dpdk/drivers/net/i40e/base/i40e_devids.h
index ed73e1d2..4546689a 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_devids.h
+++ b/src/dpdk/drivers/net/i40e/base/i40e_devids.h
@@ -55,7 +55,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#endif /* VF_DRIVER */
-#ifdef X722_SUPPORT
#ifdef X722_A0_SUPPORT
#define I40E_DEV_ID_X722_A0 0x374C
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
@@ -68,12 +67,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
-#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
#endif /* VF_DRIVER */
-#endif /* X722_SUPPORT */
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c b/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c
index 22606484..f03f3813 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c
+++ b/src/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -1239,11 +1239,6 @@ enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
- if (NULL == hmc_info) {
- ret_code = I40E_ERR_BAD_PTR;
- DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
- goto exit;
- }
if (NULL == hmc_info->hmc_obj) {
ret_code = I40E_ERR_BAD_PTR;
DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_nvm.c b/src/dpdk/drivers/net/i40e/base/i40e_nvm.c
index 4fa1220b..e8965024 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_nvm.c
+++ b/src/dpdk/drivers/net/i40e/base/i40e_nvm.c
@@ -219,19 +219,15 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
-#else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
return ret_code;
}
@@ -249,14 +245,10 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
else
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
return ret_code;
}
@@ -348,14 +340,10 @@ enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
else
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
return ret_code;
}
@@ -375,7 +363,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
@@ -386,9 +373,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
} else {
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
-#else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
return ret_code;
}
@@ -901,9 +885,20 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return I40E_SUCCESS;
}
+ /* Clear status even it is not read and log */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -1253,6 +1248,7 @@ retry:
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
{
if (opcode == hw->nvm_wait_opcode) {
+
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
if (hw->nvm_release_on_done) {
@@ -1261,6 +1257,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
}
hw->nvm_wait_opcode = 0;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
@@ -1423,7 +1424,8 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
if (hw->nvm_buff.va) {
buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
+ I40E_NONDMA_TO_NONDMA);
}
}
@@ -1496,7 +1498,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
__func__, cmd->offset, cmd->offset + len);
buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
+ i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
bytes += len;
remainder -= len;
@@ -1510,7 +1512,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
__func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
+ i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
}
return I40E_SUCCESS;
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_osdep.h b/src/dpdk/drivers/net/i40e/base/i40e_osdep.h
index 38e7ba5b..c57ecded 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_osdep.h
+++ b/src/dpdk/drivers/net/i40e/base/i40e_osdep.h
@@ -44,6 +44,7 @@
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
+#include <rte_io.h>
#include "../i40e_logs.h"
@@ -153,15 +154,18 @@ do { \
* I40E_PRTQF_FD_MSK
*/
-#define I40E_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define I40E_PCI_REG(reg) rte_read32(reg)
#define I40E_PCI_REG_ADDR(a, reg) \
((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
static inline uint32_t i40e_read_addr(volatile void *addr)
{
return rte_le_to_cpu_32(I40E_PCI_REG(addr));
}
-#define I40E_PCI_REG_WRITE(reg, value) \
- do { I40E_PCI_REG((reg)) = rte_cpu_to_le_32(value); } while (0)
+
+#define I40E_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+#define I40E_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT)
#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT)
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_prototype.h b/src/dpdk/drivers/net/i40e/base/i40e_prototype.h
index 03dda937..109d3c56 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_prototype.h
+++ b/src/dpdk/drivers/net/i40e/base/i40e_prototype.h
@@ -78,7 +78,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-#ifdef X722_SUPPORT
enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -90,11 +89,8 @@ enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
-#endif
-#ifndef I40E_NDIS_SUPPORT
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
-#endif /* I40E_NDIS_SUPPORT */
#ifdef PF_DRIVER
@@ -124,6 +120,8 @@ enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
@@ -170,12 +168,18 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -438,6 +442,7 @@ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
/* prototype for functions used for NVM access */
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
@@ -518,7 +523,6 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-#ifdef X722_SUPPORT
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
@@ -534,11 +538,20 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
u16 *wake_reason,
struct i40e_asq_cmd_details *cmd_details);
-#endif
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 *value);
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_register.h b/src/dpdk/drivers/net/i40e/base/i40e_register.h
index fd0a7230..3a305b67 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_register.h
+++ b/src/dpdk/drivers/net/i40e/base/i40e_register.h
@@ -3401,7 +3401,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#ifdef X722_SUPPORT
#ifdef PF_DRIVER
#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
@@ -5366,5 +5365,4 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#endif /* X722_SUPPORT */
#endif /* _I40E_REGISTER_H_ */
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_type.h b/src/dpdk/drivers/net/i40e/base/i40e_type.h
index 5349419f..590d97c7 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_type.h
+++ b/src/dpdk/drivers/net/i40e/base/i40e_type.h
@@ -157,13 +157,22 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
-#define I40E_MDIO_STCODE 0
-#define I40E_MDIO_OPCODE_ADDRESS 0
-#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_PHY_COM_REG_PAGE 0x1E
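/* [Editor's illustration -- not part of this commit.] A minimal sketch of how
 * the clause-22 masks introduced above are combined into a GLGEN_MSCA MDIO
 * command word. The GLGEN_MSCA field shifts/masks, wr32() and hw->port are
 * assumed from i40e_register.h / i40e_osdep.h / struct i40e_hw; the function
 * itself is hypothetical and omits completion polling.
 */
static enum i40e_status_code
i40e_mdio_clause22_read_sketch(struct i40e_hw *hw, u16 reg, u8 phy_addr)
{
	u32 command;

	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
		  I40E_MDIO_CLAUSE22_OPCODE_READ_MASK |
		  I40E_MDIO_CLAUSE22_STCODE_MASK |
		  I40E_GLGEN_MSCA_MDICMD_MASK;
	/* Kick off the MDIO transaction on this port's MSCA register. */
	wr32(hw, I40E_GLGEN_MSCA(hw->port), command);
	return I40E_SUCCESS;
}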
@@ -187,9 +196,7 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-#ifdef X722_SUPPORT
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#endif
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
@@ -203,13 +210,10 @@ enum i40e_memcpy_type {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
-#ifdef X722_SUPPORT
I40E_MAC_X722,
I40E_MAC_X722_VF,
-#endif
I40E_MAC_GENERIC,
};
@@ -264,6 +268,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -292,61 +297,73 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- u32 phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/*
+ * Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
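/* [Editor's illustration -- not part of this commit.] With phy_types widened
 * to u64, the capability macros above -- including the 25G ones that carry
 * I40E_PHY_TYPE_OFFSET -- are tested directly against hw->phy.phy_types.
 * The helper name below is hypothetical.
 */
static inline bool i40e_phy_supports_25g_sketch(struct i40e_hw *hw)
{
	return !!(hw->phy.phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR |
				       I40E_CAP_PHY_TYPE_25GBASE_CR |
				       I40E_CAP_PHY_TYPE_25GBASE_SR |
				       I40E_CAP_PHY_TYPE_25GBASE_LR));
}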
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
-#ifdef X722_SUPPORT
enum i40e_acpi_programming_method {
I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
};
-#define I40E_WOL_SUPPORT_MASK 1
-#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
-#define I40E_PROXY_SUPPORT_MASK (1 << 2)
+#define I40E_WOL_SUPPORT_MASK 0x1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define I40E_PROXY_SUPPORT_MASK 0x4
-#endif
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
@@ -355,6 +372,10 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@@ -410,11 +431,9 @@ struct i40e_hw_capabilities {
u32 enabled_tcmap;
u32 maxtc;
u64 wr_csr_prot;
-#ifdef X722_SUPPORT
bool apm_wol_support;
enum i40e_acpi_programming_method acpi_prog_method;
bool proxy_support;
-#endif
};
struct i40e_mac_info {
@@ -472,6 +491,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -550,6 +570,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
@@ -674,30 +695,22 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
-#ifdef X722_SUPPORT
/* WoL and proxy support */
u16 num_wol_proxy_filters;
u16 wol_proxy_vsi_seid;
-#endif
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
u64 flags;
/* debug mask */
u32 debug_mask;
-#ifndef I40E_NDIS_SUPPORT
char err_str[16];
-#endif /* I40E_NDIS_SUPPORT */
};
STATIC INLINE bool i40e_is_vf(struct i40e_hw *hw)
{
-#ifdef X722_SUPPORT
return (hw->mac.type == I40E_MAC_VF ||
hw->mac.type == I40E_MAC_X722_VF);
-#else
- return hw->mac.type == I40E_MAC_VF;
-#endif
}
struct i40e_driver_version {
@@ -801,11 +814,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
-#ifdef X722_SUPPORT
I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
-#else
- I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
-#endif
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
@@ -813,11 +822,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
-#ifdef X722_SUPPORT
I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
-#else
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
-#endif
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
@@ -1195,10 +1200,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
-#ifdef X722_SUPPORT
#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
-#endif
struct i40e_nop_desc {
__le64 rsvd;
__le64 dtype_cmd;
@@ -1235,38 +1238,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
-#ifdef X722_SUPPORT
/* Note: Values 0-28 are reserved for future use.
* Value 29, 30, 32 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
-#else
- /* Note: Values 0-30 are reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
-#ifdef X722_SUPPORT
I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
-#else
- /* Note: Value 32 is reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
-#ifdef X722_SUPPORT
/* Note: Values 37-38 are reserved for future use.
* Value 39, 40, 42 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
-#else
- /* Note: Values 37-40 are reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
-#ifdef X722_SUPPORT
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
-#endif
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -1321,12 +1310,10 @@ enum i40e_filter_program_desc_pcmd {
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-#ifdef X722_SUPPORT
#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
-#endif
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
@@ -1388,6 +1375,23 @@ struct i40e_veb_tc_stats {
u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
};
+/* Statistics collected per function for FCoE */
+struct i40e_fcoe_stats {
+ u64 rx_fcoe_packets; /* fcoeprc */
+ u64 rx_fcoe_dwords; /* focedwrc */
+ u64 rx_fcoe_dropped; /* fcoerpdc */
+ u64 tx_fcoe_packets; /* fcoeptc */
+ u64 tx_fcoe_dwords; /* focedwtc */
+ u64 fcoe_bad_fccrc; /* fcoecrc */
+ u64 fcoe_last_error; /* fcoelast */
+ u64 fcoe_ddp_count; /* fcoeddpc */
+};
+
+/* offset to per function FCoE statistics block */
+#define I40E_FCOE_VF_STAT_OFFSET 0
+#define I40E_FCOE_PF_STAT_OFFSET 128
+#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
+
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
@@ -1481,6 +1485,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
@@ -1509,6 +1514,208 @@ struct i40e_hw_port_stats {
#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
+
+enum i40E_fcoe_tx_ctx_desc_cmd_bits {
+ I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10,
+ I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
+ I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40,
+ I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
+};
+
+/* FCoE DIF/DIX Context descriptor */
+struct i40e_fcoe_difdix_context_desc {
+ __le64 flags_buff0_buff1_ref;
+ __le64 difapp_msk_bias;
+};
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT 0
+#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_MASK (0xFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT)
+
+enum i40e_fcoe_difdix_ctx_desc_flags_bits {
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_RSVD = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGCHK = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGNOTCHK = 0x0004,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_OPAQUE = 0x0000,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY = 0x0008,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPTAG = 0x0010,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPREFTAG = 0x0018,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_CNST = 0x0000,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_INC1BLK = 0x0020,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_APPTAG = 0x0040,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_RSVD = 0x0060,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_XSUM = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_CRC = 0x0080,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_UNTAG = 0x0000,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_BUF = 0x0100,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_RSVD = 0x0200,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_EMBDTAGS = 0x0300,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_UNTAG = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_TAG = 0x0400,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_512B = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_4K = 0x0800
+};
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT 12
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_MASK (0x3FFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT 22
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_MASK (0x3FFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT 32
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_MASK (0xFFFFFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT 0
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MASK (0xFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT 16
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_MASK (0xFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT 32
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_BIAS_MASK (0xFFFFFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT)
+
+/* FCoE DIF/DIX Buffers descriptor */
+struct i40e_fcoe_difdix_buffers_desc {
+ __le64 buff_addr0;
+ __le64 buff_addr1;
+};
+
+/* FCoE DDP Context descriptor */
+struct i40e_fcoe_ddp_context_desc {
+ __le64 rsvd;
+ __le64 type_cmd_foff_lsize;
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
+ I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
+#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
+ I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
+
+enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
+ I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
+ I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
+ I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
+
+/* FCoE DDP/DWO Queue Context descriptor */
+struct i40e_fcoe_queue_context_desc {
+ __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
+ __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
+ I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
+ I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
+ I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
+ I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+enum i40e_fcoe_queue_ctx_desc_tph_bits {
+ I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
+ I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
+ I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
+
+/* FCoE DDP/DWO Filter Context descriptor */
+struct i40e_fcoe_filter_context_desc {
+ __le32 param;
+ __le16 seqn;
+
+ /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
+ __le16 rsvd_dmaindx;
+
+ /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
+ __le64 flags_rsvd_lanq;
+};
+
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
+ I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
+
+enum i40e_fcoe_filter_ctx_desc_flags_bits {
+ I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
+ I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
+ I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
+ I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
+ I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
+ I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
+};
+
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
+ I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
+ I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
+ I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
+
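/* [Editor's illustration -- not part of this commit.] A sketch of how the
 * QW1 shift/mask pairs above compose flags_rsvd_lanq for a DDP filter
 * context descriptor; the variable names and chosen flag values are
 * hypothetical, and CPU_TO_LE64 is assumed from i40e_osdep.h.
 */
static void
i40e_fcoe_filter_ctx_fill_sketch(struct i40e_fcoe_filter_context_desc *desc,
				 u16 lan_queue_index)
{
	u64 qw1 = 0;

	qw1 |= ((u64)(I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP |
		      I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT |
		      I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3)
		<< I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT) &
	       I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK;
	qw1 |= ((u64)lan_queue_index
		<< I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT) &
	       I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK;
	desc->flags_rsvd_lanq = CPU_TO_LE64(qw1);
}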
enum i40e_switch_element_types {
I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
I40E_SWITCH_ELEMENT_TYPE_PF = 2,
diff --git a/src/dpdk/drivers/net/i40e/base/i40e_virtchnl.h b/src/dpdk/drivers/net/i40e/base/i40e_virtchnl.h
index fd51ec32..8fba6081 100644
--- a/src/dpdk/drivers/net/i40e/base/i40e_virtchnl.h
+++ b/src/dpdk/drivers/net/i40e/base/i40e_virtchnl.h
@@ -170,6 +170,11 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
+
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
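/* [Editor's illustration -- not part of this commit.] The new
 * I40E_VF_BASE_MODE_OFFLOADS bundle groups the minimal L2 + VLAN + RSS_PF
 * capability set; a sketch of how a caller might test a VF's negotiated
 * offload flags against it (the helper is hypothetical):
 */
static inline bool i40e_vf_has_base_mode_offloads(u32 vf_offload_flags)
{
	return (vf_offload_flags & I40E_VF_BASE_MODE_OFFLOADS) ==
		I40E_VF_BASE_MODE_OFFLOADS;
}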
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.c b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
index ca1a4808..4492bcc1 100644
--- a/src/dpdk/drivers/net/i40e/i40e_ethdev.c
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -62,6 +63,7 @@
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
@@ -108,7 +110,6 @@
I40E_PFINT_ICR0_ENA_GRST_MASK | \
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
- I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
I40E_PFINT_ICR0_ENA_VFLR_MASK | \
@@ -139,60 +140,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -202,7 +149,7 @@
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
-#define I40E_REG_INSET_L2_OUTER_VLAN 0x0200000000000000ULL
+#define I40E_REG_INSET_L2_OUTER_VLAN 0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN 0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
@@ -211,6 +158,14 @@
#define I40E_REG_INSET_L3_SRC_IP4 0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4 0x0000001800000000ULL
+/* Source IPv4 address for X722 */
+#define I40E_X722_REG_INSET_L3_SRC_IP4 0x0006000000000000ULL
+/* Destination IPv4 address for X722 */
+#define I40E_X722_REG_INSET_L3_DST_IP4 0x0000060000000000ULL
+/* IPv4 Protocol for X722 */
+#define I40E_X722_REG_INSET_L3_IP4_PROTO 0x0010000000000000ULL
+/* IPv4 Time to Live for X722 */
+#define I40E_X722_REG_INSET_L3_IP4_TTL 0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS 0x0040000000000000ULL
/* IPv4 Protocol */
@@ -277,11 +232,6 @@
#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
-#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
- I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
-
/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG 0xA4
/* PCI offset for enabling/disabling Extended Tag */
@@ -317,6 +267,8 @@ static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx);
+static int i40e_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
@@ -366,8 +318,8 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
uint64_t *offset,
uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
-static void i40e_dev_interrupt_handler(
- __rte_unused struct rte_intr_handle *handle, void *param);
+static void i40e_dev_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
@@ -399,9 +351,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -411,6 +360,7 @@ static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
+static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
@@ -453,6 +403,22 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -474,7 +440,6 @@ static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
- { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_I_X722) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -496,6 +461,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.stats_reset = i40e_dev_stats_reset,
.xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
+ .fw_version_get = i40e_fw_version_get,
.dev_infos_get = i40e_dev_info_get,
.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
.vlan_filter_set = i40e_vlan_filter_set,
@@ -663,10 +629,10 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
static struct eth_driver rte_i40e_pmd = {
.pci_drv = {
- .name = "rte_i40e_pmd",
.id_table = pci_id_i40e_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_i40e_dev_init,
.eth_dev_uninit = eth_i40e_dev_uninit,
@@ -701,33 +667,10 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
- */
-static int
-rte_i40e_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- rte_eth_driver_register(&rte_i40e_pmd);
-
- return 0;
-}
-
-static struct rte_driver rte_i40e_driver = {
- .type = PMD_PDEV,
- .init = rte_i40e_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_i40e_driver, i40e);
-DRIVER_REGISTER_PCI_TABLE(i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio");
-/*
- * Initialize registers for flexible payload, which should be set by NVM.
- * This should be removed from code once it is fixed in NVM.
- */
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
#endif
@@ -735,8 +678,12 @@ DRIVER_REGISTER_PCI_TABLE(i40e, pci_id_i40e_map);
#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
#endif
-static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
+static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
+ /*
+ * Initialize registers for flexible payload, which should be set by NVM.
+ * This should be removed from code once it is fixed in NVM.
+ */
I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
@@ -747,17 +694,16 @@ static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
-
- /* GLQF_PIT Registers */
I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
+
+ /* Initialize registers for parsing packet type of QinQ */
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}
#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
-#define TREX_PATCH
-#define TREX_PATCH_LOW_LATENCY
-
/*
* Add a ethertype filter to drop all flow control frames transmitted
* from VSIs.
@@ -776,8 +722,8 @@ i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
pf->main_vsi_seid, 0,
TRUE, NULL, NULL);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control "
- " frames from VSIs.");
+ PMD_INIT_LOG(ERR,
+ "Failed to add filter to drop flow control frames from VSIs.");
}
static int
@@ -920,25 +866,159 @@ is_floating_veb_supported(struct rte_devargs *devargs)
static void
config_floating_veb(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
- pf->floating_veb = is_floating_veb_supported(pci_dev->devargs);
- config_vf_floating_veb(pci_dev->devargs, pf->floating_veb,
+ pf->floating_veb =
+ is_floating_veb_supported(pci_dev->device.devargs);
+ config_vf_floating_veb(pci_dev->device.devargs,
+ pf->floating_veb,
pf->floating_veb_list);
} else {
pf->floating_veb = false;
}
}
+#define I40E_L2_TAGS_S_TAG_SHIFT 1
+#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
+
+static int
+i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(&ethertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ return -EINVAL;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
+ return 0;
+
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ return -EINVAL;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
+ return 0;
+
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ };
+
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+ return 0;
+
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+
+ return ret;
+}
+
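/* [Editor's illustration -- not part of this commit.] Each of the three init
 * helpers above pairs an rte_hash keyed on the filter input with a flat
 * hash_map array of filter pointers. A typical software insert, modeled on
 * the i40e_sw_*_filter_insert prototypes declared earlier in this file, would
 * look roughly like this (field names are assumed from the matching
 * declarations in i40e_ethdev.h):
 */
static int
i40e_sw_ethertype_filter_insert_sketch(struct i40e_pf *pf,
				       struct i40e_ethertype_filter *filter)
{
	struct i40e_ethertype_rule *rule = &pf->ethertype;
	int pos;

	/* rte_hash_add_key() returns the key's slot index on success */
	pos = rte_hash_add_key(rule->hash_table, &filter->input);
	if (pos < 0)
		return pos;
	rule->hash_map[pos] = filter;
	TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);

	return 0;
}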
static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi;
@@ -951,6 +1031,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -960,9 +1041,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
i40e_set_tx_function(dev);
return 0;
}
- pci_dev = dev->pci_dev;
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
@@ -971,8 +1054,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->back = I40E_PF_TO_ADAPTER(pf);
hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
if (!hw->hw_addr) {
- PMD_INIT_LOG(ERR, "Hardware is not available, "
- "as address is NULL");
+ PMD_INIT_LOG(ERR,
+ "Hardware is not available, as address is NULL");
return -ENODEV;
}
@@ -1005,11 +1088,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
}
/*
- * To work around the NVM issue,initialize registers
- * for flexible payload by software.
- * It should be removed once issues are fixed in NVM.
+ * To work around the NVM issue, initialize registers
+ * for flexible payload and packet type of QinQ by
+ * software. It should be removed once issues are fixed
+ * in NVM.
*/
- i40e_flex_payload_reg_init(hw);
+ i40e_GLQF_reg_init(hw);
/* Initialize the input set for filters (hash and fd) to default value */
i40e_filter_input_set_init(pf);
@@ -1032,7 +1116,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
config_floating_veb(dev);
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
-
+ ret = i40e_dev_sync_phy_type(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret);
+ goto err_sync_phy_type;
+ }
/*
* On X710, performance number is far from the expectation on recent
* firmware versions. The fix for this issue may not be integrated in
@@ -1103,8 +1191,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Set the global registers with default ether type value */
ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR, "Failed to set the default outer "
- "VLAN ether type");
+ PMD_INIT_LOG(ERR,
+ "Failed to set the default outer VLAN ether type");
goto err_setup_pf_switch;
}
@@ -1123,6 +1211,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Disable double vlan by default */
i40e_vsi_config_double_vlan(vsi, FALSE);
+ /* Disable S-TAG identification when floating_veb is disabled */
+ if (!pf->floating_veb) {
+ ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
+ if (ret & I40E_L2_TAGS_S_TAG_MASK) {
+ ret &= ~I40E_L2_TAGS_S_TAG_MASK;
+ I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
+ }
+ }
+
if (!vsi->max_macaddrs)
len = ETHER_ADDR_LEN;
else
@@ -1131,8 +1228,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Should be after VSI initialized */
dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
if (!dev->data->mac_addrs) {
- PMD_INIT_LOG(ERR, "Failed to allocated memory "
- "for storing mac address");
+ PMD_INIT_LOG(ERR,
+ "Failed to allocated memory for storing mac address");
goto err_mac_alloc;
}
ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
@@ -1142,15 +1239,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
i40e_pf_host_init(dev);
/* register callback func to eal lib */
- rte_intr_callback_register(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler, dev);
/* configure and enable device interrupt */
i40e_pf_config_irq0(hw, TRUE);
i40e_pf_enable_irq0(hw);
/* enable uio intr after callback register */
- rte_intr_enable(&(pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
/*
* Add an ethertype filter to drop all flow control frames transmitted
* from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
@@ -1173,8 +1270,26 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
pf->flags &= ~I40E_FLAG_DCB;
}
+ ret = i40e_init_ethtype_filter_list(dev);
+ if (ret < 0)
+ goto err_init_ethtype_filter_list;
+ ret = i40e_init_tunnel_filter_list(dev);
+ if (ret < 0)
+ goto err_init_tunnel_filter_list;
+ ret = i40e_init_fdir_filter_list(dev);
+ if (ret < 0)
+ goto err_init_fdir_filter_list;
+
return 0;
+err_init_fdir_filter_list:
+ rte_free(pf->tunnel.hash_table);
+ rte_free(pf->tunnel.hash_map);
+err_init_tunnel_filter_list:
+ rte_free(pf->ethertype.hash_table);
+ rte_free(pf->ethertype.hash_map);
+err_init_ethtype_filter_list:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1188,17 +1303,79 @@ err_msix_pool_init:
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
+err_sync_phy_type:
(void)i40e_shutdown_adminq(hw);
return ret;
}
+static void
+i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_ethertype_rule *ethertype_rule;
+
+ ethertype_rule = &pf->ethertype;
+ /* Remove all ethertype filter rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(&ethertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+}
+
+static void
+i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_tunnel_rule *tunnel_rule;
+
+ tunnel_rule = &pf->tunnel;
+ /* Remove all tunnel director rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+}
+
+static void
+i40e_rm_fdir_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *p_fdir;
+ struct i40e_fdir_info *fdir_info;
+
+ fdir_info = &pf->fdir;
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+}
+
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct rte_flow *p_flow;
int ret;
uint8_t aq_fail = 0;
@@ -1207,8 +1384,10 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = dev->pci_dev;
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1217,11 +1396,6 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
- /* Disable LLDP */
- ret = i40e_aq_stop_lldp(hw, true, NULL);
- if (ret != I40E_SUCCESS) /* Its failure can be ignored */
- PMD_INIT_LOG(INFO, "Failed to stop lldp");
-
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
@@ -1243,11 +1417,21 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
dev->data->mac_addrs = NULL;
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* register callback func to eal lib */
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+
+ i40e_rm_ethtype_filter_list(pf);
+ i40e_rm_tunnel_filter_list(pf);
+ i40e_rm_fdir_filter_list(pf);
+
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ rte_free(p_flow);
+ }
return 0;
}
@@ -1313,6 +1497,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
}
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_dcb:
@@ -1333,7 +1519,8 @@ void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t i;
@@ -1446,7 +1633,8 @@ void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
@@ -1517,7 +1705,8 @@ static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1548,7 +1737,8 @@ static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_intr, i;
@@ -1571,6 +1761,8 @@ i40e_parse_link_speeds(uint16_t link_speeds)
if (link_speeds & ETH_LINK_SPEED_40G)
link_speed |= I40E_LINK_SPEED_40GB;
+ if (link_speeds & ETH_LINK_SPEED_25G)
+ link_speed |= I40E_LINK_SPEED_25GB;
if (link_speeds & ETH_LINK_SPEED_20G)
link_speed |= I40E_LINK_SPEED_20GB;
if (link_speeds & ETH_LINK_SPEED_10G)
@@ -1596,6 +1788,7 @@ i40e_phy_conf_link(struct i40e_hw *hw,
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_LOW_POWER;
const uint8_t advt = I40E_LINK_SPEED_40GB |
+ I40E_LINK_SPEED_25GB |
I40E_LINK_SPEED_10GB |
I40E_LINK_SPEED_1GB |
I40E_LINK_SPEED_100MB;
@@ -1623,6 +1816,8 @@ i40e_phy_conf_link(struct i40e_hw *hw,
/* use get_phy_abilities_resp value for the rest */
phy_conf.phy_type = phy_ab.phy_type;
+ phy_conf.phy_type_ext = phy_ab.phy_type_ext;
+ phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
phy_conf.low_power_ctrl = phy_ab.d3_lpan;
@@ -1654,7 +1849,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
abilities |= I40E_AQ_PHY_LINK_ENABLED;
/* Skip changing speed on 40G interfaces, FW does not support */
- if (i40e_is_40G_device(hw->device_id)) {
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
speed = I40E_LINK_SPEED_UNKNOWN;
abilities |= I40E_AQ_PHY_AN_ENABLED;
}
@@ -1669,7 +1864,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
hw->adapter_stopped = 0;
@@ -1686,8 +1882,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
!RTE_ETH_DEV_SRIOV(dev).active) &&
dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = dev->data->nb_rx_queues;
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -1;
+ ret = rte_intr_efd_enable(intr_handle, intr_vector);
+ if (ret)
+ return ret;
}
if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
@@ -1696,8 +1893,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int),
0);
if (!intr_handle->intr_vec) {
- PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d rx_queues intr_vec",
+ dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -1750,7 +1948,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* Apply link configure */
if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G | ETH_LINK_SPEED_40G)) {
+ ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G)) {
PMD_DRV_LOG(ERR, "Invalid link setting");
goto err_up;
}
@@ -1769,13 +1968,25 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_pf_enable_irq0(hw);
if (dev->data->dev_conf.intr_conf.lsc != 0)
- PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ PMD_INIT_LOG(INFO,
+ "lsc won't enable because of no intr multiplex");
+ } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ ret = i40e_aq_set_phy_int_mask(hw,
+ ~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL |
+ I40E_AQ_EVENT_MEDIA_NA), NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(WARNING, "Fail to set phy mask");
+
+ /* Call get_link_info aq commond to enable LSE */
+ i40e_dev_link_update(dev, 0);
}
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -1791,7 +2002,8 @@ i40e_dev_stop(struct rte_eth_dev *dev)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
/* Disable all queues */
@@ -1842,6 +2054,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t reg;
int i;
@@ -1853,23 +2067,22 @@ i40e_dev_close(struct rte_eth_dev *dev)
/* Disable interrupt */
i40e_pf_disable_irq0(hw);
- rte_intr_disable(&(dev->pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
- /* release all the existing VSIs and VEBs */
- i40e_fdir_teardown(pf);
- i40e_vsi_release(pf->main_vsi);
-
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
i40e_vsi_release(pf->vmdq[i].vsi);
pf->vmdq[i].vsi = NULL;
}
-
rte_free(pf->vmdq);
pf->vmdq = NULL;
+ /* release all the existing VSIs and VEBs */
+ i40e_fdir_teardown(pf);
+ i40e_vsi_release(pf->main_vsi);
+
/* shutdown the adminq */
i40e_aq_queue_shutdown(hw, true);
i40e_shutdown_adminq(hw);
@@ -1970,9 +2183,10 @@ static int
i40e_dev_set_link_down(struct rte_eth_dev *dev)
{
uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
- uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ uint8_t abilities = 0;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
return i40e_phy_conf_link(hw, abilities, speed);
}
@@ -1987,6 +2201,7 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
struct rte_eth_link link, old;
int status;
unsigned rep_cnt = MAX_REPEAT_TIME;
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
memset(&link, 0, sizeof(link));
memset(&old, 0, sizeof(old));
@@ -1995,7 +2210,8 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
do {
/* Get link status information from hardware */
- status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
+ status = i40e_aq_get_link_info(hw, enable_lse,
+ &link_status, NULL);
if (status != I40E_SUCCESS) {
link.link_speed = ETH_SPEED_NUM_100M;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -2030,6 +2246,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
case I40E_LINK_SPEED_20GB:
link.link_speed = ETH_SPEED_NUM_20G;
break;
+ case I40E_LINK_SPEED_25GB:
+ link.link_speed = ETH_SPEED_NUM_25G;
+ break;
case I40E_LINK_SPEED_40GB:
link.link_speed = ETH_SPEED_NUM_40G;
break;
@@ -2296,11 +2515,9 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
I40E_GLPRT_PTC9522L(hw->port),
pf->offset_loaded, &os->tx_size_big,
&ns->tx_size_big);
-#ifndef TREX_PATCH
i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
pf->offset_loaded,
&os->fd_sb_match, &ns->fd_sb_match);
-#endif
/* GLPRT_MSPDC not supported */
/* GLPRT_XEC not supported */
@@ -2310,46 +2527,6 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
i40e_update_vsi_stats(pf->main_vsi);
}
-//TREX_PATCH
-// fill stats array with fdir rules match count statistics
-// Notice that we read statistics from start to start + len, but we fill the stats are
-// starting from 0 with len values
-void
-i40e_trex_fdir_stats_get(struct rte_eth_dev *dev, uint32_t *stats, uint32_t start, uint32_t len)
-{
- int i;
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- for (i = 0; i < len; i++) {
- stats[i] = I40E_READ_REG(hw, I40E_GLQF_PCNT(i + start));
- }
-}
-
-// TREX_PATCH
-void
-i40e_trex_fdir_stats_reset(struct rte_eth_dev *dev, uint32_t *stats, uint32_t start, uint32_t len)
-{
- int i;
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- for (i = 0; i < len; i++) {
- if (stats) {
- stats[i] = I40E_READ_REG(hw, I40E_GLQF_PCNT(i + start));
- }
- I40E_WRITE_REG(hw, I40E_GLQF_PCNT(i + start), 0xffffffff);
- }
-}
-
-// TREX_PATCH
-int
-i40e_trex_get_fw_ver(struct rte_eth_dev *dev, uint32_t *nvm_ver)
-{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- *nvm_ver = hw->nvm.version;
- return 0;
-}
-
/* Get all statistics of a port */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
@@ -2366,17 +2543,10 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
pf->main_vsi->eth_stats.rx_multicast +
pf->main_vsi->eth_stats.rx_broadcast -
pf->main_vsi->eth_stats.rx_discards;
-#ifndef TREX_PATCH
stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
pf->main_vsi->eth_stats.tx_multicast +
pf->main_vsi->eth_stats.tx_broadcast;
stats->ibytes = ns->eth.rx_bytes;
-#else
- /* Hanoch: move to global transmit and not pf->vsi and we have two high and low priorty */
- stats->opackets = ns->eth.tx_unicast +ns->eth.tx_multicast +ns->eth.tx_broadcast;
- stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
-#endif
-
stats->obytes = ns->eth.tx_bytes;
stats->oerrors = ns->eth.tx_errors +
pf->main_vsi->eth_stats.tx_errors;
@@ -2557,6 +2727,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
rte_i40e_stats_strings[i].offset);
+ xstats[count].id = count;
count++;
}
@@ -2564,6 +2735,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_i40e_hw_port_strings[i].offset);
+ xstats[count].id = count;
count++;
}
@@ -2573,6 +2745,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
*(uint64_t *)(((char *)hw_stats) +
rte_i40e_rxq_prio_strings[i].offset +
(sizeof(uint64_t) * prio));
+ xstats[count].id = count;
count++;
}
}
@@ -2583,6 +2756,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
*(uint64_t *)(((char *)hw_stats) +
rte_i40e_txq_prio_strings[i].offset +
(sizeof(uint64_t) * prio));
+ xstats[count].id = count;
count++;
}
}
@@ -2601,19 +2775,49 @@ i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
return -ENOSYS;
}
+static int
+i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 full_ver;
+ u8 ver, patch;
+ u16 build;
+ int ret;
+
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)(full_ver >> 24);
+ build = (u16)((full_ver >> 8) & 0xffff);
+ patch = (u8)(full_ver & 0xff);
+
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d%d 0x%08x %d.%d.%d",
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack,
+ ver, build, patch);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
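The i40e_fw_version_get() callback added above packs three pieces of NVM information into one string: the map-version nibbles of hw->nvm.version, the EETRACK id, and the OEM version split into major/build/patch fields of hw->nvm.oem_ver, returning a non-zero byte count only when the caller's buffer is too small. A minimal stand-alone sketch of the same bit slicing, using made-up register values purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative values only; the driver reads these from
	 * hw->nvm.version, hw->nvm.eetrack and hw->nvm.oem_ver. */
	uint16_t nvm_version = 0x5040;     /* prints as "5.40" */
	uint32_t eetrack     = 0x800023c4;
	uint32_t oem_ver     = 0x06001900; /* major 6, build 0x0019, patch 0 */

	uint8_t  oem_major = (uint8_t)(oem_ver >> 24);
	uint16_t oem_build = (uint16_t)((oem_ver >> 8) & 0xffff);
	uint8_t  oem_patch = (uint8_t)(oem_ver & 0xff);

	char buf[32];
	int ret = snprintf(buf, sizeof(buf), "%d.%d%d 0x%08x %d.%d.%d",
			   (nvm_version >> 12) & 0xf,
			   (nvm_version >> 4) & 0xff,
			   nvm_version & 0xf, eetrack,
			   oem_major, oem_build, oem_patch);

	/* Same convention as the callback: report the size needed
	 * (including the terminating '\0') when the buffer is short. */
	if ((size_t)(ret + 1) > sizeof(buf))
		printf("buffer too small, need %d bytes\n", ret + 1);
	else
		printf("fw_version: %s\n", buf);
	return 0;
}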
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2628,7 +2832,11 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
dev_info->reta_size = pf->hash_lut_size;
@@ -2666,6 +2874,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_max = I40E_MAX_RING_DESC,
.nb_min = I40E_MIN_RING_DESC,
.nb_align = I40E_ALIGN_RING_DESC,
+ .nb_seg_max = I40E_TX_MAX_SEG,
+ .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
};
if (pf->flags & I40E_FLAG_VMDQ) {
@@ -2678,9 +2888,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_tx_queues += dev_info->vmdq_queue_num;
}
- if (i40e_is_40G_device(hw->device_id))
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
/* For XL710 */
dev_info->speed_capa = ETH_LINK_SPEED_40G;
+ else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+ /* For XXV710 */
+ dev_info->speed_capa = ETH_LINK_SPEED_25G;
else
/* For X710 */
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
@@ -2723,7 +2936,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
else {
ret = -EINVAL;
PMD_DRV_LOG(ERR,
- "Unsupported vlan type in single vlan.\n");
+ "Unsupported vlan type in single vlan.");
return ret;
}
break;
@@ -2735,13 +2948,15 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
&reg_r, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Fail to debug read from "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
ret = -EIO;
return ret;
}
- PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
- "0x%08"PRIx64"", reg_id, reg_r);
+ PMD_DRV_LOG(DEBUG,
+ "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
+ reg_id, reg_r);
reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
@@ -2755,12 +2970,14 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
reg_w, NULL);
if (ret != I40E_SUCCESS) {
ret = -EIO;
- PMD_DRV_LOG(ERR, "Fail to debug write to "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ PMD_DRV_LOG(ERR,
+ "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
return ret;
}
- PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
+ PMD_DRV_LOG(DEBUG,
+ "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_w, reg_id);
return ret;
}
@@ -2904,8 +3121,9 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
- PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, "
- "High_water must <= %d.", max_high_water);
+ PMD_INIT_LOG(ERR,
+ "Invalid high/low water setup value in KB, High_water must be <= %d.",
+ max_high_water);
return -EINVAL;
}
@@ -2926,7 +3144,7 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
if (err < 0)
return -ENOSYS;
- if (i40e_is_40G_device(hw->device_id)) {
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
/* Configure flow control refresh threshold,
* the value for stat_tx_pause_refresh_timer[8]
* is used for global pause operation.
@@ -3077,8 +3295,8 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
/* No VMDQ pool enabled or configured */
if (!(pf->flags & I40E_FLAG_VMDQ) ||
(i > pf->nb_cfg_vmdq_vsi)) {
- PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
- "/configured");
+ PMD_DRV_LOG(ERR,
+ "No VMDQ pool enabled/configured");
return;
}
vsi = pf->vmdq[i - 1].vsi;
@@ -3279,9 +3497,9 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, lut_size);
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
return -EINVAL;
}
@@ -3320,9 +3538,9 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, lut_size);
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
return -EINVAL;
}
@@ -3377,8 +3595,9 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
mem->va = mz->addr;
mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
mem->zone = (const void *)mz;
- PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
- "%"PRIu64, mz->name, mem->pa);
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s allocated with physical address: %"PRIu64,
+ mz->name, mem->pa);
return I40E_SUCCESS;
}
@@ -3395,9 +3614,9 @@ i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
if (!mem)
return I40E_ERR_PARAM;
- PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
- "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
- mem->pa);
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s to be freed with physical address: %"PRIu64,
+ ((const struct rte_memzone *)mem->zone)->name, mem->pa);
rte_memzone_free((const struct rte_memzone *)mem->zone);
mem->zone = NULL;
mem->va = NULL;
@@ -3508,9 +3727,10 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint16_t qp_count = 0, vsi_count = 0;
- if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
+ if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
return -EINVAL;
}
@@ -3551,13 +3771,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
/* VF queue/VSI allocation */
pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
- if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
+ if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
pf->flags |= I40E_FLAG_SRIOV;
pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
- pf->vf_num = dev->pci_dev->max_vfs;
- PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
- "in total %u queues", pf->vf_num, pf->vf_nb_qps,
- pf->vf_nb_qps * pf->vf_num);
+ pf->vf_num = pci_dev->max_vfs;
+ PMD_DRV_LOG(DEBUG,
+ "%u VF VSIs, %u queues per VF VSI, in total %u queues",
+ pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
} else {
pf->vf_nb_qps = 0;
pf->vf_num = 0;
@@ -3585,14 +3805,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
if (pf->max_nb_vmdq_vsi) {
pf->flags |= I40E_FLAG_VMDQ;
pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
- PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
- "per VMDQ VSI, in total %u queues",
- pf->max_nb_vmdq_vsi,
- pf->vmdq_nb_qps, pf->vmdq_nb_qps *
- pf->max_nb_vmdq_vsi);
+ PMD_DRV_LOG(DEBUG,
+ "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
+ pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
+ pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
} else {
- PMD_DRV_LOG(INFO, "No enough queues left for "
- "VMDq");
+ PMD_DRV_LOG(INFO,
+ "No enough queues left for VMDq");
}
} else {
PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
@@ -3605,15 +3824,15 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
pf->flags |= I40E_FLAG_DCB;
if (qp_count > hw->func_caps.num_tx_qp) {
- PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
- "the hardware maximum %u", qp_count,
- hw->func_caps.num_tx_qp);
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u queues, which exceeds the hardware maximum %u",
+ qp_count, hw->func_caps.num_tx_qp);
return -EINVAL;
}
if (vsi_count > hw->func_caps.num_vsis) {
- PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
- "the hardware maximum %u", vsi_count,
- hw->func_caps.num_vsis);
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
+ vsi_count, hw->func_caps.num_vsis);
return -EINVAL;
}
@@ -3859,8 +4078,8 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
*/
entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
if (entry == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "resource pool");
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate memory for resource pool");
return -ENOMEM;
}
entry->base = valid_entry->base;
@@ -3900,9 +4119,9 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
}
if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
- PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
- "HW support 0x%x", hw->func_caps.enabled_tcmap,
- enabled_tcmap);
+ PMD_DRV_LOG(ERR,
+ "Enabled TC map 0x%x not applicable to HW support 0x%x",
+ hw->func_caps.enabled_tcmap, enabled_tcmap);
return I40E_NOT_SUPPORTED;
}
return I40E_SUCCESS;
@@ -4108,18 +4327,10 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
/* create floating veb if vsi is NULL */
if (vsi != NULL) {
ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
-#ifdef TREX_PATCH_LOW_LATENCY
- vsi->enabled_tc, false,
-#else
- I40E_DEFAULT_TCMAP, false,
-#endif
+ I40E_DEFAULT_TCMAP, false,
&veb->seid, false, NULL);
} else {
-#ifdef TREX_PATCH_LOW_LATENCY
- ret = i40e_aq_add_veb(hw, 0, 0, vsi->enabled_tc,
-#else
ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
-#endif
true, &veb->seid, false, NULL);
}
@@ -4133,7 +4344,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
&veb->stats_idx, NULL, NULL, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
+ PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
hw->aq.asq_last_status);
goto fail;
}
@@ -4157,11 +4368,16 @@ i40e_vsi_release(struct i40e_vsi *vsi)
void *temp;
int ret;
struct i40e_mac_filter *f;
- uint16_t user_param = vsi->user_param;
+ uint16_t user_param;
if (!vsi)
return I40E_SUCCESS;
+ if (!vsi->adapter)
+ return -EFAULT;
+
+ user_param = vsi->user_param;
+
pf = I40E_VSI_TO_PF(vsi);
hw = I40E_VSI_TO_HW(vsi);
@@ -4250,8 +4466,8 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
struct i40e_mac_filter *f;
struct ether_addr *mac;
- PMD_DRV_LOG(WARNING, "Cannot remove the default "
- "macvlan filter");
+ PMD_DRV_LOG(WARNING,
+ "Cannot remove the default macvlan filter");
/* It needs to add the permanent mac into mac list */
f = rte_zmalloc("macv_filter", sizeof(*f), 0);
if (f == NULL) {
@@ -4273,57 +4489,6 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
return i40e_vsi_add_mac(vsi, &filter);
}
-#ifdef TREX_PATCH_LOW_LATENCY
-static int
-i40e_vsi_update_tc_max_bw(struct i40e_vsi *vsi, u16 credit){
- struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- int ret;
-
- if (!vsi->seid) {
- PMD_DRV_LOG(ERR, "seid not valid");
- return -EINVAL;
- }
-
- ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, credit,0, NULL);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure TC BW");
- return ret;
- }
- return (0);
-}
-
-static int
-i40e_vsi_update_tc_bandwidth_ex(struct i40e_vsi *vsi)
-{
- struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- int i, ret;
- struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw_data;
- struct i40e_aqc_configure_vsi_tc_bw_data * res_buffer;
-
- if (!vsi->seid) {
- PMD_DRV_LOG(ERR, "seid not valid");
- return -EINVAL;
- }
-
- memset(&tc_bw_data, 0, sizeof(tc_bw_data));
- tc_bw_data.tc_valid_bits = 3;
-
- /* enable TC 0,1 */
- ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw_data, NULL);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure TC BW");
- return ret;
- }
-
- vsi->enabled_tc=3;
- res_buffer = ( struct i40e_aqc_configure_vsi_tc_bw_data *)&tc_bw_data;
- (void)rte_memcpy(vsi->info.qs_handle, res_buffer->qs_handles,
- sizeof(vsi->info.qs_handle));
-
- return I40E_SUCCESS;
-}
-#endif
-
/*
* i40e_vsi_get_bw_config - Query VSI BW Information
* @vsi: the VSI to be queried
@@ -4352,8 +4517,9 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
- "configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "VSI failed to get TC bandwdith configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
@@ -4399,8 +4565,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf)
/* Use the FW API if FW >= v5.0 */
if (hw->aq.fw_maj_ver < 5) {
- //TREX_PATCH - changed from ERR to INFO. Most of our customers do not have latest FW
- PMD_INIT_LOG(INFO, "FW < v5.0, cannot enable loopback");
+ PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
return;
}
@@ -4421,7 +4586,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret)
- PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
+ PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
hw->aq.asq_last_status);
}
@@ -4442,14 +4607,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
uplink_vsi == NULL) {
- PMD_DRV_LOG(ERR, "VSI setup failed, "
- "VSI link shouldn't be NULL");
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, VSI link shouldn't be NULL");
return NULL;
}
if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
- PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
- "uplink VSI should be NULL");
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, MAIN VSI uplink VSI should be NULL");
return NULL;
}
@@ -4493,6 +4658,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
vsi->user_param = user_param;
+ vsi->vlan_anti_spoof_on = 0;
/* Allocate queues */
switch (vsi->type) {
case I40E_VSI_MAIN :
@@ -4600,8 +4766,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
ctxt.seid = vsi->seid;
@@ -4671,8 +4837,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -4714,8 +4880,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -4732,8 +4898,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping.");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping.");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -4996,8 +5162,9 @@ i40e_pf_setup(struct i40e_pf *pf)
/* make queue allocated first, let FDIR use queue pair 0*/
ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
if (ret != I40E_FDIR_QUEUE_ID) {
- PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
- " ret =%d", ret);
+ PMD_DRV_LOG(ERR,
+ "queue allocation fails for FDIR: ret =%d",
+ ret);
pf->flags &= ~I40E_FLAG_FDIR;
}
}
@@ -5016,12 +5183,12 @@ i40e_pf_setup(struct i40e_pf *pf)
else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
else {
- PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
- hw->func_caps.rss_table_size);
+ PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
+ hw->func_caps.rss_table_size);
return I40E_ERR_PARAM;
}
- PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
- "size: %u\n", hw->func_caps.rss_table_size);
+ PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
+ hw->func_caps.rss_table_size);
pf->hash_lut_size = hw->func_caps.rss_table_size;
/* Enable ethtype and macvlan filters */
@@ -5271,8 +5438,8 @@ i40e_dev_rx_init(struct i40e_pf *pf)
ret = i40e_rx_queue_init(rxq);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to do RX queue "
- "initialization");
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
break;
}
}
@@ -5519,6 +5686,24 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
}
static void
+i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_virtchnl_pf_event event;
+ int i;
+
+ event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ event.event_data.link_event.link_status =
+ dev->data->dev_link.link_status;
+ event.event_data.link_event.link_speed =
+ (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+
+ for (i = 0; i < pf->vf_num; i++)
+ i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
+static void
i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5538,8 +5723,9 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
ret = i40e_clean_arq_element(hw, &info, &pending);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
- "aq_err: %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(INFO,
+ "Failed to read msg from AdminQ, aq_err: %u",
+ hw->aq.asq_last_status);
break;
}
opcode = rte_le_to_cpu_16(info.desc.opcode);
@@ -5554,6 +5740,14 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
info.msg_buf,
info.msg_len);
break;
+ case i40e_aqc_opc_get_link_status:
+ ret = i40e_dev_link_update(dev, 0);
+ if (!ret) {
+ i40e_notify_all_vfs_link_status(dev);
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+ break;
default:
PMD_DRV_LOG(ERR, "Request %u is not supported yet",
opcode);
@@ -5563,57 +5757,6 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
rte_free(info.msg_buf);
}
-/*
- * Interrupt handler is registered as the alarm callback for handling LSC
- * interrupt in a definite of time, in order to wait the NIC into a stable
- * state. Currently it waits 1 sec in i40e for the link up interrupt, and
- * no need for link down interrupt.
- */
-static void
-i40e_dev_interrupt_delayed_handler(void *param)
-{
- struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t icr0;
-
- /* read interrupt causes again */
- icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
- if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
- PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
- if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
- PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
- if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
- PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
- if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
- PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
- if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
- PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
- "state\n");
- if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
- PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
- if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
- PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
-#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
-
- if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
- PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
- i40e_dev_handle_vfr_event(dev);
- }
- if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
- PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
- i40e_dev_handle_aq_msg(dev);
- }
-
- /* handle the link up interrupt in an alarm callback */
- i40e_dev_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
-
- i40e_pf_enable_irq0(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
-}
-
/**
* Interrupt handler triggered by NIC for handling
* specific interrupt.
@@ -5627,7 +5770,7 @@ i40e_dev_interrupt_delayed_handler(void *param)
* void
*/
static void
-i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+i40e_dev_interrupt_handler(struct rte_intr_handle *intr_handle,
void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -5671,34 +5814,10 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
i40e_dev_handle_aq_msg(dev);
}
- /* Link Status Change interrupt */
- if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
-#define I40E_US_PER_SECOND 1000000
- struct rte_eth_link link;
-
- PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
- memset(&link, 0, sizeof(link));
- rte_i40e_dev_atomic_read_link_status(dev, &link);
- i40e_dev_link_update(dev, 0);
-
- /*
- * For link up interrupt, it needs to wait 1 second to let the
- * hardware be a stable state. Otherwise several consecutive
- * interrupts can be observed.
- * For link down interrupt, no need to wait.
- */
- if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
- i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
- return;
- else
- _rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC);
- }
-
done:
/* Enable interrupt */
i40e_pf_enable_irq0(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
}
static int
@@ -5751,7 +5870,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC match type");
ret = I40E_ERR_PARAM;
goto DONE;
}
@@ -5826,7 +5945,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC filter type");
ret = I40E_ERR_PARAM;
goto DONE;
}
@@ -5881,14 +6000,11 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
}
static void
-i40e_set_vlan_filter(struct i40e_vsi *vsi,
- uint16_t vlan_id, bool on)
+i40e_store_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
{
uint32_t vid_idx, vid_bit;
- if (vlan_id > ETH_VLAN_ID_MAX)
- return;
-
vid_idx = I40E_VFTA_IDX(vlan_id);
vid_bit = I40E_VFTA_BIT(vlan_id);
@@ -5898,6 +6014,38 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
vsi->vfta[vid_idx] &= ~vid_bit;
}
+static void
+i40e_set_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return;
+
+ i40e_store_vlan_filter(vsi, vlan_id, on);
+
+ if (!vsi->vlan_anti_spoof_on || !vlan_id)
+ return;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+
+ if (on) {
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to add vlan filter");
+ } else {
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR,
+ "Failed to remove vlan filter");
+ }
+}
+
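The split above keeps pure software bookkeeping in i40e_store_vlan_filter() and lets i40e_set_vlan_filter() talk to the firmware (i40e_aq_add_vlan/i40e_aq_remove_vlan) only when VLAN anti-spoofing is enabled on the VSI. The software side is just a 4096-bit bitmap held in 32-bit words; a rough sketch of the indexing, assuming the conventional shift-by-5/mask-by-0x1F definitions behind I40E_VFTA_IDX and I40E_VFTA_BIT:

#include <stdint.h>
#include <stdbool.h>

#define VFTA_WORDS     (4096 / 32)            /* 128 words cover every VLAN id */
#define VFTA_IDX(vid)  ((vid) >> 5)           /* which 32-bit word */
#define VFTA_BIT(vid)  (1u << ((vid) & 0x1f)) /* which bit inside that word */

static void vfta_store(uint32_t vfta[VFTA_WORDS], uint16_t vlan_id, bool on)
{
	if (on)
		vfta[VFTA_IDX(vlan_id)] |= VFTA_BIT(vlan_id);
	else
		vfta[VFTA_IDX(vlan_id)] &= ~VFTA_BIT(vlan_id);
}

static bool vfta_test(const uint32_t vfta[VFTA_WORDS], uint16_t vlan_id)
{
	return (vfta[VFTA_IDX(vlan_id)] & VFTA_BIT(vlan_id)) != 0;
}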
/**
* Find all vlan options for specific mac addr,
* return with actual vlan found.
@@ -5923,8 +6071,8 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
if (vsi->vfta[j] & (1 << k)) {
if (i > num - 1) {
- PMD_DRV_LOG(ERR, "vlan number "
- "not match");
+ PMD_DRV_LOG(ERR,
+ "vlan number doesn't match");
return I40E_ERR_PARAM;
}
(void)rte_memcpy(&mv_f[i].macaddr,
@@ -5969,7 +6117,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
static int
i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
{
- int i, num;
+ int i, j, num;
struct i40e_mac_filter *f;
struct i40e_macvlan_filter *mv_f;
int ret = I40E_SUCCESS;
@@ -5994,6 +6142,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
TAILQ_FOREACH(f, &vsi->mac_list, next) {
(void)rte_memcpy(&mv_f[i].macaddr,
&f->mac_info.mac_addr, ETH_ADDR_LEN);
+ mv_f[i].filter_type = f->mac_info.filter_type;
mv_f[i].vlan_id = 0;
i++;
}
@@ -6003,6 +6152,8 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
vsi->vlan_num, &f->mac_info.mac_addr);
if (ret != I40E_SUCCESS)
goto DONE;
+ for (j = i; j < i + vsi->vlan_num; j++)
+ mv_f[j].filter_type = f->mac_info.filter_type;
i += vsi->vlan_num;
}
}
@@ -6214,7 +6365,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
filter_type == RTE_MACVLAN_HASH_MATCH) {
if (vlan_num == 0) {
- PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
return I40E_ERR_PARAM;
}
} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
@@ -6256,7 +6407,7 @@ DONE:
/* Configure hash enable flags for RSS */
uint64_t
-i40e_config_hena(uint64_t flags)
+i40e_config_hena(uint64_t flags, enum i40e_mac_type type)
{
uint64_t hena = 0;
@@ -6265,20 +6416,42 @@ i40e_config_hena(uint64_t flags)
if (flags & ETH_RSS_FRAG_IPV4)
hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
- if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
- if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ if (flags & ETH_RSS_NONFRAG_IPV4_TCP) {
+ if (type == I40E_MAC_X722) {
+ hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ } else
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ }
+ if (flags & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (type == I40E_MAC_X722) {
+ hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+ } else
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ }
if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
if (flags & ETH_RSS_FRAG_IPV6)
hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
- if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
- if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ if (flags & ETH_RSS_NONFRAG_IPV6_TCP) {
+ if (type == I40E_MAC_X722) {
+ hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ } else
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ }
+ if (flags & ETH_RSS_NONFRAG_IPV6_UDP) {
+ if (type == I40E_MAC_X722) {
+ hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+ } else
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ }
if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
@@ -6301,8 +6474,14 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV4;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
@@ -6311,8 +6490,14 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV6;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
@@ -6332,7 +6517,10 @@ i40e_pf_disable_rss(struct i40e_pf *pf)
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- hena &= ~I40E_RSS_HENA_ALL;
+ if (hw->mac.type == I40E_MAC_X722)
+ hena &= ~I40E_RSS_HENA_ALL_X722;
+ else
+ hena &= ~I40E_RSS_HENA_ALL;
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
I40E_WRITE_FLUSH(hw);
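The HENA-related hunks above all deal with the hash-enable word: it is logically 64 bits of per-PCTYPE enable flags, exposed as two 32-bit PFQF_HENA registers, and X722 devices define additional PCTYPEs, so the mask used to clear or test it differs by MAC type. A device-neutral sketch of that read-modify-write, with hypothetical accessors and masks standing in for the real i40e registers:

#include <stdint.h>

/* Hypothetical placeholders for this sketch only. */
extern uint32_t reg_read32(int idx);              /* stands in for i40e_read_rx_ctl() */
extern void     reg_write32(int idx, uint32_t v); /* stands in for i40e_write_rx_ctl() */
#define HENA_ALL       0x00000000ffff0000ULL      /* illustrative base-device mask */
#define HENA_ALL_X722  0x0000003fffff0000ULL      /* illustrative wider X722 mask */

static void rss_disable(int is_x722)
{
	/* Reassemble the 64-bit hash-enable word from the two registers. */
	uint64_t hena = (uint64_t)reg_read32(0) |
			((uint64_t)reg_read32(1) << 32);

	/* Clear only the PCTYPE bits this device family can actually set. */
	hena &= is_x722 ? ~HENA_ALL_X722 : ~HENA_ALL;

	/* Write the low and high halves back. */
	reg_write32(0, (uint32_t)hena);
	reg_write32(1, (uint32_t)(hena >> 32));
}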
@@ -6360,8 +6548,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to configure RSS key "
- "via AQ");
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
} else {
uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
@@ -6419,8 +6606,11 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
rss_hf = rss_conf->rss_hf;
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- hena &= ~I40E_RSS_HENA_ALL;
- hena |= i40e_config_hena(rss_hf);
+ if (hw->mac.type == I40E_MAC_X722)
+ hena &= ~I40E_RSS_HENA_ALL_X722;
+ else
+ hena &= ~I40E_RSS_HENA_ALL;
+ hena |= i40e_config_hena(rss_hf, hw->mac.type);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
I40E_WRITE_FLUSH(hw);
@@ -6439,7 +6629,9 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+ if (!(hena & ((hw->mac.type == I40E_MAC_X722)
+ ? I40E_RSS_HENA_ALL_X722
+ : I40E_RSS_HENA_ALL))) { /* RSS disabled */
if (rss_hf != 0) /* Enable RSS */
return -EINVAL;
return 0; /* Nothing to do */
@@ -6502,7 +6694,86 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
static int
+i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
+ *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
+ tunnel_filter->input.flags = cld_filter->flags;
+ tunnel_filter->input.tenant_id = cld_filter->tenant_id;
+ tunnel_filter->queue = cld_filter->queue_number;
+
+ return 0;
+}
+
+/* Check if the tunnel filter already exists */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
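The three helpers added above keep a software shadow of the cloud/tunnel filters programmed in hardware: an rte_hash keyed by struct i40e_tunnel_filter_input maps each filter to a slot, hash_map[] turns the returned slot back into the filter object, and a TAILQ keeps the filters iterable (e.g. for the i40e_filter_restore() call added to dev_start). A rough sketch of the same pattern, assuming a DPDK process with the EAL already initialized; the key and entry types, table size and names here are invented for illustration and are not the driver's:

#include <stdint.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_malloc.h>

/* Invented key/value types; the driver keys on i40e_tunnel_filter_input. */
struct tunnel_key   { uint32_t tenant_id; uint32_t inner_vlan; };
struct tunnel_entry { struct tunnel_key key; uint16_t queue; };

static struct rte_hash *tbl;
static struct tunnel_entry *slot[64];   /* position -> object, like rule->hash_map */

static int table_init(void)
{
	struct rte_hash_parameters p = {
		.name = "tunnel_sketch",
		.entries = 64,
		.key_len = sizeof(struct tunnel_key),
		.hash_func = rte_jhash,
		.socket_id = SOCKET_ID_ANY,
	};
	tbl = rte_hash_create(&p);
	return tbl ? 0 : -1;
}

static int entry_add(const struct tunnel_key *key, uint16_t queue)
{
	/* Reject duplicates up front, as i40e_dev_tunnel_filter_set() now does. */
	if (rte_hash_lookup(tbl, key) >= 0)
		return -1;

	struct tunnel_entry *e = rte_malloc(NULL, sizeof(*e), 0);
	if (e == NULL)
		return -1;
	e->key = *key;
	e->queue = queue;

	int pos = rte_hash_add_key(tbl, key);
	if (pos < 0) {
		rte_free(e);
		return pos;
	}
	slot[pos] = e;
	return 0;
}

static int entry_del(const struct tunnel_key *key)
{
	int pos = rte_hash_del_key(tbl, key);
	if (pos < 0)
		return pos;     /* no such filter */
	rte_free(slot[pos]);
	slot[pos] = NULL;
	return 0;
}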
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -6517,6 +6788,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_vsi *vsi = pf->main_vsi;
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
cld_filter = rte_zmalloc("tunnel_filter",
sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
@@ -6579,11 +6853,38 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
- if (add)
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return ret;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ cld_filter, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return ret;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
rte_free(cld_filter);
return ret;
@@ -6620,8 +6921,9 @@ i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
/* Now check if there is space to add the new port */
idx = i40e_get_vxlan_port_idx(pf, 0);
if (idx < 0) {
- PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
- "not adding port %d", port);
+ PMD_DRV_LOG(ERR,
+ "Maximum number of UDP ports reached, not adding port %d",
+ port);
return -ENOSPC;
}
@@ -6860,7 +7162,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
int ret = -EINVAL;
val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
- PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
+ PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
if (len == 3) {
reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
@@ -6879,7 +7181,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
} else {
ret = 0;
}
- PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
+ PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
return ret;
@@ -6992,15 +7294,15 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
if (enable > 0) {
if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
- PMD_DRV_LOG(INFO, "Symmetric hash has already "
- "been enabled");
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been enabled");
return;
}
reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
} else {
if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
- PMD_DRV_LOG(INFO, "Symmetric hash has already "
- "been disabled");
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been disabled");
return;
}
reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
@@ -7124,16 +7426,16 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
/* Toeplitz */
if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to "
- "Toeplitz");
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Toeplitz");
goto out;
}
reg |= I40E_GLQF_CTL_HTOEP_MASK;
} else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
/* Simple XOR */
if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to "
- "Simple XOR");
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Simple XOR");
goto out;
}
reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
@@ -7176,6 +7478,24 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7185,6 +7505,15 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7218,6 +7547,24 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7227,6 +7574,15 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7266,11 +7622,26 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
@@ -7292,11 +7663,26 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
@@ -7340,7 +7726,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
@@ -7349,9 +7735,18 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7363,9 +7758,18 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7484,25 +7888,23 @@ i40e_parse_input_set(uint64_t *inset,
* and vice versa
*/
static uint64_t
-i40e_translate_input_set_reg(uint64_t input)
+i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
{
uint64_t val = 0;
uint16_t i;
- static const struct {
+ struct inset_map {
uint64_t inset;
uint64_t inset_reg;
- } inset_map[] = {
+ };
+
+ static const struct inset_map inset_map_common[] = {
{I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
{I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
{I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
{I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
{I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
- {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
- {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
{I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
- {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
- {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
{I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
{I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
{I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
@@ -7531,13 +7933,40 @@ i40e_translate_input_set_reg(uint64_t input)
{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
};
+ /* some registers map differently on X722 */
+ static const struct inset_map inset_map_diff_x722[] = {
+ {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
+ {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
+ {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
+ {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
+ };
+
+ static const struct inset_map inset_map_diff_not_x722[] = {
+ {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
+ {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
+ {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
+ {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
+ };
+
if (input == 0)
return val;
/* Translate input set to register aware inset */
- for (i = 0; i < RTE_DIM(inset_map); i++) {
- if (input & inset_map[i].inset)
- val |= inset_map[i].inset_reg;
+ if (type == I40E_MAC_X722) {
+ for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
+ if (input & inset_map_diff_x722[i].inset)
+ val |= inset_map_diff_x722[i].inset_reg;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
+ if (input & inset_map_diff_not_x722[i].inset)
+ val |= inset_map_diff_not_x722[i].inset_reg;
+ }
+ }
+
+ for (i = 0; i < RTE_DIM(inset_map_common); i++) {
+ if (input & inset_map_common[i].inset)
+ val |= inset_map_common[i].inset_reg;
}
return val;
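With the change above, i40e_translate_input_set_reg() keeps one table shared by all parts plus two small per-family tables for the IPv4 fields whose register encoding differs on X722, and simply ORs in the register bits for every input-set bit present. The shape of that translation, reduced to a stand-alone sketch with invented flag and register values:

#include <stdint.h>
#include <stddef.h>

/* Invented abstract input-set flags, for illustration only. */
#define IN_SRC_IP   (1ULL << 0)
#define IN_DST_IP   (1ULL << 1)
#define IN_SRC_PORT (1ULL << 2)

struct inset_map { uint64_t inset; uint64_t inset_reg; };

/* Bits encoded the same way on every device family (invented values). */
static const struct inset_map map_common[] = {
	{ IN_SRC_PORT, 1ULL << 10 },
};

/* Fields whose register encoding differs between families. */
static const struct inset_map map_family_a[] = {
	{ IN_SRC_IP, 1ULL << 20 }, { IN_DST_IP, 1ULL << 21 },
};
static const struct inset_map map_family_b[] = {
	{ IN_SRC_IP, 1ULL << 30 }, { IN_DST_IP, 1ULL << 31 },
};

static uint64_t translate(uint64_t input, int is_family_b)
{
	/* Both per-family tables hold the same two entries in this sketch. */
	const struct inset_map *diff = is_family_b ? map_family_b : map_family_a;
	uint64_t val = 0;
	size_t i;

	if (input == 0)
		return 0;
	for (i = 0; i < 2; i++)
		if (input & diff[i].inset)
			val |= diff[i].inset_reg;
	for (i = 0; i < sizeof(map_common) / sizeof(map_common[0]); i++)
		if (input & map_common[i].inset)
			val |= map_common[i].inset_reg;
	return val;
}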
@@ -7596,10 +8025,10 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
- PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
if (reg != val)
i40e_write_rx_ctl(hw, addr, val);
- PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
(uint32_t)i40e_read_rx_ctl(hw, addr));
}
@@ -7614,15 +8043,22 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
- if (!I40E_VALID_PCTYPE(pctype))
- continue;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (!I40E_VALID_PCTYPE_X722(pctype))
+ continue;
+ } else {
+ if (!I40E_VALID_PCTYPE(pctype))
+ continue;
+ }
+
input_set = i40e_get_default_input_set(pctype);
num = i40e_generate_inset_mask_reg(input_set, mask_reg,
I40E_INSET_MASK_NUM_REG);
if (num < 0)
return;
- inset_reg = i40e_translate_input_set_reg(input_set);
+ inset_reg = i40e_translate_input_set_reg(hw->mac.type,
+ input_set);
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
(uint32_t)(inset_reg & UINT32_MAX));
@@ -7680,7 +8116,15 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
- pctype = i40e_flowtype_to_pctype(conf->flow_type);
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
+ I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
+ conf->flow_type)));
+ } else
+ pctype = i40e_flowtype_to_pctype(conf->flow_type);
+
ret = i40e_parse_input_set(&input_set, pctype, conf->field,
conf->inset_size);
if (ret) {
@@ -7704,7 +8148,7 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
if (num < 0)
return -EINVAL;
- inset_reg |= i40e_translate_input_set_reg(input_set);
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
(uint32_t)(inset_reg & UINT32_MAX));
@@ -7749,7 +8193,9 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
+
pctype = i40e_flowtype_to_pctype(conf->flow_type);
+
ret = i40e_parse_input_set(&input_set, pctype, conf->field,
conf->inset_size);
if (ret) {
@@ -7780,7 +8226,7 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
if (num < 0)
return -EINVAL;
- inset_reg |= i40e_translate_input_set_reg(input_set);
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
(uint32_t)(inset_reg & UINT32_MAX));
@@ -7893,16 +8339,95 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if the ethertype filter already exists */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
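The three SW-list helpers above assume that pf->ethertype.hash_table and hash_map were allocated at device init time; that setup is outside this hunk. A minimal sketch of how such a table could be created with the rte_hash API (names, sizing and error handling here are illustrative, not taken from this patch):

    /* Illustrative only -- the real initialisation lives elsewhere in
     * i40e_ethdev.c and may differ in naming and error handling. */
    #include <rte_hash.h>
    #include <rte_hash_crc.h>
    #include <rte_malloc.h>
    #include <rte_lcore.h>

    static int
    ethertype_rule_init_sketch(struct i40e_ethertype_rule *rule)
    {
        struct rte_hash_parameters params = {
            .name = "i40e_ethertype_hash",      /* assumed table name */
            .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
            .key_len = sizeof(struct i40e_ethertype_filter_input),
            .hash_func = rte_hash_crc,
            .socket_id = rte_socket_id(),
        };

        TAILQ_INIT(&rule->ethertype_list);
        rule->hash_table = rte_hash_create(&params);
        if (!rule->hash_table)
            return -EINVAL;
        rule->hash_map = rte_zmalloc("ethertype_hash_map",
                sizeof(struct i40e_ethertype_filter *) *
                I40E_MAX_ETHERTYPE_FILTER_NUM, 0);
        if (!rule->hash_map) {
            rte_hash_free(rule->hash_table);
            return -ENOMEM;
        }
        return 0;
    }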
/*
 * Configure ethertype filter, which can direct packets by filtering
* with mac address and ether_type or only ether_type
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -7913,13 +8438,29 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
}
if (filter->ether_type == ETHER_TYPE_IPv4 ||
filter->ether_type == ETHER_TYPE_IPv6) {
- PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
- " control packet filter.", filter->ether_type);
+ PMD_DRV_LOG(ERR,
+ "unsupported ether_type(0x%04x) in control packet filter.",
+ filter->ether_type);
return -EINVAL;
}
if (filter->ether_type == ETHER_TYPE_VLAN)
- PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
- " not supported.");
+ PMD_DRV_LOG(WARNING,
+ "filter vlan ether_type in first tag is not supported.");
+
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
@@ -7934,14 +8475,25 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
pf->main_vsi->seid,
filter->queue, add, &stats, NULL);
- PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
- " mac_etype_used = %u, etype_used = %u,"
- " mac_etype_free = %u, etype_free = %u\n",
- ret, stats.mac_etype_used, stats.etype_used,
- stats.mac_etype_free, stats.etype_free);
+ PMD_DRV_LOG(INFO,
+ "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
+ ret, stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
}
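From an application, this path is normally reached through the legacy filter_ctrl API; a hedged usage sketch (port, queue and EtherType values are arbitrary examples):

    /* Example only: steer EtherType 0x88F7 frames on port 0 to queue 4.
     * rte_eth_dev_filter_ctrl() with RTE_ETH_FILTER_ETHERTYPE ends up in
     * i40e_ethertype_filter_set() above. */
    struct rte_eth_ethertype_filter ef = {
        .ether_type = 0x88F7,   /* example EtherType; must not be IPv4/IPv6 */
        .flags = 0,             /* no RTE_ETHTYPE_FLAGS_MAC / _DROP */
        .queue = 4,
    };
    int rc = rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_ETHERTYPE,
                                     RTE_ETH_FILTER_ADD, &ef);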
/*
@@ -7976,7 +8528,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
FALSE);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -ENOSYS;
break;
}
@@ -8014,6 +8566,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -8031,10 +8588,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
static void
i40e_enable_extended_tag(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint32_t buf = 0;
int ret;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CAP_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8047,7 +8605,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
}
buf = 0;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8059,7 +8617,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
return;
}
buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
- ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_eal_pci_write_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
@@ -8122,8 +8680,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
@@ -8131,8 +8695,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
@@ -8168,6 +8738,23 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+static int
+i40e_dev_sync_phy_type(struct i40e_hw *hw)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp phy_ab;
+ int ret = -ENOTSUP;
+
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+
+ if (status)
+ return ret;
+
+ return 0;
+}
+
+
static void
i40e_configure_registers(struct i40e_hw *hw)
{
@@ -8185,7 +8772,8 @@ i40e_configure_registers(struct i40e_hw *hw)
for (i = 0; i < RTE_DIM(reg_table); i++) {
if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
- if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
+ I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
reg_table[i].val =
I40E_GL_SWR_PM_UP_THR_SF_VALUE;
else /* For X710 */
@@ -8208,9 +8796,9 @@ i40e_configure_registers(struct i40e_hw *hw)
ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
reg_table[i].val, NULL);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
- "address of 0x%"PRIx32, reg_table[i].val,
- reg_table[i].addr);
+ PMD_DRV_LOG(ERR,
+ "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
+ reg_table[i].val, reg_table[i].addr);
break;
}
PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
@@ -8255,8 +8843,9 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
I40E_VSI_L2TAGSTXVALID(
vsi->vsi_id), reg, NULL);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to update "
- "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
+ PMD_DRV_LOG(ERR,
+ "Failed to update VSI_L2TAGSTXVALID[%d]",
+ vsi->vsi_id);
return I40E_ERR_CONFIG;
}
}
@@ -8307,11 +8896,10 @@ i40e_aq_add_mirror_rule(struct i40e_hw *hw,
rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
- PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
- "rule_id = %u"
- " mirror_rules_used = %u, mirror_rules_free = %u,",
- hw->aq.asq_last_status, resp->rule_id,
- resp->mirror_rules_used, resp->mirror_rules_free);
+ PMD_DRV_LOG(INFO,
+ "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
+ hw->aq.asq_last_status, resp->rule_id,
+ resp->mirror_rules_used, resp->mirror_rules_free);
*rule_id = rte_le_to_cpu_16(resp->rule_id);
return status;
@@ -8389,8 +8977,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
- PMD_DRV_LOG(ERR, "mirror rule can not be configured"
- " without veb or vfs.");
+ PMD_DRV_LOG(ERR,
+ "mirror rule can not be configured without veb or vfs.");
return -ENOSYS;
}
if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
@@ -8422,9 +9010,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
- " ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
@@ -8513,9 +9101,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
mirr_rule->rule_type, mirr_rule->entries,
j, &rule_id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to add mirror rule:"
- " ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to add mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
rte_free(mirr_rule);
return -ENOSYS;
}
@@ -8567,9 +9155,9 @@ i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
- " status = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: status = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
@@ -9001,9 +9589,9 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
&veb_bw, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation"
- " per TC failed = %d",
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "AQ command Config switch_comp BW allocation per TC failed = %d",
+ hw->aq.asq_last_status);
return ret;
}
@@ -9011,16 +9599,18 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
&ets_query, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS"
- " configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp ETS configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
memset(&bw_query, 0, sizeof(bw_query));
ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
&bw_query, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth"
- " configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp bandwidth configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
@@ -9085,8 +9675,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
}
ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
- " per TC failed = %d",
+ PMD_INIT_LOG(ERR,
+ "AQ command Config VSI BW allocation per TC failed = %d",
hw->aq.asq_last_status);
goto out;
}
@@ -9107,9 +9697,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
/* Update the VSI after updating the VSI queue-mapping information */
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to configure "
- "TC queue mapping = %d",
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
+ hw->aq.asq_last_status);
goto out;
}
/* update the local VSI info with updated queue map */
@@ -9161,8 +9750,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
/* Use the FW API if FW > v4.4*/
if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
(hw->aq.fw_maj_ver >= 5))) {
- PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API"
- " to configure DCB");
+ PMD_INIT_LOG(ERR,
+ "FW < v4.4, can not use FW LLDP API to configure DCB");
return I40E_ERR_FIRMWARE_API_VERSION;
}
@@ -9177,8 +9766,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
old_cfg->etsrec = old_cfg->etscfg;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Set DCB Config failed, err %s aq_err %s\n",
+ PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
i40e_stat_str(hw, ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
@@ -9210,7 +9798,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VEB seid=%d\n",
+ "Failed configuring TC for VEB seid=%d",
main_vsi->veb->seid);
}
/* Update each VSI */
@@ -9228,8 +9816,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
I40E_DEFAULT_TCMAP);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VSI seid=%d\n",
- vsi_list->vsi->seid);
+ "Failed configuring TC for VSI seid=%d",
+ vsi_list->vsi->seid);
/* continue */
}
}
@@ -9243,7 +9831,6 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
*
* Returns 0 on success, negative value on failure
*/
-//TREX_PATCH - changed all ERR to INFO in below func
static int
i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
@@ -9252,7 +9839,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
int ret = 0;
if ((pf->flags & I40E_FLAG_DCB) == 0) {
- PMD_INIT_LOG(INFO, "HW doesn't support DCB");
+ PMD_INIT_LOG(ERR, "HW doesn't support DCB");
return -ENOTSUP;
}
@@ -9261,29 +9848,21 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
* LLDP MIB change event.
*/
if (sw_dcb == TRUE) {
- ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
- if (ret != I40E_SUCCESS)
- PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
-
ret = i40e_init_dcb(hw);
- /* if sw_dcb, lldp agent is stopped, the return from
+ /* If lldp agent is stopped, the return value from
* i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
- * adminq status.
+ * adminq status. Otherwise, it should return success.
*/
- if (ret != I40E_SUCCESS &&
- hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
memset(&hw->local_dcbx_config, 0,
sizeof(struct i40e_dcbx_config));
/* set dcb default configuration */
hw->local_dcbx_config.etscfg.willing = 0;
hw->local_dcbx_config.etscfg.maxtcs = 0;
hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
- hw->local_dcbx_config.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
-#ifdef TREX_PATCH_LOW_LATENCY
- hw->local_dcbx_config.etscfg.tcbwtable[1] = 0;
- hw->local_dcbx_config.etscfg.tsatable[1] = I40E_IEEE_TSA_STRICT;
- hw->local_dcbx_config.etscfg.prioritytable[1] = 1;
-#endif
+ hw->local_dcbx_config.etscfg.tsatable[0] =
+ I40E_IEEE_TSA_ETS;
hw->local_dcbx_config.etsrec =
hw->local_dcbx_config.etscfg;
hw->local_dcbx_config.pfc.willing = 0;
@@ -9298,22 +9877,15 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
I40E_APP_PROTOID_FCOE;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(INFO, "default dcb config fails."
- " err = %d, aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "default dcb config fails. err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
-#ifdef TREX_PATCH_LOW_LATENCY
- if (i40e_vsi_update_tc_bandwidth_ex(pf->main_vsi) !=
- I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
- return -ENOSYS;
- }
-#endif
} else {
- PMD_INIT_LOG(INFO, "DCBX configuration failed, err = %d,"
- " aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "DCB initialization in FW fails, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
} else {
@@ -9324,14 +9896,14 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
ret = i40e_init_dcb(hw);
if (!ret) {
if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- PMD_INIT_LOG(INFO, "HW doesn't support"
- " DCBX offload.");
+ PMD_INIT_LOG(ERR,
+ "HW doesn't support DCBX offload.");
return -ENOTSUP;
}
} else {
- PMD_INIT_LOG(INFO, "DCBX configuration failed, err = %d,"
- " aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "DCBX configuration failed, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
}
@@ -9440,7 +10012,8 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -9465,7 +10038,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
I40E_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
return 0;
}
@@ -9473,7 +10046,8 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -9605,8 +10179,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* mtu setting is forbidden if port is start */
if (dev_data->dev_started) {
- PMD_DRV_LOG(ERR,
- "port %d must be stopped before configuration\n",
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
dev_data->port_id);
return -EBUSY;
}
@@ -9620,3 +10193,997 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+ PMD_DRV_LOG(INFO, "Ethertype filter:"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u",
+ stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+}
+
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ rte_memcpy(&cld_filter, &f->input, sizeof(f->input));
+ cld_filter.queue_number = f->queue;
+ i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1);
+ }
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
+}
+
+static int
+is_i40e_pmd(const char *driver_name)
+{
+ if (!strstr(driver_name, "i40e"))
+ return -ENOTSUP;
+
+ if (strstr(driver_name, "i40e_vf"))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
+ if (on) {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
+{
+ uint32_t j, k;
+ uint16_t vlan_id;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (!vsi->vfta[j])
+ continue;
+
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (!(vsi->vfta[j] & (1 << k)))
+ continue;
+
+ vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+ if (!vlan_id)
+ continue;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+ if (add)
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ else
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to add/rm vlan filter");
+ return ret;
+ }
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
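The loop above recovers each configured VLAN as vlan_id = j * 32 + k from the VFTA bitmap; the inverse mapping is the usual word/bit split, sketched below for clarity (helper name is illustrative):

    /* Sketch of the VFTA addressing used above: 32 VLANs per array word,
     * so bit k of vsi->vfta[j] corresponds to VLAN (j * 32 + k). */
    static inline int
    vfta_is_set_sketch(const struct i40e_vsi *vsi, uint16_t vlan_id)
    {
        uint32_t j = vlan_id >> 5;      /* vlan_id / 32 */
        uint32_t k = vlan_id & 0x1F;    /* vlan_id % 32 */

        return !!(vsi->vfta[j] & (1u << k));
    }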
+int
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->vlan_anti_spoof_on == on)
+ return 0; /* already on or off */
+
+ vsi->vlan_anti_spoof_on = on;
+ ret = i40e_add_rm_all_vlan_filter(vsi, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to remove VLAN filters.");
+ return -ENOTSUP;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num;
+ enum rte_mac_filter_type filter_type;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* remove all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ vlan_num = vsi->vlan_num;
+ filter_type = f->mac_info.filter_type;
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (vlan_num == 0) {
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
+ return I40E_ERR_PARAM;
+ }
+ } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+ filter_type == RTE_MAC_HASH_MATCH)
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num = 0;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* restore all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
+ /**
+ * If vlan_num is 0, that's the first time to add mac,
+ * set mask for vlan_id 0.
+ */
+ if (vsi->vlan_num == 0) {
+ i40e_set_vlan_filter(vsi, 0, 1);
+ vsi->vlan_num = 1;
+ }
+ vlan_num = vsi->vlan_num;
+ } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = f->mac_info.filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+
+ if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
+{
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi)
+ return -EINVAL;
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* Use the FW API if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver < 5) {
+ PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
+ return -ENOTSUP;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
+ if (on) {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ /* remove all the MAC and VLAN first */
+ ret = i40e_vsi_rm_mac_filter(vsi);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
+ return ret;
+ }
+ if (vsi->vlan_anti_spoof_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 0);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
+ return ret;
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ if (on)
+ vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+ else
+ vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ return ret;
+ }
+
+ /* add all the MAC and VLAN back */
+ ret = i40e_vsi_restore_mac_filter(vsi);
+ if (ret)
+ return ret;
+ if (vsi->vlan_anti_spoof_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ uint16_t vf_id;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* setup PF TX loopback */
+ vsi = pf->main_vsi;
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+
+ /* setup TX loopback for all the VFs */
+ if (!pf->vfs) {
+ /* if no VF, do nothing. */
+ return 0;
+ }
+
+ for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ on, NULL, true);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ on, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_mac_filter *f;
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ void *temp;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ ether_addr_copy(mac_addr, &vf->mac_addr);
+
+ /* Remove all existing mac */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
+ i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+
+ return 0;
+}
+
+/* Set vlan strip on/off for specific VF from host */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+
+ if (!vsi)
+ return -EINVAL;
+
+ ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+ uint16_t vlan_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (vlan_id > ETHER_MAX_VLAN_ID) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0)
+ return -ENODEV;
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.pvid = vlan_id;
+ if (vlan_id > 0)
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ else
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+ uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, on, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ if (on) {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ } else {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ uint16_t vf_idx;
+ int ret = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ if (vlan_id > ETHER_MAX_VLAN_ID) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ if (vf_mask == 0) {
+ PMD_DRV_LOG(ERR, "No VF.");
+ return -EINVAL;
+ }
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ for (vf_idx = 0; vf_idx < 64 && ret == I40E_SUCCESS; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ if (on)
+ ret = i40e_vsi_add_vlan(pf->vfs[vf_idx].vsi,
+ vlan_id);
+ else
+ ret = i40e_vsi_delete_vlan(pf->vfs[vf_idx].vsi,
+ vlan_id);
+ }
+ }
+
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ i40e_update_vsi_stats(vsi);
+
+ stats->ipackets = vsi->eth_stats.rx_unicast +
+ vsi->eth_stats.rx_multicast +
+ vsi->eth_stats.rx_broadcast;
+ stats->opackets = vsi->eth_stats.tx_unicast +
+ vsi->eth_stats.tx_multicast +
+ vsi->eth_stats.tx_broadcast;
+ stats->ibytes = vsi->eth_stats.rx_bytes;
+ stats->obytes = vsi->eth_stats.tx_bytes;
+ stats->ierrors = vsi->eth_stats.rx_discards;
+ stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_reset_vf_stats(uint8_t port,
+ uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (is_i40e_pmd(dev->data->drv_name))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->offset_loaded = false;
+ i40e_update_vsi_stats(vsi);
+
+ return 0;
+}
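Taken together, the rte_pmd_i40e_* additions above let the host application manage VFs directly from the PF. A hedged usage sketch from the application side (port and VF numbers are arbitrary):

    /* Illustration only: enable MAC anti-spoofing on VF 0 of port 0, read
     * its counters, then ping it with a link-status notification. */
    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>
    #include <rte_pmd_i40e.h>

    static void
    vf_admin_example(void)
    {
        struct rte_eth_stats st;

        if (rte_pmd_i40e_set_vf_mac_anti_spoof(0, 0, 1) != 0)
            return;
        if (rte_pmd_i40e_get_vf_stats(0, 0, &st) == 0)
            printf("VF0: %" PRIu64 " rx pkts, %" PRIu64 " tx pkts\n",
                   st.ipackets, st.opackets);
        rte_pmd_i40e_ping_vfs(0, 0);
    }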
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.h b/src/dpdk/drivers/net/i40e/i40e_ethdev.h
index 92c8fad0..9e2f7a28 100644
--- a/src/dpdk/drivers/net/i40e/i40e_ethdev.h
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.h
@@ -37,6 +37,8 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -126,6 +128,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_FLAG_FDIR (1ULL << 6)
#define I40E_FLAG_VXLAN (1ULL << 7)
#define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8)
+#define I40E_FLAG_VF_MAC_BY_PF (1ULL << 9)
#define I40E_FLAG_ALL (I40E_FLAG_RSS | \
I40E_FLAG_DCB | \
I40E_FLAG_VMDQ | \
@@ -134,7 +137,8 @@ enum i40e_flxpld_layer_idx {
I40E_FLAG_HEADER_SPLIT_ENABLED | \
I40E_FLAG_FDIR | \
I40E_FLAG_VXLAN | \
- I40E_FLAG_RSS_AQ_CAPABLE)
+ I40E_FLAG_RSS_AQ_CAPABLE | \
+ I40E_FLAG_VF_MAC_BY_PF)
#define I40E_RSS_OFFLOAD_ALL ( \
ETH_RSS_FRAG_IPV4 | \
@@ -149,6 +153,16 @@ enum i40e_flxpld_layer_idx {
ETH_RSS_NONFRAG_IPV6_OTHER | \
ETH_RSS_L2_PAYLOAD)
+/* All bits of RSS hash enable for X722*/
+#define I40E_RSS_HENA_ALL_X722 ( \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ I40E_RSS_HENA_ALL)
+
/* All bits of RSS hash enable */
#define I40E_RSS_HENA_ALL ( \
(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
@@ -178,6 +192,65 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
+ I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+#define I40E_INSET_NONE 0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
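These bits are OR-ed together per packet classifier type when programming the input-set registers; an illustrative combination for non-fragmented IPv4/UDP is shown below (the authoritative per-PCTYPE defaults come from i40e_get_default_input_set()):

    /* Illustrative only -- not a value defined by this patch. */
    #define EXAMPLE_INSET_NONF_IPV4_UDP \
        (I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | \
         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)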
+
struct i40e_adapter;
/**
@@ -290,6 +363,7 @@ struct i40e_vsi {
uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
uint16_t nb_msix; /* The max number of msix vector */
uint8_t enabled_tc; /* The traffic class enabled */
+ uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */
struct i40e_bw_info bw_info; /* VSI bandwidth information */
};
@@ -366,6 +440,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -384,6 +466,60 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue to assign packets to on match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner VLAN ID to match */
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint16_t queue; /* Queue to assign packets to on match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
};
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
@@ -408,6 +544,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Struct to store flow created.
+ */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+TAILQ_HEAD(i40e_flow_list, rte_flow);
+
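Each flow created through the generic flow API is tracked on pf->flow_list with the filter type it was translated into. A hedged application-side sketch (pattern, action and port values are invented for illustration):

    /* Example only: create an ingress flow matching one EtherType and
     * queueing it; on success the PMD is expected to add a struct rte_flow
     * node to pf->flow_list referencing the programmed filter. */
    #include <rte_flow.h>
    #include <rte_byteorder.h>

    static struct rte_flow *
    flow_create_example(uint8_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec = {
            .type = rte_cpu_to_be_16(0x88F7),   /* example EtherType */
        };
        struct rte_flow_item_eth eth_mask = {
            .type = rte_cpu_to_be_16(0xFFFF),
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &eth_spec, .mask = &eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 4 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }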
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -456,12 +603,15 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
};
enum pending_msg {
@@ -517,7 +667,7 @@ struct i40e_vf {
enum i40e_aq_link_speed link_speed;
bool vf_reset;
volatile uint32_t pend_cmd; /* pending command not finished yet */
- uint32_t cmd_retval; /* return value of the cmd response from PF */
+ int32_t cmd_retval; /* return value of the cmd response from PF */
u16 pend_msg; /* flags indicates events from pf not handled yet */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
@@ -554,6 +704,25 @@ struct i40e_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+extern const struct rte_flow_ops i40e_flow_ops;
+
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
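The parse_filter_t / i40e_valid_pattern pairing implies a table that maps each supported item sequence to its parse callback; a hypothetical entry would look like the sketch below (names are invented; the real table and parsers live in i40e_flow.c):

    /* Hypothetical sketch only. */
    static enum rte_flow_item_type pattern_ethertype_sketch[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
    };

    /* Assumed parser with the parse_filter_t signature. */
    static int parse_ethertype_sketch(struct rte_eth_dev *dev,
                                      const struct rte_flow_attr *attr,
                                      const struct rte_flow_item pattern[],
                                      const struct rte_flow_action actions[],
                                      struct rte_flow_error *error,
                                      union i40e_filter_t *filter);

    static struct i40e_valid_pattern valid_patterns_sketch[] = {
        { pattern_ethertype_sketch, parse_ethertype_sketch },
    };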
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
@@ -577,7 +746,7 @@ int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
struct i40e_vsi_vlan_pvid_info *info);
int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on);
int i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on);
-uint64_t i40e_config_hena(uint64_t flags);
+uint64_t i40e_config_hena(uint64_t flags, enum i40e_mac_type type);
uint64_t i40e_parse_hena(uint64_t flags);
enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf);
enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf);
@@ -595,15 +764,44 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
struct rte_eth_input_set_conf *conf);
-
+int i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, uint32_t opcode,
+ uint32_t retval, uint8_t *msg,
+ uint16_t msglen);
void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct rte_eth_fdir_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
+
+#define I40E_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
@@ -699,6 +897,25 @@ i40e_calc_itr_interval(int16_t interval)
(flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER || \
(flow_type) == RTE_ETH_FLOW_L2_PAYLOAD)
+#define I40E_VALID_PCTYPE_X722(pctype) \
+ ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+
#define I40E_VALID_PCTYPE(pctype) \
((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \
@@ -712,4 +929,18 @@ i40e_calc_itr_interval(int16_t interval)
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
(pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+#define I40E_PHY_TYPE_SUPPORT_40G(phy_type) \
+ (((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_KR4) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_AOC) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_SR4) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_LR4))
+
+#define I40E_PHY_TYPE_SUPPORT_25G(phy_type) \
+ (((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_KR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_CR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR))
+
#endif /* _I40E_ETHDEV_H_ */
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev_vf.c b/src/dpdk/drivers/net/i40e/i40e_ethdev_vf.c
index a616ae0b..a606aefe 100644
--- a/src/dpdk/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev_vf.c
@@ -126,8 +126,6 @@ static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
-static int i40evf_get_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link);
static int i40evf_init_vlan(struct rte_eth_dev *dev);
static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
@@ -153,6 +151,9 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
+static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
@@ -178,11 +179,11 @@ static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
rx_unknown_protocol)},
{"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
+ {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
+ {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
+ {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
+ {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
+ {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
};
#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
@@ -227,6 +228,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.reta_query = i40evf_dev_rss_reta_query,
.rss_hash_update = i40evf_dev_rss_hash_update,
.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
+ .mtu_set = i40evf_dev_mtu_set,
+ .mac_addr_set = i40evf_set_default_mac_addr,
};
/*
@@ -363,6 +366,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
err = -1;
do {
ret = i40evf_read_pfmsg(dev, &info);
+ vf->cmd_retval = info.result;
if (ret == I40EVF_MSG_CMD) {
err = 0;
break;
@@ -641,7 +645,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
return ret;
}
@@ -694,7 +698,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
return ret;
}
@@ -720,7 +724,8 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
sizeof(struct i40e_virtchnl_vector_map)];
struct i40e_virtchnl_irq_map_info *map_info;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vector_id;
int i, err;
@@ -888,19 +893,16 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
}
static void
-i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
{
struct i40e_virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct rte_eth_dev_data *data = dev->data;
- struct ether_addr *addr;
uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
sizeof(struct i40e_virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
- addr = &(data->mac_addrs[index]);
-
if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
addr->addr_bytes[0], addr->addr_bytes[1],
@@ -927,6 +929,17 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
return;
}
+static void
+i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *addr;
+
+ addr = &data->mac_addrs[index];
+
+ i40evf_del_mac_addr_by_addr(dev, addr);
+}
+
static int
i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
@@ -954,7 +967,7 @@ i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
}
static int
-i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+i40evf_get_statistics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
int ret;
struct i40e_eth_stats *pstats = NULL;
@@ -1084,37 +1097,11 @@ i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
return err;
}
-static int
-i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
-{
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int err;
- struct vf_cmd_info args;
- struct rte_eth_link *new_link;
-
- args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
- args.in_args = NULL;
- args.in_args_size = 0;
- args.out_buffer = vf->aq_resp;
- args.out_size = I40E_AQ_BUF_SZ;
- err = i40evf_execute_vf_cmd(dev, &args);
- if (err) {
- PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
- return err;
- }
-
- new_link = (struct rte_eth_link *)args.out_buffer;
- (void)rte_memcpy(link, new_link, sizeof(*link));
-
- return 0;
-}
-
static const struct rte_pci_id pci_id_i40evf_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
- { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -1208,7 +1195,6 @@ i40evf_init_vf(struct rte_eth_dev *dev)
int i, err, bufsz;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct ether_addr *p_mac_addr;
uint16_t interval =
i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
@@ -1285,9 +1271,8 @@ i40evf_init_vf(struct rte_eth_dev *dev)
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Store the MAC address configured by host, or generate random one */
- p_mac_addr = (struct ether_addr *)(vf->vsi_res->default_mac_addr);
- if (is_valid_assigned_ether_addr(p_mac_addr)) /* Configured by host */
- ether_addr_copy(p_mac_addr, (struct ether_addr *)hw->mac.addr);
+ if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+ vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
else
eth_random_addr(hw->mac.addr); /* Generate a random one */
@@ -1340,16 +1325,16 @@ i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
switch (pf_msg->event) {
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
break;
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
vf->link_up = pf_msg->event_data.link_event.link_status;
vf->link_speed = pf_msg->event_data.link_event.link_speed;
break;
case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
default:
PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
@@ -1363,8 +1348,9 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_arq_event_info info;
- struct i40e_virtchnl_msg *v_msg;
- uint16_t pending, opcode;
+ uint16_t pending, aq_opc;
+ enum i40e_virtchnl_ops msg_opc;
+ enum i40e_status_code msg_ret;
int ret;
info.buf_len = I40E_AQ_BUF_SZ;
@@ -1373,7 +1359,6 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
return;
}
info.msg_buf = vf->aq_resp;
- v_msg = (struct i40e_virtchnl_msg *)&info.desc;
pending = 1;
while (pending) {
@@ -1384,32 +1369,39 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
"ret: %d", ret);
break;
}
- opcode = rte_le_to_cpu_16(info.desc.opcode);
-
- switch (opcode) {
+ aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+		/* For the message sent from pf to vf, the opcode is stored in
+		 * cookie_high of struct i40e_aq_desc, while the return error code
+		 * is stored in cookie_low; this is done by
+		 * i40e_aq_send_msg_to_vf in the PF driver. */
+ msg_opc = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(
+ info.desc.cookie_high);
+ msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
+ info.desc.cookie_low);
+ switch (aq_opc) {
case i40e_aqc_opc_send_msg_to_vf:
- if (v_msg->v_opcode == I40E_VIRTCHNL_OP_EVENT)
+ if (msg_opc == I40E_VIRTCHNL_OP_EVENT)
/* process event*/
i40evf_handle_pf_event(dev, info.msg_buf,
info.msg_len);
else {
/* read message and it's expected one */
- if (v_msg->v_opcode == vf->pend_cmd) {
- vf->cmd_retval = v_msg->v_retval;
+ if (msg_opc == vf->pend_cmd) {
+ vf->cmd_retval = msg_ret;
/* prevent compiler reordering */
rte_compiler_barrier();
_clear_cmd(vf);
} else
PMD_DRV_LOG(ERR, "command mismatch,"
"expect %u, get %u",
- vf->pend_cmd, v_msg->v_opcode);
+ vf->pend_cmd, msg_opc);
PMD_DRV_LOG(DEBUG, "adminq response is received,"
- " opcode = %d\n", v_msg->v_opcode);
+ " opcode = %d", msg_opc);
}
break;
default:
PMD_DRV_LOG(ERR, "Request %u is not supported yet",
- opcode);
+ aq_opc);
break;
}
}
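
As the comment in the hunk above notes, the PF stores the virtchnl opcode in cookie_high and the return code in cookie_low of the admin-queue descriptor when it answers a VF. A minimal sketch of that decode step; the struct below only mimics the two cookie words, and the real driver additionally byte-swaps with rte_le_to_cpu_32():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the two cookie words of struct i40e_aq_desc. */
struct aq_cookies {
	uint32_t cookie_high;   /* virtchnl opcode written by the PF */
	uint32_t cookie_low;    /* status/return code written by the PF */
};

int main(void)
{
	/* Example values as they might arrive in a PF reply (host byte order here). */
	struct aq_cookies desc = { .cookie_high = 6, .cookie_low = 0 };

	uint32_t msg_opc = desc.cookie_high;          /* which command is being answered */
	int32_t  msg_ret = (int32_t)desc.cookie_low;  /* 0 on success */

	printf("opcode=%u retval=%d\n", msg_opc, msg_ret);
	return 0;
}
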
@@ -1428,7 +1420,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+i40evf_dev_interrupt_handler(struct rte_intr_handle *intr_handle,
void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -1442,31 +1434,31 @@ i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
/* No interrupt event indicated */
if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
- PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do\n");
+ PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
goto done;
}
if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
- PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported\n");
+ PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
i40evf_handle_aq_msg(dev);
}
/* Link Status Change interrupt */
if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
- " do nothing\n");
+ " do nothing");
done:
i40evf_enable_irq0(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
}
static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
- eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct i40e_hw *hw
+ = I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1485,15 +1477,16 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
- hw->device_id = eth_dev->pci_dev->id.device_id;
- hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
- hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
- hw->bus.device = eth_dev->pci_dev->addr.devid;
- hw->bus.func = eth_dev->pci_dev->addr.function;
- hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->adapter_stopped = 0;
if(i40evf_init_vf(eth_dev) != 0) {
@@ -1554,38 +1547,19 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
*/
static struct eth_driver rte_i40evf_pmd = {
.pci_drv = {
- .name = "rte_i40evf_pmd",
.id_table = pci_id_i40evf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = i40evf_dev_init,
.eth_dev_uninit = i40evf_dev_uninit,
.dev_private_size = sizeof(struct i40e_adapter),
};
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
- */
-static int
-rte_i40evf_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_i40evf_pmd);
-
- return 0;
-}
-
-static struct rte_driver rte_i40evf_driver = {
- .type = PMD_PDEV,
- .init = rte_i40evf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_i40evf_driver, i40evf);
-DRIVER_REGISTER_PCI_TABLE(i40evf, pci_id_i40evf_map);
+RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio");
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
@@ -1900,7 +1874,8 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
I40E_WRITE_REG(hw,
@@ -1932,7 +1907,8 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
@@ -1958,7 +1934,8 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1984,7 +1961,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
return 0;
}
@@ -1992,7 +1969,8 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -2072,7 +2050,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
PMD_INIT_FUNC_TRACE();
@@ -2096,7 +2075,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -2137,7 +2116,8 @@ err_queue:
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -2166,35 +2146,33 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
 * DPDK pf host provides an interface to acquire link status,
 * while the Linux driver does not
*/
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
- i40evf_get_link_status(dev, &new_link);
- else {
- /* Linux driver PF host */
- switch (vf->link_speed) {
- case I40E_LINK_SPEED_100MB:
- new_link.link_speed = ETH_SPEED_NUM_100M;
- break;
- case I40E_LINK_SPEED_1GB:
- new_link.link_speed = ETH_SPEED_NUM_1G;
- break;
- case I40E_LINK_SPEED_10GB:
- new_link.link_speed = ETH_SPEED_NUM_10G;
- break;
- case I40E_LINK_SPEED_20GB:
- new_link.link_speed = ETH_SPEED_NUM_20G;
- break;
- case I40E_LINK_SPEED_40GB:
- new_link.link_speed = ETH_SPEED_NUM_40G;
- break;
- default:
- new_link.link_speed = ETH_SPEED_NUM_100M;
- break;
- }
- /* full duplex only */
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_status = vf->link_up ? ETH_LINK_UP :
- ETH_LINK_DOWN;
+
+ /* Linux driver PF host */
+ switch (vf->link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ new_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ new_link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ new_link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ new_link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ default:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
}
+ /* full duplex only */
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? ETH_LINK_UP :
+ ETH_LINK_DOWN;
+
i40evf_dev_atomic_write_link_status(dev, &new_link);
return 0;
@@ -2266,6 +2244,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2326,15 +2305,16 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static void
i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- if (i40evf_get_statics(dev, stats))
- PMD_DRV_LOG(ERR, "Get statics failed");
+ if (i40evf_get_statistics(dev, stats))
+ PMD_DRV_LOG(ERR, "Get statistics failed");
}
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
i40evf_dev_stop(dev);
hw->adapter_stopped = 1;
@@ -2342,11 +2322,11 @@ i40evf_dev_close(struct rte_eth_dev *dev)
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
/* disable uio intr before callback unregister */
- rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_disable(intr_handle);
/* unregister callback func from eal lib */
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_unregister(intr_handle,
+ i40evf_dev_interrupt_handler, dev);
i40evf_disable_irq0(hw);
}
@@ -2423,7 +2403,7 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
@@ -2462,7 +2442,7 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
@@ -2568,8 +2548,11 @@ i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
rss_hf = rss_conf->rss_hf;
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- hena &= ~I40E_RSS_HENA_ALL;
- hena |= i40e_config_hena(rss_hf);
+ if (hw->mac.type == I40E_MAC_X722)
+ hena &= ~I40E_RSS_HENA_ALL_X722;
+ else
+ hena &= ~I40E_RSS_HENA_ALL;
+ hena |= i40e_config_hena(rss_hf, hw->mac.type);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
I40EVF_WRITE_FLUSH(hw);
@@ -2585,7 +2568,10 @@ i40evf_disable_rss(struct i40e_vf *vf)
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- hena &= ~I40E_RSS_HENA_ALL;
+ if (hw->mac.type == I40E_MAC_X722)
+ hena &= ~I40E_RSS_HENA_ALL_X722;
+ else
+ hena &= ~I40E_RSS_HENA_ALL;
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
I40EVF_WRITE_FLUSH(hw);
@@ -2601,7 +2587,7 @@ i40evf_config_rss(struct i40e_vf *vf)
if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "RSS not configured\n");
+ PMD_DRV_LOG(DEBUG, "RSS not configured");
return 0;
}
@@ -2618,7 +2604,7 @@ i40evf_config_rss(struct i40e_vf *vf)
rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
+ PMD_DRV_LOG(DEBUG, "No hash flag is set");
return 0;
}
@@ -2646,7 +2632,9 @@ i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+ if (!(hena & ((hw->mac.type == I40E_MAC_X722)
+ ? I40E_RSS_HENA_ALL_X722
+ : I40E_RSS_HENA_ALL))) { /* RSS disabled */
if (rss_hf != 0) /* Enable RSS */
return -EINVAL;
return 0;
@@ -2676,3 +2664,55 @@ i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+
+static int
+i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = vf->dev_data;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN
+ + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
+ int ret = 0;
+
+ /* check if mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ return -EINVAL;
+
+	/* MTU setting is forbidden while the port is started */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev_data->dev_conf.rxmode.jumbo_frame = 0;
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
+
+static void
+i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (!is_valid_assigned_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
+ return;
+ }
+
+ if (is_same_ether_addr(mac_addr, dev->data->mac_addrs))
+ return;
+
+ if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
+ return;
+
+ i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs);
+
+ i40evf_add_mac_addr(dev, mac_addr, 0, 0);
+}
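
The new i40evf_dev_mtu_set() above converts an MTU into a maximum frame length by adding the Ethernet header, CRC and one VLAN tag, rejects out-of-range values, and flips the jumbo flag when the result exceeds a standard frame. A self-contained sketch of that arithmetic; the constants are the usual Ethernet sizes plus the i40e frame limit, defined locally here for illustration:

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_SZ       14   /* dst MAC + src MAC + ethertype */
#define ETHER_CRC_SZ        4
#define VLAN_TAG_SZ         4
#define ETHER_MAX_FRAME  1518   /* standard (non-jumbo) maximum frame */
#define I40E_MAX_FRAME   9728   /* I40E_FRAME_SIZE_MAX in the driver */
#define MIN_MTU            68

int main(void)
{
	uint16_t mtu = 9000;
	uint32_t frame_size = mtu + ETHER_HDR_SZ + ETHER_CRC_SZ + VLAN_TAG_SZ;

	if (mtu < MIN_MTU || frame_size > I40E_MAX_FRAME) {
		printf("MTU %u rejected\n", mtu);
		return 1;
	}
	printf("MTU %u -> max_rx_pkt_len %u, jumbo_frame=%d\n",
	       mtu, frame_size, frame_size > ETHER_MAX_FRAME);
	return 0;
}
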
diff --git a/src/dpdk/drivers/net/i40e/i40e_fdir.c b/src/dpdk/drivers/net/i40e/i40e_fdir.c
index 33cb6dab..0700253b 100644
--- a/src/dpdk/drivers/net/i40e/i40e_fdir.c
+++ b/src/dpdk/drivers/net/i40e/i40e_fdir.c
@@ -74,11 +74,8 @@
#define I40E_FDIR_UDP_DEFAULT_LEN 400
/* Wait count and interval for fdir filter programming */
-#define TREX_PATCH
-// TREX_PATCH - Values were 10 and 1000. These numbers give much better performance when
-// configuring large amount of rules
-#define I40E_FDIR_WAIT_COUNT 100
-#define I40E_FDIR_WAIT_INTERVAL_US 100
+#define I40E_FDIR_WAIT_COUNT 10
+#define I40E_FDIR_WAIT_INTERVAL_US 1000
/* Wait count and interval for fdir filter flush */
#define I40E_FDIR_FLUSH_RETRY 50
@@ -122,7 +119,13 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -254,7 +257,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
/* reserve memory for the fdir programming packet */
snprintf(z_name, sizeof(z_name), "%s_%s_%d",
- eth_dev->driver->pci_drv.name,
+ eth_dev->driver->pci_drv.driver.name,
I40E_FDIR_MZ_NAME,
eth_dev->data->port_id);
mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
@@ -356,8 +359,15 @@ i40e_init_flx_pld(struct i40e_pf *pf)
/* initialize the masks */
for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
- if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype))
- continue;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (!I40E_VALID_PCTYPE_X722(
+ (enum i40e_filter_pctype)pctype))
+ continue;
+ } else {
+ if (!I40E_VALID_PCTYPE(
+ (enum i40e_filter_pctype)pctype))
+ continue;
+ }
pf->fdir.flex_mask[pctype].word_mask = 0;
i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
@@ -667,7 +677,16 @@ i40e_fdir_configure(struct rte_eth_dev *dev)
i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
/* configure flex mask*/
for (i = 0; i < conf->nb_flexmasks; i++) {
- pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type);
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+ hw, I40E_GLQF_FD_PCTYPES(
+ (int)i40e_flowtype_to_pctype(
+ conf->flex_mask[i].flow_type)));
+ } else
+ pctype = i40e_flowtype_to_pctype(
+ conf->flex_mask[i].flow_type);
+
i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
}
@@ -732,9 +751,6 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
fdir_input->flow.ip4_flow.ttl :
I40E_FDIR_IP_DEFAULT_TTL;
ip->type_of_service = fdir_input->flow.ip4_flow.tos;
-#ifdef TREX_PATCH
- ip->packet_id = rte_cpu_to_be_16(fdir_input->flow.ip4_flow.ip_id);
-#endif
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
@@ -755,11 +771,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
ip6->vtc_flow =
rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
(fdir_input->flow.ipv6_flow.tc <<
- I40E_FDIR_IPv6_TC_OFFSET)
-#ifdef TREX_PATCH
- | (fdir_input->flow.ipv6_flow.flow_label & 0x000fffff)
-#endif
- );
+ I40E_FDIR_IPv6_TC_OFFSET));
ip6->payload_len =
rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
ip6->proto = fdir_input->flow.ipv6_flow.proto ?
@@ -1011,20 +1023,92 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check whether the flow director filter already exists */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+			    "Failed to delete fdir filter from hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1047,6 +1131,22 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+			    "There's no corresponding flow director filter!");
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1054,13 +1154,32 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
return ret;
}
- pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+ hw, I40E_GLQF_FD_PCTYPES(
+ (int)i40e_flowtype_to_pctype(
+ filter->input.flow_type)));
+ } else
+ pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
if (ret < 0) {
PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
pctype);
return ret;
}
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
return ret;
}
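
The SW-list helpers introduced in this hunk pair an rte_hash keyed on the fdir input with a hash_map array indexed by the position that rte_hash_add_key() returns, plus a TAILQ for ordered traversal. A minimal sketch of that lookup pattern, assuming an initialized DPDK environment and an illustrative key type in place of struct rte_eth_fdir_input:

#include <stdint.h>
#include <stdio.h>
#include <rte_eal.h>
#include <rte_hash.h>
#include <rte_lcore.h>

struct flow_key {      /* illustrative; the driver keys on struct rte_eth_fdir_input */
	uint32_t src_ip;
	uint32_t dst_ip;
	uint16_t src_port;
	uint16_t dst_port;
};

#define MAX_FILTERS 64

int main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return 1;

	struct rte_hash_parameters params = {
		.name = "fdir_demo",
		.entries = MAX_FILTERS,
		.key_len = sizeof(struct flow_key),
		.socket_id = rte_socket_id(),
	};
	struct rte_hash *h = rte_hash_create(&params);
	void *map[MAX_FILTERS] = { NULL };          /* mirrors fdir_info->hash_map */
	struct flow_key key = { .src_ip = 0x0a000001, .dst_ip = 0x0a000002,
				.src_port = 1000, .dst_port = 2000 };
	static int dummy_filter = 42;

	if (!h)
		return 1;

	int pos = rte_hash_add_key(h, &key);        /* returned position indexes the map */
	if (pos >= 0)
		map[pos] = &dummy_filter;

	pos = rte_hash_lookup(h, &key);             /* same position comes back on lookup */
	printf("lookup -> %s\n", (pos >= 0 && map[pos]) ? "found" : "missing");

	rte_hash_del_key(h, &key);
	rte_hash_free(h);
	return 0;
}
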
@@ -1153,12 +1272,8 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
fdirdp->dtype_cmd_cntindex |=
rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
fdirdp->dtype_cmd_cntindex |=
-#ifdef TREX_PATCH
- rte_cpu_to_le_32((fdir_action->stat_count_index <<
-#else
rte_cpu_to_le_32(
((uint32_t)pf->fdir.match_counter_index <<
-#endif
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
@@ -1182,17 +1297,11 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
-#ifndef TREX_PATCH
- /* itay: moved this delay after the check to avoid first check */
rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
-#endif
if ((txdp->cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
break;
-#ifdef TREX_PATCH
- rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
-#endif
}
if (i >= I40E_FDIR_WAIT_COUNT) {
PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
@@ -1200,10 +1309,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
return -ETIMEDOUT;
}
/* totally delay 10 ms to check programming status*/
-#ifndef TREX_PATCH
- /* itay: tests show this is not needed */
rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US);
-#endif
if (i40e_check_fdir_programming_status(rxq) < 0) {
PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
" programming status reported.");
@@ -1217,7 +1323,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -1296,6 +1402,7 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
{
struct i40e_fdir_flex_mask *mask;
struct rte_eth_fdir_flex_mask *ptr = flex_mask;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint16_t flow_type;
uint8_t i, j;
uint16_t off_bytes, mask_tmp;
@@ -1304,8 +1411,13 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
i++) {
mask = &pf->fdir.flex_mask[i];
- if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
- continue;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (!I40E_VALID_PCTYPE_X722((enum i40e_filter_pctype)i))
+ continue;
+ } else {
+ if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
+ continue;
+ }
flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i);
for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
@@ -1472,3 +1584,34 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
+ guarant_cnt, best_cnt);
+}
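
i40e_fdir_filter_restore() above simply replays every filter kept on the SW TAILQ, relying on the list preserving insertion order. A small self-contained sketch of that save-then-replay pattern using the same sys/queue.h macros; the entry type is an illustrative stand-in for struct i40e_fdir_filter:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct filter_entry {
	int rule_id;
	TAILQ_ENTRY(filter_entry) rules;
};

TAILQ_HEAD(filter_list, filter_entry);

int main(void)
{
	struct filter_list list = TAILQ_HEAD_INITIALIZER(list);
	struct filter_entry *f;
	int i;

	/* Populate the SW list, as i40e_sw_fdir_filter_insert() does on each add. */
	for (i = 0; i < 3; i++) {
		f = calloc(1, sizeof(*f));
		if (!f)
			return 1;
		f->rule_id = i;
		TAILQ_INSERT_TAIL(&list, f, rules);
	}

	/* "Restore": walk the list and re-program every saved rule in order. */
	TAILQ_FOREACH(f, &list, rules)
		printf("re-programming rule %d\n", f->rule_id);

	return 0;
}
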
diff --git a/src/dpdk/drivers/net/i40e/i40e_flow.c b/src/dpdk/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 00000000..76bb3320
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,1849 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0xFFFF
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
+static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
+static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
+static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+ .create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
+};
+
+union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched tunnel filter */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_flow_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
+ /* tunnel */
+ { pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
+ { pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
+ { pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
+ { pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
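
i40e_match_pattern() above walks a supported template and the (VOID-stripped) user pattern in lockstep and declares a match only when both reach the END item together. A self-contained sketch of that comparison, with a simplified item-type enum in place of enum rte_flow_item_type:

#include <stdbool.h>
#include <stdio.h>

enum item_type { ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_END };  /* illustrative */

static bool match_pattern(const enum item_type *tmpl, const enum item_type *pattern)
{
	/* Advance both arrays while the item types agree. */
	while (*tmpl == *pattern && *tmpl != ITEM_END) {
		tmpl++;
		pattern++;
	}
	/* Only a simultaneous END on both sides is a match. */
	return *tmpl == ITEM_END && *pattern == ITEM_END;
}

int main(void)
{
	const enum item_type tmpl_ipv4_udp[] = { ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_END };
	const enum item_type flow_a[] = { ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_END };
	const enum item_type flow_b[] = { ITEM_ETH, ITEM_IPV4, ITEM_END };

	printf("flow_a matches: %d\n", match_pattern(tmpl_ipv4_udp, flow_a));  /* 1 */
	printf("flow_b matches: %d\n", match_pattern(tmpl_ipv4_udp, flow_b));  /* 0 */
	return 0;
}
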
+/* Find the parse filter function that matches the pattern, if any */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static uint16_t
+i40e_get_outer_vlan(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint16_t reg_id;
+ uint16_t tpid;
+
+ if (qinq)
+ reg_id = 2;
+ else
+ reg_id = 3;
+
+ i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ &reg_r, NULL);
+
+ tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
+
+ return tpid;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
+ * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
+ * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
+ * FF:FF:FF:FF:FF:FF
+ * 5. Ether_type mask should be 0xFFFF.
+ */
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+ uint16_t outer_tpid;
+
+ outer_tpid = i40e_get_outer_vlan(dev);
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6 ||
+ filter->ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Ethertype action only supports QUEUE or DROP. */
+static int
+i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -rte_errno;
+ }
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_ethertype_action(dev, actions, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported flow type and input set: refer to array
+ * default_inset_table in i40e_ethdev.c.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t flag_offset;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ uint32_t j;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if (eth_spec || eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH spec/mask");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (!ipv4_spec || !ipv4_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv4 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ flag_offset =
+ rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+ if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+ flag_offset & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (!ipv6_spec || !ipv6_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv6 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ /* SRC and DST address of IPv6 shouldn't be masked */
+ for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+ if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+ ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+ }
+
+ input_set |= I40E_INSET_IPV6_SRC;
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(ipv6_spec->hdr.vtc_flow <<
+ I40E_IPV4_TC_SHIFT);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!tcp_spec || !tcp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL TCP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port != UINT16_MAX ||
+ tcp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (!udp_spec || !udp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL UDP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port != UINT16_MAX ||
+ udp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (!sctp_spec || !sctp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL SCTP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid SCTP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port != UINT16_MAX ||
+ sctp_mask->hdr.dst_port != UINT16_MAX ||
+ sctp_mask->hdr.tag != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid SCTP mask");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VF ID for FDIR.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ pctype = i40e_flowtype_to_pctype(flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported flow type");
+ return -rte_errno;
+ }
+
+ if (input_set != i40e_get_default_input_set(pctype)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid input set.");
+ return -rte_errno;
+ }
+ filter->input.flow_type = flow_type;
+
+ return 0;
+}
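For orientation, the following is a hedged application-side sketch (not part of this commit) of an ETH/IPV4/UDP pattern that the parser above accepts: the ETH item carries no spec/mask, the forbidden IPv4 header fields are left unmasked, and the address/port masks are all-ones so the resulting input set matches the assumed default input set for the UDP pctype. The IPv4() helper, addresses and ports are illustrative.

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_ip.h>

/* Build a perfect-match UDP-over-IPv4 FDIR pattern (sketch). */
static void
build_fdir_udp4_pattern(struct rte_flow_item pattern[4],
			struct rte_flow_item_ipv4 *ip_spec,
			struct rte_flow_item_ipv4 *ip_mask,
			struct rte_flow_item_udp *udp_spec,
			struct rte_flow_item_udp *udp_mask)
{
	memset(pattern, 0, 4 * sizeof(*pattern));
	memset(ip_spec, 0, sizeof(*ip_spec));
	memset(ip_mask, 0, sizeof(*ip_mask));
	memset(udp_spec, 0, sizeof(*udp_spec));
	memset(udp_mask, 0, sizeof(*udp_mask));

	/* Illustrative addresses/ports; masks must be all-ones so that the
	 * input set built by the parser equals the default for this pctype.
	 */
	ip_spec->hdr.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 1));
	ip_spec->hdr.dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 2));
	ip_mask->hdr.src_addr = UINT32_MAX;
	ip_mask->hdr.dst_addr = UINT32_MAX;
	udp_spec->hdr.src_port = rte_cpu_to_be_16(32000);
	udp_spec->hdr.dst_port = rte_cpu_to_be_16(53);
	udp_mask->hdr.src_port = UINT16_MAX;
	udp_mask->hdr.dst_port = UINT16_MAX;

	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;	/* no spec/mask allowed */
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[1].spec = ip_spec;
	pattern[1].mask = ip_mask;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[2].spec = udp_spec;
	pattern[2].mask = udp_mask;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
}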
+
+/* Parse to get the action info of a FDIR filter.
+ * FDIR action supports QUEUE or (QUEUE + MARK).
+ */
+static int
+i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ filter->action.flex_off = 0;
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.rx_queue = act_q->index;
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
+ } else {
+ /* DROP carries no queue configuration; act->conf may be NULL */
+ filter->action.behavior = RTE_ETH_FDIR_REJECT;
+ }
+
+ /* Check if the next non-void item is MARK or END. */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->soft_id = mark_spec->id;
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
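A matching action list, again as a hedged sketch outside the commit: QUEUE plus MARK terminated by END, which the action parser above maps to RTE_ETH_FDIR_ACCEPT with RTE_ETH_FDIR_REPORT_ID. The queue index and mark id are illustrative.

#include <rte_flow.h>

/* Steer matching packets to Rx queue 3 and stamp them with FDIR id 0x1234 */
static const struct rte_flow_action_queue fdir_queue = { .index = 3 };
static const struct rte_flow_action_mark fdir_mark = { .id = 0x1234 };

static const struct rte_flow_action fdir_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &fdir_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &fdir_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};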
+
+static int
+i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_fdir_filter *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Parse to get the action info of a tunnel filter
+ * Tunnel action only supports QUEUE.
+ */
+static int
+i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+ if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+ uint32_t j;
+ int is_masked = 0;
+
+ for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
+ if (*(mask + j) == UINT8_MAX) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 0;
+ } else if (*(mask + j) == 0) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return is_masked;
+}
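To illustrate the helper's contract, a small sketch (not compiled into the driver; it only makes sense placed next to the static helper above): an all-ones VNI mask means match on the VNI, an all-zero mask means ignore it, and any partial mask is rejected.

#include <assert.h>
#include <errno.h>
#include <stdint.h>

/* Sketch of i40e_check_tenant_id_mask() classification. */
static void
tenant_id_mask_examples(void)
{
	static const uint8_t full[3] = { 0xFF, 0xFF, 0xFF };
	static const uint8_t wild[3] = { 0x00, 0x00, 0x00 };
	static const uint8_t part[3] = { 0xFF, 0xFF, 0x00 };

	assert(i40e_check_tenant_id_mask(full) == 0);	/* match on VNI */
	assert(i40e_check_tenant_id_mask(wild) == 1);	/* VNI wildcarded */
	assert(i40e_check_tenant_id_mask(part) == -EINVAL); /* rejected */
}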
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+ const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec = NULL;
+ const struct rte_flow_item_eth *i_eth_mask = NULL;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ bool is_vni_masked = 0;
+ enum rte_flow_item_type item_type;
+ bool vxlan_flag = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!vxlan_flag)
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ else
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ }
+
+ if (!vxlan_flag) {
+ o_eth_spec = eth_spec;
+ o_eth_mask = eth_mask;
+ } else {
+ i_eth_spec = eth_spec;
+ i_eth_mask = eth_mask;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vxlan_flag) {
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ } else {
+ /* An outer VLAN item may only serve as a protocol marker */
+ if (vlan_spec || vlan_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* IPv4/IPv6/UDP are used only to describe the protocol;
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec =
+ (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, either spec or mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_mask) {
+ is_vni_masked =
+ i40e_check_tenant_id_mask(vxlan_mask->vni);
+ if (is_vni_masked < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+ }
+ vxlan_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check specification and mask to get the filter type */
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ /* If there's inner vlan */
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+ & I40E_TCI_MASK;
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ /* If there's no inner vlan */
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else if (o_eth_spec && o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask) {
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Not supported by tunnel filter.");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
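As a hedged sketch (not part of this commit) of a pattern the VXLAN parser above classifies as RTE_TUNNEL_FILTER_IMAC_TENID: outer ETH/IPV4/UDP act only as protocol markers with NULL spec/mask, the VXLAN item carries a fully-masked VNI, and the inner ETH item matches on destination MAC only. The VNI and MAC values are illustrative.

#include <string.h>
#include <rte_ether.h>
#include <rte_flow.h>

/* Build an ETH/IPV4/UDP/VXLAN/ETH pattern for an inner-MAC + VNI filter. */
static void
build_vxlan_imac_tenid_pattern(struct rte_flow_item pattern[6],
			       struct rte_flow_item_vxlan *vxlan_spec,
			       struct rte_flow_item_vxlan *vxlan_mask,
			       struct rte_flow_item_eth *inner_spec,
			       struct rte_flow_item_eth *inner_mask)
{
	/* Illustrative inner destination MAC */
	static const struct ether_addr inner_dst = {
		.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }
	};

	memset(pattern, 0, 6 * sizeof(*pattern));
	memset(vxlan_spec, 0, sizeof(*vxlan_spec));
	memset(vxlan_mask, 0, sizeof(*vxlan_mask));
	memset(inner_spec, 0, sizeof(*inner_spec));
	memset(inner_mask, 0, sizeof(*inner_mask));

	vxlan_spec->vni[2] = 0x64;			/* VNI 100, illustrative */
	memset(vxlan_mask->vni, 0xFF, sizeof(vxlan_mask->vni));

	ether_addr_copy(&inner_dst, &inner_spec->dst);
	memset(&inner_mask->dst, 0xFF, ETHER_ADDR_LEN);	/* SRC/type stay zero */

	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;	/* outer, no spec/mask */
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
	pattern[3].spec = vxlan_spec;
	pattern[3].mask = vxlan_mask;
	pattern[4].type = RTE_FLOW_ITEM_TYPE_ETH;	/* inner MAC */
	pattern[4].spec = inner_spec;
	pattern[4].mask = inner_mask;
	pattern[5].type = RTE_FLOW_ITEM_TYPE_END;
}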
+
+static int
+i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ int ret;
+
+ ret = i40e_flow_parse_vxlan_pattern(pattern, error, filter);
+
+ return ret;
+}
+
+static int
+i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_tunnel_filter_conf *tunnel_filter =
+ &filter->tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items; /* internal pattern w/o VOID items */
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ /* Find if there's matched parse filter function */
+ parse_filter = i40e_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_free(items);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ return -rte_errno;
+ }
+
+ ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+ rte_free(items);
+
+ return ret;
+}
+
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_flow *flow;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_tunnel_filter_set(pf,
+ &cons_filter.tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ default:
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
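From the application side, the entry points above are reached through the generic rte_flow calls. A minimal sketch, assuming the port_id type of this DPDK generation and a pattern/action list built e.g. as in the sketches above; a rule returned here is later released with rte_flow_destroy(), which lands in i40e_flow_destroy() below.

#include <stdio.h>
#include <rte_flow.h>

/* Validate, then install a rule on a port (sketch). */
static struct rte_flow *
install_rule(uint8_t port_id,
	     const struct rte_flow_item pattern[],
	     const struct rte_flow_action actions[])
{
	struct rte_flow_attr attr = { .ingress = 1 };	/* ingress rules only */
	struct rte_flow_error err = { 0 };
	struct rte_flow *flow = NULL;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (!flow)
		printf("flow rejected: %s\n",
		       err.message ? err.message : "(no message)");
	return flow;
}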
+
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_filter_type filter_type = flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_flow_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_flow_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ } else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ int ret = 0;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.inner_mac);
+ cld_filter.inner_vlan = filter->input.inner_vlan;
+ cld_filter.flags = filter->input.flags;
+ cld_filter.tenant_id = filter->input.tenant_id;
+ cld_filter.queue_number = filter->queue;
+
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter, 1);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_flow_flush_fdir_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_ethertype_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush ethertype flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_tunnel_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
+ return ret;
+}
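The corresponding application-side flush, as a minimal sketch under the same assumptions as the previous snippet:

#include <stdio.h>
#include <rte_flow.h>

/* Remove every rte_flow rule installed on the port (sketch). */
static void
remove_all_rules(uint8_t port_id)
{
	struct rte_flow_error err = { 0 };

	if (rte_flow_flush(port_id, &err) != 0)
		printf("flow flush failed: %s\n",
		       err.message ? err.message : "(no message)");
}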
+
+static int
+i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret;
+
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all ethertype filters */
+static int
+i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_flow_destroy_ethertype_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all tunnel filters */
+static int
+i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_flow_destroy_tunnel_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
diff --git a/src/dpdk/drivers/net/i40e/i40e_pf.c b/src/dpdk/drivers/net/i40e/i40e_pf.c
index d5b2d450..f771dfb6 100644
--- a/src/dpdk/drivers/net/i40e/i40e_pf.c
+++ b/src/dpdk/drivers/net/i40e/i40e_pf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,6 +55,7 @@
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
+#include "rte_pmd_i40e.h"
#define I40E_CFG_CRCSTRIP_DEFAULT 1
@@ -138,7 +139,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
abs_vf_id = vf_id + hw->func_caps.vf_base_id;
/* Notify VF that we are in VFR progress */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);
/*
* If require a SW VF reset, a VFLR interrupt will be generated,
@@ -219,7 +220,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
}
/* Reset done, Set COMPLETE flag and clear reset bit */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
@@ -247,10 +248,12 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
return -EFAULT;
}
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+
return ret;
}
-static int
+int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
uint32_t opcode,
uint32_t retval,
@@ -272,14 +275,30 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
}
static void
-i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
struct i40e_virtchnl_version_info info;
- info.major = I40E_DPDK_VERSION_MAJOR;
- info.minor = I40E_DPDK_VERSION_MINOR;
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
- I40E_SUCCESS, (uint8_t *)&info, sizeof(info));
+ /* Respond like a Linux PF host in order to support both DPDK VF and
+ * Linux VF drivers. The cost is that DPDK-host-specific features such
+ * as CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not be available.
+ *
+ * The DPDK VF also can't identify the host driver by the version number
+ * returned; it always assumes it is talking to a Linux PF.
+ */
+ info.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS,
+ (uint8_t *)&info,
+ sizeof(info));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&info,
+ sizeof(info));
}
static int
@@ -292,13 +311,20 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
}
static int
-i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
struct i40e_virtchnl_vf_resource *vf_res = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint32_t len = 0;
int ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
/* only have 1 VSI by default */
len = sizeof(struct i40e_virtchnl_vf_resource) +
I40E_DEFAULT_VF_VSI_NUM *
@@ -321,8 +347,7 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
/* Change below setting if PF host can support more VSIs for VF */
vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
- /* As assume Vf only has single VSI now, always return 0 */
- vf_res->vsi_res[0].vsi_id = 0;
+ vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
ether_addr_copy(&vf->mac_addr,
(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
@@ -393,10 +418,12 @@ i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(tx_ctx));
- tx_ctx.new_context = 1;
tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->ring_len;
tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
+ tx_ctx.head_wb_ena = txq->headwb_enabled;
+ tx_ctx.head_wb_addr = txq->dma_headwb_addr;
+
err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
if (err != I40E_SUCCESS)
return err;
@@ -423,7 +450,8 @@ i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
@@ -432,11 +460,18 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
struct i40e_virtchnl_queue_pair_info *vc_qpi;
int i, ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
vc_vqci->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
@@ -482,7 +517,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
@@ -491,11 +527,19 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
int i, ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
vc_vqcei->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
@@ -537,13 +581,125 @@ send_msg:
return ret;
}
+static void
+i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_vector_map *vvm)
+{
+#define BITS_PER_CHAR 8
+ uint64_t linklistmap = 0, tempmap;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t qid;
+ bool b_first_q = true;
+ enum i40e_queue_type qtype;
+ uint16_t vector_id;
+ uint32_t reg, reg_idx;
+ uint16_t itr_idx = 0, i;
+
+ vector_id = vvm->vector_id;
+ /* setup the head */
+ if (!vector_id)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(
+ ((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
+ + (vector_id - 1));
+
+ if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
+ I40E_WRITE_REG(hw, reg_idx,
+ I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+ goto cfg_irq_done;
+ }
+
+ /* sort all rx and tx queues */
+ tempmap = vvm->rxq_map;
+ for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1 << (2 * i));
+ tempmap >>= 1;
+ }
+
+ tempmap = vvm->txq_map;
+ for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1 << (2 * i + 1));
+ tempmap >>= 1;
+ }
+
+ /* Link all rx and tx queues into a chained list */
+ tempmap = linklistmap;
+ i = 0;
+ b_first_q = true;
+ do {
+ if (tempmap & 0x1) {
+ qtype = (enum i40e_queue_type)(i % 2);
+ qid = vf->vsi->base_queue + i / 2;
+ if (b_first_q) {
+ /* This is header */
+ b_first_q = false;
+ reg = ((qtype <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+ | qid);
+ } else {
+ /* element in the link list */
+ reg = (vector_id) |
+ (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ }
+ I40E_WRITE_REG(hw, reg_idx, reg);
+ /* find next register to program */
+ switch (qtype) {
+ case I40E_QUEUE_TYPE_RX:
+ reg_idx = I40E_QINT_RQCTL(qid);
+ itr_idx = vvm->rxitr_idx;
+ break;
+ case I40E_QUEUE_TYPE_TX:
+ reg_idx = I40E_QINT_TQCTL(qid);
+ itr_idx = vvm->txitr_idx;
+ break;
+ default:
+ break;
+ }
+ }
+ i++;
+ tempmap >>= 1;
+ } while (tempmap);
+
+ /* Terminate the link list */
+ reg = (vector_id) |
+ (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ I40E_WRITE_REG(hw, reg_idx, reg);
+
+cfg_irq_done:
+ I40E_WRITE_FLUSH(hw);
+}
+
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
- uint8_t *msg, uint16_t msglen)
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_virtchnl_irq_map_info *irqmap =
(struct i40e_virtchnl_irq_map_info *)msg;
+ struct i40e_virtchnl_vector_map *map;
+ int i;
+ uint16_t vector_id;
+ unsigned long qbit_max;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
PMD_DRV_LOG(ERR, "buffer too short");
@@ -551,23 +707,46 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
goto send_msg;
}
- /* Assume VF only have 1 vector to bind all queues */
- if (irqmap->num_vectors != 1) {
- PMD_DRV_LOG(ERR, "DKDK host only support 1 vector");
- ret = I40E_ERR_PARAM;
+ /* The PF host supports both DPDK and Linux VF drivers, identified by
+ * the number of vectors requested.
+ */
+
+ /* A DPDK VF only requires a single vector */
+ if (irqmap->num_vectors == 1) {
+ /* This MSIX intr store the intr in VF range */
+ vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
+ vf->vsi->nb_msix = irqmap->num_vectors;
+ vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+
+ /* Don't care how the TX/RX queue mapping with this vector.
+ * Link all VF RX queues together. Only did mapping work.
+ * VF can disable/enable the intr by itself.
+ */
+ i40e_vsi_queues_bind_intr(vf->vsi);
goto send_msg;
}
- /* This MSIX intr store the intr in VF range */
- vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
- vf->vsi->nb_msix = irqmap->num_vectors;
- vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+ /* Then, it's Linux VF driver */
+ qbit_max = 1 << pf->vf_nb_qp_max;
+ for (i = 0; i < irqmap->num_vectors; i++) {
+ map = &irqmap->vecmap[i];
+
+ vector_id = map->vector_id;
+ /* validate msg params */
+ if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
+ i40e_pf_config_irq_link_list(vf, map);
+ } else {
+ /* configured queue size exceeds the limit */
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
- /* Don't care how the TX/RX queue mapping with this vector.
- * Link all VF RX queues together. Only did mapping work.
- * VF can disable/enable the intr by itself.
- */
- i40e_vsi_queues_bind_intr(vf->vsi);
send_msg:
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
ret, NULL, 0);
@@ -646,12 +825,21 @@ send_msg:
static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_queue_select *q_sel =
(struct i40e_virtchnl_queue_select *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*q_sel)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -669,7 +857,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_ether_addr_list *addr_list =
@@ -678,6 +867,14 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
int i;
struct ether_addr *mac;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
memset(&filter, 0 , sizeof(struct i40e_mac_filter_info));
if (msg == NULL || msglen <= sizeof(*addr_list)) {
@@ -690,8 +887,8 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
mac = (struct ether_addr *)(addr_list->list[i].addr);
(void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
- if(!is_valid_assigned_ether_addr(mac) ||
- i40e_vsi_add_mac(vf->vsi, &filter)) {
+ if (is_zero_ether_addr(mac) ||
+ i40e_vsi_add_mac(vf->vsi, &filter)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
}
@@ -707,7 +904,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_ether_addr_list *addr_list =
@@ -715,6 +913,14 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
int i;
struct ether_addr *mac;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*addr_list)) {
PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
ret = I40E_ERR_PARAM;
@@ -723,7 +929,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
for (i = 0; i < addr_list->num_elements; i++) {
mac = (struct ether_addr *)(addr_list->list[i].addr);
- if(!is_valid_assigned_ether_addr(mac) ||
+ if(is_zero_ether_addr(mac) ||
i40e_vsi_delete_mac(vf->vsi, mac)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
@@ -739,7 +945,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
- uint8_t *msg, uint16_t msglen)
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
@@ -747,6 +954,14 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
int i;
uint16_t *vid;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
PMD_DRV_LOG(ERR, "add_vlan argument too short");
ret = I40E_ERR_PARAM;
@@ -771,7 +986,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
@@ -779,6 +995,14 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
int i;
uint16_t *vid;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
PMD_DRV_LOG(ERR, "delete_vlan argument too short");
ret = I40E_ERR_PARAM;
@@ -803,7 +1027,8 @@ static int
i40e_pf_host_process_cmd_config_promisc_mode(
struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_promisc_info *promisc =
@@ -811,6 +1036,14 @@ i40e_pf_host_process_cmd_config_promisc_mode(
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
bool unicast = FALSE, multicast = FALSE;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*promisc)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -836,39 +1069,43 @@ send_msg:
}
static int
-i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
{
i40e_update_vsi_stats(vf->vsi);
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_SUCCESS, (uint8_t *)&vf->vsi->eth_stats,
- sizeof(vf->vsi->eth_stats));
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
return I40E_SUCCESS;
}
-static void
-i40e_pf_host_process_cmd_get_link_status(struct i40e_pf_vf *vf)
-{
- struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vf->pf->main_vsi);
-
- /* Update link status first to acquire latest link change */
- i40e_dev_link_update(dev, 1);
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_LINK_STAT,
- I40E_SUCCESS, (uint8_t *)&dev->data->dev_link,
- sizeof(struct rte_eth_link));
-}
-
static int
i40e_pf_host_process_cmd_cfg_vlan_offload(
struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_offload_info *offload =
(struct i40e_virtchnl_vlan_offload_info *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*offload)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -889,12 +1126,21 @@ send_msg:
static int
i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_pvid_info *tpid_info =
(struct i40e_virtchnl_pvid_info *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*tpid_info)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -910,6 +1156,20 @@ send_msg:
}
void
+i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
+{
+ struct i40e_virtchnl_pf_event event;
+
+ event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ event.event_data.link_event.link_status =
+ dev->data->dev_link.link_status;
+ event.event_data.link_event.link_speed =
+ (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
+void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint16_t abs_vf_id, uint32_t opcode,
__rte_unused uint32_t retval,
@@ -921,6 +1181,8 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
struct i40e_pf_vf *vf;
/* AdminQ will pass absolute VF id, transfer to internal vf id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
+ struct rte_pmd_i40e_mb_event_param cb_param;
+ bool b_op = TRUE;
if (vf_id > pf->vf_num - 1 || !pf->vfs) {
PMD_DRV_LOG(ERR, "invalid argument");
@@ -935,10 +1197,35 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
return;
}
+ /**
+ * initialise the structure passed to the user application;
+ * the application's response comes back in the retval field
+ */
+ cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
+ cb_param.vfid = vf_id;
+ cb_param.msg_type = opcode;
+ cb_param.msg = (void *)msg;
+ cb_param.msglen = msglen;
+
+ /**
+ * Ask user application if we're allowed to perform those functions.
+ * If we get cb_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
+ * then business as usual.
+ * If RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
+ * do nothing and send NOT_SUPPORTED to the VF, since the PF must always
+ * send a response and plain ACK/NACK replies are not defined.
+ */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
+ if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
+ PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
+ opcode);
+ b_op = FALSE;
+ }
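For context, a hedged sketch of the application side of this callback, assuming the 17.02-era callback prototype in which the rte_pmd_i40e_mb_event_param filled in above arrives through the callback's void * argument; the policy shown (log and allow everything) is illustrative only.

#include <stdio.h>
#include <rte_ethdev.h>
#include "rte_pmd_i40e.h"

/* Inspect VF mailbox requests before the PF host code services them. */
static void
vf_mbox_event_cb(uint8_t port_id __rte_unused,
		 enum rte_eth_event_type type __rte_unused,
		 void *param)
{
	struct rte_pmd_i40e_mb_event_param *p = param;

	printf("VF %u sent mailbox opcode %u\n",
	       (unsigned int)p->vfid, (unsigned int)p->msg_type);
	p->retval = RTE_PMD_I40E_MB_EVENT_PROCEED;	/* let the PF handle it */
}

static int
register_vf_mbox_cb(uint8_t port_id)
{
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_VF_MBOX,
					      vf_mbox_event_cb, NULL);
}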
+
switch (opcode) {
case I40E_VIRTCHNL_OP_VERSION :
PMD_DRV_LOG(INFO, "OP_VERSION received");
- i40e_pf_host_process_cmd_version(vf);
+ i40e_pf_host_process_cmd_version(vf, b_op);
break;
case I40E_VIRTCHNL_OP_RESET_VF :
PMD_DRV_LOG(INFO, "OP_RESET_VF received");
@@ -946,64 +1233,72 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
- i40e_pf_host_process_cmd_get_vf_resource(vf);
+ i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
- i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
- msglen);
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
- i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
- i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
+ if (b_op) {
+ i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
+ i40e_notify_vf_link_status(dev, vf);
+ } else {
+ i40e_pf_host_send_msg_to_vf(
+ vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ }
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
- i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
+ i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
+ i40e_pf_host_process_cmd_add_ether_address(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
+ i40e_pf_host_process_cmd_del_ether_address(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
- i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
+ i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
- i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
+ i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
- i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_GET_STATS:
PMD_DRV_LOG(INFO, "OP_GET_STATS received");
- i40e_pf_host_process_cmd_get_stats(vf);
- break;
- case I40E_VIRTCHNL_OP_GET_LINK_STAT:
- PMD_DRV_LOG(INFO, "OP_GET_LINK_STAT received");
- i40e_pf_host_process_cmd_get_link_status(vf);
+ i40e_pf_host_process_cmd_get_stats(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
- i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
+ i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
- i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
+ i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
break;
/* Don't add command supported below, which will
* return an error code.
diff --git a/src/dpdk/drivers/net/i40e/i40e_pf.h b/src/dpdk/drivers/net/i40e/i40e_pf.h
index 9c01829a..b4c22876 100644
--- a/src/dpdk/drivers/net/i40e/i40e_pf.h
+++ b/src/dpdk/drivers/net/i40e/i40e_pf.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,20 +48,14 @@
#define I40E_DPDK_OFFSET 0x100
-enum i40e_pf_vfr_state {
- I40E_PF_VFR_INPROGRESS = 0,
- I40E_PF_VFR_COMPLETED = 1,
-};
-
/* DPDK pf driver specific command to VF */
enum i40e_virtchnl_ops_dpdk {
/*
* Keep some gap between Linux PF commands and
* DPDK PF extended commands.
*/
- I40E_VIRTCHNL_OP_GET_LINK_STAT = I40E_VIRTCHNL_OP_VERSION +
+ I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD = I40E_VIRTCHNL_OP_VERSION +
I40E_DPDK_OFFSET,
- I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
};
@@ -124,5 +118,7 @@ void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint8_t *msg, uint16_t msglen);
int i40e_pf_host_init(struct rte_eth_dev *dev);
int i40e_pf_host_uninit(struct rte_eth_dev *dev);
+void i40e_notify_vf_link_status(struct rte_eth_dev *dev,
+ struct i40e_pf_vf *vf);
#endif /* _I40E_PF_H_ */
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx.c b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
index 19b431c3..608685fa 100644
--- a/src/dpdk/drivers/net/i40e/i40e_rxtx.c
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,6 +50,8 @@
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -79,6 +81,17 @@
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
+#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT)
+
+#define I40E_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -138,12 +151,21 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword)
uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
#define I40E_RX_ERR_BITS 0x3f
- if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
+ if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) {
+ flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
return flags;
+ }
+
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ flags |= PKT_RX_IP_CKSUM_GOOD;
+
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
flags |= PKT_RX_EIP_CKSUM_BAD;
@@ -174,569 +196,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
}
#endif
-/* For each value it means, datasheet of hardware can tell more details
- *
- * @note: fix i40e_dev_supported_ptypes_get() if any change here.
- */
-static inline uint32_t
-i40e_rxd_pkt_type_mapping(uint8_t ptype)
-{
- static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
- /* L2 types */
- /* [0] reserved */
- [1] = RTE_PTYPE_L2_ETHER,
- [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
- /* [3] - [5] reserved */
- [6] = RTE_PTYPE_L2_ETHER_LLDP,
- /* [7] - [10] reserved */
- [11] = RTE_PTYPE_L2_ETHER_ARP,
- /* [12] - [21] reserved */
-
- /* Non tunneled IPv4 */
- [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- /* [25] reserved */
- [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
-
- /* IPv4 --> IPv4 */
- [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [32] reserved */
- [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> IPv6 */
- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [39] reserved */
- [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN */
- [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
- [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [47] reserved */
- [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
- [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [54] reserved */
- [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
- [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
- [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [62] reserved */
- [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
- [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [69] reserved */
- [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
- [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
- [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [77] reserved */
- [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
- [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [84] reserved */
- [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* Non tunneled IPv6 */
- [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- /* [91] reserved */
- [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
-
- /* IPv6 --> IPv4 */
- [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [98] reserved */
- [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> IPv6 */
- [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [105] reserved */
- [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN */
- [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
- [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [113] reserved */
- [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
- [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [120] reserved */
- [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
- [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
- [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [128] reserved */
- [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
- [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [135] reserved */
- [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
- [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
- [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [143] reserved */
- [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
- [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [150] reserved */
- [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* L2 NSH packet type */
- [154] = RTE_PTYPE_L2_ETHER_NSH,
- [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
- [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
-
- /* All others reserved */
- };
-
- return type_table[ptype];
-}
-
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02
@@ -779,33 +238,65 @@ i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
#endif
return flags;
}
+
+static inline void
+i40e_parse_tunneling_params(uint64_t ol_flags,
+ union i40e_tx_offload tx_offload,
+ uint32_t *cd_tunneling)
+{
+ /* EIPT: External (outer) IP header type */
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ else if (ol_flags & PKT_TX_OUTER_IPV4)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ else if (ol_flags & PKT_TX_OUTER_IPV6)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ /* EIPLEN: External (outer) IP header length, in DWords */
+ *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* L4TUNT: L4 Tunneling Type */
+ switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_IPIP:
+ /* for non UDP / GRE tunneling, set to 00b */
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ case PKT_TX_TUNNEL_GENEVE:
+ *cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ *cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
+ break;
+ default:
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
+ return;
+ }
+
+ /* L4TUNLEN: L4 Tunneling Length, in Words
+ *
+	 * We depend on the app to set rte_mbuf.l2_len correctly.
+ * For IP in GRE it should be set to the length of the GRE
+ * header;
+ * for MAC in GRE or MAC in UDP it should be set to the length
+	 * of the GRE or UDP headers plus the inner MAC up to and
+	 * including its last Ethertype.
+ */
+ *cd_tunneling |= (tx_offload.l2_len >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+}
+
static inline void
i40e_txd_enable_checksum(uint64_t ol_flags,
uint32_t *td_cmd,
uint32_t *td_offset,
- union i40e_tx_offload tx_offload,
- uint32_t *cd_tunneling)
+ union i40e_tx_offload tx_offload)
{
- /* UDP tunneling packet TX checksum offload */
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
-
+ /* Set MACLEN */
+ if (ol_flags & PKT_TX_TUNNEL_MASK)
*td_offset |= (tx_offload.outer_l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
-
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
- else if (ol_flags & PKT_TX_OUTER_IPV4)
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- else if (ol_flags & PKT_TX_OUTER_IPV6)
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-
- /* Now set the ctx descriptor fields */
- *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- (tx_offload.l2_len >> 1) <<
- I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
- } else
+ else
*td_offset |= (tx_offload.l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
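
Note: the hunk above splits the old combined checksum helper. The tunnel-related
context-descriptor fields (EIPT, EIPLEN, L4TUNT, L4TUNLEN) now come from the new
i40e_parse_tunneling_params(), driven purely by the mbuf offload flags and length
fields, while i40e_txd_enable_checksum() keeps only the MACLEN and per-layer
checksum settings. A minimal sketch of the mbuf fields an application would be
expected to fill for a VXLAN-in-IPv4 TCP packet is below; the helper name and the
concrete header lengths are illustrative, not part of this patch.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_mbuf.h>

/* Hypothetical helper: lengths consumed by i40e_parse_tunneling_params()
 * (outer_l3_len -> EIPLEN, l2_len -> L4TUNLEN) and by the MACLEN setting
 * above (outer_l2_len), plus the offload flags that select the tunnel type. */
static void
fill_vxlan_tx_offload(struct rte_mbuf *m)
{
	m->outer_l2_len = sizeof(struct ether_hdr);	/* outer MAC */
	m->outer_l3_len = sizeof(struct ipv4_hdr);	/* outer IPv4 */
	m->l2_len = sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr) +
		    sizeof(struct ether_hdr);		/* UDP + VXLAN + inner MAC */
	m->l3_len = sizeof(struct ipv4_hdr);		/* inner IPv4 */
	m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}
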
@@ -934,15 +425,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
"rxq->rx_free_thresh=%d",
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
- RTE_PMD_I40E_RX_MAX_BURST))) {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
- "rxq->nb_rx_desc=%d, "
- "I40E_MAX_RING_DESC=%d, "
- "RTE_PMD_I40E_RX_MAX_BURST=%d",
- rxq->nb_rx_desc, I40E_MAX_RING_DESC,
- RTE_PMD_I40E_RX_MAX_BURST);
- ret = -EINVAL;
}
#else
ret = -EINVAL;
@@ -994,6 +476,8 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
I40E_RXD_QW1_STATUS_SHIFT;
}
+ rte_smp_rmb();
+
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
@@ -1104,7 +588,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
	/* Update rx tail register */
rte_wmb();
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+ I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -1484,7 +968,8 @@ i40e_calc_context_desc(uint64_t flags)
{
static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TCP_SEG |
- PKT_TX_QINQ_PKT;
+ PKT_TX_QINQ_PKT |
+ PKT_TX_TUNNEL_MASK;
#ifdef RTE_LIBRTE_IEEE1588
mask |= PKT_TX_IEEE1588_TMST;
@@ -1506,7 +991,7 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
}
/**
- * in case of tunneling packet, the outer_l2_len and
+	 * in case of a non-tunneling packet, the outer_l2_len and
* outer_l3_len must be 0.
*/
hdr_len = tx_offload.outer_l2_len +
@@ -1623,12 +1108,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Always enable CRC offload insertion */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
- /* Enable checksum offloading */
+ /* Fill in tunneling parameters if necessary */
cd_tunneling_params = 0;
- if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) {
- i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
- tx_offload, &cd_tunneling_params);
- }
+ if (ol_flags & PKT_TX_TUNNEL_MASK)
+ i40e_parse_tunneling_params(ol_flags, tx_offload,
+ &cd_tunneling_params);
+ /* Enable checksum offloading */
+ if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)
+ i40e_txd_enable_checksum(ol_flags, &td_cmd,
+ &td_offset, tx_offload);
if (nb_ctx) {
/* Setup TX context descriptor if required */
@@ -1747,7 +1235,7 @@ end_of_tx:
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -1899,7 +1387,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
/* Update the tx tail register */
rte_wmb();
- I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
@@ -1930,6 +1418,63 @@ i40e_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * m->nb_segs is uint8_t, so nb_segs is always less than
+		 * m->nb_segs is uint8_t, so nb_segs never exceeds
+ * We check only a condition for nb_segs > I40E_TX_MAX_MTU_SEG.
+ */
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+ } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
+ (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+			/* An MSS outside the range (256B - 9674B) is considered
+ * malicious
+ */
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+ return i;
+}
+
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index
* application used, which assume having sequential ones. But from driver's
@@ -2136,7 +1681,9 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
#endif
- dev->rx_pkt_burst == i40e_recv_scattered_pkts)
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec)
return ptypes;
return NULL;
}
@@ -2161,21 +1708,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t base, bsf, tc_mapping;
int use_def_burst_func = 1;
-#define TREX_PATCH_LOW_LATENCY
-#ifdef TREX_PATCH_LOW_LATENCY
- int is_vf = 0;
-#endif
-
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
struct i40e_vf *vf =
I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
-#ifdef TREX_PATCH_LOW_LATENCY
- is_vf = 1;
-#endif
- } else {
+ } else
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
- }
if (vsi == NULL) {
PMD_DRV_LOG(ERR, "VSI not available or queue "
@@ -2224,8 +1762,19 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
	/* Allocate the maximum number of RX ring hardware descriptors. */
- ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
- ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+ len = I40E_MAX_RING_DESC;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ /**
+ * Allocating a little more memory because vectorized/bulk_alloc Rx
+	 * functions don't check boundaries each time.
+ */
+ len += RTE_PMD_I40E_RX_MAX_BURST;
+#endif
+
+ ring_size = RTE_ALIGN(len * sizeof(union i40e_rx_desc),
+ I40E_DMA_MEM_ALIGN);
+
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
@@ -2280,12 +1829,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
ad->rx_bulk_alloc_allowed = false;
}
-#ifdef TREX_PATCH_LOW_LATENCY
- if (! is_vf)
- rxq->dcb_tc =0;
- else // The entire for below is in the else
-#endif
-
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
continue;
@@ -2393,25 +1936,12 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t i, base, bsf, tc_mapping;
-#ifdef TREX_PATCH_LOW_LATENCY
- u8 low_latency = 0;
- int is_vf = 1;
-#endif
-
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
struct i40e_vf *vf =
I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
- } else {
+ } else
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-#ifdef TREX_PATCH_LOW_LATENCY
- if (queue_idx == pf->dev_data->nb_tx_queues-1) {
- low_latency = 1;
- }
- is_vf = 0;
-#endif
- }
-
if (vsi == NULL) {
PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
@@ -2461,8 +1991,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
return I40E_ERR_PARAM;
}
if (tx_free_thresh >= (nb_desc - 3)) {
- PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
- "tx_free_thresh must be less than the "
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
"number of TX descriptors minus 3. "
"(tx_free_thresh=%u port=%d queue=%d)",
(unsigned int)tx_free_thresh,
@@ -2567,15 +2096,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Use a simple TX queue without offloads or multi segs if possible */
i40e_set_tx_function_flag(dev, txq);
-#ifdef TREX_PATCH_LOW_LATENCY
- if (! is_vf) {
- if (low_latency) {
- txq->dcb_tc=1;
- }else{
- txq->dcb_tc=0;
- }
- } else // The entire for below is in the else
-#endif
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
continue;
@@ -2985,11 +2505,15 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
i40e_reset_tx_queue(dev->data->tx_queues[i]);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
i40e_reset_rx_queue(dev->data->rx_queues[i]);
}
@@ -3003,12 +2527,16 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
}
@@ -3191,7 +2719,7 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
struct i40e_rx_queue *rxq =
dev->data->rx_queues[i];
- if (i40e_rxq_vec_setup(rxq)) {
+ if (rxq && i40e_rxq_vec_setup(rxq)) {
ad->rx_vec_allowed = false;
break;
}
@@ -3253,7 +2781,8 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
- rxq->rx_using_sse = rx_using_sse;
+ if (rxq)
+ rxq->rx_using_sse = rx_using_sse;
}
}
}
@@ -3292,7 +2821,7 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
struct i40e_tx_queue *txq =
dev->data->tx_queues[i];
- if (i40e_txq_vec_setup(txq)) {
+ if (txq && i40e_txq_vec_setup(txq)) {
ad->tx_vec_allowed = false;
break;
}
@@ -3308,9 +2837,11 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
}
+ dev->tx_pkt_prepare = NULL;
} else {
PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
}
}
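
With the change above, the full-featured Tx path now exports the new i40e_prep_pkts()
through dev->tx_pkt_prepare (the simple Tx path leaves it NULL). A hedged sketch of how
an application would drive the prepare stage before bursting is below; the wrapper name
and the port/queue values are placeholders, not part of this patch.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical wrapper: i40e_prep_pkts() stops at the first packet with too
 * many segments, an out-of-range TSO MSS, or offload flags outside
 * I40E_TX_OFFLOAD_MASK, reporting the reason through rte_errno. */
static uint16_t
send_burst_checked(uint8_t port, uint16_t queue,
		   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_ok = rte_eth_tx_prepare(port, queue, pkts, nb_pkts);

	/* packets [nb_ok, nb_pkts) were rejected; the caller decides whether
	 * to fix or drop them before retrying */
	return rte_eth_tx_burst(port, queue, pkts, nb_ok);
}
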
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx.h b/src/dpdk/drivers/net/i40e/i40e_rxtx.h
index 98179f00..9df8a56f 100644
--- a/src/dpdk/drivers/net/i40e/i40e_rxtx.h
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx.h
@@ -63,6 +63,12 @@
#define I40E_MIN_RING_DESC 64
#define I40E_MAX_RING_DESC 4096
+#define I40E_MIN_TSO_MSS 256
+#define I40E_MAX_TSO_MSS 9674
+
+#define I40E_TX_MAX_SEG UINT8_MAX
+#define I40E_TX_MAX_MTU_SEG 8
+
#undef container_of
#define container_of(ptr, type, member) ({ \
typeof(((type *)0)->member)(*__mptr) = (ptr); \
@@ -223,6 +229,8 @@ uint16_t i40e_recv_scattered_pkts(void *rx_queue,
uint16_t i40e_xmit_pkts(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
int i40e_tx_queue_init(struct i40e_tx_queue *txq);
int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
void i40e_free_tx_resources(struct i40e_tx_queue *txq);
@@ -255,4 +263,567 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
struct i40e_tx_queue *txq);
void i40e_set_tx_function(struct rte_eth_dev *dev);
+/* For the meaning of each value, the hardware datasheet provides more detail
+ *
+ * @note: fix i40e_dev_supported_ptypes_get() if any change here.
+ */
+static inline uint32_t
+i40e_rxd_pkt_type_mapping(uint8_t ptype)
+{
+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* L2 types */
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
+ /* [3] - [5] reserved */
+ [6] = RTE_PTYPE_L2_ETHER_LLDP,
+ /* [7] - [10] reserved */
+ [11] = RTE_PTYPE_L2_ETHER_ARP,
+ /* [12] - [21] reserved */
+
+ /* Non tunneled IPv4 */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv4 --> IPv4 */
+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [32] reserved */
+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> IPv6 */
+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [39] reserved */
+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN */
+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
+ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [47] reserved */
+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [54] reserved */
+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [62] reserved */
+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [69] reserved */
+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [77] reserved */
+ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [84] reserved */
+ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* Non tunneled IPv6 */
+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [91] reserved */
+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv6 --> IPv4 */
+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [98] reserved */
+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> IPv6 */
+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [105] reserved */
+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN */
+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [113] reserved */
+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [120] reserved */
+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [128] reserved */
+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [135] reserved */
+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [143] reserved */
+ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [150] reserved */
+ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* L2 NSH packet type */
+ [154] = RTE_PTYPE_L2_ETHER_NSH,
+ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* All others reserved */
+ };
+
+ return type_table[ptype];
+}
+
#endif /* _I40E_RXTX_H_ */
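
The ptype translation table is moved from i40e_rxtx.c into this header so the scalar
and the new SSE/NEON vector Rx paths can share one copy. As a rough sketch, mirroring
what the scalar receive routine already does (qword1 and mb stand in for the descriptor
qword and the mbuf being filled), the table is consumed like this:

	/* Recover the 8-bit hardware ptype from the Rx descriptor and translate
	 * it into the RTE_PTYPE_* encoding exposed in mbuf->packet_type. */
	uint8_t hw_ptype = (uint8_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
				     I40E_RXD_QW1_PTYPE_SHIFT);
	mb->packet_type = i40e_rxd_pkt_type_mapping(hw_ptype);
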
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h b/src/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h
new file mode 100644
index 00000000..37455589
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -0,0 +1,251 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_RXTX_VEC_COMMON_H_
+#define _I40E_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len)
+ end->data_len -= rxq->crc_len;
+ else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct i40e_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ } else {
+ for (i = rxq->rx_tail;
+ i != rxq->rxrearm_start;
+ i = (i + 1) & mask) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline int
+i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+static inline int
+i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+	/* without rx ol_flags, no VP flag report */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0 ||
+ rxmode->hw_ip_checksum != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+#endif
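i40e_rxq_vec_setup_default() above packs the mbuf fields covered by rearm_data (data_off, refcnt, nb_segs, port) into one 64-bit template, so the rearm loop can reset each newly allocated mbuf with a single 8-byte store. A minimal stand-alone sketch of that idea, using a simplified toy structure rather than the real rte_mbuf layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for the 8-byte region that rte_mbuf exposes as rearm_data. */
struct toy_rearm {
	uint16_t data_off;
	uint16_t refcnt;
	uint8_t  nb_segs;
	uint8_t  port;
	uint16_t pad;
};

int main(void)
{
	struct toy_rearm tmpl = {
		.data_off = 128, .refcnt = 1, .nb_segs = 1, .port = 3, .pad = 0
	};
	uint64_t initializer;

	memcpy(&initializer, &tmpl, sizeof(initializer)); /* built once at queue setup */

	struct toy_rearm mb;
	memcpy(&mb, &initializer, sizeof(initializer));   /* one store per rearmed mbuf */
	printf("data_off=%u refcnt=%u nb_segs=%u port=%u\n",
	       mb.data_off, mb.refcnt, mb.nb_segs, mb.port);
	return 0;
}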
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c b/src/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c
new file mode 100644
index 00000000..011c54e0
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -0,0 +1,614 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <arm_neon.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ uint64x2_t dma_addr0, dma_addr1;
+ uint64x2_t zero = vdupq_n_u64(0);
+ uint64_t paddr;
+ uint8x8_t p;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (unlikely(rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0)) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vst1q_u64((uint64_t *)&rxdp[i].read, zero);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite whole 8 bytes with one load:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ vst1_u8((uint8_t *)&mb0->rearm_data, p);
+ paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr0 = vdupq_n_u64(paddr);
+
+ /* flush desc with pa dma_addr */
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
+
+ vst1_u8((uint8_t *)&mb1->rearm_data, p);
+ paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr1 = vdupq_n_u64(paddr);
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+{
+ uint32x4_t vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const uint32x4_t rss_vlan_msk = {
+ 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const uint8x16_t vlan_flags = {
+ 0, 0, 0, 0,
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const uint8x16_t rss_flags = {
+ 0, PKT_RX_FDIR, 0, 0,
+ 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const uint8x16_t l3_l4e_flags = {
+ 0,
+ PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
+ vreinterpretq_u32_u64(descs[2])).val[1];
+ vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
+ vreinterpretq_u32_u64(descs[3])).val[1];
+ vlan0 = vzipq_u32(vlan0, vlan1).val[0];
+
+ vlan1 = vandq_u32(vlan0, rss_vlan_msk);
+ vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags,
+ vreinterpretq_u8_u32(vlan1)));
+
+ rss = vshrq_n_u32(vlan1, 11);
+ rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags,
+ vreinterpretq_u8_u32(rss)));
+
+ l3_l4e = vshrq_n_u32(vlan1, 22);
+ l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
+ vreinterpretq_u8_u32(l3_l4e)));
+
+
+ vlan0 = vorrq_u32(vlan0, rss);
+ vlan0 = vorrq_u32(vlan0, l3_l4e);
+
+ rx_pkts[0]->ol_flags = vgetq_lane_u32(vlan0, 0);
+ rx_pkts[1]->ol_flags = vgetq_lane_u32(vlan0, 1);
+ rx_pkts[2]->ol_flags = vgetq_lane_u32(vlan0, 2);
+ rx_pkts[3]->ol_flags = vgetq_lane_u32(vlan0, 3);
+}
+#else
+#define desc_to_olflags_v(descs, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT 10
+
+#define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL
+
+static inline void
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+{
+ int i;
+ uint8_t ptype;
+ uint8x16_t tmp;
+
+ for (i = 0; i < 4; i++) {
+ tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
+ ptype = vgetq_lane_u8(tmp, 8);
+ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
+ }
+
+}
+
+ /*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ *   number of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+
+ /* mask to shuffle from desc. to mbuf */
+ uint8x16_t shuf_msk = {
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 14, 15, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 14, 15, /* octet 15~14, 16 bits data_len */
+ 2, 3, /* octet 2~3, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+
+ uint8x16_t eop_check = {
+ 0x02, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ };
+
+ uint16x8_t crc_adjust = {
+ 0, 0, /* ignore pkt_type field */
+ rxq->crc_len, /* sub crc on pkt_len */
+ 0, /* ignore high-16bits of pkt_len */
+ rxq->crc_len, /* sub crc on data_len */
+ 0, 0, 0 /* ignore non-length fields */
+ };
+
+	/* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch_non_temporal(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+	/* A. load 4 packets in one loop
+	 * [A*. mask out 4 unused dirty fields in desc]
+	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+	 * C. calc the number of DD bits among the 4 packets
+	 * [C*. extract the end-of-packet bit, if requested]
+	 * D. fill info from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ uint64x2_t descs[RTE_I40E_DESCS_PER_LOOP];
+ uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ uint16x8x2_t sterr_tmp1, sterr_tmp2;
+ uint64x2_t mbp1, mbp2;
+ uint16x8_t staterr;
+ uint16x8_t tmp;
+ uint64_t stat;
+
+ int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
+ rte_rmb();
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+
+ descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
+ /* B.1 load 2 mbuf point */
+ descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ descs[0] = vld1q_u64((uint64_t *)(rxdp));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
+ len_shl);
+ descs[3] = vreinterpretq_u64_u32(len3);
+ uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
+ len_shl);
+ descs[2] = vreinterpretq_u64_u32(len2);
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
+ pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
+ vreinterpretq_u16_u64(descs[3]));
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
+ vreinterpretq_u16_u64(descs[2]));
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = vzipq_u16(sterr_tmp1.val[1],
+ sterr_tmp2.val[1]).val[0];
+ stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
+
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
+ pkt_mb4 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+ pkt_mb3 = vreinterpretq_u8_u16(tmp);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
+ len_shl);
+ descs[1] = vreinterpretq_u64_u32(len1);
+ uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
+ len_shl);
+ descs[0] = vreinterpretq_u64_u32(len0);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+ pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+ pkt_mb2 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+ pkt_mb1 = vreinterpretq_u8_u16(tmp);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ uint8x16_t eop_shuf_mask = {
+ 0x00, 0x02, 0x04, 0x06,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF};
+ uint8x16_t eop_bits;
+
+ /* and with mask to extract bits, flipping 1-0 */
+ eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr));
+ eop_bits = vandq_u8(eop_bits, eop_check);
+ /* the staterr values are not in order, as the count
+			 * of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = vqtbl1q_u8(eop_bits, eop_shuf_mask);
+
+ /* store the resulting 32-bit value */
+ vst1q_lane_u32((uint32_t *)split_packet,
+ vreinterpretq_u32_u8(eop_bits), 0);
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ desc_to_ptype_v(descs, &rx_pkts[pos]);
+		/* C.4 calc available number of desc */
+ var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ *   number of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ *   number of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+ unsigned i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+		/* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ uint64x2_t descriptor = {pkt->buf_physaddr + pkt->data_off, high_qw};
+ vst1q_u64((uint64_t *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+	/* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
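The receive loop above collects the DD status bit of the four descriptors into one 64-bit word (one bit per 16-bit lane) and popcounts it against I40E_VPMD_DESC_DD_MASK to learn how many descriptors completed; anything less than RTE_I40E_DESCS_PER_LOOP ends the scan. A small scalar sketch of that counting step (the status word below is a made-up example):

#include <stdint.h>
#include <stdio.h>

#define DESC_DD_MASK 0x0001000100010001ULL /* one DD bit per 16-bit lane */

int main(void)
{
	/* pretend descriptors 0 and 1 have DD set, descriptors 2 and 3 do not */
	uint64_t stat = 0x0000000000010001ULL;
	int nb_done = __builtin_popcountll(stat & DESC_DD_MASK);

	printf("descriptors done in this loop: %d\n", nb_done); /* prints 2 */
	return 0;
}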
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx_vec.c b/src/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 51fb282a..b95cc8e1 100644
--- a/src/dpdk/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -39,6 +39,7 @@
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
#include <tmmintrin.h>
@@ -138,19 +139,28 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
static inline void
desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
- __m128i vlan0, vlan1, rss;
- union {
- uint16_t e[4];
- uint64_t dword;
- } vol;
+ __m128i vlan0, vlan1, rss, l3_l4e;
/* mask everything except RSS, flow director and VLAN flags
* bit2 is for VLAN tag, bit11 for flow director indication
* bit13:12 for RSS indication.
*/
- const __m128i rss_vlan_msk = _mm_set_epi16(
- 0x0000, 0x0000, 0x0000, 0x0000,
- 0x3804, 0x3804, 0x3804, 0x3804);
+ const __m128i rss_vlan_msk = _mm_set_epi32(
+ 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
+
+ const __m128i cksum_mask = _mm_set_epi32(
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
@@ -163,23 +173,43 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
0, 0, PKT_RX_FDIR, 0);
- vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
- vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
- vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);
+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+		/* shift right 1 bit to make sure it does not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+
+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
- rss = _mm_srli_epi16(vlan1, 11);
+ rss = _mm_srli_epi32(vlan1, 11);
rss = _mm_shuffle_epi8(rss_flags, rss);
+ l3_l4e = _mm_srli_epi32(vlan1, 22);
+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+ /* then we shift left 1 bit */
+ l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+	/* we need to mask out the redundant bits */
+ l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
+
vlan0 = _mm_or_si128(vlan0, rss);
- vol.dword = _mm_cvtsi128_si64(vlan0);
+ vlan0 = _mm_or_si128(vlan0, l3_l4e);
- rx_pkts[0]->ol_flags = vol.e[0];
- rx_pkts[1]->ol_flags = vol.e[1];
- rx_pkts[2]->ol_flags = vol.e[2];
- rx_pkts[3]->ol_flags = vol.e[3];
+ rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0);
+ rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2);
+ rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4);
+ rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6);
}
#else
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
@@ -187,6 +217,21 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
#define PKTLEN_SHIFT 10
+static inline void
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
+
+ ptype0 = _mm_srli_epi64(ptype0, 30);
+ ptype1 = _mm_srli_epi64(ptype1, 30);
+
+ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
+ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
+ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
+ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
+}
+
/*
* Notice:
* - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
@@ -224,7 +269,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
*/
rxdp = rxq->rx_ring + rxq->rx_tail;
- _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+ rte_prefetch0(rxdp);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
@@ -282,6 +327,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ rte_compiler_barrier();
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
@@ -290,8 +336,10 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
/* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
/* B.2 copy 2 mbuf point into rx_pkts */
@@ -393,6 +441,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb2);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
+ desc_to_ptype_v(descs, &rx_pkts[pos]);
/* C.4 calc avaialbe number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
@@ -421,68 +470,6 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
-static inline uint16_t
-reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
- uint16_t nb_bufs, uint8_t *split_flags)
-{
- struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
- struct rte_mbuf *start = rxq->pkt_first_seg;
- struct rte_mbuf *end = rxq->pkt_last_seg;
- unsigned pkt_idx, buf_idx;
-
- for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
- if (end != NULL) {
- /* processing a split packet */
- end->next = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
-
- start->nb_segs++;
- start->pkt_len += rx_bufs[buf_idx]->data_len;
- end = end->next;
-
- if (!split_flags[buf_idx]) {
- /* it's the last packet of the set */
- start->hash = end->hash;
- start->ol_flags = end->ol_flags;
- /* we need to strip crc for the whole packet */
- start->pkt_len -= rxq->crc_len;
- if (end->data_len > rxq->crc_len) {
- end->data_len -= rxq->crc_len;
- } else {
- /* free up last mbuf */
- struct rte_mbuf *secondlast = start;
-
- while (secondlast->next != end)
- secondlast = secondlast->next;
- secondlast->data_len -= (rxq->crc_len -
- end->data_len);
- secondlast->next = NULL;
- rte_pktmbuf_free_seg(end);
- end = secondlast;
- }
- pkts[pkt_idx++] = start;
- start = end = NULL;
- }
- } else {
- /* not processing a split packet */
- if (!split_flags[buf_idx]) {
- /* not a split packet, save and skip */
- pkts[pkt_idx++] = rx_bufs[buf_idx];
- continue;
- }
- end = start = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
- rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
- }
- }
-
- /* save the partial packet for next time */
- rxq->pkt_first_seg = start;
- rxq->pkt_last_seg = end;
- memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
- return pkt_idx;
-}
-
/* vPMD receive routine that reassembles scattered packets
* Notice:
* - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
@@ -548,73 +535,6 @@ vtx(volatile struct i40e_tx_desc *txdp,
vtx1(txdp, *pkt, flags);
}
-static inline int __attribute__((always_inline))
-i40e_tx_free_bufs(struct i40e_tx_queue *txq)
-{
- struct i40e_tx_entry *txep;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
-
- /* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
- rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
- rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
- return 0;
-
- n = txq->tx_rs_thresh;
-
- /* first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool)) {
- free[nb_free++] = m;
- } else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free,
- nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
-static inline void __attribute__((always_inline))
-tx_backlog_entry(struct i40e_tx_entry *txep,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- int i;
-
- for (i = 0; i < (int)nb_pkts; ++i)
- txep[i].mbuf = tx_pkts[i];
-}
-
uint16_t
i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -685,37 +605,13 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
- const unsigned mask = rxq->nb_rx_desc - 1;
- unsigned i;
-
- if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
- return;
-
- /* free all mbufs that are valid in the ring */
- for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
- rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
- rxq->rxrearm_nb = rxq->nb_rx_desc;
-
- /* set all entries to NULL */
- memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+ _i40e_rx_queue_release_mbufs_vec(rxq);
}
int __attribute__((cold))
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
- uintptr_t p;
- struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
- mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
- mb_def.port = rxq->port_id;
- rte_mbuf_refcnt_set(&mb_def, 1);
-
- /* prevent compiler reordering: rearm_data covers previous fields */
- rte_compiler_barrier();
- p = (uintptr_t)&mb_def.rearm_data;
- rxq->mbuf_initializer = *(uint64_t *)p;
- return 0;
+ return i40e_rxq_vec_setup_default(rxq);
}
int __attribute__((cold))
@@ -728,34 +624,10 @@ int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-
/* need SSE4.1 support */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
return -1;
-
-#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0)
- return -1;
#endif
- /* no fdir support */
- if (fconf->mode != RTE_FDIR_MODE_NONE)
- return -1;
-
- /* - no csum error report support
- * - no header split support
- */
- if (rxmode->hw_ip_checksum == 1 ||
- rxmode->header_split == 1)
- return -1;
-
- return 0;
-#else
- RTE_SET_USED(dev);
- return -1;
-#endif
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
}
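One detail worth calling out in the SSE changes above: _mm_shuffle_epi8 can only return 8-bit table entries, so checksum flag combinations that would not fit in a byte are stored in l3_l4e_flags shifted right by one, and the looked-up result is shifted left by one and masked with cksum_mask afterwards. A scalar sketch of the same trick, using made-up flag values instead of the real PKT_RX_* constants:

#include <stdint.h>
#include <stdio.h>

/* made-up flag bits for illustration only */
#define TOY_IP_CKSUM_GOOD (1u << 7)
#define TOY_L4_CKSUM_BAD  (1u << 8) /* > 255: would not fit in a shuffle byte */

int main(void)
{
	/* table entry is pre-shifted right so it fits in 8 bits */
	uint8_t entry = (TOY_IP_CKSUM_GOOD | TOY_L4_CKSUM_BAD) >> 1;
	/* after the shuffle, shift left again to restore the original bits */
	uint32_t ol_flags = (uint32_t)entry << 1;

	printf("restored flags: 0x%x\n", ol_flags); /* prints 0x180 */
	return 0;
}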
diff --git a/src/dpdk/drivers/net/i40e/rte_pmd_i40e.h b/src/dpdk/drivers/net/i40e/rte_pmd_i40e.h
new file mode 100644
index 00000000..a0ad88c6
--- /dev/null
+++ b/src/dpdk/drivers/net/i40e/rte_pmd_i40e.h
@@ -0,0 +1,335 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PMD_I40E_H_
+#define _PMD_I40E_H_
+
+/**
+ * @file rte_pmd_i40e.h
+ *
+ * i40e PMD specific functions.
+ *
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include <rte_ethdev.h>
+
+/**
+ * Response sent back to i40e driver from user app after callback
+ */
+enum rte_pmd_i40e_mb_event_rsp {
+ RTE_PMD_I40E_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_I40E_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_I40E_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_I40E_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Data sent to the user application when the callback is executed.
+ */
+struct rte_pmd_i40e_mb_event_param {
+ uint16_t vfid; /**< Virtual Function number */
+ uint16_t msg_type; /**< VF to PF message type, see i40e_virtchnl_ops */
+ uint16_t retval; /**< return value */
+ void *msg; /**< pointer to message */
+ uint16_t msglen; /**< length of the message */
+};
+
+/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf);
+
+/**
+ * Enable/Disable VF MAC anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set MAC anti spoofing.
+ * @param on
+ * 1 - Enable VFs MAC anti spoofing.
+ * 0 - Disable VFs MAC anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set VLAN anti spoofing.
+ * @param on
+ * 1 - Enable VFs VLAN anti spoofing.
+ * 0 - Disable VFs VLAN anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable TX loopback on all the PF and VFs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable TX loopback.
+ * 0 - Disable TX loopback.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_tx_loopback(uint8_t port,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF unicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF multicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Set the VF MAC address.
+ *
+ * The PF should set the MAC address before the VF is initialized; if the PF
+ * sets the MAC address after the VF is initialized, the new MAC address will
+ * not take effect until the VF reinitializes.
+ *
+ * This will remove all existing MAC filters.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable VF VLAN strip for all queues in a pool.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN insert.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable VF broadcast mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable broadcast.
+ * 1 - Enable broadcast.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN tag.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable VF's vlan tag.
+ * n - Enable VF's vlan tag.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN filter
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vlan_id
+ * ID specifying VLAN
+ * @param vf_mask
+ *    Mask of VFs to filter.
+ * @param on
+ * 0 - Disable VF's VLAN filter.
+ * 1 - Enable VF's VLAN filter.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on);
+
+/**
+ * Get VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ *    VF for which to get statistics.
+ * @param stats
+ * A pointer to a structure of type *rte_eth_stats* to be filled with
+ * the values of device counters for the following set of statistics:
+ * - *ipackets* with the total of successfully received packets.
+ * - *opackets* with the total of successfully transmitted packets.
+ * - *ibytes* with the total of successfully received bytes.
+ * - *obytes* with the total of successfully transmitted bytes.
+ * - *ierrors* with the total of erroneous received packets.
+ * - *oerrors* with the total of failed transmitted packets.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats);
+
+/**
+ * Clear VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ *    VF for which to clear statistics.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_reset_vf_stats(uint8_t port,
+ uint16_t vf_id);
+
+#endif /* _PMD_I40E_H_ */
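A hedged usage sketch for the per-VF API declared in this header; the port number, VF id and error handling below are illustrative assumptions, not part of the patch:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>
#include "rte_pmd_i40e.h"

/* Example: lock down one VF and read back its counters (port/VF ids assumed). */
static void configure_vf_example(uint8_t port, uint16_t vf)
{
	struct rte_eth_stats stats;

	if (rte_pmd_i40e_set_vf_mac_anti_spoof(port, vf, 1) != 0)
		printf("enabling MAC anti-spoofing failed\n");

	if (rte_pmd_i40e_set_vf_unicast_promisc(port, vf, 0) != 0)
		printf("disabling unicast promiscuous mode failed\n");

	if (rte_pmd_i40e_get_vf_stats(port, vf, &stats) == 0)
		printf("VF %u: %" PRIu64 " packets received\n", vf, stats.ipackets);
}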