Diffstat (limited to 'drivers/net/i40e')
-rw-r--r--  drivers/net/i40e/Makefile                 |   16
-rw-r--r--  drivers/net/i40e/base/README              |   59
-rw-r--r--  drivers/net/i40e/base/i40e_adminq.c       |    8
-rw-r--r--  drivers/net/i40e/base/i40e_adminq.h       |    4
-rw-r--r--  drivers/net/i40e/base/i40e_adminq_cmd.h   |  180
-rw-r--r--  drivers/net/i40e/base/i40e_common.c       |  802
-rw-r--r--  drivers/net/i40e/base/i40e_dcb.c          |    2
-rw-r--r--  drivers/net/i40e/base/i40e_devids.h       |    3
-rw-r--r--  drivers/net/i40e/base/i40e_lan_hmc.c      |    5
-rw-r--r--  drivers/net/i40e/base/i40e_nvm.c          |   52
-rw-r--r--  drivers/net/i40e/base/i40e_osdep.h        |   10
-rw-r--r--  drivers/net/i40e/base/i40e_prototype.h    |   59
-rw-r--r--  drivers/net/i40e/base/i40e_register.h     |    2
-rw-r--r--  drivers/net/i40e/base/i40e_type.h         |  174
-rw-r--r--  drivers/net/i40e/base/i40e_virtchnl.h     |    7
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c            | 1852
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h            |  283
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c         |  235
-rw-r--r--  drivers/net/i40e/i40e_fdir.c              |  138
-rw-r--r--  drivers/net/i40e/i40e_flow.c              | 2258
-rw-r--r--  drivers/net/i40e/i40e_logs.h              |   17
-rw-r--r--  drivers/net/i40e/i40e_pf.c                |  476
-rw-r--r--  drivers/net/i40e/i40e_pf.h                |    4
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c              |  272
-rw-r--r--  drivers/net/i40e/i40e_rxtx.h              |   29
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_altivec.c  |  645
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_common.h   |   23
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_neon.c     |   91
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_sse.c      |   92
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.c           | 1937
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.h           |  590
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e_version.map |   36
32 files changed, 9422 insertions(+), 939 deletions(-)
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 13085fb7..56f210d6 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -38,7 +38,7 @@ LIB = librte_pmd_i40e.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF
-CFLAGS += -DX722_SUPPORT -DX722_A0_SUPPORT
+CFLAGS += -DX722_A0_SUPPORT
EXPORT_MAP := rte_pmd_i40e_version.map
@@ -99,23 +99,23 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
+else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
else
SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
CFLAGS_i40e_rxtx_vec_sse.o += -msse4.1
endif
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h
include $(RTE_SDK)/mk/rte.lib.mk
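
Note: besides adding the Altivec path and the new i40e_flow.c/rte_pmd_i40e.c
sources, the Makefile hunk above starts installing rte_pmd_i40e.h, the
PMD-specific public API introduced by this series. A minimal consumer sketch,
assuming the 17.05-era prototype of rte_pmd_i40e_set_vf_mac_anti_spoof()
(check the installed header for the exact signatures):

#include <rte_pmd_i40e.h>

/* Disable MAC anti-spoof checking for one VF on an i40e port.
 * This is an i40e-specific call, not part of the generic ethdev API.
 */
static int
disable_vf_mac_spoof_check(uint8_t port, uint16_t vf_id)
{
	return rte_pmd_i40e_set_vf_mac_anti_spoof(port, vf_id, 0);
}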
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
new file mode 100644
index 00000000..0da9f674
--- /dev/null
+++ b/drivers/net/i40e/base/README
@@ -0,0 +1,59 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel® I40E driver
+==================
+
+This directory contains the source code of the FreeBSD i40e driver,
+version cid-i40e.2017.03.21.tar.gz, released by the team which develops
+the base drivers for all i40e NICs. The base/ directory contains the
+original source package.
+This driver is valid for the products listed below:
+
+* Intel® Ethernet Converged Network Adapters X710
+* Intel® Ethernet Converged Network Adapters XL710
+* Intel® Ethernet Network Adapter XXV710
+* Intel® Ethernet Connection X722 for 10GBASE-T
+* Intel® Ethernet Connection X722 for 10GbE backplane
+* Intel® Ethernet Connection X722 for 10GbE SFP+
+* Intel® Ethernet Connection X722 for 1GbE
+* Intel® Ethernet Controller X710 and XL710 Family
+* Intel® Ethernet Controller XXV710 for 25GbE backplane
+* Intel® Ethernet Controller XXV710 for 25GbE SFP28
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ i40e_osdep.h
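
Note: as the README states, all OS adaptation is confined to i40e_osdep.h,
which maps the shared-code primitives onto DPDK. A sketch of the shape of
that mapping (an assumption for illustration, not the full macro set in the
tree):

#include <rte_cycles.h>

/* Shared-code delay primitives resolve to DPDK busy-wait helpers */
#define i40e_usec_delay(x) rte_delay_us(x)
#define i40e_msec_delay(x) rte_delay_us(1000 * (x))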
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 0d3a83fa..a60292a3 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -944,8 +944,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- i40e_msec_delay(1);
- total_delay++;
+ i40e_usec_delay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -1077,11 +1077,11 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
flags = LE16_TO_CPU(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/drivers/net/i40e/base/i40e_adminq.h b/drivers/net/i40e/base/i40e_adminq.h
index 750973c5..182e40b9 100644
--- a/drivers/net/i40e/base/i40e_adminq.h
+++ b/drivers/net/i40e/base/i40e_adminq.h
@@ -158,9 +158,9 @@ STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
#ifdef I40E_ESS_SUPPORT
-#define I40E_ASQ_CMD_TIMEOUT_ESS 50000 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT_ESS 50000000 /* usecs */
#endif
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 4f067720..09f5bf5c 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -139,12 +139,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
-#ifdef X722_SUPPORT
/* Proxy commands */
i40e_aqc_opc_set_proxy_config = 0x0104,
i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
-#endif
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -152,12 +150,11 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
-#ifdef X722_SUPPORT
/* WoL commands */
i40e_aqc_opc_set_wol_filter = 0x0120,
i40e_aqc_opc_get_wake_reason = 0x0121,
+ i40e_aqc_opc_clear_all_wol_filters = 0x025E,
-#endif
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -197,10 +194,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
+ i40e_aqc_opc_replace_cloud_filters = 0x025F,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ /* Dynamic Device Personalization */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -282,12 +284,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
-#ifdef X722_SUPPORT
i40e_aqc_opc_set_rss_key = 0x0B02,
i40e_aqc_opc_set_rss_lut = 0x0B03,
i40e_aqc_opc_get_rss_key = 0x0B04,
i40e_aqc_opc_get_rss_lut = 0x0B05,
-#endif
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -540,7 +540,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_WOL_PRESERVE_STATUS 0x200
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -561,6 +562,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
struct i40e_aqc_mac_address_write {
__le16 command_flags;
#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@@ -584,7 +586,6 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
-#ifdef X722_SUPPORT
/* Set WoL Filter (0x0120) */
struct i40e_aqc_set_wol_filter {
@@ -600,6 +601,7 @@ struct i40e_aqc_set_wol_filter {
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
@@ -635,7 +637,6 @@ struct i40e_aqc_get_wake_reason_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
-#endif /* X722_SUPPORT */
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -774,6 +775,7 @@ struct i40e_aqc_set_switch_config {
/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
__le16 valid_flags;
u8 reserved[12];
};
@@ -940,16 +942,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
-#ifdef X722_SUPPORT
#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#endif
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#ifdef X722_SUPPORT
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
-#endif
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1332,7 +1330,9 @@ struct i40e_aqc_add_remove_cloud_filters {
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
+ u8 big_buffer_flag;
+#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
+ u8 reserved2[3];
__le32 addr_high;
__le32 addr_low;
};
@@ -1369,6 +1369,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x0010 to 0x0017 is for custom filters */
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
@@ -1403,6 +1404,46 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 response_reserved[7];
};
+/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when
+ * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set. refer to
+ * DCR288
+ */
+struct i40e_aqc_add_rm_cloud_filt_elem_ext {
+ struct i40e_aqc_add_remove_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
@@ -1414,6 +1455,54 @@ struct i40e_aqc_remove_cloud_filters_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+/* Replace filter Command 0x025F
+ * uses the i40e_aqc_replace_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+struct i40e_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+#define I40E_AQC_REPLACE_L1_FILTER 0x0
+#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
+#define I40E_AQC_GET_CLOUD_FILTERS 0x2
+#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
+#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+/* Filter type INPUT codes*/
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL)
+
+/* Field Vector offsets */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
+ struct i40e_filter_data filters[8];
+};
+
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
@@ -1449,6 +1538,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+/* Dynamic Device Personalization */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ddp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1781,11 +1900,20 @@ struct i40e_aq_get_phy_abilities_resp {
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
- u8 mod_type_ext;
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
@@ -1809,16 +1937,14 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 eeer;
u8 low_power_ctrl;
u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
u8 fec_config;
-#define I40E_AQ_SET_FEC_ABILITY_KR (1 << 0)
-#define I40E_AQ_SET_FEC_ABILITY_RS (1 << 1)
-#define I40E_AQ_SET_FEC_REQUEST_KR (1 << 2)
-#define I40E_AQ_SET_FEC_REQUEST_RS (1 << 3)
-#define I40E_AQ_SET_FEC_AUTO (1 << 4)
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
u8 reserved;
};
@@ -2416,7 +2542,6 @@ struct i40e_aqc_del_udp_tunnel_completion {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
-#ifdef X722_SUPPORT
struct i40e_aqc_get_set_rss_key {
#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
@@ -2457,7 +2582,6 @@ struct i40e_aqc_get_set_rss_lut {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-#endif
/* tunnel key structure 0x0B10 */
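
Note: the big-buffer cloud filter format introduced above extends each
element with 32 extra field-vector words, selected by setting
I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER in the command. A hypothetical sketch
of filling one extended element (field names come from the structures above;
the values are illustrative only):

struct i40e_aqc_add_rm_cloud_filt_elem_ext elem = {0};

/* ordinary element data first */
elem.element.flags = CPU_TO_LE16(I40E_AQC_ADD_CLOUD_FILTER_IMAC);

/* then the field-vector words that only exist in the big-buffer format */
elem.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
	CPU_TO_LE16(0x1234);	/* illustrative value */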
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 9a6b3ed6..03e94bc8 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -71,7 +71,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
-#ifdef X722_SUPPORT
#ifdef X722_A0_SUPPORT
case I40E_DEV_ID_X722_A0:
#endif
@@ -83,18 +82,14 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
-#endif
-#ifdef X722_SUPPORT
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_VF_HV:
#ifdef X722_A0_SUPPORT
case I40E_DEV_ID_X722_A0_VF:
#endif
hw->mac.type = I40E_MAC_X722_VF;
break;
#endif /* INTEGRATED_VF || VF_DRIVER */
-#endif /* X722_SUPPORT */
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
@@ -114,7 +109,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
return status;
}
-#ifndef I40E_NDIS_SUPPORT
/**
* i40e_aq_str - convert AQ err code to a string
* @hw: pointer to the HW structure
@@ -321,7 +315,6 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
return hw->err_str;
}
-#endif /* I40E_NDIS_SUPPORT */
/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
@@ -447,7 +440,6 @@ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
-#ifdef X722_SUPPORT
/**
* i40e_aq_get_set_rss_lut
@@ -606,7 +598,6 @@ enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-#endif /* X722_SUPPORT */
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
@@ -1022,9 +1013,7 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
-#ifdef X722_SUPPORT
case I40E_MAC_X722:
-#endif
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -1044,11 +1033,9 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
-#ifdef X722_SUPPORT
if (hw->mac.type == I40E_MAC_X722)
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
-#endif
status = i40e_init_nvm(hw);
return status;
}
@@ -1126,7 +1113,8 @@ enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+ i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac),
+ I40E_NONDMA_TO_NONDMA);
return status;
}
@@ -1149,7 +1137,8 @@ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
- memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac),
+ I40E_NONDMA_TO_NONDMA);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1207,7 +1196,8 @@ enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw,
return status;
if (flags & I40E_AQC_SAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+ i40e_memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac),
+ I40E_NONDMA_TO_NONDMA);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1288,6 +1278,8 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
media = I40E_MEDIA_TYPE_FIBER;
break;
case I40E_PHY_TYPE_100BASE_TX:
@@ -1302,6 +1294,7 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1309,6 +1302,7 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -1789,10 +1783,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Copy over all the old settings */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status)
@@ -1952,6 +1949,8 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
@@ -1974,12 +1973,13 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->crc_enable = false;
- if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
hw_link_info->lse_enable = true;
else
hw_link_info->lse_enable = false;
- if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
@@ -2344,6 +2344,43 @@ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+* i40e_aq_set_vsi_full_promiscuous
+* @hw: pointer to the hw struct
+* @seid: VSI number
+* @set: set promiscuous enable/disable
+* @cmd_details: pointer to command details structure or NULL
+**/
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags = I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_mc_promisc_on_vlan
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2412,6 +2449,40 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_bc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set broadcast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2745,14 +2816,17 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ /* extra checking needed to ensure link info to user is timely */
+ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
+ !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
status = i40e_aq_get_phy_capabilities(hw, false, false,
&abilities, NULL);
if (status)
return status;
- memcpy(hw->phy.link_info.module_type, &abilities.module_type,
- sizeof(hw->phy.link_info.module_type));
+ i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
return status;
}
@@ -3603,6 +3677,14 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
+ if (major_rev > 1) {
+ p->mng_protocols_over_mctp = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Protocols over MCTP = %d\n",
+ p->mng_protocols_over_mctp);
+ } else {
+ p->mng_protocols_over_mctp = 0;
+ }
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Management Mode = %d\n",
p->management_mode);
@@ -3822,7 +3904,6 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
p->update_disabled = true;
break;
-#ifdef X722_SUPPORT
case I40E_AQ_CAP_ID_WOL_AND_PROXY:
hw->num_wol_proxy_filters = (u16)number;
hw->wol_proxy_vsi_seid = (u16)logical_id;
@@ -3832,12 +3913,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
else
p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
- p->proxy_support = p->proxy_support;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: WOL proxy filters = %d\n",
hw->num_wol_proxy_filters);
break;
-#endif
default:
break;
}
@@ -3874,8 +3953,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
- hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
- hw->num_partitions = num_functions / hw->num_ports;
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
/* additional HW specific goodies that might
* someday be HW version specific
@@ -4360,11 +4441,15 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
- * @udp_port: the UDP port to add
+ * @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: Firmware expects the udp_port value to be in Little Endian format,
+ * and this function will call CPU_TO_LE16 to convert from Host byte order to
+ * Little Endian order.
**/
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
@@ -5548,6 +5633,59 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
}
/**
+ * i40e_aq_add_cloud_filters_big_buffer
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
+ * the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+
+ /* adjust Geneve VNI for HW issue */
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
* i40e_aq_remove_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to remove cloud filters from
@@ -5560,9 +5698,9 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
*
**/
enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count)
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
@@ -5587,6 +5725,103 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
}
/**
+ * i40e_aq_remove_cloud_filters_big_buffer
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
+ * the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
+ struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+
+ /* adjust Geneve VNI for HW issue */
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_replace_cloud_filters - Replace cloud filter command
+ * @hw: pointer to the hw struct
+ * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
+ * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
+ *
+ **/
+enum
+i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_replace_cloud_filters_cmd *cmd =
+ (struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
+ enum i40e_status_code status = I40E_SUCCESS;
+ int i = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_replace_cloud_filters);
+
+ desc.datalen = CPU_TO_LE16(32);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->old_filter_type = filters->old_filter_type;
+ cmd->new_filter_type = filters->new_filter_type;
+ cmd->valid_flags = filters->valid_flags;
+ cmd->tr_bit = filters->tr_bit;
+
+ status = i40e_asq_send_command(hw, &desc, cmd_buf,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL);
+
+ /* for get cloud filters command */
+ for (i = 0; i < 32; i += 4) {
+ cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
+ cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
+ cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
+ cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
+ }
+
+ return status;
+}
+
+
+/**
* i40e_aq_alternate_write
* @hw: pointer to the hardware structure
* @reg_addr0: address of first dword to be read
@@ -6007,9 +6242,6 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
- if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
desc.datalen = CPU_TO_LE16(bwd_size);
status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
@@ -6018,7 +6250,92 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
}
/**
- * i40e_read_phy_register
+ * i40e_read_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ } else {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes specified PHY register value
+ **/
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -6027,9 +6344,8 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 *value)
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -6039,8 +6355,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -6062,8 +6378,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_READ) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -6093,7 +6409,7 @@ phy_read_end:
}
/**
- * i40e_write_phy_register
+ * i40e_write_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -6102,9 +6418,8 @@ phy_read_end:
*
* Writes value to specified PHY register
**/
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 value)
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -6114,8 +6429,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -6139,8 +6454,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_WRITE) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -6161,6 +6476,78 @@ phy_write_end:
}
/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw,
+ reg, phy_addr, value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw,
+ page, reg, phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -6202,14 +6589,16 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
led_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
if (status)
goto phy_blinking_end;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
@@ -6221,20 +6610,18 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
if (time > 0 && interval > 0) {
for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (led_reg & I40E_PHY_LED_MANUAL_ON)
led_reg = 0;
else
led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
i40e_msec_delay(interval);
@@ -6242,8 +6629,9 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
}
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
phy_blinking_end:
return status;
@@ -6274,8 +6662,10 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- temp_addr, phy_addr, &reg_val);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr,
+ &reg_val);
if (status)
return status;
*val = reg_val;
@@ -6308,41 +6698,42 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
-
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
}
return status;
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
return status;
}
#endif /* PF_DRIVER */
@@ -6393,7 +6784,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -6452,7 +6845,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -6568,7 +6963,6 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
I40E_SUCCESS, NULL, 0, NULL);
}
#endif /* VF_DRIVER */
-#ifdef X722_SUPPORT
/**
* i40e_aq_set_arp_proxy_config
@@ -6591,10 +6985,13 @@ enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
status = i40e_asq_send_command(hw, &desc, proxy_config,
sizeof(struct i40e_aqc_arp_proxy_data),
@@ -6625,10 +7022,13 @@ enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_ns_proxy_table_entry);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
sizeof(struct i40e_aqc_ns_proxy_data),
@@ -6675,9 +7075,11 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
if (set_filter) {
if (!filter)
return I40E_ERR_PARAM;
+
cmd_flags |= I40E_AQC_SET_WOL_FILTER;
- buff_len = sizeof(*filter);
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
}
+
if (no_wol_tco)
cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
@@ -6688,6 +7090,12 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
cmd->valid_flags = CPU_TO_LE16(valid_flags);
+ buff_len = sizeof(*filter);
+ desc.datalen = CPU_TO_LE16(buff_len);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
@@ -6724,4 +7132,236 @@ enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
return status;
}
-#endif /* X722_SUPPORT */
+/**
+* i40e_aq_clear_all_wol_filters
+* @hw: pointer to the hw struct
+* @cmd_details: pointer to command details structure or NULL
+*
+* Clear all Wake on LAN (WoL) filters configured in the firmware
+**/
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_all_wol_filters);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+
+/**
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ddp_resp *resp;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->profile_track_id = CPU_TO_LE32(track_id);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_find_segment_in_package
+ * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ struct i40e_profile_section_header *sec = NULL;
+ u32 dev_cnt;
+ u32 vendor_dev_id;
+ u32 *nvm;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 i;
+
+ if (!track_id) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID)
+ if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ nvm = (u32 *)&profile->device_table[dev_cnt];
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec = (struct i40e_profile_section_header *)((u8 *)profile +
+ sec_tbl->section_offset[i]);
+
+ /* Skip 'AQ', 'note' and 'name' sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write profile */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: offset %d, info %d",
+ offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ **/
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
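
For context, the three helpers above are meant to be chained: locate the
profile segment inside the package, stream its MMIO sections to the
firmware, then record the profile as applied. A minimal sketch of such a
caller follows (illustrative only; the wrapper name, the stack buffer and
the track-id choice are assumptions, not part of this patch):

static enum i40e_status_code
ddp_load_sketch(struct i40e_hw *hw, struct i40e_package_header *pkg,
		u32 track_id)
{
	struct i40e_generic_seg_header *seg;
	struct i40e_profile_segment *profile;
	/* One section header plus one profile-info record */
	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
		     sizeof(struct i40e_profile_info)];
	enum i40e_status_code status;

	/* Locate the i40e profile segment inside the package */
	seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
	if (!seg)
		return I40E_ERR_CONFIG;
	profile = (struct i40e_profile_segment *)seg;

	/* Download every MMIO section of the profile */
	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	/* Register the profile in the device's applied-profile list */
	return i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
}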
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 26c344fd..9b5405db 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -396,6 +396,8 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
dcbcfg->numapps = length / sizeof(*app);
if (!dcbcfg->numapps)
return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
for (i = 0; i < dcbcfg->numapps; i++) {
u8 up, selector;
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index 8bd5793d..4546689a 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -55,7 +55,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#endif /* VF_DRIVER */
-#ifdef X722_SUPPORT
#ifdef X722_A0_SUPPORT
#define I40E_DEV_ID_X722_A0 0x374C
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
@@ -70,9 +69,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
#endif /* VF_DRIVER */
-#endif /* X722_SUPPORT */
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/i40e/base/i40e_lan_hmc.c b/drivers/net/i40e/base/i40e_lan_hmc.c
index 22606484..f03f3813 100644
--- a/drivers/net/i40e/base/i40e_lan_hmc.c
+++ b/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -1239,11 +1239,6 @@ enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
- if (NULL == hmc_info) {
- ret_code = I40E_ERR_BAD_PTR;
- DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
- goto exit;
- }
if (NULL == hmc_info->hmc_obj) {
ret_code = I40E_ERR_BAD_PTR;
DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index 4fa1220b..e8965024 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -219,19 +219,15 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
-#else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
return ret_code;
}
@@ -249,14 +245,10 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
else
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
return ret_code;
}
@@ -348,14 +340,10 @@ enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
else
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
return ret_code;
}
@@ -375,7 +363,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
@@ -386,9 +373,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
} else {
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
-#else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
return ret_code;
}
@@ -901,9 +885,20 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return I40E_SUCCESS;
}
+ /* Clear the status even if it has not been read, and log it */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -1253,6 +1248,7 @@ retry:
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
{
if (opcode == hw->nvm_wait_opcode) {
+
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
if (hw->nvm_release_on_done) {
@@ -1261,6 +1257,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
}
hw->nvm_wait_opcode = 0;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
@@ -1423,7 +1424,8 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
if (hw->nvm_buff.va) {
buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
+ I40E_NONDMA_TO_NONDMA);
}
}
@@ -1496,7 +1498,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
__func__, cmd->offset, cmd->offset + len);
buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
+ i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
bytes += len;
remainder -= len;
@@ -1510,7 +1512,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
__func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
+ i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
}
return I40E_SUCCESS;
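
A caller-visible consequence of this rework: i40e_read_nvm_word() now
always takes the NVM semaphore itself, so code that already holds the
resource must use the lock-free __i40e_read_nvm_word() variant. A short
sketch under that assumption (the helper name is hypothetical):

static enum i40e_status_code
read_two_nvm_words(struct i40e_hw *hw, u16 offset, u16 *lo, u16 *hi)
{
	enum i40e_status_code ret;

	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret)
		return ret;
	/* The semaphore is held, so only the __ variants are safe here */
	ret = __i40e_read_nvm_word(hw, offset, lo);
	if (!ret)
		ret = __i40e_read_nvm_word(hw, offset + 1, hi);
	i40e_release_nvm(hw);
	return ret;
}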
diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h
index 38e7ba5b..c57ecded 100644
--- a/drivers/net/i40e/base/i40e_osdep.h
+++ b/drivers/net/i40e/base/i40e_osdep.h
@@ -44,6 +44,7 @@
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
+#include <rte_io.h>
#include "../i40e_logs.h"
@@ -153,15 +154,18 @@ do { \
* I40E_PRTQF_FD_MSK
*/
-#define I40E_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define I40E_PCI_REG(reg) rte_read32(reg)
#define I40E_PCI_REG_ADDR(a, reg) \
((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
static inline uint32_t i40e_read_addr(volatile void *addr)
{
return rte_le_to_cpu_32(I40E_PCI_REG(addr));
}
-#define I40E_PCI_REG_WRITE(reg, value) \
- do { I40E_PCI_REG((reg)) = rte_cpu_to_le_32(value); } while (0)
+
+#define I40E_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+#define I40E_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT)
#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT)
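
The relaxed variant omits the I/O barrier implied by rte_write32(), which
suits hot-path doorbell updates where ordering against earlier descriptor
stores is enforced explicitly. A minimal sketch (the helper and its
parameters are hypothetical):

static inline void
ring_rx_tail(volatile void *tail_reg, uint16_t tail_val)
{
	/* Make the descriptor writes visible to the device first ... */
	rte_wmb();
	/* ... then ring the doorbell without a second barrier */
	I40E_PCI_REG_WRITE_RELAXED(tail_reg, tail_val);
}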
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 3aab5ca9..4bd589e7 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -78,7 +78,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-#ifdef X722_SUPPORT
enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -90,11 +89,8 @@ enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
-#endif
-#ifndef I40E_NDIS_SUPPORT
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
-#endif /* I40E_NDIS_SUPPORT */
#ifdef PF_DRIVER
@@ -172,12 +168,18 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -402,11 +404,21 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
-
+enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count);
enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
+enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count);
+enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf);
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
@@ -521,7 +533,6 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-#ifdef X722_SUPPORT
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
@@ -537,12 +548,38 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
u16 *wake_reason,
struct i40e_asq_cmd_details *cmd_details);
-#endif
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 *value);
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
+enum i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details);
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index fd0a7230..3a305b67 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -3401,7 +3401,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#ifdef X722_SUPPORT
#ifdef PF_DRIVER
#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
@@ -5366,5 +5365,4 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#endif /* X722_SUPPORT */
#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index b5f72c32..84d57576 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -133,6 +133,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -157,15 +158,22 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
-#define I40E_MDIO_STCODE I40E_MASK(0, \
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_PHY_COM_REG_PAGE 0x1E
@@ -189,9 +197,7 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-#ifdef X722_SUPPORT
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#endif
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
@@ -205,13 +211,10 @@ enum i40e_memcpy_type {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
-#ifdef X722_SUPPORT
I40E_MAC_X722,
I40E_MAC_X722_VF,
-#endif
I40E_MAC_GENERIC,
};
@@ -266,6 +269,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -332,25 +336,35 @@ struct i40e_phy_info {
#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
-#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32)
+/*
+ * The macro I40E_PHY_TYPE_OFFSET implements a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration, so
+ * a shift is needed to adjust for values larger than 31. The only
+ * affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
-#ifdef X722_SUPPORT
enum i40e_acpi_programming_method {
I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
};
-#define I40E_WOL_SUPPORT_MASK 1
-#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
-#define I40E_PROXY_SUPPORT_MASK (1 << 2)
+#define I40E_WOL_SUPPORT_MASK 0x1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define I40E_PROXY_SUPPORT_MASK 0x4
-#endif
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
@@ -359,6 +373,10 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@@ -414,11 +432,9 @@ struct i40e_hw_capabilities {
u32 enabled_tcmap;
u32 maxtc;
u64 wr_csr_prot;
-#ifdef X722_SUPPORT
bool apm_wol_support;
enum i40e_acpi_programming_method acpi_prog_method;
bool proxy_support;
-#endif
};
struct i40e_mac_info {
@@ -476,6 +492,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -554,6 +571,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
@@ -678,30 +696,22 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
-#ifdef X722_SUPPORT
/* WoL and proxy support */
u16 num_wol_proxy_filters;
u16 wol_proxy_vsi_seid;
-#endif
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
u64 flags;
/* debug mask */
u32 debug_mask;
-#ifndef I40E_NDIS_SUPPORT
char err_str[16];
-#endif /* I40E_NDIS_SUPPORT */
};
STATIC INLINE bool i40e_is_vf(struct i40e_hw *hw)
{
-#ifdef X722_SUPPORT
return (hw->mac.type == I40E_MAC_VF ||
hw->mac.type == I40E_MAC_X722_VF);
-#else
- return hw->mac.type == I40E_MAC_VF;
-#endif
}
struct i40e_driver_version {
@@ -805,11 +815,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
-#ifdef X722_SUPPORT
I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
-#else
- I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
-#endif
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
@@ -817,11 +823,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
-#ifdef X722_SUPPORT
I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
-#else
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
-#endif
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
@@ -1199,10 +1201,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
-#ifdef X722_SUPPORT
#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
-#endif
struct i40e_nop_desc {
__le64 rsvd;
__le64 dtype_cmd;
@@ -1239,38 +1239,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
-#ifdef X722_SUPPORT
/* Note: Values 0-28 are reserved for future use.
* Value 29, 30, 32 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
-#else
- /* Note: Values 0-30 are reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
-#ifdef X722_SUPPORT
I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
-#else
- /* Note: Value 32 is reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
-#ifdef X722_SUPPORT
/* Note: Values 37-38 are reserved for future use.
* Value 39, 40, 42 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
-#else
- /* Note: Values 37-40 are reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
-#ifdef X722_SUPPORT
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
-#endif
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -1325,12 +1311,10 @@ enum i40e_filter_program_desc_pcmd {
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-#ifdef X722_SUPPORT
#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
-#endif
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
@@ -1502,6 +1486,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
@@ -1894,4 +1879,83 @@ struct i40e_lldp_variables {
#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
+
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_DDP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ddp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_I40E 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct i40e_ddp_version version;
+ u32 size;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+ u32 track_id;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ddp_version version;
+ u8 op;
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_DDP_NAME_SIZE];
+};
#endif /* _I40E_TYPE_H_ */
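
The profile layout implied by these structures is variable length: the
section table sits behind the device table and an NVM word array whose
first word gives that array's length. A small sketch that mirrors the
pointer walk done by i40e_write_profile() above (the helper itself is
hypothetical):

static u32
count_mmio_sections(struct i40e_profile_segment *profile)
{
	u32 *nvm = (u32 *)&profile->device_table[profile->device_table_count];
	/* nvm[0] holds the NVM word count; the section table follows it */
	struct i40e_section_table *sec_tbl =
		(struct i40e_section_table *)&nvm[nvm[0] + 1];
	struct i40e_profile_section_header *sec;
	u32 i, n = 0;

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec = (struct i40e_profile_section_header *)
			((u8 *)profile + sec_tbl->section_offset[i]);
		if (sec->section.type == SECTION_TYPE_MMIO)
			n++;
	}
	return n;
}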
diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h
index fd51ec32..7a24c0f1 100644
--- a/drivers/net/i40e/base/i40e_virtchnl.h
+++ b/drivers/net/i40e/base/i40e_virtchnl.h
@@ -170,6 +170,13 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
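
The new base-mode mask groups the offloads every VF may rely on (L2, VLAN
and RSS configured by the PF). A PF-side capability check might look like
this sketch (the helper is hypothetical):

static inline bool
vf_requests_only_base_mode(u32 requested_caps)
{
	/* Anything outside the base set needs explicit PF support */
	return (requested_caps & ~(u32)I40E_VF_BASE_MODE_OFFLOADS) == 0;
}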
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bf7e5a05..4c49673f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -40,10 +40,12 @@
#include <inttypes.h>
#include <assert.h>
+#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -51,6 +53,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -138,60 +141,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -284,11 +233,6 @@
#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
-#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
- I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
-
/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG 0xA4
/* PCI offset for enabling/disabling Extended Tag */
@@ -324,6 +268,8 @@ static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx);
+static int i40e_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
@@ -345,10 +291,10 @@ static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_pfc_conf *pfc_conf);
-static void i40e_macaddr_add(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index,
- uint32_t pool);
+static int i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -373,8 +319,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
uint64_t *offset,
uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
-static void i40e_dev_interrupt_handler(
- __rte_unused struct rte_intr_handle *handle, void *param);
+static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
@@ -388,10 +333,6 @@ static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
-static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
- struct i40e_macvlan_filter *mv_f,
- int num,
- struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
int num,
@@ -406,9 +347,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -461,6 +399,27 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
+
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
+
+int i40e_logtype_init;
+int i40e_logtype_driver;
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -503,6 +462,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.stats_reset = i40e_dev_stats_reset,
.xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
+ .fw_version_get = i40e_fw_version_get,
.dev_infos_get = i40e_dev_info_get,
.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
.vlan_filter_set = i40e_vlan_filter_set,
@@ -520,6 +480,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
.rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .rx_descriptor_status = i40e_dev_rx_descriptor_status,
+ .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.dev_led_on = i40e_dev_led_on,
@@ -668,17 +630,23 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
sizeof(rte_i40e_txq_prio_strings[0]))
-static struct eth_driver rte_i40e_pmd = {
- .pci_drv = {
- .id_table = pci_id_i40e_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_i40e_dev_init,
- .eth_dev_uninit = eth_i40e_dev_uninit,
- .dev_private_size = sizeof(struct i40e_adapter),
+static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct i40e_adapter), eth_i40e_dev_init);
+}
+
+static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
+}
+
+static struct rte_pci_driver rte_i40e_pmd = {
+ .id_table = pci_id_i40e_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_i40e_pci_probe,
+ .remove = eth_i40e_pci_remove,
};
static inline int
@@ -709,8 +677,9 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
-RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio");
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
@@ -718,6 +687,9 @@ RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
#endif
+#ifndef I40E_GLQF_L3_MAP
+#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
+#endif
static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
@@ -763,8 +735,8 @@ i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
pf->main_vsi_seid, 0,
TRUE, NULL, NULL);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control "
- " frames from VSIs.");
+ PMD_INIT_LOG(ERR,
+ "Failed to add filter to drop flow control frames from VSIs.");
}
static int
@@ -907,7 +879,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
static void
config_floating_veb(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -928,9 +900,144 @@ config_floating_veb(struct rte_eth_dev *dev)
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
static int
+i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(&ethertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ return -EINVAL;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
+ return 0;
+
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ return -EINVAL;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
+ return 0;
+
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+ return 0;
+
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+
+ return ret;
+}
+
+static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi;
@@ -943,6 +1050,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -952,9 +1060,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
i40e_set_tx_function(dev);
return 0;
}
- pci_dev = dev->pci_dev;
+ i40e_set_default_ptype_table(dev);
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
@@ -963,8 +1074,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->back = I40E_PF_TO_ADAPTER(pf);
hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
if (!hw->hw_addr) {
- PMD_INIT_LOG(ERR, "Hardware is not available, "
- "as address is NULL");
+ PMD_INIT_LOG(ERR,
+ "Hardware is not available, as address is NULL");
return -ENODEV;
}
@@ -1021,6 +1132,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
((hw->nvm.version >> 4) & 0xff),
(hw->nvm.version & 0xf), hw->nvm.eetrack);
+ /* initialise the L3_MAP register */
+ ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
+ 0x00000028, NULL);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);
+
/* Need the special FW version to support floating VEB */
config_floating_veb(dev);
/* Clear PXE mode */
@@ -1100,8 +1217,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Set the global registers with default ether type value */
ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR, "Failed to set the default outer "
- "VLAN ether type");
+ PMD_INIT_LOG(ERR,
+ "Failed to set the default outer VLAN ether type");
goto err_setup_pf_switch;
}
@@ -1137,26 +1254,35 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Should be after VSI initialized */
dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
if (!dev->data->mac_addrs) {
- PMD_INIT_LOG(ERR, "Failed to allocated memory "
- "for storing mac address");
+ PMD_INIT_LOG(ERR,
+ "Failed to allocated memory for storing mac address");
goto err_mac_alloc;
}
ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
&dev->data->mac_addrs[0]);
+ /* Init dcb to sw mode by default */
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(INFO, "Failed to init dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
+ /* Update HW struct after DCB configuration */
+ i40e_get_cap(hw);
+
/* initialize pf host driver to setup SRIOV resource if applicable */
i40e_pf_host_init(dev);
/* register callback func to eal lib */
- rte_intr_callback_register(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler, dev);
/* configure and enable device interrupt */
i40e_pf_config_irq0(hw, TRUE);
i40e_pf_enable_irq0(hw);
/* enable uio intr after callback register */
- rte_intr_enable(&(pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
/*
* Add an ethertype filter to drop all flow control frames transmitted
* from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
@@ -1172,15 +1298,26 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
- /* Init dcb to sw mode by default */
- ret = i40e_dcb_init_configure(dev, TRUE);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(INFO, "Failed to init dcb.");
- pf->flags &= ~I40E_FLAG_DCB;
- }
+ ret = i40e_init_ethtype_filter_list(dev);
+ if (ret < 0)
+ goto err_init_ethtype_filter_list;
+ ret = i40e_init_tunnel_filter_list(dev);
+ if (ret < 0)
+ goto err_init_tunnel_filter_list;
+ ret = i40e_init_fdir_filter_list(dev);
+ if (ret < 0)
+ goto err_init_fdir_filter_list;
return 0;
+err_init_fdir_filter_list:
+ rte_free(pf->tunnel.hash_table);
+ rte_free(pf->tunnel.hash_map);
+err_init_tunnel_filter_list:
+ rte_free(pf->ethertype.hash_table);
+ rte_free(pf->ethertype.hash_map);
+err_init_ethtype_filter_list:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1200,12 +1337,73 @@ err_sync_phy_type:
return ret;
}
+static void
+i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_ethertype_rule *ethertype_rule;
+
+ ethertype_rule = &pf->ethertype;
+ /* Remove all ethertype filter rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(&ethertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+}
+
+static void
+i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_tunnel_rule *tunnel_rule;
+
+ tunnel_rule = &pf->tunnel;
+ /* Remove all tunnel director rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+}
+
+static void
+i40e_rm_fdir_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *p_fdir;
+ struct i40e_fdir_info *fdir_info;
+
+ fdir_info = &pf->fdir;
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+}
+
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct rte_flow *p_flow;
int ret;
uint8_t aq_fail = 0;
@@ -1214,8 +1412,10 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = dev->pci_dev;
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1245,11 +1445,21 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
dev->data->mac_addrs = NULL;
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* register callback func to eal lib */
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+
+ i40e_rm_ethtype_filter_list(pf);
+ i40e_rm_tunnel_filter_list(pf);
+ i40e_rm_fdir_filter_list(pf);
+
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ rte_free(p_flow);
+ }
return 0;
}
@@ -1315,6 +1525,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
}
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_dcb:
@@ -1335,7 +1547,8 @@ void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t i;
@@ -1448,7 +1661,8 @@ void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
@@ -1519,7 +1733,8 @@ static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1550,7 +1765,8 @@ static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_intr, i;
@@ -1629,7 +1845,7 @@ i40e_phy_conf_link(struct i40e_hw *hw,
/* use get_phy_abilities_resp value for the rest */
phy_conf.phy_type = phy_ab.phy_type;
phy_conf.phy_type_ext = phy_ab.phy_type_ext;
- phy_conf.fec_config = phy_ab.mod_type_ext;
+ phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
phy_conf.low_power_ctrl = phy_ab.d3_lpan;
@@ -1676,8 +1892,10 @@ i40e_dev_start(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
+ struct i40e_vsi *vsi;
hw->adapter_stopped = 0;
@@ -1693,8 +1911,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
!RTE_ETH_DEV_SRIOV(dev).active) &&
dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = dev->data->nb_rx_queues;
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -1;
+ ret = rte_intr_efd_enable(intr_handle, intr_vector);
+ if (ret)
+ return ret;
}
if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
@@ -1703,8 +1922,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int),
0);
if (!intr_handle->intr_vec) {
- PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d rx_queues intr_vec",
+ dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -1754,6 +1974,15 @@ i40e_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
}
+ /* Enable the VLAN promiscuous mode. */
+ if (pf->vfs) {
+ for (i = 0; i < pf->vf_num; i++) {
+ vsi = pf->vfs[i].vsi;
+ i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
+ true, NULL);
+ }
+ }
+
/* Apply link configure */
if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
@@ -1777,8 +2006,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_pf_enable_irq0(hw);
if (dev->data->dev_conf.intr_conf.lsc != 0)
- PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ PMD_INIT_LOG(INFO,
+ "lsc won't enable because of no intr multiplex");
} else if (dev->data->dev_conf.intr_conf.lsc != 0) {
ret = i40e_aq_set_phy_int_mask(hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
@@ -1794,6 +2023,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -1809,7 +2040,8 @@ i40e_dev_stop(struct rte_eth_dev *dev)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
/* Disable all queues */
@@ -1860,6 +2092,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t reg;
int i;
@@ -1871,7 +2105,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
/* Disable interrupt */
i40e_pf_disable_irq0(hw);
- rte_intr_disable(&(dev->pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
@@ -2069,6 +2303,8 @@ out:
if (link.link_status == old.link_status)
return -1;
+ i40e_notify_all_vfs_link_status(dev);
+
return 0;
}
@@ -2579,19 +2815,49 @@ i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
return -ENOSYS;
}
+static int
+i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 full_ver;
+ u8 ver, patch;
+ u16 build;
+ int ret;
+
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)(full_ver >> 24);
+ build = (u16)((full_ver >> 8) & 0xffff);
+ patch = (u8)(full_ver & 0xff);
+
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d%d 0x%08x %d.%d.%d",
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack,
+ ver, build, patch);
+
+ ret += 1; /* include the terminating '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
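+
Applications reach this callback through the rte_eth_dev_fw_version_get()
ethdev API: a zero return means the string fit, a positive return is the
buffer size required. A usage sketch (port number and buffer size are
assumptions):

#include <stdio.h>
#include <rte_ethdev.h>

static void show_fw_version(uint8_t port_id)
{
	char fw_version[64]; /* assumed large enough for typical NVMs */
	int ret = rte_eth_dev_fw_version_get(port_id, fw_version,
					     sizeof(fw_version));

	if (ret == 0)
		printf("port %u FW: %s\n", port_id, fw_version);
	else if (ret > 0)
		printf("buffer too small, need %d bytes\n", ret);
}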
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2648,6 +2914,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_max = I40E_MAX_RING_DESC,
.nb_min = I40E_MIN_RING_DESC,
.nb_align = I40E_ALIGN_RING_DESC,
+ .nb_seg_max = I40E_TX_MAX_SEG,
+ .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
};
if (pf->flags & I40E_FLAG_VMDQ) {
@@ -2708,7 +2976,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
else {
ret = -EINVAL;
PMD_DRV_LOG(ERR,
- "Unsupported vlan type in single vlan.\n");
+ "Unsupported vlan type in single vlan.");
return ret;
}
break;
@@ -2720,13 +2988,15 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
&reg_r, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Fail to debug read from "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
ret = -EIO;
return ret;
}
- PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
- "0x%08"PRIx64"", reg_id, reg_r);
+ PMD_DRV_LOG(DEBUG,
+ "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
+ reg_id, reg_r);
reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
@@ -2740,12 +3010,14 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
reg_w, NULL);
if (ret != I40E_SUCCESS) {
ret = -EIO;
- PMD_DRV_LOG(ERR, "Fail to debug write to "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ PMD_DRV_LOG(ERR,
+ "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
return ret;
}
- PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
+ PMD_DRV_LOG(DEBUG,
+ "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_w, reg_id);
return ret;
}
@@ -2889,8 +3161,9 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
- PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, "
- "High_water must <= %d.", max_high_water);
+ PMD_INIT_LOG(ERR,
+ "Invalid high/low water setup value in KB, High_water must be <= %d.",
+ max_high_water);
return -EINVAL;
}
@@ -2994,7 +3267,7 @@ i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
}
/* Add a MAC address, and update filters */
-static void
+static int
i40e_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
__rte_unused uint32_t index,
@@ -3011,13 +3284,13 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
pool);
- return;
+ return -ENOTSUP;
}
if (pool > pf->nb_cfg_vmdq_vsi) {
PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
pool, pf->nb_cfg_vmdq_vsi);
- return;
+ return -EINVAL;
}
(void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
@@ -3034,8 +3307,9 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
ret = i40e_vsi_add_mac(vsi, &mac_filter);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
- return;
+ return -ENODEV;
}
+ return 0;
}
/* Remove a MAC address, and update filters */
@@ -3062,8 +3336,8 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
/* No VMDQ pool enabled or configured */
if (!(pf->flags & I40E_FLAG_VMDQ) ||
(i > pf->nb_cfg_vmdq_vsi)) {
- PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
- "/configured");
+ PMD_DRV_LOG(ERR,
+ "No VMDQ pool enabled/configured");
return;
}
vsi = pf->vmdq[i - 1].vsi;
@@ -3264,9 +3538,9 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, lut_size);
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
return -EINVAL;
}
@@ -3305,9 +3579,9 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, lut_size);
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
return -EINVAL;
}
@@ -3362,8 +3636,9 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
mem->va = mz->addr;
mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
mem->zone = (const void *)mz;
- PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
- "%"PRIu64, mz->name, mem->pa);
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s allocated with physical address: %"PRIu64,
+ mz->name, mem->pa);
return I40E_SUCCESS;
}
@@ -3380,9 +3655,9 @@ i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
if (!mem)
return I40E_ERR_PARAM;
- PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
- "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
- mem->pa);
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s to be freed with physical address: %"PRIu64,
+ ((const struct rte_memzone *)mem->zone)->name, mem->pa);
rte_memzone_free((const struct rte_memzone *)mem->zone);
mem->zone = NULL;
mem->va = NULL;
@@ -3493,9 +3768,10 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint16_t qp_count = 0, vsi_count = 0;
- if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
+ if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
return -EINVAL;
}
@@ -3536,13 +3812,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
/* VF queue/VSI allocation */
pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
- if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
+ if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
pf->flags |= I40E_FLAG_SRIOV;
pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
- pf->vf_num = dev->pci_dev->max_vfs;
- PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
- "in total %u queues", pf->vf_num, pf->vf_nb_qps,
- pf->vf_nb_qps * pf->vf_num);
+ pf->vf_num = pci_dev->max_vfs;
+ PMD_DRV_LOG(DEBUG,
+ "%u VF VSIs, %u queues per VF VSI, in total %u queues",
+ pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
} else {
pf->vf_nb_qps = 0;
pf->vf_num = 0;
@@ -3570,14 +3846,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
if (pf->max_nb_vmdq_vsi) {
pf->flags |= I40E_FLAG_VMDQ;
pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
- PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
- "per VMDQ VSI, in total %u queues",
- pf->max_nb_vmdq_vsi,
- pf->vmdq_nb_qps, pf->vmdq_nb_qps *
- pf->max_nb_vmdq_vsi);
+ PMD_DRV_LOG(DEBUG,
+ "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
+ pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
+ pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
} else {
- PMD_DRV_LOG(INFO, "No enough queues left for "
- "VMDq");
+ PMD_DRV_LOG(INFO,
+ "No enough queues left for VMDq");
}
} else {
PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
@@ -3590,15 +3865,15 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
pf->flags |= I40E_FLAG_DCB;
if (qp_count > hw->func_caps.num_tx_qp) {
- PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
- "the hardware maximum %u", qp_count,
- hw->func_caps.num_tx_qp);
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u queues, which exceeds the hardware maximum %u",
+ qp_count, hw->func_caps.num_tx_qp);
return -EINVAL;
}
if (vsi_count > hw->func_caps.num_vsis) {
- PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
- "the hardware maximum %u", vsi_count,
- hw->func_caps.num_vsis);
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
+ vsi_count, hw->func_caps.num_vsis);
return -EINVAL;
}
@@ -3844,8 +4119,8 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
*/
entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
if (entry == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "resource pool");
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate memory for resource pool");
return -ENOMEM;
}
entry->base = valid_entry->base;
@@ -3885,9 +4160,9 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
}
if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
- PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
- "HW support 0x%x", hw->func_caps.enabled_tcmap,
- enabled_tcmap);
+ PMD_DRV_LOG(ERR,
+ "Enabled TC map 0x%x not applicable to HW support 0x%x",
+ hw->func_caps.enabled_tcmap, enabled_tcmap);
return I40E_NOT_SUPPORTED;
}
return I40E_SUCCESS;
@@ -4105,12 +4380,13 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
hw->aq.asq_last_status);
goto fail;
}
+ veb->enabled_tc = I40E_DEFAULT_TCMAP;
/* get statistics index */
ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
&veb->stats_idx, NULL, NULL, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
+ PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
hw->aq.asq_last_status);
goto fail;
}
@@ -4232,8 +4508,8 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
struct i40e_mac_filter *f;
struct ether_addr *mac;
- PMD_DRV_LOG(WARNING, "Cannot remove the default "
- "macvlan filter");
+ PMD_DRV_LOG(DEBUG,
+ "Cannot remove the default macvlan filter");
/* It needs to add the permanent mac into mac list */
f = rte_zmalloc("macv_filter", sizeof(*f), 0);
if (f == NULL) {
@@ -4283,8 +4559,9 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
- "configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "VSI failed to get TC bandwdith configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
@@ -4351,7 +4628,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret)
- PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
+ PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
hw->aq.asq_last_status);
}
@@ -4372,14 +4649,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
uplink_vsi == NULL) {
- PMD_DRV_LOG(ERR, "VSI setup failed, "
- "VSI link shouldn't be NULL");
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, VSI link shouldn't be NULL");
return NULL;
}
if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
- PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
- "uplink VSI should be NULL");
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, MAIN VSI uplink VSI should be NULL");
return NULL;
}
@@ -4423,6 +4700,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
vsi->user_param = user_param;
+ vsi->vlan_anti_spoof_on = 0;
+ vsi->vlan_filter_on = 0;
/* Allocate queues */
switch (vsi->type) {
case I40E_VSI_MAIN :
@@ -4530,8 +4809,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
ctxt.seid = vsi->seid;
@@ -4599,13 +4878,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
- I40E_DEFAULT_TCMAP);
+ hw->func_caps.enabled_tcmap);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
- ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+
+ ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
ctxt.info.valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
/**
@@ -4644,8 +4924,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -4662,8 +4942,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping.");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping.");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -4926,8 +5206,9 @@ i40e_pf_setup(struct i40e_pf *pf)
/* make queue allocated first, let FDIR use queue pair 0*/
ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
if (ret != I40E_FDIR_QUEUE_ID) {
- PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
- " ret =%d", ret);
+ PMD_DRV_LOG(ERR,
+ "queue allocation fails for FDIR: ret =%d",
+ ret);
pf->flags &= ~I40E_FLAG_FDIR;
}
}
@@ -4946,12 +5227,12 @@ i40e_pf_setup(struct i40e_pf *pf)
else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
else {
- PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
- hw->func_caps.rss_table_size);
+ PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
+ hw->func_caps.rss_table_size);
return I40E_ERR_PARAM;
}
- PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
- "size: %u\n", hw->func_caps.rss_table_size);
+ PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
+ hw->func_caps.rss_table_size);
pf->hash_lut_size = hw->func_caps.rss_table_size;
/* Enable ethtype and macvlan filters */
@@ -5201,8 +5482,8 @@ i40e_dev_rx_init(struct i40e_pf *pf)
ret = i40e_rx_queue_init(rxq);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to do RX queue "
- "initialization");
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
break;
}
}
@@ -5452,18 +5733,10 @@ static void
i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_virtchnl_pf_event event;
int i;
- event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
- event.event_data.link_event.link_status =
- dev->data->dev_link.link_status;
- event.event_data.link_event.link_speed =
- (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
-
for (i = 0; i < pf->vf_num; i++)
- i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT,
- I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+ i40e_notify_vf_link_status(dev, &pf->vfs[i]);
}
static void
@@ -5486,8 +5759,9 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
ret = i40e_clean_arq_element(hw, &info, &pending);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
- "aq_err: %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(INFO,
+ "Failed to read msg from AdminQ, aq_err: %u",
+ hw->aq.asq_last_status);
break;
}
opcode = rte_le_to_cpu_16(info.desc.opcode);
@@ -5504,14 +5778,12 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
break;
case i40e_aqc_opc_get_link_status:
ret = i40e_dev_link_update(dev, 0);
- if (!ret) {
- i40e_notify_all_vfs_link_status(dev);
+ if (!ret)
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
- }
break;
default:
- PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
opcode);
break;
}
@@ -5532,8 +5804,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+i40e_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5550,7 +5821,6 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
PMD_DRV_LOG(INFO, "No interrupt event");
goto done;
}
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
@@ -5565,7 +5835,6 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
PMD_DRV_LOG(ERR, "ICR0: HMC error");
if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
-#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
@@ -5579,10 +5848,10 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
done:
/* Enable interrupt */
i40e_pf_enable_irq0(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(dev->intr_handle);
}
-static int
+int
i40e_add_macvlan_filters(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *filter,
int total)
@@ -5632,7 +5901,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC match type");
ret = I40E_ERR_PARAM;
goto DONE;
}
@@ -5656,7 +5925,7 @@ DONE:
return ret;
}
-static int
+int
i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *filter,
int total)
@@ -5707,7 +5976,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC filter type");
ret = I40E_ERR_PARAM;
goto DONE;
}
@@ -5762,14 +6031,11 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
}
static void
-i40e_set_vlan_filter(struct i40e_vsi *vsi,
- uint16_t vlan_id, bool on)
+i40e_store_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
{
uint32_t vid_idx, vid_bit;
- if (vlan_id > ETH_VLAN_ID_MAX)
- return;
-
vid_idx = I40E_VFTA_IDX(vlan_id);
vid_bit = I40E_VFTA_BIT(vlan_id);
@@ -5779,11 +6045,43 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
vsi->vfta[vid_idx] &= ~vid_bit;
}
+void
+i40e_set_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return;
+
+ i40e_store_vlan_filter(vsi, vlan_id, on);
+
+ if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
+ return;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+
+ if (on) {
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to add vlan filter");
+ } else {
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR,
+ "Failed to remove vlan filter");
+ }
+}
+
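+/*
+ * Worked example, assuming the usual layout behind I40E_VFTA_IDX and
+ * I40E_VFTA_BIT: vfta[] packs the 4096 possible VLAN IDs into 32-bit
+ * words, so VLAN 100 lands in word 100 / 32 == 3 at bit 100 % 32 == 4.
+ */
+static void
+i40e_vfta_math_sketch(struct i40e_vsi *vsi, uint16_t vlan_id, bool on)
+{
+ uint32_t vid_idx = vlan_id / 32; /* which 32-bit word */
+ uint32_t vid_bit = 1U << (vlan_id % 32); /* which bit in that word */
+
+ if (on)
+ vsi->vfta[vid_idx] |= vid_bit;
+ else
+ vsi->vfta[vid_idx] &= ~vid_bit;
+}
+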
/**
* Find all vlan options for specific mac addr,
* return with actual vlan found.
*/
-static inline int
+int
i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
int num, struct ether_addr *addr)
@@ -5804,8 +6102,8 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
if (vsi->vfta[j] & (1 << k)) {
if (i > num - 1) {
- PMD_DRV_LOG(ERR, "vlan number "
- "not match");
+ PMD_DRV_LOG(ERR,
+ "vlan number doesn't match");
return I40E_ERR_PARAM;
}
(void)rte_memcpy(&mv_f[i].macaddr,
@@ -6098,7 +6396,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
filter_type == RTE_MACVLAN_HASH_MATCH) {
if (vlan_num == 0) {
- PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
return I40E_ERR_PARAM;
}
} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
@@ -6207,18 +6505,14 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV4;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
@@ -6227,18 +6521,14 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV6;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
@@ -6289,8 +6579,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to configure RSS key "
- "via AQ");
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
} else {
uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
@@ -6436,7 +6725,95 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
+ if ((rte_le_to_cpu_16(cld_filter->element.flags) &
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
+ tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ else
+ tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ tunnel_filter->input.flags = cld_filter->element.flags;
+ tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
+ tunnel_filter->queue = cld_filter->element.queue_number;
+ rte_memcpy(tunnel_filter->input.general_fields,
+ cld_filter->general_fields,
+ sizeof(cld_filter->general_fields));
+
+ return 0;
+}
+
+/* Check if the tunnel filter already exists */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
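+/*
+ * Illustrative sketch (not part of this patch) of the rte_hash
+ * pattern used by the SW-list helpers: rte_hash_add_key() returns a
+ * stable, non-negative slot index for a key, rte_hash_lookup()
+ * returns the same index later, and so hash_map[] resolves a key to
+ * its filter in O(1).
+ */
+static struct i40e_tunnel_filter *
+i40e_tunnel_hash_sketch(const struct rte_hash *h,
+ struct i40e_tunnel_filter *hash_map[],
+ struct i40e_tunnel_filter *filter)
+{
+ int pos = rte_hash_add_key(h, &filter->input);
+
+ if (pos < 0)
+ return NULL;
+ hash_map[pos] = filter;
+ /* rte_hash_lookup(h, &filter->input) now returns the same pos */
+ return hash_map[pos];
+}
+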
+/* Add a tunnel filter into the SW list */
static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -6449,37 +6826,44 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
int val, ret = 0;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_vsi *vsi = pf->main_vsi;
- struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
- struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
cld_filter = rte_zmalloc("tunnel_filter",
- sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
- 0);
+ sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+ 0);
if (NULL == cld_filter) {
PMD_DRV_LOG(ERR, "Failed to alloc memory.");
- return -EINVAL;
+ return -ENOMEM;
}
pfilter = cld_filter;
- ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac);
- ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac);
+ ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct ether_addr *)&pfilter->element.outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct ether_addr *)&pfilter->element.inner_mac);
- pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ pfilter->element.inner_vlan =
+ rte_cpu_to_le_16(tunnel_filter->inner_vlan);
if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
- rte_memcpy(&pfilter->ipaddr.v4.data,
+ rte_memcpy(&pfilter->element.ipaddr.v4.data,
&rte_cpu_to_le_32(ipv4_addr),
- sizeof(pfilter->ipaddr.v4.data));
+ sizeof(pfilter->element.ipaddr.v4.data));
} else {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
for (i = 0; i < 4; i++) {
convert_ipv6[i] =
rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
}
- rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6,
- sizeof(pfilter->ipaddr.v6.data));
+ rte_memcpy(&pfilter->element.ipaddr.v6.data,
+ &convert_ipv6,
+ sizeof(pfilter->element.ipaddr.v6.data));
}
/* check tunneled type */
@@ -6501,23 +6885,369 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
}
val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
- &pfilter->flags);
+ &pfilter->element.flags);
if (val < 0) {
rte_free(cld_filter);
return -EINVAL;
}
- pfilter->flags |= rte_cpu_to_le_16(
+ pfilter->element.flags |= rte_cpu_to_le_16(
I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
- pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
- pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
+ pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->element.queue_number =
+ rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+ /* Check whether the filter is already in the SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
- if (add)
- ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
+ ret = i40e_aq_add_cloud_filters(hw,
+ vsi->seid, &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return -ENOTSUP;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ if (tunnel == NULL) {
+ rte_free(cld_filter);
+ return -ENOMEM;
+ }
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return -ENOTSUP;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
+
+ rte_free(cld_filter);
+ return ret;
+}
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
+#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4
+#define I40E_TR_GENEVE_KEY_MASK 0x8
+#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40
+#define I40E_TR_GRE_KEY_MASK 0x400
+#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
+#define I40E_TR_GRE_NO_KEY_MASK 0x8000
+
+static enum i40e_status_code
+i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace.tr_bit = 0;
+
+ /* Prepare the buffer, 3 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[7] = 0xF0;
+ filter_replace_buf.data[8]
+ = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
+ filter_replace_buf.data[8] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
+ I40E_TR_GENEVE_KEY_MASK |
+ I40E_TR_GENERIC_UDP_TUNNEL_MASK;
+ filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
+ I40E_TR_GRE_KEY_WITH_XSUM_MASK |
+ I40E_TR_GRE_NO_KEY_MASK) >> 8;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return status;
+}
+
+static enum i40e_status_code
+i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ /* For MPLSoUDP */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
+ I40E_AQC_MIRROR_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+
+ /* For MPLSoGRE */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
+ I40E_AQC_MIRROR_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return status;
+}
+
+int
+i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_conf *tunnel_filter,
+ uint8_t add)
+{
+ uint16_t ip_type;
+ uint32_t ipv4_addr;
+ uint8_t i, tun_type = 0;
+ /* internal variable to convert ipv6 byte order */
+ uint32_t convert_ipv6[4];
+ int val, ret = 0;
+ struct i40e_pf_vf *vf = NULL;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+ uint32_t teid_le;
+ bool big_buffer = 0;
+
+ cld_filter = rte_zmalloc("tunnel_filter",
+ sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+ 0);
+
+ if (cld_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+ pfilter = cld_filter;
+
+ ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct ether_addr *)&pfilter->element.outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct ether_addr *)&pfilter->element.inner_mac);
+
+ pfilter->element.inner_vlan =
+ rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+ ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ rte_memcpy(&pfilter->element.ipaddr.v4.data,
+ &rte_cpu_to_le_32(ipv4_addr),
+ sizeof(pfilter->element.ipaddr.v4.data));
+ } else {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+ for (i = 0; i < 4; i++) {
+ convert_ipv6[i] =
+ rte_cpu_to_le_32(rte_be_to_cpu_32(
+ tunnel_filter->ip_addr.ipv6_addr[i]));
+ }
+ rte_memcpy(&pfilter->element.ipaddr.v6.data,
+ &convert_ipv6,
+ sizeof(pfilter->element.ipaddr.v6.data));
+ }
+
+ /* check tunneled type */
+ switch (tunnel_filter->tunnel_type) {
+ case I40E_TUNNEL_TYPE_VXLAN:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+ break;
+ case I40E_TUNNEL_TYPE_NVGRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+ break;
+ case I40E_TUNNEL_TYPE_IP_IN_GRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+ break;
+ case I40E_TUNNEL_TYPE_MPLSoUDP:
+ if (!pf->mpls_replace_flag) {
+ i40e_replace_mpls_l1_filter(pf);
+ i40e_replace_mpls_cloud_filter(pf);
+ pf->mpls_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ teid_le >> 4;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ (teid_le & 0xF) << 12;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ 0x40;
+ big_buffer = 1;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+ break;
+ case I40E_TUNNEL_TYPE_MPLSoGRE:
+ if (!pf->mpls_replace_flag) {
+ i40e_replace_mpls_l1_filter(pf);
+ i40e_replace_mpls_cloud_filter(pf);
+ pf->mpls_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ teid_le >> 4;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ (teid_le & 0xF) << 12;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ 0x0;
+ big_buffer = 1;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+ break;
+ case I40E_TUNNEL_TYPE_QINQ:
+ if (!pf->qinq_replace_flag) {
+ ret = i40e_cloud_filter_qinq_create(pf);
+ if (ret < 0)
+ PMD_DRV_LOG(DEBUG,
+ "QinQ tunnel filter already created.");
+ pf->qinq_replace_flag = 1;
+ }
+ /* Store the values of the outer and inner VLAN in the
+ * general fields. The big buffer must be used; see the
+ * changes in i40e_aq_add_cloud_filters.
+ */
+ pfilter->general_fields[0] = tunnel_filter->inner_vlan;
+ pfilter->general_fields[1] = tunnel_filter->outer_vlan;
+ big_buffer = 1;
+ break;
+ default:
+ /* Other tunnel types are not supported. */
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
+ pfilter->element.flags |=
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ else {
+ val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+ &pfilter->element.flags);
+ if (val < 0) {
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+ }
+
+ pfilter->element.flags |= rte_cpu_to_le_16(
+ I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+ ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+ pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->element.queue_number =
+ rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+ if (!tunnel_filter->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ if (tunnel_filter->vf_id >= pf->vf_num) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+ vf = &pf->vfs[tunnel_filter->vf_id];
+ vsi = vf->vsi;
+ }
+
+ /* Check whether the filter is already in the SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ check_filter.is_to_vf = tunnel_filter->is_to_vf;
+ check_filter.vf_id = tunnel_filter->vf_id;
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
+ if (big_buffer)
+ ret = i40e_aq_add_cloud_filters_big_buffer(hw,
+ vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_add_cloud_filters(hw,
+ vsi->seid, &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return -ENOTSUP;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ if (tunnel == NULL) {
+ rte_free(cld_filter);
+ return -ENOMEM;
+ }
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
+ if (big_buffer)
+ ret = i40e_aq_remove_cloud_filters_big_buffer(
+ hw, vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return -ENOTSUP;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
rte_free(cld_filter);
return ret;
@@ -6554,8 +7284,9 @@ i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
/* Now check if there is space to add the new port */
idx = i40e_get_vxlan_port_idx(pf, 0);
if (idx < 0) {
- PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
- "not adding port %d", port);
+ PMD_DRV_LOG(ERR,
+ "Maximum number of UDP ports reached, not adding port %d",
+ port);
return -ENOSPC;
}
@@ -6794,7 +7525,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
int ret = -EINVAL;
val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
- PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
+ PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
if (len == 3) {
reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
@@ -6813,7 +7544,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
} else {
ret = 0;
}
- PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
+ PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
return ret;
@@ -6926,15 +7657,15 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
if (enable > 0) {
if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
- PMD_DRV_LOG(INFO, "Symmetric hash has already "
- "been enabled");
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been enabled");
return;
}
reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
} else {
if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
- PMD_DRV_LOG(INFO, "Symmetric hash has already "
- "been disabled");
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been disabled");
return;
}
reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
@@ -7051,23 +7782,60 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
pctype = i40e_flowtype_to_pctype(i);
reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
+ reg);
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
+ reg);
+ }
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ }
}
reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
/* Toeplitz */
if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to "
- "Toeplitz");
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Toeplitz");
goto out;
}
reg |= I40E_GLQF_CTL_HTOEP_MASK;
} else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
/* Simple XOR */
if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to "
- "Simple XOR");
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Simple XOR");
goto out;
}
reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
@@ -7110,7 +7878,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7129,7 +7896,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7139,7 +7905,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7149,7 +7914,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7183,7 +7947,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7202,7 +7965,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7212,7 +7974,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7222,7 +7983,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7262,7 +8022,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
@@ -7273,19 +8032,16 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
@@ -7307,7 +8063,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
@@ -7318,19 +8073,16 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
@@ -7374,7 +8126,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
@@ -7383,22 +8135,18 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7410,22 +8158,18 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7681,10 +8425,10 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
- PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
if (reg != val)
i40e_write_rx_ctl(hw, addr, val);
- PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
(uint32_t)i40e_read_rx_ctl(hw, addr));
}
@@ -7995,16 +8739,95 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if the ethertype filter already exists */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add an ethertype filter to the SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete an ethertype filter from the SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
 * Configure ethertype filter, which can direct packets by filtering
 * with MAC address and ether_type, or ether_type only
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -8015,13 +8838,29 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
}
if (filter->ether_type == ETHER_TYPE_IPv4 ||
filter->ether_type == ETHER_TYPE_IPv6) {
- PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
- " control packet filter.", filter->ether_type);
+ PMD_DRV_LOG(ERR,
+ "unsupported ether_type(0x%04x) in control packet filter.",
+ filter->ether_type);
return -EINVAL;
}
if (filter->ether_type == ETHER_TYPE_VLAN)
- PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
- " not supported.");
+ PMD_DRV_LOG(WARNING,
+ "filter vlan ether_type in first tag is not supported.");
+
+ /* Check whether the filter is already in the SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
@@ -8036,14 +8875,25 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
pf->main_vsi->seid,
filter->queue, add, &stats, NULL);
- PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
- " mac_etype_used = %u, etype_used = %u,"
- " mac_etype_free = %u, etype_free = %u\n",
- ret, stats.mac_etype_used, stats.etype_used,
- stats.mac_etype_free, stats.etype_free);
+ PMD_DRV_LOG(INFO,
+ "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
+ ret, stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ if (ethertype_filter == NULL)
+ return -ENOMEM;
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
}
/*
@@ -8078,7 +8928,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
FALSE);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -ENOSYS;
break;
}
@@ -8116,6 +8966,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -8133,10 +8988,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
static void
i40e_enable_extended_tag(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint32_t buf = 0;
int ret;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CAP_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8149,7 +9005,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
}
buf = 0;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8161,7 +9017,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
return;
}
buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
- ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
@@ -8224,18 +9080,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
@@ -8243,18 +9095,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
@@ -8369,9 +9217,9 @@ i40e_configure_registers(struct i40e_hw *hw)
ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
reg_table[i].val, NULL);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
- "address of 0x%"PRIx32, reg_table[i].val,
- reg_table[i].addr);
+ PMD_DRV_LOG(ERR,
+ "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
+ reg_table[i].val, reg_table[i].addr);
break;
}
PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
@@ -8416,8 +9264,9 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
I40E_VSI_L2TAGSTXVALID(
vsi->vsi_id), reg, NULL);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to update "
- "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
+ PMD_DRV_LOG(ERR,
+ "Failed to update VSI_L2TAGSTXVALID[%d]",
+ vsi->vsi_id);
return I40E_ERR_CONFIG;
}
}
@@ -8468,11 +9317,10 @@ i40e_aq_add_mirror_rule(struct i40e_hw *hw,
rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
- PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
- "rule_id = %u"
- " mirror_rules_used = %u, mirror_rules_free = %u,",
- hw->aq.asq_last_status, resp->rule_id,
- resp->mirror_rules_used, resp->mirror_rules_free);
+ PMD_DRV_LOG(INFO,
+ "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
+ hw->aq.asq_last_status, resp->rule_id,
+ resp->mirror_rules_used, resp->mirror_rules_free);
*rule_id = rte_le_to_cpu_16(resp->rule_id);
return status;
@@ -8550,8 +9398,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
- PMD_DRV_LOG(ERR, "mirror rule can not be configured"
- " without veb or vfs.");
+ PMD_DRV_LOG(ERR,
+ "mirror rule can not be configured without veb or vfs.");
return -ENOSYS;
}
if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
@@ -8583,9 +9431,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
- " ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
@@ -8674,9 +9522,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
mirr_rule->rule_type, mirr_rule->entries,
j, &rule_id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to add mirror rule:"
- " ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to add mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
rte_free(mirr_rule);
return -ENOSYS;
}
@@ -8728,9 +9576,9 @@ i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
- " status = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: status = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
@@ -9162,9 +10010,9 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
&veb_bw, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation"
- " per TC failed = %d",
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "AQ command Config switch_comp BW allocation per TC failed = %d",
+ hw->aq.asq_last_status);
return ret;
}
@@ -9172,16 +10020,18 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
&ets_query, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS"
- " configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp ETS configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
memset(&bw_query, 0, sizeof(bw_query));
ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
&bw_query, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth"
- " configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp bandwidth configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
@@ -9246,8 +10096,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
}
ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
- " per TC failed = %d",
+ PMD_INIT_LOG(ERR,
+ "AQ command Config VSI BW allocation per TC failed = %d",
hw->aq.asq_last_status);
goto out;
}
@@ -9268,9 +10118,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
/* Update the VSI after updating the VSI queue-mapping information */
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to configure "
- "TC queue mapping = %d",
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
+ hw->aq.asq_last_status);
goto out;
}
/* update the local VSI info with updated queue map */
@@ -9322,8 +10171,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
/* Use the FW API if FW > v4.4*/
if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
(hw->aq.fw_maj_ver >= 5))) {
- PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API"
- " to configure DCB");
+ PMD_INIT_LOG(ERR,
+ "FW < v4.4, can not use FW LLDP API to configure DCB");
return I40E_ERR_FIRMWARE_API_VERSION;
}
@@ -9338,8 +10187,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
old_cfg->etsrec = old_cfg->etscfg;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Set DCB Config failed, err %s aq_err %s\n",
+ PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
i40e_stat_str(hw, ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
@@ -9371,7 +10219,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VEB seid=%d\n",
+ "Failed configuring TC for VEB seid=%d",
main_vsi->veb->seid);
}
/* Update each VSI */
@@ -9389,8 +10237,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
I40E_DEFAULT_TCMAP);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VSI seid=%d\n",
- vsi_list->vsi->seid);
+ "Failed configuring TC for VSI seid=%d",
+ vsi_list->vsi->seid);
/* continue */
}
}
@@ -9409,7 +10257,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret = 0;
+ int i, ret = 0;
if ((pf->flags & I40E_FLAG_DCB) == 0) {
PMD_INIT_LOG(ERR, "HW doesn't support DCB");
@@ -9436,6 +10284,9 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
hw->local_dcbx_config.etscfg.tsatable[0] =
I40E_IEEE_TSA_ETS;
+ /* all UPs mapping to TC0 */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
hw->local_dcbx_config.etsrec =
hw->local_dcbx_config.etscfg;
hw->local_dcbx_config.pfc.willing = 0;
@@ -9450,15 +10301,15 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
I40E_APP_PROTOID_FCOE;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(ERR, "default dcb config fails."
- " err = %d, aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "default dcb config fails. err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
} else {
- PMD_INIT_LOG(ERR, "DCB initialization in FW fails,"
- " err = %d, aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "DCB initialization in FW fails, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
} else {
@@ -9469,14 +10320,14 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
ret = i40e_init_dcb(hw);
if (!ret) {
if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- PMD_INIT_LOG(ERR, "HW doesn't support"
- " DCBX offload.");
+ PMD_INIT_LOG(ERR,
+ "HW doesn't support DCBX offload.");
return -ENOTSUP;
}
} else {
- PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
- " aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "DCBX configuration failed, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
}
@@ -9585,7 +10436,8 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -9610,7 +10462,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
I40E_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
return 0;
}
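
Applications reach this callback through the generic RX interrupt API; a hedged sketch of the usual enable/wait/disable sequence (the epoll wait on the queue's interrupt fd is elided):

#include <rte_ethdev.h>

static void
rx_intr_wait(uint8_t port, uint16_t queue)
{
	rte_eth_dev_rx_intr_enable(port, queue);
	/* ... block on the interrupt fd, e.g. via rte_epoll_wait() ... */
	rte_eth_dev_rx_intr_disable(port, queue);
}
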
@@ -9618,7 +10470,8 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -9740,8 +10593,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_eth_dev_data *dev_data = pf->dev_data;
- uint32_t frame_size = mtu + ETHER_HDR_LEN
- + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
+ uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
int ret = 0;
/* check if mtu is within the allowed range */
@@ -9750,8 +10602,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* mtu setting is forbidden if port is start */
if (dev_data->dev_started) {
- PMD_DRV_LOG(ERR,
- "port %d must be stopped before configuration\n",
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
dev_data->port_id);
return -EBUSY;
}
@@ -9765,3 +10616,230 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+	struct i40e_control_filter_stats stats = {0};
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+	PMD_DRV_LOG(INFO,
+		    "Ethertype filter: mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
+		    stats.mac_etype_used, stats.etype_used,
+		    stats.mac_etype_free, stats.etype_free);
+}
+
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_pf_vf *vf;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ bool big_buffer = 0;
+
+	TAILQ_FOREACH(f, tunnel_list, rules) {
+		big_buffer = 0;	/* re-evaluated for every filter */
+		if (!f->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ vf = &pf->vfs[f->vf_id];
+ vsi = vf->vsi;
+ }
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
+ (struct ether_addr *)&cld_filter.element.outer_mac);
+ ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
+ (struct ether_addr *)&cld_filter.element.inner_mac);
+ cld_filter.element.inner_vlan = f->input.inner_vlan;
+ cld_filter.element.flags = f->input.flags;
+ cld_filter.element.tenant_id = f->input.tenant_id;
+ cld_filter.element.queue_number = f->queue;
+ rte_memcpy(cld_filter.general_fields,
+ f->input.general_fields,
+ sizeof(f->input.general_fields));
+
+ if (((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+ ((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+ ((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ big_buffer = 1;
+
+ if (big_buffer)
+ i40e_aq_add_cloud_filters_big_buffer(hw,
+ vsi->seid, &cld_filter, 1);
+ else
+ i40e_aq_add_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
+ }
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
+}
+
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+ if (strcmp(dev->data->drv_name,
+ drv->driver.name))
+ return false;
+
+ return true;
+}
+
+bool
+is_i40e_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_i40e_pmd);
+}
+
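
is_i40e_supported() lets the new rte_pmd_i40e_* private API refuse ports bound to other drivers. A sketch of the guard as a caller would use it (only the port check is shown; the wrapped operation is omitted):

#include <errno.h>
#include <rte_ethdev.h>

static int
pmd_i40e_op_guard(uint8_t port)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;	/* not an i40e port */

	return 0;	/* safe to touch i40e private data */
}
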
+/* Create a QinQ cloud filter
+ *
+ * The Fortville NIC has limited resources for tunnel filters,
+ * so we can only reuse existing filters.
+ *
+ * In step 1 we define which Field Vector fields can be used for
+ * filter types.
+ * As we do not have the inner tag defined as a field,
+ * we have to define it first, by reusing one of the L1 entries.
+ *
+ * In step 2 we replace one of the existing filter types with
+ * a new one for QinQ.
+ * Since we reuse an L1 entry and replace an L2 entry, some of the
+ * default filter types will disappear; which ones depends on the
+ * L1 and L2 entries we reuse.
+ *
+ * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
+ *
+ * 1. Create L1 filter of outer vlan (12b) which will be in use
+ * later when we define the cloud filter.
+ * a. Valid_flags.replace_cloud = 0
+ * b. Old_filter = 10 (Stag_Inner_Vlan)
+ * c. New_filter = 0x10
+ * d. TR bit = 0xff (optional, not used here)
+ * e. Buffer – 2 entries:
+ * i. Byte 0 = 8 (outer vlan FV index).
+ * Byte 1 = 0 (rsv)
+ * Byte 2-3 = 0x0fff
+ * ii. Byte 0 = 37 (inner vlan FV index).
+ *               Byte 1 = 0 (rsv)
+ * Byte 2-3 = 0x0fff
+ *
+ * Step 2:
+ * 2. Create cloud filter using two L1 filters entries: stag and
+ * new filter(outer vlan+ inner vlan)
+ * a. Valid_flags.replace_cloud = 1
+ * b. Old_filter = 1 (instead of outer IP)
+ * c. New_filter = 0x10
+ * d. Buffer – 2 entries:
+ * i. Byte 0 = 0x80 | 7 (valid | Stag).
+ * Byte 1-3 = 0 (rsv)
+ * ii. Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
+ * Byte 9-11 = 0 (rsv)
+ */
+static int
+i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
+{
+ int ret = -ENOTSUP;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ /* Init */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace.tr_bit = 0;
+
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
+ filter_replace_buf.data[2] = 0xff;
+ filter_replace_buf.data[3] = 0x0f;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
+ filter_replace_buf.data[6] = 0xff;
+ filter_replace_buf.data[7] = 0x0f;
+ ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ /* Apply the second L2 cloud filter */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L2 filter, input for L2 filter will be L1 filter */
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return ret;
+}
+
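
Since the replace command consumes shared filter resources, it should run at most once per port. A sketch of the one-shot guard using the qinq_replace_flag field added to struct i40e_pf (its exact call site in the flow-parsing path is assumed):

static void
qinq_filter_prepare(struct i40e_pf *pf)
{
	/* Replace the default filter types only once per port. */
	if (pf->qinq_replace_flag)
		return;

	if (i40e_cloud_filter_qinq_create(pf) < 0)
		PMD_DRV_LOG(DEBUG, "QinQ cloud filter creation failed.");

	pf->qinq_replace_flag = 1;
}
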
+RTE_INIT(i40e_init_log);
+static void
+i40e_init_log(void)
+{
+ i40e_logtype_init = rte_log_register("pmd.i40e.init");
+ if (i40e_logtype_init >= 0)
+ rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
+ i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
+ if (i40e_logtype_driver >= 0)
+ rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
+}
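
Both log types default to NOTICE, so DEBUG-level driver messages stay hidden. A sketch of raising the level, assuming the lines are placed at the end of i40e_init_log() in a development build:

	/* Hypothetical debug tweak: make driver logs verbose. */
	if (i40e_logtype_driver >= 0)
		rte_log_set_level(i40e_logtype_driver, RTE_LOG_DEBUG);
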
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 5f3ecd9a..2ff8282f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,6 +37,8 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -126,6 +128,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_FLAG_FDIR (1ULL << 6)
#define I40E_FLAG_VXLAN (1ULL << 7)
#define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8)
+#define I40E_FLAG_VF_MAC_BY_PF (1ULL << 9)
#define I40E_FLAG_ALL (I40E_FLAG_RSS | \
I40E_FLAG_DCB | \
I40E_FLAG_VMDQ | \
@@ -134,7 +137,8 @@ enum i40e_flxpld_layer_idx {
I40E_FLAG_HEADER_SPLIT_ENABLED | \
I40E_FLAG_FDIR | \
I40E_FLAG_VXLAN | \
- I40E_FLAG_RSS_AQ_CAPABLE)
+ I40E_FLAG_RSS_AQ_CAPABLE | \
+ I40E_FLAG_VF_MAC_BY_PF)
#define I40E_RSS_OFFLOAD_ALL ( \
ETH_RSS_FRAG_IPV4 | \
@@ -188,6 +192,72 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
+ I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+#define I40E_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
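
These bits compose into per-flow input-set masks. As a hedged illustration (not a specific driver default), the classic IPv4/UDP 4-tuple would be expressed as:

	uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;
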
+/**
+ * The overhead from MTU to max frame size.
+ * Considering a QinQ packet, the VLAN tag needs to be counted twice.
+ */
+#define I40E_ETH_OVERHEAD \
+ (ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)
+
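
As a worked check of the macro (standard field sizes assumed): a 14-byte Ethernet header + 4-byte CRC + two 4-byte VLAN tags = 26 bytes of overhead, so:

	uint32_t frame_size = 1500 + I40E_ETH_OVERHEAD;	/* = 1526 bytes */
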
struct i40e_adapter;
/**
@@ -242,6 +312,7 @@ struct i40e_veb {
uint16_t stats_idx;
struct i40e_eth_stats stats;
uint8_t enabled_tc; /* The traffic class enabled */
+ uint8_t strict_prio_tc; /* bit map of TCs set to strict priority mode */
struct i40e_bw_info bw_info; /* VEB bandwidth information */
};
@@ -300,6 +371,8 @@ struct i40e_vsi {
uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
uint16_t nb_msix; /* The max number of msix vector */
uint8_t enabled_tc; /* The traffic class enabled */
+ uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */
+ uint8_t vlan_filter_on; /* The VLAN filter enabled */
struct i40e_bw_info bw_info; /* VSI bandwidth information */
};
@@ -376,6 +449,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -394,6 +475,122 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+	uint16_t queue; /* Queue used when the filter matches */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
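
A sketch of how such a rule container could be initialized: an rte_hash keyed on the filter input plus a position-indexed map, assuming CRC hashing and illustrative names (the driver's actual init lives in i40e_ethdev.c):

#include <errno.h>
#include <sys/queue.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_malloc.h>

static int
ethertype_rule_init(struct i40e_ethertype_rule *rule, int socket_id)
{
	struct rte_hash_parameters params = {
		.name = "i40e_ethertype_hash",	/* illustrative name */
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.socket_id = socket_id,
	};

	TAILQ_INIT(&rule->ethertype_list);
	rule->hash_table = rte_hash_create(&params);
	if (rule->hash_table == NULL)
		return -EINVAL;

	rule->hash_map = rte_zmalloc("ethertype_hash_map",
			sizeof(struct i40e_ethertype_filter *) *
			I40E_MAX_ETHERTYPE_FILTER_NUM, 0);
	if (rule->hash_map == NULL) {
		rte_hash_free(rule->hash_table);
		return -ENOMEM;
	}

	return 0;
}
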
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
+#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
+#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+
+enum i40e_tunnel_iptype {
+ I40E_TUNNEL_IPTYPE_IPV4,
+ I40E_TUNNEL_IPTYPE_IPV6,
+};
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+	uint16_t inner_vlan; /* Inner VLAN ID to match */
+ enum i40e_tunnel_iptype ip_type;
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+ uint16_t general_fields[32]; /* Big buffer */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint8_t is_to_vf; /* 0 - to PF, 1 - to VF */
+	uint16_t vf_id;   /* VF id, available when is_to_vf is 1. */
+	uint16_t queue; /* Queue used when the filter matches */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/**
+ * Tunnel type.
+ */
+enum i40e_tunnel_type {
+ I40E_TUNNEL_TYPE_NONE = 0,
+ I40E_TUNNEL_TYPE_VXLAN,
+ I40E_TUNNEL_TYPE_GENEVE,
+ I40E_TUNNEL_TYPE_TEREDO,
+ I40E_TUNNEL_TYPE_NVGRE,
+ I40E_TUNNEL_TYPE_IP_IN_GRE,
+ I40E_L2_TUNNEL_TYPE_E_TAG,
+ I40E_TUNNEL_TYPE_MPLSoUDP,
+ I40E_TUNNEL_TYPE_MPLSoGRE,
+ I40E_TUNNEL_TYPE_QINQ,
+ I40E_TUNNEL_TYPE_MAX,
+};
+
+/**
+ * Tunneling Packet filter configuration.
+ */
+struct i40e_tunnel_filter_conf {
+ struct ether_addr outer_mac; /**< Outer MAC address to match. */
+ struct ether_addr inner_mac; /**< Inner MAC address to match. */
+ uint16_t inner_vlan; /**< Inner VLAN to match. */
+ uint32_t outer_vlan; /**< Outer VLAN to match */
+ enum i40e_tunnel_iptype ip_type; /**< IP address type. */
+ /**
+ * Outer destination IP address to match if ETH_TUNNEL_FILTER_OIP
+ * is set in filter_type, or inner destination IP address to match
+ * if ETH_TUNNEL_FILTER_IIP is set in filter_type.
+ */
+ union {
+ uint32_t ipv4_addr; /**< IPv4 address in big endian. */
+ uint32_t ipv6_addr[4]; /**< IPv6 address in big endian. */
+ } ip_addr;
+ /** Flags from ETH_TUNNEL_FILTER_XX - see above. */
+ uint16_t filter_type;
+ enum i40e_tunnel_type tunnel_type; /**< Tunnel Type. */
+ uint32_t tenant_id; /**< Tenant ID to match. VNI, GRE key... */
+	uint16_t queue_id;      /**< Queue used when the filter matches. */
+	uint8_t is_to_vf;       /**< 0 - to PF, 1 - to VF */
+	uint16_t vf_id;         /**< VF id, available when is_to_vf is 1. */
};
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
@@ -418,6 +615,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Struct to store flow created.
+ */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+TAILQ_HEAD(i40e_flow_list, rte_flow);
+
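
A sketch of how the flow create path would wrap a driver rule in this tracking node and queue it on pf->flow_list (simplified; the rule pointer is assumed to be a node already stored in the matching SW filter list):

#include <sys/queue.h>
#include <rte_malloc.h>

static struct rte_flow *
flow_track(struct i40e_pf *pf, enum rte_filter_type type, void *rule)
{
	struct rte_flow *flow = rte_zmalloc("i40e_flow", sizeof(*flow), 0);

	if (flow == NULL)
		return NULL;

	flow->filter_type = type;	/* e.g. RTE_ETH_FILTER_ETHERTYPE */
	flow->rule = rule;
	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);

	return flow;
}
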
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -466,12 +674,17 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
+ bool mpls_replace_flag; /* 1 - MPLS filter replace is done */
+ bool qinq_replace_flag; /* QINQ filter replace is done */
};
enum pending_msg {
@@ -538,6 +751,8 @@ struct i40e_vf {
uint64_t flags;
};
+#define I40E_MAX_PKT_TYPE 256
+
/*
* Structure to store private data for each PF/VF instance.
*/
@@ -562,6 +777,29 @@ struct i40e_adapter {
struct rte_timecounter systime_tc;
struct rte_timecounter rx_tstamp_tc;
struct rte_timecounter tx_tstamp_tc;
+
+ /* ptype mapping table */
+ uint32_t ptype_tbl[I40E_MAX_PKT_TYPE] __rte_cache_min_aligned;
+};
+
+extern const struct rte_flow_ops i40e_flow_ops;
+
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+ struct i40e_tunnel_filter_conf consistent_tunnel_filter;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
};
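
Each supported pattern pairs an END-terminated item array with the parser that turns it into one of the union members above. A sketch of a single table entry (the parser callback name is illustrative):

static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern valid_patterns[] = {
	/* ETH-only pattern handled by an ethertype-filter parser */
	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
};
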
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
@@ -605,6 +843,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
@@ -616,6 +855,46 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct rte_eth_fdir_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
+int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
+int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num, struct ether_addr *addr);
+int i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total);
+void i40e_set_vlan_filter(struct i40e_vsi *vsi, uint16_t vlan_id, bool on);
+int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total);
+bool is_i40e_supported(struct rte_eth_dev *dev);
+
+#define I40E_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 640d316a..859b5e8f 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -55,6 +55,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -135,10 +136,10 @@ static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
uint16_t tx_queue_id);
static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
uint16_t tx_queue_id);
-static void i40evf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr,
- uint32_t index,
- uint32_t pool);
+static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ uint32_t pool);
static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -151,6 +152,9 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
+static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
@@ -214,6 +218,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
.rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .rx_descriptor_status = i40e_dev_rx_descriptor_status,
+ .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
@@ -225,6 +231,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.reta_query = i40evf_dev_rss_reta_query,
.rss_hash_update = i40evf_dev_rss_hash_update,
.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
+ .mtu_set = i40evf_dev_mtu_set,
+ .mac_addr_set = i40evf_set_default_mac_addr,
};
/*
@@ -640,7 +648,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
return ret;
}
@@ -693,7 +701,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
return ret;
}
@@ -719,7 +727,8 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
sizeof(struct i40e_virtchnl_vector_map)];
struct i40e_virtchnl_irq_map_info *map_info;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vector_id;
int i, err;
@@ -846,7 +855,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
return 0;
}
-static void
+static int
i40evf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *addr,
__rte_unused uint32_t index,
@@ -864,7 +873,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
addr->addr_bytes[0], addr->addr_bytes[1],
addr->addr_bytes[2], addr->addr_bytes[3],
addr->addr_bytes[4], addr->addr_bytes[5]);
- return;
+ return I40E_ERR_INVALID_MAC_ADDR;
}
list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
@@ -883,23 +892,20 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_ADD_ETHER_ADDRESS");
- return;
+ return err;
}
static void
-i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
{
struct i40e_virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct rte_eth_dev_data *data = dev->data;
- struct ether_addr *addr;
uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
sizeof(struct i40e_virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
- addr = &(data->mac_addrs[index]);
-
if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
addr->addr_bytes[0], addr->addr_bytes[1],
@@ -926,6 +932,17 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
return;
}
+static void
+i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *addr;
+
+ addr = &data->mac_addrs[index];
+
+ i40evf_del_mac_addr_by_addr(dev, addr);
+}
+
static int
i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
@@ -953,7 +970,7 @@ i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
}
static int
-i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+i40evf_get_statistics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
int ret;
struct i40e_eth_stats *pstats = NULL;
@@ -1088,7 +1105,6 @@ static const struct rte_pci_id pci_id_i40evf_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
- { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -1182,7 +1198,6 @@ i40evf_init_vf(struct rte_eth_dev *dev)
int i, err, bufsz;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct ether_addr *p_mac_addr;
uint16_t interval =
i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
@@ -1259,9 +1274,8 @@ i40evf_init_vf(struct rte_eth_dev *dev)
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Store the MAC address configured by host, or generate random one */
- p_mac_addr = (struct ether_addr *)(vf->vsi_res->default_mac_addr);
- if (is_valid_assigned_ether_addr(p_mac_addr)) /* Configured by host */
- ether_addr_copy(p_mac_addr, (struct ether_addr *)hw->mac.addr);
+ if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+ vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
else
eth_random_addr(hw->mac.addr); /* Generate a random one */
@@ -1314,16 +1328,16 @@ i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
switch (pf_msg->event) {
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
break;
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
vf->link_up = pf_msg->event_data.link_event.link_status;
vf->link_speed = pf_msg->event_data.link_event.link_speed;
break;
case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
default:
PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
@@ -1385,7 +1399,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
"expect %u, get %u",
vf->pend_cmd, msg_opc);
PMD_DRV_LOG(DEBUG, "adminq response is received,"
- " opcode = %d\n", msg_opc);
+ " opcode = %d", msg_opc);
}
break;
default:
@@ -1409,8 +1423,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+i40evf_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1423,31 +1436,31 @@ i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
/* No interrupt event indicated */
if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
- PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do\n");
+ PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
goto done;
}
if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
- PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported\n");
+ PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
i40evf_handle_aq_msg(dev);
}
/* Link Status Change interrupt */
if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
- " do nothing\n");
+ " do nothing");
done:
i40evf_enable_irq0(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(dev->intr_handle);
}
static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
- eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct i40e_hw *hw
+ = I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1465,16 +1478,17 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
i40e_set_tx_function(eth_dev);
return 0;
}
-
- rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
-
- hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
- hw->device_id = eth_dev->pci_dev->id.device_id;
- hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
- hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
- hw->bus.device = eth_dev->pci_dev->addr.devid;
- hw->bus.func = eth_dev->pci_dev->addr.function;
- hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+ i40e_set_default_ptype_table(eth_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->adapter_stopped = 0;
if(i40evf_init_vf(eth_dev) != 0) {
@@ -1530,23 +1544,32 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+
+static int eth_i40evf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct i40e_adapter), i40evf_dev_init);
+}
+
+static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, i40evf_dev_uninit);
+}
+
/*
* virtual function driver struct
*/
-static struct eth_driver rte_i40evf_pmd = {
- .pci_drv = {
- .id_table = pci_id_i40evf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = i40evf_dev_init,
- .eth_dev_uninit = i40evf_dev_uninit,
- .dev_private_size = sizeof(struct i40e_adapter),
+static struct rte_pci_driver rte_i40evf_pmd = {
+ .id_table = pci_id_i40evf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_i40evf_pci_probe,
+ .remove = eth_i40evf_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio");
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
@@ -1861,7 +1884,8 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
I40E_WRITE_REG(hw,
@@ -1893,7 +1917,8 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
@@ -1919,7 +1944,8 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1945,7 +1971,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
return 0;
}
@@ -1953,7 +1979,8 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -1997,6 +2024,10 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
}
list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+ }
for (i = begin; i < next_begin; i++) {
addr = &dev->data->mac_addrs[i];
@@ -2033,7 +2064,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
PMD_INIT_FUNC_TRACE();
@@ -2057,7 +2089,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -2098,7 +2130,8 @@ err_queue:
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -2142,6 +2175,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
case I40E_LINK_SPEED_20GB:
new_link.link_speed = ETH_SPEED_NUM_20G;
break;
+ case I40E_LINK_SPEED_25GB:
+ new_link.link_speed = ETH_SPEED_NUM_25G;
+ break;
case I40E_LINK_SPEED_40GB:
new_link.link_speed = ETH_SPEED_NUM_40G;
break;
@@ -2225,6 +2261,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2285,15 +2322,16 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static void
i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- if (i40evf_get_statics(dev, stats))
- PMD_DRV_LOG(ERR, "Get statics failed");
+ if (i40evf_get_statistics(dev, stats))
+ PMD_DRV_LOG(ERR, "Get statistics failed");
}
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
i40evf_dev_stop(dev);
hw->adapter_stopped = 1;
@@ -2301,11 +2339,11 @@ i40evf_dev_close(struct rte_eth_dev *dev)
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
/* disable uio intr before callback unregister */
- rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_disable(intr_handle);
/* unregister callback func from eal lib */
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_unregister(intr_handle,
+ i40evf_dev_interrupt_handler, dev);
i40evf_disable_irq0(hw);
}
@@ -2382,7 +2420,7 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
@@ -2421,7 +2459,7 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
@@ -2566,7 +2604,7 @@ i40evf_config_rss(struct i40e_vf *vf)
if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "RSS not configured\n");
+ PMD_DRV_LOG(DEBUG, "RSS not configured");
return 0;
}
@@ -2583,7 +2621,7 @@ i40evf_config_rss(struct i40e_vf *vf)
rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
+ PMD_DRV_LOG(DEBUG, "No hash flag is set");
return 0;
}
@@ -2643,3 +2681,54 @@ i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+
+static int
+i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = vf->dev_data;
+ uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
+ int ret = 0;
+
+ /* check if mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ return -EINVAL;
+
+	/* mtu setting is forbidden if the port is started */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev_data->dev_conf.rxmode.jumbo_frame = 0;
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
+
+static void
+i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (!is_valid_assigned_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
+ return;
+ }
+
+ if (is_same_ether_addr(mac_addr, dev->data->mac_addrs))
+ return;
+
+ if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
+ return;
+
+ i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs);
+
+ i40evf_add_mac_addr(dev, mac_addr, 0, 0);
+}
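
With the mac_addr_set callback wired up, an application changes the VF's default MAC through the generic ethdev call; a sketch (the port id and the locally administered address are illustrative). Note the callback above silently refuses when the PF assigned the address (I40E_FLAG_VF_MAC_BY_PF):

#include <rte_ethdev.h>
#include <rte_ether.h>

static int
vf_set_mac(uint8_t port)
{
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_default_mac_addr_set(port, &addr);
}
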
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15c..28cc554f 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,7 +119,13 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -251,7 +257,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
/* reserve memory for the fdir programming packet */
snprintf(z_name, sizeof(z_name), "%s_%s_%d",
- eth_dev->driver->pci_drv.driver.name,
+ eth_dev->data->drv_name,
I40E_FDIR_MZ_NAME,
eth_dev->data->port_id);
mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
@@ -1017,13 +1023,81 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check if the flow director filter already exists */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+			    "Failed to insert fdir filter into hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+			    "Failed to delete fdir filter from hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
@@ -1032,6 +1106,9 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1131,22 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+	/* Check if the filter exists in the SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+			"There's no corresponding flow director filter!");
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1170,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ if (!fdir_filter)
+ return -ENOMEM;
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
return ret;
}
@@ -1220,7 +1323,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -1481,3 +1584,30 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
+ guarant_cnt, best_cnt);
+}
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 00000000..24e1c658
--- /dev/null
+++ b/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,2258 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0xFFFF
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter);
+static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
+static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int
+i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+ .create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
+};
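+
+/* These callbacks are reached through the generic rte_flow API once the
+ * ethdev filter_ctrl op hands back &i40e_flow_ops for
+ * RTE_ETH_FILTER_GENERIC. A minimal application-side sketch (port_id and
+ * the attr/pattern/actions arrays are assumed to be set up by the caller):
+ *
+ *	struct rte_flow_error err;
+ *	struct rte_flow *f = NULL;
+ *
+ *	if (!rte_flow_validate(port_id, &attr, pattern, actions, &err))
+ *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
+ */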
+
+union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched tunnel filter */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_mpls_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched QINQ */
+static enum rte_flow_item_type pattern_qinq_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_flow_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
+ /* VXLAN */
+ { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* MPLSoUDP & MPLSoGRE */
+ { pattern_mpls_1, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_2, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_3, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_4, i40e_flow_parse_mpls_filter },
+ /* QINQ */
+ { pattern_qinq_1, i40e_flow_parse_qinq_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
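+
+/* Example: with actions = { VOID, QUEUE, VOID, END } and index = 0, the
+ * macro leaves act pointing at the QUEUE action and index == 1; after the
+ * caller's index++ a second invocation lands on END.
+ */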
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
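+
+/* Example: the pattern ETH, VOID, IPV4, VOID, UDP, END is compacted to
+ * ETH, IPV4, UDP, END. Runs of non-VOID items are bulk-copied and the END
+ * item is appended last; the caller must size "items" for the non-VOID
+ * count plus the END item.
+ */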
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
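+
+/* Both arrays must reach END together: ETH, END does not match the
+ * supported array ETH, IPV4, END, and vice versa, so each table entry
+ * describes exactly one item sequence.
+ */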
+
+/* Find the parse filter function that matches the pattern, if any */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static uint16_t
+i40e_get_outer_vlan(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint16_t reg_id;
+ uint16_t tpid;
+
+ if (qinq)
+ reg_id = 2;
+ else
+ reg_id = 3;
+
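+
+ /* L2TAGCTRL register 2 holds the outer TPID when QinQ is enabled;
+ * register 3 holds the (single or outer) TPID otherwise. Reading it
+ * back returns the TPID the port is currently configured with, which
+ * matches the TPID programming done elsewhere in the driver.
+ */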
+ i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ &reg_r, NULL);
+
+ tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
+
+ return tpid;
+}
+
+/* 1. The "last" field of each item should be NULL as ranges are not
+ * supported.
+ * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
+ * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
+ * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
+ * FF:FF:FF:FF:FF:FF.
+ * 5. Ether_type mask should be 0xFFFF.
+ */
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+ uint16_t outer_tpid;
+
+ outer_tpid = i40e_get_outer_vlan(dev);
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6 ||
+ filter->ether_type == ETHER_TYPE_LLDP ||
+ filter->ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
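+
+/* A pattern accepted above, as a sketch (values illustrative; the spec
+ * ether type must be given in network byte order):
+ *
+ *	struct rte_flow_item_eth spec = { .type = <0x8863, big endian> };
+ *	struct rte_flow_item_eth mask = { .type = 0xFFFF };
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ *		  .spec = &spec, .mask = &mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ */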
+
+/* Ethertype action only supports QUEUE or DROP. */
+static int
+i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -rte_errno;
+ }
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_ethertype_action(dev, actions, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
+ return ret;
+}
+
+/* 1. The "last" field of each item should be NULL as ranges are not
+ * supported.
+ * 2. Supported flow type and input set: refer to array
+ * default_inset_table in i40e_ethdev.c.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t flag_offset;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ uint32_t j;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if (eth_spec || eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH spec/mask");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (!ipv4_spec || !ipv4_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv4 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ flag_offset =
+ rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+ if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+ flag_offset & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (!ipv6_spec || !ipv6_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv6 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ /* SRC and DST addresses of IPv6 must be fully matched (mask all 1s) */
+ for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+ if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+ ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+ }
+
+ input_set |= I40E_INSET_IPV6_SRC;
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(ipv6_spec->hdr.vtc_flow <<
+ I40E_IPV4_TC_SHIFT);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!tcp_spec || !tcp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL TCP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port != UINT16_MAX ||
+ tcp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (!udp_spec || !udp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL UDP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check UDP mask and update input set */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port != UINT16_MAX ||
+ udp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (!sctp_spec || !sctp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL SCTP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port != UINT16_MAX ||
+ sctp_mask->hdr.dst_port != UINT16_MAX ||
+ sctp_mask->hdr.tag != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ if (!vf_spec) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL VF spec");
+ return -rte_errno;
+ }
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VF ID for FDIR.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ pctype = i40e_flowtype_to_pctype(flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported flow type");
+ return -rte_errno;
+ }
+
+ if (input_set != i40e_get_default_input_set(pctype)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid input set.");
+ return -rte_errno;
+ }
+ filter->input.flow_type = flow_type;
+
+ return 0;
+}
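+
+/* Note: the input set collected from the masks must equal the default
+ * input set of the matched pctype exactly; partially masked fields are
+ * rejected rather than narrowed, as the flow director input set is
+ * programmed per pctype rather than per rule.
+ */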
+
+/* Parse to get the action info of a FDIR filter.
+ * FDIR action supports QUEUE or DROP, optionally followed by MARK.
+ */
+static int
+i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ filter->action.flex_off = 0;
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ /* Only dereference act->conf for QUEUE; it is NULL for DROP. */
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.rx_queue = act_q->index;
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
+ } else {
+ filter->action.behavior = RTE_ETH_FDIR_REJECT;
+ }
+
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+
+ /* Check if the next non-void item is MARK or END. */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->soft_id = mark_spec->id;
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
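+
+/* An action list accepted above, as a sketch (values illustrative):
+ *
+ *	struct rte_flow_action_queue q = { .index = 3 };
+ *	struct rte_flow_action_mark m = { .id = 0x1234 };
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &m },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ */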
+
+static int
+i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_fdir_filter *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Parse to get the action info of a tunnel filter
+ * Tunnel action only supports PF, VF and QUEUE.
+ */
+static int
+i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *act_vf;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is PF or VF. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
+ act->type != RTE_FLOW_ACTION_TYPE_VF) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ filter->vf_id = act_vf->id;
+ filter->is_to_vf = 1;
+ if (filter->vf_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid VF ID for tunnel filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Check if the next non-void item is QUEUE */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+ if ((!filter->is_to_vf) &&
+ (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ } else if (filter->is_to_vf &&
+ (filter->queue_id >= pf->vf_nb_qps)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+ uint32_t j;
+ int is_masked = 0;
+
+ for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
+ if (*(mask + j) == UINT8_MAX) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 0;
+ } else if (*(mask + j) == 0) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return is_masked;
+}
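+
+/* Return value convention above: 0 when all three VNI mask bytes are 0xFF
+ * (VNI must match), 1 when all three are 0x00 (VNI ignored), and -EINVAL
+ * when the bytes are mixed or partially set.
+ */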
+
+/* 1. The "last" field of each item should be NULL as ranges are not
+ * supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+ const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec = NULL;
+ const struct rte_flow_item_eth *i_eth_mask = NULL;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ bool is_vni_masked = 0;
+ enum rte_flow_item_type item_type;
+ bool vxlan_flag = 0;
+ uint32_t tenant_id_be = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of the inner MAC must be fully
+ * matched (mask all 1s); SRC address must be
+ * masked out (mask all 0s).
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!vxlan_flag)
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ else
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ }
+
+ if (!vxlan_flag) {
+ o_eth_spec = eth_spec;
+ o_eth_mask = eth_mask;
+ } else {
+ i_eth_spec = eth_spec;
+ i_eth_mask = eth_mask;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vxlan_flag) {
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ } else {
+ /* An outer VLAN (before VXLAN) is not
+ * supported; spec and mask must both be NULL.
+ */
+ if (vlan_spec || vlan_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* UDP is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec =
+ (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, either spec or mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_mask) {
+ is_vni_masked =
+ i40e_check_tenant_id_mask(vxlan_mask->vni);
+ if (is_vni_masked < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+ }
+ vxlan_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check specification and mask to get the filter type */
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ /* If there's inner vlan */
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+ & I40E_TCI_MASK;
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
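+ /* The 24-bit VNI is copied into the low three bytes of a
+ * zeroed 32-bit big-endian word; the byte swap then yields
+ * the VNI value in the low 24 bits of tenant_id.
+ */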
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ /* If there's no inner vlan */
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else if (o_eth_spec && o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask) {
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Not supported by tunnel filter.");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. The "last" field of each item should be NULL as ranges are not
+ * supported.
+ * 2. Supported filter types: MPLS label.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_mpls *mpls_spec;
+ const struct rte_flow_item_mpls *mpls_mask;
+ enum rte_flow_item_type item_type;
+ bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
+ const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
+ uint32_t label_be = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* UDP is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ is_mplsoudp = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ /* GRE is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GRE item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ mpls_spec =
+ (const struct rte_flow_item_mpls *)item->spec;
+ mpls_mask =
+ (const struct rte_flow_item_mpls *)item->mask;
+
+ if (!mpls_spec || !mpls_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MPLS item");
+ return -rte_errno;
+ }
+
+ if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MPLS label mask");
+ return -rte_errno;
+ }
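+ /* label_tc_s packs label(20) | TC(3) | S(1); after the
+ * byte swap, shifting right by 4 drops TC and S and
+ * leaves the 20-bit label as the tenant id.
+ */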
+ rte_memcpy(((uint8_t *)&label_be + 1),
+ mpls_spec->label_tc_s, 3);
+ filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (is_mplsoudp)
+ filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
+ else
+ filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_mpls_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. The "last" field of each item should be NULL as ranges are not
+ * supported.
+ * 2. Supported filter types: QINQ.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ const struct rte_flow_item_vlan *i_vlan_spec = NULL;
+ const struct rte_flow_item_vlan *i_vlan_mask = NULL;
+ const struct rte_flow_item_vlan *o_vlan_spec = NULL;
+ const struct rte_flow_item_vlan *o_vlan_mask = NULL;
+
+ enum rte_flow_item_type item_type;
+ bool vlan_flag = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+
+ if (!vlan_flag) {
+ o_vlan_spec = vlan_spec;
+ o_vlan_mask = vlan_mask;
+ vlan_flag = 1;
+ } else {
+ i_vlan_spec = vlan_spec;
+ i_vlan_mask = vlan_mask;
+ vlan_flag = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* Get filter specification */
+ if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
+ (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
+ & I40E_TCI_MASK;
+ filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
+ & I40E_TCI_MASK;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
+ return 0;
+}
+
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_qinq_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items; /* internal pattern w/o VOID items */
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ /* Find if there's matched parse filter function */
+ parse_filter = i40e_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ rte_free(items);
+ return -rte_errno;
+ }
+
+ ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+ rte_free(items);
+
+ return ret;
+}
+
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_flow *flow;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_consistent_tunnel_filter_set(pf,
+ &cons_filter.consistent_tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ default:
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
+
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_filter_type filter_type = flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_flow_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_flow_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ } else {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ }
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_pf_vf *vf;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ bool big_buffer = 0;
+ int ret = 0;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.element.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.element.inner_mac);
+ cld_filter.element.inner_vlan = filter->input.inner_vlan;
+ cld_filter.element.flags = filter->input.flags;
+ cld_filter.element.tenant_id = filter->input.tenant_id;
+ cld_filter.element.queue_number = filter->queue;
+ rte_memcpy(cld_filter.general_fields,
+ filter->input.general_fields,
+ sizeof(cld_filter.general_fields));
+
+ if (!filter->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ vf = &pf->vfs[filter->vf_id];
+ vsi = vf->vsi;
+ }
+
+ if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ big_buffer = 1;
+
+ if (big_buffer)
+ ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
+ &cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
+ if (ret < 0)
+ return -ENOTSUP;
+
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_flow_flush_fdir_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_ethertype_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to ethertype flush flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_tunnel_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
+ return ret;
+}
+
+static int
+i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret;
+
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all ethertype filters */
+static int
+i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_flow_destroy_ethertype_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all tunnel filters */
+static int
+i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_flow_destroy_tunnel_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
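
The validate/create/destroy/flush handlers above implement the generic
rte_flow ops for i40e, so applications drive them through the rte_flow API
rather than the legacy filter_ctrl path. A minimal sketch of creating the
kind of ethertype rule the parsers above accept (the port id and helper
name are hypothetical, not part of the patch):

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_flow.h>

/* Hypothetical helper: drop all ARP frames received on a port. */
static struct rte_flow *
add_arp_drop_rule(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(0x0806), /* ARP ethertype */
	};
	struct rte_flow_item_eth eth_mask = {
		.type = rte_cpu_to_be_16(0xffff), /* match the type field only */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* i40e_flow_create() first re-runs i40e_flow_validate() above. */
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

Destroying the returned handle lands in i40e_flow_destroy(), which resolves
flow->rule back to the software-list node recorded at creation time.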
diff --git a/drivers/net/i40e/i40e_logs.h b/drivers/net/i40e/i40e_logs.h
index e042e242..8e99cd52 100644
--- a/drivers/net/i40e/i40e_logs.h
+++ b/drivers/net/i40e/i40e_logs.h
@@ -34,14 +34,11 @@
#ifndef _I40E_LOGS_H_
#define _I40E_LOGS_H_
+extern int i40e_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_INIT
+ rte_log(RTE_LOG_ ## level, i40e_logtype_init, "%s(): " fmt "\n", \
+ __func__, ##args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
-#endif
#ifdef RTE_LIBRTE_I40E_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -64,12 +61,10 @@
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
#endif
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+extern int i40e_logtype_driver;
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, i40e_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
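
The macros above now rely on dynamic log types instead of the compile-time
RTE_LIBRTE_I40E_DEBUG_* switches; the i40e_logtype_init and
i40e_logtype_driver variables are registered elsewhere in the patch (in
i40e_ethdev.c). A sketch of what such a registration looks like (the
constructor style and log names are assumptions, not quoted from the patch):

#include <rte_log.h>

int i40e_logtype_init;
int i40e_logtype_driver;

/* Runs at load time and registers the two log types used above. */
static void __attribute__((constructor))
i40e_init_log(void)
{
	i40e_logtype_init = rte_log_register("pmd.i40e.init");
	if (i40e_logtype_init >= 0)
		rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
	i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
	if (i40e_logtype_driver >= 0)
		rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
}

The practical gain is that verbosity becomes a runtime choice (e.g. via the
EAL --log-level option) rather than a rebuild.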
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 97b8eccc..0758503e 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,6 +55,7 @@
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
+#include "rte_pmd_i40e.h"
#define I40E_CFG_CRCSTRIP_DEFAULT 1
@@ -274,14 +275,30 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
}
static void
-i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
struct i40e_virtchnl_version_info info;
- info.major = I40E_DPDK_VERSION_MAJOR;
- info.minor = I40E_DPDK_VERSION_MINOR;
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
- I40E_SUCCESS, (uint8_t *)&info, sizeof(info));
+ /* Respond like a Linux PF host in order to support both DPDK VF and
+ * Linux VF drivers. The cost is that DPDK-host-specific features such
+ * as CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not be available.
+ *
+ * A DPDK VF also cannot identify the host driver from the version
+ * number returned; it always assumes it is talking to a Linux PF.
+ */
+ info.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS,
+ (uint8_t *)&info,
+ sizeof(info));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&info,
+ sizeof(info));
}
static int
@@ -294,13 +311,20 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
}
static int
-i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
struct i40e_virtchnl_vf_resource *vf_res = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint32_t len = 0;
int ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
/* only have 1 VSI by default */
len = sizeof(struct i40e_virtchnl_vf_resource) +
I40E_DEFAULT_VF_VSI_NUM *
@@ -323,8 +347,7 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
/* Change below setting if PF host can support more VSIs for VF */
vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
- /* As assume Vf only has single VSI now, always return 0 */
- vf_res->vsi_res[0].vsi_id = 0;
+ vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
ether_addr_copy(&vf->mac_addr,
(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
@@ -382,6 +405,29 @@ i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
return err;
}
+static inline uint8_t
+i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
+ uint16_t queue_id)
+{
+ struct i40e_aqc_vsi_properties_data *info = &vsi->info;
+ uint16_t bsf, qp_idx;
+ uint8_t i;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
+ I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
+ bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
+ I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+ if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf))
+ return i;
+ }
+ }
+ return 0;
+}
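
To see the decode at work with illustrative values: a tc_mapping word of
0x0403 yields qp_idx = 3 from the offset bits and bsf = 2 from the number
bits, so that TC owns the 1 << 2 = 4 queues 3..6 and
i40e_vsi_get_tc_of_queue() returns its index for any queue_id in that range.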
+
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
struct i40e_pf_vf *vf,
@@ -389,16 +435,20 @@ i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
{
int err = I40E_SUCCESS;
struct i40e_hmc_obj_txq tx_ctx;
+ struct i40e_vsi *vsi = vf->vsi;
uint32_t qtx_ctl;
- uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;
-
+ uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
+ uint8_t dcb_tc;
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(tx_ctx));
- tx_ctx.new_context = 1;
tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->ring_len;
- tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
+ dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
+ tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
+ tx_ctx.head_wb_ena = txq->headwb_enabled;
+ tx_ctx.head_wb_addr = txq->dma_headwb_addr;
+
err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
if (err != I40E_SUCCESS)
return err;
@@ -425,7 +475,8 @@ i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
@@ -434,11 +485,18 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
struct i40e_virtchnl_queue_pair_info *vc_qpi;
int i, ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
vc_vqci->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
@@ -484,7 +542,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
@@ -493,11 +552,19 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
int i, ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
vc_vqcei->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
@@ -539,13 +606,125 @@ send_msg:
return ret;
}
+static void
+i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_vector_map *vvm)
+{
+#define BITS_PER_CHAR 8
+ uint64_t linklistmap = 0, tempmap;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t qid;
+ bool b_first_q = true;
+ enum i40e_queue_type qtype;
+ uint16_t vector_id;
+ uint32_t reg, reg_idx;
+ uint16_t itr_idx = 0, i;
+
+ vector_id = vvm->vector_id;
+ /* setup the head */
+ if (!vector_id)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(
+ ((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
+ + (vector_id - 1));
+
+ if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
+ I40E_WRITE_REG(hw, reg_idx,
+ I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+ goto cfg_irq_done;
+ }
+
+ /* sort all rx and tx queues */
+ tempmap = vvm->rxq_map;
+ for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1ULL << (2 * i));
+ tempmap >>= 1;
+ }
+
+ tempmap = vvm->txq_map;
+ for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1ULL << (2 * i + 1));
+ tempmap >>= 1;
+ }
+
+ /* Link all rx and tx queues into a chained list */
+ tempmap = linklistmap;
+ i = 0;
+ b_first_q = true;
+ do {
+ if (tempmap & 0x1) {
+ qtype = (enum i40e_queue_type)(i % 2);
+ qid = vf->vsi->base_queue + i / 2;
+ if (b_first_q) {
+ /* This is header */
+ b_first_q = false;
+ reg = ((qtype <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+ | qid);
+ } else {
+ /* element in the link list */
+ reg = (vector_id) |
+ (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ }
+ I40E_WRITE_REG(hw, reg_idx, reg);
+ /* find next register to program */
+ switch (qtype) {
+ case I40E_QUEUE_TYPE_RX:
+ reg_idx = I40E_QINT_RQCTL(qid);
+ itr_idx = vvm->rxitr_idx;
+ break;
+ case I40E_QUEUE_TYPE_TX:
+ reg_idx = I40E_QINT_TQCTL(qid);
+ itr_idx = vvm->txitr_idx;
+ break;
+ default:
+ break;
+ }
+ }
+ i++;
+ tempmap >>= 1;
+ } while (tempmap);
+
+ /* Terminate the link list */
+ reg = (vector_id) |
+ (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ I40E_WRITE_REG(hw, reg_idx, reg);
+
+cfg_irq_done:
+ I40E_WRITE_FLUSH(hw);
+}
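
The two loops above interleave the VF's RX and TX bitmaps into a single map
where bit 2i stands for RX queue i and bit 2i + 1 for TX queue i, so the
chained-list walk alternates queue types in ascending queue order. With
illustrative values rxq_map = 0x3 and txq_map = 0x1, linklistmap becomes 0x7
and the programmed chain is RX0 -> TX0 -> RX1, followed by the terminating
entry.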
+
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
- uint8_t *msg, uint16_t msglen)
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_virtchnl_irq_map_info *irqmap =
(struct i40e_virtchnl_irq_map_info *)msg;
+ struct i40e_virtchnl_vector_map *map;
+ int i;
+ uint16_t vector_id;
+ unsigned long qbit_max;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
PMD_DRV_LOG(ERR, "buffer too short");
@@ -553,23 +732,46 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
goto send_msg;
}
- /* Assume VF only have 1 vector to bind all queues */
- if (irqmap->num_vectors != 1) {
- PMD_DRV_LOG(ERR, "DKDK host only support 1 vector");
- ret = I40E_ERR_PARAM;
+ /* The PF host supports both the DPDK VF and Linux VF drivers,
+ * identified by the number of vectors requested.
+ */
+
+ /* A DPDK VF only requires a single vector */
+ if (irqmap->num_vectors == 1) {
+ /* This MSIX interrupt id is stored in the VF's own vector range */
+ vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
+ vf->vsi->nb_msix = irqmap->num_vectors;
+ vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+
+ /* It doesn't matter how the TX/RX queues map to this vector;
+ * link all VF RX queues together. Only the mapping is done here;
+ * the VF can enable/disable the interrupt by itself.
+ */
+ i40e_vsi_queues_bind_intr(vf->vsi);
goto send_msg;
}
- /* This MSIX intr store the intr in VF range */
- vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
- vf->vsi->nb_msix = irqmap->num_vectors;
- vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+ /* Then, it's Linux VF driver */
+ qbit_max = 1 << pf->vf_nb_qp_max;
+ for (i = 0; i < irqmap->num_vectors; i++) {
+ map = &irqmap->vecmap[i];
+
+ vector_id = map->vector_id;
+ /* validate msg params */
+ if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
+ i40e_pf_config_irq_link_list(vf, map);
+ } else {
+ /* configured queue size exceeds the limit */
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
- /* Don't care how the TX/RX queue mapping with this vector.
- * Link all VF RX queues together. Only did mapping work.
- * VF can disable/enable the intr by itself.
- */
- i40e_vsi_queues_bind_intr(vf->vsi);
send_msg:
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
ret, NULL, 0);
@@ -648,12 +850,21 @@ send_msg:
static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_queue_select *q_sel =
(struct i40e_virtchnl_queue_select *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*q_sel)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -671,7 +882,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_ether_addr_list *addr_list =
@@ -680,6 +892,14 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
int i;
struct ether_addr *mac;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
memset(&filter, 0 , sizeof(struct i40e_mac_filter_info));
if (msg == NULL || msglen <= sizeof(*addr_list)) {
@@ -692,8 +912,8 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
mac = (struct ether_addr *)(addr_list->list[i].addr);
(void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
- if(!is_valid_assigned_ether_addr(mac) ||
- i40e_vsi_add_mac(vf->vsi, &filter)) {
+ if (is_zero_ether_addr(mac) ||
+ i40e_vsi_add_mac(vf->vsi, &filter)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
}
@@ -709,7 +929,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_ether_addr_list *addr_list =
@@ -717,6 +938,14 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
int i;
struct ether_addr *mac;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*addr_list)) {
PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
ret = I40E_ERR_PARAM;
@@ -725,7 +954,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
for (i = 0; i < addr_list->num_elements; i++) {
mac = (struct ether_addr *)(addr_list->list[i].addr);
- if(!is_valid_assigned_ether_addr(mac) ||
+ if (is_zero_ether_addr(mac) ||
i40e_vsi_delete_mac(vf->vsi, mac)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
@@ -741,7 +970,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
- uint8_t *msg, uint16_t msglen)
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
@@ -749,6 +979,14 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
int i;
uint16_t *vid;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
PMD_DRV_LOG(ERR, "add_vlan argument too short");
ret = I40E_ERR_PARAM;
@@ -773,7 +1011,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
@@ -781,6 +1020,14 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
int i;
uint16_t *vid;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
PMD_DRV_LOG(ERR, "delete_vlan argument too short");
ret = I40E_ERR_PARAM;
@@ -805,7 +1052,8 @@ static int
i40e_pf_host_process_cmd_config_promisc_mode(
struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_promisc_info *promisc =
@@ -813,6 +1061,14 @@ i40e_pf_host_process_cmd_config_promisc_mode(
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
bool unicast = FALSE, multicast = FALSE;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*promisc)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -838,13 +1094,20 @@ send_msg:
}
static int
-i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
{
i40e_update_vsi_stats(vf->vsi);
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_SUCCESS, (uint8_t *)&vf->vsi->eth_stats,
- sizeof(vf->vsi->eth_stats));
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
return I40E_SUCCESS;
}
@@ -853,12 +1116,21 @@ static int
i40e_pf_host_process_cmd_cfg_vlan_offload(
struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_offload_info *offload =
(struct i40e_virtchnl_vlan_offload_info *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*offload)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -879,12 +1151,21 @@ send_msg:
static int
i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_pvid_info *tpid_info =
(struct i40e_virtchnl_pvid_info *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*tpid_info)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -899,7 +1180,7 @@ send_msg:
return ret;
}
-static void
+void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
struct i40e_virtchnl_pf_event event;
@@ -907,8 +1188,33 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- event.event_data.link_event.link_speed =
- (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+
+ /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
+ switch (dev->data->dev_link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
+ break;
+ case ETH_SPEED_NUM_1G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
+ break;
+ case ETH_SPEED_NUM_20G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
+ break;
+ case ETH_SPEED_NUM_25G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
+ break;
+ case ETH_SPEED_NUM_40G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ break;
+ default:
+ event.event_data.link_event.link_speed =
+ I40E_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
@@ -925,6 +1231,8 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
struct i40e_pf_vf *vf;
/* AdminQ will pass absolute VF id, transfer to internal vf id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
+ struct rte_pmd_i40e_mb_event_param cb_param;
+ bool b_op = TRUE;
if (vf_id > pf->vf_num - 1 || !pf->vfs) {
PMD_DRV_LOG(ERR, "invalid argument");
@@ -939,10 +1247,35 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
return;
}
+ /**
+ * Initialise the structure to send to the user application;
+ * the user's response will be returned in the retval field.
+ */
+ cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
+ cb_param.vfid = vf_id;
+ cb_param.msg_type = opcode;
+ cb_param.msg = (void *)msg;
+ cb_param.msglen = msglen;
+
+ /**
+ * Ask the user application whether we are allowed to perform the
+ * requested function.
+ * If cb_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED, proceed as
+ * usual.
+ * If it is RTE_PMD_I40E_MB_EVENT_NOOP_ACK or
+ * RTE_PMD_I40E_MB_EVENT_NOOP_NACK, do nothing and send NOT_SUPPORTED
+ * to the VF, since the PF must send some response and plain ACK/NACK
+ * semantics are not defined.
+ */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
+ if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
+ PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
+ opcode);
+ b_op = FALSE;
+ }
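
b_op thus carries an application-side policy decision delivered through the
ethdev event callback. A sketch of the application half (the callback name
and the stats-only policy are hypothetical, and the callback prototype
varies between DPDK releases):

#include <rte_ethdev.h>
#include <rte_pmd_i40e.h>

/* Hypothetical policy: VFs may only query stats; every other opcode is
 * NOOP_NACK'd, so the PF replies I40E_NOT_SUPPORTED as coded above.
 */
static void
vf_mbox_cb(uint8_t port_id __rte_unused,
	   enum rte_eth_event_type type __rte_unused, void *param)
{
	struct rte_pmd_i40e_mb_event_param *p = param;

	if (p->msg_type == I40E_VIRTCHNL_OP_GET_STATS)
		p->retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
	else
		p->retval = RTE_PMD_I40E_MB_EVENT_NOOP_NACK;
}

/* registered once per port during initialisation:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_VF_MBOX,
 *                               vf_mbox_cb, NULL);
 */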
+
switch (opcode) {
case I40E_VIRTCHNL_OP_VERSION :
PMD_DRV_LOG(INFO, "OP_VERSION received");
- i40e_pf_host_process_cmd_version(vf);
+ i40e_pf_host_process_cmd_version(vf, b_op);
break;
case I40E_VIRTCHNL_OP_RESET_VF :
PMD_DRV_LOG(INFO, "OP_RESET_VF received");
@@ -950,61 +1283,72 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
- i40e_pf_host_process_cmd_get_vf_resource(vf);
+ i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
- i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
- msglen);
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
- i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
- i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
- i40e_notify_vf_link_status(dev, vf);
+ if (b_op) {
+ i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
+ i40e_notify_vf_link_status(dev, vf);
+ } else {
+ i40e_pf_host_send_msg_to_vf(
+ vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ }
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
- i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
+ i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
+ i40e_pf_host_process_cmd_add_ether_address(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
+ i40e_pf_host_process_cmd_del_ether_address(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
- i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
+ i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
- i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
+ i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
- i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_GET_STATS:
PMD_DRV_LOG(INFO, "OP_GET_STATS received");
- i40e_pf_host_process_cmd_get_stats(vf);
+ i40e_pf_host_process_cmd_get_stats(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
- i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
+ i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
- i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
+ i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
break;
/* Don't add command supported below, which will
* return an error code.
@@ -1055,9 +1399,9 @@ i40e_pf_host_init(struct rte_eth_dev *dev)
ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
if (ret != I40E_SUCCESS)
goto fail;
- eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
}
+ RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
/* restore irq0 */
i40e_pf_enable_irq0(hw);
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index 244bac37..b4c22876 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -118,5 +118,7 @@ void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint8_t *msg, uint16_t msglen);
int i40e_pf_host_init(struct rte_eth_dev *dev);
int i40e_pf_host_uninit(struct rte_eth_dev *dev);
+void i40e_notify_vf_link_status(struct rte_eth_dev *dev,
+ struct i40e_pf_vf *vf);
#endif /* _I40E_PF_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1b25b2f2..351cb94d 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,6 +50,8 @@
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -59,7 +61,6 @@
#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32
-#define I40E_MAX_PKT_TYPE 256
#define I40E_TX_MAX_BURST 32
@@ -73,12 +74,31 @@
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#ifdef RTE_LIBRTE_IEEE1588
+#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define I40E_TX_IEEE1588_TMST 0
+#endif
+
#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
+#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_MASK | \
+ I40E_TX_IEEE1588_TMST)
+
+#define I40E_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -256,7 +276,7 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
*cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
break;
default:
- PMD_TX_LOG(ERR, "Tunnel type not supported\n");
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
return;
}
@@ -412,15 +432,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
"rxq->rx_free_thresh=%d",
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
- RTE_PMD_I40E_RX_MAX_BURST))) {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
- "rxq->nb_rx_desc=%d, "
- "I40E_MAX_RING_DESC=%d, "
- "RTE_PMD_I40E_RX_MAX_BURST=%d",
- rxq->nb_rx_desc, I40E_MAX_RING_DESC,
- RTE_PMD_I40E_RX_MAX_BURST);
- ret = -EINVAL;
}
#else
ret = -EINVAL;
@@ -446,6 +457,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
int32_t s[I40E_LOOK_AHEAD], nb_dd;
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -494,9 +506,9 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
mb->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(\
rxdp[j].wb.qword0.hi_dword.rss);
@@ -584,7 +596,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
/* Update rx tail register */
rte_wmb();
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+ I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -598,6 +610,7 @@ static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev;
uint16_t nb_rx = 0;
if (!nb_pkts)
@@ -615,9 +628,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (i40e_rx_alloc_bufs(rxq) != 0) {
uint16_t i, j;
- PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
- "port_id=%u, queue_id=%u",
- rxq->port_id, rxq->queue_id);
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
rxq->rx_nb_avail = 0;
rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
@@ -679,6 +693,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union i40e_rx_desc rxd;
struct i40e_rx_entry *sw_ring;
struct i40e_rx_entry *rxe;
+ struct rte_eth_dev *dev;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
uint16_t nb_rx;
@@ -688,6 +703,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t rx_id, nb_hold;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl;
nb_rx = 0;
nb_hold = 0;
@@ -695,6 +711,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
+ ptype_tbl = rxq->vsi->adapter->ptype_tbl;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
@@ -707,10 +724,13 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
- rxd = *rxdp;
+ }
+ rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
rx_id++;
@@ -751,8 +771,8 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
rxm->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
@@ -802,10 +822,12 @@ i40e_recv_scattered_pkts(void *rx_queue,
struct rte_mbuf *nmb, *rxm;
uint16_t rx_id = rxq->rx_tail;
uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
uint32_t rx_status;
uint64_t qword1;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
@@ -818,8 +840,12 @@ i40e_recv_scattered_pkts(void *rx_queue,
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
+ }
+
rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
@@ -913,8 +939,8 @@ i40e_recv_scattered_pkts(void *rx_queue,
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
first_seg->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
@@ -1022,7 +1048,6 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint16_t nb_tx;
uint32_t td_cmd;
uint32_t td_offset;
- uint32_t tx_flags;
uint32_t td_tag;
uint64_t ol_flags;
uint16_t nb_used;
@@ -1046,7 +1071,6 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
td_cmd = 0;
td_tag = 0;
td_offset = 0;
- tx_flags = 0;
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -1093,12 +1117,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Descriptor based VLAN insertion */
if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
- tx_flags |= tx_pkt->vlan_tci <<
- I40E_TX_FLAG_L2TAG1_SHIFT;
- tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
- I40E_TX_FLAG_L2TAG1_SHIFT;
+ td_tag = tx_pkt->vlan_tci;
}
/* Always enable CRC offload insertion */
@@ -1231,7 +1251,7 @@ end_of_tx:
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -1383,7 +1403,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
/* Update the tx tail register */
rte_wmb();
- I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
@@ -1414,6 +1434,85 @@ i40e_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
+static uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = i40e_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * m->nb_segs is uint8_t, so nb_segs can never exceed
+ * I40E_TX_MAX_SEG; only the nb_segs > I40E_TX_MAX_MTU_SEG
+ * condition needs to be checked.
+ */
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+ } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
+ (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+ /* An MSS outside the range (256B - 9674B) is considered
+ * malicious
+ */
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+ return i;
+}
+
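
i40e_prep_pkts() is exposed through the generic tx_pkt_prepare hook (wired
up in i40e_set_tx_function() below), so an application can have the PMD vet
offload flags before transmitting. A minimal sketch (port/queue ids and the
error handler are hypothetical):

/* Validate a burst, then send only the packets that passed. */
uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

if (nb_prep != nb_pkts) {
	/* pkts[nb_prep] failed a check; rte_errno says why, per the
	 * checks above (segment count, MSS range, offload mask).
	 */
	drop_bad_pkt(pkts[nb_prep], rte_errno); /* hypothetical handler */
}

uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);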
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index
* application used, which assume having sequential ones. But from driver's
@@ -1701,8 +1800,17 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
/* Allocate the maximun number of RX ring hardware descriptor. */
- ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
- ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+ len = I40E_MAX_RING_DESC;
+
+ /**
+ * Allocate a little more memory because the vectorized/bulk_alloc
+ * Rx functions don't check boundaries on every iteration.
+ */
+ len += RTE_PMD_I40E_RX_MAX_BURST;
+
+ ring_size = RTE_ALIGN(len * sizeof(union i40e_rx_desc),
+ I40E_DMA_MEM_ALIGN);
+
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
@@ -1717,11 +1825,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
-#else
- len = nb_desc;
-#endif
/* Allocate the software ring. */
rxq->sw_ring =
@@ -1796,11 +1900,6 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct i40e_rx_queue *rxq;
uint16_t desc = 0;
- if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
@@ -1831,7 +1930,7 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
int ret;
if (unlikely(offset >= rxq->nb_rx_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u", offset);
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
return 0;
}
@@ -1849,6 +1948,64 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
}
int
+i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_le_to_cpu_64((1ULL << I40E_RX_DESC_STATUS_DD_SHIFT)
+ << I40E_RXD_QW1_STATUS_SHIFT);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct i40e_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK);
+ expect = rte_cpu_to_le_64(
+ I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
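
These two functions back the new rte_eth_rx_descriptor_status() and
rte_eth_tx_descriptor_status() ethdev calls, which let an application probe
ring occupancy without dequeuing packets. A sketch (the offset and the
reaction are illustrative):

/* If the descriptor 64 slots past the Rx tail is already DONE, at least
 * 65 packets are waiting and the ring is filling faster than we drain it.
 */
if (rte_eth_rx_descriptor_status(port_id, queue_id, 64) ==
		RTE_ETH_RX_DESC_DONE)
	enable_extra_worker(); /* hypothetical reaction */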
+
+int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -2129,11 +2286,11 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
((volatile char *)rxq->rx_ring)[i] = 0;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
@@ -2765,12 +2922,25 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
}
+ dev->tx_pkt_prepare = NULL;
} else {
PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
}
}
+void __attribute__((cold))
+i40e_set_default_ptype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
+}
+
/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
int __attribute__((weak))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
@@ -2815,9 +2985,9 @@ i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
}
uint16_t __attribute__((weak))
-i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
- struct rte_mbuf __rte_unused **tx_pkts,
- uint16_t __rte_unused nb_pkts)
+i40e_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
{
return 0;
}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index ecdb13cb..20084d64 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -34,16 +34,6 @@
#ifndef _I40E_RXTX_H_
#define _I40E_RXTX_H_
-/**
- * 32 bits tx flags, high 16 bits for L2TAG1 (VLAN),
- * low 16 bits for others.
- */
-#define I40E_TX_FLAG_L2TAG1_SHIFT 16
-#define I40E_TX_FLAG_L2TAG1_MASK 0xffff0000
-#define I40E_TX_FLAG_CSUM ((uint32_t)(1 << 0))
-#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
-#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
-
#define RTE_PMD_I40E_RX_MAX_BURST 32
#define RTE_PMD_I40E_TX_MAX_BURST 32
@@ -63,6 +53,12 @@
#define I40E_MIN_RING_DESC 64
#define I40E_MAX_RING_DESC 4096
+#define I40E_MIN_TSO_MSS 256
+#define I40E_MAX_TSO_MSS 9674
+
+#define I40E_TX_MAX_SEG UINT8_MAX
+#define I40E_TX_MAX_MTU_SEG 8
+
#undef container_of
#define container_of(ptr, type, member) ({ \
typeof(((type *)0)->member)(*__mptr) = (ptr); \
@@ -113,11 +109,11 @@ struct i40e_rx_queue {
uint16_t nb_rx_hold; /**< number of held free RX desc */
struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */
struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
+ struct rte_mbuf fake_mbuf; /**< dummy mbuf */
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
uint16_t rx_nb_avail; /**< number of staged packets ready */
uint16_t rx_next_avail; /**< index of next staged packets */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
- struct rte_mbuf fake_mbuf; /**< dummy mbuf */
struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
#endif
@@ -223,6 +219,8 @@ uint16_t i40e_recv_scattered_pkts(void *rx_queue,
uint16_t i40e_xmit_pkts(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
int i40e_tx_queue_init(struct i40e_tx_queue *txq);
int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
void i40e_free_tx_resources(struct i40e_tx_queue *txq);
@@ -238,6 +236,8 @@ void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
@@ -248,19 +248,20 @@ int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
-uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
void i40e_set_rx_function(struct rte_eth_dev *dev);
void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
struct i40e_tx_queue *txq);
void i40e_set_tx_function(struct rte_eth_dev *dev);
+void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
/* For each value it means, datasheet of hardware can tell more details
*
* @note: fix i40e_dev_supported_ptypes_get() if any change here.
*/
static inline uint32_t
-i40e_rxd_pkt_type_mapping(uint8_t ptype)
+i40e_get_default_pkt_type(uint8_t ptype)
{
static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
/* L2 types */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
new file mode 100644
index 00000000..f4036ea2
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -0,0 +1,645 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 IBM Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <altivec.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+
+ vector unsigned long hdr_room = (vector unsigned long){
+ RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM};
+ vector unsigned long dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = (vector unsigned long){};
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vec_st(dma_addr0, 0,
+ (vector unsigned long *)&rxdp[i].read);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ vector unsigned long vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway, so overwrite the whole 8 bytes in one store:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
+ vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = vec_mergel(vaddr0, vaddr0);
+ dma_addr1 = vec_mergel(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = vec_add(dma_addr0, hdr_room);
+ dma_addr1 = vec_add(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
+ vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
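
The address conversion above leans on the mbuf layout: vec_ld() fetches the
adjacent buf_addr/buf_physaddr pair in one 16-byte load, and
vec_mergel(vaddr, vaddr) replicates the physical address into both 64-bit
lanes, so a single store fills the descriptor's packet and header address
fields with the same headroom-adjusted DMA address.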
+
+static inline void
+desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+ vector unsigned int vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const vector unsigned int rss_vlan_msk = (vector unsigned int){
+ (int32_t)0x1c03804, (int32_t)0x1c03804,
+ (int32_t)0x1c03804, (int32_t)0x1c03804};
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const vector unsigned char vlan_flags = (vector unsigned char){
+ 0, 0, 0, 0,
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char rss_flags = (vector unsigned char){
+ 0, PKT_RX_FDIR, 0, 0,
+ 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char l3_l4e_flags = (vector unsigned char){
+ 0,
+ PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ | PKT_RX_IP_CKSUM_BAD,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
+ vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
+ vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
+
+ vlan1 = vec_and(vlan0, rss_vlan_msk);
+ vlan0 = (vector unsigned int)vec_perm(vlan_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&vlan1);
+
+ rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
+ rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
+ *(vector unsigned char *)&rss);
+
+ l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
+ l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&l3_l4e);
+
+ vlan0 = vec_or(vlan0, rss);
+ vlan0 = vec_or(vlan0, l3_l4e);
+
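+	/* vec_mergeh/vec_mergel above leave the per-packet flag words in
+	 * lane order 2,3,0,1, so the stores below cross-index to restore
+	 * packet order.
+	 */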
+ rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
+ rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
+ rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
+ rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
+}
+
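+/* The Rx descriptor carries the packet length in bits 38-51 of qword1;
+ * shifting the qword left by PKTLEN_SHIFT aligns the length on a 16-bit
+ * boundary (bytes 14-15 of the descriptor) so the shuffle mask can copy
+ * it straight into the mbuf pkt_len/data_len fields.
+ */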
+#define PKTLEN_SHIFT 10
+
+static inline void
+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
+{
+ vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
+ vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
+
+ ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
+ ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
+
+ rx_pkts[0]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
+ rx_pkts[1]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
+ rx_pkts[2]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
+ rx_pkts[3]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only the first
+ *   RTE_I40E_VPMD_RX_BURST DD bits are scanned
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ vector unsigned char shuf_msk;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ vector unsigned short crc_adjust = (vector unsigned short){
+ 0, 0, /* ignore pkt_type field */
+ rxq->crc_len, /* sub crc on pkt_len */
+ 0, /* ignore high-16bits of pkt_len */
+ rxq->crc_len, /* sub crc on data_len */
+ 0, 0, 0 /* ignore non-length fields */
+ };
+ vector unsigned long dd_check, eop_check;
+
+	/* nb_pkts has to be less than or equal to RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = (vector unsigned long){0x0000000100000001ULL,
+ 0x0000000100000001ULL};
+
+ /* 4 packets EOP mask */
+ eop_check = (vector unsigned long){0x0000000200000002ULL,
+ 0x0000000200000002ULL};
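+	/* Each 64-bit lane of the merged staterr vector holds two 32-bit
+	 * status words: dd_check selects bit 0 (DD) and eop_check bit 1
+	 * (EOP) of every descriptor.
+	 */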
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = (vector unsigned char){
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 14, 15, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 14, 15, /* octet 15~14, 16 bits data_len */
+ 2, 3, /* octet 2~3, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+	 * [A*. mask out 4 unused dirty fields in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
+ vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
+		vector unsigned long mbp1, mbp2; /* two mbuf pointers
+						  * in one vector reg.
+						  */
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = *(vector unsigned long *)&sw_ring[pos];
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = *(vector unsigned long *)(rxdp + 3);
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos] = mbp1;
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
+
+ descs[2] = *(vector unsigned long *)(rxdp + 2);
+ rte_compiler_barrier();
+		/* A.1 load remaining 2 pkts desc */
+ descs[1] = *(vector unsigned long *)(rxdp + 1);
+ rte_compiler_barrier();
+ descs[0] = *(vector unsigned long *)(rxdp);
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos + 2] = mbp2;
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len3 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[3]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ const vector unsigned int len2 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[2]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = (vector unsigned long)len3;
+ descs[2] = (vector unsigned long)len2;
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vec_perm((vector unsigned char)descs[3],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb3 = vec_perm((vector unsigned char)descs[2],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
+ (vector unsigned short)descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
+ (vector unsigned short)descs[0]);
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb4, crc_adjust);
+ pkt_mb3 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len1 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[1]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+ const vector unsigned int len0 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[0]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = (vector unsigned long)len1;
+ descs[0] = (vector unsigned long)len0;
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vec_perm((vector unsigned char)descs[1],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb1 = vec_perm((vector unsigned char)descs[0],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = (vector unsigned short)vec_mergeh(
+ sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vec_st(pkt_mb4, 0,
+ (vector unsigned char *)&rx_pkts[pos + 3]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb3, 0,
+ (vector unsigned char *)&rx_pkts[pos + 2]
+ ->rx_descriptor_fields1
+ );
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb2, crc_adjust);
+ pkt_mb1 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ vector unsigned char eop_shuf_mask =
+ (vector unsigned char){
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ };
+
+ /* and with mask to extract bits, flipping 1-0 */
+ vector unsigned char eop_bits = vec_and(
+ (vector unsigned char)vec_nor(staterr, staterr),
+ (vector unsigned char)eop_check);
+			/* the staterr values are not in order, as the count
+			 * of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = vec_perm(eop_bits, (vector unsigned char){},
+ eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *split_packet = (vec_ld(0,
+ (vector unsigned int *)&eop_bits))[0];
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = vec_and(staterr, (vector unsigned short)dd_check);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vec_st(pkt_mb2, 0,
+ (vector unsigned char *)&rx_pkts[pos + 1]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb1, 0,
+ (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
+ );
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+		/* C.4 calc available number of desc */
+ var = __builtin_popcountll((vec_ld(0,
+ (vector unsigned long *)&staterr)[0]));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only the first
+ *   RTE_I40E_VPMD_RX_BURST DD bits are scanned
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only the first
+ *   RTE_I40E_VPMD_RX_BURST DD bits are scanned
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+ unsigned int i = 0;
+
+ if (!rxq->pkt_first_seg) {
+		/* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
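+	/* A data descriptor is {buffer DMA address, DTYPE | CMD | length};
+	 * the 16-byte vector store below writes both qwords at once.
+	 */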
+ vector unsigned long descriptor = (vector unsigned long){
+ pkt->buf_physaddr + pkt->data_off, high_qw};
+ *(vector unsigned long *)txdp = descriptor;
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+	/* crossing tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ nb_commit = nb_pkts;
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
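+	/* If the burst crosses the end of the ring, fill the tail of the
+	 * ring first, set RS on its last descriptor, then wrap to entry 0.
+	 */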
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 990520f3..69209668 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -65,9 +65,9 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
start->ol_flags = end->ol_flags;
/* we need to strip crc for the whole packet */
start->pkt_len -= rxq->crc_len;
- if (end->data_len > rxq->crc_len) {
+ if (end->data_len > rxq->crc_len)
end->data_len -= rxq->crc_len;
- } else {
+ else {
/* free up last mbuf */
struct rte_mbuf *secondlast = start;
@@ -78,7 +78,6 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
end->data_len);
secondlast->next = NULL;
rte_pktmbuf_free_seg(end);
- end = secondlast;
}
pkts[pkt_idx++] = start;
start = end = NULL;
@@ -124,12 +123,12 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
* tx_next_dd - (tx_rs_thresh-1)
*/
txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool)) {
free[nb_free++] = m;
@@ -145,7 +144,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
@@ -225,14 +224,6 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0 ||
- rxmode->hw_ip_checksum != 0)
- return -1;
-#endif
-
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
@@ -243,6 +234,10 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
if (rxmode->header_split == 1)
return -1;
+ /* no QinQ support */
+ if (rxmode->hw_vlan_extend == 1)
+ return -1;
+
return 0;
#else
RTE_SET_USED(dev);
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 011c54e0..694e91f3 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -57,7 +57,6 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
uint64x2_t dma_addr0, dma_addr1;
uint64x2_t zero = vdupq_n_u64(0);
uint64_t paddr;
- uint8x8_t p;
rxdp = rxq->rx_ring + rxq->rxrearm_start;
@@ -77,27 +76,17 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
return;
}
- p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
-
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
- */
- vst1_u8((uint8_t *)&mb0->rearm_data, p);
paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
dma_addr0 = vdupq_n_u64(paddr);
/* flush desc with pa dma_addr */
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
- vst1_u8((uint8_t *)&mb1->rearm_data, p);
paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
dma_addr1 = vdupq_n_u64(paddr);
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
@@ -116,18 +105,13 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
-
static inline void
-desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
+ struct rte_mbuf **rx_pkts)
{
uint32x4_t vlan0, vlan1, rss, l3_l4e;
+ const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
+ uint64x2_t rearm0, rearm1, rearm2, rearm3;
/* mask everything except RSS, flow director and VLAN flags
* bit2 is for VLAN tag, bit11 for flow director indication
@@ -136,6 +120,20 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
const uint32x4_t rss_vlan_msk = {
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
+ const uint32x4_t cksum_mask = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD};
+
/* map rss and vlan type to rss hash and vlan flag */
const uint8x16_t vlan_flags = {
0, 0, 0, 0,
@@ -150,14 +148,16 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
0, 0, 0, 0};
const uint8x16_t l3_l4e_flags = {
- 0,
- PKT_RX_IP_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
0, 0, 0, 0, 0, 0, 0, 0};
vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
@@ -177,26 +177,32 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
l3_l4e = vshrq_n_u32(vlan1, 22);
l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
vreinterpretq_u8_u32(l3_l4e)));
-
+ /* then we shift left 1 bit */
+ l3_l4e = vshlq_n_u32(l3_l4e, 1);
+	/* we need to mask out the redundant bits */
+ l3_l4e = vandq_u32(l3_l4e, cksum_mask);
vlan0 = vorrq_u32(vlan0, rss);
vlan0 = vorrq_u32(vlan0, l3_l4e);
- rx_pkts[0]->ol_flags = vgetq_lane_u32(vlan0, 0);
- rx_pkts[1]->ol_flags = vgetq_lane_u32(vlan0, 1);
- rx_pkts[2]->ol_flags = vgetq_lane_u32(vlan0, 2);
- rx_pkts[3]->ol_flags = vgetq_lane_u32(vlan0, 3);
+ rearm0 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 0), mbuf_init, 1);
+ rearm1 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 1), mbuf_init, 1);
+ rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1);
+ rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1);
+
+ vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0);
+ vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1);
+ vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2);
+ vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3);
}
-#else
-#define desc_to_olflags_v(descs, rx_pkts) do {} while (0)
-#endif
#define PKTLEN_SHIFT 10
#define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL
static inline void
-desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
{
int i;
uint8_t ptype;
@@ -205,7 +211,7 @@ desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
for (i = 0; i < 4; i++) {
tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
ptype = vgetq_lane_u8(tmp, 8);
- rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
+ rx_pkts[i]->packet_type = ptype_tbl[ptype];
}
}
@@ -225,6 +231,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts_recd;
int pos;
uint64_t var;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
/* mask to shuffle from desc. to mbuf */
uint8x16_t shuf_msk = {
@@ -359,7 +366,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sterr_tmp2.val[1]).val[0];
stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
@@ -429,7 +436,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb2);
vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
- desc_to_ptype_v(descs, &rx_pkts[pos]);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
/* C.4 calc available number of desc */
var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
nb_pkts_recd += var;
@@ -523,8 +530,8 @@ vtx(volatile struct i40e_tx_desc *txdp,
}
uint16_t
-i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index b95cc8e1..3b4a352e 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -82,22 +82,10 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
__m128i vaddr0, vaddr1;
- uintptr_t p0, p1;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
- */
- p0 = (uintptr_t)&mb0->rearm_data;
- *(uint64_t *)p0 = rxq->mbuf_initializer;
- p1 = (uintptr_t)&mb1->rearm_data;
- *(uint64_t *)p1 = rxq->mbuf_initializer;
-
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -128,17 +116,13 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
-
static inline void
-desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
+ struct rte_mbuf **rx_pkts)
{
+ const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
__m128i vlan0, vlan1, rss, l3_l4e;
/* mask everything except RSS, flow director and VLAN flags
@@ -206,19 +190,30 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
vlan0 = _mm_or_si128(vlan0, rss);
vlan0 = _mm_or_si128(vlan0, l3_l4e);
- rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0);
- rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2);
- rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4);
- rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6);
+ /*
+ * At this point, we have the 4 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}
-#else
-#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
-#endif
#define PKTLEN_SHIFT 10
static inline void
-desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
{
__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
@@ -226,10 +221,10 @@ desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
ptype0 = _mm_srli_epi64(ptype0, 30);
ptype1 = _mm_srli_epi64(ptype1, 30);
- rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
- rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
- rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
- rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
+ rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
+ rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
+ rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
+ rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
}
/*
@@ -248,6 +243,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
int pos;
uint64_t var;
__m128i shuf_msk;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
__m128i crc_adjust = _mm_set_epi16(
0, 0, 0, /* ignore non-length fields */
@@ -320,20 +316,26 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_I40E_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -342,8 +344,10 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
@@ -372,7 +376,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* C.1 4=>2 filter staterr info only */
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -424,12 +428,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
split_packet += RTE_I40E_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
/* C.3 calc available number of desc */
@@ -441,7 +439,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb2);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
- desc_to_ptype_v(descs, &rx_pkts[pos]);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
/* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
@@ -536,8 +534,8 @@ vtx(volatile struct i40e_tx_desc *txdp,
}
uint16_t
-i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
new file mode 100644
index 00000000..f7ce62bb
--- /dev/null
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -0,0 +1,1937 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+#include "i40e_pf.h"
+#include "i40e_rxtx.h"
+#include "rte_pmd_i40e.h"
+
+/* The max bandwidth of i40e is 40Gbps. */
+#define I40E_QOS_BW_MAX 40000
+/* The bandwidth should be the multiple of 50Mbps. */
+#define I40E_QOS_BW_GRANULARITY 50
+/* The min bandwidth weight is 1. */
+#define I40E_QOS_BW_WEIGHT_MIN 1
+/* The max bandwidth weight is 127. */
+#define I40E_QOS_BW_WEIGHT_MAX 127
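+
+/* Example: a limit of 10000 (10Gbps) is valid (10000 <= 40000 and
+ * 10000 % 50 == 0), while 10025 is rejected by the bandwidth setters
+ * because it is not a multiple of the 50Mbps granularity.
+ */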
+
+int
+rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
+ if (on) {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
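+
+/* Typical host-side usage (hypothetical port/VF ids):
+ *	rte_pmd_i40e_set_vf_mac_anti_spoof(0, 1, 1);
+ * enables MAC anti-spoof checking for VF 1 on port 0.
+ */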
+
+static int
+i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
+{
+ uint32_t j, k;
+ uint16_t vlan_id;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (!vsi->vfta[j])
+ continue;
+
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (!(vsi->vfta[j] & (1 << k)))
+ continue;
+
+ vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+ if (!vlan_id)
+ continue;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+ if (add)
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ else
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to add/rm vlan filter");
+ return ret;
+ }
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+int
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->vlan_anti_spoof_on == on)
+ return 0; /* already on or off */
+
+ vsi->vlan_anti_spoof_on = on;
+ if (!vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
+ return -ENOTSUP;
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num;
+ enum rte_mac_filter_type filter_type;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* remove all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ vlan_num = vsi->vlan_num;
+ filter_type = f->mac_info.filter_type;
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (vlan_num == 0) {
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
+ return I40E_ERR_PARAM;
+ }
+ } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+ filter_type == RTE_MAC_HASH_MATCH)
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num = 0;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* restore all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
+ /**
+			 * If vlan_num is 0, this is the first time a MAC is
+			 * added, so set the mask for vlan_id 0.
+ */
+ if (vsi->vlan_num == 0) {
+ i40e_set_vlan_filter(vsi, 0, 1);
+ vsi->vlan_num = 1;
+ }
+ vlan_num = vsi->vlan_num;
+ } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = f->mac_info.filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+
+ if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
+{
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi)
+ return -EINVAL;
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* Use the FW API if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver < 5) {
+ PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
+ return -ENOTSUP;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
+ if (on) {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ /* remove all the MAC and VLAN first */
+ ret = i40e_vsi_rm_mac_filter(vsi);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
+ return ret;
+ }
+ if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 0);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
+ return ret;
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ if (on)
+ vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+ else
+ vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ return ret;
+ }
+
+ /* add all the MAC and VLAN back */
+ ret = i40e_vsi_restore_mac_filter(vsi);
+ if (ret)
+ return ret;
+ if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ uint16_t vf_id;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* setup PF TX loopback */
+ vsi = pf->main_vsi;
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+
+ /* setup TX loopback for all the VFs */
+ if (!pf->vfs) {
+ /* if no VF, do nothing. */
+ return 0;
+ }
+
+ for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ on, NULL, true);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ on, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_mac_filter *f;
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ void *temp;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ ether_addr_copy(mac_addr, &vf->mac_addr);
+
+ /* Remove all existing mac */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
+ i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+
+ return 0;
+}
+
+/* Set vlan strip on/off for specific VF from host */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+
+ if (!vsi)
+ return -EINVAL;
+
+ ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+ uint16_t vlan_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (vlan_id > ETHER_MAX_VLAN_ID) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0)
+ return -ENODEV;
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.pvid = vlan_id;
+ if (vlan_id > 0)
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ else
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+ uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_mac_filter_info filter;
+ struct ether_addr broadcast = {
+ .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (on) {
+ (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ ret = i40e_vsi_add_mac(vsi, &filter);
+ } else {
+ ret = i40e_vsi_delete_mac(vsi, &broadcast);
+ }
+
+ if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ if (on) {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ } else {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_vlan_filter_count(struct i40e_vsi *vsi)
+{
+ uint32_t j, k;
+ uint16_t vlan_id;
+ int count = 0;
+
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (!vsi->vfta[j])
+ continue;
+
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (!(vsi->vfta[j] & (1 << k)))
+ continue;
+
+ vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+ if (!vlan_id)
+ continue;
+
+ count++;
+ }
+ }
+
+ return count;
+}
+
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ uint16_t vf_idx;
+ int ret = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ if (vf_mask == 0) {
+ PMD_DRV_LOG(ERR, "No VF.");
+ return -EINVAL;
+ }
+
+ if (on > 1) {
+		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
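+	/* Adding the first VLAN filter on a VSI takes it out of VLAN
+	 * promiscuous mode; deleting the last one puts it back.
+	 */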
+ for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ vsi = pf->vfs[vf_idx].vsi;
+ if (on) {
+ if (!vsi->vlan_filter_on) {
+ vsi->vlan_filter_on = true;
+ i40e_aq_set_vsi_vlan_promisc(hw,
+ vsi->seid,
+ false,
+ NULL);
+ if (!vsi->vlan_anti_spoof_on)
+ i40e_add_rm_all_vlan_filter(
+ vsi, true);
+ }
+ ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ } else {
+ ret = i40e_vsi_delete_vlan(vsi, vlan_id);
+
+ if (!i40e_vlan_filter_count(vsi)) {
+ vsi->vlan_filter_on = false;
+ i40e_aq_set_vsi_vlan_promisc(hw,
+ vsi->seid,
+ true,
+ NULL);
+ }
+ }
+ }
+ }
+
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ i40e_update_vsi_stats(vsi);
+
+ stats->ipackets = vsi->eth_stats.rx_unicast +
+ vsi->eth_stats.rx_multicast +
+ vsi->eth_stats.rx_broadcast;
+ stats->opackets = vsi->eth_stats.tx_unicast +
+ vsi->eth_stats.tx_multicast +
+ vsi->eth_stats.tx_broadcast;
+ stats->ibytes = vsi->eth_stats.rx_bytes;
+ stats->obytes = vsi->eth_stats.tx_bytes;
+ stats->ierrors = vsi->eth_stats.rx_discards;
+ stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_reset_vf_stats(uint8_t port,
+ uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->offset_loaded = false;
+ i40e_update_vsi_stats(vsi);
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret = 0;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+	if (bw > I40E_QOS_BW_MAX) {
+		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %d Mbps.",
+			    I40E_QOS_BW_MAX);
+		return -EINVAL;
+	}
+
+	if (bw % I40E_QOS_BW_GRANULARITY) {
+		PMD_DRV_LOG(ERR, "Bandwidth should be a multiple of %d Mbps.",
+			    I40E_QOS_BW_GRANULARITY);
+		return -EINVAL;
+	}
+
+ bw /= I40E_QOS_BW_GRANULARITY;
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* No change. */
+ if (bw == vsi->bw_info.bw_limit) {
+ PMD_DRV_LOG(INFO,
+ "No change for VF max bandwidth. Nothing to do.");
+ return 0;
+ }
+
+ /**
+ * VF bandwidth limitation and TC bandwidth limitation cannot be
+ * enabled in parallel, quit if TC bandwidth limitation is enabled.
+ *
+ * If bw is 0, means disable bandwidth limitation. Then no need to
+ * check TC bandwidth limitation.
+ */
+ if (bw) {
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if ((vsi->enabled_tc & BIT_ULL(i)) &&
+ vsi->bw_info.bw_ets_credits[i])
+ break;
+ }
+ if (i != I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR,
+ "TC max bandwidth has been set on this VF,"
+ " please disable it first.");
+ return -EINVAL;
+ }
+ }
+
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d bandwidth, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ vsi->bw_info.bw_limit = (uint16_t)bw;
+ vsi->bw_info.bw_max = 0;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
+ uint8_t tc_num, uint8_t *bw_weight)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
+ int ret = 0;
+ int i, j;
+ uint16_t sum;
+ bool b_change = false;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
+		PMD_DRV_LOG(ERR, "Number of TCs should be no more than %d.",
+			    I40E_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i))
+ sum++;
+ }
+ if (sum != tc_num) {
+ PMD_DRV_LOG(ERR,
+ "Weight should be set for all %d enabled TCs.",
+ sum);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < tc_num; i++) {
+ if (!bw_weight[i]) {
+			PMD_DRV_LOG(ERR,
+				    "Each weight should be at least 1.");
+ return -EINVAL;
+ }
+ sum += bw_weight[i];
+ }
+ if (sum != 100) {
+		PMD_DRV_LOG(ERR,
+			    "The sum of the TC weights should be 100.");
+ return -EINVAL;
+ }
+
+ /**
+ * Create the configuration for all the TCs.
+ */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ j = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ if (bw_weight[j] !=
+ vsi->bw_info.bw_ets_share_credits[i])
+ b_change = true;
+
+ tc_bw.tc_bw_credits[i] = bw_weight[j];
+ j++;
+ }
+ }
+
+ /* No change. */
+ if (!b_change) {
+ PMD_DRV_LOG(INFO,
+ "No change for TC allocated bandwidth."
+ " Nothing to do.");
+ return 0;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d TC bandwidth weight, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ j = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
+ j++;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
+ uint8_t tc_no, uint32_t bw)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+ int ret = 0;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+	if (bw > I40E_QOS_BW_MAX) {
+		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %d Mbps.",
+			    I40E_QOS_BW_MAX);
+		return -EINVAL;
+	}
+
+	if (bw % I40E_QOS_BW_GRANULARITY) {
+		PMD_DRV_LOG(ERR, "Bandwidth should be a multiple of %d Mbps.",
+			    I40E_QOS_BW_GRANULARITY);
+		return -EINVAL;
+	}
+
+ bw /= I40E_QOS_BW_GRANULARITY;
+
+ if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
+ I40E_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
+ PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
+ vf_id, tc_no);
+ return -EINVAL;
+ }
+
+ /* No change. */
+ if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
+ PMD_DRV_LOG(INFO,
+ "No change for TC max bandwidth. Nothing to do.");
+ return 0;
+ }
+
+ /**
+ * VF bandwidth limitation and TC bandwidth limitation cannot be
+ * enabled in parallel, disable VF bandwidth limitation if it's
+ * enabled.
+ * If bw is 0, means disable bandwidth limitation. Then no need to
+ * care about VF bandwidth limitation configuration.
+ */
+ if (bw && vsi->bw_info.bw_limit) {
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to disable VF(%d)"
+ " bandwidth limitation, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "VF max bandwidth is disabled according"
+ " to TC max bandwidth setting.");
+ }
+
+ /**
+ * Get all the TCs' info to create a whole picture.
+ * Because the incremental change isn't permitted.
+ */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ tc_bw.tc_bw_credits[i] =
+ rte_cpu_to_le_16(
+ vsi->bw_info.bw_ets_credits[i]);
+ }
+ }
+ tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
+
+ ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d TC %d max bandwidth, err(%d).",
+ vf_id, tc_no, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+ int i;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ vsi = pf->main_vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ veb = vsi->veb;
+ if (!veb) {
+ PMD_DRV_LOG(ERR, "Invalid VEB.");
+ return -EINVAL;
+ }
+
+ if ((tc_map & veb->enabled_tc) != tc_map) {
+ PMD_DRV_LOG(ERR,
+ "TC bitmap isn't the subset of enabled TCs 0x%x.",
+ veb->enabled_tc);
+ return -EINVAL;
+ }
+
+ if (tc_map == veb->strict_prio_tc) {
+ PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
+ return 0;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* Disable DCBx if it's the first time to set strict priority. */
+ if (!veb->strict_prio_tc) {
+ ret = i40e_aq_stop_lldp(hw, true, NULL);
+ if (ret)
+ PMD_DRV_LOG(INFO,
+ "Failed to disable DCBx as it's already"
+ " disabled.");
+ else
+ PMD_DRV_LOG(INFO,
+ "DCBx is disabled according to strict"
+ " priority setting.");
+ }
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ ets_data.tc_valid_bits = veb->enabled_tc;
+ ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
+ ets_data.tc_strict_priority_flags = tc_map;
+ /* Get all TCs' bandwidth. */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (veb->enabled_tc & BIT_ULL(i)) {
+			/* For robustness, if bandwidth is 0, use 1 instead. */
+ if (veb->bw_info.bw_ets_share_credits[i])
+ ets_data.tc_bw_share_credits[i] =
+ veb->bw_info.bw_ets_share_credits[i];
+ else
+ ets_data.tc_bw_share_credits[i] =
+ I40E_QOS_BW_WEIGHT_MIN;
+ }
+ }
+
+ if (!veb->strict_prio_tc)
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
+ NULL);
+ else if (tc_map)
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
+ NULL);
+ else
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
+ NULL);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set TCs' strict priority mode."
+ " err (%d)", ret);
+ return -EINVAL;
+ }
+
+ veb->strict_prio_tc = tc_map;
+
+ /* Enable DCBx again, if all the TCs' strict priority disabled. */
+ if (!tc_map) {
+ ret = i40e_aq_start_lldp(hw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to enable DCBx, err(%d).", ret);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "DCBx is enabled again according to strict"
+ " priority setting.");
+ }
+
+ return ret;
+}
+
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+
+static void
+i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
+ uint32_t track_id, uint8_t *profile_info_sec,
+ bool add)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
+ memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
+ if (add)
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ else
+ pinfo->op = I40E_DDP_REMOVE_TRACKID;
+}
+
+static enum i40e_status_code
+i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_profile_section_header *sec;
+ uint32_t track_id;
+ uint32_t offset = 0;
+ uint32_t info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ track_id = ((struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset))->track_id;
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ if (status)
+ PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
+ "offset %d, info %d",
+ offset, info);
+
+ return status;
+}
+
+/* Check if the profile info exists */
+static int
+i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t *buff;
+ struct rte_pmd_i40e_profile_list *p_list;
+ struct rte_pmd_i40e_profile_info *pinfo, *p;
+ uint32_t i;
+ int ret;
+
+ buff = rte_zmalloc("pinfo_list",
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+ 0);
+ if (!buff) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return -1;
+ }
+
+ ret = i40e_aq_get_ddp_list(
+ hw, (void *)buff,
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+ 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get profile info list.");
+ rte_free(buff);
+ return -1;
+ }
+ p_list = (struct rte_pmd_i40e_profile_list *)buff;
+ pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
+ sizeof(struct i40e_profile_section_header));
+ for (i = 0; i < p_list->p_count; i++) {
+ p = &p_list->p_info[i];
+ if ((pinfo->track_id == p->track_id) &&
+ !memcmp(&pinfo->version, &p->version,
+ sizeof(struct i40e_ddp_version)) &&
+ !memcmp(&pinfo->name, &p->name,
+ I40E_DDP_NAME_SIZE)) {
+ PMD_DRV_LOG(INFO, "Profile exists.");
+ rte_free(buff);
+ return 1;
+ }
+ }
+
+ rte_free(buff);
+ return 0;
+}
+
+int
+rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+ uint32_t size,
+ enum rte_pmd_i40e_package_op op)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_generic_seg_header *profile_seg_hdr;
+ struct i40e_generic_seg_header *metadata_seg_hdr;
+ uint32_t track_id;
+ uint8_t *profile_info_sec;
+ int is_exist;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) +
+ sizeof(uint32_t) * 2)) {
+		PMD_DRV_LOG(ERR, "Buffer is too small for a package.");
+ return -EINVAL;
+ }
+
+ pkg_hdr = (struct i40e_package_header *)buff;
+
+ if (!pkg_hdr) {
+		PMD_DRV_LOG(ERR, "Package buffer is NULL.");
+ return -EINVAL;
+ }
+
+ if (pkg_hdr->segment_count < 2) {
+		PMD_DRV_LOG(ERR, "Segment count should be at least 2.");
+ return -EINVAL;
+ }
+
+ /* Find metadata segment */
+ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+ pkg_hdr);
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+ track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ /* Find profile segment */
+ profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
+ pkg_hdr);
+ if (!profile_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find profile segment header");
+ return -EINVAL;
+ }
+
+ profile_info_sec = rte_zmalloc(
+ "i40e_profile_info",
+ sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info),
+ 0);
+ if (!profile_info_sec) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -EINVAL;
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+ /* Check if the profile exists */
+ i40e_generate_profile_info_sec(
+ ((struct i40e_profile_segment *)profile_seg_hdr)->name,
+ &((struct i40e_profile_segment *)profile_seg_hdr)->version,
+ track_id, profile_info_sec, 1);
+ is_exist = i40e_check_profile_info(port, profile_info_sec);
+ if (is_exist > 0) {
+ PMD_DRV_LOG(ERR, "Profile already exists.");
+ rte_free(profile_info_sec);
+ return 1;
+ } else if (is_exist < 0) {
+ PMD_DRV_LOG(ERR, "Failed to check profile.");
+ rte_free(profile_info_sec);
+ return -EINVAL;
+ }
+
+ /* Write profile to HW */
+ status = i40e_write_profile(
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write profile.");
+ rte_free(profile_info_sec);
+ return status;
+ }
+
+ /* Add profile info to info list */
+ status = i40e_add_rm_profile_info(hw, profile_info_sec);
+ if (status)
+ PMD_DRV_LOG(ERR, "Failed to add profile info.");
+ } else {
+ PMD_DRV_LOG(ERR, "Operation not supported.");
+ }
+
+ rte_free(profile_info_sec);
+ return status;
+}
+
+int
+rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
+ return -EINVAL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ status = i40e_aq_get_ddp_list(hw, (void *)buff,
+ size, 0, NULL);
+
+ return status;
+}
+
+static int check_invalid_pkt_type(uint32_t pkt_type)
+{
+ uint32_t l2, l3, l4, tnl, il2, il3, il4;
+
+ l2 = pkt_type & RTE_PTYPE_L2_MASK;
+ l3 = pkt_type & RTE_PTYPE_L3_MASK;
+ l4 = pkt_type & RTE_PTYPE_L4_MASK;
+ tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
+ il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
+ il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
+ il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
+
+ if (l2 &&
+ l2 != RTE_PTYPE_L2_ETHER &&
+ l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
+ l2 != RTE_PTYPE_L2_ETHER_ARP &&
+ l2 != RTE_PTYPE_L2_ETHER_LLDP &&
+ l2 != RTE_PTYPE_L2_ETHER_NSH &&
+ l2 != RTE_PTYPE_L2_ETHER_VLAN &&
+ l2 != RTE_PTYPE_L2_ETHER_QINQ)
+ return -1;
+
+ if (l3 &&
+ l3 != RTE_PTYPE_L3_IPV4 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (l4 &&
+ l4 != RTE_PTYPE_L4_TCP &&
+ l4 != RTE_PTYPE_L4_UDP &&
+ l4 != RTE_PTYPE_L4_FRAG &&
+ l4 != RTE_PTYPE_L4_SCTP &&
+ l4 != RTE_PTYPE_L4_ICMP &&
+ l4 != RTE_PTYPE_L4_NONFRAG)
+ return -1;
+
+	if (tnl &&
+	    tnl != RTE_PTYPE_TUNNEL_IP &&
+	    tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+	    tnl != RTE_PTYPE_TUNNEL_VXLAN &&
+	    tnl != RTE_PTYPE_TUNNEL_NVGRE &&
+	    tnl != RTE_PTYPE_TUNNEL_GENEVE)
+		return -1;
+
+ if (il2 &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
+ return -1;
+
+ if (il3 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (il4 &&
+ il4 != RTE_PTYPE_INNER_L4_TCP &&
+ il4 != RTE_PTYPE_INNER_L4_UDP &&
+ il4 != RTE_PTYPE_INNER_L4_FRAG &&
+ il4 != RTE_PTYPE_INNER_L4_SCTP &&
+ il4 != RTE_PTYPE_INNER_L4_ICMP &&
+ il4 != RTE_PTYPE_INNER_L4_NONFRAG)
+ return -1;
+
+ return 0;
+}
+
+static int check_invalid_ptype_mapping(
+ struct rte_pmd_i40e_ptype_mapping *mapping_table,
+ uint16_t count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uint16_t ptype = mapping_table[i].hw_ptype;
+ uint32_t pkt_type = mapping_table[i].sw_ptype;
+
+ if (ptype >= I40E_MAX_PKT_TYPE)
+ return -1;
+
+ if (pkt_type == RTE_PTYPE_UNKNOWN)
+ continue;
+
+ if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
+ continue;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_ptype_mapping_update(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (count > I40E_MAX_PKT_TYPE)
+ return -EINVAL;
+
+ if (check_invalid_ptype_mapping(mapping_items, count))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (exclusive) {
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
+ }
+
+ for (i = 0; i < count; i++)
+ ad->ptype_tbl[mapping_items[i].hw_ptype]
+ = mapping_items[i].sw_ptype;
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_reset(uint8_t port)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ i40e_set_default_ptype_table(dev);
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_get(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t size,
+ uint16_t *count,
+ uint8_t valid_only)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int n = 0;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (n >= size)
+ break;
+ if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
+ continue;
+ mapping_items[n].hw_ptype = i;
+ mapping_items[n].sw_ptype = ad->ptype_tbl[i];
+ n++;
+ }
+
+ *count = n;
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+ uint32_t target,
+ uint8_t mask,
+ uint32_t pkt_type)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (!mask && check_invalid_pkt_type(target))
+ return -EINVAL;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
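+	/*
+	 * Mask mode: replace an entry only when its ptype bits form a
+	 * non-empty subset of the target; otherwise an exact match on
+	 * the target is required.
+	 */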
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (mask) {
+ if ((target | ad->ptype_tbl[i]) == target &&
+ (target & ad->ptype_tbl[i]))
+ ad->ptype_tbl[i] = pkt_type;
+ } else {
+ if (ad->ptype_tbl[i] == target)
+ ad->ptype_tbl[i] = pkt_type;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
new file mode 100644
index 00000000..1efb2c4b
--- /dev/null
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -0,0 +1,590 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PMD_I40E_H_
+#define _PMD_I40E_H_
+
+/**
+ * @file rte_pmd_i40e.h
+ *
+ * i40e PMD specific functions.
+ *
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include <rte_ethdev.h>
+
+/**
+ * Response sent back to i40e driver from user app after callback
+ */
+enum rte_pmd_i40e_mb_event_rsp {
+ RTE_PMD_I40E_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_I40E_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_I40E_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_I40E_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Data sent to the user application when the callback is executed.
+ */
+struct rte_pmd_i40e_mb_event_param {
+ uint16_t vfid; /**< Virtual Function number */
+ uint16_t msg_type; /**< VF to PF message type, see i40e_virtchnl_ops */
+ uint16_t retval; /**< return value */
+ void *msg; /**< pointer to message */
+ uint16_t msglen; /**< length of the message */
+};
+
+/**
+ * Option of package processing.
+ */
+enum rte_pmd_i40e_package_op {
+ RTE_PMD_I40E_PKG_OP_UNDEFINED = 0,
+ RTE_PMD_I40E_PKG_OP_WR_ADD, /**< load package and add to info list */
+ RTE_PMD_I40E_PKG_OP_MAX = 32
+};
+
+#define RTE_PMD_I40E_DDP_NAME_SIZE 32
+
+/**
+ * Version for dynamic device personalization.
+ * Version in "major.minor.update.draft" format.
+ */
+struct rte_pmd_i40e_ddp_version {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t update;
+ uint8_t draft;
+};
+
+/**
+ * Profile information in profile info list.
+ */
+struct rte_pmd_i40e_profile_info {
+ uint32_t track_id;
+ struct rte_pmd_i40e_ddp_version version;
+ uint8_t owner;
+ uint8_t reserved[7];
+ uint8_t name[RTE_PMD_I40E_DDP_NAME_SIZE];
+};
+
+/**
+ * Profile information list returned from HW.
+ */
+struct rte_pmd_i40e_profile_list {
+ uint32_t p_count;
+ struct rte_pmd_i40e_profile_info p_info[1];
+};
+
+/**
+ * The ptype mapping table only accepts RTE_PTYPE_XXX or "user defined"
+ * ptypes. A ptype with the MSB set is regarded as a user defined ptype.
+ * The macro below helps to create a user defined ptype.
+ */
+#define RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK 0x80000000
+
+struct rte_pmd_i40e_ptype_mapping {
+	uint16_t hw_ptype; /**< hardware defined packet type */
+ uint32_t sw_ptype; /**< software defined packet type */
+};
+
+/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf);
+
+/**
+ * Enable/Disable VF MAC anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set MAC anti spoofing.
+ * @param on
+ * 1 - Enable VFs MAC anti spoofing.
+ * 0 - Disable VFs MAC anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set VLAN anti spoofing.
+ * @param on
+ * 1 - Enable VFs VLAN anti spoofing.
+ * 0 - Disable VFs VLAN anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable TX loopback on all the PF and VFs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable TX loopback.
+ * 0 - Disable TX loopback.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_tx_loopback(uint8_t port,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF unicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF multicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Set the VF MAC address.
+ *
+ * The PF should set the MAC address before the VF is initialized. If
+ * the PF sets the MAC address after the VF is initialized, the new MAC
+ * address will not take effect until the VF is reinitialized.
+ *
+ * This will remove all existing MAC filters.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable VF VLAN stripping for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN insertion
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable VF broadcast mode
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable broadcast.
+ * 1 - Enable broadcast.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN tag
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable VF's vlan tag.
+ * n - Enable VF's vlan tag.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN filter
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vlan_id
+ * ID specifying VLAN
+ * @param vf_mask
+ * Mask to filter VF's
+ * @param on
+ * 0 - Disable VF's VLAN filter.
+ * 1 - Enable VF's VLAN filter.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on);
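+
+/*
+ * Usage sketch for rte_pmd_i40e_set_vf_vlan_filter(); the port id,
+ * VLAN id and VF mask below are illustrative assumptions for a port
+ * with at least two VFs:
+ *
+ *	uint64_t vf_mask = (1ULL << 0) | (1ULL << 1);
+ *
+ *	if (rte_pmd_i40e_set_vf_vlan_filter(0, 10, vf_mask, 1) != 0)
+ *		printf("enabling VLAN 10 on VF 0/1 failed\n");
+ */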
+
+/**
+ * Get VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to get.
+ * @param stats
+ * A pointer to a structure of type *rte_eth_stats* to be filled with
+ * the values of device counters for the following set of statistics:
+ * - *ipackets* with the total of successfully received packets.
+ * - *opackets* with the total of successfully transmitted packets.
+ * - *ibytes* with the total of successfully received bytes.
+ * - *obytes* with the total of successfully transmitted bytes.
+ * - *ierrors* with the total of erroneous received packets.
+ * - *oerrors* with the total of failed transmitted packets.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats);
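+
+/*
+ * Usage sketch for rte_pmd_i40e_get_vf_stats(); port and VF ids are
+ * assumptions, and <inttypes.h> is needed for PRIu64:
+ *
+ *	struct rte_eth_stats st;
+ *
+ *	if (rte_pmd_i40e_get_vf_stats(0, 0, &st) == 0)
+ *		printf("VF 0: rx %" PRIu64 ", tx %" PRIu64 "\n",
+ *		       st.ipackets, st.opackets);
+ */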
+
+/**
+ * Clear VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ *    VF whose statistics are to be cleared.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_reset_vf_stats(uint8_t port,
+ uint16_t vf_id);
+
+/**
+ * Set VF's max bandwidth.
+ *
+ * Per VF bandwidth limitation and per TC bandwidth limitation cannot
+ * be enabled in parallel. If per TC bandwidth is enabled, this function
+ * will disable it.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param bw
+ * Bandwidth for this VF.
+ *    The value should be an absolute bandwidth in Mbps.
+ *    The bandwidth is an L2 bandwidth counting the bytes of Ethernet
+ *    packets; the bytes added by the physical layer are not counted.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_max_bw(uint8_t port,
+ uint16_t vf_id,
+ uint32_t bw);
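+
+/*
+ * Usage sketch: cap VF 0 on port 0 at 1000 Mbps (illustrative values;
+ * the driver rejects values above I40E_QOS_BW_MAX or values that are
+ * not a multiple of I40E_QOS_BW_GRANULARITY, and 0 removes the limit):
+ *
+ *	rte_pmd_i40e_set_vf_max_bw(0, 0, 1000);
+ */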
+
+/**
+ * Set all the TCs' bandwidth weight on a specific VF.
+ *
+ * The bw_weight is the percentage of bandwidth occupied by the TC.
+ * It can be regarded as a relative minimum bandwidth setting.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param tc_num
+ * Number of TCs.
+ * @param bw_weight
+ * An array of relative bandwidth weight for all the TCs.
+ *    The sum of the weights should be 100.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t tc_num,
+ uint8_t *bw_weight);
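+
+/*
+ * Usage sketch, assuming VF 0 on port 0 has exactly two TCs enabled;
+ * the weights are illustrative and must sum to 100:
+ *
+ *	uint8_t weights[2] = {40, 60};
+ *
+ *	rte_pmd_i40e_set_vf_tc_bw_alloc(0, 0, 2, weights);
+ */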
+
+/**
+ * Set a specific TC's max bandwidth on a specific VF.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param tc_no
+ * Number specifying TC.
+ * @param bw
+ * Max bandwidth for this TC.
+ *    The value should be an absolute bandwidth in Mbps.
+ *    The bandwidth is an L2 bandwidth counting the bytes of Ethernet
+ *    packets; the bytes added by the physical layer are not counted.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port,
+ uint16_t vf_id,
+ uint8_t tc_no,
+ uint32_t bw);
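+
+/*
+ * Usage sketch: cap TC 1 of VF 0 on port 0 at 500 Mbps (illustrative
+ * values; the TC must already be enabled on the VF, and the bandwidth
+ * is subject to the same max/granularity checks as the per-VF limit):
+ *
+ *	rte_pmd_i40e_set_vf_tc_max_bw(0, 0, 1, 500);
+ */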
+
+/**
+ * Set some TCs to strict priority mode on a physical port.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param tc_map
+ * A bit map for the TCs.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map);
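+
+/*
+ * Usage sketch: bit n of tc_map selects TC n, so 0x03 puts TC 0 and
+ * TC 1 of port 0 into strict priority mode, and 0 reverts all TCs:
+ *
+ *	rte_pmd_i40e_set_tc_strict_prio(0, 0x03);
+ */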
+
+/**
+ * Process a dynamic device personalization (DDP) package. Only loading
+ * a package (RTE_PMD_I40E_PKG_OP_WR_ADD) is currently supported.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param buff
+ * buffer of package.
+ * @param size
+ * size of buffer.
+ * @param op
+ * Operation of package processing
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (1) if profile exists.
+ */
+int rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+ uint32_t size,
+ enum rte_pmd_i40e_package_op op);
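+
+/*
+ * Usage sketch: load a profile from a file into port 0. The file name
+ * and buffer size are assumptions; error handling is trimmed:
+ *
+ *	uint8_t buf[65536];
+ *	FILE *f = fopen("profile.pkg", "rb");
+ *	uint32_t sz = (uint32_t)fread(buf, 1, sizeof(buf), f);
+ *
+ *	fclose(f);
+ *	rte_pmd_i40e_process_ddp_package(0, buf, sz,
+ *					 RTE_PMD_I40E_PKG_OP_WR_ADD);
+ */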
+
+/**
+ * Get the list of loaded profiles.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param buff
+ *   Buffer for the response.
+ * @param size
+ *   Size of the buffer.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size);
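+
+/*
+ * Usage sketch: in this version the driver expects room for 16
+ * profile entries of 48 bytes each plus a 4-byte count, so a buffer
+ * of 48 * 16 + 4 bytes satisfies its size check:
+ *
+ *	uint8_t buf[48 * 16 + 4];
+ *	struct rte_pmd_i40e_profile_list *list;
+ *
+ *	if (rte_pmd_i40e_get_ddp_list(0, buf, sizeof(buf)) == 0) {
+ *		list = (struct rte_pmd_i40e_profile_list *)buf;
+ *		printf("%u profiles loaded\n", list->p_count);
+ *	}
+ */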
+
+/**
+ * Update hardware defined ptype to software defined packet type
+ * mapping table.
+ *
+ * @param port
+ *   port identifier of the device.
+ * @param mapping_items
+ * the base address of the mapping items array.
+ * @param count
+ * number of mapping items.
+ * @param exclusive
+ *   the flag indicating the ptype mapping update method.
+ *   -(0) only overwrite the referred PTYPE mappings,
+ *	keep other PTYPE mappings unchanged.
+ *   -(!0) overwrite the referred PTYPE mappings,
+ *	set all other PTYPE mappings to RTE_PTYPE_UNKNOWN.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if the device is not an i40e port.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_ptype_mapping_update(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive);
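+
+/*
+ * Usage sketch: map one hardware ptype to a software ptype without
+ * touching the rest of the table (the hw_ptype value 38 is only an
+ * illustration):
+ *
+ *	struct rte_pmd_i40e_ptype_mapping m = {
+ *		.hw_ptype = 38,
+ *		.sw_ptype = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
+ *			    RTE_PTYPE_L4_UDP,
+ *	};
+ *
+ *	rte_pmd_i40e_ptype_mapping_update(0, &m, 1, 0);
+ */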
+
+/**
+ * Reset hardware defined ptype to software defined ptype
+ * mapping table to default.
+ *
+ * @param port
+ *   port identifier of the device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if the device is not an i40e port.
+ */
+int rte_pmd_i40e_ptype_mapping_reset(uint8_t port);
+
+/**
+ * Get hardware defined ptype to software defined ptype
+ * mapping items.
+ *
+ * @param port
+ *   port identifier of the device.
+ * @param mapping_items
+ * the base address of the array to store returned items.
+ * @param size
+ * the size of the input array.
+ * @param count
+ * the place to store the number of returned items.
+ * @param valid_only
+ *   -(0) return the full mapping table.
+ *   -(!0) only return mapping items whose packet_type != RTE_PTYPE_UNKNOWN.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if the device is not an i40e port.
+ */
+int rte_pmd_i40e_ptype_mapping_get(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t size,
+ uint16_t *count,
+ uint8_t valid_only);
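+
+/*
+ * Usage sketch: fetch only the valid entries of the table; the array
+ * size of 256 is an assumption matching the driver's hardware ptype
+ * space, and entries beyond the array are not returned:
+ *
+ *	struct rte_pmd_i40e_ptype_mapping items[256];
+ *	uint16_t n = 0;
+ *
+ *	rte_pmd_i40e_ptype_mapping_get(0, items, 256, &n, 1);
+ */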
+
+/**
+ * Replace a specific or a group of software defined ptypes
+ * with a new one
+ *
+ * @param port
+ *   port identifier of the device.
+ * @param target
+ * the packet type to be replaced
+ * @param mask
+ * -(0) target represent a specific software defined ptype.
+ * -(!0) target is a mask to represent a group of software defined ptypes.
+ * @param pkt_type
+ *   the new packet type to overwrite with.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if the device is not an i40e port.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+ uint32_t target,
+ uint8_t mask,
+ uint32_t pkt_type);
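+
+/*
+ * Usage sketch: with a non-zero mask, every table entry whose ptype
+ * bits form a non-empty subset of *target* is rewritten to *pkt_type*;
+ * here all partial IPv4/UDP-over-Ethernet variants collapse into one
+ * ptype (the values are illustrative):
+ *
+ *	rte_pmd_i40e_ptype_mapping_replace(0,
+ *		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ *		1,
+ *		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP);
+ */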
+
+#endif /* _PMD_I40E_H_ */
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index ef353984..3b0e805d 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -2,3 +2,39 @@ DPDK_2.0 {
local: *;
};
+
+DPDK_17.02 {
+ global:
+
+ rte_pmd_i40e_get_vf_stats;
+ rte_pmd_i40e_ping_vfs;
+ rte_pmd_i40e_ptype_mapping_get;
+ rte_pmd_i40e_ptype_mapping_replace;
+ rte_pmd_i40e_ptype_mapping_reset;
+ rte_pmd_i40e_ptype_mapping_update;
+ rte_pmd_i40e_reset_vf_stats;
+ rte_pmd_i40e_set_tx_loopback;
+ rte_pmd_i40e_set_vf_broadcast;
+ rte_pmd_i40e_set_vf_mac_addr;
+ rte_pmd_i40e_set_vf_mac_anti_spoof;
+ rte_pmd_i40e_set_vf_multicast_promisc;
+ rte_pmd_i40e_set_vf_unicast_promisc;
+ rte_pmd_i40e_set_vf_vlan_anti_spoof;
+ rte_pmd_i40e_set_vf_vlan_filter;
+ rte_pmd_i40e_set_vf_vlan_insert;
+ rte_pmd_i40e_set_vf_vlan_stripq;
+ rte_pmd_i40e_set_vf_vlan_tag;
+
+} DPDK_2.0;
+
+DPDK_17.05 {
+ global:
+
+ rte_pmd_i40e_set_tc_strict_prio;
+ rte_pmd_i40e_set_vf_max_bw;
+ rte_pmd_i40e_set_vf_tc_bw_alloc;
+ rte_pmd_i40e_set_vf_tc_max_bw;
+ rte_pmd_i40e_process_ddp_package;
+ rte_pmd_i40e_get_ddp_list;
+
+} DPDK_17.02;