Diffstat (limited to 'drivers/net/i40e')
-rw-r--r--  drivers/net/i40e/Makefile                |    5
-rw-r--r--  drivers/net/i40e/base/i40e_register.h    |   24
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c           | 1016
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h           |  120
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c        |  325
-rw-r--r--  drivers/net/i40e/i40e_fdir.c             |    3
-rw-r--r--  drivers/net/i40e/i40e_flow.c             |  218
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c             |  383
-rw-r--r--  drivers/net/i40e/i40e_rxtx.h             |    5
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_avx2.c    |    2
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_common.h  |    4
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_neon.c    |   35
-rw-r--r--  drivers/net/i40e/i40e_vf_representor.c   |  531
-rw-r--r--  drivers/net/i40e/meson.build             |   12
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.c          |   86
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.h          |   18
16 files changed, 2123 insertions, 664 deletions
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 5663f5b1..3f869a8d 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -11,6 +11,8 @@ LIB = librte_pmd_i40e.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF
CFLAGS += -DX722_A0_SUPPORT
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
LDLIBS += -lrte_bus_pci
@@ -24,7 +26,7 @@ LIBABIVER := 2
# to disable warnings
#
ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
-CFLAGS_BASE_DRIVER = -wd593 -wd188
+CFLAGS_BASE_DRIVER = -diag-disable 593
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
CFLAGS_BASE_DRIVER += -Wno-sign-compare
CFLAGS_BASE_DRIVER += -Wno-unused-value
@@ -85,6 +87,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_vf_representor.c
ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
CC_AVX2_SUPPORT=1
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index a482ab90..df66e76a 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -90,7 +90,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
@@ -113,7 +113,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
@@ -140,7 +140,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0
@@ -168,7 +168,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0
@@ -291,7 +291,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
@@ -535,7 +535,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
@@ -1274,14 +1274,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
@@ -1692,7 +1692,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
@@ -3059,7 +3059,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
@@ -3196,7 +3196,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
@@ -3219,7 +3219,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
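
Every hunk in i40e_register.h above makes the same one-character fix: 0x1 becomes 0x1u wherever the masked field sits in bit 31. A minimal sketch of why, assuming I40E_MASK expands to a plain left shift as in the i40e base code:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumption: the base-code macro is essentially a left shift. */
    #define I40E_MASK(mask, shift) ((mask) << (shift))

    int main(void)
    {
            /* 0x1 is a signed int; shifting a 1 into bit 31 overflows
             * the signed type, which is undefined behaviour in C:
             * uint32_t bad = I40E_MASK(0x1, 31);
             */

            /* 0x1u is unsigned, so the same shift is well defined. */
            uint32_t good = I40E_MASK(0x1u, 31);

            printf("0x%08x\n", good); /* prints 0x80000000 */
            return 0;
    }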
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 508b4171..85a6a867 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -11,6 +11,7 @@
#include <inttypes.h>
#include <assert.h>
+#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
@@ -41,6 +42,8 @@
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
+#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
+#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
#define I40E_CLEAR_PXE_WAIT_MS 200
@@ -213,7 +216,7 @@
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
-static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
@@ -369,7 +372,12 @@ static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
static int i40e_get_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
-static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -395,6 +403,13 @@ static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
int i40e_logtype_init;
int i40e_logtype_driver;
+static const char *const valid_keys[] = {
+ ETH_I40E_FLOATING_VEB_ARG,
+ ETH_I40E_FLOATING_VEB_LIST_ARG,
+ ETH_I40E_SUPPORT_MULTI_DRIVER,
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG,
+ NULL};
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -489,6 +504,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.get_reg = i40e_get_regs,
.get_eeprom_length = i40e_get_eeprom_length,
.get_eeprom = i40e_get_eeprom,
+ .get_module_info = i40e_get_module_info,
+ .get_module_eeprom = i40e_get_module_eeprom,
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set = i40e_dev_mtu_set,
.tm_ops_get = i40e_tm_ops_get,
@@ -607,16 +624,74 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
sizeof(rte_i40e_txq_prio_strings[0]))
-static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+static int
+eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct i40e_adapter), eth_i40e_dev_init);
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ }
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct i40e_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_i40e_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ /* probe VF representor ports */
+ struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
+ pci_dev->device.name);
+
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct i40e_vf_representor representor = {
+ .vf_id = eth_da.representor_ports[i],
+ .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
+ pf_ethdev->data->dev_private)->switch_domain_id,
+ .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
+ pf_ethdev->data->dev_private)
+ };
+
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name, eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct i40e_vf_representor), NULL, NULL,
+ i40e_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create i40e vf "
+ "representor %s.", name);
+ }
+
+ return 0;
}
static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit);
}
static struct rte_pci_driver rte_i40e_pmd = {
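
The reworked probe/remove paths above create the PF port first and then spawn one port per requested VF representor. A hedged usage sketch, assuming the experimental rte_eth_devargs_parse() from rte_ethdev_driver.h; the PCI address and VF range are illustrative:

    /* e.g.  testpmd -w 0000:02:00.0,representor=[0-2] -- -i  */
    #include <rte_ethdev_driver.h>

    static int count_representors(const char *args)
    {
            struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };

            if (rte_eth_devargs_parse(args, &eth_da) != 0)
                    return -1;
            /* for "representor=[0-2]": nb_representor_ports == 3 and
             * representor_ports[] == {0, 1, 2} */
            return eth_da.nb_representor_ports;
    }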
@@ -627,41 +702,21 @@ static struct rte_pci_driver rte_i40e_pmd = {
.remove = eth_i40e_pci_remove,
};
-static inline int
-rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-static inline int
-rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static inline void
-i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
+ uint32_t reg_val)
{
+ uint32_t ori_reg_val;
+ struct rte_eth_dev *dev;
+
+ ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
i40e_write_rx_ctl(hw, reg_addr, reg_val);
- PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
- "with value 0x%08x",
- reg_addr, reg_val);
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
}
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
@@ -688,7 +743,6 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
*/
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
- i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
}
static inline void i40e_config_automask(struct i40e_pf *pf)
@@ -807,7 +861,7 @@ config_vf_floating_veb(struct rte_devargs *devargs,
if (devargs == NULL)
return;
- kvlist = rte_kvargs_parse(devargs->args, NULL);
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
if (kvlist == NULL)
return;
@@ -848,7 +902,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
if (devargs == NULL)
return 0;
- kvlist = rte_kvargs_parse(devargs->args, NULL);
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
if (kvlist == NULL)
return 0;
@@ -1055,8 +1109,6 @@ i40e_init_queue_region_conf(struct rte_eth_dev *dev)
memset(info, 0, sizeof(struct i40e_queue_regions));
}
-#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
-
static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
const char *value,
@@ -1088,9 +1140,8 @@ static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- static const char *const valid_keys[] = {
- ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
struct rte_kvargs *kvlist;
+ int kvargs_count;
/* Enable global configuration by default */
pf->support_multi_driver = false;
@@ -1102,7 +1153,13 @@ i40e_support_multi_driver(struct rte_eth_dev *dev)
if (!kvlist)
return -EINVAL;
- if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
"the first invalid or last valid one is used !",
ETH_I40E_SUPPORT_MULTI_DRIVER);
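
The kvargs handling above now validates keys against valid_keys[] and returns early when the devarg is absent. A hedged round trip of the same rte_kvargs calls; the key string mirrors ETH_I40E_SUPPORT_MULTI_DRIVER and the invocation is illustrative:

    /* e.g.  testpmd -w 0000:84:00.0,support-multi-driver=1 -- -i  */
    #include <errno.h>
    #include <rte_kvargs.h>

    static int count_multi_driver_arg(const char *args)
    {
            static const char * const keys[] =
                    { "support-multi-driver", NULL };
            struct rte_kvargs *kvlist = rte_kvargs_parse(args, keys);
            int n;

            if (kvlist == NULL)
                    return -EINVAL; /* unknown key or malformed list */
            n = rte_kvargs_count(kvlist, "support-multi-driver");
            rte_kvargs_free(kvlist);
            return n; /* the driver warns when this exceeds 1 */
    }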
@@ -1118,7 +1175,34 @@ i40e_support_multi_driver(struct rte_eth_dev *dev)
}
static int
-eth_i40e_dev_init(struct rte_eth_dev *dev)
+i40e_aq_debug_write_global_register(struct i40e_hw *hw,
+ uint32_t reg_addr, uint64_t reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ uint64_t ori_reg_val;
+ struct rte_eth_dev *dev;
+ int ret;
+
+ ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from 0x%08x",
+ reg_addr);
+ return -EIO;
+ }
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%"PRIx64", after: 0x%"PRIx64,
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
+
+ return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
+}
+
+static int
+eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev;
struct rte_intr_handle *intr_handle;
@@ -1170,6 +1254,13 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->bus.func = pci_dev->addr.function;
hw->adapter_stopped = 0;
+ /*
+ * Switch Tag value should not be identical to either the First Tag
+ * or Second Tag values. So set something other than common Ethertype
+ * for internal switching.
+ */
+ hw->switch_tag = 0xffff;
+
/* Check if need to support multi-driver */
i40e_support_multi_driver(dev);
@@ -1224,7 +1315,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialise the L3_MAP register */
if (!pf->support_multi_driver) {
- ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GLQF_L3_MAP(40),
0x00000028, NULL);
if (ret)
PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
@@ -1232,7 +1324,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG,
"Global register 0x%08x is changed with 0x28",
I40E_GLQF_L3_MAP(40));
- i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
}
/* Need the special FW version to support floating VEB */
@@ -1519,7 +1610,6 @@ void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
- i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
}
static int
@@ -1533,6 +1623,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct rte_flow *p_flow;
int ret;
uint8_t aq_fail = 0;
+ int retries = 0;
PMD_INIT_FUNC_TRACE();
@@ -1544,6 +1635,10 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
+ ret = rte_eth_switch_domain_free(pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1574,9 +1669,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
- /* register callback func to eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40e_dev_interrupt_handler, dev);
+ /* unregister callback func from eal lib */
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ i40e_msec_delay(500);
+ } while (retries++ < 5);
i40e_rm_ethtype_filter_list(pf);
i40e_rm_tunnel_filter_list(pf);
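
rte_intr_callback_unregister() returns -EAGAIN while the callback is still executing, which the new bounded retry loop above absorbs instead of failing the uninit. The same idiom in generic form, as a sketch; the 500 ms delay and five retries mirror the hunk:

    #include <errno.h>
    #include <unistd.h>

    static int unregister_with_retry(int (*try_unregister)(void),
                                     int max_retries)
    {
            int ret, retries = 0;

            do {
                    ret = try_unregister();
                    if (ret >= 0)
                            return 0;        /* unregistered */
                    if (ret != -EAGAIN)
                            return ret;      /* hard failure, give up */
                    usleep(500 * 1000);      /* let the callback finish */
            } while (retries++ < max_retries);

            return -EAGAIN;                  /* still busy after retries */
    }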
@@ -1746,8 +1852,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
/* Write first RX queue to Link list register as the head element */
if (vsi->type != I40E_VSI_SRIOV) {
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
- pf->support_multi_driver);
+ i40e_calc_itr_interval(1, pf->support_multi_driver);
if (msix_vect == I40E_MISC_VEC_ID) {
I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
@@ -1943,27 +2048,40 @@ i40e_phy_conf_link(struct i40e_hw *hw,
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
enum i40e_aq_phy_type cnt;
+ uint8_t avail_speed;
uint32_t phy_type_mask = 0;
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_LOW_POWER;
- const uint8_t advt = I40E_LINK_SPEED_40GB |
- I40E_LINK_SPEED_25GB |
- I40E_LINK_SPEED_10GB |
- I40E_LINK_SPEED_1GB |
- I40E_LINK_SPEED_100MB;
int ret = -ENOTSUP;
+ /* To get phy capabilities of available speeds. */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
+ status);
+ return ret;
+ }
+ avail_speed = phy_ab.link_speed;
+ /* To get the current phy config. */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
NULL);
- if (status)
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
+ status);
return ret;
+ }
- /* If link already up, no need to set up again */
- if (is_up && phy_ab.phy_type != 0)
+ /* If the link needs to go up and it is in autoneg mode and the speed
+ * is already OK, there is no need to set it up again.
+ */
+ if (is_up && phy_ab.phy_type != 0 &&
+ abilities & I40E_AQ_PHY_AN_ENABLED &&
+ phy_ab.link_speed != 0)
return I40E_SUCCESS;
memset(&phy_conf, 0, sizeof(phy_conf));
@@ -1972,18 +2090,20 @@ i40e_phy_conf_link(struct i40e_hw *hw,
abilities &= ~mask;
abilities |= phy_ab.abilities & mask;
- /* update ablities and speed */
- if (abilities & I40E_AQ_PHY_AN_ENABLED)
- phy_conf.link_speed = advt;
- else
- phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
-
phy_conf.abilities = abilities;
+ /* If the link needs to go up but the forced speed is not supported,
+ * warn users and configure the default available speeds.
+ */
+ if (is_up && !(force_speed & avail_speed)) {
+ PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
+ phy_conf.link_speed = avail_speed;
+ } else {
+ phy_conf.link_speed = is_up ? force_speed : avail_speed;
+ }
-
- /* To enable link, phy_type mask needs to include each type */
- for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ /* PHY type mask needs to include each type except PHY type extension */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
phy_type_mask |= 1 << cnt;
/* use get_phy_abilities_resp value for the rest */
@@ -2016,11 +2136,18 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *conf = &dev->data->dev_conf;
+ if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_100M;
+ }
speed = i40e_parse_link_speeds(conf->link_speeds);
- abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- abilities |= I40E_AQ_PHY_LINK_ENABLED;
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
+ I40E_AQ_PHY_AN_ENABLED |
+ I40E_AQ_PHY_LINK_ENABLED;
return i40e_phy_conf_link(hw, abilities, speed, true);
}
@@ -2137,13 +2264,6 @@ i40e_dev_start(struct rte_eth_dev *dev)
}
/* Apply link configure */
- if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G)) {
- PMD_DRV_LOG(ERR, "Invalid link setting");
- goto err_up;
- }
ret = i40e_apply_link_speed(dev);
if (I40E_SUCCESS != ret) {
PMD_DRV_LOG(ERR, "Fail to apply link setting");
@@ -2286,6 +2406,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
i40e_pf_disable_irq0(hw);
rte_intr_disable(intr_handle);
+ i40e_fdir_teardown(pf);
+
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
@@ -2297,7 +2419,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
pf->vmdq = NULL;
/* release all the existing VSIs and VEBs */
- i40e_fdir_teardown(pf);
i40e_vsi_release(pf->main_vsi);
/* shutdown the adminq */
@@ -2339,7 +2460,7 @@ i40e_dev_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
- ret = eth_i40e_dev_init(dev);
+ ret = eth_i40e_dev_init(dev, NULL);
return ret;
}
@@ -2437,84 +2558,143 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
return i40e_phy_conf_link(hw, abilities, speed, false);
}
-int
-i40e_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete)
+static __rte_always_inline void
+update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
+{
+/* Link status registers and values */
+#define I40E_PRTMAC_LINKSTA 0x001E2420
+#define I40E_REG_LINK_UP 0x40000080
+#define I40E_PRTMAC_MACC 0x001E24E0
+#define I40E_REG_MACC_25GB 0x00020000
+#define I40E_REG_SPEED_MASK 0x38000000
+#define I40E_REG_SPEED_100MB 0x00000000
+#define I40E_REG_SPEED_1GB 0x08000000
+#define I40E_REG_SPEED_10GB 0x10000000
+#define I40E_REG_SPEED_20GB 0x20000000
+#define I40E_REG_SPEED_25_40GB 0x18000000
+ uint32_t link_speed;
+ uint32_t reg_val;
+
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
+ link_speed = reg_val & I40E_REG_SPEED_MASK;
+ reg_val &= I40E_REG_LINK_UP;
+ link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
+
+ if (unlikely(link->link_status == 0))
+ return;
+
+ /* Parse the link status */
+ switch (link_speed) {
+ case I40E_REG_SPEED_100MB:
+ link->link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_REG_SPEED_1GB:
+ link->link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_REG_SPEED_10GB:
+ link->link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_REG_SPEED_20GB:
+ link->link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_REG_SPEED_25_40GB:
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
+
+ if (reg_val & I40E_REG_MACC_25GB)
+ link->link_speed = ETH_SPEED_NUM_25G;
+ else
+ link->link_speed = ETH_SPEED_NUM_40G;
+
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
+ break;
+ }
+}
+
+static __rte_always_inline void
+update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
+ bool enable_lse, int wait_to_complete)
{
-#define CHECK_INTERVAL 100 /* 100ms */
-#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
+ uint32_t rep_cnt = MAX_REPEAT_TIME;
struct i40e_link_status link_status;
- struct rte_eth_link link, old;
int status;
- unsigned rep_cnt = MAX_REPEAT_TIME;
- bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
- memset(&link, 0, sizeof(link));
- memset(&old, 0, sizeof(old));
memset(&link_status, 0, sizeof(link_status));
- rte_i40e_dev_atomic_read_link_status(dev, &old);
do {
+ memset(&link_status, 0, sizeof(link_status));
+
/* Get link status information from hardware */
status = i40e_aq_get_link_info(hw, enable_lse,
&link_status, NULL);
- if (status != I40E_SUCCESS) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ if (unlikely(status != I40E_SUCCESS)) {
+ link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Failed to get link info");
- goto out;
+ return;
}
- link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
- if (!wait_to_complete || link.link_status)
+ link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
+ if (!wait_to_complete || link->link_status)
break;
rte_delay_ms(CHECK_INTERVAL);
} while (--rep_cnt);
- if (!link.link_status)
- goto out;
-
- /* i40e uses full duplex only */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
-
/* Parse the link status */
switch (link_status.link_speed) {
case I40E_LINK_SPEED_100MB:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = ETH_SPEED_NUM_100M;
break;
case I40E_LINK_SPEED_1GB:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = ETH_SPEED_NUM_1G;
break;
case I40E_LINK_SPEED_10GB:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = ETH_SPEED_NUM_10G;
break;
case I40E_LINK_SPEED_20GB:
- link.link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = ETH_SPEED_NUM_20G;
break;
case I40E_LINK_SPEED_25GB:
- link.link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = ETH_SPEED_NUM_25G;
break;
case I40E_LINK_SPEED_40GB:
- link.link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = ETH_SPEED_NUM_40G;
break;
default:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = ETH_SPEED_NUM_100M;
break;
}
+}
+int
+i40e_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+ int ret;
+
+ memset(&link, 0, sizeof(link));
+
+ /* i40e uses full duplex only */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
-out:
- rte_i40e_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
+ if (!wait_to_complete && !enable_lse)
+ update_link_reg(hw, &link);
+ else
+ update_link_aq(hw, &link, enable_lse, wait_to_complete);
+ ret = rte_eth_linkstatus_set(dev, &link);
i40e_notify_all_vfs_link_status(dev);
- return 0;
+ return ret;
}
/* Get all the statistics of a VSI */
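
i40e_dev_link_update() above now takes the register-read path (update_link_reg) for non-blocking queries when LSC interrupts are off, and the admin-queue path (update_link_aq) otherwise, publishing the result through rte_eth_linkstatus_set(). A hedged sketch of the application-side view; port id 0 is illustrative:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void show_link(uint16_t port_id)
    {
            struct rte_eth_link link;

            /* wait_to_complete == 0 with LSC disabled ends up in the
             * fast update_link_reg() register read above. */
            rte_eth_link_get_nowait(port_id, &link);

            printf("port %u: %s, %u Mbps\n", port_id,
                   link.link_status ? "up" : "down", link.link_speed);
    }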
@@ -3169,13 +3349,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vsi *vsi = pf->main_vsi;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3183,7 +3363,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3196,7 +3382,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ dev_info->tx_queue_offload_capa;
+ dev_info->dev_capa =
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
dev_info->reta_size = pf->hash_lut_size;
@@ -3210,6 +3402,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3220,8 +3413,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -3248,15 +3440,42 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_tx_queues += dev_info->vmdq_queue_num;
}
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
/* For XL710 */
dev_info->speed_capa = ETH_LINK_SPEED_40G;
- else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+ dev_info->default_rxportconf.nb_queues = 2;
+ dev_info->default_txportconf.nb_queues = 2;
+ if (dev->data->nb_rx_queues == 1)
+ dev_info->default_rxportconf.ring_size = 2048;
+ else
+ dev_info->default_rxportconf.ring_size = 1024;
+ if (dev->data->nb_tx_queues == 1)
+ dev_info->default_txportconf.ring_size = 1024;
+ else
+ dev_info->default_txportconf.ring_size = 512;
+
+ } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
/* For XXV710 */
dev_info->speed_capa = ETH_LINK_SPEED_25G;
- else
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
/* For X710 */
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+ dev_info->default_rxportconf.ring_size = 512;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ }
+ }
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
}
static int
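
The dev_info hunk above also reports the new PMD-preferred port parameters (default_rxportconf/default_txportconf: queue counts, ring sizes, burst size) per device family. A hedged sketch of how an application adopts them; the port id and fallback value are illustrative:

    #include <rte_ethdev.h>

    static uint16_t preferred_rx_ring_size(uint16_t port_id)
    {
            struct rte_eth_dev_info info;

            rte_eth_dev_info_get(port_id, &info);
            /* ring_size == 0 means the PMD expressed no preference. */
            return info.default_rxportconf.ring_size ?
                   info.default_rxportconf.ring_size : 1024;
    }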
@@ -3307,7 +3526,8 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
return 0;
}
- ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_SWT_L2TAGCTRL(reg_id),
reg_w, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
@@ -3329,7 +3549,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3367,7 +3588,6 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
/* If NVM API < 1.7, keep the register setting */
ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
tpid, qinq);
- i40e_global_cfg_warning(I40E_WARNING_TPID);
return ret;
}
@@ -3377,9 +3597,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3387,14 +3609,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3611,7 +3833,6 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
<< I40E_KILOSHIFT);
- i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
} else {
PMD_DRV_LOG(ERR,
"Water marker configuration is not supported.");
@@ -3641,6 +3862,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3659,7 +3881,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -4010,8 +4232,8 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
- mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
- alignment, RTE_PGSIZE_2M);
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
return I40E_ERR_NO_MEMORY;
@@ -4147,7 +4369,6 @@ i40e_get_cap(struct i40e_hw *hw)
}
#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
-#define QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
static int i40e_pf_parse_vf_queue_number_handler(const char *key,
const char *value,
@@ -4181,9 +4402,9 @@ static int i40e_pf_parse_vf_queue_number_handler(const char *key,
static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
{
- static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_kvargs *kvlist;
+ int kvargs_count;
/* set default queue number per VF as 4 */
pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
@@ -4195,12 +4416,18 @@ static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
if (kvlist == NULL)
return -(EINVAL);
- if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
"the first invalid or last valid one is used !",
- QUEUE_NUM_PER_VF_ARG);
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG);
- rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
+ rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
i40e_pf_parse_vf_queue_number_handler, pf);
rte_kvargs_free(kvlist);
@@ -5669,6 +5896,12 @@ i40e_pf_setup(struct i40e_pf *pf)
PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
return ret;
}
+
+ ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "failed to allocate switch domain for device %d", ret);
+
if (pf->flags & I40E_FLAG_FDIR) {
/* make queue allocated first, let FDIR use queue pair 0*/
ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
@@ -7372,6 +7605,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7415,13 +7649,14 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -7431,6 +7666,7 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7459,10 +7695,13 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* For MPLSoGRE */
memset(&filter_replace, 0,
@@ -7485,13 +7724,14 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -7501,6 +7741,7 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7536,10 +7777,13 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* for GTP-U */
memset(&filter_replace, 0,
@@ -7568,13 +7812,14 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -7584,6 +7829,7 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7611,10 +7857,13 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* for GTP-U */
memset(&filter_replace, 0,
@@ -7636,13 +7885,14 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -8194,14 +8444,14 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
}
if (reg != val) {
- ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_PRS_FVBM(2),
reg, NULL);
if (ret != 0)
return ret;
PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
"with value 0x%08x",
I40E_GL_PRS_FVBM(2), reg);
- i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
} else {
ret = 0;
}
@@ -8467,7 +8717,6 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
I40E_GLQF_HSYM(j),
reg);
}
- i40e_global_cfg_warning(I40E_WARNING_HSYM);
}
}
@@ -8493,7 +8742,6 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
goto out;
i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
- i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
out:
I40E_WRITE_FLUSH(hw);
@@ -9086,12 +9334,17 @@ void
i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
+ struct rte_eth_dev *dev;
- PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
- if (reg != val)
- i40e_write_global_rx_ctl(hw, addr, val);
- PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
- (uint32_t)i40e_read_rx_ctl(hw, addr));
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ if (reg != val) {
+ i40e_write_rx_ctl(hw, addr, val);
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, addr, reg,
+ (uint32_t)i40e_read_rx_ctl(hw, addr));
+ }
}
static void
@@ -9165,12 +9418,6 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
pf->hash_input_set[pctype] = input_set;
pf->fdir.input_set[pctype] = input_set;
}
-
- if (!pf->support_multi_driver) {
- i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
- i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
- i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
- }
}
int
@@ -9236,7 +9483,6 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
for (i = 0; i < num; i++)
i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
@@ -9245,7 +9491,6 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
0);
- i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
I40E_WRITE_FLUSH(hw);
pf->hash_input_set[pctype] = input_set;
@@ -9326,7 +9571,6 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
i40e_check_write_global_reg(hw,
I40E_GLQF_FD_MSK(i, pctype),
0);
- i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
} else {
PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
}
@@ -9809,6 +10053,60 @@ i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+/*
+ * GL_SWR_PM_UP_THR:
+ * The value is not affected by the link speed; it is set according
+ * to the total number of ports for a better pipe-monitor configuration.
+ */
+static bool
+i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
+{
+#define I40E_GL_SWR_PM_EF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
+
+#define I40E_GL_SWR_PM_SF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
+
+ static const struct {
+ uint16_t device_id;
+ uint32_t val;
+ } swr_pm_table[] = {
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
+
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
+ };
+ uint32_t i;
+
+ if (value == NULL) {
+ PMD_DRV_LOG(ERR, "value is NULL");
+ return false;
+ }
+
+ for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
+ if (hw->device_id == swr_pm_table[i].device_id) {
+ *value = swr_pm_table[i].val;
+
+ PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
+ "value - 0x%08x",
+ hw->device_id, *value);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int
i40e_dev_sync_phy_type(struct i40e_hw *hw)
{
@@ -9873,13 +10171,16 @@ i40e_configure_registers(struct i40e_hw *hw)
}
if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
- I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_SF_VALUE;
- else /* For X710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_EF_VALUE;
+ uint32_t cfg_val;
+
+ if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
+ PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
+ "GL_SWR_PM_UP_THR value fixup",
+ hw->device_id);
+ continue;
+ }
+
+ reg_table[i].val = cfg_val;
}
ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
@@ -10321,9 +10622,8 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
uint32_t tsync_inc_h;
/* Get current link speed. */
- memset(&link, 0, sizeof(link));
i40e_dev_link_update(dev, 1);
- rte_i40e_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
case ETH_SPEED_NUM_40G:
@@ -11249,8 +11549,148 @@ static int i40e_get_eeprom(struct rte_eth_dev *dev,
return 0;
}
-static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr)
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t sff8472_comp = 0;
+ uint32_t sff8472_swap = 0;
+ uint32_t sff8636_rev = 0;
+ i40e_status status;
+ uint32_t type = 0;
+
+ /* Check if firmware supports reading module EEPROM. */
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ PMD_DRV_LOG(ERR,
+ "Module EEPROM memory read not supported. "
+ "Please update the NVM image.\n");
+ return -EINVAL;
+ }
+
+ status = i40e_update_link_info(hw);
+ if (status)
+ return -EIO;
+
+ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
+ PMD_DRV_LOG(ERR,
+ "Cannot read module EEPROM memory. "
+ "No module connected.\n");
+ return -EINVAL;
+ }
+
+ type = hw->phy.link_info.module_type[0];
+
+ switch (type) {
+ case I40E_MODULE_TYPE_SFP:
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_COMP,
+ &sff8472_comp, NULL);
+ if (status)
+ return -EIO;
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_SWAP,
+ &sff8472_swap, NULL);
+ if (status)
+ return -EIO;
+
+ /* Check if the module requires address swap to access
+ * the other EEPROM memory page.
+ */
+ if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
+ PMD_DRV_LOG(WARNING,
+ "Module address swap to access "
+ "page 0xA2 is not supported.\n");
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp == 0x00) {
+ /* Module is not SFF-8472 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP_PLUS:
+ /* Read from memory page 0. */
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ 0,
+ I40E_MODULE_REVISION_ADDR,
+ &sff8636_rev, NULL);
+ if (status)
+ return -EIO;
+ /* Determine revision compliance byte */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP28:
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Module type unrecognized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ bool is_sfp = false;
+ i40e_status status;
+ uint8_t *data;
+ uint32_t value = 0;
+ uint32_t i;
+
+ if (!info || !info->length || !info->data)
+ return -EINVAL;
+ data = info->data;
+
+ if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ for (i = 0; i < info->length; i++) {
+ u32 offset = i + info->offset;
+ u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
+ offset -= RTE_ETH_MODULE_SFF_8079_LEN;
+ addr = I40E_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
+ while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
+ addr++;
+ }
+ }
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ addr, offset, &value, NULL);
+ if (status)
+ return -EIO;
+ data[i] = (uint8_t)value;
+ }
+ return 0;
+}
+
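
i40e_get_module_eeprom() above flattens the module EEPROM into one offset space: for SFPs, offsets past the 256-byte page at I2C address 0xA0 are redirected to address 0xA2; for QSFPs, the offset is folded into 128-byte pages. A worked sketch of the SFP case, with the standard SFP I2C addresses stated as assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define SFF_8079_LEN     256   /* size of the 0xA0 page */
    #define EEPROM_DEV_ADDR  0xA0  /* assumption: standard SFP address */
    #define EEPROM_DEV_ADDR2 0xA2

    int main(void)
    {
            uint32_t offset = 300;
            uint32_t addr = EEPROM_DEV_ADDR;

            if (offset >= SFF_8079_LEN) {  /* spills into the 0xA2 page */
                    offset -= SFF_8079_LEN;
                    addr = EEPROM_DEV_ADDR2;
            }
            /* prints: i2c addr 0xa2, offset 44 */
            printf("i2c addr 0x%02x, offset %u\n",
                   (unsigned)addr, (unsigned)offset);
            return 0;
    }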
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -11261,7 +11701,7 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
if (!is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
- return;
+ return -EINVAL;
}
TAILQ_FOREACH(f, &vsi->mac_list, next) {
@@ -11271,25 +11711,31 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
if (f == NULL) {
PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
- return;
+ return -EIO;
}
mac_filter = f->mac_info;
ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to delete mac filter");
- return;
+ return -EIO;
}
memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
ret = i40e_vsi_add_mac(vsi, &mac_filter);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to add mac filter");
- return;
+ return -EIO;
}
memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
- i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
- mac_addr->addr_bytes, NULL);
+ ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ mac_addr->addr_bytes, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to change mac");
+ return -EIO;
+ }
+
+ return 0;
}
static int
@@ -11312,9 +11758,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
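The jumbo setting now lives in rxmode.offloads instead of the removed jumbo_frame bit, so an MTU change toggles DEV_RX_OFFLOAD_JUMBO_FRAME as a side effect. A sketch, assuming the port advertises that offload:

    #include <rte_ethdev.h>

    static int enable_jumbo(uint16_t port_id)
    {
        /* A frame size above ETHER_MAX_LEN makes the PMD set
         * DEV_RX_OFFLOAD_JUMBO_FRAME in rxmode.offloads. */
        return rte_eth_dev_set_mtu(port_id, 9000);
    }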
@@ -11413,7 +11861,7 @@ i40e_rss_filter_restore(struct i40e_pf *pf)
{
struct i40e_rte_flow_rss_conf *conf =
&pf->rss_info;
- if (conf->num)
+ if (conf->conf.queue_num)
i40e_config_rss_filter(pf, conf, TRUE);
}
@@ -11456,7 +11904,8 @@ i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
static int
i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t pkg_size, uint32_t proto_num,
- struct rte_pmd_i40e_proto_info *proto)
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
uint32_t pctype_num;
@@ -11469,6 +11918,12 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t i, j, n;
int ret;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
+
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
(uint8_t *)&pctype_num, sizeof(pctype_num),
RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
@@ -11531,8 +11986,13 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
i40e_find_customized_pctype(pf,
I40E_CUSTOMIZED_GTPU);
if (new_pctype) {
- new_pctype->pctype = pctype_value;
- new_pctype->valid = true;
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+ new_pctype->pctype = pctype_value;
+ new_pctype->valid = true;
+ } else {
+ new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
+ new_pctype->valid = false;
+ }
}
}
@@ -11542,8 +12002,9 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
static int
i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
- uint32_t pkg_size, uint32_t proto_num,
- struct rte_pmd_i40e_proto_info *proto)
+ uint32_t pkg_size, uint32_t proto_num,
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
{
struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
uint16_t port_id = dev->data->port_id;
@@ -11556,6 +12017,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
bool in_tunnel;
int ret;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ rte_pmd_i40e_ptype_mapping_reset(port_id);
+ return 0;
+ }
+
/* get information about new ptype num */
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
(uint8_t *)&ptype_num, sizeof(ptype_num),
@@ -11705,7 +12177,8 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_GRENAT;
in_tunnel = true;
- } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
+ } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
+ !strncasecmp(name, "L2TPV2", 6)) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_L2TP;
in_tunnel = true;
@@ -11728,7 +12201,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
void
i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
- uint32_t pkg_size)
+ uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
uint32_t proto_num;
@@ -11737,6 +12210,12 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t i;
int ret;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return;
+ }
+
/* get information about protocol number */
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
(uint8_t *)&proto_num, sizeof(proto_num),
@@ -11770,20 +12249,23 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
/* Check if GTP is supported. */
for (i = 0; i < proto_num; i++) {
if (!strncmp(proto[i].name, "GTP", 3)) {
- pf->gtp_support = true;
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ pf->gtp_support = true;
+ else
+ pf->gtp_support = false;
break;
}
}
/* Update customized pctype info */
ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
- proto_num, proto);
+ proto_num, proto, op);
if (ret)
PMD_DRV_LOG(INFO, "No pctype is updated.");
/* Update customized ptype info */
ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
- proto_num, proto);
+ proto_num, proto, op);
if (ret)
PMD_DRV_LOG(INFO, "No ptype is updated.");
@@ -11840,6 +12322,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
if (pf->support_multi_driver) {
PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
@@ -11876,10 +12359,14 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
&filter_replace_buf);
if (ret != I40E_SUCCESS)
return ret;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* Apply the second L2 cloud filter */
memset(&filter_replace, 0,
@@ -11901,29 +12388,68 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!ret) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
+ if (!ret && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return ret;
}
int
+i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
+int
i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t i, lut = 0;
uint16_t j, num;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
if (!add) {
- if (memcmp(conf, rss_info,
- sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+ if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
i40e_pf_disable_rss(pf);
memset(rss_info, 0,
sizeof(struct i40e_rte_flow_rss_conf));
@@ -11932,7 +12458,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
return -EINVAL;
}
- if (rss_info->num)
+ if (rss_info->conf.queue_num)
return -EINVAL;
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
@@ -11943,7 +12469,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
else
num = pf->dev_data->nb_rx_queues;
- num = RTE_MIN(num, conf->num);
+ num = RTE_MIN(num, conf->conf.queue_num);
PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
num);
@@ -11956,7 +12482,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
if (j == num)
j = 0;
- lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+ lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
hw->func_caps.rss_table_entry_width) - 1));
if ((i & 3) == 3)
I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
@@ -11981,15 +12507,13 @@ i40e_config_rss_filter(struct i40e_pf *pf,
i40e_hw_rss_hash_set(pf, &rss_conf);
- rte_memcpy(rss_info,
- conf, sizeof(struct i40e_rte_flow_rss_conf));
+ if (i40e_rss_conf_init(rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
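i40e_config_rss_filter() now consumes the 18.05 rte_flow_action_rss layout (func/level/types/key_len/queue_num) rather than an embedded rte_eth_rss_conf. A hedged sketch of a flow rule that reaches this path; queue numbers and hash types are illustrative:

    #include <rte_ethdev.h>
    #include <rte_flow.h>

    static struct rte_flow *
    rss_rule(uint16_t port_id, struct rte_flow_error *err)
    {
        static const uint16_t queues[] = { 0, 1, 2, 3 };
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_rss rss = {
            .func = RTE_ETH_HASH_FUNCTION_DEFAULT, /* i40e requirement */
            .level = 0,          /* outer headers; nonzero is rejected */
            .types = ETH_RSS_NONFRAG_IPV4_TCP,
            .key_len = 0,        /* keep the current hash key */
            .queue_num = RTE_DIM(queues),
            .queue = queues,
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }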
-RTE_INIT(i40e_init_log);
-static void
-i40e_init_log(void)
+RTE_INIT(i40e_init_log)
{
i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
if (i40e_logtype_init >= 0)
@@ -12000,5 +12524,7 @@ i40e_init_log(void)
}
RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
- QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
+ ETH_I40E_FLOATING_VEB_ARG "=1"
+ ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 99efb670..3fffe5a5 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -5,12 +5,18 @@
#ifndef _I40E_ETHDEV_H_
#define _I40E_ETHDEV_H_
+#include <stdint.h>
+
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tm_driver.h>
+#include "rte_pmd_i40e.h"
+
+#include "base/i40e_register.h"
#define I40E_VLAN_TAG_SIZE 4
@@ -22,6 +28,7 @@
#define I40E_NUM_DESC_ALIGN 32
#define I40E_BUF_SIZE_MIN 1024
#define I40E_FRAME_SIZE_MAX 9728
+#define I40E_TSO_FRAME_SIZE_MAX 262144
#define I40E_QUEUE_BASE_ADDR_UNIT 128
/* number of VSIs and queue default setting */
#define I40E_MAX_QP_NUM_PER_VF 16
@@ -80,11 +87,19 @@
#define I40E_WRITE_GLB_REG(hw, reg, value) \
do { \
+ uint32_t ori_val; \
+ struct rte_eth_dev *dev; \
+ ori_val = I40E_READ_REG((hw), (reg)); \
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev; \
I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \
(reg)), (value)); \
- PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " \
- "with value 0x%08x", \
- (reg), (value)); \
+ if (ori_val != value) \
+ PMD_DRV_LOG(WARNING, \
+ "i40e device %s changed global " \
+ "register [0x%08x]. original: 0x%08x, " \
+ "new: 0x%08x ", \
+ (dev->device->name), (reg), \
+ (ori_val), (value)); \
} while (0)
/* index flex payload per layer */
@@ -170,7 +185,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_ITR_INDEX_NONE 3
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
-#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 8160 /* 8160 us */
+#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
/* Special FW support this floating VEB feature */
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
@@ -877,9 +892,11 @@ struct i40e_customized_pctype {
};
struct i40e_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
uint16_t queue_region_conf; /**< Queue region config flag */
- uint16_t num; /**< Number of entries in queue[]. */
+ uint8_t key[((I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
+ I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX) + 1) *
+ sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
};
@@ -956,6 +973,8 @@ struct i40e_pf {
bool gtp_support; /* 1 - support GTP-C and GTP-U */
/* customer customized pctype */
struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
+ /* Switch Domain Id */
+ uint16_t switch_domain_id;
};
enum pending_msg {
@@ -1005,6 +1024,9 @@ struct i40e_vf {
uint16_t promisc_flags; /* Promiscuous setting */
uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */
+ struct ether_addr mc_addrs[I40E_NUM_MACADDR_MAX]; /* Multicast addrs */
+ uint16_t mc_addrs_num; /* Multicast mac addresses number */
+
/* Event from pf */
bool dev_closed;
bool link_up;
@@ -1058,6 +1080,20 @@ struct i40e_adapter {
uint64_t pctypes_mask;
};
+/**
+ * Structure to store private data for each VF representor instance
+ */
+struct i40e_vf_representor {
+ uint16_t switch_domain_id;
+ /**< Switch domain ID */
+ uint16_t vf_id;
+ /**< Virtual Function ID */
+ struct i40e_adapter *adapter;
+ /**< Private data store of associated physical function */
+ struct i40e_eth_stats stats_offset;
+ /**< Zero-point of VF statistics */
+};
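A representor port backed by this structure is requested through the generic ethdev devargs syntax; a hedged example (the PCI address is illustrative):

    testpmd -w 0000:02:00.0,representor=[0-2] -- -i

Each listed VF gets one representor port, all sharing the PF's switch_domain_id.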
+
extern const struct rte_flow_ops i40e_flow_ops;
union i40e_filter_t {
@@ -1079,22 +1115,6 @@ struct i40e_valid_pattern {
parse_filter_t parse_filter;
};
-enum I40E_WARNING_IDX {
- I40E_WARNING_DIS_FLX_PLD,
- I40E_WARNING_ENA_FLX_PLD,
- I40E_WARNING_QINQ_PARSER,
- I40E_WARNING_QINQ_CLOUD_FILTER,
- I40E_WARNING_TPID,
- I40E_WARNING_FLOW_CTL,
- I40E_WARNING_GRE_KEY_LEN,
- I40E_WARNING_QF_CTL,
- I40E_WARNING_HASH_INSET,
- I40E_WARNING_HSYM,
- I40E_WARNING_HASH_MSK,
- I40E_WARNING_FD_MSK,
- I40E_WARNING_RPL_CLD_FILTER,
-};
-
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
@@ -1206,7 +1226,8 @@ void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
struct i40e_customized_pctype*
i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
- uint32_t pkg_size);
+ uint32_t pkg_size,
+ enum rte_pmd_i40e_package_op op);
int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
@@ -1214,8 +1235,14 @@ void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
+int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add);
+int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
@@ -1292,50 +1319,23 @@ i40e_align_floor(int n)
}
static inline uint16_t
-i40e_calc_itr_interval(int16_t interval, bool is_pf, bool is_multi_drv)
+i40e_calc_itr_interval(bool is_pf, bool is_multi_drv)
{
- if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) {
- if (is_multi_drv) {
- interval = I40E_QUEUE_ITR_INTERVAL_MAX;
- } else {
- if (is_pf)
- interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
- else
- interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
- }
+ uint16_t interval = 0;
+
+ if (is_multi_drv) {
+ interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+ } else {
+ if (is_pf)
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+ else
+ interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
}
/* Convert to hardware count, as writing each 1 represents 2 us */
return interval / 2;
}
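Worked example: by default a PF or VF requests 32 us, so the function returns 32 / 2 = 16, the count actually written to the ITR field; the multi-driver cap of 8160 us likewise becomes a count of 4080.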
-static inline void
-i40e_global_cfg_warning(enum I40E_WARNING_IDX idx)
-{
- const char *warning;
- static const char *const warning_list[] = {
- [I40E_WARNING_DIS_FLX_PLD] = "disable FDIR flexible payload",
- [I40E_WARNING_ENA_FLX_PLD] = "enable FDIR flexible payload",
- [I40E_WARNING_QINQ_PARSER] = "support QinQ parser",
- [I40E_WARNING_QINQ_CLOUD_FILTER] = "support QinQ cloud filter",
- [I40E_WARNING_TPID] = "support TPID configuration",
- [I40E_WARNING_FLOW_CTL] = "configure water marker",
- [I40E_WARNING_GRE_KEY_LEN] = "support GRE key length setting",
- [I40E_WARNING_QF_CTL] = "support hash function setting",
- [I40E_WARNING_HASH_INSET] = "configure hash input set",
- [I40E_WARNING_HSYM] = "set symmetric hash",
- [I40E_WARNING_HASH_MSK] = "configure hash mask",
- [I40E_WARNING_FD_MSK] = "configure fdir mask",
- [I40E_WARNING_RPL_CLD_FILTER] = "replace cloud filter",
- };
-
- warning = warning_list[idx];
-
- RTE_LOG(WARNING, PMD,
- "Global register is changed during %s\n",
- warning);
-}
-
#define I40E_VALID_FLOW(flow_type) \
((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
(flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index fd003fe0..001c301b 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -44,6 +44,8 @@
#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT 20
+#define I40EVF_ALARM_INTERVAL 50000 /* us */
+
struct i40evf_arq_msg_info {
enum virtchnl_ops ops;
enum i40e_status_code result;
@@ -120,7 +122,7 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
@@ -130,6 +132,14 @@ static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
uint8_t *msg,
uint16_t msglen);
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr, bool add);
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
@@ -195,6 +205,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.txq_info_get = i40e_txq_info_get,
.mac_addr_add = i40evf_add_mac_addr,
.mac_addr_remove = i40evf_del_mac_addr,
+ .set_mc_addr_list = i40evf_set_mc_addr_list,
.reta_update = i40evf_dev_rss_reta_update,
.reta_query = i40evf_dev_rss_reta_query,
.rss_hash_update = i40evf_dev_rss_hash_update,
@@ -1036,20 +1047,6 @@ static const struct rte_pci_id pci_id_i40evf_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
-static inline int
-i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
/* Disable IRQ0 */
static inline void
i40evf_disable_irq0(struct i40e_hw *hw)
@@ -1138,7 +1135,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
vf->dev_data = dev->data;
@@ -1375,7 +1372,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40evf_dev_interrupt_handler(void *param)
+i40evf_dev_alarm_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1404,6 +1401,8 @@ i40evf_dev_interrupt_handler(void *param)
done:
i40evf_enable_irq0(hw);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, dev);
}
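EAL alarms are one-shot, so the handler re-arms itself to emulate a periodic timer, and teardown then has to cancel the pending alarm (see i40evf_dev_close() below). The pattern in isolation, as a minimal sketch:

    #include <rte_alarm.h>

    #define POLL_INTERVAL_US 50000 /* mirrors I40EVF_ALARM_INTERVAL */

    static void periodic_poll(void *arg)
    {
        /* ... service pending admin-queue messages for arg ... */

        /* One-shot alarm: re-arm to keep polling. */
        rte_eal_alarm_set(POLL_INTERVAL_US, periodic_poll, arg);
    }

    /* On shutdown: rte_eal_alarm_cancel(periodic_poll, arg); */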
static int
@@ -1447,12 +1446,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
return -1;
}
- /* register callback func to eal lib */
- rte_intr_callback_register(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)eth_dev);
-
- /* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, eth_dev);
/* configure and enable device interrupt */
i40evf_enable_irq0(hw);
@@ -1541,7 +1536,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1575,7 +1570,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1588,37 +1583,35 @@ static int
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = 0;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail register. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- I40EVF_WRITE_FLUSH(hw);
+ rte_wmb();
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
- else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -1627,45 +1620,39 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct i40e_rx_queue *rxq;
int err;
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ rxq = dev->data->rx_queues[rx_queue_id];
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
-
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
static int
i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = 0;
+ int err;
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
-
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
-
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -1674,22 +1661,19 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
struct i40e_tx_queue *txq;
int err;
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
-
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ txq = dev->data->tx_queues[tx_queue_id];
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
@@ -1732,7 +1716,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1752,7 +1736,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -1841,7 +1825,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
@@ -1864,8 +1848,6 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&pci_dev->intr_handle);
-
return 0;
}
@@ -2012,23 +1994,18 @@ i40evf_dev_start(struct rte_eth_dev *dev)
/* Set all mac addrs */
i40evf_add_del_all_mac_addr(dev, TRUE);
+ /* Set all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ TRUE);
if (i40evf_start_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "enable queues failed");
goto err_mac;
}
- /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
- * is mapped to VFIO vector 0 in i40evf_dev_init( ).
- * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is
- * not cleared, it will fail when rte_intr_enable( ) tries to map Rx
- * queue interrupt to other VFIO vectors.
- * So clear uio/vfio intr/evevnfd first to avoid failure.
- */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
- rte_intr_disable(intr_handle);
+ /* only enable interrupt in rx interrupt mode */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
rte_intr_enable(intr_handle);
- }
i40evf_enable_queues_intr(dev);
@@ -2036,6 +2013,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
err_mac:
i40evf_add_del_all_mac_addr(dev, FALSE);
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
err_queue:
return -1;
}
@@ -2046,9 +2025,13 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_disable(intr_handle);
+
if (hw->adapter_stopped == 1)
return;
i40evf_stop_queues(dev);
@@ -2063,6 +2046,9 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
}
/* remove all mac addrs */
i40evf_add_del_all_mac_addr(dev, FALSE);
+ /* remove all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
hw->adapter_stopped = 1;
}
@@ -2078,6 +2064,7 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
* while Linux driver does not
*/
+ memset(&new_link, 0, sizeof(new_link));
/* Linux driver PF host */
switch (vf->link_speed) {
case I40E_LINK_SPEED_100MB:
@@ -2107,11 +2094,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
new_link.link_status = vf->link_up ? ETH_LINK_UP :
ETH_LINK_DOWN;
new_link.link_autoneg =
- dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED;
-
- i40evf_dev_atomic_write_link_status(dev, &new_link);
+ !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
- return 0;
+ return rte_eth_linkstatus_set(dev, &new_link);
}
static void
@@ -2179,8 +2164,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2189,6 +2172,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2196,7 +2180,13 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2209,7 +2199,8 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -2219,6 +2210,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2229,8 +2221,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -2276,19 +2267,20 @@ static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
i40evf_dev_stop(dev);
i40e_dev_free_queues(dev);
+ /*
+ * Disable promiscuous mode before resetting the VF. This is a
+ * workaround for operation with a kernel PF driver, not the
+ * normal teardown path.
+ */
+ i40evf_dev_promiscuous_disable(dev);
+ i40evf_dev_allmulticast_disable(dev);
+
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
- /* disable uio intr before callback unregister */
- rte_intr_disable(intr_handle);
-
- /* unregister callback func from eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40evf_dev_interrupt_handler, dev);
i40evf_disable_irq0(hw);
}
@@ -2649,16 +2641,17 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
}
-static void
+static int
i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
@@ -2667,15 +2660,99 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
if (!is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
- return;
+ return -EINVAL;
}
if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
- return;
+ return -EPERM;
i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr);
- i40evf_add_mac_addr(dev, mac_addr, 0, 0);
+ if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)
+ return -EIO;
ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+ return 0;
+}
+
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
+ (I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
+ uint32_t i;
+ int err;
+ struct vf_cmd_info args;
+
+ if (mc_addrs == NULL || mc_addrs_num == 0)
+ return 0;
+
+ if (mc_addrs_num > I40E_NUM_MACADDR_MAX)
+ return -EINVAL;
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = mc_addrs_num;
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
+ mc_addrs[i].addr_bytes[0],
+ mc_addrs[i].addr_bytes[1],
+ mc_addrs[i].addr_bytes[2],
+ mc_addrs[i].addr_bytes[3],
+ mc_addrs[i].addr_bytes[4],
+ mc_addrs[i].addr_bytes[5]);
+ return -EINVAL;
+ }
+
+ memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
+ sizeof(list->list[i].addr));
+ }
+
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(struct virtchnl_ether_addr_list) +
+ i * sizeof(struct virtchnl_ether_addr);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+
+ /* flush previous addresses */
+ err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = 0;
+
+ /* add new ones */
+ err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num,
+ TRUE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = mc_addrs_num;
+ memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
+
+ return 0;
}
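With .set_mc_addr_list wired up, multicast filters can be programmed through the generic ethdev call. A sketch with illustrative group addresses:

    #include <rte_ethdev.h>
    #include <rte_ether.h>

    static int join_mc_groups(uint16_t port_id)
    {
        /* IPv4 multicast MAC addresses, for illustration only. */
        struct ether_addr groups[] = {
            { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
            { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
        };

        /* Replaces any previously programmed list; 0 on success. */
        return rte_eth_dev_set_mc_addr_list(port_id, groups,
                                            RTE_DIM(groups));
    }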
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index b83a0cff..d41601a1 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -525,8 +525,7 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf,
flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
(num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
(layer_idx * I40E_MAX_FLXPLD_FIED);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
- i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
}
for (i = 0; i < num; i++) {
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 16c47cf7..c67b264d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -10,6 +10,7 @@
#include <unistd.h>
#include <stdarg.h>
+#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
@@ -53,6 +54,7 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_fdir_filter_conf *filter);
@@ -1939,7 +1941,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
@@ -2259,8 +2262,7 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
(raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
(layer_idx * I40E_MAX_FLXPLD_FIED);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
- i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
}
/* Set flex pit */
@@ -2400,7 +2402,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
break;
}
- if (cus_pctype)
+ if (cus_pctype && cus_pctype->valid)
return cus_pctype->pctype;
return I40E_FILTER_PCTYPE_INVALID;
@@ -2419,6 +2421,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
*/
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_fdir_filter_conf *filter)
@@ -2490,16 +2493,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
"Invalid MAC_addr mask.");
return -rte_errno;
}
+ }
+ if (eth_spec && eth_mask && eth_mask->type) {
+ enum rte_flow_item_type next = (item + 1)->type;
- if ((eth_mask->type & UINT16_MAX) ==
- UINT16_MAX) {
- input_set |= I40E_INSET_LAST_ETHER_TYPE;
- filter->input.flow.l2_flow.ether_type =
- eth_spec->type;
+ if (eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid type mask.");
+ return -rte_errno;
}
ether_type = rte_be_to_cpu_16(eth_spec->type);
- if (ether_type == ETHER_TYPE_IPv4 ||
+
+ if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
+ ether_type == ETHER_TYPE_IPv4 ||
ether_type == ETHER_TYPE_IPv6 ||
ether_type == ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
@@ -2509,6 +2518,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
"Unsupported ether_type.");
return -rte_errno;
}
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ eth_spec->type;
}
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
@@ -2518,6 +2530,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
+
+ RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
if (vlan_spec && vlan_mask) {
if (vlan_mask->tci ==
rte_cpu_to_be_16(I40E_TCI_MASK)) {
@@ -2526,6 +2540,33 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
vlan_spec->tci;
}
}
+ if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
+ if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner_type"
+ " mask.");
+ return -rte_errno;
+ }
+
+ ether_type =
+ rte_be_to_cpu_16(vlan_spec->inner_type);
+
+ if (ether_type == ETHER_TYPE_IPv4 ||
+ ether_type == ETHER_TYPE_IPv6 ||
+ ether_type == ETHER_TYPE_ARP ||
+ ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported inner_type.");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ vlan_spec->inner_type;
+ }
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
layer_idx = I40E_FLXPLD_L2_IDX;
@@ -2918,6 +2959,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_VF:
vf_spec = item->spec;
+ if (!attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Matching VF traffic"
+ " without affecting it"
+ " (transfer attribute)"
+ " is unsupported");
+ return -rte_errno;
+ }
filter->input.flow_ext.is_vf = 1;
filter->input.flow_ext.dst_id = vf_spec->id;
if (filter->input.flow_ext.is_vf &&
@@ -3080,7 +3131,8 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
&filter->fdir_filter;
int ret;
- ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
+ fdir_filter);
if (ret)
return ret;
@@ -3284,7 +3336,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -3514,7 +3567,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -4022,7 +4076,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -4150,7 +4205,8 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
if (vlan_mask->tci ==
rte_cpu_to_be_16(I40E_TCI_MASK)) {
info->region[0].user_priority[0] =
- (vlan_spec->tci >> 13) & 0x7;
+ (rte_be_to_cpu_16(
+ vlan_spec->tci) >> 13) & 0x7;
info->region[0].user_priority_num = 1;
info->queue_region_number = 1;
*action_flag = 0;
@@ -4169,6 +4225,19 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Parse the RSS queue indices, the total queue number and the hash
+ * functions. If this configuration describes a queue region, the
+ * queue_region_conf flag is set to TRUE, otherwise to FALSE. For a
+ * queue region, the hardware flowtype and user_priority are also
+ * parsed and validated: the region size must be one of 1, 2, 4, 8,
+ * 16, 32 or 64; the hw_flowtype (PCTYPE) index must not exceed 63;
+ * the user priority index must not exceed 7. In addition, the queue
+ * indices must form a contiguous sequence, and the region's queues
+ * must be a subset of the RSS queue indices for this port.
+ */
static int
i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
@@ -4205,7 +4274,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
if (action_flag) {
for (n = 0; n < 64; n++) {
- if (rss->rss_conf->rss_hf & (hf_bit << n)) {
+ if (rss->types & (hf_bit << n)) {
conf_info->region[0].hw_flowtype[0] = n;
conf_info->region[0].flowtype_num = 1;
conf_info->queue_region_number = 1;
@@ -4214,41 +4283,73 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
}
+ /**
+ * Validate the queue region parameters: the region's queue
+ * indices must form a contiguous sequence and must be a
+ * subset of the RSS queue indices configured for this port.
+ */
+ if (conf_info->queue_region_number) {
+ for (i = 0; i < rss->queue_num; i++) {
+ for (j = 0; j < rss_info->conf.queue_num; j++) {
+ if (rss->queue[i] == rss_info->conf.queue[j])
+ break;
+ }
+ if (j == rss_info->conf.queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+ }
+
+ for (i = 0; i < rss->queue_num - 1; i++) {
+ if (rss->queue[i + 1] != rss->queue[i] + 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+ }
+ }
+
+ /* Parse queue region related parameters from configuration */
for (n = 0; n < conf_info->queue_region_number; n++) {
if (conf_info->region[n].user_priority_num ||
conf_info->region[n].flowtype_num) {
- if (!((rte_is_power_of_2(rss->num)) &&
- rss->num <= 64)) {
- PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
- "total number of queues do not exceed the VSI allocation");
+ if (!((rte_is_power_of_2(rss->queue_num)) &&
+ rss->queue_num <= 64)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
return -rte_errno;
}
if (conf_info->region[n].user_priority[n] >=
I40E_MAX_USER_PRIORITY) {
- PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the user priority max index is 7");
return -rte_errno;
}
if (conf_info->region[n].hw_flowtype[n] >=
I40E_FILTER_PCTYPE_MAX) {
- PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
- return -rte_errno;
- }
-
- if (rss_info->num < rss->num ||
- rss_info->queue[0] < rss->queue[0] ||
- (rss->queue[0] + rss->num >
- rss_info->num + rss_info->queue[0])) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
- "no valid queues");
+ "the hw_flowtype or PCTYPE max index is 63");
return -rte_errno;
}
for (i = 0; i < info->queue_region_number; i++) {
- if (info->region[i].queue_num == rss->num &&
+ if (info->region[i].queue_num ==
+ rss->queue_num &&
info->region[i].queue_start_index ==
rss->queue[0])
break;
@@ -4256,12 +4357,15 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
if (i == info->queue_region_number) {
if (i > I40E_REGION_MAX_INDEX) {
- PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the queue region max index is 7");
return -rte_errno;
}
info->region[i].queue_num =
- rss->num;
+ rss->queue_num;
info->region[i].queue_start_index =
rss->queue[0];
info->region[i].region_id =
@@ -4301,10 +4405,13 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
rss_config->queue_region_conf = TRUE;
}
+ /**
+ * Return early if this flow is used for queue region configuration
+ */
if (rss_config->queue_region_conf)
return 0;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@@ -4312,7 +4419,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -4321,15 +4428,29 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
return -rte_errno;
}
}
- if (rss->rss_conf)
- rss_config->rss_conf = *rss->rss_conf;
- else
- rss_config->rss_conf.rss_hf =
- pf->adapter->flow_types_mask;
- for (n = 0; n < rss->num; ++n)
- rss_config->queue[n] = rss->queue[n];
- rss_config->num = rss->num;
+ /* Parse RSS related parameters from configuration */
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key too large");
+ if (rss->queue_num > RTE_DIM(rss_config->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (i40e_rss_conf_init(rss_config, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
index++;
/* check if the next not void action is END */
@@ -4385,14 +4506,15 @@ i40e_config_rss_filter_set(struct rte_eth_dev *dev,
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
if (conf->queue_region_conf) {
- i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
conf->queue_region_conf = 0;
} else {
- i40e_config_rss_filter(pf, conf, 1);
+ ret = i40e_config_rss_filter(pf, conf, 1);
}
- return 0;
+ return ret;
}
static int
@@ -4545,6 +4667,8 @@ i40e_flow_create(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_HASH:
ret = i40e_config_rss_filter_set(dev,
&cons_filter.rss_conf);
+ if (ret)
+ goto free_flow;
flow->rule = &pf->rss_info;
break;
default:
@@ -4846,7 +4970,7 @@ i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
- if (rss_info->num)
+ if (rss_info->conf.queue_num)
ret = i40e_config_rss_filter(pf, rss_info, FALSE);
return ret;
}
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a6..2a28ee34 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -40,9 +40,6 @@
/* Base address of the HW descriptor ring should be 128B aligned. */
#define I40E_RING_BASE_ALIGN 128
-#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#ifdef RTE_LIBRTE_IEEE1588
@@ -1240,7 +1237,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) {
+ if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
@@ -1442,13 +1439,15 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
/* Check for m->nb_segs to not exceed the limits. */
if (!(ol_flags & PKT_TX_TCP_SEG)) {
- if (m->nb_segs > I40E_TX_MAX_SEG ||
- m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
+ m->pkt_len > I40E_FRAME_SIZE_MAX) {
rte_errno = -EINVAL;
return i;
}
- } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
- (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+ } else if (m->nb_segs > I40E_TX_MAX_SEG ||
+ m->tso_segsz < I40E_MIN_TSO_MSS ||
+ m->tso_segsz > I40E_MAX_TSO_MSS ||
+ m->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
/* MSS outside the range (256B - 9674B) are considered
* malicious
*/
@@ -1461,6 +1460,12 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+ /* Check against the minimum packet length. */
+ if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
@@ -1527,38 +1532,36 @@ int
i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = -1;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail regieter. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ rte_wmb();
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- } else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
@@ -1568,24 +1571,21 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- /*
- * rx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+ * rx_queue_id is queue id application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1593,28 +1593,27 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = -1;
+ int err;
struct i40e_tx_queue *txq;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- /*
- * tx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /*
+ * tx_queue_id is queue id application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
@@ -1624,26 +1623,23 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- /*
- * tx_queue_id is queue id application refers to, while
- * txq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+ * tx_queue_id is queue id application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
@@ -1692,6 +1688,75 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_dev_first_queue(uint16_t idx, void **queues, int num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ if (i != idx && queues[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+static int
+i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_rx_queue *rxq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ uint16_t buf_size =
+ (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+ int use_scattered_rx =
+ ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+
+ if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_queue(rxq->queue_id,
+ dev->data->rx_queues,
+ dev->data->nb_rx_queues)) {
+ /**
+ * If it is the first queue to setup,
+ * set all flags to default and call
+ * i40e_set_rx_function.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ dev->data->scattered_rx = use_scattered_rx;
+ if (use_def_burst_func)
+ ad->rx_bulk_alloc_allowed = false;
+ i40e_set_rx_function(dev);
+ return 0;
+ }
+
+ /* check bulk alloc conflict */
+ if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
+ PMD_DRV_LOG(ERR, "Can't use default burst; bulk alloc is already enabled.");
+ return -EINVAL;
+ }
+ /* check scattered conflict */
+ if (!dev->data->scattered_rx && use_scattered_rx) {
+ PMD_DRV_LOG(ERR, "Scattered rx is required.");
+ return -EINVAL;
+ }
+ /* check vector conflict */
+ if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
+ PMD_DRV_LOG(ERR, "Failed vector rx setup.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1777,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ uint64_t offloads;
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,11 +1828,14 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
/* Allocate the maximum number of RX ring hardware descriptors. */
len = I40E_MAX_RING_DESC;
@@ -1808,25 +1879,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
i40e_reset_rx_queue(rxq);
rxq->q_set = TRUE;
- dev->data->rx_queues[queue_idx] = rxq;
-
- use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-
- if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
- } else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
- "not enabled on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
- ad->rx_bulk_alloc_allowed = false;
- }
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
@@ -1841,6 +1893,34 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->dcb_tc = i;
}
+ if (dev->data->dev_started) {
+ if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
+ i40e_dev_rx_queue_release(rxq);
+ return -EINVAL;
+ }
+ } else {
+ use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+ "not enabled on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+ }
+
+ dev->data->rx_queues[queue_idx] = rxq;
return 0;
}
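Note: the dev_started branch above is what enables per-queue Rx setup at runtime. A minimal usage sketch follows, assuming an already started i40e port; the port/queue ids, ring size, and mempool are illustrative and not part of this patch.

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hedged sketch: reconfigure Rx queue 0 while the port keeps running.
 * With dev->data->dev_started set, rte_eth_rx_queue_setup() lands in
 * the i40e_dev_rx_queue_setup_runtime() path added above.
 */
static int
reconfigure_rxq_at_runtime(uint16_t port_id, struct rte_mempool *mp)
{
	int ret;

	/* Stop only the target queue; the port itself keeps running. */
	ret = rte_eth_dev_rx_queue_stop(port_id, 0);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
			rte_eth_dev_socket_id(port_id), NULL, mp);
	if (ret != 0)
		return ret;

	return rte_eth_dev_rx_queue_start(port_id, 0);
}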
@@ -1972,6 +2052,52 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (i40e_tx_queue_init(txq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do TX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_queue(txq->queue_id,
+ dev->data->tx_queues,
+ dev->data->nb_tx_queues)) {
+ /**
+ * If it is the first queue to setup,
+ * set all flags and call
+ * i40e_set_tx_function.
+ */
+ i40e_set_tx_function_flag(dev, txq);
+ i40e_set_tx_function(dev);
+ return 0;
+ }
+
+ /* check vector conflict */
+ if (ad->tx_vec_allowed) {
+ if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ ||
+ i40e_txq_vec_setup(txq)) {
+ PMD_DRV_LOG(ERR, "Failed vector tx setup.");
+ return -EINVAL;
+ }
+ }
+ /* check simple tx conflict */
+ if (ad->tx_simple_allowed) {
+ if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+ txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
+ PMD_DRV_LOG(ERR, "No-simple tx is required.");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1989,6 +2115,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ uint64_t offloads;
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2123,7 +2252,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->queue_id = queue_idx;
txq->reg_idx = reg_idx;
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
@@ -2144,10 +2273,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
i40e_reset_tx_queue(txq);
txq->q_set = TRUE;
- dev->data->tx_queues[queue_idx] = txq;
-
- /* Use a simple TX queue without offloads or multi segs if possible */
- i40e_set_tx_function_flag(dev, txq);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
@@ -2162,6 +2287,20 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->dcb_tc = i;
}
+ if (dev->data->dev_started) {
+ if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
+ i40e_dev_tx_queue_release(txq);
+ return -EINVAL;
+ }
+ } else {
+ /**
+ * Use a simple TX queue without offloads or
+ * multi segs if possible
+ */
+ i40e_set_tx_function_flag(dev, txq);
+ }
+ dev->data->tx_queues[queue_idx] = txq;
+
return 0;
}
@@ -2189,8 +2328,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
if (mz)
return mz;
- mz = rte_memzone_reserve_aligned(name, len,
- socket_id, 0, I40E_RING_BASE_ALIGN);
+ mz = rte_memzone_reserve_aligned(name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN);
return mz;
}
@@ -2469,7 +2608,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2886,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -2765,8 +2905,8 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+ qinfo->conf.offloads = txq->offloads;
}
void __attribute__((cold))
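With per-queue offloads now exported through the queue info structs, an application can read back what a queue was actually configured with. A minimal sketch, assuming port_id has at least one Rx and one Tx queue set up:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
print_queue_offloads(uint16_t port_id)
{
	struct rte_eth_rxq_info rx_info;
	struct rte_eth_txq_info tx_info;

	/* conf.offloads is filled from rxq->offloads / txq->offloads above */
	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_info) == 0)
		printf("rxq 0 offloads: 0x%" PRIx64 "\n",
				rx_info.conf.offloads);
	if (rte_eth_tx_queue_info_get(port_id, 0, &tx_info) == 0)
		printf("txq 0 offloads: 0x%" PRIx64 "\n",
				tx_info.conf.offloads);
}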
@@ -2889,19 +3029,24 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- /* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
- && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
- if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
- PMD_INIT_LOG(DEBUG, "Vector tx"
- " can be enabled on this txq.");
-
- } else {
- ad->tx_vec_allowed = false;
- }
- } else {
- ad->tx_simple_allowed = false;
- }
+ /* Use a simple Tx queue if possible (only fast free is allowed) */
+ ad->tx_simple_allowed =
+ (txq->offloads ==
+ (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
+ ad->tx_vec_allowed = (ad->tx_simple_allowed &&
+ txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
+
+ if (ad->tx_vec_allowed)
+ PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else if (ad->tx_simple_allowed)
+ PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else
+ PMD_INIT_LOG(DEBUG,
+ "Neither simple nor vector Tx enabled on Tx queue %u\n",
+ txq->queue_id);
}
void __attribute__((cold))
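The rewritten selection above derives the Tx path from per-queue offloads instead of txq_flags: simple Tx needs at most DEV_TX_OFFLOAD_MBUF_FAST_FREE and tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST; vector Tx additionally needs tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ. A hedged sketch of a queue configuration that stays eligible for the vector path; the ids, ring size, and threshold are illustrative, and ETH_TXQ_FLAGS_IGNORE is assumed to be how this release tells ethdev to honor the offloads field rather than legacy flags:

#include <string.h>
#include <rte_ethdev.h>

static int
setup_vector_friendly_txq(uint16_t port_id)
{
	struct rte_eth_txconf txconf;

	memset(&txconf, 0, sizeof(txconf));
	/* Use the new per-queue offloads field, not legacy txq_flags */
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	/* Only fast free requested, so tx_simple_allowed stays true */
	txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/* Within [RTE_PMD_I40E_TX_MAX_BURST, RTE_I40E_TX_MAX_FREE_BUF_SZ] */
	txconf.tx_rs_thresh = 32;

	return rte_eth_tx_queue_setup(port_id, 0, 1024,
			rte_eth_dev_socket_id(port_id), &txconf);
}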
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd7923..3fc619af 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -30,6 +30,8 @@
#define I40E_TX_MAX_SEG UINT8_MAX
#define I40E_TX_MAX_MTU_SEG 8
+#define I40E_TX_MIN_PKT_LEN 17
+
#undef container_of
#define container_of(ptr, type, member) ({ \
typeof(((type *)0)->member)(*__mptr) = (ptr); \
@@ -107,6 +109,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
@@ -141,13 +144,13 @@ struct i40e_tx_queue {
uint16_t port_id; /**< Device port identifier. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx;
- uint32_t txq_flags;
struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
uint16_t tx_next_dd;
uint16_t tx_next_rs;
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index dbcb61f3..23179b3b 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -188,7 +188,7 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
*/
- while (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
i40e_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 3ffedcb9..63cb1774 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -202,11 +202,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
/* - no csum error report support
* - no header split support
*/
- if (rxmode->header_split == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
/* no QinQ support */
- if (rxmode->hw_vlan_extend == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
return -1;
return 0;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index e549d1e8..83572ef8 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * Copyright(c) 2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright(c) 2016-2018, Linaro Limited.
*/
#include <stdint.h>
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
new file mode 100644
index 00000000..f9f13161
--- /dev/null
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_type.h"
+#include "base/virtchnl.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "rte_pmd_i40e.h"
+
+static int
+i40e_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return i40e_dev_link_update(representor->adapter->eth_dev,
+ wait_to_complete);
+}
+
+static void
+i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ /* get dev info for the vdev */
+ dev_info->device = ethdev->device;
+
+ dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
+ dev_info->max_tx_queues = ethdev->data->nb_tx_queues;
+
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+ dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
+ dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->switch_info.name =
+ representor->adapter->eth_dev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int
+i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int
+i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void
+i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
+static int
+i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t tx_queue_id,
+ __rte_unused uint16_t nb_tx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
+static void
+i40evf_stat_update_48(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = *stat - *offset;
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+
+ *stat &= I40E_48_BIT_MASK;
+}
+
+static void
+i40evf_stat_update_32(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = (uint64_t)(*stat - *offset);
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
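+
+/*
+ * Worked example of the wrap correction above: if the 48-bit
+ * counter read 0xFFFFFFFFFFF0 when the offset was recorded and
+ * now reads 0x10, it has wrapped once, so the corrected delta is
+ * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, masked back to 48 bits.
+ */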
+
+static int
+rte_pmd_i40e_get_vf_native_stats(uint16_t port,
+ uint16_t vf_id,
+ struct i40e_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ i40e_update_vsi_stats(vsi);
+ memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));
+
+ return 0;
+}
+
+static int
+i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_stats *stats)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ struct i40e_eth_stats native_stats;
+ int ret;
+
+ ret = rte_pmd_i40e_get_vf_native_stats(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &native_stats);
+ if (ret == 0) {
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_bytes,
+ &native_stats.rx_bytes);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_unicast,
+ &native_stats.rx_unicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_multicast,
+ &native_stats.rx_multicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_broadcast,
+ &native_stats.rx_broadcast);
+ i40evf_stat_update_32(
+ &representor->stats_offset.rx_discards,
+ &native_stats.rx_discards);
+ i40evf_stat_update_32(
+ &representor->stats_offset.rx_unknown_protocol,
+ &native_stats.rx_unknown_protocol);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_bytes,
+ &native_stats.tx_bytes);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_unicast,
+ &native_stats.tx_unicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_multicast,
+ &native_stats.tx_multicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_broadcast,
+ &native_stats.tx_broadcast);
+ i40evf_stat_update_32(
+ &representor->stats_offset.tx_errors,
+ &native_stats.tx_errors);
+ i40evf_stat_update_32(
+ &representor->stats_offset.tx_discards,
+ &native_stats.tx_discards);
+
+ stats->ipackets = native_stats.rx_unicast +
+ native_stats.rx_multicast +
+ native_stats.rx_broadcast;
+ stats->opackets = native_stats.tx_unicast +
+ native_stats.tx_multicast +
+ native_stats.tx_broadcast;
+ stats->ibytes = native_stats.rx_bytes;
+ stats->obytes = native_stats.tx_bytes;
+ stats->ierrors = native_stats.rx_discards;
+ stats->oerrors = native_stats.tx_errors + native_stats.tx_discards;
+ }
+ return ret;
+}
+
+static void
+i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_get_vf_native_stats(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &representor->stats_offset);
+}
+
+static void
+i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_unicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 1);
+}
+
+static void
+i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_unicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 0);
+}
+
+static void
+i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_multicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 1);
+}
+
+static void
+i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_multicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 0);
+}
+
+static void
+i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_remove_vf_mac_addr(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &ethdev->data->mac_addrs[index]);
+}
+
+static int
+i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_i40e_set_vf_mac_addr(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static int
+i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_i40e_set_vf_vlan_filter(
+ representor->adapter->eth_dev->data->port_id,
+ vlan_id, vf_mask, on);
+}
+
+static int
+i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ struct rte_eth_dev *pdev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ uint32_t vfid;
+
+ pdev = representor->adapter->eth_dev;
+ vfid = representor->vf_id;
+
+ if (!is_i40e_supported(pdev)) {
+ PMD_DRV_LOG(ERR, "Invalid PF dev.");
+ return -EINVAL;
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);
+
+ if (vfid >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vf = &pf->vfs[vfid];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* Enable or disable VLAN filtering offload */
+ if (ethdev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ return i40e_vsi_config_vlan_filter(vsi, TRUE);
+ else
+ return i40e_vsi_config_vlan_filter(vsi, FALSE);
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping offload */
+ if (ethdev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)
+ return i40e_vsi_config_vlan_stripping(vsi, TRUE);
+ else
+ return i40e_vsi_config_vlan_stripping(vsi, FALSE);
+ }
+
+ return -EINVAL;
+}
+
+static void
+i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_vlan_stripq(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, on);
+}
+
+static int
+i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id,
+ __rte_unused int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_i40e_set_vf_vlan_insert(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, vlan_id);
+}
+
+struct eth_dev_ops i40e_representor_dev_ops = {
+ .dev_infos_get = i40e_vf_representor_dev_infos_get,
+
+ .dev_start = i40e_vf_representor_dev_start,
+ .dev_configure = i40e_vf_representor_dev_configure,
+ .dev_stop = i40e_vf_representor_dev_stop,
+
+ .rx_queue_setup = i40e_vf_representor_rx_queue_setup,
+ .tx_queue_setup = i40e_vf_representor_tx_queue_setup,
+
+ .link_update = i40e_vf_representor_link_update,
+
+ .stats_get = i40e_vf_representor_stats_get,
+ .stats_reset = i40e_vf_representor_stats_reset,
+
+ .promiscuous_enable = i40e_vf_representor_promiscuous_enable,
+ .promiscuous_disable = i40e_vf_representor_promiscuous_disable,
+
+ .allmulticast_enable = i40e_vf_representor_allmulticast_enable,
+ .allmulticast_disable = i40e_vf_representor_allmulticast_disable,
+
+ .mac_addr_remove = i40e_vf_representor_mac_addr_remove,
+ .mac_addr_set = i40e_vf_representor_mac_addr_set,
+
+ .vlan_filter_set = i40e_vf_representor_vlan_filter_set,
+ .vlan_offload_set = i40e_vf_representor_vlan_offload_set,
+ .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set,
+ .vlan_pvid_set = i40e_vf_representor_vlan_pvid_set,
+};
+
+static uint16_t
+i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ struct i40e_pf *pf;
+ struct i40e_pf_vf *vf;
+ struct rte_eth_link *link;
+
+ representor->vf_id =
+ ((struct i40e_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct i40e_vf_representor *)init_params)->switch_domain_id;
+ representor->adapter =
+ ((struct i40e_vf_representor *)init_params)->adapter;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(
+ representor->adapter->eth_dev->data->dev_private);
+
+ if (representor->vf_id >= pf->vf_num)
+ return -ENODEV;
+
+ /* The representor shares the same driver as its PF device */
+ ethdev->device->driver = representor->adapter->eth_dev->device->driver;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &i40e_representor_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
+ ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
+
+ vf = &pf->vfs[representor->vf_id];
+
+ if (!vf->vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -ENODEV;
+ }
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ /* Set the number of queues allocated to the VF */
+ ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
+ ethdev->data->nb_tx_queues = vf->vsi->nb_qps;
+
+ ethdev->data->mac_addrs = &vf->mac_addr;
+
+ /* Link state. Inherited from PF */
+ link = &representor->adapter->eth_dev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+int
+i40e_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+ return 0;
+}
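For context, these representor ports are created alongside the PF (via the PF's representor devargs in this release) and carry the RTE_ETH_DEV_REPRESENTOR flag set in i40e_vf_representor_init() above. A hedged sketch of enumerating them, peeking at rte_eth_devices the same way rte_pmd_i40e.c does; RTE_ETH_FOREACH_DEV is assumed to be available from rte_ethdev.h in this release:

#include <stdio.h>
#include <rte_ethdev.h>

static void
list_representor_ports(void)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		if (rte_eth_devices[port_id].data->dev_flags &
				RTE_ETH_DEV_REPRESENTOR)
			printf("port %u is a VF representor\n", port_id);
	}
}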
diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build
index 8764b0e5..d783f362 100644
--- a/drivers/net/i40e/meson.build
+++ b/drivers/net/i40e/meson.build
@@ -1,10 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
+
cflags += ['-DPF_DRIVER',
'-DVF_DRIVER',
'-DINTEGRATED_VF',
- '-DX722_A0_SUPPORT']
+ '-DX722_A0_SUPPORT',
+ '-DALLOW_EXPERIMENTAL_API']
subdir('base')
objs = [base_objs]
@@ -17,10 +20,12 @@ sources = files(
'i40e_fdir.c',
'i40e_flow.c',
'i40e_tm.c',
+ 'i40e_vf_representor.c',
'rte_pmd_i40e.c'
)
deps += ['hash']
+includes += include_directories('base')
if arch_subdir == 'x86'
dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1)
@@ -36,11 +41,10 @@ if arch_subdir == 'x86'
'i40e_rxtx_vec_avx2.c',
dependencies: [static_rte_ethdev,
static_rte_kvargs, static_rte_hash],
- c_args: '-mavx2')
+ include_directories: includes,
+ c_args: [cflags, '-mavx2'])
objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c')
endif
endif
-includes += include_directories('base')
-
install_headers('rte_pmd_i40e.h')
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index dae59e6d..bba62b1c 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -570,6 +570,49 @@ rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
return 0;
}
+static const struct ether_addr null_mac_addr;
+
+int
+rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (is_same_ether_addr(mac_addr, &vf->mac_addr))
+ /* Reset the mac with NULL address */
+ ether_addr_copy(&null_mac_addr, &vf->mac_addr);
+
+ /* Remove the mac */
+ i40e_vsi_delete_mac(vsi, mac_addr);
+
+ return 0;
+}
+
/* Set vlan strip on/off for specific VF from host */
int
rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
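A hedged usage sketch of the new rte_pmd_i40e_remove_vf_mac_addr() API, assuming port 0 is an i40e PF with at least one VF; the address value is illustrative:

#include <rte_ether.h>
#include <rte_pmd_i40e.h>

static int
remove_vf_mac_example(void)
{
	/* Locally administered unicast MAC previously assigned to VF 0 */
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_pmd_i40e_remove_vf_mac_addr(0, 0, &mac);
}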
@@ -1530,6 +1573,11 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
return 1;
}
}
+ /* profile with group id 0xff is compatible with any other profile */
+ if ((pinfo->track_id & group_mask) == group_mask) {
+ rte_free(buff);
+ return 0;
+ }
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
if ((p->track_id & group_mask) == 0) {
@@ -1540,6 +1588,8 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
}
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
+ if ((p->track_id & group_mask) == group_mask)
+ continue;
if ((pinfo->track_id & group_mask) !=
(p->track_id & group_mask)) {
PMD_DRV_LOG(INFO, "Profile of different group exists.");
@@ -1603,8 +1653,6 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
return -EINVAL;
}
- i40e_update_customized_info(dev, buff, size);
-
/* Find metadata segment */
metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
pkg_hdr);
@@ -1661,6 +1709,7 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
else if (is_exist == 3)
PMD_DRV_LOG(ERR, "Profile of different group already exists");
+ i40e_update_customized_info(dev, buff, size, op);
rte_free(profile_info_sec);
return -EEXIST;
}
@@ -1708,6 +1757,10 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
}
}
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
+ op == RTE_PMD_I40E_PKG_OP_WR_DEL)
+ i40e_update_customized_info(dev, buff, size, op);
+
rte_free(profile_info_sec);
return status;
}
@@ -3071,6 +3124,7 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
{
struct rte_eth_dev *dev;
struct i40e_hw *hw;
+ struct i40e_pf *pf;
uint64_t inset_reg;
uint32_t mask_reg[2];
int i;
@@ -3086,10 +3140,12 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
return -EINVAL;
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- /* Clear mask first */
- for (i = 0; i < 2; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Input set configuration is not supported.");
+ return -ENOTSUP;
+ }
inset_reg = inset->inset;
for (i = 0; i < 2; i++)
@@ -3098,14 +3154,15 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
switch (inset_type) {
case INSET_HASH:
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
for (i = 0; i < 2; i++)
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
break;
case INSET_FDIR:
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
@@ -3114,8 +3171,9 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
for (i = 0; i < 2; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
break;
case INSET_FDIR_FLX:
i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index d248adb1..be4a6024 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -456,6 +456,24 @@ int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
struct ether_addr *mac_addr);
/**
+ * Remove the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf_id* or *mac_addr* is invalid.
+ */
+int
+rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
+/**
* Enable/Disable vf vlan strip for all queues in a pool
*
* @param port