author    Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:52:30 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:53:17 +0100
commit    b63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree      83114aac64286fe616506c0b3dfaec2ab86ef835 /drivers/net/ixgbe
parent    ca33590b6af032bff57d9cc70455660466a654b2 (diff)

New upstream version 18.08 (tag: upstream/18.08)

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/Makefile                   |   6
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c             | 764
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.h             |  37
-rw-r--r--  drivers/net/ixgbe/ixgbe_fdir.c               |  30
-rw-r--r--  drivers/net/ixgbe/ixgbe_flow.c               | 209
-rw-r--r--  drivers/net/ixgbe/ixgbe_ipsec.c              |  13
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c                 |  18
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c               | 434
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.h               |  24
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h    |   5
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c      |   2
-rw-r--r--  drivers/net/ixgbe/ixgbe_vf_representor.c     | 231
-rw-r--r--  drivers/net/ixgbe/meson.build                |   3
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.c            | 230
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.h            |  84
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe_version.map  |  10
16 files changed, 1506 insertions, 594 deletions
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index d0804fc5..7b6af353 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -20,9 +20,10 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
-CFLAGS_ixgbe_rxtx.o += -wd3656
+CFLAGS_ixgbe_rxtx.o += -diag-disable 3656
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
#
@@ -103,6 +104,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf_representor.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 44832585..26b19273 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -20,7 +20,6 @@
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
@@ -60,9 +59,6 @@
*/
#define IXGBE_FC_LO 0x40
-/* Default minimum inter-interrupt interval for EITR configuration */
-#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E
-
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
@@ -101,8 +97,6 @@
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
-#define IXGBE_HKEY_MAX_INDEX 10
-
/* Additional timesync values. */
#define NSEC_PER_SEC 1000000000L
#define IXGBE_INCVAL_10GB 0x66666666
@@ -118,7 +112,6 @@
#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
-#define DEFAULT_ETAG_ETYPE 0x893f
#define IXGBE_ETAG_ETYPE 0x00005084
#define IXGBE_ETAG_ETYPE_MASK 0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID 0x80000000
@@ -133,7 +126,7 @@
#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
-static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
@@ -196,6 +189,9 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
int on);
+static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+ int mask);
+static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
@@ -228,7 +224,7 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param);
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
+static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
@@ -244,8 +240,8 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
-static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
+static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -253,6 +249,7 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue, int on);
+static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
@@ -286,7 +283,7 @@ static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
@@ -328,6 +325,11 @@ static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
+static int ixgbe_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+
static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs);
@@ -565,6 +567,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.get_eeprom_length = ixgbe_get_eeprom_length,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
.get_dcb_info = ixgbe_dev_get_dcb_info,
.timesync_adjust_time = ixgbe_timesync_adjust_time,
.timesync_read_time = ixgbe_timesync_read_time,
@@ -787,58 +791,6 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
sizeof(rte_ixgbevf_stats_strings[0]))
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
/*
* This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
*/
@@ -1096,7 +1048,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
* It returns 0 on success.
*/
static int
-eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1339,6 +1291,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
+ int retries = 0;
+ int ret;
PMD_INIT_FUNC_TRACE();
@@ -1359,8 +1313,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
- rte_intr_callback_unregister(intr_handle,
- ixgbe_dev_interrupt_handler, eth_dev);
+
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ rte_delay_ms(100);
+ } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
/* uninitialize PF if max_vfs not zero */
ixgbe_pf_host_uninit(eth_dev);
@@ -1522,7 +1488,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
l2_tn_info->e_tag_en = FALSE;
l2_tn_info->e_tag_fwd_en = FALSE;
- l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+ l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
return 0;
}
@@ -1641,7 +1607,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
ixgbevf_dev_stats_reset(eth_dev);
/* Disable the interrupts for VF */
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(eth_dev);
hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
diag = hw->mac.ops.reset_hw(hw);
@@ -1710,7 +1676,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
rte_intr_callback_register(intr_handle,
ixgbevf_dev_interrupt_handler, eth_dev);
rte_intr_enable(intr_handle);
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(eth_dev);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
eth_dev->data->port_id, pci_dev->id.vendor_id,
@@ -1743,7 +1709,7 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = NULL;
/* Disable the interrupts for VF */
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(eth_dev);
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -1755,16 +1721,81 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
-{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
+static int
+eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *pf_ethdev;
+ struct rte_eth_devargs eth_da;
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ } else
+ memset(&eth_da, 0, sizeof(eth_da));
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct ixgbe_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_ixgbe_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ /* probe VF representor ports */
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct ixgbe_vf_info *vfinfo;
+ struct ixgbe_vf_representor representor;
+
+ vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ pf_ethdev->data->dev_private);
+ if (vfinfo == NULL) {
+ PMD_DRV_LOG(ERR,
+ "no virtual functions supported by PF");
+ break;
+ }
+
+ representor.vf_id = eth_da.representor_ports[i];
+ representor.switch_domain_id = vfinfo->switch_domain_id;
+ representor.pf_ethdev = pf_ethdev;
+
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name,
+ eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct ixgbe_vf_representor), NULL, NULL,
+ ixgbe_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
+ "representor %s.", name);
+ }
+
+ return 0;
}
static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit);
}
static struct rte_pci_driver rte_ixgbe_pmd = {
@@ -1947,10 +1978,13 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
rxq = dev->data->rx_queues[queue];
- if (on)
+ if (on) {
rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- else
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ } else {
rxq->vlan_flags = PKT_RX_VLAN;
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
}
static void
@@ -2001,64 +2035,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ctrl;
- uint16_t i;
- struct ixgbe_rx_queue *rxq;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- ctrl &= ~IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- } else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
- /* record those setting for HW strip per queue */
- ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
- }
- }
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ctrl;
- uint16_t i;
- struct ixgbe_rx_queue *rxq;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- ctrl |= IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- } else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
- /* record those setting for HW strip per queue */
- ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
- }
- }
-}
-
static void
ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
@@ -2114,25 +2090,93 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
*/
}
-static int
-ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint32_t ctrl;
+ uint16_t i;
+ struct ixgbe_rx_queue *rxq;
+ bool on;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ } else {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl &= ~IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ }
+ } else {
+ /*
+ * Other 10G NIC, the VLAN strip can be setup
+ * per queue in RXDCTL
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl |= IXGBE_RXDCTL_VME;
+ on = TRUE;
+ } else {
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ on = FALSE;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+ /* record those setting for HW strip per queue */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+ }
+ }
+}
+
+static void
+ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
+ uint16_t i;
+ struct rte_eth_rxmode *rxmode;
+ struct ixgbe_rx_queue *rxq;
+
if (mask & ETH_VLAN_STRIP_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- ixgbe_vlan_hw_strip_enable_all(dev);
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
else
- ixgbe_vlan_hw_strip_disable_all(dev);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ }
+}
+
+static int
+ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ ixgbe_vlan_hw_strip_config(dev);
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
ixgbe_vlan_hw_filter_enable(dev);
else
ixgbe_vlan_hw_filter_disable(dev);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
ixgbe_vlan_hw_extend_enable(dev);
else
ixgbe_vlan_hw_extend_disable(dev);
@@ -2141,6 +2185,16 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
return 0;
}
+static int
+ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbe_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
static void
ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
@@ -2289,11 +2343,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
- if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
- PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
- IXGBE_DCB_NB_QUEUES);
- return -EINVAL;
- }
conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
@@ -2307,11 +2356,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
- if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
- PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
- IXGBE_DCB_NB_QUEUES);
- return -EINVAL;
- }
conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
@@ -2480,6 +2524,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
+ uint32_t allowed_speeds = 0;
int mask = 0;
int status;
uint16_t vf, idx;
@@ -2561,7 +2606,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- err = ixgbe_vlan_offload_set(dev, mask);
+ err = ixgbe_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
goto error;
@@ -2628,9 +2673,21 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
if (err)
goto error;
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
+ ETH_LINK_SPEED_10G;
+ break;
+ default:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G;
+ }
+
link_speeds = &dev->data->dev_conf.link_speeds;
- if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G)) {
+ if (*link_speeds & ~allowed_speeds) {
PMD_INIT_LOG(ERR, "Invalid link setting");
goto error;
}
@@ -2656,6 +2713,10 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
} else {
if (*link_speeds & ETH_LINK_SPEED_10G)
speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_5G)
+ speed |= IXGBE_LINK_SPEED_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
if (*link_speeds & ETH_LINK_SPEED_1G)
speed |= IXGBE_LINK_SPEED_1GB_FULL;
if (*link_speeds & ETH_LINK_SPEED_100M)
@@ -2666,6 +2727,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
if (err)
goto error;
+ ixgbe_dev_link_update(dev, 0);
+
skip_link_setup:
if (rte_intr_allow_others(intr_handle)) {
@@ -2757,7 +2820,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
@@ -2881,7 +2944,7 @@ ixgbe_dev_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
- ret = eth_ixgbe_dev_init(dev);
+ ret = eth_ixgbe_dev_init(dev, NULL);
return ret;
}
@@ -3057,9 +3120,18 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
}
/* Flow Director Stats registers */
- hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
- hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) & 0xFFFF;
+ hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
+ hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) & 0xFFFF;
+ hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
+ }
/* MACsec Stats registers */
macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
macsec_stats->out_pkts_encrypted +=
@@ -3625,7 +3697,6 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
@@ -3647,54 +3718,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
-
- /*
- * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
- * mode.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540) &&
- !RTE_ETH_DEV_SRIOV(dev).active)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
- if (dev->security_ctx) {
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
- }
-#endif
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3704,6 +3732,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3714,8 +3743,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -3736,6 +3764,14 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}
+
+ /* Driver-preferred Rx/Tx parameters */
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
}
static const uint32_t *
@@ -3784,7 +3820,6 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
@@ -3796,17 +3831,11 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3816,6 +3845,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3826,8 +3856,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
},
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -3863,7 +3892,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
* before the link status is correct
*/
- if (mac->type == ixgbe_mac_82599_vf) {
+ if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
int i;
for (i = 0; i < 5; i++) {
@@ -3941,12 +3970,12 @@ out:
}
/* return 0 means link status changed, -1 means not changed */
-static int
+int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
int wait_to_complete, int vf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -3956,12 +3985,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
int wait = 1;
bool autoneg = false;
+ memset(&link, 0, sizeof(link));
link.link_status = ETH_LINK_DOWN;
- link.link_speed = 0;
+ link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_autoneg = ETH_LINK_AUTONEG;
- memset(&old, 0, sizeof(old));
- rte_ixgbe_dev_atomic_read_link_status(dev, &old);
hw->mac.get_link_status = true;
@@ -3985,19 +4013,14 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
if (diag != 0) {
link.link_speed = ETH_SPEED_NUM_100M;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
if (link_up == 0) {
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
+
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -4029,12 +4052,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
link.link_speed = ETH_SPEED_NUM_10G;
break;
}
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
-
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
@@ -4233,8 +4252,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
+
if (link.link_status) {
PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
@@ -4269,7 +4288,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int64_t timeout;
- struct rte_eth_link link;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4286,9 +4304,10 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
}
if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ struct rte_eth_link link;
+
/* get the link status before link update, for predicting later */
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
ixgbe_dev_link_update(dev, 0);
@@ -4853,14 +4872,15 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
ixgbe_clear_rar(hw, index);
}
-static void
+static int
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
ixgbe_remove_rar(dev, 0);
-
ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+
+ return 0;
}
static bool
@@ -4909,10 +4929,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
}
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4932,19 +4954,32 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
* Virtual Function operations
*/
static void
-ixgbevf_intr_disable(struct ixgbe_hw *hw)
+ixgbevf_intr_disable(struct rte_eth_dev *dev)
{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
PMD_INIT_FUNC_TRACE();
/* Clear interrupt mask to stop from interrupts being generated */
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear mask value. */
+ intr->mask = 0;
}
static void
-ixgbevf_intr_enable(struct ixgbe_hw *hw)
+ixgbevf_intr_enable(struct rte_eth_dev *dev)
{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
PMD_INIT_FUNC_TRACE();
/* VF enable interrupt autoclean */
@@ -4953,6 +4988,9 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
IXGBE_WRITE_FLUSH(hw);
+
+ /* Save IXGBE_VTEIMS value to mask. */
+ intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
}
static int
@@ -4970,14 +5008,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.hw_strip_crc) {
+ if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
#endif
@@ -5030,7 +5068,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
/* Set HW strip */
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- err = ixgbevf_vlan_offload_set(dev, mask);
+ err = ixgbevf_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
ixgbe_dev_clear_queues(dev);
@@ -5039,6 +5077,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
ixgbevf_dev_rxtx_start(dev);
+ ixgbevf_dev_link_update(dev, 0);
+
/* check and configure queue intr-vector mapping */
if (rte_intr_cap_multiple(intr_handle) &&
dev->data->dev_conf.intr_conf.rxq) {
@@ -5074,7 +5114,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
rte_intr_enable(intr_handle);
/* Re-enable interrupt for VF */
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(dev);
return 0;
}
@@ -5088,7 +5128,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(dev);
hw->adapter_stopped = 1;
ixgbe_stop_adapter(hw);
@@ -5226,24 +5266,34 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
}
static int
-ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_rx_queue *rxq;
uint16_t i;
int on = 0;
/* VF function only support hw strip feature, others are not support */
if (mask & ETH_VLAN_STRIP_MASK) {
- on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
- for (i = 0; i < hw->mac.max_rx_queues; i++)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
ixgbevf_vlan_strip_queue_set(dev, i, on);
+ }
}
return 0;
}
+static int
+ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbevf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
int
ixgbe_vt_check(struct ixgbe_hw *hw)
{
@@ -5580,17 +5630,17 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- uint32_t mask;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t vec = IXGBE_MISC_VEC_ID;
- mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
if (rte_intr_allow_others(intr_handle))
vec = IXGBE_RX_VEC_START;
- mask |= (1 << vec);
+ intr->mask |= (1 << vec);
RTE_SET_USED(queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
rte_intr_enable(intr_handle);
@@ -5600,19 +5650,19 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- uint32_t mask;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vec = IXGBE_MISC_VEC_ID;
- mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
if (rte_intr_allow_others(intr_handle))
vec = IXGBE_RX_VEC_START;
- mask &= ~(1 << vec);
+ intr->mask &= ~(1 << vec);
RTE_SET_USED(queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
return 0;
}
@@ -5778,6 +5828,13 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
if (vector_idx < base + intr_handle->nb_efd - 1)
vector_idx++;
}
+
+ /* As RX queue setting above show, all queues use the vector 0.
+ * Set only the ITR value of IXGBE_MISC_VEC_ID.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
+ IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | IXGBE_EITR_CNT_WDIS);
}
/**
@@ -5799,8 +5856,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
/* won't configure msix register if no mapping is done
* between intr vector and event fd
+ * but if misx has been enabled already, need to configure
+ * auto clean, auto mask and throttling.
*/
- if (!rte_intr_dp_is_en(intr_handle))
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ if (!rte_intr_dp_is_en(intr_handle) &&
+ !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
return;
if (rte_intr_allow_others(intr_handle))
@@ -5824,30 +5885,34 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
/* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
- for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
- queue_id++) {
- /* by default, 1:1 mapping */
- ixgbe_set_ivar_map(hw, 0, queue_id, vec);
- intr_handle->intr_vec[queue_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
- vec++;
- }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
- IXGBE_MISC_VEC_ID);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
- break;
- default:
- break;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar_map(hw, -1,
+ IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ IXGBE_MISC_VEC_ID);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+ break;
+ default:
+ break;
+ }
}
IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
- IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
+ IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | IXGBE_EITR_CNT_WDIS);
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
@@ -5863,6 +5928,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode;
uint32_t rf_dec, rf_int;
uint32_t bcnrc_val;
uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5884,14 +5950,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
bcnrc_val = 0;
}
+ rxmode = &dev->data->dev_conf.rxmode;
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
* set as 0x4.
*/
- if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
- IXGBE_MAX_JUMBO_FRAME_SIZE))
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+ (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
IXGBE_MMW_SIZE_JUMBO_FRAME);
else
@@ -5983,12 +6049,14 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
}
}
-static void
+static int
ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
+
+ return 0;
}
int
@@ -6238,7 +6306,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!rx_conf->enable_scatter &&
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -6812,9 +6880,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
uint32_t shift = 0;
/* Get current link speed. */
- memset(&link, 0, sizeof(link));
ixgbe_dev_link_update(dev, 1);
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
case ETH_SPEED_NUM_100M:
@@ -7166,6 +7233,78 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev,
return eeprom->ops.write_buffer(hw, first, length, data);
}
+static int
+ixgbe_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t status;
+ uint8_t sff8472_rev, addr_mode;
+ bool page_swap = false;
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0)
+ return -EIO;
+
+ /* addressing mode is not supported */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0)
+ return -EIO;
+
+ if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+ PMD_DRV_LOG(ERR,
+ "Address change required to access page 0xA2, "
+ "but not supported. Please report the module "
+ "type to the driver maintainers.");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ /* We have a SFP, but it does not support SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have a SFP which supports a revision of SFF-8472. */
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
+ uint8_t databyte = 0xFF;
+ uint8_t *data = info->data;
+ uint32_t i = 0;
+
+ if (info->length == 0)
+ return -EINVAL;
+
+ for (i = info->offset; i < info->offset + info->length; i++) {
+ if (i < RTE_ETH_MODULE_SFF_8079_LEN)
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ else
+ status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
+
+ if (status != 0)
+ return -EIO;
+
+ data[i - info->offset] = databyte;
+ }
+
+ return 0;
+}
+
uint16_t
ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
switch (mac_type) {
@@ -8169,7 +8308,7 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(dev);
/* read-on-clear nic registers here */
eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
@@ -8186,7 +8325,6 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
static int
ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -8195,7 +8333,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
intr->flags &= ~IXGBE_FLAG_MAILBOX;
}
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(dev);
return 0;
}
@@ -8334,7 +8472,7 @@ ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev,
&filter_info->rss_info, TRUE);
}
@@ -8446,9 +8584,7 @@ RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
-RTE_INIT(ixgbe_init_log);
-static void
-ixgbe_init_log(void)
+RTE_INIT(ixgbe_init_log)
{
ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
if (ixgbe_logtype_init >= 0)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c56d6524..d0b93968 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -4,6 +4,9 @@
#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_
+
+#include <stdint.h>
+
#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
@@ -12,6 +15,7 @@
#ifdef RTE_LIBRTE_SECURITY
#include "ixgbe_ipsec.h"
#endif
+#include <rte_flow.h>
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
@@ -39,6 +43,7 @@
#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
#define IXGBE_VFTA_SIZE 128
#define IXGBE_VLAN_TAG_SIZE 4
+#define IXGBE_HKEY_MAX_INDEX 10
#define IXGBE_MAX_RX_QUEUE_NUM 128
#define IXGBE_MAX_INTR_QUEUE_NUM 15
#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
@@ -57,6 +62,7 @@
(((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
IXGBE_EITR_ITR_INT_MASK)
+#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */
/* Loopback operation modes */
/* 82599 specific loopback operation types */
@@ -94,6 +100,11 @@
#define IXGBE_5TUPLE_MAX_PRI 7
#define IXGBE_5TUPLE_MIN_PRI 1
+/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/
+#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000
+/* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros*/
+#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0
+
#define IXGBE_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
@@ -196,8 +207,8 @@ struct ixgbe_hw_fdir_info {
};
struct ixgbe_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
- uint16_t num; /**< Number of entries in queue[]. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};
@@ -253,6 +264,7 @@ struct ixgbe_vf_info {
uint16_t vlan_count;
uint8_t spoofchk_enabled;
uint8_t api_version;
+ uint16_t switch_domain_id;
};
/*
@@ -480,6 +492,15 @@ struct ixgbe_adapter {
struct ixgbe_tm_conf tm_conf;
};
+struct ixgbe_vf_representor {
+ uint16_t vf_id;
+ uint16_t switch_domain_id;
+ struct rte_eth_dev *pf_ethdev;
+};
+
+int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
+
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
@@ -652,6 +673,10 @@ int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
+int
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf);
+
/*
* misc function prototypes
*/
@@ -659,9 +684,7 @@ void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
-void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
+void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
@@ -698,6 +721,10 @@ void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
+int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add);
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index d5e51797..e559f0fa 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -394,9 +394,12 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- mac_mask = info->mask.mac_addr_byte_mask;
- fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
- & IXGBE_FDIRIP6M_INNER_MAC;
+ fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
+ mac_mask = info->mask.mac_addr_byte_mask &
+ (IXGBE_FDIRIP6M_INNER_MAC >>
+ IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
+ fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
+ IXGBE_FDIRIP6M_INNER_MAC);
switch (info->mask.tunnel_type_mask) {
case 0:
@@ -771,10 +774,19 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
input->formatted.inner_mac,
fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
sizeof(input->formatted.inner_mac));
- input->formatted.tunnel_type =
- fdir_filter->input.flow.tunnel_flow.tunnel_type;
+ if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_VXLAN)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
+ else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
+ else
+ PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");
+
input->formatted.tni_vni =
- fdir_filter->input.flow.tunnel_flow.tunnel_id;
+ fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
}
return 0;
@@ -1001,8 +1013,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
} else {
/* tunnel mode */
- if (input->formatted.tunnel_type !=
- RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ if (input->formatted.tunnel_type)
tunnel_type = 0x80000000;
tunnel_type |= addr_high;
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
@@ -1010,6 +1021,9 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
input->formatted.tni_vni);
}
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
}
/* record vlan (little-endian) and flex_bytes(big-endian) */
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index dcbfb38b..1adf1b80 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -264,8 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* Skip Ethernet */
if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error,
@@ -298,8 +298,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error,
@@ -346,7 +346,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
/**
* Only support src & dst addresses, protocol,
* others should be masked.
@@ -368,7 +368,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
filter->proto_mask = ipv4_mask->hdr.next_proto_id;
- ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
filter->dst_ip = ipv4_spec->hdr.dst_addr;
filter->src_ip = ipv4_spec->hdr.src_addr;
filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -413,7 +413,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
/**
* Only support src & dst ports, tcp flags,
@@ -447,12 +447,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
filter->dst_port = tcp_spec->hdr.dst_port;
filter->src_port = tcp_spec->hdr.src_port;
filter->tcp_flags = tcp_spec->hdr.tcp_flags;
} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -471,11 +471,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
- sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -494,7 +494,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = sctp_mask->hdr.dst_port;
filter->src_port_mask = sctp_mask->hdr.src_port;
- sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ sctp_spec = item->spec;
filter->dst_port = sctp_spec->hdr.dst_port;
filter->src_port = sctp_spec->hdr.src_port;
} else {
@@ -557,6 +557,15 @@ action:
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -699,8 +708,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Mask bits of source MAC address must be full of 0.
* Mask bits of destination MAC address must be full
@@ -787,6 +796,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
if (attr->priority) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
@@ -1000,8 +1017,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
tcp_mask->hdr.src_port ||
tcp_mask->hdr.dst_port ||
@@ -1078,6 +1095,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Support 2 priorities, the lowest or highest. */
if (!attr->priority) {
filter->hig_pri = 0;
@@ -1198,8 +1224,8 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
- e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+ e_tag_spec = item->spec;
+ e_tag_mask = item->mask;
/* Only care about GRP and E cid base. */
if (e_tag_mask->epcp_edei_in_ecid_b ||
@@ -1250,6 +1276,15 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
}
/* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
if (attr->priority) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1354,6 +1389,15 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
}
/* not supported */
+ if (attr->transfer) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
if (attr->priority) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1447,12 +1491,9 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
break;
if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
- spec =
- (const struct rte_flow_item_fuzzy *)item->spec;
- last =
- (const struct rte_flow_item_fuzzy *)item->last;
- mask =
- (const struct rte_flow_item_fuzzy *)item->mask;
+ spec = item->spec;
+ last = item->last;
+ mask = item->mask;
if (!spec || !mask)
return 0;
@@ -1632,7 +1673,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_spec = item->spec;
/* Get the dst MAC. */
for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -1645,7 +1686,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->mask) {
rule->b_mask = TRUE;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_mask = item->mask;
/* Ether type should be masked. */
if (eth_mask->type ||
@@ -1698,7 +1739,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
} else {
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1725,8 +1767,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
@@ -1772,8 +1814,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- ipv4_mask =
- (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
if (ipv4_mask->hdr.version_ihl ||
ipv4_mask->hdr.type_of_service ||
ipv4_mask->hdr.total_length ||
@@ -1793,8 +1834,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- ipv4_spec =
- (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
rule->ixgbe_fdir.formatted.dst_ip[0] =
ipv4_spec->hdr.dst_addr;
rule->ixgbe_fdir.formatted.src_ip[0] =
@@ -1844,8 +1884,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
rule->b_mask = TRUE;
- ipv6_mask =
- (const struct rte_flow_item_ipv6 *)item->mask;
+ ipv6_mask = item->mask;
if (ipv6_mask->hdr.vtc_flow ||
ipv6_mask->hdr.payload_len ||
ipv6_mask->hdr.proto ||
@@ -1885,8 +1924,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- ipv6_spec =
- (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_spec = item->spec;
rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
ipv6_spec->hdr.src_addr, 16);
rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
@@ -1938,7 +1976,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
if (tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off ||
@@ -1957,7 +1995,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
tcp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
@@ -2003,7 +2041,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
if (udp_mask->hdr.dgram_len ||
udp_mask->hdr.dgram_cksum) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2017,7 +2055,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
udp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
@@ -2068,8 +2106,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
if (sctp_mask->hdr.tag ||
sctp_mask->hdr.cksum) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2083,8 +2120,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- sctp_spec =
- (const struct rte_flow_item_sctp *)item->spec;
+ sctp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
sctp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
@@ -2092,8 +2128,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
/* others even sctp port is not supported */
} else {
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
if (sctp_mask &&
(sctp_mask->hdr.src_port ||
sctp_mask->hdr.dst_port ||
@@ -2136,7 +2171,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
- raw_mask = (const struct rte_flow_item_raw *)item->mask;
+ raw_mask = item->mask;
/* check mask */
if (raw_mask->relative != 0x1 ||
@@ -2152,7 +2187,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
- raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_spec = item->spec;
/* check spec */
if (raw_spec->relative != 0 ||
@@ -2403,7 +2438,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Get the VxLAN info */
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_VXLAN;
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
/* Only care about VNI, others should be masked. */
if (!item->mask) {
@@ -2425,8 +2460,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
- vxlan_mask =
- (const struct rte_flow_item_vxlan *)item->mask;
+ vxlan_mask = item->mask;
if (vxlan_mask->flags) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2452,20 +2486,17 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->spec) {
rule->b_spec = TRUE;
- vxlan_spec = (const struct rte_flow_item_vxlan *)
- item->spec;
+ vxlan_spec = item->spec;
rte_memcpy(((uint8_t *)
- &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ &rule->ixgbe_fdir.formatted.tni_vni),
vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
- rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
- rule->ixgbe_fdir.formatted.tni_vni);
}
}
/* Get the NVGRE info */
if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_NVGRE;
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
/**
* Only care about flags0, flags1, protocol and TNI,
@@ -2490,8 +2521,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
- nvgre_mask =
- (const struct rte_flow_item_nvgre *)item->mask;
+ nvgre_mask = item->mask;
if (nvgre_mask->flow_id) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2534,8 +2564,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->spec) {
rule->b_spec = TRUE;
- nvgre_spec =
- (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_spec = item->spec;
if (nvgre_spec->c_k_s_rsvd0_ver !=
rte_cpu_to_be_16(0x2000) &&
nvgre_mask->c_k_s_rsvd0_ver) {
@@ -2557,7 +2586,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* tni is a 24-bits bit field */
rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
- rule->ixgbe_fdir.formatted.tni_vni <<= 8;
}
}
@@ -2591,7 +2619,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
return -rte_errno;
}
rule->b_mask = TRUE;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_mask = item->mask;
/* Ether type should be masked. */
if (eth_mask->type) {
@@ -2632,7 +2660,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->spec) {
rule->b_spec = TRUE;
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_spec = item->spec;
/* Get the dst MAC. */
for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -2671,8 +2699,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
return -rte_errno;
}
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
@@ -2775,7 +2803,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
rss = (const struct rte_flow_action_rss *)act->conf;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@@ -2783,7 +2811,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -2792,14 +2820,27 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
}
- if (rss->rss_conf)
- rss_conf->rss_conf = *rss->rss_conf;
- else
- rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
- for (n = 0; n < rss->num; ++n)
- rss_conf->queue[n] = rss->queue[n];
- rss_conf->num = rss->num;
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (ixgbe_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
@@ -2830,6 +2871,15 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
@@ -2848,7 +2898,7 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}
@@ -3167,9 +3217,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&rss_filter_ptr->filter_info,
- &rss_conf,
- sizeof(struct ixgbe_rte_flow_rss_conf));
+ ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
TAILQ_INSERT_TAIL(&filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
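The RSS action is now validated field by field (default hash function only, zero encapsulation level, 40-byte key, in-range queue list) before being copied with ixgbe_rss_conf_init(). As a usage sketch only, not part of this patch, with queue list, hash types and the empty pattern as illustrative placeholders, an application-side rule that passes these checks could look like:

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Sketch: create an rte_flow RSS rule accepted by the checks above. */
static int
create_rss_rule(uint16_t port_id, struct rte_flow_error *err)
{
	static uint8_t rss_key[40];                 /* key_len must be 0 or 40 */
	static uint16_t queues[] = { 0, 1, 2, 3 };  /* each entry < nb_rx_queues */
	const struct rte_flow_attr attr = { .ingress = 1 }; /* no transfer */
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT, /* only default is accepted */
		.level = 0,                            /* nonzero level is rejected */
		.types = ETH_RSS_IP | ETH_RSS_TCP,
		.key_len = sizeof(rss_key),
		.key = rss_key,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err) != NULL ?
			0 : -rte_errno;
}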
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0fd..08405f1e 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,18 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
+
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
/* sanity checks */
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (rte_eth_dev_must_keep_crc(rx_offloads)) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
@@ -624,7 +629,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
if (reg != 0) {
@@ -632,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
return -1;
}
}
- if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
IXGBE_SECTXCTRL_STORE_FORWARD);
reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
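ixgbe_crypto_enable_ipsec() now reads the Rx/Tx security offloads from the port configuration and refuses kept CRC or LRO. A minimal port-configuration sketch, not part of this patch and written against the 18.08 ethdev API (where the legacy rxmode bitfield still has to be ignored explicitly), that satisfies these checks:

#include <rte_ethdev.h>

/* Sketch: configure a port so ixgbe_crypto_enable_ipsec() accepts it. */
static int
configure_inline_ipsec_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.ignore_offload_bitfield = 1, /* use .offloads only */
			/* CRC stripping on, no LRO, Rx security offload */
			.offloads = DEV_RX_OFFLOAD_CRC_STRIP |
				    DEV_RX_OFFLOAD_SECURITY,
		},
		.txmode = {
			.offloads = DEV_TX_OFFLOAD_SECURITY,
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}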
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index ea997371..4b833ffa 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -90,6 +90,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
+ rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+
memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
@@ -122,11 +124,10 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
struct ixgbe_vf_info **vfinfo;
uint16_t vf_num;
+ int ret;
PMD_INIT_FUNC_TRACE();
- vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
-
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
@@ -136,6 +137,14 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
if (vf_num == 0)
return;
+ vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+ if (*vfinfo == NULL)
+ return;
+
+ ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
rte_free(*vfinfo);
*vfinfo = NULL;
}
@@ -329,10 +338,7 @@ set_rx_mode(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- ixgbe_vlan_hw_strip_enable_all(dev);
- else
- ixgbe_vlan_hw_strip_disable_all(dev);
+ ixgbe_vlan_hw_strip_config(dev);
}
static inline void
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 6c582b4b..f82b74a9 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1420,7 +1420,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
/*
* Check if VLAN present only.
* Do not check whether L3/L4 rx checksum done by NIC or not,
- * That can be found from rte_eth_rxmode.hw_ip_checksum flag
+ * That can be found from rte_eth_rxmode.offloads flag
*/
pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+ if ((txq->offloads == 0) &&
#ifdef RTE_LIBRTE_SECURITY
!(txq->using_ipsec) &&
#endif
@@ -2398,9 +2398,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
PMD_INIT_LOG(DEBUG,
- " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
- (unsigned long)txq->txq_flags,
- (unsigned long)IXGBE_SIMPLE_FLAGS);
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2409,45 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
}
}
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
+
int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2421,10 +2459,13 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -2550,7 +2591,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIBRTE_SECURITY
@@ -2769,6 +2810,82 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
#endif
}
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ if (ixgbe_is_vf(dev) == 0)
+ offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+ /*
+ * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+ * mode.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) &&
+ !RTE_ETH_DEV_SRIOV(dev).active)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+ return offloads;
+}
+
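These capability helpers feed rte_eth_dev_info, which distinguishes port-wide from pure per-queue Rx offloads; a short query sketch, not part of this patch, with field names as in the 18.08 ethdev API:

#include <rte_ethdev.h>

/* Sketch: check whether VLAN stripping can be toggled per Rx queue. */
static int
rx_vlan_strip_per_queue(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);

	/* rx_offload_capa holds port plus queue capabilities,
	 * rx_queue_offload_capa only the pure per-queue ones.
	 */
	return (info.rx_queue_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) != 0;
}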
int __attribute__((cold))
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2783,10 +2900,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -2816,10 +2936,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
/*
* The packet type in RX descriptor is different for different NICs.
@@ -4574,7 +4697,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;
- if (!rsc_capable && rx_conf->enable_lro) {
+ if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
"support it");
return -EINVAL;
@@ -4582,7 +4705,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads) &&
+ (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter of 4.6.7.2.1 of the Spec Rev.
* 3.0 RSC configuration requires HW CRC stripping being
@@ -4596,7 +4720,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RFCTL configuration */
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
- if ((rsc_capable) && (rx_conf->enable_lro))
+ if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
/*
* Since NFS packets coalescing is not supported - clear
* RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4609,7 +4733,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
- if (!rx_conf->enable_lro)
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
return 0;
/* Set RDRXCTL.RSCACKC bit */
@@ -4666,7 +4790,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
* at most 500us latency for a single RSC aggregation.
*/
eitr &= ~IXGBE_EITR_ITR_INT_MASK;
- eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
+ eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
+ eitr |= IXGBE_EITR_CNT_WDIS;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
@@ -4729,15 +4854,15 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rx_conf->hw_strip_crc)
- hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
- else
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
+ else
+ hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
/*
* Configure jumbo frame support, if any.
*/
- if (rx_conf->jumbo_frame == 1) {
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
@@ -4757,6 +4882,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -4765,7 +4895,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+ rxq->crc_len = rte_eth_dev_must_keep_crc(rx_conf->offloads) ?
+ ETHER_CRC_LEN : 0;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
@@ -4779,28 +4910,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
- /*
- * Configure Header Split
- */
- if (rx_conf->header_split) {
- if (hw->mac.type == ixgbe_mac_82599EB) {
- /* Must setup the PSRTYPE register */
- uint32_t psrtype;
-
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
- }
- srrctl = ((rx_conf->split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
-#endif
- srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
/* Set if packets are dropped when no descriptors available */
if (rxq->drop_en)
@@ -4826,9 +4936,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
}
- if (rx_conf->enable_scatter)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
/*
@@ -4843,7 +4955,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
- if (rx_conf->hw_ip_checksum)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= IXGBE_RXCSUM_IPPCSE;
else
rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4853,10 +4965,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rx_conf->hw_strip_crc)
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- else
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
+ else
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
@@ -5064,34 +5176,30 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- /* Allocate buffers for descriptor rings */
- if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
- PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
- rx_queue_id);
- return -1;
- }
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ rxq = dev->data->rx_queues[rx_queue_id];
- /* Wait until RX Enable ready */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
- rx_queue_id);
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
+ /* Allocate buffers for descriptor rings */
+ if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
return -1;
+ }
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
@@ -5112,30 +5220,26 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
- /* Wait until RX Enable bit clear */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
- rx_queue_id);
+ /* Wait until RX Enable bit clear */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
- rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
- ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(adapter, rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- } else
- return -1;
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -5155,30 +5259,27 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ txq = dev->data->tx_queues[tx_queue_id];
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
- /* Wait until TX Enable ready */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- txdctl = IXGBE_READ_REG(hw,
- IXGBE_TXDCTL(txq->reg_idx));
- } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable "
- "Tx Queue %d", tx_queue_id);
- }
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- return -1;
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+ tx_queue_id);
+ }
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
@@ -5198,9 +5299,6 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id >= dev->data->nb_tx_queues)
- return -1;
-
txq = dev->data->tx_queues[tx_queue_id];
/* Wait until TX queue is empty */
@@ -5214,8 +5312,9 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IXGBE_TDT(txq->reg_idx));
} while (--poll_ms && (txtdh != txtdt));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
- "when stopping.", tx_queue_id);
+ PMD_INIT_LOG(ERR,
+ "Tx Queue %d is not empty when stopping.",
+ tx_queue_id);
}
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
@@ -5231,8 +5330,8 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IXGBE_TXDCTL(txq->reg_idx));
} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d", tx_queue_id);
+ PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+ tx_queue_id);
}
if (txq->ops != NULL) {
@@ -5259,6 +5358,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -5277,7 +5377,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
@@ -5289,6 +5389,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
struct ixgbe_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint64_t bus_addr;
uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
@@ -5328,6 +5429,11 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
ixgbevf_rlpml_set_vf(hw,
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -5351,18 +5457,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
- /*
- * Configure Header Split
- */
- if (dev->data->dev_conf.rxmode.header_split) {
- srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
-#endif
- srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
/* Set if packets are dropped when no descriptors available */
if (rxq->drop_en)
@@ -5387,24 +5482,18 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.enable_scatter ||
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
- (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ (rxmode->max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
}
- }
-#ifdef RTE_HEADER_SPLIT_ENABLE
- if (dev->data->dev_conf.rxmode.header_split)
- /* Must setup the PSRTYPE register */
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-#endif
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
/* Set RQPL for VF RSS according to max Rx queue */
psrtype |= (dev->data->nb_rx_queues >> 1) <<
@@ -5522,6 +5611,40 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
}
int
+ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
+int
ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add)
{
@@ -5531,7 +5654,12 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
uint16_t j;
uint16_t sp_reta_size;
uint32_t reta_reg;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -5541,8 +5669,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
if (!add) {
- if (memcmp(conf, &filter_info->rss_info,
- sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) {
+ if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
ixgbe_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct ixgbe_rte_flow_rss_conf));
@@ -5551,7 +5679,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table
* The byte-swap is needed because NIC registers are in
@@ -5561,9 +5689,9 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
- if (j == conf->num)
+ if (j == conf->conf.queue_num)
j = 0;
- reta = (reta << 8) | conf->queue[j];
+ reta = (reta << 8) | conf->conf.queue[j];
if ((i & 3) == 3)
IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
@@ -5580,8 +5708,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
rss_conf.rss_key = rss_intel_key; /* Default hash key */
ixgbe_hw_rss_hash_set(hw, &rss_conf);
- rte_memcpy(&filter_info->rss_info,
- conf, sizeof(struct ixgbe_rte_flow_rss_conf));
+ if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
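With txq_flags retired in favour of per-queue offload flags, the queue-level request is OR-ed with the port-level configuration at setup time. A minimal application-side sketch, not part of this patch; descriptor counts and the mempool are placeholders, and the deprecated txq_flags field is explicitly ignored as the 18.08 API still expects:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Sketch: port-wide Rx checksum plus VLAN stripping on queue 0 only. */
static int
setup_one_queue_pair(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.ignore_offload_bitfield = 1,
			.offloads = DEV_RX_OFFLOAD_CHECKSUM,
		},
		.txmode = {
			.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_TCP_CKSUM,
		},
	};
	struct rte_eth_rxconf rxconf = {
		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP, /* queue-level add-on */
	};
	struct rte_eth_txconf txconf = {
		.txq_flags = ETH_TXQ_FLAGS_IGNORE, /* honour .offloads, not flags */
		.offloads = 0,
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     &rxconf, mb_pool);
	if (ret < 0)
		return ret;
	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}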
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 69c718bc..39378f75 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -129,6 +129,7 @@ struct ixgbe_rx_queue {
uint8_t rx_deferred_start; /**< not in global dev start. */
/** flags to set in mbuf when a vlan is detected. */
uint64_t vlan_flags;
+ uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** hold packets to return to application */
@@ -221,7 +222,7 @@ struct ixgbe_tx_queue {
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
- uint32_t txq_flags; /**< Holds flags for this TXq */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
uint32_t ctx_curr; /**< Hardware context states. */
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -240,20 +241,6 @@ struct ixgbe_txq_ops {
};
/*
- * The "simple" TX queue functions require that the following
- * flags are set when the TX queue is configured:
- * - ETH_TXQ_FLAGS_NOMULTSEGS
- * - ETH_TXQ_FLAGS_NOVLANOFFL
- * - ETH_TXQ_FLAGS_NOXSUMSCTP
- * - ETH_TXQ_FLAGS_NOXSUMUDP
- * - ETH_TXQ_FLAGS_NOXSUMTCP
- * and that the RS bit threshold (tx_rs_thresh) is at least equal to
- * RTE_PMD_IXGBE_TX_MAX_BURST.
- */
-#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
-/*
* Populate descriptors with the following info:
* 1.) buffer_addr = phys_addr + headroom
* 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
@@ -305,6 +292,11 @@ extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
-
#endif /* RTE_IXGBE_INC_VECTOR */
+
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
+
#endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a2..a97c2718 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -278,17 +278,12 @@ static inline int
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
- /* no header split support */
- if (rxmode->header_split == 1)
- return -1;
-
return 0;
#else
RTE_SET_USED(dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998f..edb13835 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
/* no csum error report support */
- if (rxmode->hw_ip_checksum == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
return -1;
return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
new file mode 100644
index 00000000..db516d99
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "rte_pmd_ixgbe.h"
+
+
+static int
+ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return ixgbe_dev_link_update_share(representor->pf_ethdev,
+ wait_to_complete, 0);
+}
+
+static int
+ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_ixgbe_set_vf_mac_addr(
+ representor->pf_ethdev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static void
+ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(
+ representor->pf_ethdev->data->dev_private);
+
+ dev_info->device = representor->pf_ethdev->device;
+
+ dev_info->min_rx_bufsize = 1024;
+ /**< Minimum size of RX buffer. */
+ dev_info->max_rx_pktlen = 9728;
+ /**< Maximum configurable length of RX pkt. */
+ dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+ /**< Maximum number of RX queues. */
+ dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+ /**< Maximum number of TX queues. */
+
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ /**< Maximum number of MAC addresses. */
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ /**< Device RX offload capabilities. */
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ /**< Device TX offload capabilities. */
+
+ dev_info->speed_capa =
+ representor->pf_ethdev->data->dev_link.link_speed;
+ /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+
+ dev_info->switch_info.name =
+ representor->pf_ethdev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int ixgbe_vf_representor_dev_configure(
+ __rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_rx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_tx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t tx_queue_id,
+ __rte_unused uint16_t nb_tx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_ixgbe_set_vf_vlan_filter(
+ representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on);
+}
+
+static void
+ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id,
+ representor->vf_id, on);
+}
+
+struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
+ .dev_infos_get = ixgbe_vf_representor_dev_infos_get,
+
+ .dev_start = ixgbe_vf_representor_dev_start,
+ .dev_configure = ixgbe_vf_representor_dev_configure,
+ .dev_stop = ixgbe_vf_representor_dev_stop,
+
+ .rx_queue_setup = ixgbe_vf_representor_rx_queue_setup,
+ .tx_queue_setup = ixgbe_vf_representor_tx_queue_setup,
+
+ .link_update = ixgbe_vf_representor_link_update,
+
+ .vlan_filter_set = ixgbe_vf_representor_vlan_filter_set,
+ .vlan_strip_queue_set = ixgbe_vf_representor_vlan_strip_queue_set,
+
+ .mac_addr_set = ixgbe_vf_representor_mac_addr_set,
+};
+
+static uint16_t
+ixgbe_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+ixgbe_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_vf_info *vf_data;
+ struct rte_pci_device *pci_dev;
+ struct rte_eth_link *link;
+
+ if (!representor)
+ return -ENOMEM;
+
+ representor->vf_id =
+ ((struct ixgbe_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct ixgbe_vf_representor *)init_params)->switch_domain_id;
+ representor->pf_ethdev =
+ ((struct ixgbe_vf_representor *)init_params)->pf_ethdev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev);
+
+ if (representor->vf_id >= pci_dev->max_vfs)
+ return -ENODEV;
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &ixgbe_vf_representor_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = ixgbe_vf_representor_rx_burst;
+ ethdev->tx_pkt_burst = ixgbe_vf_representor_tx_burst;
+
+ /* Setting the number of queues allocated to the VF */
+ ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+ ethdev->data->nb_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ /* Reference VF mac address from PF data structure */
+ vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ representor->pf_ethdev->data->dev_private);
+
+ ethdev->data->mac_addrs = (struct ether_addr *)
+ vf_data[representor->vf_id].vf_mac_addresses;
+
+ /* Link state. Inherited from PF */
+ link = &representor->pf_ethdev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+int
+ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+ return 0;
+}
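Representor ports, typically instantiated through the PF's 'representor' devargs, identify themselves via the RTE_ETH_DEV_REPRESENTOR flag and the switch_info fields filled in above. A short application-side sketch, not part of this patch, that lists them:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: print the switch identity of every port that reports one. */
static void
list_switch_ports(void)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		struct rte_eth_dev_info info;

		rte_eth_dev_info_get(port_id, &info);
		if (info.switch_info.name == NULL)
			continue; /* PMD does not expose switch information */

		printf("port %u: switch %s, domain %u, switch port %u\n",
		       port_id, info.switch_info.name,
		       info.switch_info.domain_id,
		       info.switch_info.port_id);
	}
}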
diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build
index 60af0bae..02d5ef5e 100644
--- a/drivers/net/ixgbe/meson.build
+++ b/drivers/net/ixgbe/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
+
cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS']
subdir('base')
@@ -17,6 +19,7 @@ sources = files(
'ixgbe_pf.c',
'ixgbe_rxtx.c',
'ixgbe_tm.c',
+ 'ixgbe_vf_representor.c',
'rte_pmd_ixgbe.c'
)
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index d8ca8ca3..3a874f9a 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -5,6 +5,7 @@
#include <rte_ethdev_driver.h>
#include "base/ixgbe_api.h"
+#include "base/ixgbe_x550.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"
@@ -880,6 +881,34 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
return 0;
}
+int __rte_experimental
+rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t fctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+ /* If 'enable' set the SBP bit else clear it */
+ if (enable)
+ fctrl |= IXGBE_FCTRL_SBP;
+ else
+ fctrl &= ~(IXGBE_FCTRL_SBP);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ return 0;
+}
+
#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
rte_pmd_ixgbe_bypass_init(uint16_t port_id)
@@ -1012,3 +1041,204 @@ rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
return ixgbe_bypass_wd_reset(dev);
}
#endif
+
+/**
+ * rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared PHY token as needed
+ */
+STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask)
+{
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status = IXGBE_SUCCESS;
+
+ while (--retries) {
+ status = ixgbe_acquire_swfw_semaphore(hw, mask);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n",
+ status);
+ return status;
+ }
+ status = ixgbe_get_phy_token(hw);
+ if (status == IXGBE_SUCCESS)
+ return IXGBE_SUCCESS;
+
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n",
+ status);
+
+ ixgbe_release_swfw_semaphore(hw, mask);
+ if (status != IXGBE_ERR_TOKEN_RETRY) {
+ PMD_DRV_LOG(ERR,
+ "Retry get PHY token failed, Status=%d\n",
+ status);
+ return status;
+ }
+ }
+ PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
+ return status;
+}
+
+/**
+ * rte_pmd_ixgbe_release_swfw - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts the shared phy token as needed
+ */
+STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask)
+{
+ ixgbe_put_phy_token(hw);
+ ixgbe_release_swfw_semaphore(hw, mask);
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_lock(uint16_t port)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ if (hw->bus.lan_id)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ return IXGBE_SUCCESS;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlock(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+ struct ixgbe_hw *hw;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ if (hw->bus.lan_id)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ rte_pmd_ixgbe_release_swfw(hw, swfw_mask);
+
+ return IXGBE_SUCCESS;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 i, data, command;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ /* Setup and write the read command */
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+ if (command & IXGBE_MSCA_MDI_COMMAND)
+ return IXGBE_ERR_PHY;
+
+ /* Read operation is complete. Get the data from MSRWD */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)data;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data)
+{
+ struct ixgbe_hw *hw;
+ u32 i, command;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ /* Put the data in the MDI single read and write data register*/
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the write command */
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+ if (command & IXGBE_MSCA_MDI_COMMAND) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+ return 0;
+}
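
Editor's note: a minimal usage sketch (not part of the patch) showing the intended call
sequence for the new experimental MDIO API: take the lock, perform unlocked accesses,
then release the lock. The reg/dev_type values a caller passes are device specific and
purely illustrative here.

#include <stdint.h>
#include <rte_pmd_ixgbe.h>

/* Hypothetical helper: read one PHY register on @port while holding the
 * MDIO (SW/FW semaphore) lock exported by this patch.
 */
static int
example_phy_read(uint16_t port, uint32_t reg, uint32_t dev_type,
		 uint16_t *val)
{
	int ret;

	ret = rte_pmd_ixgbe_mdio_lock(port);
	if (ret != 0)
		return ret;	/* -ENODEV, -ENOTSUP or IXGBE_ERR_SWFW_SYNC */

	ret = rte_pmd_ixgbe_mdio_unlocked_read(port, reg, dev_type, val);

	rte_pmd_ixgbe_mdio_unlock(port);	/* always drop the lock */
	return ret;
}
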
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 11a9f334..72a941f9 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -573,6 +573,77 @@ int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout);
*/
int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port);
+/**
+ * Acquire the SWFW semaphore lock for MDIO access.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_SWFW_SYNC) if the SW/FW semaphore acquisition failed.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_lock(uint16_t port);
+
+/**
+ * Release the SWFW semaphore lock used for MDIO access.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlock(uint16_t port);
+
+/**
+ * Read a PHY register using MDIO without taking the MDIO lock.
+ * The lock must be taken separately before calling this API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32-bit PHY register address.
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Pointer for reading PHY register data
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_PHY) if the PHY read command failed.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data);
+
+/**
+ * Write data to a PHY register using MDIO without taking the MDIO lock.
+ * The lock must be taken separately before calling this API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32-bit PHY register address.
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Data to write to PHY register
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_PHY) if the PHY write command failed.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data);
/**
* Response sent back to ixgbe driver from user app after callback
@@ -637,4 +708,17 @@ enum {
((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \
(x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM))
+/**
+ * Enable or disable the 'SBP' (store bad packets) bit in the FCTRL register
+ * to control whether the port receives all packets.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param enable
+ * 0 to disable and nonzero to enable the 'SBP' bit in the FCTRL register
+ * so that all packets are received.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable);
#endif /* _PMD_IXGBE_H_ */
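
Editor's note: a hedged usage sketch (not part of the patch) for the new
rte_pmd_ixgbe_upd_fctrl_sbp() helper declared above; port 0 is an arbitrary
example value.

#include <rte_pmd_ixgbe.h>

/* Illustrative only: turn the SBP bit on for port 0, then turn it off again. */
static int
example_toggle_sbp(void)
{
	int ret;

	ret = rte_pmd_ixgbe_upd_fctrl_sbp(0, 1);	/* enable: receive all packets */
	if (ret != 0)
		return ret;

	return rte_pmd_ixgbe_upd_fctrl_sbp(0, 0);	/* disable again */
}
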
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index bf776742..c814f96d 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -52,3 +52,13 @@ DPDK_17.08 {
rte_pmd_ixgbe_bypass_wd_timeout_show;
rte_pmd_ixgbe_bypass_wd_timeout_store;
} DPDK_17.05;
+
+EXPERIMENTAL {
+ global:
+
+ rte_pmd_ixgbe_mdio_lock;
+ rte_pmd_ixgbe_mdio_unlock;
+ rte_pmd_ixgbe_mdio_unlocked_read;
+ rte_pmd_ixgbe_mdio_unlocked_write;
+ rte_pmd_ixgbe_upd_fctrl_sbp;
+};
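
Editor's note: because these symbols are added under the EXPERIMENTAL version node,
applications normally have to opt in to the experimental API before calling them. A
minimal sketch of that opt-in, assuming DPDK's usual ALLOW_EXPERIMENTAL_API gate,
follows.

/* Sketch: define ALLOW_EXPERIMENTAL_API (typically via -DALLOW_EXPERIMENTAL_API
 * in the application's CFLAGS, or before including the header) so that calls to
 * the __rte_experimental functions above are not flagged by the compiler.
 */
#define ALLOW_EXPERIMENTAL_API 1
#include <rte_pmd_ixgbe.h>
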