From f239aed5e674965691846e8ce3f187dd47523689 Mon Sep 17 00:00:00 2001 From: Luca Boccassi Date: Wed, 16 Aug 2017 18:42:05 +0100 Subject: New upstream version 17.08 Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7 Signed-off-by: Luca Boccassi --- drivers/net/ixgbe/Makefile | 3 +- drivers/net/ixgbe/base/README | 2 +- drivers/net/ixgbe/base/ixgbe_common.c | 3 +- drivers/net/ixgbe/base/ixgbe_x550.c | 30 +- drivers/net/ixgbe/ixgbe_bypass.c | 5 +- drivers/net/ixgbe/ixgbe_bypass.h | 4 +- drivers/net/ixgbe/ixgbe_bypass_api.h | 4 +- drivers/net/ixgbe/ixgbe_bypass_defines.h | 4 +- drivers/net/ixgbe/ixgbe_ethdev.c | 328 +++++++-- drivers/net/ixgbe/ixgbe_ethdev.h | 93 ++- drivers/net/ixgbe/ixgbe_fdir.c | 37 +- drivers/net/ixgbe/ixgbe_flow.c | 668 ++++++++++++----- drivers/net/ixgbe/ixgbe_pf.c | 26 +- drivers/net/ixgbe/ixgbe_rxtx.c | 544 +++++++------- drivers/net/ixgbe/ixgbe_rxtx.h | 7 + drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 4 +- drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 108 ++- drivers/net/ixgbe/ixgbe_tm.c | 1043 +++++++++++++++++++++++++++ drivers/net/ixgbe/rte_pmd_ixgbe.c | 153 +++- drivers/net/ixgbe/rte_pmd_ixgbe.h | 215 ++++++ drivers/net/ixgbe/rte_pmd_ixgbe_version.map | 14 + 21 files changed, 2685 insertions(+), 610 deletions(-) create mode 100644 drivers/net/ixgbe/ixgbe_tm.c (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile index 5529d81c..5e57cb35 100644 --- a/drivers/net/ixgbe/Makefile +++ b/drivers/net/ixgbe/Makefile @@ -119,11 +119,12 @@ else SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c endif -ifeq ($(CONFIG_RTE_NIC_BYPASS),y) +ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y) SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c endif SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c # install this header file SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README index a61617be..8c833b44 100644 --- a/drivers/net/ixgbe/base/README +++ b/drivers/net/ixgbe/base/README @@ -34,7 +34,7 @@ IntelĀ® IXGBE driver =================== This directory contains source code of FreeBSD ixgbe driver of version -cid-10g-shared-code.2017.03.29 released by the team which develop +cid-10g-shared-code.2017.05.16 released by the team which develop basic drivers for any ixgbe NIC. The sub-directory of base/ contains the original source package. 
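Besides switching the bypass objects from CONFIG_RTE_NIC_BYPASS to the driver-specific CONFIG_RTE_LIBRTE_IXGBE_BYPASS, the Makefile now builds the new ixgbe_tm.c, which hooks the PMD into the generic traffic-manager API through the .tm_ops_get callback added to ixgbe_eth_dev_ops further down in this patch. A rough sketch of how an application might drive that hierarchy through rte_tm.h follows; the ids, the function name and the shaper parameters are illustrative only, and whether this exact port/TC/queue layout is accepted is decided by the checks in ixgbe_tm.c (not reproduced in this excerpt):

#include <rte_tm.h>

/* Illustrative ids only; none of these names come from the patch. */
#define PORT_ID    0
#define SHAPER_ID  1
#define NODE_PORT  100
#define NODE_TC0   200
#define NODE_Q0    300

static int ixgbe_tm_hierarchy_sketch(void)
{
        struct rte_tm_error err;
        /* 1 Gbit/s committed/peak rate, expressed in bytes per second */
        struct rte_tm_shaper_params sp = {
                .committed = { .rate = 125000000ULL, .size = 4096 },
                .peak      = { .rate = 125000000ULL, .size = 4096 },
        };
        struct rte_tm_node_params nonleaf = {
                .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
                .nonleaf = { .n_sp_priorities = 1 },
        };
        struct rte_tm_node_params leaf = {
                .shaper_profile_id = SHAPER_ID,
        };
        int ret;

        ret = rte_tm_shaper_profile_add(PORT_ID, SHAPER_ID, &sp, &err);
        if (ret)
                return ret;

        /* port (level 0) -> TC (level 1) -> queue (level 2), mirroring the
         * IXGBE_TM_NODE_TYPE_PORT/TC/QUEUE levels introduced in ixgbe_ethdev.h */
        ret = rte_tm_node_add(PORT_ID, NODE_PORT, RTE_TM_NODE_ID_NULL,
                              0, 1, 0, &nonleaf, &err);
        if (ret == 0)
                ret = rte_tm_node_add(PORT_ID, NODE_TC0, NODE_PORT,
                                      0, 1, 1, &nonleaf, &err);
        if (ret == 0)
                ret = rte_tm_node_add(PORT_ID, NODE_Q0, NODE_TC0,
                                      0, 1, 2, &leaf, &err);
        if (ret)
                return ret;

        /* commit before rte_eth_dev_start(); ixgbe_dev_start() below only
         * warns when a hierarchy exists but was never committed */
        return rte_tm_hierarchy_commit(PORT_ID, 1 /* clear on fail */, &err);
}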
This driver is valid for the product(s) listed below diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c index 4dabb434..7f85713e 100644 --- a/drivers/net/ixgbe/base/ixgbe_common.c +++ b/drivers/net/ixgbe/base/ixgbe_common.c @@ -504,7 +504,8 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) } /* Initialize the LED link active for LED blink support */ - hw->mac.ops.init_led_link_act(hw); + if (hw->mac.ops.init_led_link_act) + hw->mac.ops.init_led_link_act(hw); if (status != IXGBE_SUCCESS) DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status); diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c index 674dc144..9862391b 100644 --- a/drivers/net/ixgbe/base/ixgbe_x550.c +++ b/drivers/net/ixgbe/base/ixgbe_x550.c @@ -86,6 +86,10 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw) /* Manageability interface */ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550; switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_1G_T: + hw->mac.ops.led_on = NULL; + hw->mac.ops.led_off = NULL; + break; case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: hw->mac.ops.led_on = ixgbe_led_on_t_X550em; @@ -459,9 +463,13 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) hw->phy.type = ixgbe_phy_x550em_kr; break; case IXGBE_DEV_ID_X550EM_A_10G_T: - case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); + case IXGBE_DEV_ID_X550EM_X_1G_T: + hw->phy.type = ixgbe_phy_ext_1g_t; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; + break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: hw->phy.type = ixgbe_phy_fw; @@ -751,6 +759,11 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) phy->ops.set_phy_power = NULL; phy->ops.get_firmware_version = NULL; break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + mac->ops.setup_fc = NULL; + phy->ops.identify = ixgbe_identify_phy_x550em; + phy->ops.set_phy_power = NULL; + break; default: phy->ops.identify = ixgbe_identify_phy_x550em; } @@ -945,6 +958,11 @@ s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw) ixgbe_write_i2c_combined_generic_unlocked; link->addr = IXGBE_CS4227; + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) { + mac->ops.setup_fc = NULL; + mac->ops.setup_eee = NULL; + mac->ops.init_led_link_act = NULL; + } return ret_val; } @@ -1915,6 +1933,8 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) ixgbe_setup_mac_link_sfp_x550em; break; case ixgbe_media_type_copper: + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) + break; if (hw->mac.type == ixgbe_mac_X550EM_a) { if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { @@ -2380,10 +2400,6 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) /* set up for CS4227 usage */ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; break; - case IXGBE_DEV_ID_X550EM_X_1G_T: - phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22; - phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22; - break; default: break; } @@ -2414,6 +2430,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) case ixgbe_phy_ext_1g_t: /* link is managed by FW */ phy->ops.setup_link = NULL; + phy->ops.reset = NULL; break; case ixgbe_phy_x550em_xfi: /* link is managed by HW */ @@ -2565,10 +2582,9 @@ mac_reset_top: status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status != IXGBE_SUCCESS) { ERROR_REPORT2(IXGBE_ERROR_CAUTION, - "semaphore failed with %d", status); + "semaphore failed with %d", status); return IXGBE_ERR_SWFW_SYNC; } - ctrl 
|= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c index 70069284..38a44936 100644 --- a/drivers/net/ixgbe/ixgbe_bypass.c +++ b/drivers/net/ixgbe/ixgbe_bypass.c @@ -36,6 +36,7 @@ #include #include "ixgbe_ethdev.h" #include "ixgbe_bypass_api.h" +#include "rte_pmd_ixgbe.h" #define BYPASS_STATUS_OFF_MASK 3 @@ -284,7 +285,7 @@ ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout) FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP); /* disable the timer with timeout of zero */ - if (timeout == RTE_BYPASS_TMT_OFF) { + if (timeout == RTE_PMD_IXGBE_BYPASS_TMT_OFF) { status = 0x0; /* WDG enable off */ mask = BYPASS_WDT_ENABLE_M; } else { @@ -355,7 +356,7 @@ ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout) wdg = by_ctl & BYPASS_WDT_ENABLE_M; if (!wdg) - *wd_timeout = RTE_BYPASS_TMT_OFF; + *wd_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; else *wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) & BYPASS_WDT_MASK; diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h index 5f5c63e3..09155bb3 100644 --- a/drivers/net/ixgbe/ixgbe_bypass.h +++ b/drivers/net/ixgbe/ixgbe_bypass.h @@ -34,7 +34,7 @@ #ifndef _IXGBE_BYPASS_H_ #define _IXGBE_BYPASS_H_ -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS struct ixgbe_bypass_mac_ops { s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status); @@ -63,6 +63,6 @@ s32 ixgbe_bypass_wd_reset(struct rte_eth_dev *dev); s32 ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw); s32 ixgbe_bypass_init_hw(struct ixgbe_hw *hw); -#endif /* RTE_NIC_BYPASS */ +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ #endif /* _IXGBE_BYPASS_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_bypass_api.h b/drivers/net/ixgbe/ixgbe_bypass_api.h index aec8f1ec..d52fde04 100644 --- a/drivers/net/ixgbe/ixgbe_bypass_api.h +++ b/drivers/net/ixgbe/ixgbe_bypass_api.h @@ -34,7 +34,7 @@ #ifndef _IXGBE_BYPASS_API_H_ #define _IXGBE_BYPASS_API_H_ -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS #include "ixgbe_bypass_defines.h" /** @@ -295,6 +295,6 @@ static s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value) return 0; } -#endif /* RTE_NIC_BYPASS */ +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ #endif /* _IXGBE_BYPASS_API_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h index cafcb278..d12c2714 100644 --- a/drivers/net/ixgbe/ixgbe_bypass_defines.h +++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h @@ -34,7 +34,7 @@ #ifndef _IXGBE_BYPASS_DEFINES_H_ #define _IXGBE_BYPASS_DEFINES_H_ -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS #define msleep(x) rte_delay_us(x*1000) #define usleep_range(min, max) rte_delay_us(min) @@ -155,6 +155,6 @@ enum ixgbe_state_t { #define IXGBE_BYPASS_FW_WRITE_FAILURE -35 -#endif /* RTE_NIC_BYPASS */ +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ #endif /* _IXGBE_BYPASS_DEFINES_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index aeaa432c..22171d86 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -57,7 +57,6 @@ #include #include #include -#include #include #include #include @@ -187,13 +186,13 @@ ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, uint64_t *values, unsigned int n); static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev); static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); -static int ixgbe_dev_xstats_get_names(__rte_unused 
struct rte_eth_dev *dev, +static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, - __rte_unused unsigned int size); -static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, - struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit); + unsigned int size); +static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned limit); static int ixgbe_dev_xstats_get_names_by_id( - __rte_unused struct rte_eth_dev *dev, + struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, unsigned int limit); @@ -240,7 +239,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); -static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev); +static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); @@ -262,6 +261,8 @@ static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); static int ixgbevf_dev_configure(struct rte_eth_dev *dev); static int ixgbevf_dev_start(struct rte_eth_dev *dev); +static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); static void ixgbevf_dev_stop(struct rte_eth_dev *dev); static void ixgbevf_dev_close(struct rte_eth_dev *dev); static void ixgbevf_intr_disable(struct ixgbe_hw *hw); @@ -302,9 +303,6 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); static void ixgbe_configure_msix(struct rte_eth_dev *dev); -static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t tx_rate); - static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); @@ -444,13 +442,8 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = { { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, @@ -481,7 +474,7 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = { { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) }, -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, 
IXGBE_DEV_ID_82599_BYPASS) }, #endif { .vendor_id = 0, /* sentinel */ }, @@ -575,17 +568,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .set_queue_rate_limit = ixgbe_set_queue_rate_limit, .reta_update = ixgbe_dev_rss_reta_update, .reta_query = ixgbe_dev_rss_reta_query, -#ifdef RTE_NIC_BYPASS - .bypass_init = ixgbe_bypass_init, - .bypass_state_set = ixgbe_bypass_state_store, - .bypass_state_show = ixgbe_bypass_state_show, - .bypass_event_set = ixgbe_bypass_event_store, - .bypass_event_show = ixgbe_bypass_event_show, - .bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store, - .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show, - .bypass_ver_show = ixgbe_bypass_ver_show, - .bypass_wd_reset = ixgbe_bypass_wd_reset, -#endif /* RTE_NIC_BYPASS */ .rss_hash_update = ixgbe_dev_rss_hash_update, .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, .filter_ctrl = ixgbe_dev_filter_ctrl, @@ -608,6 +590,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set, .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, + .tm_ops_get = ixgbe_tm_ops_get, }; /* @@ -618,7 +601,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .dev_configure = ixgbevf_dev_configure, .dev_start = ixgbevf_dev_start, .dev_stop = ixgbevf_dev_stop, - .link_update = ixgbe_dev_link_update, + .link_update = ixgbevf_dev_link_update, .stats_get = ixgbevf_dev_stats_get, .xstats_get = ixgbevf_dev_xstats_get, .stats_reset = ixgbevf_dev_stats_reset, @@ -1131,7 +1114,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); @@ -1190,11 +1173,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) hw->allow_unsupported_sfp = 1; /* Initialize the shared code (base driver) */ -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS diag = ixgbe_bypass_init_shared_code(hw); #else diag = ixgbe_init_shared_code(hw); -#endif /* RTE_NIC_BYPASS */ +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ if (diag != IXGBE_SUCCESS) { PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); @@ -1227,11 +1210,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) return -EIO; } -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS diag = ixgbe_bypass_init_hw(hw); #else diag = ixgbe_init_hw(hw); -#endif /* RTE_NIC_BYPASS */ +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ /* * Devices with copper phys will fail to initialise if ixgbe_init_hw() @@ -1359,13 +1342,16 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* initialize bandwidth configuration info */ memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf)); + /* initialize Traffic Manager configuration */ + ixgbe_tm_conf_init(eth_dev); + return 0; } static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw; @@ -1412,6 +1398,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) /* clear all the filters list */ ixgbe_filterlist_flush(); + /* Remove all Traffic Manager configuration */ + ixgbe_tm_conf_uninit(eth_dev); + return 0; } @@ -1491,7 +1480,7 @@ static int ixgbe_fdir_filter_init(struct 
rte_eth_dev *eth_dev) TAILQ_INIT(&fdir_info->fdir_list); snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, - "fdir_%s", eth_dev->data->name); + "fdir_%s", eth_dev->device->name); fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); if (!fdir_info->hash_handle) { PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); @@ -1527,7 +1516,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) TAILQ_INIT(&l2_tn_info->l2_tn_list); snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, - "l2_tn_%s", eth_dev->data->name); + "l2_tn_%s", eth_dev->device->name); l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); if (!l2_tn_info->hash_handle) { PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); @@ -1598,7 +1587,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) { int diag; uint32_t tc, tcs; - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); @@ -1747,7 +1736,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw; @@ -2176,7 +2165,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) static int ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); switch (nb_rx_q) { case 1: @@ -2425,7 +2414,7 @@ ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, uint16_t total_rate = 0; struct rte_pci_device *pci_dev; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); rte_eth_link_get_nowait(dev->data->port_id, &link); if (vf >= pci_dev->max_vfs) @@ -2496,7 +2485,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; int err, link_up = 0, negotiate = 0; @@ -2505,6 +2494,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) int status; uint16_t vf, idx; uint32_t *link_speeds; + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); @@ -2651,9 +2642,22 @@ ixgbe_dev_start(struct rte_eth_dev *dev) speed = 0x0; if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { - speed = (hw->mac.type != ixgbe_mac_82598EB) ? 
- IXGBE_LINK_SPEED_82599_AUTONEG : - IXGBE_LINK_SPEED_82598_AUTONEG; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + speed = IXGBE_LINK_SPEED_82598_AUTONEG; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + speed = IXGBE_LINK_SPEED_82599_AUTONEG; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + speed = IXGBE_LINK_SPEED_X550_AUTONEG; + break; + default: + speed = IXGBE_LINK_SPEED_82599_AUTONEG; + } } else { if (*link_speeds & ETH_LINK_SPEED_10G) speed |= IXGBE_LINK_SPEED_10GB_FULL; @@ -2672,7 +2676,9 @@ skip_link_setup: if (rte_intr_allow_others(intr_handle)) { /* check if lsc interrupt is enabled */ if (dev->data->dev_conf.intr_conf.lsc != 0) - ixgbe_dev_lsc_interrupt_setup(dev); + ixgbe_dev_lsc_interrupt_setup(dev, TRUE); + else + ixgbe_dev_lsc_interrupt_setup(dev, FALSE); ixgbe_dev_macsec_interrupt_setup(dev); } else { rte_intr_callback_unregister(intr_handle, @@ -2695,6 +2701,11 @@ skip_link_setup: ixgbe_l2_tunnel_conf(dev); ixgbe_filter_restore(dev); + if (tm_conf->root && !tm_conf->committed) + PMD_DRV_LOG(WARNING, + "please call hierarchy_commit() " + "before starting the port"); + return 0; error: @@ -2714,9 +2725,11 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int vf; + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); @@ -2763,6 +2776,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) rte_free(intr_handle->intr_vec); intr_handle->intr_vec = NULL; } + + /* reset hierarchy commit */ + tm_conf->committed = false; } /* @@ -2774,7 +2790,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type == ixgbe_mac_82599EB) { -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { /* Not suported in bypass mode */ PMD_INIT_LOG(ERR, "Set link up is not supported " @@ -2804,7 +2820,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type == ixgbe_mac_82599EB) { -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { /* Not suported in bypass mode */ PMD_INIT_LOG(ERR, "Set link down is not supported " @@ -3194,7 +3210,7 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, } static int ixgbe_dev_xstats_get_names_by_id( - __rte_unused struct rte_eth_dev *dev, + struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, unsigned int limit) @@ -3582,7 +3598,7 @@ ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) static void ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *dev_conf = &dev->data->dev_conf; @@ -3685,6 +3701,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) hw->mac.type == ixgbe_mac_X550_vf) { dev_info->speed_capa |= ETH_LINK_SPEED_100M; } + if (hw->mac.type == 
ixgbe_mac_X550) { + dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; + dev_info->speed_capa |= ETH_LINK_SPEED_5G; + } } static const uint32_t * @@ -3717,6 +3737,12 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) return ptypes; + +#if defined(RTE_ARCH_X86) + if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || + dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) + return ptypes; +#endif return NULL; } @@ -3724,7 +3750,7 @@ static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); dev_info->pci_dev = pci_dev; @@ -3776,9 +3802,116 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, dev_info->tx_desc_lim = tx_desc_lim; } +static int +ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + int *link_up, int wait_to_complete) +{ + /** + * for a quick link status checking, wait_to_compelet == 0, + * skip PF link status checking + */ + bool no_pflink_check = wait_to_complete == 0; + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + uint32_t links_reg, in_msg; + int ret_val = 0; + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + rte_delay_us(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Since Reserved in older MAC's */ + if (hw->mac.type >= ixgbe_mac_X550) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + if (no_pflink_check) { + if (*speed == IXGBE_LINK_SPEED_UNKNOWN) + mac->get_link_status = true; + else + mac->get_link_status = false; + + goto out; + } + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { + /* msg is not CTS and is NACK we must have lost CTS status */ + if (in_msg & IXGBE_VT_MSGTYPE_NACK) + ret_val = -1; + goto out; + } + + /* the pf is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + ret_val = -1; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + 
* longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return ret_val; +} + /* return 0 means link status changed, -1 means not changed */ static int -ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait_to_complete, int vf) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_link link, old; @@ -3788,6 +3921,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) int link_up; int diag; u32 speed = 0; + int wait = 1; bool autoneg = false; link.link_status = ETH_LINK_DOWN; @@ -3808,9 +3942,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) /* check if it needs to wait to complete, if lsc interrupt is enabled */ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) - diag = ixgbe_check_link(hw, &link_speed, &link_up, 0); + wait = 0; + + if (vf) + diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); else - diag = ixgbe_check_link(hw, &link_speed, &link_up, 1); + diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); if (diag != 0) { link.link_speed = ETH_SPEED_NUM_100M; @@ -3847,6 +3984,14 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) link.link_speed = ETH_SPEED_NUM_1G; break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + link.link_speed = ETH_SPEED_NUM_2_5G; + break; + + case IXGBE_LINK_SPEED_5GB_FULL: + link.link_speed = ETH_SPEED_NUM_5G; + break; + case IXGBE_LINK_SPEED_10GB_FULL: link.link_speed = ETH_SPEED_NUM_10G; break; @@ -3859,6 +4004,18 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) return 0; } +static int +ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); +} + +static int +ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); +} + static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) { @@ -3916,19 +4073,24 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) * * @param dev * Pointer to struct rte_eth_dev. + * @param on + * Enable or Disable. * * @return * - On success, zero. * - On failure, a negative value. 
*/ static int -ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev) +ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); ixgbe_dev_link_status_print(dev); - intr->mask |= IXGBE_EICR_LSC; + if (on) + intr->mask |= IXGBE_EICR_LSC; + else + intr->mask &= ~IXGBE_EICR_LSC; return 0; } @@ -4035,7 +4197,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; memset(&link, 0, sizeof(link)); @@ -4143,7 +4305,7 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); @@ -4166,12 +4328,13 @@ ixgbe_dev_interrupt_delayed_handler(void *param) ixgbe_dev_link_update(dev, 0); intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; ixgbe_dev_link_status_print(dev); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL, NULL); } if (intr->flags & IXGBE_FLAG_MACSEC) { _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, - NULL); + NULL, NULL); intr->flags &= ~IXGBE_FLAG_MACSEC; } @@ -4660,7 +4823,7 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); ixgbe_remove_rar(dev, 0); @@ -4670,7 +4833,7 @@ ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) static bool is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) { - if (strcmp(dev->data->drv_name, drv->driver.name)) + if (strcmp(dev->device->driver->name, drv->driver.name)) return false; return true; @@ -4690,7 +4853,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) struct ixgbe_hw *hw; struct rte_eth_dev_info dev_info; uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; - struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; + struct rte_eth_dev_data *dev_data = dev->data; ixgbe_dev_info_get(dev, &dev_info); @@ -4698,13 +4861,15 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) return -EINVAL; - /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. + /* If device is started, refuse mtu that requires the support of + * scattered packets when this feature has not been enabled before. 
*/ - if (!rx_conf->enable_scatter && + if (dev_data->dev_started && !dev_data->scattered_rx && (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > - dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { + PMD_INIT_LOG(ERR, "Stop port first."); return -EINVAL; + } hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); @@ -4799,7 +4964,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t intr_vector = 0; - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int err, mask = 0; @@ -4863,7 +5028,7 @@ static void ixgbevf_dev_stop(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; PMD_INIT_FUNC_TRACE(); @@ -5316,6 +5481,9 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) if (ixgbe_vt_check(hw) < 0) return -ENOTSUP; + if (rule_id >= IXGBE_MAX_MIRROR_RULES) + return -EINVAL; + memset(&mr_info->mr_conf[rule_id], 0, sizeof(struct rte_eth_mirror_conf)); @@ -5336,7 +5504,7 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t mask; struct ixgbe_hw *hw = @@ -5370,7 +5538,7 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t mask; struct ixgbe_hw *hw = @@ -5473,7 +5641,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, tmp |= (msix_vector << (8 * (queue & 0x3))); IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); } else if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540)) { + (hw->mac.type == ixgbe_mac_X540) || + (hw->mac.type == ixgbe_mac_X550)) { if (direction == -1) { /* other causes */ idx = ((queue & 1) * 8); @@ -5495,7 +5664,7 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, static void ixgbevf_configure_msix(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -5529,7 +5698,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) static void ixgbe_configure_msix(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -5581,6 +5750,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); break; 
default: @@ -5598,8 +5768,9 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); } -static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t tx_rate) +int +ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t tx_rate) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t rf_dec, rf_int; @@ -7531,7 +7702,7 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel, bool en) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); int ret = 0; uint32_t vmtir, vmvir; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -7879,7 +8050,8 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev) /* PF reset VF event */ if (in_msg == IXGBE_PF_CONTROL_MSG) - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL, NULL); } static int diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h index b576a6f4..caa50c8b 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -33,12 +33,15 @@ #ifndef _IXGBE_ETHDEV_H_ #define _IXGBE_ETHDEV_H_ +#include "base/ixgbe_type.h" #include "base/ixgbe_dcb.h" #include "base/ixgbe_dcb_82599.h" #include "base/ixgbe_dcb_82598.h" #include "ixgbe_bypass.h" #include #include +#include +#include /* need update link, bit flag */ #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) @@ -72,7 +75,7 @@ #endif #define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY)) -/* EITR Inteval is in 2048ns uinits for 1G and 10G link */ +/* EITR Interval is in 2048ns uinits for 1G and 10G link */ #define IXGBE_EITR_INTERVAL_UNIT_NS 2048 #define IXGBE_EITR_ITR_INT_SHIFT 3 #define IXGBE_EITR_INTERVAL_US(us) \ @@ -152,6 +155,13 @@ return -ENOTSUP;\ } while (0) +/* Link speed for X550 auto negotiation */ +#define IXGBE_LINK_SPEED_X550_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_2_5GB_FULL | \ + IXGBE_LINK_SPEED_5GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + /* * Information about the fdir mode. */ @@ -189,6 +199,7 @@ struct ixgbe_fdir_rule { uint32_t fdirflags; /* drop or forward */ uint32_t soft_id; /* an unique value for this rule */ uint8_t queue; /* assigned rx queue */ + uint8_t flex_bytes_offset; }; struct ixgbe_hw_fdir_info { @@ -434,6 +445,68 @@ struct ixgbe_bw_conf { uint8_t tc_num; /* Number of TCs. */ }; +/* Struct to store Traffic Manager shaper profile. */ +struct ixgbe_tm_shaper_profile { + TAILQ_ENTRY(ixgbe_tm_shaper_profile) node; + uint32_t shaper_profile_id; + uint32_t reference_count; + struct rte_tm_shaper_params profile; +}; + +TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile); + +/* node type of Traffic Manager */ +enum ixgbe_tm_node_type { + IXGBE_TM_NODE_TYPE_PORT, + IXGBE_TM_NODE_TYPE_TC, + IXGBE_TM_NODE_TYPE_QUEUE, + IXGBE_TM_NODE_TYPE_MAX, +}; + +/* Struct to store Traffic Manager node configuration. 
*/ +struct ixgbe_tm_node { + TAILQ_ENTRY(ixgbe_tm_node) node; + uint32_t id; + uint32_t priority; + uint32_t weight; + uint32_t reference_count; + uint16_t no; + struct ixgbe_tm_node *parent; + struct ixgbe_tm_shaper_profile *shaper_profile; + struct rte_tm_node_params params; +}; + +TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node); + +/* The configuration of Traffic Manager */ +struct ixgbe_tm_conf { + struct ixgbe_shaper_profile_list shaper_profile_list; + struct ixgbe_tm_node *root; /* root node - port */ + struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */ + struct ixgbe_tm_node_list queue_list; /* node list for all the queues */ + /** + * The number of added TC nodes. + * It should be no more than the TC number of this port. + */ + uint32_t nb_tc_node; + /** + * The number of added queue nodes. + * It should be no more than the queue number of this port. + */ + uint32_t nb_queue_node; + /** + * This flag is used to check if APP can change the TM node + * configuration. + * When it's true, means the configuration is applied to HW, + * APP should not change the configuration. + * As we don't support on-the-fly configuration, when starting + * the port, APP should call the hierarchy_commit API to set this + * flag to true. When stopping the port, this flag should be set + * to false. + */ + bool committed; +}; + /* * Structure to store private data for each driver instance (for each port). */ @@ -450,9 +523,9 @@ struct ixgbe_adapter { struct ixgbe_mirror_info mr_data; struct ixgbe_vf_info *vfdata; struct ixgbe_uta_info uta_info; -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS struct ixgbe_bypass_info bps; -#endif /* RTE_NIC_BYPASS */ +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ struct ixgbe_filter_info filter; struct ixgbe_l2_tn_info l2_tn; struct ixgbe_bw_conf bw_conf; @@ -462,11 +535,9 @@ struct ixgbe_adapter { struct rte_timecounter systime_tc; struct rte_timecounter rx_tstamp_tc; struct rte_timecounter tx_tstamp_tc; + struct ixgbe_tm_conf tm_conf; }; -#define IXGBE_DEV_TO_PCI(eth_dev) \ - RTE_DEV_TO_PCI((eth_dev)->device) - #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\ (&((struct ixgbe_adapter *)adapter)->hw) @@ -512,6 +583,9 @@ struct ixgbe_adapter { #define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \ (&((struct ixgbe_adapter *)adapter)->bw_conf) +#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \ + (&((struct ixgbe_adapter *)adapter)->tm_conf) + /* * RX/TX function prototypes */ @@ -624,6 +698,8 @@ void ixgbe_filterlist_flush(void); */ int ixgbe_fdir_configure(struct rte_eth_dev *dev); int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev); +int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + uint16_t offset); int ixgbe_fdir_filter_program(struct rte_eth_dev *dev, struct ixgbe_fdir_rule *rule, bool del, bool update); @@ -671,6 +747,11 @@ int ixgbe_vt_check(struct ixgbe_hw *hw); int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, uint16_t tx_rate, uint64_t q_msk); bool is_ixgbe_supported(struct rte_eth_dev *dev); +int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops); +void ixgbe_tm_conf_init(struct rte_eth_dev *dev); +void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev); +int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t tx_rate); static inline int ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c index 7f6c7b58..eb2d5581 100644 --- a/drivers/net/ixgbe/ixgbe_fdir.c +++ b/drivers/net/ixgbe/ixgbe_fdir.c @@ -302,7 +302,7 @@ 
fdir_set_input_mask_82599(struct rte_eth_dev *dev) * mask VM pool and DIPv6 since there are currently not supported * mask FLEX byte, it will be set in flex_conf */ - uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX; + uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6; uint32_t fdirtcpm; /* TCP source and destination port masks. */ uint32_t fdiripv6m; /* IPv6 source and destination masks. */ volatile uint32_t *reg; @@ -333,6 +333,10 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev) return -EINVAL; } + /* flex byte mask */ + if (info->mask.flex_bytes_mask == 0) + fdirm |= IXGBE_FDIRM_FLEX; + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); /* store the TCP/UDP port masks, bit reversed from port layout */ @@ -533,6 +537,31 @@ ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev) return -ENOTSUP; } +int +ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + uint16_t offset) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fdirctrl; + int i; + + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + + fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK; + fdirctrl |= ((offset >> 1) /* convert to word offset */ + << IXGBE_FDIRCTRL_FLEX_SHIFT); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + return 0; +} + static int fdir_set_input_mask(struct rte_eth_dev *dev, const struct rte_eth_fdir_masks *input_mask) @@ -654,7 +683,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev) /* * The defaults in the HW for RX PB 1-7 are not zero and so should be - * intialized to zero for non DCB mode otherwise actual total RX PB + * initialized to zero for non DCB mode otherwise actual total RX PB * would be bigger than programmed and filter space would run into * the PB 0 region. */ @@ -1243,7 +1272,9 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev, hw->mac.type == ixgbe_mac_X550EM_x || hw->mac.type == ixgbe_mac_X550EM_a) && (rule->ixgbe_fdir.formatted.flow_type == - IXGBE_ATR_FLOW_TYPE_IPV4) && + IXGBE_ATR_FLOW_TYPE_IPV4 || + rule->ixgbe_fdir.formatted.flow_type == + IXGBE_ATR_FLOW_TYPE_IPV6) && (info->mask.src_port_mask != 0 || info->mask.dst_port_mask != 0)) { PMD_DRV_LOG(ERR, "By this device," diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index 9aeb71e4..d6796088 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -56,7 +56,6 @@ #include #include #include -#include #include #include #include @@ -78,23 +77,40 @@ #define IXGBE_MIN_N_TUPLE_PRIO 1 #define IXGBE_MAX_N_TUPLE_PRIO 7 -#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\ - do { \ - item = pattern + index;\ - while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\ - index++; \ - item = pattern + index; \ - } \ - } while (0) - -#define NEXT_ITEM_OF_ACTION(act, actions, index)\ - do { \ - act = actions + index; \ - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\ - index++; \ - act = actions + index; \ - } \ - } while (0) +#define IXGBE_MAX_FLX_SOURCE_OFF 62 + +/** + * Endless loop will never happen with below assumption + * 1. there is at least one no-void item(END) + * 2. cur is before END. + */ +static inline +const struct rte_flow_item *next_no_void_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + cur ? 
cur + 1 : &pattern[0]; + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_VOID) + return next; + next++; + } +} + +static inline +const struct rte_flow_action *next_no_void_action( + const struct rte_flow_action actions[], + const struct rte_flow_action *cur) +{ + const struct rte_flow_action *next = + cur ? cur + 1 : &actions[0]; + while (1) { + if (next->type != RTE_FLOW_ACTION_TYPE_VOID) + return next; + next++; + } +} /** * Please aware there's an asumption for all the parsers. @@ -144,7 +160,6 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_udp *udp_mask; const struct rte_flow_item_sctp *sctp_spec; const struct rte_flow_item_sctp *sctp_mask; - uint32_t index; if (!pattern) { rte_flow_error_set(error, @@ -166,11 +181,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse pattern */ - index = 0; - /* the first not void item can be MAC or IPv4 */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4) { @@ -198,8 +210,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } /* check if the next not void item is IPv4 */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -252,11 +263,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->proto = ipv4_spec->hdr.next_proto_id; /* check if the next not void item is TCP or UDP */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && - item->type != RTE_FLOW_ITEM_TYPE_SCTP) { + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -265,7 +276,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } /* get the TCP/UDP info */ - if (!item->spec || !item->mask) { + if ((item->type != RTE_FLOW_ITEM_TYPE_END) && + (!item->spec || !item->mask)) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -345,7 +357,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, udp_spec = (const struct rte_flow_item_udp *)item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; - } else { + } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { sctp_mask = (const struct rte_flow_item_sctp *)item->mask; /** @@ -368,11 +380,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, sctp_spec = (const struct rte_flow_item_sctp *)item->spec; filter->dst_port = sctp_spec->hdr.dst_port; filter->src_port = sctp_spec->hdr.src_port; + } else { + goto action; } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -381,14 +394,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; +action: /** * n-tuple only supports forwarding, * check if the first not void action is QUEUE. 
*/ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -400,8 +412,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, ((const struct rte_flow_action_queue *)act->conf)->index; /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -482,9 +493,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, return -rte_errno; } - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || - filter->priority > IXGBE_5TUPLE_MAX_PRI || - filter->priority < IXGBE_5TUPLE_MIN_PRI) + if (filter->queue >= dev->data->nb_rx_queues) return -rte_errno; /* fixed value for ixgbe */ @@ -520,7 +529,6 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_eth *eth_spec; const struct rte_flow_item_eth *eth_mask; const struct rte_flow_action_queue *act_q; - uint32_t index; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -543,15 +551,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* Parse pattern */ - index = 0; - + item = next_no_void_pattern(pattern, NULL); /* The first non-void item should be MAC. */ - item = pattern + index; - while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { - index++; - item = pattern + index; - } if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -610,12 +611,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, filter->ether_type = rte_be_to_cpu_16(eth_spec->type); /* Check if the next non-void item is END. */ - index++; - item = pattern + index; - while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { - index++; - item = pattern + index; - } + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -625,13 +621,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, /* Parse action */ - index = 0; - /* Check if the first non-void action is QUEUE or DROP. 
*/ - act = actions + index; - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { - index++; - act = actions + index; - } + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && act->type != RTE_FLOW_ACTION_TYPE_DROP) { rte_flow_error_set(error, EINVAL, @@ -648,12 +638,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, } /* Check if the next non-void item is END */ - index++; - act = actions + index; - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { - index++; - act = actions + index; - } + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -725,7 +710,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, return -rte_errno; } - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) { + if (filter->queue >= dev->data->nb_rx_queues) { memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -793,7 +778,6 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_tcp *tcp_spec; const struct rte_flow_item_tcp *tcp_mask; const struct rte_flow_action_queue *act_q; - uint32_t index; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -816,11 +800,9 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse pattern */ - index = 0; /* the first not void item should be MAC or IPv4 or IPv6 or TCP */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6 && @@ -849,8 +831,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is IPv4 or IPv6 */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6) { rte_flow_error_set(error, EINVAL, @@ -872,8 +853,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is TCP */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -917,8 +897,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_syn_filter)); rte_flow_error_set(error, EINVAL, @@ -927,11 +906,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; - /* check if the first not void action is QUEUE. 
*/ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { memset(filter, 0, sizeof(struct rte_eth_syn_filter)); rte_flow_error_set(error, EINVAL, @@ -951,8 +927,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_syn_filter)); rte_flow_error_set(error, EINVAL, @@ -1012,6 +987,9 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev, ret = cons_parse_syn_filter(attr, pattern, actions, filter, error); + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + if (ret) return ret; @@ -1048,7 +1026,6 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_e_tag *e_tag_mask; const struct rte_flow_action *act; const struct rte_flow_action_queue *act_q; - uint32_t index; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1070,11 +1047,9 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, NULL, "NULL attribute."); return -rte_errno; } - /* parse pattern */ - index = 0; /* The first not void item should be e-tag. */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1121,8 +1096,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b); /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1159,11 +1133,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; - /* check if the first not void action is QUEUE. */ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1176,8 +1147,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, filter->pool = act_q->index; /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1213,6 +1183,9 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, return -rte_errno; } + if (l2_tn_filter->pool >= dev->data->nb_rx_queues) + return -rte_errno; + return ret; } @@ -1226,7 +1199,6 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, const struct rte_flow_action *act; const struct rte_flow_action_queue *act_q; const struct rte_flow_action_mark *mark; - uint32_t index; /* parse attr */ /* must be input direction */ @@ -1256,11 +1228,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; - /* check if the first not void action is QUEUE or DROP. 
*/ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && act->type != RTE_FLOW_ACTION_TYPE_DROP) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1274,12 +1243,19 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, act_q = (const struct rte_flow_action_queue *)act->conf; rule->queue = act_q->index; } else { /* drop */ + /* signature mode does not support drop action. */ + if (rule->mode == RTE_FDIR_MODE_SIGNATURE) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } rule->fdirflags = IXGBE_FDIRCMD_DROP; } /* check if the next not void item is MARK */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) && (act->type != RTE_FLOW_ACTION_TYPE_END)) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1294,8 +1270,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, if (act->type == RTE_FLOW_ACTION_TYPE_MARK) { mark = (const struct rte_flow_action_mark *)act->conf; rule->soft_id = mark->id; - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); } /* check if the next not void item is END */ @@ -1310,14 +1285,78 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return 0; } +/* search next no void pattern and skip fuzzy */ +static inline +const struct rte_flow_item *next_no_fuzzy_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + next_no_void_pattern(pattern, cur); + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY) + return next; + next = next_no_void_pattern(pattern, next); + } +} + +static inline uint8_t signature_match(const struct rte_flow_item pattern[]) +{ + const struct rte_flow_item_fuzzy *spec, *last, *mask; + const struct rte_flow_item *item; + uint32_t sh, lh, mh; + int i = 0; + + while (1) { + item = pattern + i; + if (item->type == RTE_FLOW_ITEM_TYPE_END) + break; + + if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) { + spec = + (const struct rte_flow_item_fuzzy *)item->spec; + last = + (const struct rte_flow_item_fuzzy *)item->last; + mask = + (const struct rte_flow_item_fuzzy *)item->mask; + + if (!spec || !mask) + return 0; + + sh = spec->thresh; + + if (!last) + lh = sh; + else + lh = last->thresh; + + mh = mask->thresh; + sh = sh & mh; + lh = lh & mh; + + if (!sh || sh > lh) + return 0; + + return 1; + } + + i++; + } + + return 0; +} + /** * Parse the rule to see if it is a IP or MAC VLAN flow director rule. * And get the flow director filter info BTW. * UDP/TCP/SCTP PATTERN: - * The first not void item can be ETH or IPV4. - * The second not void item must be IPV4 if the first one is ETH. - * The third not void item must be UDP or TCP or SCTP. + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item could be UDP or TCP or SCTP (optional) + * The next not void item could be RAW (for flexbyte, optional) * The next not void item must be END. + * A Fuzzy Match pattern can appear at any place before END. + * Fuzzy Match is optional for IPV4 but is required for IPV6 * MAC VLAN PATTERN: * The first not void item must be ETH. * The second not void item must be MAC VLAN. 
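[Editor's note, not part of the patch] The hunks above replace the open-coded index walking (NEXT_ITEM_OF_PATTERN / NEXT_ITEM_OF_ACTION plus a local index) with pointer-returning helpers, and next_no_fuzzy_pattern() then layers FUZZY skipping on top of next_no_void_pattern() so the new signature-match item never disturbs the per-item checks. The skip-VOID helpers themselves are defined earlier in ixgbe_flow.c and are not shown in this excerpt; behaviourally they are equivalent to the sketch below (the _sketch name is mine, not the driver's, and the action variant is the same walk over RTE_FLOW_ACTION_TYPE_VOID entries).

/* Sketch only: return the first non-VOID item, starting from the
 * beginning of the pattern when cur is NULL, otherwise from the item
 * after cur. Every rte_flow pattern ends with an END item, so the
 * walk always terminates.
 */
static inline const struct rte_flow_item *
next_no_void_pattern_sketch(const struct rte_flow_item pattern[],
			    const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next = cur ? cur + 1 : &pattern[0];

	while (next->type == RTE_FLOW_ITEM_TYPE_VOID)
		next++;
	return next;
}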
@@ -1334,6 +1373,14 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, * dst_addr 192.167.3.50 0xFFFFFFFF * UDP/TCP/SCTP src_port 80 0xFFFF * dst_port 80 0xFFFF + * FLEX relative 0 0x1 + * search 0 0x1 + * reserved 0 0 + * offset 12 0xFFFFFFFF + * limit 0 0xFFFF + * length 2 0xFFFF + * pattern[0] 0x86 0xFF + * pattern[1] 0xDD 0xFF * END * MAC VLAN pattern example: * ITEM Spec Mask @@ -1346,7 +1393,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, * Item->last should be NULL. */ static int -ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, +ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct ixgbe_fdir_rule *rule, @@ -1357,6 +1405,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, const struct rte_flow_item_eth *eth_mask; const struct rte_flow_item_ipv4 *ipv4_spec; const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec; + const struct rte_flow_item_ipv6 *ipv6_mask; const struct rte_flow_item_tcp *tcp_spec; const struct rte_flow_item_tcp *tcp_mask; const struct rte_flow_item_udp *udp_spec; @@ -1365,8 +1415,11 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, const struct rte_flow_item_sctp *sctp_mask; const struct rte_flow_item_vlan *vlan_spec; const struct rte_flow_item_vlan *vlan_mask; + const struct rte_flow_item_raw *raw_mask; + const struct rte_flow_item_raw *raw_spec; + uint8_t j; - uint32_t index, j; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1396,17 +1449,16 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask)); rule->mask.vlan_tci_mask = 0; - - /* parse pattern */ - index = 0; + rule->mask.flex_bytes_mask = 0; /** * The first not void item should be * MAC or IPv4 or TCP or UDP or SCTP. */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP) { @@ -1417,7 +1469,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } - rule->mode = RTE_FDIR_MODE_PERFECT; + if (signature_match(pattern)) + rule->mode = RTE_FDIR_MODE_SIGNATURE; + else + rule->mode = RTE_FDIR_MODE_PERFECT; /*Not supported last point for range*/ if (item->last) { @@ -1454,14 +1509,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->mask) { - /* If ethernet has meaning, it means MAC VLAN mode. */ - rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; rule->b_mask = TRUE; eth_mask = (const struct rte_flow_item_eth *)item->mask; /* Ether type should be masked. */ - if (eth_mask->type) { + if (eth_mask->type || + rule->mode == RTE_FDIR_MODE_SIGNATURE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1469,6 +1523,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } + /* If ethernet has meaning, it means MAC VLAN mode. */ + rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; + /** * src MAC address must be masked, * and don't support dst MAC address mask. 
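[Editor's note, not part of the patch] For reference, a minimal sketch of a flow that the parser above classifies as a signature-mode rule: the FUZZY item is what signature_match() looks for, and per the pattern description it is required for IPv6. The port id, queue and address are illustrative, the port is assumed to be configured with RTE_FDIR_MODE_SIGNATURE (and rx_queue < nb_rx_queues) for validation to pass, the optional RAW flex-byte item tabulated above is omitted for brevity, and the code targets the DPDK 17.08 rte_flow API.

#include <string.h>
#include <stdint.h>
#include <rte_flow.h>

/* Sketch only: validate a signature-mode IPv6 flow director rule that
 * steers matching packets to rx_queue.
 */
static int
fdir_ipv6_signature_example(uint8_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };
	struct rte_flow_item_ipv6 ipv6_spec;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_error err;

	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/* Match the full destination address; 2001:db8::1 is illustrative. */
	memset(ipv6_mask.hdr.dst_addr, 0xff, 16);
	ipv6_spec.hdr.dst_addr[0] = 0x20;
	ipv6_spec.hdr.dst_addr[1] = 0x01;
	ipv6_spec.hdr.dst_addr[2] = 0x0d;
	ipv6_spec.hdr.dst_addr[3] = 0xb8;
	ipv6_spec.hdr.dst_addr[15] = 0x01;

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_FUZZY,
		  .spec = &fuzzy_spec, .mask = &fuzzy_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6,
		  .spec = &ipv6_spec, .mask = &ipv6_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}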
@@ -1497,8 +1554,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is vlan or ipv4. * IPv6 is not supported. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, item); if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1544,18 +1600,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF); /* More than one tags are not supported. */ - /** - * Check if the next not void item is not vlan. - */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); - if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } else if (item->type != RTE_FLOW_ITEM_TYPE_END) { + /* Next not void item must be END */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1564,7 +1611,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, } } - /* Get the IP info. */ + /* Get the IPV4 info. */ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) { /** * Set the flow type even if there's no content @@ -1624,12 +1671,104 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is * TCP or UDP or SCTP or END. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP && - item->type != RTE_FLOW_ITEM_TYPE_END) { + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the IPV6 info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->ixgbe_fdir.formatted.flow_type = + IXGBE_ATR_FLOW_TYPE_IPV6; + + /** + * 1. must signature match + * 2. not support last + * 3. 
mask must not null + */ + if (rule->mode != RTE_FDIR_MODE_SIGNATURE || + item->last || + !item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + rule->b_mask = TRUE; + ipv6_mask = + (const struct rte_flow_item_ipv6 *)item->mask; + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check src addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) { + rule->mask.src_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.src_addr[j] != 0) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* check dst addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) { + rule->mask.dst_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.dst_addr[j] != 0) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + if (item->spec) { + rule->b_spec = TRUE; + ipv6_spec = + (const struct rte_flow_item_ipv6 *)item->spec; + rte_memcpy(rule->ixgbe_fdir.formatted.src_ip, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip, + ipv6_spec->hdr.dst_addr, 16); + } + + /** + * Check if the next not void item is + * TCP or UDP or SCTP or END. + */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1644,8 +1783,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Set the flow type even if there's no content * as we must have a flow type. */ - rule->ixgbe_fdir.formatted.flow_type = - IXGBE_ATR_FLOW_TYPE_TCPV4; + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_TCP; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, @@ -1690,6 +1829,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, rule->ixgbe_fdir.formatted.dst_port = tcp_spec->hdr.dst_port; } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } /* Get the UDP info */ @@ -1698,8 +1848,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Set the flow type even if there's no content * as we must have a flow type. 
*/ - rule->ixgbe_fdir.formatted.flow_type = - IXGBE_ATR_FLOW_TYPE_UDPV4; + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_UDP; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, @@ -1739,6 +1889,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, rule->ixgbe_fdir.formatted.dst_port = udp_spec->hdr.dst_port; } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } /* Get the SCTP info */ @@ -1747,8 +1908,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Set the flow type even if there's no content * as we must have a flow type. */ - rule->ixgbe_fdir.formatted.flow_type = - IXGBE_ATR_FLOW_TYPE_SCTPV4; + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_SCTP; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, @@ -1756,46 +1917,147 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, item, "Not supported last point for range"); return -rte_errno; } - /** - * Only care about src & dst ports, - * others should be masked. - */ - if (!item->mask) { + + /* only x550 family only support sctp port */ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { + /** + * Only care about src & dst ports, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + sctp_mask = + (const struct rte_flow_item_sctp *)item->mask; + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.src_port_mask = sctp_mask->hdr.src_port; + rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; + + if (item->spec) { + rule->b_spec = TRUE; + sctp_spec = + (const struct rte_flow_item_sctp *)item->spec; + rule->ixgbe_fdir.formatted.src_port = + sctp_spec->hdr.src_port; + rule->ixgbe_fdir.formatted.dst_port = + sctp_spec->hdr.dst_port; + } + /* others even sctp port is not supported */ + } else { + sctp_mask = + (const struct rte_flow_item_sctp *)item->mask; + if (sctp_mask && + (sctp_mask->hdr.src_port || + sctp_mask->hdr.dst_port || + sctp_mask->hdr.tag || + sctp_mask->hdr.cksum)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by fdir filter"); return -rte_errno; } - rule->b_mask = TRUE; - sctp_mask = - (const struct rte_flow_item_sctp *)item->mask; - if (sctp_mask->hdr.tag || - sctp_mask->hdr.cksum) { + } + + /* Get the flex byte info */ + if (item->type == RTE_FLOW_ITEM_TYPE_RAW) { + /* Not supported last point for range*/ + if (item->last) { + 
rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* mask should not be null */ + if (!item->mask || !item->spec) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by fdir filter"); return -rte_errno; } - rule->mask.src_port_mask = sctp_mask->hdr.src_port; - rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; - if (item->spec) { - rule->b_spec = TRUE; - sctp_spec = - (const struct rte_flow_item_sctp *)item->spec; - rule->ixgbe_fdir.formatted.src_port = - sctp_spec->hdr.src_port; - rule->ixgbe_fdir.formatted.dst_port = - sctp_spec->hdr.dst_port; + raw_mask = (const struct rte_flow_item_raw *)item->mask; + + /* check mask */ + if (raw_mask->relative != 0x1 || + raw_mask->search != 0x1 || + raw_mask->reserved != 0x0 || + (uint32_t)raw_mask->offset != 0xffffffff || + raw_mask->limit != 0xffff || + raw_mask->length != 0xffff) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; } + + raw_spec = (const struct rte_flow_item_raw *)item->spec; + + /* check spec */ + if (raw_spec->relative != 0 || + raw_spec->search != 0 || + raw_spec->reserved != 0 || + raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF || + raw_spec->offset % 2 || + raw_spec->limit != 0 || + raw_spec->length != 2 || + /* pattern can't be 0xffff */ + (raw_spec->pattern[0] == 0xff && + raw_spec->pattern[1] == 0xff)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check pattern mask */ + if (raw_mask->pattern[0] != 0xff || + raw_mask->pattern[1] != 0xff) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + rule->mask.flex_bytes_mask = 0xffff; + rule->ixgbe_fdir.formatted.flex_bytes = + (((uint16_t)raw_spec->pattern[1]) << 8) | + raw_spec->pattern[0]; + rule->flex_bytes_offset = raw_spec->offset; } if (item->type != RTE_FLOW_ITEM_TYPE_END) { /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -1863,7 +2125,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, const struct rte_flow_item_eth *eth_mask; const struct rte_flow_item_vlan *vlan_spec; const struct rte_flow_item_vlan *vlan_mask; - uint32_t index, j; + uint32_t j; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1894,14 +2156,11 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask)); rule->mask.vlan_tci_mask = 0; - /* parse pattern */ - index = 0; - /** * The first not void item should be * MAC or IPv4 or IPv6 or UDP or VxLAN. 
*/ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6 && @@ -1927,7 +2186,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, item, "Not supported by fdir filter"); return -rte_errno; } - /*Not supported last point for range*/ + /* Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -1936,8 +2195,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* Check if the next not void item is IPv4 or IPv6. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1968,8 +2226,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* Check if the next not void item is UDP or NVGRE. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_NVGRE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1999,8 +2256,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* Check if the next not void item is VxLAN. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2156,8 +2412,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* check if the next not void item is MAC */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2240,8 +2495,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, * Check if the next not void item is vlan or ipv4. * IPv6 is not supported. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) && (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2277,8 +2531,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* More than one tags are not supported. 
*/ /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2316,7 +2569,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, hw->mac.type != ixgbe_mac_X550EM_a) return -ENOTSUP; - ret = ixgbe_parse_fdir_filter_normal(attr, pattern, + ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern, actions, rule, error); if (!ret) @@ -2325,10 +2578,24 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern, actions, rule, error); + if (ret) + return ret; + step_next: + + if (hw->mac.type == ixgbe_mac_82599EB && + rule->fdirflags == IXGBE_FDIRCMD_DROP && + (rule->ixgbe_fdir.formatted.src_port != 0 || + rule->ixgbe_fdir.formatted.dst_port != 0)) + return -ENOTSUP; + if (fdir_mode == RTE_FDIR_MODE_NONE || fdir_mode != rule->mode) return -ENOTSUP; + + if (rule->queue >= dev->data->nb_rx_queues) + return -ENOTSUP; + return ret; } @@ -2414,6 +2681,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; struct ixgbe_fdir_rule_ele *fdir_rule_ptr; struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + uint8_t first_mask = FALSE; flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0); if (!flow) { @@ -2505,11 +2773,19 @@ ixgbe_flow_create(struct rte_eth_dev *dev, rte_memcpy(&fdir_info->mask, &fdir_rule.mask, sizeof(struct ixgbe_hw_fdir_mask)); + fdir_info->flex_bytes_offset = + fdir_rule.flex_bytes_offset; + + if (fdir_rule.mask.flex_bytes_mask) + ixgbe_fdir_set_flexbytes_offset(dev, + fdir_rule.flex_bytes_offset); + ret = ixgbe_fdir_set_input_mask(dev); if (ret) goto out; fdir_info->mask_added = TRUE; + first_mask = TRUE; } else { /** * Only support one global mask, @@ -2520,6 +2796,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev, sizeof(struct ixgbe_hw_fdir_mask)); if (ret) goto out; + + if (fdir_info->flex_bytes_offset != + fdir_rule.flex_bytes_offset) + goto out; } } @@ -2540,8 +2820,15 @@ ixgbe_flow_create(struct rte_eth_dev *dev, return flow; } - if (ret) + if (ret) { + /** + * clean the mask_added flag if fail to + * program + **/ + if (first_mask) + fdir_info->mask_added = FALSE; goto out; + } } goto out; @@ -2583,7 +2870,7 @@ out: * the HW. Because there can be no enough room for the rule. 
*/ static int -ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev, +ixgbe_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], @@ -2774,9 +3061,8 @@ ixgbe_flow_flush(struct rte_eth_dev *dev, } const struct rte_flow_ops ixgbe_flow_ops = { - ixgbe_flow_validate, - ixgbe_flow_create, - ixgbe_flow_destroy, - ixgbe_flow_flush, - NULL, + .validate = ixgbe_flow_validate, + .create = ixgbe_flow_create, + .destroy = ixgbe_flow_destroy, + .flush = ixgbe_flow_flush, }; diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c index d88832e5..c0d86c76 100644 --- a/drivers/net/ixgbe/ixgbe_pf.c +++ b/drivers/net/ixgbe/ixgbe_pf.c @@ -61,7 +61,7 @@ static inline uint16_t dev_num_vf(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); return pci_dev->max_vfs; } @@ -511,7 +511,7 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) } static int -ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf) +ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = @@ -683,7 +683,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); - struct rte_pmd_ixgbe_mb_event_param cb_param; + struct rte_pmd_ixgbe_mb_event_param ret_param; retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); if (retval) { @@ -702,10 +702,10 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf) * initialise structure to send to user application * will return response from user in retval field */ - cb_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED; - cb_param.vfid = vf; - cb_param.msg_type = msgbuf[0] & 0xFFFF; - cb_param.msg = (void *)msgbuf; + ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED; + ret_param.vfid = vf; + ret_param.msg_type = msgbuf[0] & 0xFFFF; + ret_param.msg = (void *)msgbuf; /* perform VF reset */ if (msgbuf[0] == IXGBE_VF_RESET) { @@ -714,20 +714,22 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf) vfinfo[vf].clear_to_send = true; /* notify application about VF reset */ - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, + NULL, &ret_param); return ret; } /** * ask user application if we allowed to perform those functions - * if we get cb_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED + * if we get ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED * then business as usual, * if 0, do nothing and send ACK to VF - * if cb_param.retval > 1, do nothing and send NAK to VF + * if ret_param.retval > 1, do nothing and send NAK to VF */ - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, + NULL, &ret_param); - retval = cb_param.retval; + retval = ret_param.retval; /* check & process VF to PF mailbox message */ switch ((msgbuf[0] & 0xFFFF)) { diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index 1e078959..64bff258 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -126,7 +126,7 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void 
*tx_queue, struct rte_mbuf **tx_pkts, * Check for descriptors with their DD bit set and free mbufs. * Return the total number of buffers freed. */ -static inline int __attribute__((always_inline)) +static __rte_always_inline int ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) { struct ixgbe_tx_entry *txep; @@ -1084,282 +1084,279 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD -#define IXGBE_PACKET_TYPE_MAX 0X80 -#define IXGBE_PACKET_TYPE_TN_MAX 0X100 -#define IXGBE_PACKET_TYPE_SHIFT 0X04 +/** + * Use 2 different table for normal packet and tunnel packet + * to save the space. + */ +const uint32_t + ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER, + [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4, + [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | 
RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] = + RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, +}; + +const uint32_t + ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + 
[IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, + + [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = 
RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, +}; /* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. 
*/ static inline uint32_t ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) { - /** - * Use 2 different table for normal packet and tunnel packet - * to save the space. - */ - static const uint32_t - ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = { - [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER, - [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4, - [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT, - [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6, - [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6, - [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER 
| - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] = - RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, - }; - - static const uint32_t - ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = { - [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER, - [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT, - [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | - RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | - RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | - RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] = - RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | - RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | - RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 
| - RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | - RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | - RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] = - RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | - RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | - RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | - RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | - RTE_PTYPE_INNER_L4_UDP, - - [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER, - [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT, - [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | 
RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] = - RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] = - RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, - }; if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) return RTE_PTYPE_UNKNOWN; @@ -4111,10 +4108,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) return -ENOMEM; } - rte_mbuf_refcnt_set(mbuf, 1); - mbuf->next = NULL; mbuf->data_off = RTE_PKTMBUF_HEADROOM; - mbuf->nb_segs = 1; mbuf->port = rxq->port_id; dma_addr = diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h index 1ffab4cc..85feb0bd 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.h +++ b/drivers/net/ixgbe/ixgbe_rxtx.h @@ -87,6 +87,10 @@ #define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF #define IXGBE_PACKET_TYPE_TUNNEL_BIT 0X1000 +#define IXGBE_PACKET_TYPE_MAX 
0X80 +#define IXGBE_PACKET_TYPE_TN_MAX 0X100 +#define IXGBE_PACKET_TYPE_SHIFT 0X04 + /** * Structure associated with each descriptor of the RX ring of a RX queue. */ @@ -309,6 +313,9 @@ int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev); int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq); void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq); +extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX]; +extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX]; + #ifdef RTE_IXGBE_INC_VECTOR uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h index 1c34bb5f..9fc112b1 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h @@ -101,7 +101,7 @@ reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs, return pkt_idx; } -static inline int __attribute__((always_inline)) +static __rte_always_inline int ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) { struct ixgbe_tx_entry_v *txep; @@ -158,7 +158,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) return txq->tx_rs_thresh; } -static inline void __attribute__((always_inline)) +static __rte_always_inline void tx_backlog_entry(struct ixgbe_tx_entry_v *txep, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c index a7bc199f..e704a7f3 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c @@ -87,6 +87,8 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) mb1 = rxep[1].mbuf; /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + offsetof(struct rte_mbuf, buf_addr) + 8); vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr)); vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr)); @@ -214,32 +216,84 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, * appropriate flags means that we have to do a shift and blend for * each mbuf before we do the write. 
*/ -#ifdef RTE_MACHINE_CPUFLAG_SSE4_2 - rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10); rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10); rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10); rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10); -#else - rearm0 = _mm_slli_si128(vtag1, 14); - rearm1 = _mm_slli_si128(vtag1, 12); - rearm2 = _mm_slli_si128(vtag1, 10); - rearm3 = _mm_slli_si128(vtag1, 8); - - rearm0 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm0, 48)); - rearm1 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm1, 48)); - rearm2 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm2, 48)); - rearm3 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm3, 48)); - -#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */ - + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0); _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1); _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2); _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3); } +static inline uint32_t get_packet_type(int index, + uint32_t pkt_info, + uint32_t etqf_check, + uint32_t tunnel_check) +{ + if (etqf_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) + return RTE_PTYPE_UNKNOWN; + + if (tunnel_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) { + pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL; + return ptype_table_tn[pkt_info]; + } + + pkt_info &= IXGBE_PACKET_TYPE_MASK_82599; + return ptype_table[pkt_info]; +} + +static inline void +desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask, + struct rte_mbuf **rx_pkts) +{ + __m128i etqf_mask = _mm_set_epi64x(0x800000008000LL, 0x800000008000LL); + __m128i ptype_mask = _mm_set_epi32( + pkt_type_mask, pkt_type_mask, pkt_type_mask, pkt_type_mask); + __m128i tunnel_mask = + _mm_set_epi64x(0x100000001000LL, 0x100000001000LL); + + uint32_t etqf_check, tunnel_check, pkt_info; + + __m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]); + __m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]); + + /* interleave low 32 bits, + * now we have 4 ptypes in a XMM register + */ + ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); + + /* create a etqf bitmask based on the etqf bit. 
*/ + etqf_check = _mm_movemask_epi8(_mm_and_si128(ptype0, etqf_mask)); + + /* shift left by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */ + ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, IXGBE_PACKET_TYPE_SHIFT), + ptype_mask); + + /* create a tunnel bitmask based on the tunnel bit */ + tunnel_check = _mm_movemask_epi8( + _mm_slli_epi32(_mm_and_si128(ptype0, tunnel_mask), 0x3)); + + pkt_info = _mm_extract_epi32(ptype0, 0); + rx_pkts[0]->packet_type = + get_packet_type(0, pkt_info, etqf_check, tunnel_check); + pkt_info = _mm_extract_epi32(ptype0, 1); + rx_pkts[1]->packet_type = + get_packet_type(1, pkt_info, etqf_check, tunnel_check); + pkt_info = _mm_extract_epi32(ptype0, 2); + rx_pkts[2]->packet_type = + get_packet_type(2, pkt_info, etqf_check, tunnel_check); + pkt_info = _mm_extract_epi32(ptype0, 3); + rx_pkts[3]->packet_type = + get_packet_type(3, pkt_info, etqf_check, tunnel_check); +} + /* * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) * @@ -266,6 +320,15 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, -rxq->crc_len, /* sub crc on pkt_len */ 0, 0 /* ignore pkt_type field */ ); + /* + * compile-time check the above crc_adjust layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi16 + * call above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); __m128i dd_check, eop_check; __m128i mbuf_init; uint8_t vlan_flags; @@ -312,6 +375,19 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, 0xFF, 0xFF, /* skip 32 bit pkt_type */ 0xFF, 0xFF ); + /* + * Compile-time verify the shuffle mask + * NOTE: some field positions already verified above, but duplicated + * here for completeness in case of future modifications. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer); @@ -447,6 +523,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, pkt_mb1); + desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]); + /* C.4 calc avaialbe number of desc */ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); nb_pkts_recd += var; diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c new file mode 100644 index 00000000..cdcf45cb --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_tm.c @@ -0,0 +1,1043 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "ixgbe_ethdev.h" + +static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error); +static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error); +static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error); +static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error); +static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error); +static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error); +static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error); +static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error); +static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error); + +const struct rte_tm_ops ixgbe_tm_ops = { + .capabilities_get = ixgbe_tm_capabilities_get, + .shaper_profile_add = ixgbe_shaper_profile_add, + .shaper_profile_delete = ixgbe_shaper_profile_del, + .node_add = ixgbe_node_add, + .node_delete = ixgbe_node_delete, + .node_type_get = ixgbe_node_type_get, + .level_capabilities_get = ixgbe_level_capabilities_get, + .node_capabilities_get = ixgbe_node_capabilities_get, + .hierarchy_commit = ixgbe_hierarchy_commit, +}; + +int +ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused, + void *arg) +{ + if (!arg) + return -EINVAL; + + *(const void **)arg = &ixgbe_tm_ops; + + return 0; +} + +void +ixgbe_tm_conf_init(struct rte_eth_dev *dev) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + + /* initialize shaper profile list */ + TAILQ_INIT(&tm_conf->shaper_profile_list); + + /* initialize node configuration */ + tm_conf->root = NULL; + TAILQ_INIT(&tm_conf->queue_list); + 
TAILQ_INIT(&tm_conf->tc_list); + tm_conf->nb_tc_node = 0; + tm_conf->nb_queue_node = 0; + tm_conf->committed = false; +} + +void +ixgbe_tm_conf_uninit(struct rte_eth_dev *dev) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_shaper_profile *shaper_profile; + struct ixgbe_tm_node *tm_node; + + /* clear node configuration */ + while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) { + TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node); + rte_free(tm_node); + } + tm_conf->nb_queue_node = 0; + while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) { + TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node); + rte_free(tm_node); + } + tm_conf->nb_tc_node = 0; + if (tm_conf->root) { + rte_free(tm_conf->root); + tm_conf->root = NULL; + } + + /* Remove all shaper profiles */ + while ((shaper_profile = + TAILQ_FIRST(&tm_conf->shaper_profile_list))) { + TAILQ_REMOVE(&tm_conf->shaper_profile_list, + shaper_profile, node); + rte_free(shaper_profile); + } +} + +static inline uint8_t +ixgbe_tc_nb_get(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *eth_conf; + uint8_t nb_tcs = 0; + + eth_conf = &dev->data->dev_conf; + if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { + nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs; + } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { + if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools == + ETH_32_POOLS) + nb_tcs = ETH_4_TCS; + else + nb_tcs = ETH_8_TCS; + } else { + nb_tcs = 1; + } + + return nb_tcs; +} + +static int +ixgbe_tm_capabilities_get(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t tc_nb = ixgbe_tc_nb_get(dev); + + if (!cap || !error) + return -EINVAL; + + if (tc_nb > hw->mac.max_tx_queues) + return -EINVAL; + + error->type = RTE_TM_ERROR_TYPE_NONE; + + /* set all the parameters to 0 first. */ + memset(cap, 0, sizeof(struct rte_tm_capabilities)); + + /** + * here is the max capability not the current configuration. + */ + /* port + TCs + queues */ + cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS + + hw->mac.max_tx_queues; + cap->n_levels_max = 3; + cap->non_leaf_nodes_identical = 1; + cap->leaf_nodes_identical = 1; + cap->shaper_n_max = cap->n_nodes_max; + cap->shaper_private_n_max = cap->n_nodes_max; + cap->shaper_private_dual_rate_n_max = 0; + cap->shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->shaper_private_rate_max = 1250000000ull; + cap->shaper_shared_n_max = 0; + cap->shaper_shared_n_nodes_per_shaper_max = 0; + cap->shaper_shared_n_shapers_per_node_max = 0; + cap->shaper_shared_dual_rate_n_max = 0; + cap->shaper_shared_rate_min = 0; + cap->shaper_shared_rate_max = 0; + cap->sched_n_children_max = hw->mac.max_tx_queues; + /** + * HW supports SP. But no plan to support it now. + * So, all the nodes should have the same priority. + */ + cap->sched_sp_n_priorities_max = 1; + cap->sched_wfq_n_children_per_group_max = 0; + cap->sched_wfq_n_groups_max = 0; + /** + * SW only supports fair round robin now. + * So, all the nodes should have the same weight. 
+ */ + cap->sched_wfq_weight_max = 1; + cap->cman_head_drop_supported = 0; + cap->dynamic_update_mask = 0; + cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD; + cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + cap->cman_wred_context_n_max = 0; + cap->cman_wred_context_private_n_max = 0; + cap->cman_wred_context_shared_n_max = 0; + cap->cman_wred_context_shared_n_nodes_per_context_max = 0; + cap->cman_wred_context_shared_n_contexts_per_node_max = 0; + cap->stats_mask = 0; + + return 0; +} + +static inline struct ixgbe_tm_shaper_profile * +ixgbe_shaper_profile_search(struct rte_eth_dev *dev, + uint32_t shaper_profile_id) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_shaper_profile_list *shaper_profile_list = + &tm_conf->shaper_profile_list; + struct ixgbe_tm_shaper_profile *shaper_profile; + + TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) { + if (shaper_profile_id == shaper_profile->shaper_profile_id) + return shaper_profile; + } + + return NULL; +} + +static int +ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + /* min rate not supported */ + if (profile->committed.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE; + error->message = "committed rate not supported"; + return -EINVAL; + } + /* min bucket size not supported */ + if (profile->committed.size) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE; + error->message = "committed bucket size not supported"; + return -EINVAL; + } + /* max bucket size not supported */ + if (profile->peak.size) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE; + error->message = "peak bucket size not supported"; + return -EINVAL; + } + /* length adjustment not supported */ + if (profile->pkt_length_adjust) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN; + error->message = "packet length adjustment not supported"; + return -EINVAL; + } + + return 0; +} + +static int +ixgbe_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_shaper_profile *shaper_profile; + int ret; + + if (!profile || !error) + return -EINVAL; + + ret = ixgbe_shaper_profile_param_check(profile, error); + if (ret) + return ret; + + shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id); + + if (shaper_profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "profile ID exist"; + return -EINVAL; + } + + shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile", + sizeof(struct ixgbe_tm_shaper_profile), + 0); + if (!shaper_profile) + return -ENOMEM; + shaper_profile->shaper_profile_id = shaper_profile_id; + (void)rte_memcpy(&shaper_profile->profile, profile, + sizeof(struct rte_tm_shaper_params)); + TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list, + shaper_profile, node); + + return 0; +} + +static int +ixgbe_shaper_profile_del(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_shaper_profile *shaper_profile; + + if (!error) + return -EINVAL; + + shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id); + + if (!shaper_profile) { + error->type = 
RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "profile ID not exist"; + return -EINVAL; + } + + /* don't delete a profile if it's used by one or several nodes */ + if (shaper_profile->reference_count) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "profile in use"; + return -EINVAL; + } + + TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node); + rte_free(shaper_profile); + + return 0; +} + +static inline struct ixgbe_tm_node * +ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id, + enum ixgbe_tm_node_type *node_type) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_node *tm_node; + + if (tm_conf->root && tm_conf->root->id == node_id) { + *node_type = IXGBE_TM_NODE_TYPE_PORT; + return tm_conf->root; + } + + TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) { + if (tm_node->id == node_id) { + *node_type = IXGBE_TM_NODE_TYPE_TC; + return tm_node; + } + } + + TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) { + if (tm_node->id == node_id) { + *node_type = IXGBE_TM_NODE_TYPE_QUEUE; + return tm_node; + } + } + + return NULL; +} + +static void +ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no, + uint16_t *base, uint16_t *nb) +{ + uint8_t nb_tcs = ixgbe_tc_nb_get(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint16_t vf_num = pci_dev->max_vfs; + + *base = 0; + *nb = 0; + + /* VT on */ + if (vf_num) { + /* no DCB */ + if (nb_tcs == 1) { + if (vf_num >= ETH_32_POOLS) { + *nb = 2; + *base = vf_num * 2; + } else if (vf_num >= ETH_16_POOLS) { + *nb = 4; + *base = vf_num * 4; + } else { + *nb = 8; + *base = vf_num * 8; + } + } else { + /* DCB */ + *nb = 1; + *base = vf_num * nb_tcs + tc_node_no; + } + } else { + /* VT off */ + if (nb_tcs == ETH_8_TCS) { + switch (tc_node_no) { + case 0: + *base = 0; + *nb = 32; + break; + case 1: + *base = 32; + *nb = 32; + break; + case 2: + *base = 64; + *nb = 16; + break; + case 3: + *base = 80; + *nb = 16; + break; + case 4: + *base = 96; + *nb = 8; + break; + case 5: + *base = 104; + *nb = 8; + break; + case 6: + *base = 112; + *nb = 8; + break; + case 7: + *base = 120; + *nb = 8; + break; + default: + return; + } + } else { + switch (tc_node_no) { + /** + * If no VF and no DCB, only 64 queues can be used. + * This case also be covered by this "case 0". 
+ */ + case 0: + *base = 0; + *nb = 64; + break; + case 1: + *base = 64; + *nb = 32; + break; + case 2: + *base = 96; + *nb = 16; + break; + case 3: + *base = 112; + *nb = 16; + break; + default: + return; + } + } + } +} + +static int +ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id, + uint32_t priority, uint32_t weight, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + if (priority) { + error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY; + error->message = "priority should be 0"; + return -EINVAL; + } + + if (weight != 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT; + error->message = "weight must be 1"; + return -EINVAL; + } + + /* not support shared shaper */ + if (params->shared_shaper_id) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID; + error->message = "shared shaper not supported"; + return -EINVAL; + } + if (params->n_shared_shapers) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS; + error->message = "shared shaper not supported"; + return -EINVAL; + } + + /* for root node */ + if (parent_node_id == RTE_TM_NODE_ID_NULL) { + /* check the unsupported parameters */ + if (params->nonleaf.wfq_weight_mode) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; + error->message = "WFQ not supported"; + return -EINVAL; + } + if (params->nonleaf.n_sp_priorities != 1) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES; + error->message = "SP priority not supported"; + return -EINVAL; + } else if (params->nonleaf.wfq_weight_mode && + !(*params->nonleaf.wfq_weight_mode)) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; + error->message = "WFP should be byte mode"; + return -EINVAL; + } + + return 0; + } + + /* for TC or queue node */ + /* check the unsupported parameters */ + if (params->leaf.cman) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN; + error->message = "Congestion management not supported"; + return -EINVAL; + } + if (params->leaf.wred.wred_profile_id != + RTE_TM_WRED_PROFILE_ID_NONE) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID; + error->message = "WRED not supported"; + return -EINVAL; + } + if (params->leaf.wred.shared_wred_context_id) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID; + error->message = "WRED not supported"; + return -EINVAL; + } + if (params->leaf.wred.n_shared_wred_contexts) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS; + error->message = "WRED not supported"; + return -EINVAL; + } + + return 0; +} + +/** + * Now the TC and queue configuration is controlled by DCB. + * We need check if the node configuration follows the DCB configuration. + * In the future, we may use TM to cover DCB. 
+ */ +static int +ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX; + enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX; + struct ixgbe_tm_shaper_profile *shaper_profile; + struct ixgbe_tm_node *tm_node; + struct ixgbe_tm_node *parent_node; + uint8_t nb_tcs; + uint16_t q_base = 0; + uint16_t q_nb = 0; + int ret; + + if (!params || !error) + return -EINVAL; + + /* if already committed */ + if (tm_conf->committed) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "already committed"; + return -EINVAL; + } + + ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight, + params, error); + if (ret) + return ret; + + /* check if the node ID is already used */ + if (ixgbe_tm_node_search(dev, node_id, &node_type)) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "node id already used"; + return -EINVAL; + } + + /* check the shaper profile id */ + shaper_profile = ixgbe_shaper_profile_search(dev, + params->shaper_profile_id); + if (!shaper_profile) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; + error->message = "shaper profile not exist"; + return -EINVAL; + } + + /* root node if not have a parent */ + if (parent_node_id == RTE_TM_NODE_ID_NULL) { + /* check level */ + if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && + level_id > IXGBE_TM_NODE_TYPE_PORT) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "Wrong level"; + return -EINVAL; + } + + /* obviously no more than one root */ + if (tm_conf->root) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "already have a root"; + return -EINVAL; + } + + /* add the root node */ + tm_node = rte_zmalloc("ixgbe_tm_node", + sizeof(struct ixgbe_tm_node), + 0); + if (!tm_node) + return -ENOMEM; + tm_node->id = node_id; + tm_node->priority = priority; + tm_node->weight = weight; + tm_node->reference_count = 0; + tm_node->no = 0; + tm_node->parent = NULL; + tm_node->shaper_profile = shaper_profile; + (void)rte_memcpy(&tm_node->params, params, + sizeof(struct rte_tm_node_params)); + tm_conf->root = tm_node; + + /* increase the reference counter of the shaper profile */ + shaper_profile->reference_count++; + + return 0; + } + + /* TC or queue node */ + /* check the parent node */ + parent_node = ixgbe_tm_node_search(dev, parent_node_id, + &parent_node_type); + if (!parent_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "parent not exist"; + return -EINVAL; + } + if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT && + parent_node_type != IXGBE_TM_NODE_TYPE_TC) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "parent is not port or TC"; + return -EINVAL; + } + /* check level */ + if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && + level_id != parent_node_type + 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "Wrong level"; + return -EINVAL; + } + + /* check the node number */ + if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) { + /* check TC number */ + nb_tcs = ixgbe_tc_nb_get(dev); + if (tm_conf->nb_tc_node >= nb_tcs) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many TCs"; + return -EINVAL; + } + } else { + /* check queue number */ + if 
(tm_conf->nb_queue_node >= dev->data->nb_tx_queues) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many queues"; + return -EINVAL; + } + + ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb); + if (parent_node->reference_count >= q_nb) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many queues than TC supported"; + return -EINVAL; + } + + /** + * check the node id. + * For queue, the node id means queue id. + */ + if (node_id >= dev->data->nb_tx_queues) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too large queue id"; + return -EINVAL; + } + } + + /* add the TC or queue node */ + tm_node = rte_zmalloc("ixgbe_tm_node", + sizeof(struct ixgbe_tm_node), + 0); + if (!tm_node) + return -ENOMEM; + tm_node->id = node_id; + tm_node->priority = priority; + tm_node->weight = weight; + tm_node->reference_count = 0; + tm_node->parent = parent_node; + tm_node->shaper_profile = shaper_profile; + (void)rte_memcpy(&tm_node->params, params, + sizeof(struct rte_tm_node_params)); + if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) { + tm_node->no = parent_node->reference_count; + TAILQ_INSERT_TAIL(&tm_conf->tc_list, + tm_node, node); + tm_conf->nb_tc_node++; + } else { + tm_node->no = q_base + parent_node->reference_count; + TAILQ_INSERT_TAIL(&tm_conf->queue_list, + tm_node, node); + tm_conf->nb_queue_node++; + } + tm_node->parent->reference_count++; + + /* increase the reference counter of the shaper profile */ + shaper_profile->reference_count++; + + return 0; +} + +static int +ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX; + struct ixgbe_tm_node *tm_node; + + if (!error) + return -EINVAL; + + /* if already committed */ + if (tm_conf->committed) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "already committed"; + return -EINVAL; + } + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check the if the node id exists */ + tm_node = ixgbe_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* the node should have no child */ + if (tm_node->reference_count) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = + "cannot delete a node which has children"; + return -EINVAL; + } + + /* root node */ + if (node_type == IXGBE_TM_NODE_TYPE_PORT) { + tm_node->shaper_profile->reference_count--; + rte_free(tm_node); + tm_conf->root = NULL; + return 0; + } + + /* TC or queue node */ + tm_node->shaper_profile->reference_count--; + tm_node->parent->reference_count--; + if (node_type == IXGBE_TM_NODE_TYPE_TC) { + TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node); + tm_conf->nb_tc_node--; + } else { + TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node); + tm_conf->nb_queue_node--; + } + rte_free(tm_node); + + return 0; +} + +static int +ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error) +{ + enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX; + struct ixgbe_tm_node *tm_node; + + if (!is_leaf || !error) + return -EINVAL; + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } 
+ + /* check if the node id exists */ + tm_node = ixgbe_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) + *is_leaf = true; + else + *is_leaf = false; + + return 0; +} + +static int +ixgbe_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!cap || !error) + return -EINVAL; + + if (level_id >= IXGBE_TM_NODE_TYPE_MAX) { + error->type = RTE_TM_ERROR_TYPE_LEVEL_ID; + error->message = "too deep level"; + return -EINVAL; + } + + /* root node */ + if (level_id == IXGBE_TM_NODE_TYPE_PORT) { + cap->n_nodes_max = 1; + cap->n_nodes_nonleaf_max = 1; + cap->n_nodes_leaf_max = 0; + cap->non_leaf_nodes_identical = true; + cap->leaf_nodes_identical = true; + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = false; + cap->nonleaf.shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->nonleaf.shaper_private_rate_max = 1250000000ull; + cap->nonleaf.shaper_shared_n_max = 0; + cap->nonleaf.sched_n_children_max = IXGBE_DCB_MAX_TRAFFIC_CLASS; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 1; + cap->nonleaf.stats_mask = 0; + + return 0; + } + + /* TC or queue node */ + if (level_id == IXGBE_TM_NODE_TYPE_TC) { + /* TC */ + cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS; + cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS; + cap->n_nodes_leaf_max = 0; + cap->non_leaf_nodes_identical = true; + } else { + /* queue */ + cap->n_nodes_max = hw->mac.max_tx_queues; + cap->n_nodes_nonleaf_max = 0; + cap->n_nodes_leaf_max = hw->mac.max_tx_queues; + cap->non_leaf_nodes_identical = true; + } + cap->leaf_nodes_identical = true; + cap->leaf.shaper_private_supported = true; + cap->leaf.shaper_private_dual_rate_supported = false; + cap->leaf.shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->leaf.shaper_private_rate_max = 1250000000ull; + cap->leaf.shaper_shared_n_max = 0; + cap->leaf.cman_head_drop_supported = false; + cap->leaf.cman_wred_context_private_supported = true; + cap->leaf.cman_wred_context_shared_n_max = 0; + cap->leaf.stats_mask = 0; + + return 0; +} + +static int +ixgbe_node_capabilities_get(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX; + struct ixgbe_tm_node *tm_node; + + if (!cap || !error) + return -EINVAL; + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check if the node id exists */ + tm_node = ixgbe_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + cap->shaper_private_supported = true; + cap->shaper_private_dual_rate_supported = false; + cap->shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->shaper_private_rate_max = 1250000000ull; + cap->shaper_shared_n_max = 0; + + if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) { + cap->leaf.cman_head_drop_supported = 
false; + cap->leaf.cman_wred_context_private_supported = true; + cap->leaf.cman_wred_context_shared_n_max = 0; + } else { + if (node_type == IXGBE_TM_NODE_TYPE_PORT) + cap->nonleaf.sched_n_children_max = + IXGBE_DCB_MAX_TRAFFIC_CLASS; + else + cap->nonleaf.sched_n_children_max = + hw->mac.max_tx_queues; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 1; + } + + cap->stats_mask = 0; + + return 0; +} + +static int +ixgbe_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_node *tm_node; + uint64_t bw; + int ret; + + if (!error) + return -EINVAL; + + /* check the setting */ + if (!tm_conf->root) + goto done; + + /* not support port max bandwidth yet */ + if (tm_conf->root->shaper_profile->profile.peak.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "no port max bandwidth"; + goto fail_clear; + } + + /* HW not support TC max bandwidth */ + TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) { + if (tm_node->shaper_profile->profile.peak.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "no TC max bandwidth"; + goto fail_clear; + } + } + + /* queue max bandwidth */ + TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) { + bw = tm_node->shaper_profile->profile.peak.rate; + if (bw) { + /* interpret Bps to Mbps */ + bw = bw * 8 / 1000 / 1000; + ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw); + if (ret) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = + "failed to set queue max bandwidth"; + goto fail_clear; + } + } + } + +done: + tm_conf->committed = true; + return 0; + +fail_clear: + /* clear all the traffic manager configuration */ + if (clear_on_fail) { + ixgbe_tm_conf_uninit(dev); + ixgbe_tm_conf_init(dev); + } + return -EINVAL; +} diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c index e8fc9a64..79897ff6 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.c +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c @@ -51,7 +51,7 @@ rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf, RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!is_ixgbe_supported(dev)) return -ENOTSUP; @@ -84,7 +84,7 @@ rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf) RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!is_ixgbe_supported(dev)) return -ENOTSUP; @@ -115,7 +115,7 @@ rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!is_ixgbe_supported(dev)) return -ENOTSUP; @@ -145,7 +145,7 @@ rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!is_ixgbe_supported(dev)) return -ENOTSUP; @@ -174,7 +174,7 @@ rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id) RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + 
pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!is_ixgbe_supported(dev)) return -ENOTSUP; @@ -270,7 +270,7 @@ rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on) RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!is_ixgbe_supported(dev)) return -ENOTSUP; @@ -306,7 +306,7 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on) RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (!is_ixgbe_supported(dev)) @@ -354,7 +354,7 @@ rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!is_ixgbe_supported(dev)) return -ENOTSUP; @@ -401,7 +401,7 @@ rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on) RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!is_ixgbe_supported(dev)) return -ENOTSUP; @@ -452,7 +452,7 @@ rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on) RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; - pci_dev = IXGBE_DEV_TO_PCI(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!is_ixgbe_supported(dev)) return -ENOTSUP; @@ -908,3 +908,136 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port, return 0; } + +#ifdef RTE_LIBRTE_IXGBE_BYPASS +int +rte_pmd_ixgbe_bypass_init(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + ixgbe_bypass_init(dev); + return 0; +} + +int +rte_pmd_ixgbe_bypass_state_show(uint8_t port_id, uint32_t *state) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_state_show(dev, state); +} + +int +rte_pmd_ixgbe_bypass_state_set(uint8_t port_id, uint32_t *new_state) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_state_store(dev, new_state); +} + +int +rte_pmd_ixgbe_bypass_event_show(uint8_t port_id, + uint32_t event, + uint32_t *state) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_event_show(dev, event, state); +} + +int +rte_pmd_ixgbe_bypass_event_store(uint8_t port_id, + uint32_t event, + uint32_t state) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_event_store(dev, event, state); +} + +int +rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port_id, uint32_t timeout) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_wd_timeout_store(dev, timeout); +} + +int +rte_pmd_ixgbe_bypass_ver_show(uint8_t port_id, uint32_t *ver) +{ + 
struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_ver_show(dev, ver); +} + +int +rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_wd_timeout_show(dev, wd_timeout); +} + +int +rte_pmd_ixgbe_bypass_wd_reset(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_wd_reset(dev); +} +#endif diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h index 1f2b1bd7..d33c285d 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.h +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h @@ -427,6 +427,177 @@ int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port, uint8_t tc_num, uint8_t *bw_weight); + +/** + * Initialize bypass logic. This function needs to be called before + * executing any other bypass API. + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_init(uint8_t port); + +/** + * Return bypass state. + * + * @param port + * The port identifier of the Ethernet device. + * @param state + * The return bypass state. + * - (1) Normal mode + * - (2) Bypass mode + * - (3) Isolate mode + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_state_show(uint8_t port, uint32_t *state); + +/** + * Set bypass state + * + * @param port + * The port identifier of the Ethernet device. + * @param new_state + * The current bypass state. + * - (1) Normal mode + * - (2) Bypass mode + * - (3) Isolate mode + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_state_set(uint8_t port, uint32_t *new_state); + +/** + * Return bypass state when given event occurs. + * + * @param port + * The port identifier of the Ethernet device. + * @param event + * The bypass event + * - (1) Main power on (power button is pushed) + * - (2) Auxiliary power on (power supply is being plugged) + * - (3) Main power off (system shutdown and power supply is left plugged in) + * - (4) Auxiliary power off (power supply is being unplugged) + * - (5) Display or set the watchdog timer + * @param state + * The bypass state when given event occurred. + * - (1) Normal mode + * - (2) Bypass mode + * - (3) Isolate mode + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_event_show(uint8_t port, + uint32_t event, + uint32_t *state); + +/** + * Set bypass state when given event occurs. + * + * @param port + * The port identifier of the Ethernet device. 
+ * @param event + * The bypass event + * - (1) Main power on (power button is pushed) + * - (2) Auxiliary power on (power supply is being plugged) + * - (3) Main power off (system shutdown and power supply is left plugged in) + * - (4) Auxiliary power off (power supply is being unplugged) + * - (5) Display or set the watchdog timer + * @param state + * The assigned state when given event occurs. + * - (1) Normal mode + * - (2) Bypass mode + * - (3) Isolate mode + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_event_store(uint8_t port, + uint32_t event, + uint32_t state); + +/** + * Set bypass watchdog timeout count. + * + * @param port + * The port identifier of the Ethernet device. + * @param timeout + * The timeout to be set. + * - (0) 0 seconds (timer is off) + * - (1) 1.5 seconds + * - (2) 2 seconds + * - (3) 3 seconds + * - (4) 4 seconds + * - (5) 8 seconds + * - (6) 16 seconds + * - (7) 32 seconds + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port, uint32_t timeout); + +/** + * Get bypass firmware version. + * + * @param port + * The port identifier of the Ethernet device. + * @param ver + * The firmware version + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_ver_show(uint8_t port, uint32_t *ver); + +/** + * Return bypass watchdog timeout in seconds + * + * @param port + * The port identifier of the Ethernet device. + * @param wd_timeout + * The return watchdog timeout. "0" represents timer expired + * - (0) 0 seconds (timer is off) + * - (1) 1.5 seconds + * - (2) 2 seconds + * - (3) 3 seconds + * - (4) 4 seconds + * - (5) 8 seconds + * - (6) 16 seconds + * - (7) 32 seconds + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout); + +/** + * Reset bypass watchdog timer + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_wd_reset(uint8_t port); + + /** * Response sent back to ixgbe driver from user app after callback */ @@ -446,4 +617,48 @@ struct rte_pmd_ixgbe_mb_event_param { uint16_t retval; /**< return value */ void *msg; /**< pointer to message */ }; +enum { + RTE_PMD_IXGBE_BYPASS_MODE_NONE, + RTE_PMD_IXGBE_BYPASS_MODE_NORMAL, + RTE_PMD_IXGBE_BYPASS_MODE_BYPASS, + RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE, + RTE_PMD_IXGBE_BYPASS_MODE_NUM, +}; + +#define RTE_PMD_IXGBE_BYPASS_MODE_VALID(x) \ + ((x) > RTE_PMD_IXGBE_BYPASS_MODE_NONE && \ + (x) < RTE_PMD_IXGBE_BYPASS_MODE_NUM) + +enum { + RTE_PMD_IXGBE_BYPASS_EVENT_NONE, + RTE_PMD_IXGBE_BYPASS_EVENT_START, + RTE_PMD_IXGBE_BYPASS_EVENT_OS_ON = RTE_PMD_IXGBE_BYPASS_EVENT_START, + RTE_PMD_IXGBE_BYPASS_EVENT_POWER_ON, + RTE_PMD_IXGBE_BYPASS_EVENT_OS_OFF, + RTE_PMD_IXGBE_BYPASS_EVENT_POWER_OFF, + RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT, + RTE_PMD_IXGBE_BYPASS_EVENT_NUM +}; + +#define RTE_PMD_IXGBE_BYPASS_EVENT_VALID(x) \ + ((x) > RTE_PMD_IXGBE_BYPASS_EVENT_NONE && \ + (x) < RTE_PMD_IXGBE_BYPASS_MODE_NUM) + +enum { + RTE_PMD_IXGBE_BYPASS_TMT_OFF, /* timeout disabled. 
*/ + RTE_PMD_IXGBE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_NUM +}; + +#define RTE_PMD_IXGBE_BYPASS_TMT_VALID(x) \ + ((x) == RTE_PMD_IXGBE_BYPASS_TMT_OFF || \ + ((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \ + (x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM)) + #endif /* _PMD_IXGBE_H_ */ diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map index 45a57e33..bf776742 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map +++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map @@ -38,3 +38,17 @@ DPDK_17.05 { rte_pmd_ixgbe_ping_vf; rte_pmd_ixgbe_set_tc_bw_alloc; } DPDK_17.02; + +DPDK_17.08 { + global: + + rte_pmd_ixgbe_bypass_event_show; + rte_pmd_ixgbe_bypass_event_store; + rte_pmd_ixgbe_bypass_init; + rte_pmd_ixgbe_bypass_state_set; + rte_pmd_ixgbe_bypass_state_show; + rte_pmd_ixgbe_bypass_ver_show; + rte_pmd_ixgbe_bypass_wd_reset; + rte_pmd_ixgbe_bypass_wd_timeout_show; + rte_pmd_ixgbe_bypass_wd_timeout_store; +} DPDK_17.05; -- cgit 1.2.3-korg
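
Editor's note, not part of the patch: the new ixgbe_tm.c above plugs into the generic traffic-management API (rte_tm.h) introduced in this release. The sketch below shows one hierarchy that ixgbe_node_add() would accept: a port root, one TC (assuming the port is configured without DCB, so ixgbe_tc_nb_get() returns 1), and one leaf per Tx queue. It assumes the generic rte_tm_shaper_profile_add()/rte_tm_node_add()/rte_tm_hierarchy_commit() wrappers mirror the rte_tm_ops callbacks implemented in this patch; ROOT_ID, TC0_ID and the 125 MB/s queue rate are arbitrary, and error handling is trimmed. Priority 0, weight 1, a zero committed rate and RTE_TM_WRED_PROFILE_ID_NONE are the values ixgbe_node_param_check() and ixgbe_shaper_profile_param_check() require.

/* Hypothetical usage sketch; names ROOT_ID, TC0_ID, tm_demo are not from the patch. */
#include <string.h>
#include <stdint.h>

#include <rte_tm.h>

#define ROOT_ID	1000u	/* any id that is not also a Tx queue id */
#define TC0_ID	2000u

static int
tm_demo(uint8_t port, uint16_t nb_txq)
{
	struct rte_tm_error err;
	struct rte_tm_shaper_params sp;
	struct rte_tm_node_params np;
	uint16_t q;
	int ret;

	/* profile 0: no rate limit (port and TC nodes must not carry a peak rate) */
	memset(&sp, 0, sizeof(sp));
	ret = rte_tm_shaper_profile_add(port, 0, &sp, &err);
	if (ret != 0)
		return ret;

	/* profile 1: 125,000,000 bytes/s peak rate, applied to queue nodes */
	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = 125000000;
	ret = rte_tm_shaper_profile_add(port, 1, &sp, &err);
	if (ret != 0)
		return ret;

	/* root (port) node: priority 0, weight 1 and one SP priority are mandatory */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = 0;
	np.nonleaf.n_sp_priorities = 1;
	ret = rte_tm_node_add(port, ROOT_ID, RTE_TM_NODE_ID_NULL, 0, 1,
			      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret != 0)
		return ret;

	/* one TC node under the port (no DCB, so a single TC is allowed) */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = 0;
	np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
	ret = rte_tm_node_add(port, TC0_ID, ROOT_ID, 0, 1,
			      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret != 0)
		return ret;

	/* queue nodes: the node id must be the Tx queue id */
	for (q = 0; q < nb_txq; q++) {
		memset(&np, 0, sizeof(np));
		np.shaper_profile_id = 1;
		np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
		ret = rte_tm_node_add(port, q, TC0_ID, 0, 1,
				      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
		if (ret != 0)
			return ret;
	}

	/* commit: queue peak rates (Bps) are converted to Mbps and programmed
	 * as per-queue rate limits via ixgbe_set_queue_rate_limit()
	 */
	return rte_tm_hierarchy_commit(port, 1 /* clear_on_fail */, &err);
}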
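
Editor's note, not part of the patch: the rte_pmd_ixgbe_bypass_* calls exported above (DPDK_17.08 map) follow the signatures and mode/timeout constants declared in rte_pmd_ixgbe.h in this patch. A minimal sketch, assuming port 0 is a bypass-capable ixgbe adapter and the PMD was built with CONFIG_RTE_LIBRTE_IXGBE_BYPASS=y; the function name bypass_demo and the 8 second timeout are arbitrary, and error handling is minimal.

/* Hypothetical usage sketch of the bypass API added by this patch. */
#include <stdint.h>
#include <stdio.h>

#include <rte_pmd_ixgbe.h>

static int
bypass_demo(uint8_t port)
{
	uint32_t state = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
	uint32_t cur = 0;
	int ret;

	/* must be called before any other bypass API */
	ret = rte_pmd_ixgbe_bypass_init(port);
	if (ret != 0)
		return ret;

	/* keep traffic flowing through the NIC (normal mode) */
	ret = rte_pmd_ixgbe_bypass_state_set(port, &state);
	if (ret != 0)
		return ret;

	/* arm an 8 second watchdog; the state taken on expiry can be chosen with
	 * rte_pmd_ixgbe_bypass_event_store(port,
	 *         RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT, ...)
	 */
	ret = rte_pmd_ixgbe_bypass_wd_timeout_store(port,
			RTE_PMD_IXGBE_BYPASS_TMT_8_SEC);
	if (ret != 0)
		return ret;

	/* the application must kick the watchdog periodically */
	ret = rte_pmd_ixgbe_bypass_wd_reset(port);
	if (ret != 0)
		return ret;

	/* report the current state: 1 normal, 2 bypass, 3 isolate */
	ret = rte_pmd_ixgbe_bypass_state_show(port, &cur);
	if (ret == 0)
		printf("port %u bypass state: %u\n", port, cur);
	return ret;
}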