path: root/drivers/net/e1000
author     Luca Boccassi <luca.boccassi@gmail.com>    2017-08-16 18:42:05 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>    2017-08-16 18:46:04 +0100
commit     f239aed5e674965691846e8ce3f187dd47523689 (patch)
tree       a153a3125c6e183c73871a8ecaa4b285fed5fbd5 /drivers/net/e1000
parent     bf7567fd2a5b0b28ab724046143c24561d38d015 (diff)
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/e1000')
-rw-r--r--  drivers/net/e1000/Makefile            1
-rw-r--r--  drivers/net/e1000/e1000_ethdev.h     99
-rw-r--r--  drivers/net/e1000/em_ethdev.c        17
-rw-r--r--  drivers/net/e1000/igb_ethdev.c      684
-rw-r--r--  drivers/net/e1000/igb_flow.c       1707
-rw-r--r--  drivers/net/e1000/igb_pf.c            2
6 files changed, 2251 insertions, 259 deletions
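
Note: the bulk of this change is the new igb_flow.c, which plugs the igb PMD into the generic rte_flow API (exposed through the RTE_ETH_FILTER_GENERIC case added to eth_igb_filter_ctrl() below). As a rough illustration of the code path the new file serves, an application could install an n-tuple rule as sketched here; the port id, addresses and queue index are made-up example values, not part of this patch:

/* Illustrative sketch only: steer one IPv4/TCP flow to RX queue 1 via
 * rte_flow; igb_flow.c translates this into an igb n-tuple filter. */
#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static int
example_igb_ntuple_rule(uint8_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return -1;
	return rte_flow_create(port_id, &attr, pattern, actions, &err) ? 0 : -1;
}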
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index b5592d6b..ffdf36d3 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -96,6 +96,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 8352d0a7..5668910c 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -34,6 +34,7 @@
#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
#include <rte_time.h>
+#include <rte_pci.h>
#define E1000_INTEL_VENDOR_ID 0x8086
@@ -82,7 +83,7 @@
#define E1000_MAX_FLEX_FILTER_DWDS \
(E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
- (E1000_MAX_FLEX_FILTER_DWDS / 4)
+ (E1000_MAX_FLEX_FILTER_DWDS / 2)
#define E1000_FHFT_QUEUEING_LEN 0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
#define E1000_FHFT_QUEUEING_PRIO 0x00070000
@@ -143,6 +144,19 @@
#define EM_TX_MAX_SEG UINT8_MAX
#define EM_TX_MAX_MTU_SEG UINT8_MAX
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_82576 && (type) != e1000_i210 &&\
+ (type) != e1000_i211)\
+ return -ENOTSUP;\
+} while (0)
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_i210 && (type) != e1000_i211)\
+ return -ENOTSUP; \
+} while (0)
+
/* structure for interrupt relative data */
struct e1000_interrupt {
uint32_t flags;
@@ -237,13 +251,19 @@ struct e1000_2tuple_filter {
uint16_t queue; /* rx queue assigned to */
};
+/* ethertype filter structure */
+struct igb_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+};
+
/*
- * Structure to store filters' info.
+ * Structure to store filters'info.
*/
struct e1000_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
/* store used ethertype filters*/
- uint16_t ethertype_filters[E1000_MAX_ETQF_FILTERS];
+ struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS];
uint8_t flex_mask; /* Bit mask for every used flex filter */
struct e1000_flex_filter_list flex_list;
/* Bit mask for every used 5tuple filter */
@@ -252,6 +272,8 @@ struct e1000_filter_info {
/* Bit mask for every used 2tuple filter */
uint8_t twotuple_mask;
struct e1000_2tuple_filter_list twotuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
};
/*
@@ -291,8 +313,55 @@ struct e1000_adapter {
#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct e1000_adapter *)adapter)->filter)
-#define E1000_DEV_TO_PCI(eth_dev) \
- RTE_DEV_TO_PCI((eth_dev)->device)
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+/* ntuple filter list structure */
+struct igb_ntuple_filter_ele {
+ TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+
+/* ethertype filter list structure */
+struct igb_ethertype_filter_ele {
+ TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+
+/* syn filter list structure */
+struct igb_eth_syn_filter_ele {
+ TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+
+/* flex filter list structure */
+struct igb_flex_filter_ele {
+ TAILQ_ENTRY(igb_flex_filter_ele) entries;
+ struct rte_eth_flex_filter filter_info;
+};
+
+/* igb_flow memory list structure */
+struct igb_flow_mem {
+ TAILQ_ENTRY(igb_flow_mem) entries;
+ struct rte_flow *flow;
+ struct rte_eth_dev *dev;
+};
+
+TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
+struct igb_ntuple_filter_list igb_filter_ntuple_list;
+TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
+struct igb_ethertype_filter_list igb_filter_ethertype_list;
+TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
+struct igb_syn_filter_list igb_filter_syn_list;
+TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
+struct igb_flex_filter_list igb_filter_flex_list;
+TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
+struct igb_flow_mem_list igb_flow_list;
+
+extern const struct rte_flow_ops igb_flow_ops;
+
/*
* RX/TX IGB function prototypes
*/
@@ -411,4 +480,24 @@ void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void igb_pf_host_uninit(struct rte_eth_dev *dev);
+void igb_filterlist_flush(struct rte_eth_dev *dev);
+int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter);
+int igb_delete_2tuple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter);
+void igb_remove_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter);
+int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
+ uint8_t idx);
+int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter, bool add);
+int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter,
+ bool add);
#endif /* _E1000_ETHDEV_H_ */
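
Note: the MAC_TYPE_FILTER_SUP()/MAC_TYPE_FILTER_SUP_EXT() macros are moved into this header (and extended to cover i210/i211) so that both igb_ethdev.c and the new igb_flow.c can use them as early-return guards. Roughly how a filter entry point applies them (a sketch; the function name is illustrative, not part of the patch):

/* Sketch of the guard pattern: bail out with -ENOTSUP on MAC types
 * that lack the filter block. */
static int
example_filter_entry(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type); /* 82580/82576/i350/i210/i211 only */

	/* ... program the filter registers ... */
	return 0;
}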
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index a9bd92bc..3d4ab936 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -316,7 +316,7 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
@@ -390,7 +390,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -593,8 +593,7 @@ eth_em_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE(dev->data->dev_private);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev =
- E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
@@ -777,7 +776,7 @@ eth_em_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_disable(hw);
@@ -1041,7 +1040,7 @@ static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_enable(hw);
@@ -1091,7 +1090,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -1598,7 +1597,7 @@ static int
eth_em_interrupt_action(struct rte_eth_dev *dev,
struct rte_intr_handle *intr_handle)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
@@ -1671,7 +1670,7 @@ eth_em_interrupt_handler(void *param)
eth_em_interrupt_get_status(dev);
eth_em_interrupt_action(dev, dev->intr_handle);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
}
static int
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index d18dd48e..e4f7a9fa 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -138,7 +138,7 @@ static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
@@ -213,9 +213,6 @@ static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
-static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
- bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
@@ -225,9 +222,6 @@ static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
-static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
- struct rte_eth_flex_filter *filter,
- bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
@@ -237,17 +231,11 @@ static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
-static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter,
- bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
-static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -291,6 +279,7 @@ static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
+static int igb_filter_restore(struct rte_eth_dev *dev);
/*
* Define VF Stats MACRO for Non "cleared on read" register
@@ -756,11 +745,51 @@ igb_reset_swfw_lock(struct e1000_hw *hw)
return E1000_SUCCESS;
}
+/* Remove all ntuple filters of the device */
+static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple, entries);
+ rte_free(p_5tuple);
+ }
+ filter_info->fivetuple_mask = 0;
+ while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
+ TAILQ_REMOVE(&filter_info->twotuple_list,
+ p_2tuple, entries);
+ rte_free(p_2tuple);
+ }
+ filter_info->twotuple_mask = 0;
+
+ return 0;
+}
+
+/* Remove all flex filters of the device */
+static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_flex_filter *p_flex;
+
+ while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
+ TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
+ rte_free(p_flex);
+ }
+ filter_info->flex_mask = 0;
+
+ return 0;
+}
+
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
int error = 0;
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct e1000_vfta * shadow_vfta =
@@ -906,12 +935,19 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
/* enable support intr */
igb_intr_enable(eth_dev);
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct e1000_filter_info));
+
TAILQ_INIT(&filter_info->flex_list);
- filter_info->flex_mask = 0;
TAILQ_INIT(&filter_info->twotuple_list);
- filter_info->twotuple_mask = 0;
TAILQ_INIT(&filter_info->fivetuple_list);
- filter_info->fivetuple_mask = 0;
+
+ TAILQ_INIT(&igb_filter_ntuple_list);
+ TAILQ_INIT(&igb_filter_ethertype_list);
+ TAILQ_INIT(&igb_filter_syn_list);
+ TAILQ_INIT(&igb_filter_flex_list);
+ TAILQ_INIT(&igb_flow_list);
return 0;
@@ -929,6 +965,8 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
struct e1000_hw *hw;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -936,7 +974,7 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
return -EPERM;
hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
intr_handle = &pci_dev->intr_handle;
if (adapter->stopped == 0)
@@ -960,6 +998,23 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
rte_intr_callback_unregister(intr_handle,
eth_igb_interrupt_handler, eth_dev);
+ /* clear the SYN filter info */
+ filter_info->syn_info = 0;
+
+ /* clear the ethertype filters info */
+ filter_info->ethertype_mask = 0;
+ memset(filter_info->ethertype_filters, 0,
+ E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
+
+ /* remove all ntuple filters of the device */
+ igb_ntuple_filter_uninit(eth_dev);
+
+ /* remove all flex filters of the device */
+ igb_flex_filter_uninit(eth_dev);
+
+ /* clear all the filters list */
+ igb_filterlist_flush(eth_dev);
+
return 0;
}
@@ -994,7 +1049,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
@@ -1071,7 +1126,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1252,7 +1307,7 @@ eth_igb_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
@@ -1417,7 +1472,9 @@ eth_igb_start(struct rte_eth_dev *dev)
if (rte_intr_allow_others(intr_handle)) {
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
- eth_igb_lsc_interrupt_setup(dev);
+ eth_igb_lsc_interrupt_setup(dev, TRUE);
+ else
+ eth_igb_lsc_interrupt_setup(dev, FALSE);
} else {
rte_intr_callback_unregister(intr_handle,
eth_igb_interrupt_handler,
@@ -1438,6 +1495,9 @@ eth_igb_start(struct rte_eth_dev *dev)
/* resume enabled intr since hw reset */
igb_intr_enable(dev);
+ /* restore all types filter */
+ igb_filter_restore(dev);
+
PMD_INIT_LOG(DEBUG, "<<");
return 0;
@@ -1459,13 +1519,8 @@ static void
eth_igb_stop(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_filter_info *filter_info =
- E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- struct e1000_flex_filter *p_flex;
- struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
- struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
igb_intr_disable(hw);
@@ -1494,31 +1549,6 @@ eth_igb_stop(struct rte_eth_dev *dev)
memset(&link, 0, sizeof(link));
rte_igb_dev_atomic_write_link_status(dev, &link);
- /* Remove all flex filters of the device */
- while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
- TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
- rte_free(p_flex);
- }
- filter_info->flex_mask = 0;
-
- /* Remove all ntuple filters of the device */
- for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
- p_5tuple != NULL; p_5tuple = p_5tuple_next) {
- p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
- TAILQ_REMOVE(&filter_info->fivetuple_list,
- p_5tuple, entries);
- rte_free(p_5tuple);
- }
- filter_info->fivetuple_mask = 0;
- for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
- p_2tuple != NULL; p_2tuple = p_2tuple_next) {
- p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
- TAILQ_REMOVE(&filter_info->twotuple_list,
- p_2tuple, entries);
- rte_free(p_2tuple);
- }
- filter_info->twotuple_mask = 0;
-
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
rte_intr_callback_register(intr_handle,
@@ -1566,7 +1596,7 @@ eth_igb_close(struct rte_eth_dev *dev)
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
struct rte_eth_link link;
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
eth_igb_stop(dev);
@@ -2152,7 +2182,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2281,7 +2311,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2716,18 +2746,23 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
*
* @param dev
* Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- intr->mask |= E1000_ICR_LSC;
+ if (on)
+ intr->mask |= E1000_ICR_LSC;
+ else
+ intr->mask &= ~E1000_ICR_LSC;
return 0;
}
@@ -2813,7 +2848,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint32_t tctl, rctl;
struct rte_eth_link link;
int ret;
@@ -2870,7 +2905,8 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
E1000_WRITE_FLUSH(hw);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
return 0;
@@ -2929,7 +2965,8 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
/* PF reset VF event */
if (in_msg == E1000_PF_CONTROL_MSG)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL, NULL);
}
static int
@@ -3081,7 +3118,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
#define E1000_RAH_POOLSEL_SHIFT (18)
static int
eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, __rte_unused uint32_t pool)
+ uint32_t index, uint32_t pool)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t rah;
@@ -3229,7 +3266,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret;
uint32_t intr_vector = 0;
@@ -3285,7 +3322,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -3549,18 +3586,14 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
return 0;
}
-#define MAC_TYPE_FILTER_SUP(type) do {\
- if ((type) != e1000_82580 && (type) != e1000_i350 &&\
- (type) != e1000_82576)\
- return -ENOTSUP;\
-} while (0)
-
-static int
+int
eth_igb_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
uint32_t synqf, rfctl;
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
@@ -3588,6 +3621,7 @@ eth_igb_syn_filter_set(struct rte_eth_dev *dev,
synqf = 0;
}
+ filter_info->syn_info = synqf;
E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
E1000_WRITE_FLUSH(hw);
return 0;
@@ -3655,11 +3689,6 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
return ret;
}
-#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
- if ((type) != e1000_82580 && (type) != e1000_i350)\
- return -ENOSYS; \
-} while (0)
-
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
static inline int
ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
@@ -3722,6 +3751,54 @@ igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
return NULL;
}
+/* inject a igb 2tuple filter to HW */
+static inline void
+igb_inject_2uple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
+ uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ int i;
+
+ i = filter->index;
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ ttqf |= E1000_TTQF_QUEUE_ENABLE;
+ ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
+ ttqf |= (uint32_t)(filter->filter_info.proto &
+ E1000_TTQF_PROTOCOL_MASK);
+ if (filter->filter_info.proto_mask == 0)
+ ttqf &= ~E1000_TTQF_MASK_ENABLE;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else {
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
/*
* igb_add_2tuple_filter - add a 2tuple filter
*
@@ -3737,12 +3814,9 @@ static int
igb_add_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_2tuple_filter *filter;
- uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
- uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
int i, ret;
filter = rte_zmalloc("e1000_2tuple_filter",
@@ -3784,39 +3858,25 @@ igb_add_2tuple_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
- if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
- imir |= E1000_IMIR_PORT_BP;
- else
- imir &= ~E1000_IMIR_PORT_BP;
+ igb_inject_2uple_filter(dev, filter);
+ return 0;
+}
- imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+int
+igb_delete_2tuple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- ttqf |= E1000_TTQF_QUEUE_ENABLE;
- ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
- ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
- if (filter->filter_info.proto_mask == 0)
- ttqf &= ~E1000_TTQF_MASK_ENABLE;
+ filter_info->twotuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
+ rte_free(filter);
- /* tcp flags bits setting. */
- if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
- if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_URG;
- if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_ACK;
- if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_PSH;
- if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_RST;
- if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_SYN;
- if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_FIN;
- } else
- imir_ext |= E1000_IMIREXT_CTRL_BP;
- E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
- E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
- E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+ E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
@@ -3835,7 +3895,6 @@ static int
igb_remove_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_2tuple_filter_info filter_2tuple;
@@ -3855,16 +3914,50 @@ igb_remove_2tuple_filter(struct rte_eth_dev *dev,
return -ENOENT;
}
- filter_info->twotuple_mask &= ~(1 << filter->index);
- TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
- rte_free(filter);
+ igb_delete_2tuple_filter(dev, filter);
- E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
- E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
+/* inject a igb flex filter to HW */
+static inline void
+igb_inject_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, queueing;
+ uint32_t reg_off;
+ uint8_t i, j = 0;
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ if (filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
+
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
+ (E1000_WUFC_FLX0 << filter->index));
+ queueing = filter->filter_info.len |
+ (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
+ (filter->filter_info.priority <<
+ E1000_FHFT_QUEUEING_PRIO_SHIFT);
+ E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
+ queueing);
+
+ for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
+ E1000_WRITE_REG(hw, reg_off,
+ filter->filter_info.dwords[j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ filter->filter_info.dwords[++j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ (uint32_t)filter->filter_info.mask[i]);
+ reg_off += sizeof(uint32_t) * 2;
+ ++j;
+ }
+}
+
static inline struct e1000_flex_filter *
eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
struct e1000_flex_filter_info *key)
@@ -3880,18 +3973,48 @@ eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
return NULL;
}
-static int
+/* remove a flex byte filter
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: the pointer of the filter will be removed.
+ */
+void
+igb_remove_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, i;
+ uint32_t reg_off;
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ if (filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
+
+ for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
+ E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
+
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc &
+ (~(E1000_WUFC_FLX0 << filter->index)));
+
+ filter_info->flex_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->flex_list, filter, entries);
+ rte_free(filter);
+}
+
+int
eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_flex_filter *flex_filter, *it;
- uint32_t wufc, queueing, mask;
- uint32_t reg_off;
- uint8_t shift, i, j = 0;
+ uint32_t mask;
+ uint8_t shift, i;
flex_filter = rte_zmalloc("e1000_flex_filter",
sizeof(struct e1000_flex_filter), 0);
@@ -3911,15 +4034,20 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
flex_filter->filter_info.mask[i] = mask;
}
- wufc = E1000_READ_REG(hw, E1000_WUFC);
+ it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
+ &flex_filter->filter_info);
+ if (it == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ rte_free(flex_filter);
+ return -ENOENT;
+ }
+ if (it != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ rte_free(flex_filter);
+ return -EEXIST;
+ }
if (add) {
- if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
- &flex_filter->filter_info) != NULL) {
- PMD_DRV_LOG(ERR, "filter exists.");
- rte_free(flex_filter);
- return -EEXIST;
- }
flex_filter->queue = filter->queue;
/*
* look for an unused flex filter index
@@ -3941,52 +4069,10 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- if (flex_filter->index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(flex_filter->index);
- else
- reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
-
- E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
- (E1000_WUFC_FLX0 << flex_filter->index));
- queueing = filter->len |
- (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
- (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
- E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
- queueing);
- for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
- E1000_WRITE_REG(hw, reg_off,
- flex_filter->filter_info.dwords[j]);
- reg_off += sizeof(uint32_t);
- E1000_WRITE_REG(hw, reg_off,
- flex_filter->filter_info.dwords[++j]);
- reg_off += sizeof(uint32_t);
- E1000_WRITE_REG(hw, reg_off,
- (uint32_t)flex_filter->filter_info.mask[i]);
- reg_off += sizeof(uint32_t) * 2;
- ++j;
- }
- } else {
- it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
- &flex_filter->filter_info);
- if (it == NULL) {
- PMD_DRV_LOG(ERR, "filter doesn't exist.");
- rte_free(flex_filter);
- return -ENOENT;
- }
-
- if (it->index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(it->index);
- else
- reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+ igb_inject_flex_filter(dev, flex_filter);
- for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
- E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
- E1000_WRITE_REG(hw, E1000_WUFC, wufc &
- (~(E1000_WUFC_FLX0 << it->index)));
-
- filter_info->flex_mask &= ~(1 << it->index);
- TAILQ_REMOVE(&filter_info->flex_list, it, entries);
- rte_free(it);
+ } else {
+ igb_remove_flex_filter(dev, it);
rte_free(flex_filter);
}
@@ -4190,6 +4276,64 @@ igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
return NULL;
}
+/* inject a igb 5-tuple filter to HW */
+static inline void
+igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
+ uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ uint8_t i;
+
+ i = filter->index;
+ ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
+ if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
+ if (filter->filter_info.dst_ip_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
+ if (filter->filter_info.src_port_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ if (filter->filter_info.proto_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
+ ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
+ E1000_FTQF_QUEUE_MASK;
+ ftqf |= E1000_FTQF_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
+ E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
+ E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+
+ spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
+ E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else {
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
/*
* igb_add_5tuple_filter_82576 - add a 5tuple filter
*
@@ -4205,12 +4349,9 @@ static int
igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_5tuple_filter *filter;
- uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
- uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
uint8_t i;
int ret;
@@ -4254,50 +4395,29 @@ igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
return -ENOSYS;
}
- ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
- if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
- ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
- if (filter->filter_info.dst_ip_mask == 0)
- ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
- if (filter->filter_info.src_port_mask == 0)
- ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
- if (filter->filter_info.proto_mask == 0)
- ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
- ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
- E1000_FTQF_QUEUE_MASK;
- ftqf |= E1000_FTQF_QUEUE_ENABLE;
- E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
- E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
- E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+ igb_inject_5tuple_filter_82576(dev, filter);
+ return 0;
+}
- spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
- E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+int
+igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
- if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
- imir |= E1000_IMIR_PORT_BP;
- else
- imir &= ~E1000_IMIR_PORT_BP;
- imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+ filter_info->fivetuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
- /* tcp flags bits setting. */
- if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
- if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_URG;
- if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_ACK;
- if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_PSH;
- if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_RST;
- if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_SYN;
- if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_FIN;
- } else
- imir_ext |= E1000_IMIREXT_CTRL_BP;
- E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
- E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+ E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
+ E1000_FTQF_VF_BP | E1000_FTQF_MASK);
+ E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
@@ -4316,7 +4436,6 @@ static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_5tuple_filter_info filter_5tuple;
@@ -4336,17 +4455,8 @@ igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
return -ENOENT;
}
- filter_info->fivetuple_mask &= ~(1 << filter->index);
- TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
- rte_free(filter);
+ igb_delete_5tuple_filter_82576(dev, filter);
- E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
- E1000_FTQF_VF_BP | E1000_FTQF_MASK);
- E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
@@ -4412,7 +4522,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
* - On success, zero.
* - On failure, a negative value.
*/
-static int
+int
igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter,
bool add)
@@ -4434,7 +4544,9 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
break;
case RTE_2TUPLE_FLAGS:
case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
- if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
+ if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
+ hw->mac.type != e1000_i210 &&
+ hw->mac.type != e1000_i211)
return -ENOTSUP;
if (add)
ret = igb_add_2tuple_filter(dev, ntuple_filter);
@@ -4576,7 +4688,7 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
int i;
for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
- if (filter_info->ethertype_filters[i] == ethertype &&
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
(filter_info->ethertype_mask & (1 << i)))
return i;
}
@@ -4585,33 +4697,35 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
static inline int
igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
- uint16_t ethertype)
+ uint16_t ethertype, uint32_t etqf)
{
int i;
for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
if (!(filter_info->ethertype_mask & (1 << i))) {
filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] = ethertype;
+ filter_info->ethertype_filters[i].ethertype = ethertype;
+ filter_info->ethertype_filters[i].etqf = etqf;
return i;
}
}
return -1;
}
-static inline int
+int
igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
uint8_t idx)
{
if (idx >= E1000_MAX_ETQF_FILTERS)
return -1;
filter_info->ethertype_mask &= ~(1 << idx);
- filter_info->ethertype_filters[idx] = 0;
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
return idx;
}
-static int
+int
igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add)
@@ -4651,16 +4765,15 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
}
if (add) {
+ etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
+ etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
+ etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
ret = igb_ethertype_filter_insert(filter_info,
- filter->ether_type);
+ filter->ether_type, etqf);
if (ret < 0) {
PMD_DRV_LOG(ERR, "ethertype filters are full.");
return -ENOSYS;
}
-
- etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
- etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
- etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
} else {
ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
if (ret < 0)
@@ -4755,7 +4868,7 @@ eth_igb_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@@ -4770,6 +4883,11 @@ eth_igb_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FLEXIBLE:
ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &igb_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -5277,7 +5395,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask = 1 << queue_id;
uint32_t regval;
@@ -5350,7 +5468,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
uint32_t vec = E1000_MISC_VEC_ID;
uint32_t base = E1000_MISC_VEC_ID;
uint32_t misc_shift = 0;
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
/* won't configure msix register if no mapping is done
@@ -5422,6 +5540,84 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
E1000_WRITE_FLUSH(hw);
}
+/* restore n-tuple filter */
+static inline void
+igb_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
+ igb_inject_5tuple_filter_82576(dev, p_5tuple);
+ }
+
+ TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
+ igb_inject_2uple_filter(dev, p_2tuple);
+ }
+}
+
+/* restore SYN filter */
+static inline void
+igb_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & E1000_SYN_FILTER_ENABLE) {
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+igb_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ E1000_WRITE_REG(hw, E1000_ETQF(i),
+ filter_info->ethertype_filters[i].etqf);
+ E1000_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* restore flex byte filter */
+static inline void
+igb_flex_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter;
+
+ TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
+ igb_inject_flex_filter(dev, flex_filter);
+ }
+}
+
+/* restore all types filter */
+static int
+igb_filter_restore(struct rte_eth_dev *dev)
+{
+ igb_ntuple_filter_restore(dev);
+ igb_ethertype_filter_restore(dev);
+ igb_syn_filter_restore(dev);
+ igb_flex_filter_restore(dev);
+
+ return 0;
+}
+
RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
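
Note: the new RTE_ETH_FILTER_GENERIC case in eth_igb_filter_ctrl() above is what hands igb_flow_ops to the rte_flow layer. The lookup the ethdev flow code performs is roughly the following (a sketch with an assumed port_id variable; applications normally never call this directly, rte_flow_validate()/create() do it internally):

const struct rte_flow_ops *ops = NULL;

if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
			    RTE_ETH_FILTER_GET, &ops) == 0 && ops != NULL) {
	/* ops->validate / ops->create / ops->destroy / ops->flush
	 * now point at the handlers implemented in igb_flow.c. */
}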
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
new file mode 100644
index 00000000..ed2ecc40
--- /dev/null
+++ b/drivers/net/e1000/igb_flow.c
@@ -0,0 +1,1707 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "e1000_logs.h"
+#include "base/e1000_api.h"
+#include "e1000_ethdev.h"
+
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
+ do { \
+ item = (pattern) + (index); \
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
+ (index)++; \
+ item = (pattern) + (index); \
+ } \
+ } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = (actions) + (index); \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+ (index)++; \
+ act = (actions) + (index); \
+ } \
+ } while (0)
+
+#define IGB_FLEX_RAW_NUM 12
+
+/**
+ * Please aware there's an asumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is a n-tuple rule.
+ * And get the n-tuple filter info BTW.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP or SCTP
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item can be MAC or IPv4 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* check if the next not void item is TCP or UDP or SCTP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* get the TCP/UDP/SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ if (item->spec && item->mask) {
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ }
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ if (item->spec && item->mask) {
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ }
+ } else {
+ if (item->spec && item->mask) {
+ sctp_mask = (const struct rte_flow_item_sctp *)
+ item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = (const struct rte_flow_item_sctp *)
+ item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ }
+ }
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+/* igb-specific wrapper: the ntuple filter flags depend on the MAC type */
+static int
+igb_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* Igb doesn't support many priorities. */
+ if (filter->priority > E1000_2TUPLE_MAX_PRI) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by ntuple filter");
+ return -rte_errno;
+ }
+ filter->flags |= RTE_5TUPLE_FLAGS;
+ } else {
+ if (filter->src_ip_mask || filter->dst_ip_mask ||
+ filter->src_port_mask) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+				NULL, "only 2-tuple filters are "
+					"supported by this device");
+ return -rte_errno;
+ }
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by ntuple filter");
+ return -rte_errno;
+ }
+ filter->flags |= RTE_2TUPLE_FLAGS;
+ }
+
+ return 0;
+}
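+
+/*
+ * Illustrative application-side sketch (not part of the driver) of one rule
+ * that this ntuple path accepts. Port 0, queue 1, the addresses, the TCP
+ * port and the priority are arbitrary example values. On 82576 this becomes
+ * a 5-tuple filter; on the other supported MACs only the 2-tuple subset
+ * (protocol + destination port) may carry a non-zero mask. The ETH item
+ * keeps spec/mask NULL, the TCP flags mask stays 0 so flags are ignored,
+ * and next_proto_id 6 selects TCP.
+ *
+ *	struct rte_flow_error err;
+ *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
+ *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
+ *		.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
+ *		.dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 2)),
+ *		.next_proto_id = 6,
+ *	} };
+ *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
+ *		.src_addr = 0xFFFFFFFF,
+ *		.dst_addr = 0xFFFFFFFF,
+ *		.next_proto_id = 0xFF,
+ *	} };
+ *	struct rte_flow_item_tcp tcp_spec = { .hdr = {
+ *		.dst_port = rte_cpu_to_be_16(80),
+ *	} };
+ *	struct rte_flow_item_tcp tcp_mask = { .hdr = {
+ *		.dst_port = 0xFFFF,
+ *	} };
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *		  .spec = &ip_spec, .mask = &ip_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ *		  .spec = &tcp_spec, .mask = &tcp_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ *	struct rte_flow_action_queue queue = { .index = 1 };
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ *	int ret = rte_flow_validate(0, &attr, pattern, actions, &err);
+ */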
+
+/**
+ * Parse the rule to see if it is an ethertype rule,
+ * and get the ethertype filter info if it is.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* Parse pattern */
+ index = 0;
+
+ /* The first non-void item should be MAC. */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ index = 0;
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+igb_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported "
+ "by ethertype filter");
+ return -rte_errno;
+ }
+ } else {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported "
+ "by ethertype filter");
+ return -rte_errno;
+ }
+ }
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
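+
+/*
+ * Illustrative application-side sketch (not part of the driver) of an
+ * ethertype rule that satisfies the checks above: EtherType 0x0807 as in the
+ * documented pattern example, no MAC compare (all address mask bytes zero)
+ * and a QUEUE action. Port 0 and queue 1 are arbitrary example values.
+ *
+ *	struct rte_flow_error err;
+ *	struct rte_flow_attr attr = { .ingress = 1 };
+ *	struct rte_flow_item_eth eth_spec = {
+ *		.type = rte_cpu_to_be_16(0x0807),
+ *	};
+ *	struct rte_flow_item_eth eth_mask = {
+ *		.type = 0xFFFF,
+ *	};
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ *		  .spec = &eth_spec, .mask = &eth_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ *	struct rte_flow_action_queue queue = { .index = 1 };
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ *	int ret = rte_flow_validate(0, &attr, pattern, actions, &err);
+ */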
+
+/**
+ * Parse the rule to see if it is a TCP SYN rule,
+ * and get the TCP SYN filter info if it is.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0xFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+igb_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+	ret = cons_parse_syn_filter(attr, pattern,
+				actions, filter, error);
+
+	if (ret)
+		return ret;
+
+	if (hw->mac.type == e1000_82576) {
+		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				NULL, "queue number not "
+					"supported by syn filter");
+			return -rte_errno;
+		}
+	} else {
+		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				NULL, "queue number not "
+					"supported by syn filter");
+			return -rte_errno;
+		}
+	}
+
+ return 0;
+}
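+
+/*
+ * Illustrative application-side sketch (not part of the driver) of a SYN
+ * rule matching the documented pattern: the ETH and IPV4 items carry no
+ * spec/mask and the TCP item masks only the SYN bit (0x02). Port 0, queue 1
+ * and the lowest priority (0) are arbitrary example values.
+ *
+ *	struct rte_flow_error err;
+ *	struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
+ *	struct rte_flow_item_tcp tcp_spec = { .hdr = { .tcp_flags = 0x02 } };
+ *	struct rte_flow_item_tcp tcp_mask = { .hdr = { .tcp_flags = 0x02 } };
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ *		  .spec = &tcp_spec, .mask = &tcp_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ *	struct rte_flow_action_queue queue = { .index = 1 };
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ *	int ret = rte_flow_validate(0, &attr, pattern, actions, &err);
+ */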
+
+/**
+ * Parse the rule to see if it is a flex byte rule,
+ * and get the flex byte filter info if it is.
+ * pattern:
+ * The first not void item must be RAW.
+ * The second not void item can be RAW or END.
+ * The third not void item can be RAW or END.
+ * The last not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * RAW relative 0 0x1
+ * offset 0 0xFFFFFFFF
+ * pattern {0x08, 0x06} {0xFF, 0xFF}
+ * RAW relative 1 0x1
+ * offset 100 0xFFFFFFFF
+ * pattern {0x11, 0x22, 0x33} {0xFF, 0xFF, 0xFF}
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_flex_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_flex_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_raw *raw_spec;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index, i, offset, total_offset;
+ uint32_t max_offset = 0;
+ int32_t shift, j, raw_index = 0;
+ int32_t relative[IGB_FLEX_RAW_NUM] = {0};
+ int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+item_loop:
+
+ /* the first not void item should be RAW */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+ if (!raw_mask->length ||
+ !raw_mask->relative) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (raw_mask->offset)
+ offset = raw_spec->offset;
+ else
+ offset = 0;
+
+ for (j = 0; j < raw_spec->length; j++) {
+ if (raw_mask->pattern[j] != 0xFF) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+ }
+
+ total_offset = 0;
+
+ if (raw_spec->relative) {
+ for (j = raw_index; j > 0; j--) {
+ total_offset += raw_offset[j - 1];
+ if (!relative[j - 1])
+ break;
+ }
+ if (total_offset + raw_spec->length + offset > max_offset)
+ max_offset = total_offset + raw_spec->length + offset;
+ } else {
+ if (raw_spec->length + offset > max_offset)
+ max_offset = raw_spec->length + offset;
+ }
+
+ if ((raw_spec->length + offset + total_offset) >
+ RTE_FLEX_FILTER_MAXLEN) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (raw_spec->relative == 0) {
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[offset + j] =
+ raw_spec->pattern[j];
+ j = offset / CHAR_BIT;
+ shift = offset % CHAR_BIT;
+ } else {
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[total_offset + offset + j] =
+ raw_spec->pattern[j];
+ j = (total_offset + offset) / CHAR_BIT;
+ shift = (total_offset + offset) % CHAR_BIT;
+ }
+
+ i = 0;
+
+ for ( ; shift < CHAR_BIT; shift++) {
+ filter->mask[j] |= (0x80 >> shift);
+ i++;
+ if (i == raw_spec->length)
+ break;
+ if (shift == (CHAR_BIT - 1)) {
+ j++;
+ shift = -1;
+ }
+ }
+
+ relative[raw_index] = raw_spec->relative;
+ raw_offset[raw_index] = offset + raw_spec->length;
+ raw_index++;
+
+ /* check if the next not void item is RAW */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ /* go back to parser */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+		/* if the item is RAW, its content should be parsed too */
+ goto item_loop;
+ }
+
+ filter->len = RTE_ALIGN(max_offset, 8);
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+static int
+igb_parse_flex_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_flex_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+	ret = cons_parse_flex_filter(attr, pattern,
+			actions, filter, error);
+
+	if (ret)
+		return ret;
+
+	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			NULL, "queue number not supported by flex filter");
+		return -rte_errno;
+	}
+
+	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
+		filter->len % sizeof(uint64_t) != 0) {
+		PMD_DRV_LOG(ERR, "filter's length is out of range");
+		return -EINVAL;
+	}
+
+	if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
+		PMD_DRV_LOG(ERR, "filter's priority is out of range");
+		return -EINVAL;
+	}
+
+ return 0;
+}
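+
+/*
+ * Illustrative application-side sketch (not part of the driver) of a flex
+ * (RAW) rule matching the documented pattern: two bytes 0x08 0x06 at
+ * absolute offset 12. Port 0, queue 1 and the offset are arbitrary example
+ * values. Because rte_flow_item_raw ends in a flexible pattern[] array, the
+ * spec and mask are built inside unions that reserve room for the bytes.
+ *
+ *	union raw_buf {
+ *		struct rte_flow_item_raw raw;
+ *		uint8_t buf[sizeof(struct rte_flow_item_raw) + 2];
+ *	} spec_u, mask_u;
+ *	struct rte_flow_error err;
+ *	struct rte_flow_attr attr = { .ingress = 1 };
+ *
+ *	memset(&spec_u, 0, sizeof(spec_u));
+ *	memset(&mask_u, 0, sizeof(mask_u));
+ *	spec_u.raw.relative = 0;
+ *	spec_u.raw.offset = 12;
+ *	spec_u.raw.length = 2;
+ *	spec_u.raw.pattern[0] = 0x08;
+ *	spec_u.raw.pattern[1] = 0x06;
+ *	mask_u.raw.relative = 1;
+ *	mask_u.raw.offset = -1;
+ *	mask_u.raw.length = 0xFFFF;
+ *	mask_u.raw.pattern[0] = 0xFF;
+ *	mask_u.raw.pattern[1] = 0xFF;
+ *
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
+ *		  .spec = &spec_u.raw, .mask = &mask_u.raw },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ *	struct rte_flow_action_queue queue = { .index = 1 };
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ *	int ret = rte_flow_validate(0, &attr, pattern, actions, &err);
+ */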
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one kind of filter.
+ * We let it use the first filter type it hits,
+ * so the order of the checks below matters.
+ */
+static struct rte_flow *
+igb_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_flex_filter flex_filter;
+ struct rte_flow *flow = NULL;
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+
+ flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+		return NULL;
+ }
+ igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
+ sizeof(struct igb_flow_mem), 0);
+ if (!igb_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ igb_flow_mem_ptr->flow = flow;
+ igb_flow_mem_ptr->dev = dev;
+ TAILQ_INSERT_TAIL(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = igb_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret) {
+ ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
+ sizeof(struct igb_ntuple_filter_ele), 0);
+ (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = igb_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret) {
+ ret = igb_add_del_ethertype_filter(dev,
+ &ethertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "igb_ethertype_filter",
+ sizeof(struct igb_ethertype_filter_ele), 0);
+ (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = igb_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("igb_syn_filter",
+ sizeof(struct igb_eth_syn_filter_ele), 0);
+ (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ ret = igb_parse_flex_filter(dev, attr, pattern,
+ actions, &flex_filter, error);
+ if (!ret) {
+ ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
+ if (!ret) {
+ flex_filter_ptr = rte_zmalloc("igb_flex_filter",
+ sizeof(struct igb_flex_filter_ele), 0);
+ (void)rte_memcpy(&flex_filter_ptr->filter_info,
+ &flex_filter,
+ sizeof(struct rte_eth_flex_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ flow->rule = flex_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(igb_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
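+
+/*
+ * Application-side lifecycle sketch (not part of the driver): attr, pattern
+ * and actions are built as in the earlier sketches and port 0 is an
+ * arbitrary example value. rte_flow_create() reaches igb_flow_create()
+ * above; rte_flow_destroy() and rte_flow_flush() reach the corresponding
+ * callbacks further below.
+ *
+ *	struct rte_flow_error err;
+ *	struct rte_flow *flow;
+ *
+ *	flow = rte_flow_create(0, &attr, pattern, actions, &err);
+ *	if (flow == NULL)
+ *		printf("rule rejected: %s\n",
+ *			err.message ? err.message : "unknown");
+ *	else
+ *		rte_flow_destroy(0, flow, &err);
+ *	rte_flow_flush(0, &err);
+ */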
+
+/**
+ * Check if the flow rule is supported by igb.
+ * It only checks the format; it does not guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ */
+static int
+igb_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_flex_filter flex_filter;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = igb_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = igb_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = igb_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ ret = igb_parse_flex_filter(dev, attr, pattern,
+ actions, &flex_filter, error);
+
+ return ret;
+}
+
+/* Destroy a flow rule on igb. */
+static int
+igb_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
+ pmd_flow->rule;
+ ret = igb_add_del_ntuple_filter(dev,
+ &ntuple_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
+ pmd_flow->rule;
+ ret = igb_add_del_ethertype_filter(dev,
+ &ethertype_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ ret = eth_igb_syn_filter_set(dev,
+ &syn_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ flex_filter_ptr = (struct igb_flex_filter_ele *)
+ pmd_flow->rule;
+ ret = eth_igb_add_del_flex_filter(dev,
+ &flex_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ rte_free(flex_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
+		if (igb_flow_mem_ptr->flow == pmd_flow) {
+			TAILQ_REMOVE(&igb_flow_list,
+					igb_flow_mem_ptr, entries);
+			rte_free(igb_flow_mem_ptr);
+			break;
+		}
+	}
+ rte_free(flow);
+
+ return ret;
+}
+
+/* remove all the n-tuple filters */
+static void
+igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ igb_delete_5tuple_filter_82576(dev, p_5tuple);
+
+ while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
+ igb_delete_2tuple_filter(dev, p_2tuple);
+}
+
+/* remove all the ether type filters */
+static void
+igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ (void)igb_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+static void
+igb_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
+ filter_info->syn_info = 0;
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/* remove all the flex filters */
+static void
+igb_clear_all_flex_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter;
+
+ while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
+ igb_remove_flex_filter(dev, flex_filter);
+}
+
+void
+igb_filterlist_flush(struct rte_eth_dev *dev)
+{
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+ enum rte_filter_type filter_type;
+ struct rte_flow *pmd_flow;
+
+ TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
+ if (igb_flow_mem_ptr->dev == dev) {
+ pmd_flow = igb_flow_mem_ptr->flow;
+ filter_type = pmd_flow->filter_type;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr =
+ (struct igb_ntuple_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr =
+ (struct igb_ethertype_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr =
+ (struct igb_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ flex_filter_ptr =
+ (struct igb_flex_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ rte_free(flex_filter_ptr);
+ break;
+ default:
+				PMD_DRV_LOG(WARNING, "Filter type "
+					"(%d) not supported", filter_type);
+ break;
+ }
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr,
+ entries);
+ rte_free(igb_flow_mem_ptr->flow);
+ rte_free(igb_flow_mem_ptr);
+ }
+ }
+}
+
+/* Destroy all flow rules associated with a port on igb. */
+static int
+igb_flow_flush(struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_error *error)
+{
+ igb_clear_all_ntuple_filter(dev);
+ igb_clear_all_ethertype_filter(dev);
+ igb_clear_syn_filter(dev);
+ igb_clear_all_flex_filter(dev);
+ igb_filterlist_flush(dev);
+
+ return 0;
+}
+
+const struct rte_flow_ops igb_flow_ops = {
+ .validate = igb_flow_validate,
+ .create = igb_flow_create,
+ .destroy = igb_flow_destroy,
+ .flush = igb_flow_flush,
+};
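+
+/*
+ * Applications never call igb_flow_ops directly; the generic rte_flow layer
+ * asks the driver for this table through the filter_ctrl callback. A rough
+ * sketch of that hook (the actual wiring is in igb_ethdev.c, which this
+ * patch also modifies) might look like:
+ *
+ *	case RTE_ETH_FILTER_GENERIC:
+ *		if (filter_op != RTE_ETH_FILTER_GET)
+ *			return -EINVAL;
+ *		*(const void **)arg = &igb_flow_ops;
+ *		break;
+ */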
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 923c78a1..6809d30c 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -57,7 +57,7 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
return pci_dev->max_vfs;
}