author    Christian Ehrhardt <christian.ehrhardt@canonical.com>    2017-05-16 14:51:32 +0200
committer Christian Ehrhardt <christian.ehrhardt@canonical.com>    2017-05-16 16:20:45 +0200
commit    7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree      4bfeadc905c977e45e54a90c42330553b8942e4e /drivers/net/ixgbe/ixgbe_fdir.c
parent    ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)

Imported Upstream version 17.05

Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_fdir.c')
-rw-r--r--    drivers/net/ixgbe/ixgbe_fdir.c    415
 1 file changed, 332 insertions(+), 83 deletions(-)
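In ixgbe_fdir.c, this import is essentially one refactor: flow-director mask handling is split into a software "store" step and a hardware "apply" step, and every programmed filter is now shadowed in an rte_hash-backed list so it can be replayed after a device reset. A minimal sketch of the resulting call flow, with names taken from the diff below (the flow-API call sites live outside this file):

	/* Call-flow sketch (not part of the patch):
	 *
	 *   fdir_set_input_mask(dev, input_mask)           legacy filter_ctrl path
	 *     -> ixgbe_fdir_store_input_mask(dev, masks)   copy into info->mask
	 *     -> ixgbe_fdir_set_input_mask(dev)            program registers
	 *
	 *   ixgbe_fdir_filter_program(dev, rule, del, up)  shadow + write a filter
	 *   ixgbe_fdir_filter_restore(dev)                 replay shadowed filters
	 */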
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 4b81ee37..7f6c7b58 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -43,6 +43,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_malloc.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -111,10 +112,8 @@
static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
@@ -294,8 +293,7 @@ reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
-fdir_set_input_mask_82599(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -307,8 +305,6 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
uint32_t fdirtcpm; /* TCP source and destination port masks. */
uint32_t fdiripv6m; /* IPv6 source and destination masks. */
- uint16_t dst_ipv6m = 0;
- uint16_t src_ipv6m = 0;
volatile uint32_t *reg;
PMD_INIT_FUNC_TRACE();
@@ -319,31 +315,30 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* a VLAN of 0 is unspecified, so mask that out as well. L4type
* cannot be masked out in this implementation.
*/
- if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
+ if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
fdirm |= IXGBE_FDIRM_L4P;
- if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- else if (input_mask->vlan_tci_mask == 0)
+ else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
- info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = reverse_fdir_bitmasks(
- rte_be_to_cpu_16(input_mask->dst_port_mask),
- rte_be_to_cpu_16(input_mask->src_port_mask));
+ rte_be_to_cpu_16(info->mask.dst_port_mask),
+ rte_be_to_cpu_16(info->mask.src_port_mask));
/* write all the same so that UDP, TCP and SCTP use the same mask
 * (little-endian)
 */
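Aside: reverse_fdir_bitmasks() (unchanged by this patch) bit-reverses the packed 32-bit port-mask word before its complement is written to the FDIRTCPM/FDIRUDPM/FDIRSCTPM registers below. A standalone sketch of that reversal for readers without the full file at hand (classic mask-and-shift bit reversal; assumed here to match the helper's behaviour in this release):

	#include <stdint.h>
	#include <stdio.h>

	/* Bit-reverse (dst_port_mask << 16) | src_port_mask, as fed to
	 * the FDIR*M port-mask registers. */
	static uint32_t
	example_reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
	{
		uint32_t mask = ((uint32_t)hi_dword << 16) | lo_dword;

		mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
		mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
		mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
		return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
	}

	int main(void)
	{
		/* dst fully matched, src fully masked -> prints 0x0000ffff */
		printf("0x%08x\n", example_reverse_fdir_bitmasks(0xFFFF, 0x0000));
		return 0;
	}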
@@ -351,30 +346,23 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
- info->mask.src_port_mask = input_mask->src_port_mask;
- info->mask.dst_port_mask = input_mask->dst_port_mask;
/* Store source and destination IPv4 masks (big-endian),
* can not use IXGBE_WRITE_REG.
*/
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
- *reg = ~(input_mask->ipv4_mask.src_ip);
+ *reg = ~(info->mask.src_ipv4_mask);
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
- *reg = ~(input_mask->ipv4_mask.dst_ip);
- info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
- info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ *reg = ~(info->mask.dst_ipv4_mask);
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
/*
* Store source and destination IPv6 masks (bit reversed)
*/
- IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
- IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
- fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
+ fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
+ info->mask.src_ipv6_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
- info->mask.src_ipv6_mask = src_ipv6m;
- info->mask.dst_ipv6_mask = dst_ipv6m;
}
return IXGBE_SUCCESS;
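For context on the src_ipv6_mask/dst_ipv6_mask values consumed above: IPV6_ADDR_TO_MASK (defined earlier in this file) folds a byte-granular 16-byte IPv6 mask down to 16 bits, one bit per address byte; the apply step then packs (dst << 16) | src and writes the complement to FDIRIP6M. A simplified standalone sketch of the folding (the driver's macro additionally rejects bytes that are neither 0x00 nor 0xFF):

	#include <stdint.h>

	/* bit i set  <=>  byte i of the address mask is 0xFF */
	static uint16_t
	example_ipv6_addr_to_mask(const uint8_t addr_mask[16])
	{
		uint16_t m = 0;
		int i;

		for (i = 0; i < 16; i++)
			if (addr_mask[i] == 0xFF)
				m |= (uint16_t)1 << i;
		return m;
	}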
@@ -385,8 +373,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
-fdir_set_input_mask_x550(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_x550(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -409,20 +396,19 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
/* some bits must be set for mac vlan or tunnel mode */
fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
- if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- else if (input_mask->vlan_tci_mask == 0)
+ else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
- info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
@@ -433,12 +419,11 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- mac_mask = input_mask->mac_addr_byte_mask;
+ mac_mask = info->mask.mac_addr_byte_mask;
fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
& IXGBE_FDIRIP6M_INNER_MAC;
- info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
- switch (input_mask->tunnel_type_mask) {
+ switch (info->mask.tunnel_type_mask) {
case 0:
/* Mask turnnel type */
fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
@@ -449,10 +434,8 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
return -EINVAL;
}
- info->mask.tunnel_type_mask =
- input_mask->tunnel_type_mask;
- switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
+ switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
case 0x0:
/* Mask vxlan id */
fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
@@ -466,8 +449,6 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
return -EINVAL;
}
- info->mask.tunnel_id_mask =
- input_mask->tunnel_id_mask;
}
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
@@ -481,22 +462,90 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
}
static int
-fdir_set_input_mask(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint16_t dst_ipv6m = 0;
+ uint16_t src_ipv6m = 0;
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.src_port_mask = input_mask->src_port_mask;
+ info->mask.dst_port_mask = input_mask->dst_port_mask;
+ info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+ info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+ info->mask.src_ipv6_mask = src_ipv6m;
+ info->mask.dst_ipv6_mask = dst_ipv6m;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+ info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
+ info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+ mode <= RTE_FDIR_MODE_PERFECT)
+ return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
+ else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
+
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+}
+
+int
+ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
if (mode >= RTE_FDIR_MODE_SIGNATURE &&
mode <= RTE_FDIR_MODE_PERFECT)
- return fdir_set_input_mask_82599(dev, input_mask);
+ return fdir_set_input_mask_82599(dev);
else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
- return fdir_set_input_mask_x550(dev, input_mask);
+ return fdir_set_input_mask_x550(dev);
PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
return -ENOTSUP;
}
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ int ret;
+
+ ret = ixgbe_fdir_store_input_mask(dev, input_mask);
+ if (ret)
+ return ret;
+
+ return ixgbe_fdir_set_input_mask(dev);
+}
+
/*
* ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
* arguments are valid
@@ -681,6 +730,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp4_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp4_flow.dst_port;
+ /* fall-through */
/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
@@ -696,6 +746,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp6_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp6_flow.dst_port;
+ /* fall-through */
/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
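The two /* fall-through */ annotations above only document behaviour that was already present: the UDP cases set the L4 ports and then deliberately continue into the SCTP/OTHER cases, which fill in the IP addresses shared by every flow type of that family. A comment in this form is also what compilers such as GCC 7 recognize to silence -Wimplicit-fallthrough. A condensed sketch of the IPv4 arm (field access abbreviated):

	switch (flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		input->formatted.src_port = udp4_flow.src_port;
		input->formatted.dst_port = udp4_flow.dst_port;
		/* fall-through: addresses below apply to all IPv4 types */
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		input->formatted.src_ip[0] = ip4_flow.src_ip;
		input->formatted.dst_ip[0] = ip4_flow.dst_ip;
		break;
	}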
@@ -1075,36 +1126,115 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
}
-/*
- * ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
- * @dev: pointer to the structure rte_eth_dev
- * @fdir_filter: fdir filter entry
- * @del: 1 - delete, 0 - add
- * @update: 1 - update
- */
+static inline struct ixgbe_fdir_filter *
+ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ struct ixgbe_fdir_filter *fdir_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_handle,
+ &fdir_filter->ixgbe_fdir);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ fdir_info->hash_map[ret] = fdir_filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ ret = rte_hash_del_key(fdir_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
+ return ret;
+ }
+
+ fdir_filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+ rte_free(fdir_filter);
+
+ return 0;
+}
+
static int
-ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter,
+ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct ixgbe_fdir_rule *rule)
+{
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ int err;
+
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+
+ err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
+ &rule->ixgbe_fdir,
+ fdir_mode);
+ if (err)
+ return err;
+
+ rule->mode = fdir_mode;
+ if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
+ rule->fdirflags = IXGBE_FDIRCMD_DROP;
+ rule->queue = fdir_filter->action.rx_queue;
+ rule->soft_id = fdir_filter->soft_id;
+
+ return 0;
+}
+
+int
+ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
bool del,
bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags;
uint32_t fdirhash;
- union ixgbe_atr_input input;
uint8_t queue;
bool is_perfect = FALSE;
int err;
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ struct ixgbe_fdir_filter *node;
+ bool add_node = FALSE;
- if (fdir_mode == RTE_FDIR_MODE_NONE)
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
return -ENOTSUP;
/*
* Sanity check for x550.
- * When adding a new filter with flow type set to IPv4-other,
+ * When adding a new filter with flow type set to IPv4,
* the flow director mask should be configed before,
* and the L4 protocol and ports are masked.
*/
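The lookup/insert/remove helpers above lean on a property of librte_hash: rte_hash_add_key(), rte_hash_lookup() and rte_hash_del_key() all return the same non-negative slot index for a given key, so hash_map[] can shadow the table with filter pointers while the TAILQ preserves insertion order for the restore path. A sketch of the kind of table this assumes (the actual creation happens in ixgbe_ethdev.c; the parameter values here are illustrative):

	#include <rte_hash.h>
	#include <rte_jhash.h>

	static struct rte_hash *
	example_create_fdir_hash(int socket_id)
	{
		struct rte_hash_parameters params = {
			.name = "example_fdir_hash",
			.entries = IXGBE_MAX_FDIR_FILTER_NUM, /* sizes hash_map[] too */
			.key_len = sizeof(union ixgbe_atr_input),
			.hash_func = rte_jhash,
			.hash_func_init_val = 0,
			.socket_id = socket_id,
		};

		return rte_hash_create(&params);
	}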
@@ -1112,12 +1242,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
(hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
- (fdir_filter->input.flow_type ==
- RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
+ (rule->ixgbe_fdir.formatted.flow_type ==
+ IXGBE_ATR_FLOW_TYPE_IPV4) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
PMD_DRV_LOG(ERR, "By this device,"
- " IPv4-other is not supported without"
+ " IPv4 is not supported without"
" L4 protocol and ports masked!");
return -ENOTSUP;
}
@@ -1126,28 +1256,26 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
is_perfect = TRUE;
- memset(&input, 0, sizeof(input));
-
- err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
- fdir_mode);
- if (err)
- return err;
-
if (is_perfect) {
- if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+ if (rule->ixgbe_fdir.formatted.flow_type &
+ IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
" perfect mode!");
return -ENOTSUP;
}
- fdirhash = atr_compute_perfect_hash_82599(&input,
+ fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
- fdirhash |= fdir_filter->soft_id <<
+ fdirhash |= rule->soft_id <<
IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
- fdirhash = atr_compute_sig_hash_82599(&input,
+ fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
if (del) {
+ err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ if (err < 0)
+ return err;
+
err = fdir_erase_filter_82599(hw, fdirhash);
if (err < 0)
PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
@@ -1157,7 +1285,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
}
/* add or update an fdir filter*/
fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
- if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
+ if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
if (is_perfect) {
queue = dev->data->dev_conf.fdir_conf.drop_queue;
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
@@ -1166,28 +1294,86 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
" signature mode.");
return -EINVAL;
}
- } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
- fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
- queue = (uint8_t)fdir_filter->action.rx_queue;
+ } else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
+ queue = (uint8_t)rule->queue;
else
return -EINVAL;
+ node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
+ if (node) {
+ if (update) {
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+ } else {
+ PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
+ return -EINVAL;
+ }
+ } else {
+ add_node = TRUE;
+ node = rte_zmalloc("ixgbe_fdir",
+ sizeof(struct ixgbe_fdir_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+ (void)rte_memcpy(&node->ixgbe_fdir,
+ &rule->ixgbe_fdir,
+ sizeof(union ixgbe_atr_input));
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+
+ err = ixgbe_insert_fdir_filter(info, node);
+ if (err < 0) {
+ rte_free(node);
+ return err;
+ }
+ }
+
if (is_perfect) {
- err = fdir_write_perfect_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash,
- fdir_mode);
+ err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash, fdir_mode);
} else {
- err = fdir_add_signature_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash);
+ err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash);
}
- if (err < 0)
+ if (err < 0) {
PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
- else
+
+ if (add_node)
+ (void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ } else {
PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+ }
return err;
}
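For perfect filters, the fdirhash value programmed above carries two fields at once; a sketch of the layout, assuming the conventional value (16) of IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT in this hardware family:

	/* FDIRHASH for a perfect filter:
	 *   bits 15:0   bucket hash from atr_compute_perfect_hash_82599()
	 *   bits 31:16  rule->soft_id, echoed back by hardware on a match
	 */
	fdirhash = bucket_hash |
		((uint32_t)rule->soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT);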
+/* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
+{
+ struct ixgbe_fdir_rule rule;
+ int err;
+
+ err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
+
+ if (err)
+ return err;
+
+ return ixgbe_fdir_filter_program(dev, &rule, del, update);
+}
+
static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
@@ -1378,3 +1564,66 @@ ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* restore flow director filter */
+void
+ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *node;
+ bool is_perfect = FALSE;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_write_perfect_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash,
+ fdir_mode);
+ }
+ } else {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_add_signature_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash);
+ }
+ }
+}
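A hedged sketch of how the restore pieces compose on a port restart (the hook into the driver's start path lives outside this file; the wrapper name below is hypothetical):

	static void
	example_fdir_restore(struct rte_eth_dev *dev)
	{
		/* re-program FDIR mask registers from the stored software copy */
		ixgbe_fdir_set_input_mask(dev);
		/* then replay every shadowed filter into the hardware tables */
		ixgbe_fdir_filter_restore(dev);
	}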
+
+/* remove all the flow director filters */
+int
+ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+ struct ixgbe_fdir_filter *filter_flag;
+ int ret = 0;
+
+ /* flush flow director */
+ rte_hash_reset(fdir_info->hash_handle);
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
+ filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ if (filter_flag != NULL)
+ ret = ixgbe_fdir_flush(dev);
+
+ return ret;
+}