Diffstat (limited to 'drivers/net/e1000/igb_ethdev.c')
-rw-r--r-- | drivers/net/e1000/igb_ethdev.c | 684
1 file changed, 440 insertions(+), 244 deletions(-)
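What the patch does, in outline: filter teardown moves out of eth_igb_stop() into dev_uninit-time helpers (igb_ntuple_filter_uninit(), igb_flex_filter_uninit()), the register-programming code is factored into igb_inject_*() helpers, and eth_igb_start() calls a new igb_filter_restore() that replays every filter kept in the driver's software lists. A minimal, self-contained sketch of that shadow-list/replay pattern follows; all demo_* names are invented for illustration and are not the PMD's API.

/*
 * Illustrative only: a hypothetical "shadow list" of 2-tuple filters kept in
 * driver-private memory, mirroring what the patch does with
 * filter_info->twotuple_list.  demo_inject() stands in for the
 * E1000_WRITE_REG() sequences in igb_inject_2uple_filter().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct demo_filter {
	TAILQ_ENTRY(demo_filter) entries;
	uint8_t  index;      /* hardware slot, e.g. an IMIR/TTQF index */
	uint16_t dst_port;   /* matched destination port */
	uint8_t  queue;      /* RX queue the packet is steered to */
};

TAILQ_HEAD(demo_filter_list, demo_filter);

/* Stand-in for programming the IMIR/TTQF registers. */
static void demo_inject(const struct demo_filter *f)
{
	printf("program slot %u: dst_port=%u -> queue %u\n",
	       f->index, f->dst_port, f->queue);
}

/* Add: remember the filter in software, then program the hardware. */
static struct demo_filter *
demo_add(struct demo_filter_list *list, uint8_t idx, uint16_t port, uint8_t q)
{
	struct demo_filter *f = calloc(1, sizeof(*f));

	if (f == NULL)
		return NULL;
	f->index = idx;
	f->dst_port = port;
	f->queue = q;
	TAILQ_INSERT_TAIL(list, f, entries);
	demo_inject(f);
	return f;
}

/* Restore: after a stop/start cycle the registers are stale, so walk the
 * shadow list and re-inject every entry (cf. igb_ntuple_filter_restore()). */
static void demo_restore(struct demo_filter_list *list)
{
	struct demo_filter *f;

	TAILQ_FOREACH(f, list, entries)
		demo_inject(f);
}

/* Uninit: only here are the software entries freed
 * (cf. igb_ntuple_filter_uninit()). */
static void demo_uninit(struct demo_filter_list *list)
{
	struct demo_filter *f;

	while ((f = TAILQ_FIRST(list)) != NULL) {
		TAILQ_REMOVE(list, f, entries);
		free(f);
	}
}

int main(void)
{
	struct demo_filter_list list = TAILQ_HEAD_INITIALIZER(list);

	demo_add(&list, 0, 80, 1);
	demo_add(&list, 1, 443, 2);
	demo_restore(&list);   /* what eth_igb_start() now triggers */
	demo_uninit(&list);    /* what eth_igb_dev_uninit() now does */
	return 0;
}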
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index d18dd48e..e4f7a9fa 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -138,7 +138,7 @@ static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); -static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev); +static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev); static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev); static int eth_igb_interrupt_action(struct rte_eth_dev *dev, @@ -213,9 +213,6 @@ static int eth_igb_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); -static int eth_igb_syn_filter_set(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter, - bool add); static int eth_igb_syn_filter_get(struct rte_eth_dev *dev, struct rte_eth_syn_filter *filter); static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev, @@ -225,9 +222,6 @@ static int igb_add_2tuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter); static int igb_remove_2tuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter); -static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, - struct rte_eth_flex_filter *filter, - bool add); static int eth_igb_get_flex_filter(struct rte_eth_dev *dev, struct rte_eth_flex_filter *filter); static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev, @@ -237,17 +231,11 @@ static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter); static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter); -static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *filter, - bool add); static int igb_get_ntuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *filter); static int igb_ntuple_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); -static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter, - bool add); static int igb_ethertype_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); @@ -291,6 +279,7 @@ static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev); static void eth_igbvf_interrupt_handler(void *param); static void igbvf_mbx_process(struct rte_eth_dev *dev); +static int igb_filter_restore(struct rte_eth_dev *dev); /* * Define VF Stats MACRO for Non "cleared on read" register @@ -756,11 +745,51 @@ igb_reset_swfw_lock(struct e1000_hw *hw) return E1000_SUCCESS; } +/* Remove all ntuple filters of the device */ +static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct e1000_5tuple_filter *p_5tuple; + struct e1000_2tuple_filter *p_2tuple; + + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, entries); + rte_free(p_5tuple); + } + filter_info->fivetuple_mask = 0; + while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) { + TAILQ_REMOVE(&filter_info->twotuple_list, + p_2tuple, entries); + 
rte_free(p_2tuple); + } + filter_info->twotuple_mask = 0; + + return 0; +} + +/* Remove all flex filters of the device */ +static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct e1000_flex_filter *p_flex; + + while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) { + TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries); + rte_free(p_flex); + } + filter_info->flex_mask = 0; + + return 0; +} + static int eth_igb_dev_init(struct rte_eth_dev *eth_dev) { int error = 0; - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); struct e1000_vfta * shadow_vfta = @@ -906,12 +935,19 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev) /* enable support intr */ igb_intr_enable(eth_dev); + /* initialize filter info */ + memset(filter_info, 0, + sizeof(struct e1000_filter_info)); + TAILQ_INIT(&filter_info->flex_list); - filter_info->flex_mask = 0; TAILQ_INIT(&filter_info->twotuple_list); - filter_info->twotuple_mask = 0; TAILQ_INIT(&filter_info->fivetuple_list); - filter_info->fivetuple_mask = 0; + + TAILQ_INIT(&igb_filter_ntuple_list); + TAILQ_INIT(&igb_filter_ethertype_list); + TAILQ_INIT(&igb_filter_syn_list); + TAILQ_INIT(&igb_filter_flex_list); + TAILQ_INIT(&igb_flow_list); return 0; @@ -929,6 +965,8 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev) struct e1000_hw *hw; struct e1000_adapter *adapter = E1000_DEV_PRIVATE(eth_dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); PMD_INIT_FUNC_TRACE(); @@ -936,7 +974,7 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev) return -EPERM; hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - pci_dev = E1000_DEV_TO_PCI(eth_dev); + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); intr_handle = &pci_dev->intr_handle; if (adapter->stopped == 0) @@ -960,6 +998,23 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev) rte_intr_callback_unregister(intr_handle, eth_igb_interrupt_handler, eth_dev); + /* clear the SYN filter info */ + filter_info->syn_info = 0; + + /* clear the ethertype filters info */ + filter_info->ethertype_mask = 0; + memset(filter_info->ethertype_filters, 0, + E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter)); + + /* remove all ntuple filters of the device */ + igb_ntuple_filter_uninit(eth_dev); + + /* remove all flex filters of the device */ + igb_flex_filter_uninit(eth_dev); + + /* clear all the filters list */ + igb_filterlist_flush(eth_dev); + return 0; } @@ -994,7 +1049,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) return 0; } - pci_dev = E1000_DEV_TO_PCI(eth_dev); + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); rte_eth_copy_pci_info(eth_dev, pci_dev); eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; @@ -1071,7 +1126,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev) { struct e1000_adapter *adapter = E1000_DEV_PRIVATE(eth_dev->data->dev_private); - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); PMD_INIT_FUNC_TRACE(); @@ -1252,7 +1307,7 @@ eth_igb_start(struct rte_eth_dev *dev) E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_adapter *adapter = E1000_DEV_PRIVATE(dev->data->dev_private); - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = 
RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int ret, mask; uint32_t intr_vector = 0; @@ -1417,7 +1472,9 @@ eth_igb_start(struct rte_eth_dev *dev) if (rte_intr_allow_others(intr_handle)) { /* check if lsc interrupt is enabled */ if (dev->data->dev_conf.intr_conf.lsc != 0) - eth_igb_lsc_interrupt_setup(dev); + eth_igb_lsc_interrupt_setup(dev, TRUE); + else + eth_igb_lsc_interrupt_setup(dev, FALSE); } else { rte_intr_callback_unregister(intr_handle, eth_igb_interrupt_handler, @@ -1438,6 +1495,9 @@ eth_igb_start(struct rte_eth_dev *dev) /* resume enabled intr since hw reset */ igb_intr_enable(dev); + /* restore all types filter */ + igb_filter_restore(dev); + PMD_INIT_LOG(DEBUG, "<<"); return 0; @@ -1459,13 +1519,8 @@ static void eth_igb_stop(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct e1000_filter_info *filter_info = - E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; - struct e1000_flex_filter *p_flex; - struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next; - struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next; struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; igb_intr_disable(hw); @@ -1494,31 +1549,6 @@ eth_igb_stop(struct rte_eth_dev *dev) memset(&link, 0, sizeof(link)); rte_igb_dev_atomic_write_link_status(dev, &link); - /* Remove all flex filters of the device */ - while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) { - TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries); - rte_free(p_flex); - } - filter_info->flex_mask = 0; - - /* Remove all ntuple filters of the device */ - for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list); - p_5tuple != NULL; p_5tuple = p_5tuple_next) { - p_5tuple_next = TAILQ_NEXT(p_5tuple, entries); - TAILQ_REMOVE(&filter_info->fivetuple_list, - p_5tuple, entries); - rte_free(p_5tuple); - } - filter_info->fivetuple_mask = 0; - for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list); - p_2tuple != NULL; p_2tuple = p_2tuple_next) { - p_2tuple_next = TAILQ_NEXT(p_2tuple, entries); - TAILQ_REMOVE(&filter_info->twotuple_list, - p_2tuple, entries); - rte_free(p_2tuple); - } - filter_info->twotuple_mask = 0; - if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ rte_intr_callback_register(intr_handle, @@ -1566,7 +1596,7 @@ eth_igb_close(struct rte_eth_dev *dev) struct e1000_adapter *adapter = E1000_DEV_PRIVATE(dev->data->dev_private); struct rte_eth_link link; - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; eth_igb_stop(dev); @@ -2152,7 +2182,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device); + dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
*/ dev_info->max_mac_addrs = hw->mac.rar_entry_count; @@ -2281,7 +2311,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device); + dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ dev_info->max_mac_addrs = hw->mac.rar_entry_count; @@ -2716,18 +2746,23 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) * * @param dev * Pointer to struct rte_eth_dev. + * @param on + * Enable or Disable * * @return * - On success, zero. * - On failure, a negative value. */ static int -eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev) +eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) { struct e1000_interrupt *intr = E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - intr->mask |= E1000_ICR_LSC; + if (on) + intr->mask |= E1000_ICR_LSC; + else + intr->mask &= ~E1000_ICR_LSC; return 0; } @@ -2813,7 +2848,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev, E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_interrupt *intr = E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); uint32_t tctl, rctl; struct rte_eth_link link; int ret; @@ -2870,7 +2905,8 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev, E1000_WRITE_REG(hw, E1000_TCTL, tctl); E1000_WRITE_REG(hw, E1000_RCTL, rctl); E1000_WRITE_FLUSH(hw); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL, NULL); } return 0; @@ -2929,7 +2965,8 @@ void igbvf_mbx_process(struct rte_eth_dev *dev) /* PF reset VF event */ if (in_msg == E1000_PF_CONTROL_MSG) - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL, NULL); } static int @@ -3081,7 +3118,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) #define E1000_RAH_POOLSEL_SHIFT (18) static int eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, - uint32_t index, __rte_unused uint32_t pool) + uint32_t index, uint32_t pool) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t rah; @@ -3229,7 +3266,7 @@ igbvf_dev_start(struct rte_eth_dev *dev) E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_adapter *adapter = E1000_DEV_PRIVATE(dev->data->dev_private); - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int ret; uint32_t intr_vector = 0; @@ -3285,7 +3322,7 @@ igbvf_dev_start(struct rte_eth_dev *dev) static void igbvf_dev_stop(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; PMD_INIT_FUNC_TRACE(); @@ -3549,18 +3586,14 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev, return 0; } -#define MAC_TYPE_FILTER_SUP(type) do {\ - if ((type) != e1000_82580 && (type) != e1000_i350 &&\ - (type) != e1000_82576)\ - return -ENOTSUP;\ -} while (0) - -static int +int eth_igb_syn_filter_set(struct rte_eth_dev *dev, struct rte_eth_syn_filter *filter, bool add) { struct 
e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); uint32_t synqf, rfctl; if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) @@ -3588,6 +3621,7 @@ eth_igb_syn_filter_set(struct rte_eth_dev *dev, synqf = 0; } + filter_info->syn_info = synqf; E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); E1000_WRITE_FLUSH(hw); return 0; @@ -3655,11 +3689,6 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev, return ret; } -#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ - if ((type) != e1000_82580 && (type) != e1000_i350)\ - return -ENOSYS; \ -} while (0) - /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ static inline int ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, @@ -3722,6 +3751,54 @@ igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list, return NULL; } +/* inject a igb 2tuple filter to HW */ +static inline void +igb_inject_2uple_filter(struct rte_eth_dev *dev, + struct e1000_2tuple_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ttqf = E1000_TTQF_DISABLE_MASK; + uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; + int i; + + i = filter->index; + imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); + if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ + imir |= E1000_IMIR_PORT_BP; + else + imir &= ~E1000_IMIR_PORT_BP; + + imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; + + ttqf |= E1000_TTQF_QUEUE_ENABLE; + ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); + ttqf |= (uint32_t)(filter->filter_info.proto & + E1000_TTQF_PROTOCOL_MASK); + if (filter->filter_info.proto_mask == 0) + ttqf &= ~E1000_TTQF_MASK_ENABLE; + + /* tcp flags bits setting. */ + if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { + if (filter->filter_info.tcp_flags & TCP_URG_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_URG; + if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_ACK; + if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_PSH; + if (filter->filter_info.tcp_flags & TCP_RST_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_RST; + if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_SYN; + if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_FIN; + } else { + imir_ext |= E1000_IMIREXT_CTRL_BP; + } + E1000_WRITE_REG(hw, E1000_IMIR(i), imir); + E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); + E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); +} + /* * igb_add_2tuple_filter - add a 2tuple filter * @@ -3737,12 +3814,9 @@ static int igb_add_2tuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter) { - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct e1000_2tuple_filter *filter; - uint32_t ttqf = E1000_TTQF_DISABLE_MASK; - uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; int i, ret; filter = rte_zmalloc("e1000_2tuple_filter", @@ -3784,39 +3858,25 @@ igb_add_2tuple_filter(struct rte_eth_dev *dev, return -ENOSYS; } - imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); - if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. 
*/ - imir |= E1000_IMIR_PORT_BP; - else - imir &= ~E1000_IMIR_PORT_BP; + igb_inject_2uple_filter(dev, filter); + return 0; +} - imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; +int +igb_delete_2tuple_filter(struct rte_eth_dev *dev, + struct e1000_2tuple_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - ttqf |= E1000_TTQF_QUEUE_ENABLE; - ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); - ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK); - if (filter->filter_info.proto_mask == 0) - ttqf &= ~E1000_TTQF_MASK_ENABLE; + filter_info->twotuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); + rte_free(filter); - /* tcp flags bits setting. */ - if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { - if (filter->filter_info.tcp_flags & TCP_URG_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_URG; - if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_ACK; - if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_PSH; - if (filter->filter_info.tcp_flags & TCP_RST_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_RST; - if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_SYN; - if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_FIN; - } else - imir_ext |= E1000_IMIREXT_CTRL_BP; - E1000_WRITE_REG(hw, E1000_IMIR(i), imir); - E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); - E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); + E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); return 0; } @@ -3835,7 +3895,6 @@ static int igb_remove_2tuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter) { - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct e1000_2tuple_filter_info filter_2tuple; @@ -3855,16 +3914,50 @@ igb_remove_2tuple_filter(struct rte_eth_dev *dev, return -ENOENT; } - filter_info->twotuple_mask &= ~(1 << filter->index); - TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); - rte_free(filter); + igb_delete_2tuple_filter(dev, filter); - E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); - E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); - E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); return 0; } +/* inject a igb flex filter to HW */ +static inline void +igb_inject_flex_filter(struct rte_eth_dev *dev, + struct e1000_flex_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t wufc, queueing; + uint32_t reg_off; + uint8_t i, j = 0; + + wufc = E1000_READ_REG(hw, E1000_WUFC); + if (filter->index < E1000_MAX_FHFT) + reg_off = E1000_FHFT(filter->index); + else + reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); + + E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ | + (E1000_WUFC_FLX0 << filter->index)); + queueing = filter->filter_info.len | + (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) | + (filter->filter_info.priority << + E1000_FHFT_QUEUEING_PRIO_SHIFT); + E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, + queueing); + + for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) { + 
E1000_WRITE_REG(hw, reg_off, + filter->filter_info.dwords[j]); + reg_off += sizeof(uint32_t); + E1000_WRITE_REG(hw, reg_off, + filter->filter_info.dwords[++j]); + reg_off += sizeof(uint32_t); + E1000_WRITE_REG(hw, reg_off, + (uint32_t)filter->filter_info.mask[i]); + reg_off += sizeof(uint32_t) * 2; + ++j; + } +} + static inline struct e1000_flex_filter * eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list, struct e1000_flex_filter_info *key) @@ -3880,18 +3973,48 @@ eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list, return NULL; } -static int +/* remove a flex byte filter + * @param + * dev: Pointer to struct rte_eth_dev. + * filter: the pointer of the filter will be removed. + */ +void +igb_remove_flex_filter(struct rte_eth_dev *dev, + struct e1000_flex_filter *filter) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t wufc, i; + uint32_t reg_off; + + wufc = E1000_READ_REG(hw, E1000_WUFC); + if (filter->index < E1000_MAX_FHFT) + reg_off = E1000_FHFT(filter->index); + else + reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); + + for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++) + E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0); + + E1000_WRITE_REG(hw, E1000_WUFC, wufc & + (~(E1000_WUFC_FLX0 << filter->index))); + + filter_info->flex_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->flex_list, filter, entries); + rte_free(filter); +} + +int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, struct rte_eth_flex_filter *filter, bool add) { - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct e1000_flex_filter *flex_filter, *it; - uint32_t wufc, queueing, mask; - uint32_t reg_off; - uint8_t shift, i, j = 0; + uint32_t mask; + uint8_t shift, i; flex_filter = rte_zmalloc("e1000_flex_filter", sizeof(struct e1000_flex_filter), 0); @@ -3911,15 +4034,20 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, flex_filter->filter_info.mask[i] = mask; } - wufc = E1000_READ_REG(hw, E1000_WUFC); + it = eth_igb_flex_filter_lookup(&filter_info->flex_list, + &flex_filter->filter_info); + if (it == NULL && !add) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + rte_free(flex_filter); + return -ENOENT; + } + if (it != NULL && add) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(flex_filter); + return -EEXIST; + } if (add) { - if (eth_igb_flex_filter_lookup(&filter_info->flex_list, - &flex_filter->filter_info) != NULL) { - PMD_DRV_LOG(ERR, "filter exists."); - rte_free(flex_filter); - return -EEXIST; - } flex_filter->queue = filter->queue; /* * look for an unused flex filter index @@ -3941,52 +4069,10 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, return -ENOSYS; } - if (flex_filter->index < E1000_MAX_FHFT) - reg_off = E1000_FHFT(flex_filter->index); - else - reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT); - - E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ | - (E1000_WUFC_FLX0 << flex_filter->index)); - queueing = filter->len | - (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) | - (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT); - E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, - queueing); - for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) { - E1000_WRITE_REG(hw, reg_off, - flex_filter->filter_info.dwords[j]); - reg_off += 
sizeof(uint32_t); - E1000_WRITE_REG(hw, reg_off, - flex_filter->filter_info.dwords[++j]); - reg_off += sizeof(uint32_t); - E1000_WRITE_REG(hw, reg_off, - (uint32_t)flex_filter->filter_info.mask[i]); - reg_off += sizeof(uint32_t) * 2; - ++j; - } - } else { - it = eth_igb_flex_filter_lookup(&filter_info->flex_list, - &flex_filter->filter_info); - if (it == NULL) { - PMD_DRV_LOG(ERR, "filter doesn't exist."); - rte_free(flex_filter); - return -ENOENT; - } - - if (it->index < E1000_MAX_FHFT) - reg_off = E1000_FHFT(it->index); - else - reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT); + igb_inject_flex_filter(dev, flex_filter); - for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++) - E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0); - E1000_WRITE_REG(hw, E1000_WUFC, wufc & - (~(E1000_WUFC_FLX0 << it->index))); - - filter_info->flex_mask &= ~(1 << it->index); - TAILQ_REMOVE(&filter_info->flex_list, it, entries); - rte_free(it); + } else { + igb_remove_flex_filter(dev, it); rte_free(flex_filter); } @@ -4190,6 +4276,64 @@ igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list, return NULL; } +/* inject a igb 5-tuple filter to HW */ +static inline void +igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev, + struct e1000_5tuple_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; + uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; + uint8_t i; + + i = filter->index; + ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; + if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */ + ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP; + if (filter->filter_info.dst_ip_mask == 0) + ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP; + if (filter->filter_info.src_port_mask == 0) + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + if (filter->filter_info.proto_mask == 0) + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; + ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) & + E1000_FTQF_QUEUE_MASK; + ftqf |= E1000_FTQF_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf); + E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip); + E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip); + + spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT; + E1000_WRITE_REG(hw, E1000_SPQF(i), spqf); + + imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); + if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ + imir |= E1000_IMIR_PORT_BP; + else + imir &= ~E1000_IMIR_PORT_BP; + imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; + + /* tcp flags bits setting. 
*/ + if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { + if (filter->filter_info.tcp_flags & TCP_URG_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_URG; + if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_ACK; + if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_PSH; + if (filter->filter_info.tcp_flags & TCP_RST_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_RST; + if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_SYN; + if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_FIN; + } else { + imir_ext |= E1000_IMIREXT_CTRL_BP; + } + E1000_WRITE_REG(hw, E1000_IMIR(i), imir); + E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); +} + /* * igb_add_5tuple_filter_82576 - add a 5tuple filter * @@ -4205,12 +4349,9 @@ static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter) { - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct e1000_5tuple_filter *filter; - uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; - uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; uint8_t i; int ret; @@ -4254,50 +4395,29 @@ igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, return -ENOSYS; } - ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; - if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */ - ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP; - if (filter->filter_info.dst_ip_mask == 0) - ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP; - if (filter->filter_info.src_port_mask == 0) - ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; - if (filter->filter_info.proto_mask == 0) - ftqf &= ~E1000_FTQF_MASK_PROTO_BP; - ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) & - E1000_FTQF_QUEUE_MASK; - ftqf |= E1000_FTQF_QUEUE_ENABLE; - E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf); - E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip); - E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip); + igb_inject_5tuple_filter_82576(dev, filter); + return 0; +} - spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT; - E1000_WRITE_REG(hw, E1000_SPQF(i), spqf); +int +igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, + struct e1000_5tuple_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); - if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ - imir |= E1000_IMIR_PORT_BP; - else - imir &= ~E1000_IMIR_PORT_BP; - imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; + filter_info->fivetuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); - /* tcp flags bits setting. 
*/ - if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { - if (filter->filter_info.tcp_flags & TCP_URG_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_URG; - if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_ACK; - if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_PSH; - if (filter->filter_info.tcp_flags & TCP_RST_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_RST; - if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_SYN; - if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) - imir_ext |= E1000_IMIREXT_CTRL_FIN; - } else - imir_ext |= E1000_IMIREXT_CTRL_BP; - E1000_WRITE_REG(hw, E1000_IMIR(i), imir); - E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); + E1000_WRITE_REG(hw, E1000_FTQF(filter->index), + E1000_FTQF_VF_BP | E1000_FTQF_MASK); + E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); return 0; } @@ -4316,7 +4436,6 @@ static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter) { - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct e1000_5tuple_filter_info filter_5tuple; @@ -4336,17 +4455,8 @@ igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, return -ENOENT; } - filter_info->fivetuple_mask &= ~(1 << filter->index); - TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); - rte_free(filter); + igb_delete_5tuple_filter_82576(dev, filter); - E1000_WRITE_REG(hw, E1000_FTQF(filter->index), - E1000_FTQF_VF_BP | E1000_FTQF_MASK); - E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0); - E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0); - E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0); - E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); - E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); return 0; } @@ -4412,7 +4522,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) * - On success, zero. * - On failure, a negative value. 
*/ -static int +int igb_add_del_ntuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter, bool add) @@ -4434,7 +4544,9 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev, break; case RTE_2TUPLE_FLAGS: case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): - if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350) + if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 && + hw->mac.type != e1000_i210 && + hw->mac.type != e1000_i211) return -ENOTSUP; if (add) ret = igb_add_2tuple_filter(dev, ntuple_filter); @@ -4576,7 +4688,7 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info, int i; for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { - if (filter_info->ethertype_filters[i] == ethertype && + if (filter_info->ethertype_filters[i].ethertype == ethertype && (filter_info->ethertype_mask & (1 << i))) return i; } @@ -4585,33 +4697,35 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info, static inline int igb_ethertype_filter_insert(struct e1000_filter_info *filter_info, - uint16_t ethertype) + uint16_t ethertype, uint32_t etqf) { int i; for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { if (!(filter_info->ethertype_mask & (1 << i))) { filter_info->ethertype_mask |= 1 << i; - filter_info->ethertype_filters[i] = ethertype; + filter_info->ethertype_filters[i].ethertype = ethertype; + filter_info->ethertype_filters[i].etqf = etqf; return i; } } return -1; } -static inline int +int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, uint8_t idx) { if (idx >= E1000_MAX_ETQF_FILTERS) return -1; filter_info->ethertype_mask &= ~(1 << idx); - filter_info->ethertype_filters[idx] = 0; + filter_info->ethertype_filters[idx].ethertype = 0; + filter_info->ethertype_filters[idx].etqf = 0; return idx; } -static int +int igb_add_del_ethertype_filter(struct rte_eth_dev *dev, struct rte_eth_ethertype_filter *filter, bool add) @@ -4651,16 +4765,15 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev, } if (add) { + etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; + etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE); + etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT; ret = igb_ethertype_filter_insert(filter_info, - filter->ether_type); + filter->ether_type, etqf); if (ret < 0) { PMD_DRV_LOG(ERR, "ethertype filters are full."); return -ENOSYS; } - - etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; - etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE); - etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT; } else { ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret); if (ret < 0) @@ -4755,7 +4868,7 @@ eth_igb_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - int ret = -EINVAL; + int ret = 0; switch (filter_type) { case RTE_ETH_FILTER_NTUPLE: @@ -4770,6 +4883,11 @@ eth_igb_filter_ctrl(struct rte_eth_dev *dev, case RTE_ETH_FILTER_FLEXIBLE: ret = eth_igb_flex_filter_handle(dev, filter_op, arg); break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &igb_flow_ops; + break; default: PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", filter_type); @@ -5277,7 +5395,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t mask 
= 1 << queue_id; uint32_t regval; @@ -5350,7 +5468,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev) uint32_t vec = E1000_MISC_VEC_ID; uint32_t base = E1000_MISC_VEC_ID; uint32_t misc_shift = 0; - struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; /* won't configure msix register if no mapping is done @@ -5422,6 +5540,84 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev) E1000_WRITE_FLUSH(hw); } +/* restore n-tuple filter */ +static inline void +igb_ntuple_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter *p_5tuple; + struct e1000_2tuple_filter *p_2tuple; + + TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) { + igb_inject_5tuple_filter_82576(dev, p_5tuple); + } + + TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) { + igb_inject_2uple_filter(dev, p_2tuple); + } +} + +/* restore SYN filter */ +static inline void +igb_syn_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t synqf; + + synqf = filter_info->syn_info; + + if (synqf & E1000_SYN_FILTER_ENABLE) { + E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); + E1000_WRITE_FLUSH(hw); + } +} + +/* restore ethernet type filter */ +static inline void +igb_ethertype_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i; + + for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + E1000_WRITE_REG(hw, E1000_ETQF(i), + filter_info->ethertype_filters[i].etqf); + E1000_WRITE_FLUSH(hw); + } + } +} + +/* restore flex byte filter */ +static inline void +igb_flex_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter *flex_filter; + + TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) { + igb_inject_flex_filter(dev, flex_filter); + } +} + +/* restore all types filter */ +static int +igb_filter_restore(struct rte_eth_dev *dev) +{ + igb_ntuple_filter_restore(dev); + igb_ethertype_filter_restore(dev); + igb_syn_filter_restore(dev); + igb_flex_filter_restore(dev); + + return 0; +} + RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map); RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci"); |
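A second pattern in the patch: for single-register filters (SYN, ethertype) the driver now keeps the fully computed register image in software (filter_info->syn_info, ethertype_filters[i].etqf) and simply rewrites it on restore. A hedged, self-contained illustration of that idea; the demo_* names and the register layout are invented for this sketch, not taken from the e1000 headers.

/*
 * Illustrative only: cache a computed register image so it can be replayed
 * after the hardware is reset, as the patch does with SYNQF and ETQF.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FILTER_ENABLE  (1u << 31)
#define DEMO_QUEUE_SHIFT    16
#define DEMO_ETHERTYPE_MASK 0xFFFFu

static uint32_t demo_hw_reg;        /* stand-in for the ETQF/SYNQF register */
static uint32_t demo_cached_image;  /* software copy, survives a reset */

static void demo_write_reg(uint32_t v)
{
	demo_hw_reg = v;
	printf("reg <- 0x%08x\n", v);
}

/* Add path: compute the image once, cache it, then program the register. */
static void demo_filter_set(uint16_t ether_type, uint8_t queue)
{
	uint32_t image = DEMO_FILTER_ENABLE |
			 ((uint32_t)queue << DEMO_QUEUE_SHIFT) |
			 (ether_type & DEMO_ETHERTYPE_MASK);

	demo_cached_image = image;
	demo_write_reg(image);
}

/* Restore path: after a reset the register is cleared, but the cached image
 * lets the driver reprogram it without re-deriving anything. */
static void demo_filter_restore(void)
{
	if (demo_cached_image & DEMO_FILTER_ENABLE)
		demo_write_reg(demo_cached_image);
}

int main(void)
{
	demo_filter_set(0x88F7, 3);   /* e.g. a PTP ethertype */
	demo_hw_reg = 0;              /* simulate the reset done across stop/start */
	demo_filter_restore();        /* what igb_ethertype_filter_restore() mirrors */
	return 0;
}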