Diffstat (limited to 'drivers/net/i40e/i40e_ethdev.c')
-rw-r--r-- | drivers/net/i40e/i40e_ethdev.c | 1852 |
1 file changed, 1465 insertions, 387 deletions
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index bf7e5a05..4c49673f 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -40,10 +40,12 @@ #include <inttypes.h> #include <assert.h> +#include <rte_eal.h> #include <rte_string_fns.h> #include <rte_pci.h> #include <rte_ether.h> #include <rte_ethdev.h> +#include <rte_ethdev_pci.h> #include <rte_memzone.h> #include <rte_malloc.h> #include <rte_memcpy.h> @@ -51,6 +53,7 @@ #include <rte_dev.h> #include <rte_eth_ctrl.h> #include <rte_tailq.h> +#include <rte_hash_crc.h> #include "i40e_logs.h" #include "base/i40e_prototype.h" @@ -138,60 +141,6 @@ #define I40E_DEFAULT_DCB_APP_NUM 1 #define I40E_DEFAULT_DCB_APP_PRIO 3 -#define I40E_INSET_NONE 0x00000000000000000ULL - -/* bit0 ~ bit 7 */ -#define I40E_INSET_DMAC 0x0000000000000001ULL -#define I40E_INSET_SMAC 0x0000000000000002ULL -#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL -#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL -#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL - -/* bit 8 ~ bit 15 */ -#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL -#define I40E_INSET_IPV4_DST 0x0000000000000200ULL -#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL -#define I40E_INSET_IPV6_DST 0x0000000000000800ULL -#define I40E_INSET_SRC_PORT 0x0000000000001000ULL -#define I40E_INSET_DST_PORT 0x0000000000002000ULL -#define I40E_INSET_SCTP_VT 0x0000000000004000ULL - -/* bit 16 ~ bit 31 */ -#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL -#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL -#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL -#define I40E_INSET_IPV6_TC 0x0000000000080000ULL -#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL -#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL -#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL -#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL - -/* bit 32 ~ bit 47, tunnel fields */ -#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL -#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL -#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL -#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL -#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL -#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL - -/* bit 48 ~ bit 55 */ -#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL - -/* bit 56 ~ bit 63, Flex Payload */ -#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD \ - (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \ - I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \ - I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \ - I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8) - /** * Below are values for writing un-exposed registers suggested * by silicon experts @@ -284,11 +233,6 @@ #define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL #define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL -#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4)) -#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16 -#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \ - I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) 
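Aside (editor's note, not part of the patch): the I40E_INSET_* constants deleted above are not dropped by this series; they are presumably relocated to a shared driver header so the new flow code can reuse them, which is not visible in this single-file view. They follow the usual 64-bit field-bitmap pattern: one bit per packet field, grouped by byte ranges, with composite masks built by OR-ing the members (as I40E_INSET_FLEX_PAYLOAD does). A minimal, self-contained sketch of how such an input set is combined and tested, using values copied verbatim from the removed hunk:

/* Editor's sketch only: combine and test 64-bit input-set bits.
 * The define values are copied from the hunk deleted above. */
#include <stdint.h>
#include <stdio.h>

#define I40E_INSET_IPV4_SRC   0x0000000000000100ULL
#define I40E_INSET_IPV4_DST   0x0000000000000200ULL
#define I40E_INSET_SRC_PORT   0x0000000000001000ULL
#define I40E_INSET_DST_PORT   0x0000000000002000ULL
#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL

int main(void)
{
	/* An input set matching the classic IPv4/L4 5-tuple. */
	uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
			 I40E_INSET_IPV4_PROTO;

	if (inset & I40E_INSET_SRC_PORT)
		printf("L4 source port participates in the match\n");
	return 0;
}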
- /* PCI offset for querying capability */ #define PCI_DEV_CAP_REG 0xA4 /* PCI offset for enabling/disabling Extended Tag */ @@ -324,6 +268,8 @@ static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx); +static int i40e_fw_version_get(struct rte_eth_dev *dev, + char *fw_version, size_t fw_size); static void i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); static int i40e_vlan_filter_set(struct rte_eth_dev *dev, @@ -345,10 +291,10 @@ static int i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf); -static void i40e_macaddr_add(struct rte_eth_dev *dev, - struct ether_addr *mac_addr, - uint32_t index, - uint32_t pool); +static int i40e_macaddr_add(struct rte_eth_dev *dev, + struct ether_addr *mac_addr, + uint32_t index, + uint32_t pool); static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index); static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, @@ -373,8 +319,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw, uint64_t *offset, uint64_t *stat); static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue); -static void i40e_dev_interrupt_handler( - __rte_unused struct rte_intr_handle *handle, void *param); +static void i40e_dev_interrupt_handler(void *param); static int i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base, uint32_t num); static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool); @@ -388,10 +333,6 @@ static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi); static int i40e_pf_config_mq_rx(struct i40e_pf *pf); static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on); -static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, - struct i40e_macvlan_filter *mv_f, - int num, - struct ether_addr *addr); static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, struct i40e_macvlan_filter *mv_f, int num, @@ -406,9 +347,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel); static void i40e_filter_input_set_init(struct i40e_pf *pf); -static int i40e_ethertype_filter_set(struct i40e_pf *pf, - struct rte_eth_ethertype_filter *filter, - bool add); static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); @@ -461,6 +399,27 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int i40e_ethertype_filter_convert( + const struct rte_eth_ethertype_filter *input, + struct i40e_ethertype_filter *filter); +static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, + struct i40e_ethertype_filter *filter); + +static int i40e_tunnel_filter_convert( + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, + struct i40e_tunnel_filter *tunnel_filter); +static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, + struct i40e_tunnel_filter *tunnel_filter); +static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf); + +static void i40e_ethertype_filter_restore(struct i40e_pf *pf); +static void i40e_tunnel_filter_restore(struct i40e_pf *pf); +static void i40e_filter_restore(struct i40e_pf *pf); +static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev); + +int 
i40e_logtype_init; +int i40e_logtype_driver; + static const struct rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) }, @@ -503,6 +462,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .stats_reset = i40e_dev_stats_reset, .xstats_reset = i40e_dev_stats_reset, .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set, + .fw_version_get = i40e_fw_version_get, .dev_infos_get = i40e_dev_info_get, .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get, .vlan_filter_set = i40e_vlan_filter_set, @@ -520,6 +480,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .rx_queue_release = i40e_dev_rx_queue_release, .rx_queue_count = i40e_dev_rx_queue_count, .rx_descriptor_done = i40e_dev_rx_descriptor_done, + .rx_descriptor_status = i40e_dev_rx_descriptor_status, + .tx_descriptor_status = i40e_dev_tx_descriptor_status, .tx_queue_setup = i40e_dev_tx_queue_setup, .tx_queue_release = i40e_dev_tx_queue_release, .dev_led_on = i40e_dev_led_on, @@ -668,17 +630,23 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = { #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \ sizeof(rte_i40e_txq_prio_strings[0])) -static struct eth_driver rte_i40e_pmd = { - .pci_drv = { - .id_table = pci_id_i40e_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | - RTE_PCI_DRV_DETACHABLE, - .probe = rte_eth_dev_pci_probe, - .remove = rte_eth_dev_pci_remove, - }, - .eth_dev_init = eth_i40e_dev_init, - .eth_dev_uninit = eth_i40e_dev_uninit, - .dev_private_size = sizeof(struct i40e_adapter), +static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct i40e_adapter), eth_i40e_dev_init); +} + +static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit); +} + +static struct rte_pci_driver rte_i40e_pmd = { + .id_table = pci_id_i40e_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_i40e_pci_probe, + .remove = eth_i40e_pci_remove, }; static inline int @@ -709,8 +677,9 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev, return 0; } -RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv); +RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map); +RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio"); #ifndef I40E_GLQF_ORT #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) @@ -718,6 +687,9 @@ RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map); #ifndef I40E_GLQF_PIT #define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) #endif +#ifndef I40E_GLQF_L3_MAP +#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4)) +#endif static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) { @@ -763,8 +735,8 @@ i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf) pf->main_vsi_seid, 0, TRUE, NULL, NULL); if (ret) - PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control " - " frames from VSIs."); + PMD_INIT_LOG(ERR, + "Failed to add filter to drop flow control frames from VSIs."); } static int @@ -907,7 +879,7 @@ is_floating_veb_supported(struct rte_devargs *devargs) static void config_floating_veb(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = dev->pci_dev; + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); struct i40e_pf *pf = 
I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -928,9 +900,144 @@ config_floating_veb(struct rte_eth_dev *dev) #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT) static int +i40e_init_ethtype_filter_list(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype; + char ethertype_hash_name[RTE_HASH_NAMESIZE]; + int ret; + + struct rte_hash_parameters ethertype_hash_params = { + .name = ethertype_hash_name, + .entries = I40E_MAX_ETHERTYPE_FILTER_NUM, + .key_len = sizeof(struct i40e_ethertype_filter_input), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + /* Initialize ethertype filter rule list and hash */ + TAILQ_INIT(ðertype_rule->ethertype_list); + snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE, + "ethertype_%s", dev->data->name); + ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params); + if (!ethertype_rule->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!"); + return -EINVAL; + } + ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map", + sizeof(struct i40e_ethertype_filter *) * + I40E_MAX_ETHERTYPE_FILTER_NUM, + 0); + if (!ethertype_rule->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for ethertype hash map!"); + ret = -ENOMEM; + goto err_ethertype_hash_map_alloc; + } + + return 0; + +err_ethertype_hash_map_alloc: + rte_hash_free(ethertype_rule->hash_table); + + return ret; +} + +static int +i40e_init_tunnel_filter_list(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + char tunnel_hash_name[RTE_HASH_NAMESIZE]; + int ret; + + struct rte_hash_parameters tunnel_hash_params = { + .name = tunnel_hash_name, + .entries = I40E_MAX_TUNNEL_FILTER_NUM, + .key_len = sizeof(struct i40e_tunnel_filter_input), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + /* Initialize tunnel filter rule list and hash */ + TAILQ_INIT(&tunnel_rule->tunnel_list); + snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE, + "tunnel_%s", dev->data->name); + tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params); + if (!tunnel_rule->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!"); + return -EINVAL; + } + tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map", + sizeof(struct i40e_tunnel_filter *) * + I40E_MAX_TUNNEL_FILTER_NUM, + 0); + if (!tunnel_rule->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for tunnel hash map!"); + ret = -ENOMEM; + goto err_tunnel_hash_map_alloc; + } + + return 0; + +err_tunnel_hash_map_alloc: + rte_hash_free(tunnel_rule->hash_table); + + return ret; +} + +static int +i40e_init_fdir_filter_list(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_fdir_info *fdir_info = &pf->fdir; + char fdir_hash_name[RTE_HASH_NAMESIZE]; + int ret; + + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = I40E_MAX_FDIR_FILTER_NUM, + .key_len = sizeof(struct rte_eth_fdir_input), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + /* Initialize flow director filter rule list and hash */ + TAILQ_INIT(&fdir_info->fdir_list); + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, + "fdir_%s", 
dev->data->name); + fdir_info->hash_table = rte_hash_create(&fdir_hash_params); + if (!fdir_info->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map", + sizeof(struct i40e_fdir_filter *) * + I40E_MAX_FDIR_FILTER_NUM, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); + ret = -ENOMEM; + goto err_fdir_hash_map_alloc; + } + return 0; + +err_fdir_hash_map_alloc: + rte_hash_free(fdir_info->hash_table); + + return ret; +} + +static int eth_i40e_dev_init(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev; + struct rte_intr_handle *intr_handle; struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *vsi; @@ -943,6 +1050,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) dev->dev_ops = &i40e_eth_dev_ops; dev->rx_pkt_burst = i40e_recv_pkts; dev->tx_pkt_burst = i40e_xmit_pkts; + dev->tx_pkt_prepare = i40e_prep_pkts; /* for secondary processes, we don't initialise any further as primary * has already done this work. Only check we don't need a different @@ -952,9 +1060,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) i40e_set_tx_function(dev); return 0; } - pci_dev = dev->pci_dev; + i40e_set_default_ptype_table(dev); + pci_dev = I40E_DEV_TO_PCI(dev); + intr_handle = &pci_dev->intr_handle; rte_eth_copy_pci_info(dev, pci_dev); + dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); pf->adapter->eth_dev = dev; @@ -963,8 +1074,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) hw->back = I40E_PF_TO_ADAPTER(pf); hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr); if (!hw->hw_addr) { - PMD_INIT_LOG(ERR, "Hardware is not available, " - "as address is NULL"); + PMD_INIT_LOG(ERR, + "Hardware is not available, as address is NULL"); return -ENODEV; } @@ -1021,6 +1132,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) ((hw->nvm.version >> 4) & 0xff), (hw->nvm.version & 0xf), hw->nvm.eetrack); + /* initialise the L3_MAP register */ + ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40), + 0x00000028, NULL); + if (ret) + PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret); + /* Need the special FW version to support floating VEB */ config_floating_veb(dev); /* Clear PXE mode */ @@ -1100,8 +1217,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* Set the global registers with default ether type value */ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN); if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(ERR, "Failed to set the default outer " - "VLAN ether type"); + PMD_INIT_LOG(ERR, + "Failed to set the default outer VLAN ether type"); goto err_setup_pf_switch; } @@ -1137,26 +1254,35 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* Should be after VSI initialized */ dev->data->mac_addrs = rte_zmalloc("i40e", len, 0); if (!dev->data->mac_addrs) { - PMD_INIT_LOG(ERR, "Failed to allocated memory " - "for storing mac address"); + PMD_INIT_LOG(ERR, + "Failed to allocated memory for storing mac address"); goto err_mac_alloc; } ether_addr_copy((struct ether_addr *)hw->mac.perm_addr, &dev->data->mac_addrs[0]); + /* Init dcb to sw mode by default */ + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(INFO, "Failed to init dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + /* Update HW struct after DCB configuration */ + i40e_get_cap(hw); + /* initialize pf 
host driver to setup SRIOV resource if applicable */ i40e_pf_host_init(dev); /* register callback func to eal lib */ - rte_intr_callback_register(&(pci_dev->intr_handle), - i40e_dev_interrupt_handler, (void *)dev); + rte_intr_callback_register(intr_handle, + i40e_dev_interrupt_handler, dev); /* configure and enable device interrupt */ i40e_pf_config_irq0(hw, TRUE); i40e_pf_enable_irq0(hw); /* enable uio intr after callback register */ - rte_intr_enable(&(pci_dev->intr_handle)); + rte_intr_enable(intr_handle); /* * Add an ethertype filter to drop all flow control frames transmitted * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC @@ -1172,15 +1298,26 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialize mirror rule list */ TAILQ_INIT(&pf->mirror_list); - /* Init dcb to sw mode by default */ - ret = i40e_dcb_init_configure(dev, TRUE); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(INFO, "Failed to init dcb."); - pf->flags &= ~I40E_FLAG_DCB; - } + ret = i40e_init_ethtype_filter_list(dev); + if (ret < 0) + goto err_init_ethtype_filter_list; + ret = i40e_init_tunnel_filter_list(dev); + if (ret < 0) + goto err_init_tunnel_filter_list; + ret = i40e_init_fdir_filter_list(dev); + if (ret < 0) + goto err_init_fdir_filter_list; return 0; +err_init_fdir_filter_list: + rte_free(pf->tunnel.hash_table); + rte_free(pf->tunnel.hash_map); +err_init_tunnel_filter_list: + rte_free(pf->ethertype.hash_table); + rte_free(pf->ethertype.hash_map); +err_init_ethtype_filter_list: + rte_free(dev->data->mac_addrs); err_mac_alloc: i40e_vsi_release(pf->main_vsi); err_setup_pf_switch: @@ -1200,12 +1337,73 @@ err_sync_phy_type: return ret; } +static void +i40e_rm_ethtype_filter_list(struct i40e_pf *pf) +{ + struct i40e_ethertype_filter *p_ethertype; + struct i40e_ethertype_rule *ethertype_rule; + + ethertype_rule = &pf->ethertype; + /* Remove all ethertype filter rules and hash */ + if (ethertype_rule->hash_map) + rte_free(ethertype_rule->hash_map); + if (ethertype_rule->hash_table) + rte_hash_free(ethertype_rule->hash_table); + + while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) { + TAILQ_REMOVE(ðertype_rule->ethertype_list, + p_ethertype, rules); + rte_free(p_ethertype); + } +} + +static void +i40e_rm_tunnel_filter_list(struct i40e_pf *pf) +{ + struct i40e_tunnel_filter *p_tunnel; + struct i40e_tunnel_rule *tunnel_rule; + + tunnel_rule = &pf->tunnel; + /* Remove all tunnel director rules and hash */ + if (tunnel_rule->hash_map) + rte_free(tunnel_rule->hash_map); + if (tunnel_rule->hash_table) + rte_hash_free(tunnel_rule->hash_table); + + while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) { + TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules); + rte_free(p_tunnel); + } +} + +static void +i40e_rm_fdir_filter_list(struct i40e_pf *pf) +{ + struct i40e_fdir_filter *p_fdir; + struct i40e_fdir_info *fdir_info; + + fdir_info = &pf->fdir; + /* Remove all flow director rules and hash */ + if (fdir_info->hash_map) + rte_free(fdir_info->hash_map); + if (fdir_info->hash_table) + rte_hash_free(fdir_info->hash_table); + + while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) { + TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules); + rte_free(p_fdir); + } +} + static int eth_i40e_dev_uninit(struct rte_eth_dev *dev) { + struct i40e_pf *pf; struct rte_pci_device *pci_dev; + struct rte_intr_handle *intr_handle; struct i40e_hw *hw; struct i40e_filter_control_settings settings; + struct rte_flow *p_flow; int ret; uint8_t aq_fail = 0; @@ -1214,8 +1412,10 @@ eth_i40e_dev_uninit(struct 
rte_eth_dev *dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = dev->pci_dev; + pci_dev = I40E_DEV_TO_PCI(dev); + intr_handle = &pci_dev->intr_handle; if (hw->adapter_stopped == 0) i40e_dev_close(dev); @@ -1245,11 +1445,21 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) dev->data->mac_addrs = NULL; /* disable uio intr before callback unregister */ - rte_intr_disable(&(pci_dev->intr_handle)); + rte_intr_disable(intr_handle); /* register callback func to eal lib */ - rte_intr_callback_unregister(&(pci_dev->intr_handle), - i40e_dev_interrupt_handler, (void *)dev); + rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, dev); + + i40e_rm_ethtype_filter_list(pf); + i40e_rm_tunnel_filter_list(pf); + i40e_rm_fdir_filter_list(pf); + + /* Remove all flows */ + while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { + TAILQ_REMOVE(&pf->flow_list, p_flow, node); + rte_free(p_flow); + } return 0; } @@ -1315,6 +1525,8 @@ i40e_dev_configure(struct rte_eth_dev *dev) } } + TAILQ_INIT(&pf->flow_list); + return 0; err_dcb: @@ -1335,7 +1547,8 @@ void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_vect = vsi->msix_intr; uint16_t i; @@ -1448,7 +1661,8 @@ void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_vect = vsi->msix_intr; uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); @@ -1519,7 +1733,8 @@ static void i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t interval = i40e_calc_itr_interval(\ RTE_LIBRTE_I40E_ITR_INTERVAL); @@ -1550,7 +1765,8 @@ static void i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_intr, i; @@ -1629,7 +1845,7 @@ i40e_phy_conf_link(struct i40e_hw *hw, /* use get_phy_abilities_resp value for the rest */ phy_conf.phy_type = phy_ab.phy_type; phy_conf.phy_type_ext = phy_ab.phy_type_ext; - phy_conf.fec_config = phy_ab.mod_type_ext; + phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info; phy_conf.eee_capability = phy_ab.eee_capability; phy_conf.eeer = phy_ab.eeer_val; phy_conf.low_power_ctrl = phy_ab.d3_lpan; @@ -1676,8 +1892,10 @@ i40e_dev_start(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; int ret, i; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct 
rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; + struct i40e_vsi *vsi; hw->adapter_stopped = 0; @@ -1693,8 +1911,9 @@ i40e_dev_start(struct rte_eth_dev *dev) !RTE_ETH_DEV_SRIOV(dev).active) && dev->data->dev_conf.intr_conf.rxq != 0) { intr_vector = dev->data->nb_rx_queues; - if (rte_intr_efd_enable(intr_handle, intr_vector)) - return -1; + ret = rte_intr_efd_enable(intr_handle, intr_vector); + if (ret) + return ret; } if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { @@ -1703,8 +1922,9 @@ i40e_dev_start(struct rte_eth_dev *dev) dev->data->nb_rx_queues * sizeof(int), 0); if (!intr_handle->intr_vec) { - PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" - " intr_vec\n", dev->data->nb_rx_queues); + PMD_INIT_LOG(ERR, + "Failed to allocate %d rx_queues intr_vec", + dev->data->nb_rx_queues); return -ENOMEM; } } @@ -1754,6 +1974,15 @@ i40e_dev_start(struct rte_eth_dev *dev) PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); } + /* Enable the VLAN promiscuous mode. */ + if (pf->vfs) { + for (i = 0; i < pf->vf_num; i++) { + vsi = pf->vfs[i].vsi; + i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid, + true, NULL); + } + } + /* Apply link configure */ if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | @@ -1777,8 +2006,8 @@ i40e_dev_start(struct rte_eth_dev *dev) i40e_pf_enable_irq0(hw); if (dev->data->dev_conf.intr_conf.lsc != 0) - PMD_INIT_LOG(INFO, "lsc won't enable because of" - " no intr multiplex\n"); + PMD_INIT_LOG(INFO, + "lsc won't enable because of no intr multiplex"); } else if (dev->data->dev_conf.intr_conf.lsc != 0) { ret = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | @@ -1794,6 +2023,8 @@ i40e_dev_start(struct rte_eth_dev *dev) /* enable uio intr after callback register */ rte_intr_enable(intr_handle); + i40e_filter_restore(pf); + return I40E_SUCCESS; err_up: @@ -1809,7 +2040,8 @@ i40e_dev_stop(struct rte_eth_dev *dev) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; struct i40e_mirror_rule *p_mirror; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int i; /* Disable all queues */ @@ -1860,6 +2092,8 @@ i40e_dev_close(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t reg; int i; @@ -1871,7 +2105,7 @@ i40e_dev_close(struct rte_eth_dev *dev) /* Disable interrupt */ i40e_pf_disable_irq0(hw); - rte_intr_disable(&(dev->pci_dev->intr_handle)); + rte_intr_disable(intr_handle); /* shutdown and destroy the HMC */ i40e_shutdown_lan_hmc(hw); @@ -2069,6 +2303,8 @@ out: if (link.link_status == old.link_status) return -1; + i40e_notify_all_vfs_link_status(dev); + return 0; } @@ -2579,19 +2815,49 @@ i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev, return -ENOSYS; } +static int +i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 full_ver; + u8 ver, patch; + u16 build; + int ret; + + full_ver = hw->nvm.oem_ver; + ver = (u8)(full_ver >> 24); + build = (u16)((full_ver >> 8) & 0xffff); + 
patch = (u8)(full_ver & 0xff); + + ret = snprintf(fw_version, fw_size, + "%d.%d%d 0x%08x %d.%d.%d", + ((hw->nvm.version >> 12) & 0xf), + ((hw->nvm.version >> 4) & 0xff), + (hw->nvm.version & 0xf), hw->nvm.eetrack, + ver, build, patch); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (u32)ret) + return ret; + else + return 0; +} + static void i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *vsi = pf->main_vsi; + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = vsi->nb_qps; dev_info->max_tx_queues = vsi->nb_qps; dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; dev_info->max_mac_addrs = vsi->max_macaddrs; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_QINQ_STRIP | @@ -2648,6 +2914,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .nb_max = I40E_MAX_RING_DESC, .nb_min = I40E_MIN_RING_DESC, .nb_align = I40E_ALIGN_RING_DESC, + .nb_seg_max = I40E_TX_MAX_SEG, + .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG, }; if (pf->flags & I40E_FLAG_VMDQ) { @@ -2708,7 +2976,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, else { ret = -EINVAL; PMD_DRV_LOG(ERR, - "Unsupported vlan type in single vlan.\n"); + "Unsupported vlan type in single vlan."); return ret; } break; @@ -2720,13 +2988,15 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), ®_r, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Fail to debug read from " - "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); + PMD_DRV_LOG(ERR, + "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]", + reg_id); ret = -EIO; return ret; } - PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: " - "0x%08"PRIx64"", reg_id, reg_r); + PMD_DRV_LOG(DEBUG, + "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64, + reg_id, reg_r); reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK)); reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT); @@ -2740,12 +3010,14 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, reg_w, NULL); if (ret != I40E_SUCCESS) { ret = -EIO; - PMD_DRV_LOG(ERR, "Fail to debug write to " - "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); + PMD_DRV_LOG(ERR, + "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]", + reg_id); return ret; } - PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to " - "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id); + PMD_DRV_LOG(DEBUG, + "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]", + reg_w, reg_id); return ret; } @@ -2889,8 +3161,9 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT; if ((fc_conf->high_water > max_high_water) || (fc_conf->high_water < fc_conf->low_water)) { - PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, " - "High_water must <= %d.", max_high_water); + PMD_INIT_LOG(ERR, + "Invalid high/low water setup value in KB, High_water must be <= %d.", + max_high_water); return -EINVAL; } @@ -2994,7 +3267,7 @@ i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev, } /* Add a MAC address, and update filters */ -static void +static int i40e_macaddr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, __rte_unused uint32_t 
index, @@ -3011,13 +3284,13 @@ i40e_macaddr_add(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u", pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled", pool); - return; + return -ENOTSUP; } if (pool > pf->nb_cfg_vmdq_vsi) { PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u", pool, pf->nb_cfg_vmdq_vsi); - return; + return -EINVAL; } (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); @@ -3034,8 +3307,9 @@ i40e_macaddr_add(struct rte_eth_dev *dev, ret = i40e_vsi_add_mac(vsi, &mac_filter); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); - return; + return -ENODEV; } + return 0; } /* Remove a MAC address, and update filters */ @@ -3062,8 +3336,8 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) /* No VMDQ pool enabled or configured */ if (!(pf->flags & I40E_FLAG_VMDQ) || (i > pf->nb_cfg_vmdq_vsi)) { - PMD_DRV_LOG(ERR, "No VMDQ pool enabled" - "/configured"); + PMD_DRV_LOG(ERR, + "No VMDQ pool enabled/configured"); return; } vsi = pf->vmdq[i - 1].vsi; @@ -3264,9 +3538,9 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev, if (reta_size != lut_size || reta_size > ETH_RSS_RETA_SIZE_512) { - PMD_DRV_LOG(ERR, "The size of hash lookup table configured " - "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, lut_size); + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", + reta_size, lut_size); return -EINVAL; } @@ -3305,9 +3579,9 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev, if (reta_size != lut_size || reta_size > ETH_RSS_RETA_SIZE_512) { - PMD_DRV_LOG(ERR, "The size of hash lookup table configured " - "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, lut_size); + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", + reta_size, lut_size); return -EINVAL; } @@ -3362,8 +3636,9 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, mem->va = mz->addr; mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); mem->zone = (const void *)mz; - PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: " - "%"PRIu64, mz->name, mem->pa); + PMD_DRV_LOG(DEBUG, + "memzone %s allocated with physical address: %"PRIu64, + mz->name, mem->pa); return I40E_SUCCESS; } @@ -3380,9 +3655,9 @@ i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, if (!mem) return I40E_ERR_PARAM; - PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: " - "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name, - mem->pa); + PMD_DRV_LOG(DEBUG, + "memzone %s to be freed with physical address: %"PRIu64, + ((const struct rte_memzone *)mem->zone)->name, mem->pa); rte_memzone_free((const struct rte_memzone *)mem->zone); mem->zone = NULL; mem->va = NULL; @@ -3493,9 +3768,10 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); uint16_t qp_count = 0, vsi_count = 0; - if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) { + if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) { PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV"); return -EINVAL; } @@ -3536,13 +3812,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) /* VF queue/VSI allocation */ pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps; - if (hw->func_caps.sr_iov_1_1 && 
dev->pci_dev->max_vfs) { + if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) { pf->flags |= I40E_FLAG_SRIOV; pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; - pf->vf_num = dev->pci_dev->max_vfs; - PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, " - "in total %u queues", pf->vf_num, pf->vf_nb_qps, - pf->vf_nb_qps * pf->vf_num); + pf->vf_num = pci_dev->max_vfs; + PMD_DRV_LOG(DEBUG, + "%u VF VSIs, %u queues per VF VSI, in total %u queues", + pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num); } else { pf->vf_nb_qps = 0; pf->vf_num = 0; @@ -3570,14 +3846,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) if (pf->max_nb_vmdq_vsi) { pf->flags |= I40E_FLAG_VMDQ; pf->vmdq_nb_qps = pf->vmdq_nb_qp_max; - PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues " - "per VMDQ VSI, in total %u queues", - pf->max_nb_vmdq_vsi, - pf->vmdq_nb_qps, pf->vmdq_nb_qps * - pf->max_nb_vmdq_vsi); + PMD_DRV_LOG(DEBUG, + "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues", + pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps, + pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi); } else { - PMD_DRV_LOG(INFO, "No enough queues left for " - "VMDq"); + PMD_DRV_LOG(INFO, + "No enough queues left for VMDq"); } } else { PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq"); @@ -3590,15 +3865,15 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->flags |= I40E_FLAG_DCB; if (qp_count > hw->func_caps.num_tx_qp) { - PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds " - "the hardware maximum %u", qp_count, - hw->func_caps.num_tx_qp); + PMD_DRV_LOG(ERR, + "Failed to allocate %u queues, which exceeds the hardware maximum %u", + qp_count, hw->func_caps.num_tx_qp); return -EINVAL; } if (vsi_count > hw->func_caps.num_vsis) { - PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds " - "the hardware maximum %u", vsi_count, - hw->func_caps.num_vsis); + PMD_DRV_LOG(ERR, + "Failed to allocate %u VSIs, which exceeds the hardware maximum %u", + vsi_count, hw->func_caps.num_vsis); return -EINVAL; } @@ -3844,8 +4119,8 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool, */ entry = rte_zmalloc("res_pool", sizeof(*entry), 0); if (entry == NULL) { - PMD_DRV_LOG(ERR, "Failed to allocate memory for " - "resource pool"); + PMD_DRV_LOG(ERR, + "Failed to allocate memory for resource pool"); return -ENOMEM; } entry->base = valid_entry->base; @@ -3885,9 +4160,9 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap) } if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) { - PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to " - "HW support 0x%x", hw->func_caps.enabled_tcmap, - enabled_tcmap); + PMD_DRV_LOG(ERR, + "Enabled TC map 0x%x not applicable to HW support 0x%x", + hw->func_caps.enabled_tcmap, enabled_tcmap); return I40E_NOT_SUPPORTED; } return I40E_SUCCESS; @@ -4105,12 +4380,13 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) hw->aq.asq_last_status); goto fail; } + veb->enabled_tc = I40E_DEFAULT_TCMAP; /* get statistics index */ ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL, &veb->stats_idx, NULL, NULL, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d", + PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d", hw->aq.asq_last_status); goto fail; } @@ -4232,8 +4508,8 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) struct i40e_mac_filter *f; struct ether_addr *mac; - PMD_DRV_LOG(WARNING, "Cannot remove the default " - "macvlan filter"); + PMD_DRV_LOG(DEBUG, + "Cannot remove the default macvlan filter"); /* It needs to 
add the permanent mac into mac list */ f = rte_zmalloc("macv_filter", sizeof(*f), 0); if (f == NULL) { @@ -4283,8 +4559,9 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi) ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &ets_sla_config, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith " - "configuration %u", hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "VSI failed to get TC bandwdith configuration %u", + hw->aq.asq_last_status); return ret; } @@ -4351,7 +4628,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) - PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n", + PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d", hw->aq.asq_last_status); } @@ -4372,14 +4649,14 @@ i40e_vsi_setup(struct i40e_pf *pf, if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV && uplink_vsi == NULL) { - PMD_DRV_LOG(ERR, "VSI setup failed, " - "VSI link shouldn't be NULL"); + PMD_DRV_LOG(ERR, + "VSI setup failed, VSI link shouldn't be NULL"); return NULL; } if (type == I40E_VSI_MAIN && uplink_vsi != NULL) { - PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI " - "uplink VSI should be NULL"); + PMD_DRV_LOG(ERR, + "VSI setup failed, MAIN VSI uplink VSI should be NULL"); return NULL; } @@ -4423,6 +4700,8 @@ i40e_vsi_setup(struct i40e_pf *pf, vsi->max_macaddrs = I40E_NUM_MACADDR_MAX; vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi; vsi->user_param = user_param; + vsi->vlan_anti_spoof_on = 0; + vsi->vlan_filter_on = 0; /* Allocate queues */ switch (vsi->type) { case I40E_VSI_MAIN : @@ -4530,8 +4809,8 @@ i40e_vsi_setup(struct i40e_pf *pf, ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping"); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); goto fail_msix_alloc; } ctxt.seid = vsi->seid; @@ -4599,13 +4878,14 @@ i40e_vsi_setup(struct i40e_pf *pf, rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, - I40E_DEFAULT_TCMAP); + hw->func_caps.enabled_tcmap); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping"); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); goto fail_msix_alloc; } - ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + + ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap; ctxt.info.valid_sections |= rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); /** @@ -4644,8 +4924,8 @@ i40e_vsi_setup(struct i40e_pf *pf, ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping"); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); goto fail_msix_alloc; } ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; @@ -4662,8 +4942,8 @@ i40e_vsi_setup(struct i40e_pf *pf, ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping."); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping."); goto fail_msix_alloc; } ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; @@ -4926,8 +5206,9 @@ i40e_pf_setup(struct i40e_pf *pf) /* make queue allocated first, let FDIR use queue pair 0*/ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR); if (ret != I40E_FDIR_QUEUE_ID) { - PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :" - " ret 
=%d", ret); + PMD_DRV_LOG(ERR, + "queue allocation fails for FDIR: ret =%d", + ret); pf->flags &= ~I40E_FLAG_FDIR; } } @@ -4946,12 +5227,12 @@ i40e_pf_setup(struct i40e_pf *pf) else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512) settings.hash_lut_size = I40E_HASH_LUT_SIZE_512; else { - PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n", - hw->func_caps.rss_table_size); + PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported", + hw->func_caps.rss_table_size); return I40E_ERR_PARAM; } - PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table " - "size: %u\n", hw->func_caps.rss_table_size); + PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u", + hw->func_caps.rss_table_size); pf->hash_lut_size = hw->func_caps.rss_table_size; /* Enable ethtype and macvlan filters */ @@ -5201,8 +5482,8 @@ i40e_dev_rx_init(struct i40e_pf *pf) ret = i40e_rx_queue_init(rxq); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to do RX queue " - "initialization"); + PMD_DRV_LOG(ERR, + "Failed to do RX queue initialization"); break; } } @@ -5452,18 +5733,10 @@ static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct i40e_virtchnl_pf_event event; int i; - event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; - event.event_data.link_event.link_status = - dev->data->dev_link.link_status; - event.event_data.link_event.link_speed = - (enum i40e_aq_link_speed)dev->data->dev_link.link_speed; - for (i = 0; i < pf->vf_num; i++) - i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT, - I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); + i40e_notify_vf_link_status(dev, &pf->vfs[i]); } static void @@ -5486,8 +5759,9 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) ret = i40e_clean_arq_element(hw, &info, &pending); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, " - "aq_err: %u", hw->aq.asq_last_status); + PMD_DRV_LOG(INFO, + "Failed to read msg from AdminQ, aq_err: %u", + hw->aq.asq_last_status); break; } opcode = rte_le_to_cpu_16(info.desc.opcode); @@ -5504,14 +5778,12 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) break; case i40e_aqc_opc_get_link_status: ret = i40e_dev_link_update(dev, 0); - if (!ret) { - i40e_notify_all_vfs_link_status(dev); + if (!ret) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); - } break; default: - PMD_DRV_LOG(ERR, "Request %u is not supported yet", + PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", opcode); break; } @@ -5532,8 +5804,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) * void */ static void -i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, - void *param) +i40e_dev_interrupt_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -5550,7 +5821,6 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, PMD_DRV_LOG(INFO, "No interrupt event"); goto done; } -#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) @@ -5565,7 +5835,6 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, PMD_DRV_LOG(ERR, "ICR0: HMC error"); if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK) PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error"); -#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 
PMD_DRV_LOG(INFO, "ICR0: VF reset detected"); @@ -5579,10 +5848,10 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, done: /* Enable interrupt */ i40e_pf_enable_irq0(hw); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(dev->intr_handle); } -static int +int i40e_add_macvlan_filters(struct i40e_vsi *vsi, struct i40e_macvlan_filter *filter, int total) @@ -5632,7 +5901,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH; break; default: - PMD_DRV_LOG(ERR, "Invalid MAC match type\n"); + PMD_DRV_LOG(ERR, "Invalid MAC match type"); ret = I40E_ERR_PARAM; goto DONE; } @@ -5656,7 +5925,7 @@ DONE: return ret; } -static int +int i40e_remove_macvlan_filters(struct i40e_vsi *vsi, struct i40e_macvlan_filter *filter, int total) @@ -5707,7 +5976,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH; break; default: - PMD_DRV_LOG(ERR, "Invalid MAC filter type\n"); + PMD_DRV_LOG(ERR, "Invalid MAC filter type"); ret = I40E_ERR_PARAM; goto DONE; } @@ -5762,14 +6031,11 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi, } static void -i40e_set_vlan_filter(struct i40e_vsi *vsi, - uint16_t vlan_id, bool on) +i40e_store_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id, bool on) { uint32_t vid_idx, vid_bit; - if (vlan_id > ETH_VLAN_ID_MAX) - return; - vid_idx = I40E_VFTA_IDX(vlan_id); vid_bit = I40E_VFTA_BIT(vlan_id); @@ -5779,11 +6045,43 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, vsi->vfta[vid_idx] &= ~vid_bit; } +void +i40e_set_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id, bool on) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0}; + int ret; + + if (vlan_id > ETH_VLAN_ID_MAX) + return; + + i40e_store_vlan_filter(vsi, vlan_id, on); + + if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id) + return; + + vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id); + + if (on) { + ret = i40e_aq_add_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to add vlan filter"); + } else { + ret = i40e_aq_remove_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, + "Failed to remove vlan filter"); + } +} + /** * Find all vlan options for specific mac addr, * return with actual vlan found. 
*/ -static inline int +int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, struct i40e_macvlan_filter *mv_f, int num, struct ether_addr *addr) @@ -5804,8 +6102,8 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) { if (vsi->vfta[j] & (1 << k)) { if (i > num - 1) { - PMD_DRV_LOG(ERR, "vlan number " - "not match"); + PMD_DRV_LOG(ERR, + "vlan number doesn't match"); return I40E_ERR_PARAM; } (void)rte_memcpy(&mv_f[i].macaddr, @@ -6098,7 +6396,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) if (filter_type == RTE_MACVLAN_PERFECT_MATCH || filter_type == RTE_MACVLAN_HASH_MATCH) { if (vlan_num == 0) { - PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n"); + PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); return I40E_ERR_PARAM; } } else if (filter_type == RTE_MAC_PERFECT_MATCH || @@ -6207,18 +6505,14 @@ i40e_parse_hena(uint64_t flags) rss_hf |= ETH_RSS_FRAG_IPV4; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; -#ifdef X722_SUPPORT if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK)) rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; -#endif if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; -#ifdef X722_SUPPORT if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)) rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP)) rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; -#endif if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP)) rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)) @@ -6227,18 +6521,14 @@ i40e_parse_hena(uint64_t flags) rss_hf |= ETH_RSS_FRAG_IPV6; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; -#ifdef X722_SUPPORT if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; -#endif if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; -#ifdef X722_SUPPORT if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)) rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; -#endif if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP)) rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER)) @@ -6289,8 +6579,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); if (ret) - PMD_INIT_LOG(ERR, "Failed to configure RSS key " - "via AQ"); + PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ"); } else { uint32_t *hash_key = (uint32_t *)key; uint16_t i; @@ -6436,7 +6725,95 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) return 0; } +/* Convert tunnel filter structure */ +static int +i40e_tunnel_filter_convert( + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter, + struct i40e_tunnel_filter *tunnel_filter) +{ + ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac, + (struct ether_addr *)&tunnel_filter->input.outer_mac); + ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac, + (struct ether_addr *)&tunnel_filter->input.inner_mac); + tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan; + if ((rte_le_to_cpu_16(cld_filter->element.flags) & + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) == + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6; + else + 
tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4; + tunnel_filter->input.flags = cld_filter->element.flags; + tunnel_filter->input.tenant_id = cld_filter->element.tenant_id; + tunnel_filter->queue = cld_filter->element.queue_number; + rte_memcpy(tunnel_filter->input.general_fields, + cld_filter->general_fields, + sizeof(cld_filter->general_fields)); + + return 0; +} + +/* Check if there exists the tunnel filter */ +struct i40e_tunnel_filter * +i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule, + const struct i40e_tunnel_filter_input *input) +{ + int ret; + + ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input); + if (ret < 0) + return NULL; + + return tunnel_rule->hash_map[ret]; +} + +/* Add a tunnel filter into the SW list */ static int +i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, + struct i40e_tunnel_filter *tunnel_filter) +{ + struct i40e_tunnel_rule *rule = &pf->tunnel; + int ret; + + ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert tunnel filter to hash table %d!", + ret); + return ret; + } + rule->hash_map[ret] = tunnel_filter; + + TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules); + + return 0; +} + +/* Delete a tunnel filter from the SW list */ +int +i40e_sw_tunnel_filter_del(struct i40e_pf *pf, + struct i40e_tunnel_filter_input *input) +{ + struct i40e_tunnel_rule *rule = &pf->tunnel; + struct i40e_tunnel_filter *tunnel_filter; + int ret; + + ret = rte_hash_del_key(rule->hash_table, input); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to delete tunnel filter to hash table %d!", + ret); + return ret; + } + tunnel_filter = rule->hash_map[ret]; + rule->hash_map[ret] = NULL; + + TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules); + rte_free(tunnel_filter); + + return 0; +} + +int i40e_dev_tunnel_filter_set(struct i40e_pf *pf, struct rte_eth_tunnel_filter_conf *tunnel_filter, uint8_t add) @@ -6449,37 +6826,44 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, int val, ret = 0; struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_vsi *vsi = pf->main_vsi; - struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter; - struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + struct i40e_tunnel_filter *tunnel, *node; + struct i40e_tunnel_filter check_filter; /* Check if filter exists */ cld_filter = rte_zmalloc("tunnel_filter", - sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data), - 0); + sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), + 0); if (NULL == cld_filter) { PMD_DRV_LOG(ERR, "Failed to alloc memory."); - return -EINVAL; + return -ENOMEM; } pfilter = cld_filter; - ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac); - ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac); + ether_addr_copy(&tunnel_filter->outer_mac, + (struct ether_addr *)&pfilter->element.outer_mac); + ether_addr_copy(&tunnel_filter->inner_mac, + (struct ether_addr *)&pfilter->element.inner_mac); - pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan); + pfilter->element.inner_vlan = + rte_cpu_to_le_16(tunnel_filter->inner_vlan); if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); - 
rte_memcpy(&pfilter->ipaddr.v4.data, + rte_memcpy(&pfilter->element.ipaddr.v4.data, &rte_cpu_to_le_32(ipv4_addr), - sizeof(pfilter->ipaddr.v4.data)); + sizeof(pfilter->element.ipaddr.v4.data)); } else { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; for (i = 0; i < 4; i++) { convert_ipv6[i] = rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i])); } - rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6, - sizeof(pfilter->ipaddr.v6.data)); + rte_memcpy(&pfilter->element.ipaddr.v6.data, + &convert_ipv6, + sizeof(pfilter->element.ipaddr.v6.data)); } /* check tunneled type */ @@ -6501,23 +6885,369 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, } val = i40e_dev_get_filter_type(tunnel_filter->filter_type, - &pfilter->flags); + &pfilter->element.flags); if (val < 0) { rte_free(cld_filter); return -EINVAL; } - pfilter->flags |= rte_cpu_to_le_16( + pfilter->element.flags |= rte_cpu_to_le_16( I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); - pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); - pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id); + pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->element.queue_number = + rte_cpu_to_le_16(tunnel_filter->queue_id); + + /* Check if there is the filter in SW list */ + memset(&check_filter, 0, sizeof(check_filter)); + i40e_tunnel_filter_convert(cld_filter, &check_filter); + node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); + if (add && node) { + PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + return -EINVAL; + } - if (add) - ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1); - else + if (!add && !node) { + PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); + return -EINVAL; + } + + if (add) { + ret = i40e_aq_add_cloud_filters(hw, + vsi->seid, &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); + return -ENOTSUP; + } + tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); + ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + } else { ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, - cld_filter, 1); + &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); + return -ENOTSUP; + } + ret = i40e_sw_tunnel_filter_del(pf, &node->input); + } + + rte_free(cld_filter); + return ret; +} + +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48 +#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4 +#define I40E_TR_GENEVE_KEY_MASK 0x8 +#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40 +#define I40E_TR_GRE_KEY_MASK 0x400 +#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800 +#define I40E_TR_GRE_NO_KEY_MASK 0x8000 + +static enum +i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_status_code status = I40E_SUCCESS; + + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace.tr_bit = 0; + + /* Prepare the buffer, 3 entries */ + filter_replace_buf.data[0] = + 
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[7] = 0xF0; + filter_replace_buf.data[8] + = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0; + filter_replace_buf.data[8] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK | + I40E_TR_GENEVE_KEY_MASK | + I40E_TR_GENERIC_UDP_TUNNEL_MASK; + filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK | + I40E_TR_GRE_KEY_WITH_XSUM_MASK | + I40E_TR_GRE_NO_KEY_MASK) >> 8; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + return status; +} + +static enum +i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_status_code status = I40E_SUCCESS; + + /* For MPLSoUDP */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER | + I40E_AQC_MIRROR_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (status < 0) + return status; + + /* For MPLSoGRE */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER | + I40E_AQC_MIRROR_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + return status; +} + +int +i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, + struct i40e_tunnel_filter_conf *tunnel_filter, + uint8_t add) +{ + uint16_t ip_type; + uint32_t ipv4_addr; + uint8_t i, tun_type = 0; + /* internal variable to convert ipv6 byte order */ + uint32_t convert_ipv6[4]; + int val, ret = 0; + struct i40e_pf_vf *vf = NULL; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter; + struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter; + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + struct 
i40e_tunnel_filter *tunnel, *node; + struct i40e_tunnel_filter check_filter; /* Check if filter exists */ + uint32_t teid_le; + bool big_buffer = 0; + + cld_filter = rte_zmalloc("tunnel_filter", + sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), + 0); + + if (cld_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + pfilter = cld_filter; + + ether_addr_copy(&tunnel_filter->outer_mac, + (struct ether_addr *)&pfilter->element.outer_mac); + ether_addr_copy(&tunnel_filter->inner_mac, + (struct ether_addr *)&pfilter->element.inner_mac); + + pfilter->element.inner_vlan = + rte_cpu_to_le_16(tunnel_filter->inner_vlan); + if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; + ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); + rte_memcpy(&pfilter->element.ipaddr.v4.data, + &rte_cpu_to_le_32(ipv4_addr), + sizeof(pfilter->element.ipaddr.v4.data)); + } else { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; + for (i = 0; i < 4; i++) { + convert_ipv6[i] = + rte_cpu_to_le_32(rte_be_to_cpu_32( + tunnel_filter->ip_addr.ipv6_addr[i])); + } + rte_memcpy(&pfilter->element.ipaddr.v6.data, + &convert_ipv6, + sizeof(pfilter->element.ipaddr.v6.data)); + } + + /* check tunneled type */ + switch (tunnel_filter->tunnel_type) { + case I40E_TUNNEL_TYPE_VXLAN: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN; + break; + case I40E_TUNNEL_TYPE_NVGRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC; + break; + case I40E_TUNNEL_TYPE_IP_IN_GRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP; + break; + case I40E_TUNNEL_TYPE_MPLSoUDP: + if (!pf->mpls_replace_flag) { + i40e_replace_mpls_l1_filter(pf); + i40e_replace_mpls_cloud_filter(pf); + pf->mpls_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] = + teid_le >> 4; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + (teid_le & 0xF) << 12; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = + 0x40; + big_buffer = 1; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP; + break; + case I40E_TUNNEL_TYPE_MPLSoGRE: + if (!pf->mpls_replace_flag) { + i40e_replace_mpls_l1_filter(pf); + i40e_replace_mpls_cloud_filter(pf); + pf->mpls_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] = + teid_le >> 4; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + (teid_le & 0xF) << 12; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = + 0x0; + big_buffer = 1; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE; + break; + case I40E_TUNNEL_TYPE_QINQ: + if (!pf->qinq_replace_flag) { + ret = i40e_cloud_filter_qinq_create(pf); + if (ret < 0) + PMD_DRV_LOG(DEBUG, + "QinQ tunnel filter already created."); + pf->qinq_replace_flag = 1; + } + /* Add in the General fields the values of + * the Outer and Inner VLAN + * Big Buffer should be set, see changes in + * i40e_aq_add_cloud_filters + */ + pfilter->general_fields[0] = tunnel_filter->inner_vlan; + pfilter->general_fields[1] = tunnel_filter->outer_vlan; + big_buffer = 1; + break; + default: + /* Other tunnel types is not supported. 
*/
+		PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
+		pfilter->element.flags =
+			I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
+		pfilter->element.flags |=
+			I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	else {
+		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+					       &pfilter->element.flags);
+		if (val < 0) {
+			rte_free(cld_filter);
+			return -EINVAL;
+		}
+	}
+
+	pfilter->element.flags |= rte_cpu_to_le_16(
+		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+	pfilter->element.queue_number =
+		rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+	if (!tunnel_filter->is_to_vf)
+		vsi = pf->main_vsi;
+	else {
+		if (tunnel_filter->vf_id >= pf->vf_num) {
+			PMD_DRV_LOG(ERR, "Invalid argument.");
+			rte_free(cld_filter);
+			return -EINVAL;
+		}
+		vf = &pf->vfs[tunnel_filter->vf_id];
+		vsi = vf->vsi;
+	}
+
+	/* Check if there is the filter in SW list */
+	memset(&check_filter, 0, sizeof(check_filter));
+	i40e_tunnel_filter_convert(cld_filter, &check_filter);
+	check_filter.is_to_vf = tunnel_filter->is_to_vf;
+	check_filter.vf_id = tunnel_filter->vf_id;
+	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+	if (add && node) {
+		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	if (!add && !node) {
+		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	if (add) {
+		if (big_buffer)
+			ret = i40e_aq_add_cloud_filters_big_buffer(hw,
+					   vsi->seid, cld_filter, 1);
+		else
+			ret = i40e_aq_add_cloud_filters(hw,
+					vsi->seid, &cld_filter->element, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+			rte_free(cld_filter);
+			return -ENOTSUP;
+		}
+		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+	} else {
+		if (big_buffer)
+			ret = i40e_aq_remove_cloud_filters_big_buffer(
+				hw, vsi->seid, cld_filter, 1);
+		else
+			ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+					&cld_filter->element, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+			rte_free(cld_filter);
+			return -ENOTSUP;
+		}
+		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+	}

 	rte_free(cld_filter);
 	return ret;
@@ -6554,8 +7284,9 @@ i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
 	/* Now check if there is space to add the new port */
 	idx = i40e_get_vxlan_port_idx(pf, 0);
 	if (idx < 0) {
-		PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
-			    "not adding port %d", port);
+		PMD_DRV_LOG(ERR,
+			    "Maximum number of UDP ports reached, not adding port %d",
+			    port);
 		return -ENOSPC;
 	}

@@ -6794,7 +7525,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
 	int ret = -EINVAL;

 	val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
-	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
+	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);

 	if (len == 3) {
 		reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
@@ -6813,7 +7544,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
 	} else {
 		ret = 0;
 	}
-	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
+	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
 		    I40E_READ_REG(hw,
I40E_GL_PRS_FVBM(2))); return ret; @@ -6926,15 +7657,15 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) if (enable > 0) { if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { - PMD_DRV_LOG(INFO, "Symmetric hash has already " - "been enabled"); + PMD_DRV_LOG(INFO, + "Symmetric hash has already been enabled"); return; } reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK; } else { if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) { - PMD_DRV_LOG(INFO, "Symmetric hash has already " - "been disabled"); + PMD_DRV_LOG(INFO, + "Symmetric hash has already been disabled"); return; } reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; @@ -7051,23 +7782,60 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, pctype = i40e_flowtype_to_pctype(i); reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg); + if (hw->mac.type == I40E_MAC_X722) { + if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP), + reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP), + reg); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK), + reg); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP), + reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP), + reg); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK), + reg); + } else { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), + reg); + } + } else { + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg); + } } reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) { /* Toeplitz */ if (reg & I40E_GLQF_CTL_HTOEP_MASK) { - PMD_DRV_LOG(DEBUG, "Hash function already set to " - "Toeplitz"); + PMD_DRV_LOG(DEBUG, + "Hash function already set to Toeplitz"); goto out; } reg |= I40E_GLQF_CTL_HTOEP_MASK; } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { /* Simple XOR */ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { - PMD_DRV_LOG(DEBUG, "Hash function already set to " - "Simple XOR"); + PMD_DRV_LOG(DEBUG, + "Hash function already set to Simple XOR"); goto out; } reg &= ~I40E_GLQF_CTL_HTOEP_MASK; @@ -7110,7 +7878,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7129,7 +7896,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD, -#endif [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7139,7 +7905,6 @@ i40e_get_valid_input_set(enum 
i40e_filter_pctype pctype, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7149,7 +7914,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, -#endif [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7183,7 +7947,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7202,7 +7965,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, -#endif [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7212,7 +7974,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7222,7 +7983,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, -#endif [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7262,7 +8022,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | @@ -7273,19 +8032,16 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#endif [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#endif [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | @@ -7307,7 +8063,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | 
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | @@ -7318,19 +8073,16 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#endif [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#endif [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | @@ -7374,7 +8126,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype, } /* default input set fields combination per pctype */ -static uint64_t +uint64_t i40e_get_default_input_set(uint16_t pctype) { static const uint64_t default_inset_table[] = { @@ -7383,22 +8135,18 @@ i40e_get_default_input_set(uint16_t pctype) [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#endif [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#endif [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | @@ -7410,22 +8158,18 @@ i40e_get_default_input_set(uint16_t pctype) [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#endif [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, -#endif [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | @@ -7681,10 +8425,10 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { uint32_t reg = i40e_read_rx_ctl(hw, addr); - PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg); + PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg); if (reg != val) i40e_write_rx_ctl(hw, addr, val); - PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr, + PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr, (uint32_t)i40e_read_rx_ctl(hw, addr)); } @@ -7995,16 +8739,95 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev, return ret; } +/* Convert ethertype filter structure */ +static int 
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+			      struct i40e_ethertype_filter *filter)
+{
+	rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+	filter->input.ether_type = input->ether_type;
+	filter->flags = input->flags;
+	filter->queue = input->queue;
+
+	return 0;
+}
+
+/* Check whether the ethertype filter exists */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+			const struct i40e_ethertype_filter_input *input)
+{
+	int ret;
+
+	ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+	if (ret < 0)
+		return NULL;
+
+	return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+				struct i40e_ethertype_filter *filter)
+{
+	struct i40e_ethertype_rule *rule = &pf->ethertype;
+	int ret;
+
+	ret = rte_hash_add_key(rule->hash_table, &filter->input);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to insert ethertype filter into hash table %d!",
+			    ret);
+		return ret;
+	}
+	rule->hash_map[ret] = filter;
+
+	TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+	return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+			     struct i40e_ethertype_filter_input *input)
+{
+	struct i40e_ethertype_rule *rule = &pf->ethertype;
+	struct i40e_ethertype_filter *filter;
+	int ret;
+
+	ret = rte_hash_del_key(rule->hash_table, input);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to delete ethertype filter from hash table %d!",
+			    ret);
+		return ret;
+	}
+	filter = rule->hash_map[ret];
+	rule->hash_map[ret] = NULL;
+
+	TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+	rte_free(filter);
+
+	return 0;
+}
+
 /*
  * Configure ethertype filter, which can direct packets by filtering
  * with mac address and ether_type or only ether_type
  */
-static int
+int
 i40e_ethertype_filter_set(struct i40e_pf *pf,
 			struct rte_eth_ethertype_filter *filter,
 			bool add)
 {
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+	struct i40e_ethertype_filter *ethertype_filter, *node;
+	struct i40e_ethertype_filter check_filter;
 	struct i40e_control_filter_stats stats;
 	uint16_t flags = 0;
 	int ret;
@@ -8015,13 +8838,29 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
 	}
 	if (filter->ether_type == ETHER_TYPE_IPv4 ||
 		filter->ether_type == ETHER_TYPE_IPv6) {
-		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
-			    " control packet filter.", filter->ether_type);
+		PMD_DRV_LOG(ERR,
+			    "unsupported ether_type(0x%04x) in control packet filter.",
+			    filter->ether_type);
 		return -EINVAL;
 	}
 	if (filter->ether_type == ETHER_TYPE_VLAN)
-		PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
-			    " not supported.");
+		PMD_DRV_LOG(WARNING,
+			    "filter vlan ether_type in first tag is not supported.");
+
+	/* Check if there is the filter in SW list */
+	memset(&check_filter, 0, sizeof(check_filter));
+	i40e_ethertype_filter_convert(filter, &check_filter);
+	node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+					       &check_filter.input);
+	if (add && node) {
+		PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+		return -EINVAL;
+	}
+
+	if (!add && !node) {
+		PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+		return -EINVAL;
+	}

 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
@@ -8036,14 +8875,25 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
 			pf->main_vsi->seid,
filter->queue, add, &stats, NULL); - PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d," - " mac_etype_used = %u, etype_used = %u," - " mac_etype_free = %u, etype_free = %u\n", - ret, stats.mac_etype_used, stats.etype_used, - stats.mac_etype_free, stats.etype_free); + PMD_DRV_LOG(INFO, + "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u", + ret, stats.mac_etype_used, stats.etype_used, + stats.mac_etype_free, stats.etype_free); if (ret < 0) return -ENOSYS; - return 0; + + /* Add or delete a filter in SW list */ + if (add) { + ethertype_filter = rte_zmalloc("ethertype_filter", + sizeof(*ethertype_filter), 0); + rte_memcpy(ethertype_filter, &check_filter, + sizeof(check_filter)); + ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter); + } else { + ret = i40e_sw_ethertype_filter_del(pf, &node->input); + } + + return ret; } /* @@ -8078,7 +8928,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev, FALSE); break; default: - PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); ret = -ENOSYS; break; } @@ -8116,6 +8966,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev, case RTE_ETH_FILTER_FDIR: ret = i40e_fdir_ctrl_func(dev, filter_op, arg); break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &i40e_flow_ops; + break; default: PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", filter_type); @@ -8133,10 +8988,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev, static void i40e_enable_extended_tag(struct rte_eth_dev *dev) { + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); uint32_t buf = 0; int ret; - ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CAP_REG); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", @@ -8149,7 +9005,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev) } buf = 0; - ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", @@ -8161,7 +9017,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev) return; } buf |= PCI_DEV_CTRL_EXT_TAG_MASK; - ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf), + ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x", @@ -8224,18 +9080,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4, [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = RTE_ETH_FLOW_NONFRAG_IPV4_UDP, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = RTE_ETH_FLOW_NONFRAG_IPV4_UDP, [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = RTE_ETH_FLOW_NONFRAG_IPV4_UDP, -#endif [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = RTE_ETH_FLOW_NONFRAG_IPV4_TCP, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = RTE_ETH_FLOW_NONFRAG_IPV4_TCP, -#endif [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = @@ -8243,18 +9095,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6, [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = RTE_ETH_FLOW_NONFRAG_IPV6_UDP, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = RTE_ETH_FLOW_NONFRAG_IPV6_UDP, 
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = RTE_ETH_FLOW_NONFRAG_IPV6_UDP, -#endif [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = RTE_ETH_FLOW_NONFRAG_IPV6_TCP, -#ifdef X722_SUPPORT [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = RTE_ETH_FLOW_NONFRAG_IPV6_TCP, -#endif [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP, [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = @@ -8369,9 +9217,9 @@ i40e_configure_registers(struct i40e_hw *hw) ret = i40e_aq_debug_write_register(hw, reg_table[i].addr, reg_table[i].val, NULL); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the " - "address of 0x%"PRIx32, reg_table[i].val, - reg_table[i].addr); + PMD_DRV_LOG(ERR, + "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32, + reg_table[i].val, reg_table[i].addr); break; } PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of " @@ -8416,8 +9264,9 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi) I40E_VSI_L2TAGSTXVALID( vsi->vsi_id), reg, NULL); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to update " - "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id); + PMD_DRV_LOG(ERR, + "Failed to update VSI_L2TAGSTXVALID[%d]", + vsi->vsi_id); return I40E_ERR_CONFIG; } } @@ -8468,11 +9317,10 @@ i40e_aq_add_mirror_rule(struct i40e_hw *hw, rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL); - PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d," - "rule_id = %u" - " mirror_rules_used = %u, mirror_rules_free = %u,", - hw->aq.asq_last_status, resp->rule_id, - resp->mirror_rules_used, resp->mirror_rules_free); + PMD_DRV_LOG(INFO, + "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,", + hw->aq.asq_last_status, resp->rule_id, + resp->mirror_rules_used, resp->mirror_rules_free); *rule_id = rte_le_to_cpu_16(resp->rule_id); return status; @@ -8550,8 +9398,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev, PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id); if (pf->main_vsi->veb == NULL || pf->vfs == NULL) { - PMD_DRV_LOG(ERR, "mirror rule can not be configured" - " without veb or vfs."); + PMD_DRV_LOG(ERR, + "mirror rule can not be configured without veb or vfs."); return -ENOSYS; } if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) { @@ -8583,9 +9431,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev, mirr_rule->entries, mirr_rule->num_entries, mirr_rule->id); if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to remove mirror rule:" - " ret = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "failed to remove mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOSYS; } TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); @@ -8674,9 +9522,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev, mirr_rule->rule_type, mirr_rule->entries, j, &rule_id); if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to add mirror rule:" - " ret = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "failed to add mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); rte_free(mirr_rule); return -ENOSYS; } @@ -8728,9 +9576,9 @@ i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id) mirr_rule->entries, mirr_rule->num_entries, mirr_rule->id); if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to remove mirror rule:" - " status = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "failed to remove mirror rule: status = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOSYS; } TAILQ_REMOVE(&pf->mirror_list, mirr_rule, 
rules); @@ -9162,9 +10010,9 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid, &veb_bw, NULL); if (ret) { - PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation" - " per TC failed = %d", - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "AQ command Config switch_comp BW allocation per TC failed = %d", + hw->aq.asq_last_status); return ret; } @@ -9172,16 +10020,18 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, &ets_query, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS" - " configuration %u", hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "Failed to get switch_comp ETS configuration %u", + hw->aq.asq_last_status); return ret; } memset(&bw_query, 0, sizeof(bw_query)); ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, &bw_query, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth" - " configuration %u", hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "Failed to get switch_comp bandwidth configuration %u", + hw->aq.asq_last_status); return ret; } @@ -9246,8 +10096,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) } ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL); if (ret) { - PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation" - " per TC failed = %d", + PMD_INIT_LOG(ERR, + "AQ command Config VSI BW allocation per TC failed = %d", hw->aq.asq_last_status); goto out; } @@ -9268,9 +10118,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) /* Update the VSI after updating the VSI queue-mapping information */ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { - PMD_INIT_LOG(ERR, "Failed to configure " - "TC queue mapping = %d", - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d", + hw->aq.asq_last_status); goto out; } /* update the local VSI info with updated queue map */ @@ -9322,8 +10171,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, /* Use the FW API if FW > v4.4*/ if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) || (hw->aq.fw_maj_ver >= 5))) { - PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API" - " to configure DCB"); + PMD_INIT_LOG(ERR, + "FW < v4.4, can not use FW LLDP API to configure DCB"); return I40E_ERR_FIRMWARE_API_VERSION; } @@ -9338,8 +10187,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, old_cfg->etsrec = old_cfg->etscfg; ret = i40e_set_dcb_config(hw); if (ret) { - PMD_INIT_LOG(ERR, - "Set DCB Config failed, err %s aq_err %s\n", + PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; @@ -9371,7 +10219,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map); if (ret) PMD_INIT_LOG(WARNING, - "Failed configuring TC for VEB seid=%d\n", + "Failed configuring TC for VEB seid=%d", main_vsi->veb->seid); } /* Update each VSI */ @@ -9389,8 +10237,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, I40E_DEFAULT_TCMAP); if (ret) PMD_INIT_LOG(WARNING, - "Failed configuring TC for VSI seid=%d\n", - vsi_list->vsi->seid); + "Failed configuring TC for VSI seid=%d", + vsi_list->vsi->seid); /* continue */ } } @@ -9409,7 +10257,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret 
= 0; + int i, ret = 0; if ((pf->flags & I40E_FLAG_DCB) == 0) { PMD_INIT_LOG(ERR, "HW doesn't support DCB"); @@ -9436,6 +10284,9 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) hw->local_dcbx_config.etscfg.tcbwtable[0] = 100; hw->local_dcbx_config.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; + /* all UPs mapping to TC0 */ + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + hw->local_dcbx_config.etscfg.prioritytable[i] = 0; hw->local_dcbx_config.etsrec = hw->local_dcbx_config.etscfg; hw->local_dcbx_config.pfc.willing = 0; @@ -9450,15 +10301,15 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) I40E_APP_PROTOID_FCOE; ret = i40e_set_dcb_config(hw); if (ret) { - PMD_INIT_LOG(ERR, "default dcb config fails." - " err = %d, aq_err = %d.", ret, - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "default dcb config fails. err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOSYS; } } else { - PMD_INIT_LOG(ERR, "DCB initialization in FW fails," - " err = %d, aq_err = %d.", ret, - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "DCB initialization in FW fails, err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOTSUP; } } else { @@ -9469,14 +10320,14 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) ret = i40e_init_dcb(hw); if (!ret) { if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { - PMD_INIT_LOG(ERR, "HW doesn't support" - " DCBX offload."); + PMD_INIT_LOG(ERR, + "HW doesn't support DCBX offload."); return -ENOTSUP; } } else { - PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d," - " aq_err = %d.", ret, - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "DCBX configuration failed, err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOTSUP; } } @@ -9585,7 +10436,8 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); @@ -9610,7 +10462,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); I40E_WRITE_FLUSH(hw); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(&pci_dev->intr_handle); return 0; } @@ -9618,7 +10470,8 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t msix_intr; @@ -9740,8 +10593,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct rte_eth_dev_data *dev_data = pf->dev_data; - uint32_t frame_size = mtu + ETHER_HDR_LEN - + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE; + uint32_t frame_size = mtu + I40E_ETH_OVERHEAD; int ret = 0; /* check if mtu is within the allowed range */ @@ -9750,8 +10602,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* mtu setting is forbidden if port is start */ if (dev_data->dev_started) { - PMD_DRV_LOG(ERR, - "port %d must be stopped 
before configuration\n", + PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", dev_data->port_id); return -EBUSY; } @@ -9765,3 +10616,230 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return ret; } + +/* Restore ethertype filter */ +static void +i40e_ethertype_filter_restore(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_ethertype_filter_list + *ethertype_list = &pf->ethertype.ethertype_list; + struct i40e_ethertype_filter *f; + struct i40e_control_filter_stats stats; + uint16_t flags; + + TAILQ_FOREACH(f, ethertype_list, rules) { + flags = 0; + if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC)) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC; + if (f->flags & RTE_ETHTYPE_FLAGS_DROP) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP; + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE; + + memset(&stats, 0, sizeof(stats)); + i40e_aq_add_rem_control_packet_filter(hw, + f->input.mac_addr.addr_bytes, + f->input.ether_type, + flags, pf->main_vsi->seid, + f->queue, 1, &stats, NULL); + } + PMD_DRV_LOG(INFO, "Ethertype filter:" + " mac_etype_used = %u, etype_used = %u," + " mac_etype_free = %u, etype_free = %u", + stats.mac_etype_used, stats.etype_used, + stats.mac_etype_free, stats.etype_free); +} + +/* Restore tunnel filter */ +static void +i40e_tunnel_filter_restore(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_pf_vf *vf; + struct i40e_tunnel_filter_list + *tunnel_list = &pf->tunnel.tunnel_list; + struct i40e_tunnel_filter *f; + struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter; + bool big_buffer = 0; + + TAILQ_FOREACH(f, tunnel_list, rules) { + if (!f->is_to_vf) + vsi = pf->main_vsi; + else { + vf = &pf->vfs[f->vf_id]; + vsi = vf->vsi; + } + memset(&cld_filter, 0, sizeof(cld_filter)); + ether_addr_copy((struct ether_addr *)&f->input.outer_mac, + (struct ether_addr *)&cld_filter.element.outer_mac); + ether_addr_copy((struct ether_addr *)&f->input.inner_mac, + (struct ether_addr *)&cld_filter.element.inner_mac); + cld_filter.element.inner_vlan = f->input.inner_vlan; + cld_filter.element.flags = f->input.flags; + cld_filter.element.tenant_id = f->input.tenant_id; + cld_filter.element.queue_number = f->queue; + rte_memcpy(cld_filter.general_fields, + f->input.general_fields, + sizeof(f->input.general_fields)); + + if (((f->input.flags & + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) == + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) || + ((f->input.flags & + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) == + I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) || + ((f->input.flags & + I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) == + I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ)) + big_buffer = 1; + + if (big_buffer) + i40e_aq_add_cloud_filters_big_buffer(hw, + vsi->seid, &cld_filter, 1); + else + i40e_aq_add_cloud_filters(hw, vsi->seid, + &cld_filter.element, 1); + } +} + +static void +i40e_filter_restore(struct i40e_pf *pf) +{ + i40e_ethertype_filter_restore(pf); + i40e_tunnel_filter_restore(pf); + i40e_fdir_filter_restore(pf); +} + +static bool +is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) +{ + if (strcmp(dev->data->drv_name, + drv->driver.name)) + return false; + + return true; +} + +bool +is_i40e_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &rte_i40e_pmd); +} + +/* Create a QinQ cloud filter + * + * The Fortville NIC has limited resources for tunnel filters, + * so we can only reuse existing filters. 
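+ * The rewrite is requested only once per physical function:
+ * i40e_dev_consistent_tunnel_filter_set() latches
+ * pf->qinq_replace_flag after the first attempt.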
+ *
+ * In step 1 we define which Field Vector fields can be used for
+ * filter types.
+ * As we do not have the inner tag defined as a field,
+ * we have to define it first, by reusing one of L1 entries.
+ *
+ * In step 2 we replace one of the existing filter types with
+ * a new one for QinQ.
+ * As we are reusing L1 and replacing L2, some of the default filter
+ * types will disappear, depending on which L1 and L2 entries we reuse.
+ *
+ * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
+ *
+ * 1.	Create L1 filter of outer vlan (12b) which will be in use
+ *	later when we define the cloud filter.
+ *	a.	Valid_flags.replace_cloud = 0
+ *	b.	Old_filter = 10 (Stag_Inner_Vlan)
+ *	c.	New_filter = 0x10
+ *	d.	TR bit = 0xff (optional, not used here)
+ *	e.	Buffer – 2 entries:
+ *		i.	Byte 0 = 8 (outer vlan FV index).
+ *			Byte 1 = 0 (rsv)
+ *			Byte 2-3 = 0x0fff
+ *		ii.	Byte 0 = 37 (inner vlan FV index).
+ *			Byte 1 = 0 (rsv)
+ *			Byte 2-3 = 0x0fff
+ *
+ * Step 2:
+ * 2.	Create cloud filter using two L1 filter entries: stag and
+ *	new filter (outer vlan + inner vlan)
+ *	a.	Valid_flags.replace_cloud = 1
+ *	b.	Old_filter = 1 (instead of outer IP)
+ *	c.	New_filter = 0x10
+ *	d.	Buffer – 2 entries:
+ *		i.	Byte 0 = 0x80 | 7 (valid | Stag).
+ *			Byte 1-3 = 0 (rsv)
+ *		ii.	Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
+ *			Byte 9-11 = 0 (rsv)
+ */
+static int
+i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
+{
+	int ret = -ENOTSUP;
+	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
+	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+	/* Init */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+	/* create L1 filter */
+	filter_replace.old_filter_type =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace.tr_bit = 0;
+
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Field Vector 12b mask */
+	filter_replace_buf.data[2] = 0xff;
+	filter_replace_buf.data[3] = 0x0f;
+	filter_replace_buf.data[4] =
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	/* Field Vector 12b mask */
+	filter_replace_buf.data[6] = 0xff;
+	filter_replace_buf.data[7] = 0x0f;
+	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					    &filter_replace_buf);
+	if (ret != I40E_SUCCESS)
+		return ret;
+
+	/* Apply the second L2 cloud filter */
+	memset(&filter_replace, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+	memset(&filter_replace_buf, 0,
+	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+	/* create L2 filter, input for L2 filter will be L1 filter */
+	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+
+	/* Prepare the buffer, 2 entries */
+	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+	filter_replace_buf.data[0] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+	filter_replace_buf.data[4] |=
+		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
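+	/*
+	 * Note: if this second replace command fails, the L1 rewrite
+	 * issued above is not rolled back; the result of the command
+	 * is simply relayed to the caller.
+	 */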
+	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+					    &filter_replace_buf);
+	return ret;
+}
+
+RTE_INIT(i40e_init_log);
+static void
+i40e_init_log(void)
+{
+	i40e_logtype_init = rte_log_register("pmd.i40e.init");
+	if (i40e_logtype_init >= 0)
+		rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
+	i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
+	if (i40e_logtype_driver >= 0)
+		rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
+}
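Illustrative sketch (an assumption, not part of this patch): the new
RTE_ETH_FILTER_GENERIC case in i40e_dev_filter_ctrl() does not perform a
filter operation; it hands back a pointer to i40e_flow_ops. A minimal
example of how an application of this era could fetch those ops through
the public ethdev API follows; the helper name get_flow_ops() and the
NULL-on-failure convention are the sketch's own choices.

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Return the rte_flow ops a port exposes, or NULL if unsupported. */
static const struct rte_flow_ops *
get_flow_ops(uint8_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	/* With RTE_ETH_FILTER_GET, the driver stores its ops pointer
	 * into *arg; the i40e PMD rejects any other filter_op here.
	 */
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL;
	return ops;
}

rte_flow_validate() and rte_flow_create() perform the same lookup
internally, so applications normally call those rather than fetching the
ops directly.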